diff --git a/CMakeLists.txt b/CMakeLists.txt index 0436f5b25923927edaa7568ba57c7b948446f8b1..b78b89690858b91a95273b67272b72b9dd771b0f 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -1,4 +1,4 @@ -cmake_minimum_required(VERSION 3.16) +cmake_minimum_required(VERSION 3.0) project( TDengine @@ -35,7 +35,7 @@ endif(${BUILD_TEST}) add_subdirectory(source) add_subdirectory(tools) add_subdirectory(tests) -add_subdirectory(example) +add_subdirectory(examples/c) # docs add_subdirectory(docs) diff --git a/Jenkinsfile2 b/Jenkinsfile2 index db49ab27d76f03bbaab0e0bf4aeba74b2f7ae361..a2b55e3acca0c141a2d550ccabb5bb129adb3d7e 100644 --- a/Jenkinsfile2 +++ b/Jenkinsfile2 @@ -269,7 +269,7 @@ pipeline { } } stage('linux test') { - agent{label " slave3_0 || slave15 || slave16 || slave17 "} + agent{label " worker03 || slave215 || slave217 || slave219 "} options { skipDefaultCheckout() } when { changeRequest() @@ -287,9 +287,9 @@ pipeline { ''' sh ''' cd ${WKC}/tests/parallel_test - export DEFAULT_RETRY_TIME=1 + export DEFAULT_RETRY_TIME=2 date - timeout 2100 time ./run.sh -e -m /home/m.json -t /tmp/cases.task -b ${BRANCH_NAME} -l ${WKDIR}/log -o 480 + timeout 2100 time ./run.sh -e -m /home/m.json -t /tmp/cases.task -b ${BRANCH_NAME}_${BUILD_ID} -l ${WKDIR}/log -o 480 ''' } } diff --git a/cmake/cmake.define b/cmake/cmake.define index a8bab17aba8a412099b34c6d82c9787468bc89e8..8d71870e7d8ce3e554dd9c6810ea3829e5e9511a 100644 --- a/cmake/cmake.define +++ b/cmake/cmake.define @@ -1,4 +1,4 @@ -cmake_minimum_required(VERSION 3.16) +cmake_minimum_required(VERSION 3.0) set(CMAKE_VERBOSE_MAKEFILE OFF) @@ -46,7 +46,7 @@ ENDIF () IF (TD_WINDOWS) MESSAGE("${Yellow} set compiler flag for Windows! ${ColourReset}") - SET(COMMON_FLAGS "/w /D_WIN32 /Zi") + SET(COMMON_FLAGS "/w /D_WIN32 /DWIN32 /Zi") SET(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} /MANIFEST:NO") # IF (MSVC AND (MSVC_VERSION GREATER_EQUAL 1900)) # SET(COMMON_FLAGS "${COMMON_FLAGS} /Wv:18") diff --git a/cmake/cmake.options b/cmake/cmake.options index cb6fd1400d43b6073d81ab43e46140343b277512..ab3c5ac1ad08b98ee2dbe09692584be63e477d71 100644 --- a/cmake/cmake.options +++ b/cmake/cmake.options @@ -49,7 +49,7 @@ IF(${TD_WINDOWS}) option( BUILD_TEST "If build unit tests using googletest" - OFF + ON ) ELSE () diff --git a/cmake/cmake.platform b/cmake/cmake.platform index 0312f92a5b4116cad03d4bb9c2e7556d7a35deb2..acf17e9427bc453e1ece67cca5cbfe45f8827337 100644 --- a/cmake/cmake.platform +++ b/cmake/cmake.platform @@ -1,4 +1,4 @@ -cmake_minimum_required(VERSION 3.16) +cmake_minimum_required(VERSION 3.0) MESSAGE("Current system is ${CMAKE_SYSTEM_NAME}") diff --git a/contrib/test/craft/raftMain.c b/contrib/test/craft/raftMain.c index 12be3deb2e33aba9be9b45acd1595a749ab1b2c5..e1c66422b3b90b23ff8c6f01cf07aa8adace5983 100644 --- a/contrib/test/craft/raftMain.c +++ b/contrib/test/craft/raftMain.c @@ -243,7 +243,7 @@ void console(SRaftServer *pRaftServer) { } else if (strcmp(cmd, "dropnode") == 0) { - char host[HOST_LEN]; + char host[HOST_LEN] = {0}; uint32_t port; parseAddr(param1, host, HOST_LEN, &port); uint64_t rid = raftId(host, port); @@ -258,7 +258,7 @@ void console(SRaftServer *pRaftServer) { } else if (strcmp(cmd, "put") == 0) { - char buf[256]; + char buf[256] = {0}; snprintf(buf, sizeof(buf), "%s--%s", param1, param2); putValue(&pRaftServer->raft, buf); diff --git a/docs-cn/02-intro.md b/docs-cn/02-intro.md index 949c21472dd29d51f2703034bd38ab95037e09c6..673c2e96b65814fc1cd572d54f948793ed6fa521 100644 --- a/docs-cn/02-intro.md +++ b/docs-cn/02-intro.md @@ -62,7 
+62,7 @@ TDengine的主要功能如下:
-![TDengine技术生态图](eco_system.webp) +![TDengine Database 技术生态图](eco_system.webp)
图 1. TDengine技术生态图
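The contrib/test/craft/raftMain.c hunks above zero-initialize two stack buffers (`char host[HOST_LEN] = {0};`, `char buf[256] = {0};`) before they are filled. Below is a minimal sketch of the failure mode those hunks guard against — it assumes a hypothetical `parse_addr()` helper that, like `parseAddr()` in the diff, may return early on malformed input and leave the output buffer unwritten; it is illustrative, not the actual TDengine code:

```c
#include <stdio.h>
#include <string.h>

#define HOST_LEN 64

/* Hypothetical stand-in for parseAddr(): on input without a ':'
 * separator it returns early and never writes to `host`. */
static void parse_addr(const char *addr, char *host, size_t host_len) {
    const char *sep = strchr(addr, ':');
    if (sep == NULL) return;             /* malformed input: host left untouched */
    size_t n = (size_t)(sep - addr);
    if (n >= host_len) n = host_len - 1;
    memcpy(host, addr, n);
    host[n] = '\0';
}

int main(void) {
    /* Without `= {0}` the array contents are indeterminate, and the
     * printf below would read garbage (undefined behavior) whenever
     * parse_addr() bails out early. */
    char host[HOST_LEN] = {0};           /* the diff's fix */
    parse_addr("no-separator-here", host, sizeof(host));
    printf("host: '%s'\n", host);        /* safely prints an empty string */
    return 0;
}
```

The same reasoning applies to the `buf[256]` passed to `snprintf` in the `put` branch: `snprintf` itself always NUL-terminates, so the `= {0}` there is defensive, keeping the buffer well-defined on any path that skips the write.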
diff --git a/docs-cn/07-develop/03-insert-data/01-sql-writing.mdx b/docs-cn/07-develop/03-insert-data/01-sql-writing.mdx
index e63ffce6dd07366da99fe1f41d0a2a8d7a623f31..99a92573c87d0f90f699a8d1352619f4df4aef39 100644
--- a/docs-cn/07-develop/03-insert-data/01-sql-writing.mdx
+++ b/docs-cn/07-develop/03-insert-data/01-sql-writing.mdx
@@ -52,7 +52,7 @@ INSERT INTO d1001 VALUES (1538548685000, 10.3, 219, 0.31) (1538548695000, 12.6,
:::info
-- 要提高写入效率,需要批量写入。一批写入的记录条数越多,插入效率就越高。但一条记录不能超过 16K,一条 SQL 语句总长度不能超过 1M 。
+- 要提高写入效率,需要批量写入。一批写入的记录条数越多,插入效率就越高。但一条记录不能超过 48K,一条 SQL 语句总长度不能超过 1M 。
- TDengine 支持多线程同时写入,要进一步提高写入速度,一个客户端需要打开 20 个以上的线程同时写。但线程数达到一定数量后,无法再提高,甚至还会下降,因为线程频繁切换,带来额外开销。
:::
diff --git a/docs-cn/07-develop/06-subscribe.mdx b/docs-cn/07-develop/06-subscribe.mdx
index ad5561fa09087c4c562ac340506f56d756bd98b2..0f531e07c9dce7dbb03bacebf8e5cbefae82671f 100644
--- a/docs-cn/07-develop/06-subscribe.mdx
+++ b/docs-cn/07-develop/06-subscribe.mdx
@@ -145,7 +145,7 @@ void subscribe_callback(TAOS_SUB* tsub, TAOS_RES *res, void* param, int code) {
taos_unsubscribe(tsub, keep);
```
-其第二个参数,用于决定是否在客户端保留订阅的进度信息。如果这个参数是**false**(**0**),那无论下次调用 `taos_subscribe` 时的 `restart` 参数是什么,订阅都只能重新开始。另外,进度信息的保存位置是 _{DataDir}/subscribe/_ 这个目录下,每个订阅有一个与其 `topic` 同名的文件,删掉某个文件,同样会导致下次创建其对应的订阅时只能重新开始。
+其第二个参数,用于决定是否在客户端保留订阅的进度信息。如果这个参数是**false**(**0**),那无论下次调用 `taos_subscribe` 时的 `restart` 参数是什么,订阅都只能重新开始。另外,进度信息的保存位置是 _{DataDir}/subscribe/_ 这个目录下(注:`taos.cfg` 配置文件中 `DataDir` 参数值默认为 **/var/lib/taos/**,但是 Windows 服务器上本身不存在该目录,所以需要在 Windows 的配置文件中修改 `DataDir` 参数值为相应的已存在目录),每个订阅有一个与其 `topic` 同名的文件,删掉某个文件,同样会导致下次创建其对应的订阅时只能重新开始。
代码介绍完毕,我们来看一下实际的运行效果。假设:
diff --git a/docs-cn/12-taos-sql/01-data-type.md b/docs-cn/12-taos-sql/01-data-type.md
index be5c9a8cb4ed7f4ed9f9c7e11faf1b0f8f6e51b8..8ac6ee3b872bd31f616ea0aea3fd4a093abb4402 100644
--- a/docs-cn/12-taos-sql/01-data-type.md
+++ b/docs-cn/12-taos-sql/01-data-type.md
@@ -4,6 +4,8 @@ title: 支持的数据类型
description: "TDengine 支持的数据类型: 时间戳、浮点型、JSON 类型等"
---
+## 时间戳
+
使用 TDengine,最重要的是时间戳。创建并插入记录、查询历史记录的时候,均需要指定时间戳。时间戳有如下规则:
- 时间格式为 `YYYY-MM-DD HH:mm:ss.MS`,默认时间分辨率为毫秒。比如:`2017-08-12 18:25:58.128`
@@ -12,39 +14,59 @@ description: "TDengine 支持的数据类型: 时间戳、浮点型、JSON 类
- Epoch Time:时间戳也可以是一个长整数,表示从格林威治时间 1970-01-01 00:00:00.000 (UTC/GMT) 开始的毫秒数(相应地,如果所在 Database 的时间精度设置为“微秒”,则长整型格式的时间戳含义也就对应于从格林威治时间 1970-01-01 00:00:00.000 (UTC/GMT) 开始的微秒数;纳秒精度逻辑类似。)
- 时间可以加减,比如 now-2h,表明查询时刻向前推 2 个小时(最近 2 小时)。数字后面的时间单位可以是 b(纳秒)、u(微秒)、a(毫秒)、s(秒)、m(分)、h(小时)、d(天)、w(周)。 比如 `select * from t1 where ts > now-2w and ts <= now-1w`,表示查询两周前整整一周的数据。在指定降采样操作(down sampling)的时间窗口(interval)时,时间单位还可以使用 n (自然月) 和 y (自然年)。
-TDengine 缺省的时间戳精度是毫秒,但通过在 `CREATE DATABASE` 时传递的 PRECISION 参数也可以支持微秒和纳秒。(从 2.1.5.0 版本开始支持纳秒精度)
+TDengine 缺省的时间戳精度是毫秒,但通过在 `CREATE DATABASE` 时传递的 PRECISION 参数也可以支持微秒和纳秒。
```sql
CREATE DATABASE db_name PRECISION 'ns';
```
+## 数据类型
-在 TDengine 中,普通表的数据模型中可使用以下 10 种数据类型。
+在 TDengine 中,普通表的数据模型中可使用以下数据类型。
| # | **类型** | **Bytes** | **说明** |
| --- | :-------: | --------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
-| 1 | TIMESTAMP | 8 | 时间戳。缺省精度毫秒,可支持微秒和纳秒。从格林威治时间 1970-01-01 00:00:00.000 (UTC/GMT) 开始,计时不能早于该时间。(从 2.0.18.0 版本开始,已经去除了这一时间范围限制)(从 2.1.5.0 
版本开始支持纳秒精度) |
-| 2 | INT | 4 | 整型,范围 [-2^31+1, 2^31-1], -2^31 用作 NULL |
-| 3 | BIGINT | 8 | 长整型,范围 [-2^63+1, 2^63-1], -2^63 用作 NULL |
-| 4 | FLOAT | 4 | 浮点型,有效位数 6-7,范围 [-3.4E38, 3.4E38] |
-| 5 | DOUBLE | 8 | 双精度浮点型,有效位数 15-16,范围 [-1.7E308, 1.7E308] |
-| 6 | BINARY | 自定义 | 记录单字节字符串,建议只用于处理 ASCII 可见字符,中文等多字节字符需使用 nchar。理论上,最长可以有 16374 字节。binary 仅支持字符串输入,字符串两端需使用单引号引用。使用时须指定大小,如 binary(20) 定义了最长为 20 个单字节字符的字符串,每个字符占 1 byte 的存储空间,总共固定占用 20 bytes 的空间,此时如果用户字符串超出 20 字节将会报错。对于字符串内的单引号,可以用转义字符反斜线加单引号来表示,即 `\’`。 |
-| 7 | SMALLINT | 2 | 短整型, 范围 [-32767, 32767], -32768 用作 NULL |
-| 8 | TINYINT | 1 | 单字节整型,范围 [-127, 127], -128 用作 NULL |
-| 9 | BOOL | 1 | 布尔型,{true, false} |
-| 10 | NCHAR | 自定义 | 记录包含多字节字符在内的字符串,如中文字符。每个 nchar 字符占用 4 bytes 的存储空间。字符串两端使用单引号引用,字符串内的单引号需用转义字符 `\’`。nchar 使用时须指定字符串大小,类型为 nchar(10) 的列表示此列的字符串最多存储 10 个 nchar 字符,会固定占用 40 bytes 的空间。如果用户字符串长度超出声明长度,将会报错。 |
-| 11 | JSON | | json 数据类型, 只有 tag 可以是 json 格式 |
-
-:::tip
-TDengine 对 SQL 语句中的英文字符不区分大小写,自动转化为小写执行。因此用户大小写敏感的字符串及密码,需要使用单引号将字符串引起来。
+| 1 | TIMESTAMP | 8 | 时间戳。缺省精度毫秒,可支持微秒和纳秒,详细说明见上节。 |
+| 2 | INT | 4 | 整型,范围 [-2^31, 2^31-1] |
+| 3 | INT UNSIGNED | 4 | 无符号整型,范围 [0, 2^32-1] |
+| 4 | BIGINT | 8 | 长整型,范围 [-2^63, 2^63-1] |
+| 5 | BIGINT UNSIGNED | 8 | 无符号长整型,范围 [0, 2^64-1] |
+| 6 | FLOAT | 4 | 浮点型,有效位数 6-7,范围 [-3.4E38, 3.4E38] |
+| 7 | DOUBLE | 8 | 双精度浮点型,有效位数 15-16,范围 [-1.7E308, 1.7E308] |
+| 8 | BINARY | 自定义 | 记录单字节字符串,建议只用于处理 ASCII 可见字符,中文等多字节字符需使用 nchar。 |
+| 9 | SMALLINT | 2 | 短整型, 范围 [-32768, 32767] |
+| 10 | SMALLINT UNSIGNED | 2 | 无符号短整型,范围 [0, 65535] |
+| 11 | TINYINT | 1 | 单字节整型,范围 [-128, 127] |
+| 12 | TINYINT UNSIGNED | 1 | 无符号单字节整型,范围 [0, 255] |
+| 13 | BOOL | 1 | 布尔型,{true, false} |
+| 14 | NCHAR | 自定义 | 记录包含多字节字符在内的字符串,如中文字符。每个 nchar 字符占用 4 bytes 的存储空间。字符串两端使用单引号引用,字符串内的单引号需用转义字符 `\’`。nchar 使用时须指定字符串大小,类型为 nchar(10) 的列表示此列的字符串最多存储 10 个 nchar 字符,会固定占用 40 bytes 的空间。如果用户字符串长度超出声明长度,将会报错。 |
+| 15 | JSON | | json 数据类型, 只有 tag 可以是 json 格式 |
+| 16 | VARCHAR | 自定义 | BINARY 类型的别名 |
-:::
:::note
-虽然 BINARY 类型在底层存储上支持字节型的二进制字符,但不同编程语言对二进制数据的处理方式并不保证一致,因此建议在 BINARY 类型中只存储 ASCII 可见字符,而避免存储不可见字符。多字节的数据,例如中文字符,则需要使用 NCHAR 类型进行保存。如果强行使用 BINARY 类型保存中文字符,虽然有时也能正常读写,但并不带有字符集信息,很容易出现数据乱码甚至数据损坏等情况。
+- TDengine 对 SQL 语句中的英文字符不区分大小写,自动转化为小写执行。因此用户大小写敏感的字符串及密码,需要使用单引号将字符串引起来。
+- 虽然 BINARY 类型在底层存储上支持字节型的二进制字符,但不同编程语言对二进制数据的处理方式并不保证一致,因此建议在 BINARY 类型中只存储 ASCII 可见字符,而避免存储不可见字符。多字节的数据,例如中文字符,则需要使用 NCHAR 类型进行保存。如果强行使用 BINARY 类型保存中文字符,虽然有时也能正常读写,但并不带有字符集信息,很容易出现数据乱码甚至数据损坏等情况。
+- BINARY 类型理论上最长可以有 16374 字节。binary 仅支持字符串输入,字符串两端需使用单引号引用。使用时须指定大小,如 binary(20) 定义了最长为 20 个单字节字符的字符串,每个字符占 1 byte 的存储空间,总共固定占用 20 bytes 的空间,此时如果用户字符串超出 20 字节将会报错。对于字符串内的单引号,可以用转义字符反斜线加单引号来表示,即 `\’`。
+- SQL 语句中的数值类型将依据是否存在小数点,或使用科学计数法表示,来判断数值类型是否为整型或者浮点型,因此在使用时要注意相应类型越界的情况。例如,9999999999999999999 会认为超过长整型的上边界而溢出,而 9999999999999999999.0 会被认为是有效的浮点数。
:::
+
+## 常量
+TDengine 支持多种类型的常量,细节如下表:
+
+| # | **语法** | **类型** | **说明** |
+| --- | :-------: | --------- | -------------------------------------- |
+| 1 | [{+ \| -}]123 | BIGINT | 整型数值的字面量的类型均为BIGINT。如果用户输入超过了BIGINT的表示范围,TDengine 按BIGINT对数值进行截断。|
+| 2 | 123.45 | DOUBLE | 浮点数值的字面量的类型均为DOUBLE。TDengine依据是否存在小数点,或使用科学计数法表示,来判断数值类型是否为整型或者浮点型。|
+| 3 | 1.2E3 | DOUBLE | 科学计数法的字面量的类型为DOUBLE。|
+| 4 | 'abc' | BINARY | 单引号括住的内容为字符串字面值,其类型为BINARY,BINARY的size为实际的字符个数。对于字符串内的单引号,可以用转义字符反斜线加单引号来表示,即 \'。|
+| 5 | "abc" | BINARY | 双引号括住的内容为字符串字面值,其类型为BINARY,BINARY的size为实际的字符个数。对于字符串内的双引号,可以用转义字符反斜线加双引号来表示,即 \"。 |
+| 6 | TIMESTAMP {'literal' \| "literal"} | TIMESTAMP | TIMESTAMP关键字表示后面的字符串字面量需要被解释为TIMESTAMP类型。字符串需要满足YYYY-MM-DD 
HH:mm:ss.MS格式,其时间分辨率为当前数据库的时间分辨率。 | +| 7 | {TRUE \| FALSE} | BOOL | 布尔类型字面量。 | +| 8 | {'' \| "" \| '\t' \| "\t" \| ' ' \| " " \| NULL } | -- | 空值字面量。可以用于任意类型。| + :::note -SQL 语句中的数值类型将依据是否存在小数点,或使用科学计数法表示,来判断数值类型是否为整型或者浮点型,因此在使用时要注意相应类型越界的情况。例如,9999999999999999999 会认为超过长整型的上边界而溢出,而 9999999999999999999.0 会被认为是有效的浮点数。 +- TDengine依据是否存在小数点,或使用科学计数法表示,来判断数值类型是否为整型或者浮点型,因此在使用时要注意相应类型越界的情况。例如,9999999999999999999会认为超过长整型的上边界而溢出,而9999999999999999999.0会被认为是有效的浮点数。 ::: diff --git a/docs-cn/12-taos-sql/03-table.md b/docs-cn/12-taos-sql/03-table.md index 675c157b3def0d670f771f55b767f3ca4f2a28af..d7235f312933ec46ed427d5da7e2c5a229fa2926 100644 --- a/docs-cn/12-taos-sql/03-table.md +++ b/docs-cn/12-taos-sql/03-table.md @@ -12,7 +12,7 @@ CREATE TABLE [IF NOT EXISTS] tb_name (timestamp_field_name TIMESTAMP, field1_nam 1. 表的第一个字段必须是 TIMESTAMP,并且系统自动将其设为主键; 2. 表名最大长度为 192; -3. 表的每行长度不能超过 16k 个字符;(注意:每个 BINARY/NCHAR 类型的列还会额外占用 2 个字节的存储位置) +3. 表的每行长度不能超过 48KB;(注意:每个 BINARY/NCHAR 类型的列还会额外占用 2 个字节的存储位置) 4. 子表名只能由字母、数字和下划线组成,且不能以数字开头,不区分大小写 5. 使用数据类型 binary 或 nchar,需指定其最长的字节数,如 binary(20),表示 20 字节; 6. 为了兼容支持更多形式的表名,TDengine 引入新的转义符 "\`",可以让表名与关键词不冲突,同时不受限于上述表名称合法性约束检查。但是同样具有长度限制要求。使用转义字符以后,不再对转义字符中的内容进行大小写统一。 diff --git a/docs-cn/12-taos-sql/04-stable.md b/docs-cn/12-taos-sql/04-stable.md index a3c227317c85917b64b2477994d335710610ec70..3901427736e80bc8dd0dd87b454947af6e586561 100644 --- a/docs-cn/12-taos-sql/04-stable.md +++ b/docs-cn/12-taos-sql/04-stable.md @@ -86,7 +86,7 @@ ALTER STABLE stb_name MODIFY COLUMN field_name data_type(length); ALTER STABLE stb_name ADD TAG new_tag_name tag_type; ``` -为 STable 增加一个新的标签,并指定新标签的类型。标签总数不能超过 128 个,总长度不超过 16k 个字符。 +为 STable 增加一个新的标签,并指定新标签的类型。标签总数不能超过 128 个,总长度不超过 16KB 。 ### 删除标签 diff --git a/docs-cn/12-taos-sql/07-function.md b/docs-cn/12-taos-sql/07-function.md index f6e564419ddaa18931b0f0e0e4e7b5b3219a92f6..7674967f09fb0c9e3069097dbc2bf35e93256992 100644 --- a/docs-cn/12-taos-sql/07-function.md +++ b/docs-cn/12-taos-sql/07-function.md @@ -1,1794 +1,1217 @@ --- -sidebar_label: SQL 函数 -title: SQL 函数 +sidebar_label: 函数 +title: 函数 +toc_max_heading_level: 4 --- -## 聚合函数 +## 单行函数 -TDengine 支持针对数据的聚合查询。提供支持的聚合和选择函数如下: +单行函数为查询结果中的每一行返回一个结果行。 -### COUNT +### 数学函数 -``` -SELECT COUNT([*|field_name]) FROM tb_name [WHERE clause]; -``` +#### ABS -**功能说明**:统计表/超级表中记录行数或某列的非空值个数。 +```sql + SELECT ABS(field_name) FROM { tb_name | stb_name } [WHERE clause] +``` -**返回数据类型**:长整型 INT64。 +**功能说明**:获得指定列的绝对值 -**应用字段**:应用全部字段。 +**返回结果类型**:如果输入值为整数,输出值是 UBIGINT 类型。如果输入值是 FLOAT/DOUBLE 数据类型,输出值是 DOUBLE 数据类型。 -**适用于**:表、超级表。 +**适用数据类型**:数值类型。 -**使用说明**: +**嵌套子查询支持**:适用于内层查询和外层查询。 -- 可以使用星号(\*)来替代具体的字段,使用星号(\*)返回全部记录数量。 -- 针对同一表的(不包含 NULL 值)字段查询结果均相同。 -- 如果统计对象是具体的列,则返回该列中非 NULL 值的记录数量。 +**适用于**: 表和超级表 -**示例**: +**使用说明**:只能与普通列,选择(Selection)、投影(Projection)函数一起使用,不能与聚合(Aggregation)函数一起使用。 -``` -taos> SELECT COUNT(*), COUNT(voltage) FROM meters; - count(*) | count(voltage) | -================================================ - 9 | 9 | -Query OK, 1 row(s) in set (0.004475s) +#### ACOS -taos> SELECT COUNT(*), COUNT(voltage) FROM d1001; - count(*) | count(voltage) | -================================================ - 3 | 3 | -Query OK, 1 row(s) in set (0.001075s) +```sql + SELECT ACOS(field_name) FROM { tb_name | stb_name } [WHERE clause] ``` -### AVG - -``` -SELECT AVG(field_name) FROM tb_name [WHERE clause]; -``` +**功能说明**:获得指定列的反余弦结果 -**功能说明**:统计表/超级表中某列的平均值。 +**返回结果类型**:DOUBLE。如果输入值为 NULL,输出值也为 NULL -**返回数据类型**:双精度浮点数 Double。 +**适用数据类型**:数值类型。 -**应用字段**:不能应用在 timestamp、binary、nchar、bool 字段。 
+**嵌套子查询支持**:适用于内层查询和外层查询。 -**适用于**:表、超级表。 +**适用于**: 表和超级表 -**示例**: +**使用说明**:只能与普通列,选择(Selection)、投影(Projection)函数一起使用,不能与聚合(Aggregation)函数一起使用。 -``` -taos> SELECT AVG(current), AVG(voltage), AVG(phase) FROM meters; - avg(current) | avg(voltage) | avg(phase) | -==================================================================================== - 11.466666751 | 220.444444444 | 0.293333333 | -Query OK, 1 row(s) in set (0.004135s) +#### ASIN -taos> SELECT AVG(current), AVG(voltage), AVG(phase) FROM d1001; - avg(current) | avg(voltage) | avg(phase) | -==================================================================================== - 11.733333588 | 219.333333333 | 0.316666673 | -Query OK, 1 row(s) in set (0.000943s) +```sql + SELECT ASIN(field_name) FROM { tb_name | stb_name } [WHERE clause] ``` -### TWA - -``` -SELECT TWA(field_name) FROM tb_name WHERE clause; -``` +**功能说明**:获得指定列的反正弦结果 -**功能说明**:时间加权平均函数。统计表中某列在一段时间内的时间加权平均。 +**返回结果类型**:DOUBLE。如果输入值为 NULL,输出值也为 NULL -**返回数据类型**:双精度浮点数 Double。 +**适用数据类型**:数值类型。 -**应用字段**:不能应用在 timestamp、binary、nchar、bool 类型字段。 +**嵌套子查询支持**:适用于内层查询和外层查询。 -**适用于**:表、超级表。 +**适用于**: 表和超级表 -**使用说明**: +**使用说明**:只能与普通列,选择(Selection)、投影(Projection)函数一起使用,不能与聚合(Aggregation)函数一起使用。 -- 从 2.1.3.0 版本开始,TWA 函数可以在由 GROUP BY 划分出单独时间线的情况下用于超级表(也即 GROUP BY tbname)。 -### IRATE +#### ATAN -``` -SELECT IRATE(field_name) FROM tb_name WHERE clause; +```sql + SELECT ATAN(field_name) FROM { tb_name | stb_name } [WHERE clause] ``` -**功能说明**:计算瞬时增长率。使用时间区间中最后两个样本数据来计算瞬时增长速率;如果这两个值呈递减关系,那么只取最后一个数用于计算,而不是使用二者差值。 +**功能说明**:获得指定列的反正切结果 -**返回数据类型**:双精度浮点数 Double。 +**返回结果类型**:DOUBLE。如果输入值为 NULL,输出值也为 NULL -**应用字段**:不能应用在 timestamp、binary、nchar、bool 类型字段。 +**适用数据类型**:数值类型。 -**适用于**:表、超级表。 +**嵌套子查询支持**:适用于内层查询和外层查询。 -**使用说明**: +**适用于**: 表和超级表 -- 从 2.1.3.0 版本开始此函数可用,IRATE 可以在由 GROUP BY 划分出单独时间线的情况下用于超级表(也即 GROUP BY tbname)。 +**使用说明**:只能与普通列,选择(Selection)、投影(Projection)函数一起使用,不能与聚合(Aggregation)函数一起使用。 -### SUM + +#### CEIL ``` -SELECT SUM(field_name) FROM tb_name [WHERE clause]; +SELECT CEIL(field_name) FROM { tb_name | stb_name } [WHERE clause]; ``` -**功能说明**:统计表/超级表中某列的和。 +**功能说明**:获得指定列的向上取整数的结果。 -**返回数据类型**:双精度浮点数 Double 和长整型 INT64。 +**返回结果类型**:与指定列的原始数据类型一致。例如,如果指定列的原始数据类型为 Float,那么返回的数据类型也为 Float;如果指定列的原始数据类型为 Double,那么返回的数据类型也为 Double。 -**应用字段**:不能应用在 timestamp、binary、nchar、bool 类型字段。 +**适用数据类型**:数值类型。 -**适用于**:表、超级表。 +**适用于**: 普通表、超级表。 -**示例**: +**嵌套子查询支持**:适用于内层查询和外层查询。 -``` -taos> SELECT SUM(current), SUM(voltage), SUM(phase) FROM meters; - sum(current) | sum(voltage) | sum(phase) | -================================================================================ - 103.200000763 | 1984 | 2.640000001 | -Query OK, 1 row(s) in set (0.001702s) +**使用说明**: -taos> SELECT SUM(current), SUM(voltage), SUM(phase) FROM d1001; - sum(current) | sum(voltage) | sum(phase) | -================================================================================ - 35.200000763 | 658 | 0.950000018 | -Query OK, 1 row(s) in set (0.000980s) -``` +- 支持 +、-、\*、/ 运算,如 ceil(col1) + ceil(col2)。 +- 只能与普通列,选择(Selection)、投影(Projection)函数一起使用,不能与聚合(Aggregation)函数一起使用。 -### STDDEV +#### COS -``` -SELECT STDDEV(field_name) FROM tb_name [WHERE clause]; +```sql + SELECT COS(field_name) FROM { tb_name | stb_name } [WHERE clause] ``` -**功能说明**:统计表中某列的均方差。 +**功能说明**:获得指定列的余弦结果 -**返回数据类型**:双精度浮点数 Double。 +**返回结果类型**:DOUBLE。如果输入值为 NULL,输出值也为 NULL -**应用字段**:不能应用在 timestamp、binary、nchar、bool 类型字段。 +**适用数据类型**:数值类型。 -**适用于**:表、超级表(从 2.0.15.1 版本开始) +**嵌套子查询支持**:适用于内层查询和外层查询。 -**示例**: +**适用于**: 表和超级表 -``` -taos> SELECT 
STDDEV(current) FROM d1001; - stddev(current) | -============================ - 1.020892909 | -Query OK, 1 row(s) in set (0.000915s) -``` +**使用说明**:只能与普通列,选择(Selection)、投影(Projection)函数一起使用,不能与聚合(Aggregation)函数一起使用。 -### LEASTSQUARES +#### FLOOR ``` -SELECT LEASTSQUARES(field_name, start_val, step_val) FROM tb_name [WHERE clause]; +SELECT FLOOR(field_name) FROM { tb_name | stb_name } [WHERE clause]; ``` -**功能说明**:统计表中某列的值是主键(时间戳)的拟合直线方程。start_val 是自变量初始值,step_val 是自变量的步长值。 - -**返回数据类型**:字符串表达式(斜率, 截距)。 - -**应用字段**:不能应用在 timestamp、binary、nchar、bool 类型字段。 - -**适用于**:表。 +**功能说明**:获得指定列的向下取整数的结果。 + 其他使用说明参见 CEIL 函数描述。 -**示例**: +#### LOG -``` -taos> SELECT LEASTSQUARES(current, 1, 1) FROM d1001; - leastsquares(current, 1, 1) | -===================================================== -{slop:1.000000, intercept:9.733334} | -Query OK, 1 row(s) in set (0.000921s) +```sql + SELECT LOG(field_name, base) FROM { tb_name | stb_name } [WHERE clause] ``` -### MODE +**功能说明**:获得指定列对于底数 base 的对数 -``` -SELECT MODE(field_name) FROM tb_name [WHERE clause]; -``` +**返回结果类型**:DOUBLE。如果输入值为 NULL,输出值也为 NULL -**功能说明**:返回出现频率最高的值,若存在多个频率相同的最高值,输出空。不能匹配标签、时间戳输出。 +**适用数据类型**:数值类型。 -**返回数据类型**:同应用的字段。 +**嵌套子查询支持**:适用于内层查询和外层查询。 -**应用字段**:适合于除时间主列外的任何类型字段。 +**适用于**: 表和超级表 -**使用说明**:由于返回数据量未知,考虑到内存因素,为了函数可以正常返回结果,建议不重复的数据量在 10 万级别,否则会报错。 +**使用说明**:只能与普通列,选择(Selection)、投影(Projection)函数一起使用,不能与聚合(Aggregation)函数一起使用。 -**支持的版本**:2.6.0.0 及以后的版本。 -**示例**: +#### POW +```sql + SELECT POW(field_name, power) FROM { tb_name | stb_name } [WHERE clause] ``` -taos> select voltage from d002; - voltage | -======================== - 1 | - 1 | - 2 | - 19 | -Query OK, 4 row(s) in set (0.003545s) -taos> select mode(voltage) from d002; - mode(voltage) | -======================== - 1 | -Query OK, 1 row(s) in set (0.019393s) -``` +**功能说明**:获得指定列的指数为 power 的幂 -### HYPERLOGLOG +**返回结果类型**:DOUBLE。如果输入值为 NULL,输出值也为 NULL -``` -SELECT HYPERLOGLOG(field_name) FROM { tb_name | stb_name } [WHERE clause]; -``` +**适用数据类型**:数值类型。 -**功能说明**: - - 采用 hyperloglog 算法,返回某列的基数。该算法在数据量很大的情况下,可以明显降低内存的占用,但是求出来的基数是个估算值,标准误差(标准误差是多次实验,每次的平均数的标准差,不是与真实结果的误差)为 0.81%。 - - 在数据量较少的时候该算法不是很准确,可以使用 select count(data) from (select unique(col) as data from table) 的方法。 +**嵌套子查询支持**:适用于内层查询和外层查询。 -**返回结果类型**:整形。 +**适用于**: 表和超级表 -**应用字段**:适合于任何类型字段。 +**使用说明**:只能与普通列,选择(Selection)、投影(Projection)函数一起使用,不能与聚合(Aggregation)函数一起使用。 -**支持的版本**:2.6.0.0 及以后的版本。 -**示例**: +#### ROUND ``` -taos> select dbig from shll; - dbig | -======================== - 1 | - 1 | - 1 | - NULL | - 2 | - 19 | - NULL | - 9 | -Query OK, 8 row(s) in set (0.003755s) - -taos> select hyperloglog(dbig) from shll; - hyperloglog(dbig)| -======================== - 4 | -Query OK, 1 row(s) in set (0.008388s) +SELECT ROUND(field_name) FROM { tb_name | stb_name } [WHERE clause]; ``` -## 选择函数 +**功能说明**:获得指定列的四舍五入的结果。 + 其他使用说明参见 CEIL 函数描述。 -在使用所有的选择函数的时候,可以同时指定输出 ts 列或标签列(包括 tbname),这样就可以方便地知道被选出的值是源于哪个数据行的。 -### MIN +#### SIN +```sql + SELECT SIN(field_name) FROM { tb_name | stb_name } [WHERE clause] ``` -SELECT MIN(field_name) FROM {tb_name | stb_name} [WHERE clause]; -``` - -**功能说明**:统计表/超级表中某列的值最小值。 -**返回数据类型**:同应用的字段。 +**功能说明**:获得指定列的正弦结果 -**应用字段**:不能应用在 timestamp、binary、nchar、bool 类型字段。 +**返回结果类型**:DOUBLE。如果输入值为 NULL,输出值也为 NULL -**适用于**:表、超级表。 +**适用数据类型**:数值类型。 -**示例**: +**嵌套子查询支持**:适用于内层查询和外层查询。 -``` -taos> SELECT MIN(current), MIN(voltage) FROM meters; - min(current) | min(voltage) | -====================================== - 10.20000 | 218 | -Query OK, 1 row(s) in set (0.001765s) +**适用于**: 表和超级表 -taos> SELECT 
MIN(current), MIN(voltage) FROM d1001; - min(current) | min(voltage) | -====================================== - 10.30000 | 218 | -Query OK, 1 row(s) in set (0.000950s) -``` +**使用说明**:只能与普通列,选择(Selection)、投影(Projection)函数一起使用,不能与聚合(Aggregation)函数一起使用。 -### MAX +#### SQRT -``` -SELECT MAX(field_name) FROM { tb_name | stb_name } [WHERE clause]; +```sql + SELECT SQRT(field_name) FROM { tb_name | stb_name } [WHERE clause] ``` -**功能说明**:统计表/超级表中某列的值最大值。 +**功能说明**:获得指定列的平方根 -**返回数据类型**:同应用的字段。 +**返回结果类型**:DOUBLE。如果输入值为 NULL,输出值也为 NULL -**应用字段**:不能应用在 timestamp、binary、nchar、bool 类型字段。 +**适用数据类型**:数值类型。 -**适用于**:表、超级表。 +**嵌套子查询支持**:适用于内层查询和外层查询。 -**示例**: +**适用于**: 表和超级表 -``` -taos> SELECT MAX(current), MAX(voltage) FROM meters; - max(current) | max(voltage) | -====================================== - 13.40000 | 223 | -Query OK, 1 row(s) in set (0.001123s) +**使用说明**:只能与普通列,选择(Selection)、投影(Projection)函数一起使用,不能与聚合(Aggregation)函数一起使用。 -taos> SELECT MAX(current), MAX(voltage) FROM d1001; - max(current) | max(voltage) | -====================================== - 12.60000 | 221 | -Query OK, 1 row(s) in set (0.000987s) +#### TAN + +```sql + SELECT TAN(field_name) FROM { tb_name | stb_name } [WHERE clause] ``` -### FIRST +**功能说明**:获得指定列的正切结果 -``` -SELECT FIRST(field_name) FROM { tb_name | stb_name } [WHERE clause]; -``` +**返回结果类型**:DOUBLE。如果输入值为 NULL,输出值也为 NULL -**功能说明**:统计表/超级表中某列的值最先写入的非 NULL 值。 +**适用数据类型**:数值类型。 -**返回数据类型**:同应用的字段。 +**嵌套子查询支持**:适用于内层查询和外层查询。 -**应用字段**:所有字段。 +**适用于**: 表和超级表 -**适用于**:表、超级表。 +**使用说明**:只能与普通列,选择(Selection)、投影(Projection)函数一起使用,不能与聚合(Aggregation)函数一起使用。 -**使用说明**: +### 字符串函数 -- 如果要返回各个列的首个(时间戳最小)非 NULL 值,可以使用 FIRST(\*); -- 如果结果集中的某列全部为 NULL 值,则该列的返回结果也是 NULL; -- 如果结果集中所有列全部为 NULL 值,则不返回结果。 +字符串函数的输入参数为字符串类型,返回结果为数值类型或字符串类型。 -**示例**: +#### CHAR_LENGTH ``` -taos> SELECT FIRST(*) FROM meters; - first(ts) | first(current) | first(voltage) | first(phase) | -========================================================================================= -2018-10-03 14:38:04.000 | 10.20000 | 220 | 0.23000 | -Query OK, 1 row(s) in set (0.004767s) - -taos> SELECT FIRST(current) FROM d1002; - first(current) | -======================= - 10.20000 | -Query OK, 1 row(s) in set (0.001023s) + SELECT CHAR_LENGTH(str|column) FROM { tb_name | stb_name } [WHERE clause] ``` -### LAST +**功能说明**:以字符计数的字符串长度。 -``` -SELECT LAST(field_name) FROM { tb_name | stb_name } [WHERE clause]; -``` +**返回结果类型**:INT。如果输入值为NULL,输出值为NULL。 -**功能说明**:统计表/超级表中某列的值最后写入的非 NULL 值。 +**适用数据类型**:VARCHAR, NCHAR -**返回数据类型**:同应用的字段。 +**嵌套子查询支持**:适用于内层查询和外层查询。 -**应用字段**:所有字段。 +**适用于**: 表和超级表 -**适用于**:表、超级表。 +#### CONCAT -**使用说明**: +```sql + SELECT CONCAT(str1|column1, str2|column2, ...) 
FROM { tb_name | stb_name } [WHERE clause] +``` -- 如果要返回各个列的最后(时间戳最大)一个非 NULL 值,可以使用 LAST(\*); -- 如果结果集中的某列全部为 NULL 值,则该列的返回结果也是 NULL;如果结果集中所有列全部为 NULL 值,则不返回结果。 -- 在用于超级表时,时间戳完全一样且同为最大的数据行可能有多个,那么会从中随机返回一条,而并不保证多次运行所挑选的数据行必然一致。 +**功能说明**:字符串连接函数。 +**返回结果类型**:如果所有参数均为 VARCHAR 类型,则结果类型为 VARCHAR。如果参数包含NCHAR类型,则结果类型为NCHAR。如果输入值为NULL,输出值为NULL。 -**示例**: +**适用数据类型**:VARCHAR, NCHAR。 该函数最小参数个数为2个,最大参数个数为8个。 -``` -taos> SELECT LAST(*) FROM meters; - last(ts) | last(current) | last(voltage) | last(phase) | -======================================================================================== -2018-10-03 14:38:16.800 | 12.30000 | 221 | 0.31000 | -Query OK, 1 row(s) in set (0.001452s) +**嵌套子查询支持**:适用于内层查询和外层查询。 -taos> SELECT LAST(current) FROM d1002; - last(current) | -======================= - 10.30000 | -Query OK, 1 row(s) in set (0.000843s) -``` +**适用于**: 表和超级表 -### TOP + +#### CONCAT_WS ``` -SELECT TOP(field_name, K) FROM { tb_name | stb_name } [WHERE clause]; + SELECT CONCAT_WS(separator, str1|column1, str2|column2, ...) FROM { tb_name | stb_name } [WHERE clause] ``` -**功能说明**: 统计表/超级表中某列的值最大 _k_ 个非 NULL 值。如果多条数据取值一样,全部取用又会超出 k 条限制时,系统会从相同值中随机选取符合要求的数量返回。 +**功能说明**:带分隔符的字符串连接函数。 -**返回数据类型**:同应用的字段。 +**返回结果类型**:如果所有参数均为VARCHAR类型,则结果类型为VARCHAR。如果参数包含NCHAR类型,则结果类型为NCHAR。如果输入值为NULL,输出值为NULL。如果separator值不为NULL,其他输入为NULL,输出为空串。 -**应用字段**:不能应用在 timestamp、binary、nchar、bool 类型字段。 +**适用数据类型**:VARCHAR, NCHAR。 该函数最小参数个数为3个,最大参数个数为9个。 -**适用于**:表、超级表。 +**嵌套子查询支持**:适用于内层查询和外层查询。 -**使用说明**: +**适用于**: 表和超级表 -- *k*值取值范围 1≤*k*≤100; -- 系统同时返回该记录关联的时间戳列; -- 限制:TOP 函数不支持 FILL 子句。 -**示例**: +#### LENGTH ``` -taos> SELECT TOP(current, 3) FROM meters; - ts | top(current, 3) | -================================================= -2018-10-03 14:38:15.000 | 12.60000 | -2018-10-03 14:38:16.600 | 13.40000 | -2018-10-03 14:38:16.800 | 12.30000 | -Query OK, 3 row(s) in set (0.001548s) - -taos> SELECT TOP(current, 2) FROM d1001; - ts | top(current, 2) | -================================================= -2018-10-03 14:38:15.000 | 12.60000 | -2018-10-03 14:38:16.800 | 12.30000 | -Query OK, 2 row(s) in set (0.000810s) + SELECT LENGTH(str|column) FROM { tb_name | stb_name } [WHERE clause] ``` -### BOTTOM +**功能说明**:以字节计数的字符串长度。 -``` -SELECT BOTTOM(field_name, K) FROM { tb_name | stb_name } [WHERE clause]; -``` +**返回结果类型**:INT。 -**功能说明**:统计表/超级表中某列的值最小 _k_ 个非 NULL 值。如果多条数据取值一样,全部取用又会超出 k 条限制时,系统会从相同值中随机选取符合要求的数量返回。 +**适用数据类型**:输入参数是 VARCHAR 类型或者 NCHAR 类型的字符串或者列。 -**返回数据类型**:同应用的字段。 +**嵌套子查询支持**:适用于内层查询和外层查询。 -**应用字段**:不能应用在 timestamp、binary、nchar、bool 类型字段。 +**适用于**: 表和超级表 -**适用于**:表、超级表。 -**使用说明**: +#### LOWER -- *k*值取值范围 1≤*k*≤100; -- 系统同时返回该记录关联的时间戳列; -- 限制:BOTTOM 函数不支持 FILL 子句。 +``` + SELECT LOWER(str|column) FROM { tb_name | stb_name } [WHERE clause] +``` -**示例**: +**功能说明**:将字符串参数值转换为全小写字母。 -``` -taos> SELECT BOTTOM(voltage, 2) FROM meters; - ts | bottom(voltage, 2) | -=============================================== -2018-10-03 14:38:15.000 | 218 | -2018-10-03 14:38:16.650 | 218 | -Query OK, 2 row(s) in set (0.001332s) +**返回结果类型**:同输入类型。如果输入值为NULL,输出值为NULL。 -taos> SELECT BOTTOM(current, 2) FROM d1001; - ts | bottom(current, 2) | -================================================= -2018-10-03 14:38:05.000 | 10.30000 | -2018-10-03 14:38:16.800 | 12.30000 | -Query OK, 2 row(s) in set (0.000793s) -``` +**适用数据类型**:输入参数是 VARCHAR 类型或者 NCHAR 类型的字符串或者列。 -### PERCENTILE +**嵌套子查询支持**:适用于内层查询和外层查询。 + +**适用于**: 表和超级表 + + +#### LTRIM ``` -SELECT PERCENTILE(field_name, P) FROM { tb_name } [WHERE clause]; + SELECT LTRIM(str|column) FROM { 
tb_name | stb_name } [WHERE clause]
```
-**功能说明**:统计表中某列的值百分比分位数。
-
-**返回数据类型**: 双精度浮点数 Double。
+**功能说明**:返回清除左边空格后的字符串。
-**应用字段**:不能应用在 timestamp、binary、nchar、bool 类型字段。
+**返回结果类型**:同输入类型。如果输入值为NULL,输出值为NULL。
-**适用于**:表。
+**适用数据类型**:输入参数是 VARCHAR 类型或者 NCHAR 类型的字符串或者列。
-**使用说明**:*P*值取值范围 0≤*P*≤100,为 0 的时候等同于 MIN,为 100 的时候等同于 MAX。
+**嵌套子查询支持**:适用于内层查询和外层查询。
-**示例**:
+**适用于**: 表和超级表
-```
-taos> SELECT PERCENTILE(current, 20) FROM d1001;
-percentile(current, 20) |
-============================
- 11.100000191 |
-Query OK, 1 row(s) in set (0.000787s)
-```
-### APERCENTILE
+#### RTRIM
```
-SELECT APERCENTILE(field_name, P[, algo_type])
-FROM { tb_name | stb_name } [WHERE clause]
+ SELECT RTRIM(str|column) FROM { tb_name | stb_name } [WHERE clause]
```
-**功能说明**:统计表/超级表中指定列的值百分比分位数,与 PERCENTILE 函数相似,但是返回近似结果。
+**功能说明**:返回清除右边空格后的字符串。
-**返回数据类型**: 双精度浮点数 Double。
+**返回结果类型**:同输入类型。如果输入值为NULL,输出值为NULL。
-**应用字段**:不能应用在 timestamp、binary、nchar、bool 类型字段。
+**适用数据类型**:输入参数是 VARCHAR 类型或者 NCHAR 类型的字符串或者列。
-**适用于**:表、超级表。
+**嵌套子查询支持**:适用于内层查询和外层查询。
-**使用说明**
+**适用于**: 表和超级表
-- **P**值有效取值范围 0≤P≤100,为 0 的时候等同于 MIN,为 100 的时候等同于 MAX;
-- **algo_type**的有效输入:**default** 和 **t-digest**
-- 用于指定计算近似分位数的算法。可不提供第三个参数的输入,此时将使用 default 的算法进行计算,即 apercentile(column_name, 50, "default") 与 apercentile(column_name, 50) 等价。
-- 当使用“t-digest”参数的时候,将使用 t-digest 方式采样计算近似分位数。但该参数指定计算算法的功能从 2.2.0.x 版本开始支持,2.2.0.0 之前的版本不支持指定使用算法的功能。
-**嵌套子查询支持**:适用于内层查询和外层查询。
+#### SUBSTR
```
-taos> SELECT APERCENTILE(current, 20) FROM d1001;
-apercentile(current, 20) |
-============================
- 10.300000191 |
-Query OK, 1 row(s) in set (0.000645s)
-
-taos> select apercentile (count, 80, 'default') from stb1;
- apercentile (c0, 80, 'default') |
-==================================
- 601920857.210056424 |
-Query OK, 1 row(s) in set (0.012363s)
-
-taos> select apercentile (count, 80, 't-digest') from stb1;
- apercentile (c0, 80, 't-digest') |
-===================================
- 605869120.966666579 |
-Query OK, 1 row(s) in set (0.011639s)
-```
-
-### LAST_ROW
-
-```
-SELECT LAST_ROW(field_name) FROM { tb_name | stb_name };
+ SELECT SUBSTR(str,pos[,len]) FROM { tb_name | stb_name } [WHERE clause]
```
-**功能说明**:返回表/超级表的最后一条记录。
-
-**返回数据类型**:同应用的字段。
-
-**应用字段**:所有字段。
-
-**适用于**:表、超级表。
+**功能说明**:从源字符串 str 中的指定位置 pos 开始取一个长度为 len 的子串并返回。
-**使用说明**:
+**返回结果类型**:同输入类型。如果输入值为NULL,输出值为NULL。
-- 在用于超级表时,时间戳完全一样且同为最大的数据行可能有多个,那么会从中随机返回一条,而并不保证多次运行所挑选的数据行必然一致。
-- 不能与 INTERVAL 一起使用。
+**适用数据类型**:输入参数是 VARCHAR 类型或者 NCHAR 类型的字符串或者列。输入参数pos可以为正数,也可以为负数。如果pos是正数,表示开始位置从字符串开头正数计算。如果pos为负数,表示开始位置从字符串结尾倒数计算。如果输入参数len被忽略,返回的子串包含从pos开始的整个字串。
-**示例**:
+**嵌套子查询支持**:适用于内层查询和外层查询。
-```
- taos> SELECT LAST_ROW(current) FROM meters;
- last_row(current) |
- =======================
- 12.30000 |
- Query OK, 1 row(s) in set (0.001238s)
+**适用于**: 表和超级表
- taos> SELECT LAST_ROW(current) FROM d1002;
- last_row(current) |
- =======================
- 10.30000 |
- Query OK, 1 row(s) in set (0.001042s)
-```
-### INTERP [2.3.1 及之后的版本]
+#### UPPER
```
-SELECT INTERP(field_name) FROM { tb_name | stb_name } [WHERE where_condition] [ RANGE(timestamp1,timestamp2) ] [EVERY(interval)] [FILL ({ VALUE | PREV | NULL | LINEAR | NEXT})];
+ SELECT UPPER(str|column) FROM { tb_name | stb_name } [WHERE clause]
```
-**功能说明**:返回表/超级表的指定时间截面指定列的记录值(插值)。
-
-**返回数据类型**:同字段类型。
-
-**应用字段**:数值型字段。
-
-**适用于**:表、超级表、嵌套查询。
+**功能说明**:将字符串参数值转换为全大写字母。
+**返回结果类型**:同输入类型。如果输入值为NULL,输出值为NULL。
+**适用数据类型**:输入参数是 VARCHAR 类型或者 NCHAR 类型的字符串或者列。
-**使用说明**
+**嵌套子查询支持**:适用于内层查询和外层查询。
-- INTERP 
用于在指定时间断面获取指定列的记录值,如果该时间断面不存在符合条件的行数据,那么会根据 FILL 参数的设定进行插值。 -- INTERP 的输入数据为指定列的数据,可以通过条件语句(where 子句)来对原始列数据进行过滤,如果没有指定过滤条件则输入为全部数据。 -- INTERP 的输出时间范围根据 RANGE(timestamp1,timestamp2)字段来指定,需满足 timestamp1<=timestamp2。其中 timestamp1(必选值)为输出时间范围的起始值,即如果 timestamp1 时刻符合插值条件则 timestamp1 为输出的第一条记录,timestamp2(必选值)为输出时间范围的结束值,即输出的最后一条记录的 timestamp 不能大于 timestamp2。如果没有指定 RANGE,那么满足过滤条件的输入数据中第一条记录的 timestamp 即为 timestamp1,最后一条记录的 timestamp 即为 timestamp2,同样也满足 timestamp1 <= timestamp2。 -- INTERP 根据 EVERY 字段来确定输出时间范围内的结果条数,即从 timestamp1 开始每隔固定长度的时间(EVERY 值)进行插值。如果没有指定 EVERY,则默认窗口大小为无穷大,即从 timestamp1 开始只有一个窗口。 -- INTERP 根据 FILL 字段来决定在每个符合输出条件的时刻如何进行插值,如果没有 FILL 字段则默认不插值,即输出为原始记录值或不输出(原始记录不存在)。 -- INTERP 只能在一个时间序列内进行插值,因此当作用于超级表时必须跟 group by tbname 一起使用,当作用嵌套查询外层时内层子查询不能含 GROUP BY 信息。 -- INTERP 的插值结果不受 ORDER BY timestamp 的影响,ORDER BY timestamp 只影响输出结果的排序。 +**适用于**: 表和超级表 -**SQL示例(基于文档中广泛使用的电表 schema )**: -- 单点线性插值 +### 转换函数 -``` - taos> SELECT INTERP(current) FROM t1 RANGE('2017-7-14 18:40:00','2017-7-14 18:40:00') FILL(LINEAR); -``` +转换函数将值从一种数据类型转换为另一种数据类型。 -- 在2017-07-14 18:00:00到2017-07-14 19:00:00间每隔5秒钟进行取值(不插值) +#### CAST -``` - taos> SELECT INTERP(current) FROM t1 RANGE('2017-7-14 18:00:00','2017-7-14 19:00:00') EVERY(5s); +```sql + SELECT CAST(expression AS type_name) FROM { tb_name | stb_name } [WHERE clause] ``` -- 在2017-07-14 18:00:00到2017-07-14 19:00:00间每隔5秒钟进行线性插值 +**功能说明**:数据类型转换函数,输入参数 expression 支持普通列、常量、标量函数及它们之间的四则运算,只适用于 select 子句中。 -``` - taos> SELECT INTERP(current) FROM t1 RANGE('2017-7-14 18:00:00','2017-7-14 19:00:00') EVERY(5s) FILL(LINEAR); -``` +**返回结果类型**:CAST 中指定的类型(type_name),可以是 BIGINT、BIGINT UNSIGNED、BINARY、VARCHAR、NCHAR和TIMESTAMP。 -- 在所有时间范围内每隔 5 秒钟进行向后插值 +**适用数据类型**:输入参数 expression 的类型可以是BLOB、MEDIUMBLOB和JSON外的所有类型 -``` - taos> SELECT INTERP(current) FROM t1 EVERY(5s) FILL(NEXT); -``` - -- 根据 2017-07-14 17:00:00 到 2017-07-14 20:00:00 间的数据进行从 2017-07-14 18:00:00 到 2017-07-14 19:00:00 间每隔 5 秒钟进行线性插值 +**使用说明**: -``` - taos> SELECT INTERP(current) FROM t1 where ts >= '2017-07-14 17:00:00' and ts <= '2017-07-14 20:00:00' RANGE('2017-7-14 18:00:00','2017-7-14 19:00:00') EVERY(5s) FILL(LINEAR); -``` +- 对于不能支持的类型转换会直接报错。 +- 如果输入值为NULL则输出值也为NULL。 +- 对于类型支持但某些值无法正确转换的情况对应的转换后的值以转换函数输出为准。目前可能遇到的几种情况: + 1)字符串类型转换数值类型时可能出现的无效字符情况,例如"a"可能转为0,但不会报错。 + 2)转换到数值类型时,数值大于type_name可表示的范围时,则会溢出,但不会报错。 + 3)转换到字符串类型时,如果转换后长度超过type_name的长度,则会截断,但不会报错。 -### INTERP [2.3.1 之前的版本] +#### TO_ISO8601 -``` -SELECT INTERP(field_name) FROM { tb_name | stb_name } WHERE ts='timestamp' [FILL ({ VALUE | PREV | NULL | LINEAR | NEXT})]; +```sql +SELECT TO_ISO8601(ts_val | ts_col) FROM { tb_name | stb_name } [WHERE clause]; ``` -**功能说明**:返回表/超级表的指定时间截面、指定字段的记录。 +**功能说明**:将 UNIX 时间戳转换成为 ISO8601 标准的日期时间格式,并附加客户端时区信息。 -**返回数据类型**:同字段类型。 +**返回结果数据类型**:VARCHAR 类型。 -**应用字段**:数值型字段。 +**适用数据类型**:UNIX 时间戳常量或是 TIMESTAMP 类型的列 **适用于**:表、超级表。 -**使用说明**: - -- 从 2.0.15.0 及以后版本可用 -- INTERP 必须指定时间断面,如果该时间断面不存在直接对应的数据,那么会根据 FILL 参数的设定进行插值。此外,条件语句里面可附带筛选条件,例如标签、tbname。 -- INTERP 查询要求查询的时间区间必须位于数据集合(表)的所有记录的时间范围之内。如果给定的时间戳位于时间范围之外,即使有插值指令,仍然不返回结果。 -- 单个 INTERP 函数查询只能够针对一个时间点进行查询,如果需要返回等时间间隔的断面数据,可以通过 INTERP 配合 EVERY 的方式来进行查询处理(而不是使用 INTERVAL),其含义是每隔固定长度的时间进行插值 - -**示例**: - -``` - taos> SELECT INTERP(*) FROM meters WHERE ts='2017-7-14 18:40:00.004'; - interp(ts) | interp(current) | interp(voltage) | interp(phase) | - ========================================================================================== - 2017-07-14 18:40:00.004 | 9.84020 | 216 | 0.32222 | - Query OK, 1 row(s) in set (0.002652s) -``` +**使用说明**: 
-如果给定的时间戳无对应的数据,在不指定插值生成策略的情况下,不会返回结果,如果指定了插值策略,会根据插值策略返回结果。 +- 如果输入是 UNIX 时间戳常量,返回格式精度由时间戳的位数决定; +- 如果输入是 TIMSTAMP 类型的列,返回格式的时间戳精度与当前 DATABASE 设置的时间精度一致。 -``` - taos> SELECT INTERP(*) FROM meters WHERE tbname IN ('d636') AND ts='2017-7-14 18:40:00.005'; - Query OK, 0 row(s) in set (0.004022s) - taos> SELECT INTERP(*) FROM meters WHERE tbname IN ('d636') AND ts='2017-7-14 18:40:00.005' FILL(PREV); - interp(ts) | interp(current) | interp(voltage) | interp(phase) | - ========================================================================================== - 2017-07-14 18:40:00.005 | 9.88150 | 217 | 0.32500 | - Query OK, 1 row(s) in set (0.003056s) -``` +#### TO_JSON -如下所示代码表示在时间区间 `['2017-7-14 18:40:00', '2017-7-14 18:40:00.014']` 中每隔 5 毫秒 进行一次断面计算。 - -``` - taos> SELECT INTERP(current) FROM d636 WHERE ts>='2017-7-14 18:40:00' AND ts<='2017-7-14 18:40:00.014' EVERY(5a); - ts | interp(current) | - ================================================= - 2017-07-14 18:40:00.000 | 10.04179 | - 2017-07-14 18:40:00.010 | 10.16123 | - Query OK, 2 row(s) in set (0.003487s) +```sql +SELECT TO_JSON(str_literal) FROM { tb_name | stb_name } [WHERE clause]; ``` -### TAIL +**功能说明**: 将字符串常量转换为 JSON 类型。 -``` -SELECT TAIL(field_name, k, offset_val) FROM {tb_name | stb_name} [WHERE clause]; -``` +**返回结果数据类型**: JSON -**功能说明**:返回跳过最后 offset_value 个,然后取连续 k 个记录,不忽略 NULL 值。offset_val 可以不输入。此时返回最后的 k 个记录。当有 offset_val 输入的情况下,该函数功能等效于 `order by ts desc LIMIT k OFFSET offset_val`。 +**适用数据类型**: JSON 字符串,形如 '{ "literal" : literal }'。'{}'表示空值。键必须为字符串字面量,值可以为数值字面量、字符串字面量、布尔字面量或空值字面量。str_literal中不支持转义符。 -**参数范围**:k: [1,100] offset_val: [0,100]。 - -**返回结果数据类型**:同应用的字段。 +**适用于**: 表和超级表 -**应用字段**:适合于除时间主列外的任何类型字段。 +**嵌套子查询支持**:适用于内层查询和外层查询。 -**支持版本**:2.6.0.0 及之后的版本。 -**示例**: +#### TO_UNIXTIMESTAMP +```sql +SELECT TO_UNIXTIMESTAMP(datetime_string | ts_col) FROM { tb_name | stb_name } [WHERE clause]; ``` -taos> select ts,dbig from tail2; - ts | dbig | -================================================== -2021-10-15 00:31:33.000 | 1 | -2021-10-17 00:31:31.000 | NULL | -2021-12-24 00:31:34.000 | 2 | -2022-01-01 08:00:05.000 | 19 | -2022-01-01 08:00:06.000 | NULL | -2022-01-01 08:00:07.000 | 9 | -Query OK, 6 row(s) in set (0.001952s) -taos> select tail(dbig,2,2) from tail2; -ts | tail(dbig,2,2) | -================================================== -2021-12-24 00:31:34.000 | 2 | -2022-01-01 08:00:05.000 | 19 | -Query OK, 2 row(s) in set (0.002307s) -``` +**功能说明**:将日期时间格式的字符串转换成为 UNIX 时间戳。 -### UNIQUE +**返回结果数据类型**:长整型 INT64。 -``` -SELECT UNIQUE(field_name) FROM {tb_name | stb_name} [WHERE clause]; -``` +**应用字段**:字符串常量或是 VARCHAR/NCHAR 类型的列。 -**功能说明**:返回该列的数值首次出现的值。该函数功能与 distinct 相似,但是可以匹配标签和时间戳信息。可以针对除时间列以外的字段进行查询,可以匹配标签和时间戳,其中的标签和时间戳是第一次出现时刻的标签和时间戳。 +**适用于**:表、超级表。 -**返回结果数据类型**:同应用的字段。 +**使用说明**: -**应用字段**:适合于除时间类型以外的字段。 +- 输入的日期时间字符串须符合 ISO8601/RFC3339 标准,无法转换的字符串格式将返回 0。 +- 返回的时间戳精度与当前 DATABASE 设置的时间精度一致。 -**支持版本**:2.6.0.0 及之后的版本。 -**使用说明**: +### 时间和日期函数 -- 该函数可以应用在普通表和超级表上。不能和窗口操作一起使用,例如 interval/state_window/session_window 。 -- 由于返回数据量未知,考虑到内存因素,为了函数可以正常返回结果,建议不重复的数据量在 10 万级别,否则会报错。 +时间和日期函数对时间戳类型进行操作。 -**示例**: +所有返回当前时间的函数,如NOW、TODAY和TIMEZONE,在一条SQL语句中不论出现多少次都只会被计算一次。 -``` -taos> select ts,voltage from unique1; - ts | voltage | -================================================== -2021-10-17 00:31:31.000 | 1 | -2022-01-24 00:31:31.000 | 1 | -2021-10-17 00:31:31.000 | 1 | -2021-12-24 00:31:31.000 | 2 | -2022-01-01 08:00:01.000 | 19 | -2021-10-17 00:31:31.000 | NULL | -2022-01-01 08:00:02.000 | NULL | -2022-01-01 08:00:03.000 | 9 
| -Query OK, 8 row(s) in set (0.003018s) +#### NOW -taos> select unique(voltage) from unique1; -ts | unique(voltage) | -================================================== -2021-10-17 00:31:31.000 | 1 | -2021-10-17 00:31:31.000 | NULL | -2021-12-24 00:31:31.000 | 2 | -2022-01-01 08:00:01.000 | 19 | -2022-01-01 08:00:03.000 | 9 | -Query OK, 5 row(s) in set (0.108458s) +```sql +SELECT NOW() FROM { tb_name | stb_name } [WHERE clause]; +SELECT select_expr FROM { tb_name | stb_name } WHERE ts_col cond_operatior NOW(); +INSERT INTO tb_name VALUES (NOW(), ...); ``` -## 计算函数 - -### DIFF - - ```sql - SELECT {DIFF(field_name, ignore_negative) | DIFF(field_name)} FROM tb_name [WHERE clause]; - ``` - -**功能说明**:统计表中某列的值与前一行对应值的差。 ignore_negative 取值为 0|1 , 可以不填,默认值为 0. 不忽略负值。ignore_negative 为 1 时表示忽略负数。 +**功能说明**:返回客户端当前系统时间。 -**返回结果数据类型**:同应用字段。 +**返回结果数据类型**:TIMESTAMP 时间戳类型。 -**应用字段**:不能应用在 timestamp、binary、nchar、bool 类型字段。 +**应用字段**:在 WHERE 或 INSERT 语句中使用时只能作用于 TIMESTAMP 类型的字段。 **适用于**:表、超级表。 -**使用说明**: - -- 输出结果行数是范围内总行数减一,第一行没有结果输出。 -- 从 2.1.3.0 版本开始,DIFF 函数可以在由 GROUP BY 划分出单独时间线的情况下用于超级表(也即 GROUP BY tbname)。 -- 从 2.6.0 开始,DIFF 函数支持 ignore_negative 参数 - -**示例**: - - ```sql - taos> SELECT DIFF(current) FROM d1001; - ts | diff(current) | - ================================================= - 2018-10-03 14:38:15.000 | 2.30000 | - 2018-10-03 14:38:16.800 | -0.30000 | - Query OK, 2 row(s) in set (0.001162s) - ``` - -### DERIVATIVE - -``` -SELECT DERIVATIVE(field_name, time_interval, ignore_negative) FROM tb_name [WHERE clause]; -``` - -**功能说明**:统计表中某列数值的单位变化率。其中单位时间区间的长度可以通过 time_interval 参数指定,最小可以是 1 秒(1s);ignore_negative 参数的值可以是 0 或 1,为 1 时表示忽略负值。 - -**返回数据类型**:双精度浮点数。 - -**应用字段**:不能应用在 timestamp、binary、nchar、bool 类型字段。 - -**适用于**:表、超级表 - -**使用说明**: +**使用说明**: -- 从 2.1.3.0 及以后版本可用;输出结果行数是范围内总行数减一,第一行没有结果输出。 -- DERIVATIVE 函数可以在由 GROUP BY 划分出单独时间线的情况下用于超级表(也即 GROUP BY tbname)。 +- 支持时间加减操作,如 NOW() + 1s, 支持的时间单位如下: + b(纳秒)、u(微秒)、a(毫秒)、s(秒)、m(分)、h(小时)、d(天)、w(周)。 +- 返回的时间戳精度与当前 DATABASE 设置的时间精度一致。 -**示例**: -``` -taos> select derivative(current, 10m, 0) from t1; - ts | derivative(current, 10m, 0) | -======================================================== - 2021-08-20 10:11:22.790 | 0.500000000 | - 2021-08-20 11:11:22.791 | 0.166666620 | - 2021-08-20 12:11:22.791 | 0.000000000 | - 2021-08-20 13:11:22.792 | 0.166666620 | - 2021-08-20 14:11:22.792 | -0.666666667 | -Query OK, 5 row(s) in set (0.004883s) -``` +#### TIMEDIFF -### SPREAD - -``` -SELECT SPREAD(field_name) FROM { tb_name | stb_name } [WHERE clause]; +```sql +SELECT TIMEDIFF(ts_val1 | datetime_string1 | ts_col1, ts_val2 | datetime_string2 | ts_col2 [, time_unit]) FROM { tb_name | stb_name } [WHERE clause]; ``` -**功能说明**:统计表/超级表中某列的最大值和最小值之差。 +**功能说明**:计算两个时间戳之间的差值,并近似到时间单位 time_unit 指定的精度。 -**返回数据类型**:双精度浮点数。 +**返回结果数据类型**:长整型 INT64。 -**应用字段**:不能应用在 binary、nchar、bool 类型字段。 +**应用字段**:UNIX 时间戳,日期时间格式的字符串,或者 TIMESTAMP 类型的列。 **适用于**:表、超级表。 -**使用说明**:可用于 TIMESTAMP 字段,此时表示记录的时间覆盖范围。 - -**示例**: - -``` -taos> SELECT SPREAD(voltage) FROM meters; - spread(voltage) | -============================ - 5.000000000 | -Query OK, 1 row(s) in set (0.001792s) +**使用说明**: +- 支持的时间单位 time_unit 如下: + 1u(微秒),1a(毫秒),1s(秒),1m(分),1h(小时),1d(天)。 +- 如果时间单位 time_unit 未指定, 返回的时间差值精度与当前 DATABASE 设置的时间精度一致。 -taos> SELECT SPREAD(voltage) FROM d1001; - spread(voltage) | -============================ - 3.000000000 | -Query OK, 1 row(s) in set (0.000836s) -``` -### CEIL +#### TIMETRUNCATE +```sql +SELECT TIMETRUNCATE(ts_val | datetime_string | ts_col, time_unit) FROM { tb_name | stb_name } 
[WHERE clause]; ``` -SELECT CEIL(field_name) FROM { tb_name | stb_name } [WHERE clause]; -``` - -**功能说明**:获得指定列的向上取整数的结果。 - -**返回结果类型**:与指定列的原始数据类型一致。例如,如果指定列的原始数据类型为 Float,那么返回的数据类型也为 Float;如果指定列的原始数据类型为 Double,那么返回的数据类型也为 Double。 -**适用数据类型**:不能应用在 timestamp、binary、nchar、bool 类型字段上;在超级表查询中使用时,不能应用在 tag 列,无论 tag 列的类型是什么类型。 - -**适用于**: 普通表、超级表。 - -**嵌套子查询支持**:适用于内层查询和外层查询。 - -**使用说明**: - -- 支持 +、-、\*、/ 运算,如 ceil(col1) + ceil(col2)。 -- 只能与普通列,选择(Selection)、投影(Projection)函数一起使用,不能与聚合(Aggregation)函数一起使用。 - -### FLOOR +**功能说明**:将时间戳按照指定时间单位 time_unit 进行截断。 -``` -SELECT FLOOR(field_name) FROM { tb_name | stb_name } [WHERE clause]; -``` +**返回结果数据类型**:TIMESTAMP 时间戳类型。 -**功能说明**:获得指定列的向下取整数的结果。 - 其他使用说明参见 CEIL 函数描述。 +**应用字段**:UNIX 时间戳,日期时间格式的字符串,或者 TIMESTAMP 类型的列。 -### ROUND +**适用于**:表、超级表。 -``` -SELECT ROUND(field_name) FROM { tb_name | stb_name } [WHERE clause]; -``` +**使用说明**: +- 支持的时间单位 time_unit 如下: + 1u(微秒),1a(毫秒),1s(秒),1m(分),1h(小时),1d(天)。 +- 返回的时间戳精度与当前 DATABASE 设置的时间精度一致。 -**功能说明**:获得指定列的四舍五入的结果。 - 其他使用说明参见 CEIL 函数描述。 -### CSUM +#### TIMEZONE ```sql - SELECT CSUM(field_name) FROM { tb_name | stb_name } [WHERE clause] +SELECT TIMEZONE() FROM { tb_name | stb_name } [WHERE clause]; ``` - **功能说明**:累加和(Cumulative sum),输出行与输入行数相同。 +**功能说明**:返回客户端当前时区信息。 - **返回结果类型**: 输入列如果是整数类型返回值为长整型 (int64_t),浮点数返回值为双精度浮点数(Double)。无符号整数类型返回值为无符号长整型(uint64_t)。 返回结果中同时带有每行记录对应的时间戳。 +**返回结果数据类型**:VARCHAR 类型。 - **适用数据类型**:不能应用在 timestamp、binary、nchar、bool 类型字段上;在超级表查询中使用时,不能应用在标签之上。 +**应用字段**:无 - **嵌套子查询支持**: 适用于内层查询和外层查询。 +**适用于**:表、超级表。 - **使用说明**: - - - 不支持 +、-、*、/ 运算,如 csum(col1) + csum(col2)。 - - 只能与聚合(Aggregation)函数一起使用。 该函数可以应用在普通表和超级表上。 - - 使用在超级表上的时候,需要搭配 Group by tbname使用,将结果强制规约到单个时间线。 -**支持版本**: 从2.3.0.x开始支持 - -### MAVG +#### TODAY ```sql - SELECT MAVG(field_name, K) FROM { tb_name | stb_name } [WHERE clause] +SELECT TODAY() FROM { tb_name | stb_name } [WHERE clause]; +SELECT select_expr FROM { tb_name | stb_name } WHERE ts_col cond_operatior TODAY()]; +INSERT INTO tb_name VALUES (TODAY(), ...); ``` - **功能说明**: 计算连续 k 个值的移动平均数(moving average)。如果输入行数小于 k,则无结果输出。参数 k 的合法输入范围是 1≤ k ≤ 1000。 - - **返回结果类型**: 返回双精度浮点数类型。 - - **适用数据类型**: 不能应用在 timestamp、binary、nchar、bool 类型上;在超级表查询中使用时,不能应用在标签之上。 - - **嵌套子查询支持**: 适用于内层查询和外层查询。 - - **使用说明**: - - - 不支持 +、-、*、/ 运算,如 mavg(col1, k1) + mavg(col2, k1); - - 只能与普通列,选择(Selection)、投影(Projection)函数一起使用,不能与聚合(Aggregation)函数一起使用; - - 该函数可以应用在普通表和超级表上;使用在超级表上的时候,需要搭配 Group by tbname使用,将结果强制规约到单个时间线。 +**功能说明**:返回客户端当日零时的系统时间。 -**支持版本**: 从2.3.0.x开始支持 +**返回结果数据类型**:TIMESTAMP 时间戳类型。 -### SAMPLE +**应用字段**:在 WHERE 或 INSERT 语句中使用时只能作用于 TIMESTAMP 类型的字段。 -```sql - SELECT SAMPLE(field_name, K) FROM { tb_name | stb_name } [WHERE clause] -``` +**适用于**:表、超级表。 - **功能说明**: 获取数据的 k 个采样值。参数 k 的合法输入范围是 1≤ k ≤ 1000。 +**使用说明**: - **返回结果类型**: 同原始数据类型, 返回结果中带有该行记录的时间戳。 +- 支持时间加减操作,如 TODAY() + 1s, 支持的时间单位如下: + b(纳秒),u(微秒),a(毫秒),s(秒),m(分),h(小时),d(天),w(周)。 +- 返回的时间戳精度与当前 DATABASE 设置的时间精度一致。 - **适用数据类型**: 在超级表查询中使用时,不能应用在标签之上。 - **嵌套子查询支持**: 适用于内层查询和外层查询。 +## 聚合函数 - **使用说明**: - - - 不能参与表达式计算;该函数可以应用在普通表和超级表上; - - 使用在超级表上的时候,需要搭配 Group by tbname 使用,将结果强制规约到单个时间线。 +聚合函数为查询结果集的每一个分组返回单个结果行。可以由 GROUP BY 或窗口切分子句指定分组,如果没有,则整个查询结果集视为一个分组。 -**支持版本**: 从2.3.0.x开始支持 +TDengine 支持针对数据的聚合查询。提供如下聚合函数。 -### ASIN +### AVG -```sql - SELECT ASIN(field_name) FROM { tb_name | stb_name } [WHERE clause] +``` +SELECT AVG(field_name) FROM tb_name [WHERE clause]; ``` -**功能说明**:获得指定列的反正弦结果 - -**返回结果类型**:DOUBLE。如果输入值为 NULL,输出值也为 NULL +**功能说明**:统计表/超级表中某列的平均值。 -**适用数据类型**:不能应用在 timestamp、binary、nchar、bool 类型字段上;在超级表查询中使用时,不能应用在 
tag 列 +**返回数据类型**:双精度浮点数 Double。 -**嵌套子查询支持**:适用于内层查询和外层查询。 +**适用数据类型**:数值类型。 -**使用说明**: +**适用于**:表、超级表。 -- 只能与普通列,选择(Selection)、投影(Projection)函数一起使用,不能与聚合(Aggregation)函数一起使用。 -- 该函数可以应用在普通表和超级表上。 -- 版本2.6.0.x后支持 -### ACOS +### COUNT -```sql - SELECT ACOS(field_name) FROM { tb_name | stb_name } [WHERE clause] +``` +SELECT COUNT([*|field_name]) FROM tb_name [WHERE clause]; ``` -**功能说明**:获得指定列的反余弦结果 +**功能说明**:统计表/超级表中记录行数或某列的非空值个数。 -**返回结果类型**:DOUBLE。如果输入值为 NULL,输出值也为 NULL +**返回数据类型**:长整型 INT64。 -**适用数据类型**:不能应用在 timestamp、binary、nchar、bool 类型字段上;在超级表查询中使用时,不能应用在 tag 列 +**适用数据类型**:应用全部字段。 -**嵌套子查询支持**:适用于内层查询和外层查询。 +**适用于**:表、超级表。 -**使用说明**: +**使用说明**: -- 只能与普通列,选择(Selection)、投影(Projection)函数一起使用,不能与聚合(Aggregation)函数一起使用。 -- 该函数可以应用在普通表和超级表上。 -- 版本2.6.0.x后支持 +- 可以使用星号(\*)来替代具体的字段,使用星号(\*)返回全部记录数量。 +- 针对同一表的(不包含 NULL 值)字段查询结果均相同。 +- 如果统计对象是具体的列,则返回该列中非 NULL 值的记录数量。 -### ATAN -```sql - SELECT ATAN(field_name) FROM { tb_name | stb_name } [WHERE clause] +### ELAPSED + +```mysql +SELECT ELAPSED(ts_primary_key [, time_unit]) FROM { tb_name | stb_name } [WHERE clause] [INTERVAL(interval [, offset]) [SLIDING sliding]]; ``` -**功能说明**:获得指定列的反正切结果 +**功能说明**:elapsed函数表达了统计周期内连续的时间长度,和twa函数配合使用可以计算统计曲线下的面积。在通过INTERVAL子句指定窗口的情况下,统计在给定时间范围内的每个窗口内有数据覆盖的时间范围;如果没有INTERVAL子句,则返回整个给定时间范围内的有数据覆盖的时间范围。注意,ELAPSED返回的并不是时间范围的绝对值,而是绝对值除以time_unit所得到的单位个数。 -**返回结果类型**:DOUBLE。如果输入值为 NULL,输出值也为 NULL +**返回结果类型**:Double -**适用数据类型**:不能应用在 timestamp、binary、nchar、bool 类型字段上;在超级表查询中使用时,不能应用在 tag 列 +**适用数据类型**:Timestamp类型 -**嵌套子查询支持**:适用于内层查询和外层查询。 +**支持的版本**:2.6.0.0 及以后的版本。 -**使用说明**: +**适用于**: 表,超级表,嵌套查询的外层查询 -- 只能与普通列,选择(Selection)、投影(Projection)函数一起使用,不能与聚合(Aggregation)函数一起使用。 -- 该函数可以应用在普通表和超级表上。 -- 版本2.6.0.x后支持 +**说明**: +- field_name参数只能是表的第一列,即timestamp主键列。 +- 按time_unit参数指定的时间单位返回,最小是数据库的时间分辨率。time_unit参数未指定时,以数据库的时间分辨率为时间单位。 +- 可以和interval组合使用,返回每个时间窗口的时间戳差值。需要特别注意的是,除第一个时间窗口和最后一个时间窗口外,中间窗口的时间戳差值均为窗口长度。 +- order by asc/desc不影响差值的计算结果。 +- 对于超级表,需要和group by tbname子句组合使用,不可以直接使用。 +- 对于普通表,不支持和group by子句组合使用。 +- 对于嵌套查询,仅当内层查询会输出隐式时间戳列时有效。例如select elapsed(ts) from (select diff(value) from sub1)语句,diff函数会让内层查询输出隐式时间戳列,此为主键列,可以用于elapsed函数的第一个参数。相反,例如select elapsed(ts) from (select * from sub1) 语句,ts列输出到外层时已经没有了主键列的含义,无法使用elapsed函数。此外,elapsed函数作为一个与时间线强依赖的函数,形如select elapsed(ts) from (select diff(value) from st group by tbname)尽管会返回一条计算结果,但并无实际意义,这种用法后续也将被限制。 +- 不支持与leastsquares、diff、derivative、top、bottom、last_row、interp等函数混合使用。 -### SIN +### LEASTSQUARES -```sql - SELECT SIN(field_name) FROM { tb_name | stb_name } [WHERE clause] +``` +SELECT LEASTSQUARES(field_name, start_val, step_val) FROM tb_name [WHERE clause]; ``` -**功能说明**:获得指定列的正弦结果 - -**返回结果类型**:DOUBLE。如果输入值为 NULL,输出值也为 NULL +**功能说明**:统计表中某列的值是主键(时间戳)的拟合直线方程。start_val 是自变量初始值,step_val 是自变量的步长值。 -**适用数据类型**:不能应用在 timestamp、binary、nchar、bool 类型字段上;在超级表查询中使用时,不能应用在 tag 列 +**返回数据类型**:字符串表达式(斜率, 截距)。 -**嵌套子查询支持**:适用于内层查询和外层查询。 +**适用数据类型**:field_name 必须是数值类型。 -**使用说明**: +**适用于**:表。 -- 只能与普通列,选择(Selection)、投影(Projection)函数一起使用,不能与聚合(Aggregation)函数一起使用。 -- 该函数可以应用在普通表和超级表上。 -- 版本2.6.0.x后支持 -### COS +### MODE -```sql - SELECT COS(field_name) FROM { tb_name | stb_name } [WHERE clause] +``` +SELECT MODE(field_name) FROM tb_name [WHERE clause]; ``` -**功能说明**:获得指定列的余弦结果 - -**返回结果类型**:DOUBLE。如果输入值为 NULL,输出值也为 NULL +**功能说明**:返回出现频率最高的值,若存在多个频率相同的最高值,输出空。 -**适用数据类型**:不能应用在 timestamp、binary、nchar、bool 类型字段上;在超级表查询中使用时,不能应用在 tag 列 +**返回数据类型**:同应用的字段。 -**嵌套子查询支持**:适用于内层查询和外层查询。 +**适用数据类型**: 数值类型。 -**使用说明**: +**适用于**:表和超级表。 -- 
只能与普通列,选择(Selection)、投影(Projection)函数一起使用,不能与聚合(Aggregation)函数一起使用。 -- 该函数可以应用在普通表和超级表上。 -- 版本2.6.0.x后支持 -### TAN +### SPREAD -```sql - SELECT TAN(field_name) FROM { tb_name | stb_name } [WHERE clause] +``` +SELECT SPREAD(field_name) FROM { tb_name | stb_name } [WHERE clause]; ``` -**功能说明**:获得指定列的正切结果 - -**返回结果类型**:DOUBLE。如果输入值为 NULL,输出值也为 NULL +**功能说明**:统计表/超级表中某列的最大值和最小值之差。 -**适用数据类型**:不能应用在 timestamp、binary、nchar、bool 类型字段上;在超级表查询中使用时,不能应用在 tag 列 +**返回数据类型**:双精度浮点数。 -**嵌套子查询支持**:适用于内层查询和外层查询。 +**适用数据类型**:数值类型或TIMESTAMP类型。 -**使用说明**: +**适用于**:表和超级表。 -- 只能与普通列,选择(Selection)、投影(Projection)函数一起使用,不能与聚合(Aggregation)函数一起使用。 -- 该函数可以应用在普通表和超级表上。 -- 版本2.6.0.x后支持 -### POW +### STDDEV -```sql - SELECT POW(field_name, power) FROM { tb_name | stb_name } [WHERE clause] +``` +SELECT STDDEV(field_name) FROM tb_name [WHERE clause]; ``` -**功能说明**:获得指定列的指数为 power 的幂 - -**返回结果类型**:DOUBLE。如果输入值为 NULL,输出值也为 NULL +**功能说明**:统计表中某列的均方差。 -**适用数据类型**:不能应用在 timestamp、binary、nchar、bool 类型字段上;在超级表查询中使用时,不能应用在 tag 列 +**返回数据类型**:双精度浮点数 Double。 -**嵌套子查询支持**:适用于内层查询和外层查询。 +**适用数据类型**:数值类型。 -**使用说明**: +**适用于**:表和超级表。 -- 只能与普通列,选择(Selection)、投影(Projection)函数一起使用,不能与聚合(Aggregation)函数一起使用。 -- 该函数可以应用在普通表和超级表上。 -- 版本2.6.0.x后支持 -### LOG +### SUM -```sql - SELECT LOG(field_name, base) FROM { tb_name | stb_name } [WHERE clause] +``` +SELECT SUM(field_name) FROM tb_name [WHERE clause]; ``` -**功能说明**:获得指定列对于底数 base 的对数 - -**返回结果类型**:DOUBLE。如果输入值为 NULL,输出值也为 NULL +**功能说明**:统计表/超级表中某列的和。 -**适用数据类型**:不能应用在 timestamp、binary、nchar、bool 类型字段上;在超级表查询中使用时,不能应用在 tag 列 +**返回数据类型**:双精度浮点数 Double 和长整型 INT64。 -**嵌套子查询支持**:适用于内层查询和外层查询。 +**适用数据类型**:数值类型。 -**使用说明**: +**适用于**:表和超级表。 -- 只能与普通列,选择(Selection)、投影(Projection)函数一起使用,不能与聚合(Aggregation)函数一起使用。 -- 该函数可以应用在普通表和超级表上。 -- 版本2.6.0.x后支持 -### ABS +### HYPERLOGLOG -```sql - SELECT ABS(field_name) FROM { tb_name | stb_name } [WHERE clause] +``` +SELECT HYPERLOGLOG(field_name) FROM { tb_name | stb_name } [WHERE clause]; ``` -**功能说明**:获得指定列的绝对值 - -**返回结果类型**:如果输入值为整数,输出值是 UBIGINT 类型。如果输入值是 FLOAT/DOUBLE 数据类型,输出值是 DOUBLE 数据类型。 +**功能说明**: + - 采用 hyperloglog 算法,返回某列的基数。该算法在数据量很大的情况下,可以明显降低内存的占用,但是求出来的基数是个估算值,标准误差(标准误差是多次实验,每次的平均数的标准差,不是与真实结果的误差)为 0.81%。 + - 在数据量较少的时候该算法不是很准确,可以使用 select count(data) from (select unique(col) as data from table) 的方法。 -**适用数据类型**:不能应用在 timestamp、binary、nchar、bool 类型字段上;在超级表查询中使用时,不能应用在 tag 列 +**返回结果类型**:整形。 -**嵌套子查询支持**:适用于内层查询和外层查询。 +**适用数据类型**:任何类型。 -**使用说明**: +**适用于**:表和超级表。 -- 只能与普通列,选择(Selection)、投影(Projection)函数一起使用,不能与聚合(Aggregation)函数一起使用。 -- 该函数可以应用在普通表和超级表上。 -- 版本2.6.0.x后支持 -### SQRT +### HISTOGRAM -```sql - SELECT SQRT(field_name) FROM { tb_name | stb_name } [WHERE clause] +``` +SELECT HISTOGRAM(field_name,bin_type, bin_description, normalized) FROM tb_name [WHERE clause]; ``` -**功能说明**:获得指定列的平方根 - -**返回结果类型**:DOUBLE。如果输入值为 NULL,输出值也为 NULL - -**适用数据类型**:不能应用在 timestamp、binary、nchar、bool 类型字段上;在超级表查询中使用时,不能应用在 tag 列 - -**嵌套子查询支持**:适用于内层查询和外层查询。 - -**使用说明**: - -- 只能与普通列,选择(Selection)、投影(Projection)函数一起使用,不能与聚合(Aggregation)函数一起使用。 -- 该函数可以应用在普通表和超级表上。 -- 版本2.6.0.x后支持 - -### CAST +**功能说明**:统计数据按照用户指定区间的分布。 -```sql - SELECT CAST(expression AS type_name) FROM { tb_name | stb_name } [WHERE clause] -``` +**返回结果类型**:如归一化参数 normalized 设置为 1,返回结果为双精度浮点类型 DOUBLE,否则为长整形 INT64。 -**功能说明**:数据类型转换函数,输入参数 expression 支持普通列、常量、标量函数及它们之间的四则运算,不支持 tag 列,只适用于 select 子句中。 +**适用数据类型**:数值型字段。 -**返回结果类型**:CAST 中指定的类型(type_name)。 +**适用于**: 表和超级表。 -**适用数据类型**: +**详细说明**: +1. bin_type 用户指定的分桶类型, 有效输入类型为"user_input“, ”linear_bin", "log_bin"。 +2. 
bin_description 描述如何生成分桶区间,针对三种桶类型,分别为以下描述格式(均为 JSON 格式字符串): + - "user_input": "[1, 3, 5, 7]" + 用户指定 bin 的具体数值。 + + - "linear_bin": "{"start": 0.0, "width": 5.0, "count": 5, "infinity": true}" + "start" 表示数据起始点,"width" 表示每次 bin 偏移量, "count" 为 bin 的总数,"infinity" 表示是否添加(-inf, inf)作为区间起点跟终点, + 生成区间为[-inf, 0.0, 5.0, 10.0, 15.0, 20.0, +inf]。 + + - "log_bin": "{"start":1.0, "factor": 2.0, "count": 5, "infinity": true}" + "start" 表示数据起始点,"factor" 表示按指数递增的因子,"count" 为 bin 的总数,"infinity" 表示是否添加(-inf, inf)作为区间起点跟终点, + 生成区间为[-inf, 1.0, 2.0, 4.0, 8.0, 16.0, +inf]。 +3. normalized 是否将返回结果归一化到 0~1 之间 。有效输入为 0 和 1。 -- 输入参数 expression 的类型可以是除 JSON 外目前所有类型字段(BOOL/TINYINT/SMALLINT/INT/BIGINT/FLOAT/DOUBLE/BINARY(M)/TIMESTAMP/NCHAR(M)/TINYINT UNSIGNED/SMALLINT UNSIGNED/INT UNSIGNED/BIGINT UNSIGNED); -- 输出目标类型只支持 BIGINT/BINARY(N)/TIMESTAMP/NCHAR(N)/BIGINT UNSIGNED。 -**使用说明**: +## 选择函数 -- 对于不能支持的类型转换会直接报错。 -- 如果输入值为NULL则输出值也为NULL。 -- 对于类型支持但某些值无法正确转换的情况对应的转换后的值以转换函数输出为准。目前可能遇到的几种情况: - 1)BINARY/NCHAR转BIGINT/BIGINT UNSIGNED时可能出现的无效字符情况,例如"a"可能转为0。 - 2)有符号数或TIMESTAMP转BIGINT UNSIGNED可能遇到的溢出问题。 - 3)BIGINT UNSIGNED转BIGINT可能遇到的溢出问题。 - 4)FLOAT/DOUBLE转BIGINT/BIGINT UNSIGNED可能遇到的溢出问题。 -- 版本2.6.0.x后支持 +选择函数根据语义在查询结果集中选择一行或多行结果返回。用户可以同时指定输出 ts 列或其他列(包括 tbname 和标签列),这样就可以方便地知道被选出的值是源于哪个数据行的。 -### CONCAT +### APERCENTILE -```sql - SELECT CONCAT(str1|column1, str2|column2, ...) FROM { tb_name | stb_name } [WHERE clause] +``` +SELECT APERCENTILE(field_name, P[, algo_type]) +FROM { tb_name | stb_name } [WHERE clause] ``` -**功能说明**:字符串连接函数。 - -**返回结果类型**:同输入参数类型,BINARY 或者 NCHAR。 +**功能说明**:统计表/超级表中指定列的值的近似百分比分位数,与 PERCENTILE 函数相似,但是返回近似结果。 -**适用数据类型**:输入参数或者全部是 BINARY 格式的字符串或者列,或者全部是 NCHAR 格式的字符串或者列。不能应用在 TAG 列。 +**返回数据类型**: 双精度浮点数 Double。 -**使用说明**: +**适用数据类型**:数值类型。P值范围是[0,100],当为0时等同于MIN,为100时等同于MAX。如果不指定 algo_type 则使用默认算法 。 -- 如果输入值为NULL,输出值为NULL。 -- 该函数最小参数个数为2个,最大参数个数为8个。 -- 该函数可以应用在普通表和超级表上。 -- 该函数适用于内层查询和外层查询。 -- 版本2.6.0.x后支持 +**适用于**:表、超级表。 -### CONCAT_WS +### BOTTOM ``` - SELECT CONCAT_WS(separator, str1|column1, str2|column2, ...) 
FROM { tb_name | stb_name } [WHERE clause] +SELECT BOTTOM(field_name, K) FROM { tb_name | stb_name } [WHERE clause]; ``` -**功能说明**:带分隔符的字符串连接函数。 +**功能说明**:统计表/超级表中某列的值最小 _k_ 个非 NULL 值。如果多条数据取值一样,全部取用又会超出 k 条限制时,系统会从相同值中随机选取符合要求的数量返回。 -**返回结果类型**:同输入参数类型,BINARY 或者 NCHAR。 +**返回数据类型**:同应用的字段。 -**适用数据类型**:输入参数或者全部是 BINARY 格式的字符串或者列,或者全部是 NCHAR 格式的字符串或者列。不能应用在 TAG 列。 +**适用数据类型**:数值类型。 -**使用说明**: +**适用于**:表和超级表。 -- 如果separator值为NULL,输出值为NULL。如果separator值不为NULL,其他输入为NULL,输出为空串 -- 该函数最小参数个数为3个,最大参数个数为9个。 -- 该函数可以应用在普通表和超级表上。 -- 该函数适用于内层查询和外层查询。 -- 版本2.6.0.x后支持 +**使用说明**: + +- *k*值取值范围 1≤*k*≤100; +- 系统同时返回该记录关联的时间戳列; +- 限制:BOTTOM 函数不支持 FILL 子句。 -### LENGTH +### FIRST ``` - SELECT LENGTH(str|column) FROM { tb_name | stb_name } [WHERE clause] +SELECT FIRST(field_name) FROM { tb_name | stb_name } [WHERE clause]; ``` -**功能说明**:以字节计数的字符串长度。 +**功能说明**:统计表/超级表中某列的值最先写入的非 NULL 值。 -**返回结果类型**:INT。 +**返回数据类型**:同应用的字段。 -**适用数据类型**:输入参数是 BINARY 类型或者 NCHAR 类型的字符串或者列。不能应用在 TAG 列。 +**适用数据类型**:所有字段。 -**使用说明** +**适用于**:表和超级表。 -- 如果输入值为NULL,输出值为NULL。 -- 该函数可以应用在普通表和超级表上。 -- 函数适用于内层查询和外层查询。 -- 版本2.6.0.x后支持 +**使用说明**: -### CHAR_LENGTH +- 如果要返回各个列的首个(时间戳最小)非 NULL 值,可以使用 FIRST(\*); +- 如果结果集中的某列全部为 NULL 值,则该列的返回结果也是 NULL; +- 如果结果集中所有列全部为 NULL 值,则不返回结果。 + +### INTERP ``` - SELECT CHAR_LENGTH(str|column) FROM { tb_name | stb_name } [WHERE clause] +SELECT INTERP(field_name) FROM { tb_name | stb_name } [WHERE where_condition] [ RANGE(timestamp1,timestamp2) ] [EVERY(interval)] [FILL ({ VALUE | PREV | NULL | LINEAR | NEXT})]; ``` -**功能说明**:以字符计数的字符串长度。 +**功能说明**:返回指定时间截面指定列的记录值或插值。 -**返回结果类型**:INT。 +**返回数据类型**:同字段类型。 -**适用数据类型**:输入参数是 BINARY 类型或者 NCHAR 类型的字符串或者列。不能应用在 TAG 列。 +**适用数据类型**:数值类型。 + +**适用于**:表、超级表。 **使用说明** -- 如果输入值为NULL,输出值为NULL。 -- 该函数可以应用在普通表和超级表上。 -- 该函数适用于内层查询和外层查询。 -- 版本2.6.0.x后支持 +- INTERP 用于在指定时间断面获取指定列的记录值,如果该时间断面不存在符合条件的行数据,那么会根据 FILL 参数的设定进行插值。 +- INTERP 的输入数据为指定列的数据,可以通过条件语句(where 子句)来对原始列数据进行过滤,如果没有指定过滤条件则输入为全部数据。 +- INTERP 的输出时间范围根据 RANGE(timestamp1,timestamp2)字段来指定,需满足 timestamp1<=timestamp2。其中 timestamp1(必选值)为输出时间范围的起始值,即如果 timestamp1 时刻符合插值条件则 timestamp1 为输出的第一条记录,timestamp2(必选值)为输出时间范围的结束值,即输出的最后一条记录的 timestamp 不能大于 timestamp2。如果没有指定 RANGE,那么满足过滤条件的输入数据中第一条记录的 timestamp 即为 timestamp1,最后一条记录的 timestamp 即为 timestamp2,同样也满足 timestamp1 <= timestamp2。 +- INTERP 根据 EVERY 字段来确定输出时间范围内的结果条数,即从 timestamp1 开始每隔固定长度的时间(EVERY 值)进行插值。如果没有指定 EVERY,则默认窗口大小为无穷大,即从 timestamp1 开始只有一个窗口。 +- INTERP 根据 FILL 字段来决定在每个符合输出条件的时刻如何进行插值,如果没有 FILL 字段则默认不插值,即输出为原始记录值或不输出(原始记录不存在)。 +- INTERP 只能在一个时间序列内进行插值,因此当作用于超级表时必须跟 group by tbname 一起使用,当作用嵌套查询外层时内层子查询不能含 GROUP BY 信息。 +- INTERP 的插值结果不受 ORDER BY timestamp 的影响,ORDER BY timestamp 只影响输出结果的排序。 -### LOWER +### LAST ``` - SELECT LOWER(str|column) FROM { tb_name | stb_name } [WHERE clause] +SELECT LAST(field_name) FROM { tb_name | stb_name } [WHERE clause]; ``` -**功能说明**:将字符串参数值转换为全小写字母。 +**功能说明**:统计表/超级表中某列的值最后写入的非 NULL 值。 -**返回结果类型**:同输入类型。 +**返回数据类型**:同应用的字段。 -**适用数据类型**:输入参数是 BINARY 类型或者 NCHAR 类型的字符串或者列。不能应用在 TAG 列。 +**适用数据类型**:所有字段。 -**使用说明**: +**适用于**:表和超级表。 + +**使用说明**: + +- 如果要返回各个列的最后(时间戳最大)一个非 NULL 值,可以使用 LAST(\*); +- 如果结果集中的某列全部为 NULL 值,则该列的返回结果也是 NULL;如果结果集中所有列全部为 NULL 值,则不返回结果。 +- 在用于超级表时,时间戳完全一样且同为最大的数据行可能有多个,那么会从中随机返回一条,而并不保证多次运行所挑选的数据行必然一致。 -- 如果输入值为NULL,输出值为NULL。 -- 该函数可以应用在普通表和超级表上。 -- 该函数适用于内层查询和外层查询。 -- 版本2.6.0.x后支持 -### UPPER +### LAST_ROW ``` - SELECT UPPER(str|column) FROM { tb_name | stb_name } [WHERE clause] +SELECT LAST_ROW(field_name) FROM { tb_name | stb_name }; ``` -**功能说明**:将字符串参数值转换为全大写字母。 +**功能说明**:返回表/超级表的最后一条记录。 + +**返回数据类型**:同应用的字段。 
-**返回结果类型**:同输入类型。 +**适用数据类型**:所有字段。 -**适用数据类型**:输入参数是 BINARY 类型或者 NCHAR 类型的字符串或者列。不能应用在 TAG 列。 +**适用于**:表和超级表。 **使用说明**: -- 如果输入值为NULL,输出值为NULL。 -- 该函数可以应用在普通表和超级表上。 -- 该函数适用于内层查询和外层查询。 -- 版本2.6.0.x后支持 +- 在用于超级表时,时间戳完全一样且同为最大的数据行可能有多个,那么会从中随机返回一条,而并不保证多次运行所挑选的数据行必然一致。 +- 不能与 INTERVAL 一起使用。 -### LTRIM +### MAX ``` - SELECT LTRIM(str|column) FROM { tb_name | stb_name } [WHERE clause] +SELECT MAX(field_name) FROM { tb_name | stb_name } [WHERE clause]; ``` -**功能说明**:返回清除左边空格后的字符串。 +**功能说明**:统计表/超级表中某列的值最大值。 -**返回结果类型**:同输入类型。 +**返回数据类型**:同应用的字段。 -**适用数据类型**:输入参数是 BINARY 类型或者 NCHAR 类型的字符串或者列。不能应用在 TAG 列。 +**适用数据类型**:数值类型。 -**使用说明**: +**适用于**:表和超级表。 -- 如果输入值为NULL,输出值为NULL。 -- 该函数可以应用在普通表和超级表上。 -- 该函数适用于内层查询和外层查询。 -- 版本2.6.0.x后支持 -### RTRIM +### MIN ``` - SELECT RTRIM(str|column) FROM { tb_name | stb_name } [WHERE clause] +SELECT MIN(field_name) FROM {tb_name | stb_name} [WHERE clause]; ``` -**功能说明**:返回清除右边空格后的字符串。 +**功能说明**:统计表/超级表中某列的值最小值。 -**返回结果类型**:同输入类型。 +**返回数据类型**:同应用的字段。 -**适用数据类型**:输入参数是 BINARY 类型或者 NCHAR 类型的字符串或者列。不能应用在 TAG 列。 +**适用数据类型**:数值类型。 -**使用说明**: +**适用于**:表和超级表。 -- 如果输入值为NULL,输出值为NULL。 -- 该函数可以应用在普通表和超级表上。 -- 该函数适用于内层查询和外层查询。 -- 版本2.6.0.x后支持 -### SUBSTR +### PERCENTILE ``` - SELECT SUBSTR(str,pos[,len]) FROM { tb_name | stb_name } [WHERE clause] +SELECT PERCENTILE(field_name, P) FROM { tb_name } [WHERE clause]; ``` -**功能说明**:从源字符串 str 中的指定位置 pos 开始取一个长度为 len 的子串并返回。 +**功能说明**:统计表中某列的值百分比分位数。 + +**返回数据类型**: 双精度浮点数 Double。 -**返回结果类型**:同输入类型。 +**应用字段**:数值类型。 -**适用数据类型**:输入参数是 BINARY 类型或者 NCHAR 类型的字符串或者列。不能应用在 TAG 列。 +**适用于**:表。 -**使用说明**: +**使用说明**:*P*值取值范围 0≤*P*≤100,为 0 的时候等同于 MIN,为 100 的时候等同于 MAX。 -- 如果输入值为NULL,输出值为NULL。 -- 输入参数pos可以为正数,也可以为负数。如果pos是正数,表示开始位置从字符串开头正数计算。如果pos为负数,表示开始位置从字符串结尾倒数计算。如果输入参数len被忽略,返回的子串包含从pos开始的整个字串。 -- 该函数可以应用在普通表和超级表上。 -- 该函数适用于内层查询和外层查询。 -- 版本2.6.0.x后支持 -### 四则运算 +### TAIL ``` -SELECT field_name [+|-|*|/|%][Value|field_name] FROM { tb_name | stb_name } [WHERE clause]; +SELECT TAIL(field_name, k, offset_val) FROM {tb_name | stb_name} [WHERE clause]; ``` -**功能说明**:统计表/超级表中某列或多列间的值加、减、乘、除、取余计算结果。 +**功能说明**:返回跳过最后 offset_val 个,然后取连续 k 个记录,不忽略 NULL 值。offset_val 可以不输入。此时返回最后的 k 个记录。当有 offset_val 输入的情况下,该函数功能等效于 `order by ts desc LIMIT k OFFSET offset_val`。 -**返回数据类型**:双精度浮点数。 +**参数范围**:k: [1,100] offset_val: [0,100]。 -**应用字段**:不能应用在 timestamp、binary、nchar、bool 类型字段。 +**返回数据类型**:同应用的字段。 + +**适用数据类型**:适合于除时间主列外的任何类型。 **适用于**:表、超级表。 -**使用说明**: -- 支持两列或多列之间进行计算,可使用括号控制计算优先级; -- NULL 字段不参与计算,如果参与计算的某行中包含 NULL,该行的计算结果为 NULL。 +### TOP ``` -taos> SELECT current + voltage * phase FROM d1001; -(current+(voltage*phase)) | -============================ - 78.190000713 | - 84.540003240 | - 80.810000718 | -Query OK, 3 row(s) in set (0.001046s) +SELECT TOP(field_name, K) FROM { tb_name | stb_name } [WHERE clause]; ``` -### STATECOUNT +**功能说明**: 统计表/超级表中某列的值最大 _k_ 个非 NULL 值。如果多条数据取值一样,全部取用又会超出 k 条限制时,系统会从相同值中随机选取符合要求的数量返回。 -``` -SELECT STATECOUNT(field_name, oper, val) FROM { tb_name | stb_name } [WHERE clause]; -``` +**返回数据类型**:同应用的字段。 -**功能说明**:返回满足某个条件的连续记录的个数,结果作为新的一列追加在每行后面。条件根据参数计算,如果条件为 true 则加 1,条件为 false 则重置为-1,如果数据为 NULL,跳过该条数据。 +**适用数据类型**:数值类型。 -**参数范围**: +**适用于**:表、超级表。 -- oper : LT (小于)、GT(大于)、LE(小于等于)、GE(大于等于)、NE(不等于)、EQ(等于),不区分大小写。 -- val : 数值型 +**使用说明**: -**返回结果类型**:整形。 +- *k*值取值范围 1≤*k*≤100; +- 系统同时返回该记录关联的时间戳列; +- 限制:TOP 函数不支持 FILL 子句。 -**适用数据类型**:不能应用在 timestamp、binary、nchar、bool 类型字段上。 +### UNIQUE -**嵌套子查询支持**:不支持应用在子查询上。 +``` +SELECT UNIQUE(field_name) FROM {tb_name | stb_name} [WHERE clause]; +``` -**支持的版本**:2.6 开始的版本。 
+**功能说明**:返回该列的数值首次出现的值。该函数功能与 distinct 相似,但是可以匹配标签和时间戳信息。可以针对除时间列以外的字段进行查询,可以匹配标签和时间戳,其中的标签和时间戳是第一次出现时刻的标签和时间戳。 -**使用说明**: +**返回数据类型**:同应用的字段。 -- 该函数可以应用在普通表上,在由 GROUP BY 划分出单独时间线的情况下用于超级表(也即 GROUP BY tbname) -- 不能和窗口操作一起使用,例如 interval/state_window/session_window。 +**适用数据类型**:适合于除时间类型以外的字段。 -**示例**: +**适用于**: 表和超级表。 -``` -taos> select ts,dbig from statef2; - ts | dbig | -======================================================== -2021-10-15 00:31:33.000000000 | 1 | -2021-10-17 00:31:31.000000000 | NULL | -2021-12-24 00:31:34.000000000 | 2 | -2022-01-01 08:00:05.000000000 | 19 | -2022-01-01 08:00:06.000000000 | NULL | -2022-01-01 08:00:07.000000000 | 9 | -Query OK, 6 row(s) in set (0.002977s) -taos> select stateCount(dbig,GT,2) from statef2; -ts | dbig | statecount(dbig,gt,2) | -================================================================================ -2021-10-15 00:31:33.000000000 | 1 | -1 | -2021-10-17 00:31:31.000000000 | NULL | NULL | -2021-12-24 00:31:34.000000000 | 2 | -1 | -2022-01-01 08:00:05.000000000 | 19 | 1 | -2022-01-01 08:00:06.000000000 | NULL | NULL | -2022-01-01 08:00:07.000000000 | 9 | 2 | -Query OK, 6 row(s) in set (0.002791s) -``` +## 时序数据特有函数 -### STATEDURATION +时序数据特有函数是 TDengine 为了满足时序数据的查询场景而量身定做出来的。在通用数据库中,实现类似功能通常需要复杂的查询语法,且效率很低。TDengine 以函数的方式内置了这些功能,最大程度的减轻了用户的使用成本。 + +### CSUM ```sql -SELECT stateDuration(field_name, oper, val, unit) FROM { tb_name | stb_name } [WHERE clause]; + SELECT CSUM(field_name) FROM { tb_name | stb_name } [WHERE clause] ``` -**功能说明**:返回满足某个条件的连续记录的时间长度,结果作为新的一列追加在每行后面。条件根据参数计算,如果条件为 true 则加上两个记录之间的时间长度(第一个满足条件的记录时间长度记为 0),条件为 false 则重置为-1,如果数据为 NULL,跳过该条数据。 - -**参数范围**: - -- oper : LT (小于)、GT(大于)、LE(小于等于)、GE(大于等于)、NE(不等于)、EQ(等于),不区分大小写。 -- val : 数值型 -- unit : 时间长度的单位,范围[1s、1m、1h ],不足一个单位舍去。默认为 1s。 +**功能说明**:累加和(Cumulative sum),输出行与输入行数相同。 -**返回结果类型**:整形。 +**返回结果类型**: 输入列如果是整数类型返回值为长整型 (int64_t),浮点数返回值为双精度浮点数(Double)。无符号整数类型返回值为无符号长整型(uint64_t)。 返回结果中同时带有每行记录对应的时间戳。 -**适用数据类型**:不能应用在 timestamp、binary、nchar、bool 类型字段上。 +**适用数据类型**:数值类型。 -**嵌套子查询支持**:不支持应用在子查询上。 +**嵌套子查询支持**: 适用于内层查询和外层查询。 -**支持的版本**:2.6 开始的版本。 +**适用于**:表和超级表 -**使用说明**: +**使用说明**: + + - 不支持 +、-、*、/ 运算,如 csum(col1) + csum(col2)。 + - 只能与聚合(Aggregation)函数一起使用。 该函数可以应用在普通表和超级表上。 + - 使用在超级表上的时候,需要搭配 Group by tbname使用,将结果强制规约到单个时间线。 -- 该函数可以应用在普通表上,在由 GROUP BY 划分出单独时间线的情况下用于超级表(也即 GROUP BY tbname) -- 不能和窗口操作一起使用,例如 interval/state_window/session_window。 -**示例**: +### DERIVATIVE ``` -taos> select ts,dbig from statef2; - ts | dbig | -======================================================== -2021-10-15 00:31:33.000000000 | 1 | -2021-10-17 00:31:31.000000000 | NULL | -2021-12-24 00:31:34.000000000 | 2 | -2022-01-01 08:00:05.000000000 | 19 | -2022-01-01 08:00:06.000000000 | NULL | -2022-01-01 08:00:07.000000000 | 9 | -Query OK, 6 row(s) in set (0.002407s) - -taos> select stateDuration(dbig,GT,2) from statef2; -ts | dbig | stateduration(dbig,gt,2) | -=================================================================================== -2021-10-15 00:31:33.000000000 | 1 | -1 | -2021-10-17 00:31:31.000000000 | NULL | NULL | -2021-12-24 00:31:34.000000000 | 2 | -1 | -2022-01-01 08:00:05.000000000 | 19 | 0 | -2022-01-01 08:00:06.000000000 | NULL | NULL | -2022-01-01 08:00:07.000000000 | 9 | 2 | -Query OK, 6 row(s) in set (0.002613s) +SELECT DERIVATIVE(field_name, time_interval, ignore_negative) FROM tb_name [WHERE clause]; ``` -## 时间函数 - -从 2.6.0.0 版本开始,TDengine 查询引擎支持以下时间相关函数: +**功能说明**:统计表中某列数值的单位变化率。其中单位时间区间的长度可以通过 time_interval 参数指定,最小可以是 1 秒(1s);ignore_negative 参数的值可以是 0 或 1,为 1 
时表示忽略负值。 -### NOW +**返回数据类型**:双精度浮点数。 -```sql -SELECT NOW() FROM { tb_name | stb_name } [WHERE clause]; -SELECT select_expr FROM { tb_name | stb_name } WHERE ts_col cond_operatior NOW(); -INSERT INTO tb_name VALUES (NOW(), ...); -``` +**适用数据类型**:数值类型。 -**功能说明**:返回客户端当前系统时间。 +**适用于**:表、超级表 -**返回结果数据类型**:TIMESTAMP 时间戳类型。 +**使用说明**: DERIVATIVE 函数可以在由 GROUP BY 划分出单独时间线的情况下用于超级表(也即 GROUP BY tbname)。 -**应用字段**:在 WHERE 或 INSERT 语句中使用时只能作用于 TIMESTAMP 类型的字段。 -**适用于**:表、超级表。 +### DIFF -**使用说明**: + ```sql + SELECT {DIFF(field_name, ignore_negative) | DIFF(field_name)} FROM tb_name [WHERE clause]; + ``` -- 支持时间加减操作,如 NOW() + 1s, 支持的时间单位如下: - b(纳秒)、u(微秒)、a(毫秒)、s(秒)、m(分)、h(小时)、d(天)、w(周)。 -- 返回的时间戳精度与当前 DATABASE 设置的时间精度一致。 +**功能说明**:统计表中某列的值与前一行对应值的差。 ignore_negative 取值为 0|1 , 可以不填,默认值为 0. 不忽略负值。ignore_negative 为 1 时表示忽略负数。 -**示例**: +**返回数据类型**:同应用字段。 -```sql -taos> SELECT NOW() FROM meters; - now() | -========================== - 2022-02-02 02:02:02.456 | -Query OK, 1 row(s) in set (0.002093s) +**适用数据类型**:数值类型。 -taos> SELECT NOW() + 1h FROM meters; - now() + 1h | -========================== - 2022-02-02 03:02:02.456 | -Query OK, 1 row(s) in set (0.002093s) +**适用于**:表、超级表。 -taos> SELECT COUNT(voltage) FROM d1001 WHERE ts < NOW(); - count(voltage) | -============================= - 5 | -Query OK, 5 row(s) in set (0.004475s) +**使用说明**: 输出结果行数是范围内总行数减一,第一行没有结果输出。 -taos> INSERT INTO d1001 VALUES (NOW(), 10.2, 219, 0.32); -Query OK, 1 of 1 row(s) in database (0.002210s) -``` -### TODAY +### IRATE -```sql -SELECT TODAY() FROM { tb_name | stb_name } [WHERE clause]; -SELECT select_expr FROM { tb_name | stb_name } WHERE ts_col cond_operatior TODAY()]; -INSERT INTO tb_name VALUES (TODAY(), ...); +``` +SELECT IRATE(field_name) FROM tb_name WHERE clause; ``` -**功能说明**:返回客户端当日零时的系统时间。 +**功能说明**:计算瞬时增长率。使用时间区间中最后两个样本数据来计算瞬时增长速率;如果这两个值呈递减关系,那么只取最后一个数用于计算,而不是使用二者差值。 -**返回结果数据类型**:TIMESTAMP 时间戳类型。 +**返回数据类型**:双精度浮点数 Double。 -**应用字段**:在 WHERE 或 INSERT 语句中使用时只能作用于 TIMESTAMP 类型的字段。 +**适用数据类型**:数值类型。 **适用于**:表、超级表。 -**使用说明**: +### MAVG -- 支持时间加减操作,如 TODAY() + 1s, 支持的时间单位如下: - b(纳秒),u(微秒),a(毫秒),s(秒),m(分),h(小时),d(天),w(周)。 -- 返回的时间戳精度与当前 DATABASE 设置的时间精度一致。 +```sql + SELECT MAVG(field_name, K) FROM { tb_name | stb_name } [WHERE clause] +``` -**示例**: + **功能说明**: 计算连续 k 个值的移动平均数(moving average)。如果输入行数小于 k,则无结果输出。参数 k 的合法输入范围是 1≤ k ≤ 1000。 -```sql -taos> SELECT TODAY() FROM meters; - today() | -========================== - 2022-02-02 00:00:00.000 | -Query OK, 1 row(s) in set (0.002093s) + **返回结果类型**: 返回双精度浮点数类型。 + + **适用数据类型**: 数值类型。 -taos> SELECT TODAY() + 1h FROM meters; - today() + 1h | -========================== - 2022-02-02 01:00:00.000 | -Query OK, 1 row(s) in set (0.002093s) + **嵌套子查询支持**: 适用于内层查询和外层查询。 -taos> SELECT COUNT(voltage) FROM d1001 WHERE ts < TODAY(); - count(voltage) | -============================= - 5 | -Query OK, 5 row(s) in set (0.004475s) + **适用于**:表和超级表 -taos> INSERT INTO d1001 VALUES (TODAY(), 10.2, 219, 0.32); -Query OK, 1 of 1 row(s) in database (0.002210s) -``` + **使用说明**: + + - 不支持 +、-、*、/ 运算,如 mavg(col1, k1) + mavg(col2, k1); + - 只能与普通列,选择(Selection)、投影(Projection)函数一起使用,不能与聚合(Aggregation)函数一起使用; + - 使用在超级表上的时候,需要搭配 Group by tbname使用,将结果强制规约到单个时间线。 -### TIMEZONE +### SAMPLE ```sql -SELECT TIMEZONE() FROM { tb_name | stb_name } [WHERE clause]; + SELECT SAMPLE(field_name, K) FROM { tb_name | stb_name } [WHERE clause] ``` -**功能说明**:返回客户端当前时区信息。 + **功能说明**: 获取数据的 k 个采样值。参数 k 的合法输入范围是 1≤ k ≤ 1000。 -**返回结果数据类型**:BINARY 类型。 + **返回结果类型**: 同原始数据类型, 返回结果中带有该行记录的时间戳。 -**应用字段**:无 + **适用数据类型**: 
在超级表查询中使用时,不能应用在标签之上。 -**适用于**:表、超级表。 + **嵌套子查询支持**: 适用于内层查询和外层查询。 -**示例**: + **适用于**:表和超级表 -```sql -taos> SELECT TIMEZONE() FROM meters; - timezone() | -================================= - UTC (UTC, +0000) | -Query OK, 1 row(s) in set (0.002093s) -``` + **使用说明**: + + - 不能参与表达式计算;该函数可以应用在普通表和超级表上; + - 使用在超级表上的时候,需要搭配 Group by tbname 使用,将结果强制规约到单个时间线。 -### TO_ISO8601 +### STATECOUNT -```sql -SELECT TO_ISO8601(ts_val | ts_col) FROM { tb_name | stb_name } [WHERE clause]; +``` +SELECT STATECOUNT(field_name, oper, val) FROM { tb_name | stb_name } [WHERE clause]; ``` -**功能说明**:将 UNIX 时间戳转换成为 ISO8601 标准的日期时间格式,并附加客户端时区信息。 +**功能说明**:返回满足某个条件的连续记录的个数,结果作为新的一列追加在每行后面。条件根据参数计算,如果条件为 true 则加 1,条件为 false 则重置为-1,如果数据为 NULL,跳过该条数据。 -**返回结果数据类型**:BINARY 类型。 +**参数范围**: -**应用字段**:UNIX 时间戳常量或是 TIMESTAMP 类型的列 +- oper : LT (小于)、GT(大于)、LE(小于等于)、GE(大于等于)、NE(不等于)、EQ(等于),不区分大小写。 +- val : 数值型 -**适用于**:表、超级表。 +**返回结果类型**:整型。 -**使用说明**: +**适用数据类型**:数值类型。 -- 如果输入是 UNIX 时间戳常量,返回格式精度由时间戳的位数决定; -- 如果输入是 TIMSTAMP 类型的列,返回格式的时间戳精度与当前 DATABASE 设置的时间精度一致。 +**嵌套子查询支持**:不支持应用在子查询上。 -**示例**: +**适用于**:表和超级表。 -```sql -taos> SELECT TO_ISO8601(1643738400) FROM meters; - to_iso8601(1643738400) | -============================== - 2022-02-02T02:00:00+0800 | +**使用说明**: -taos> SELECT TO_ISO8601(ts) FROM meters; - to_iso8601(ts) | -============================== - 2022-02-02T02:00:00+0800 | - 2022-02-02T02:00:00+0800 | - 2022-02-02T02:00:00+0800 | -``` +- 该函数可以应用在普通表上,在由 GROUP BY 划分出单独时间线的情况下用于超级表(也即 GROUP BY tbname) +- 不能和窗口操作一起使用,例如 interval/state_window/session_window。 -### TO_UNIXTIMESTAMP + +### STATEDURATION ```sql -SELECT TO_UNIXTIMESTAMP(datetime_string | ts_col) FROM { tb_name | stb_name } [WHERE clause]; +SELECT stateDuration(field_name, oper, val, unit) FROM { tb_name | stb_name } [WHERE clause]; ``` -**功能说明**:将日期时间格式的字符串转换成为 UNIX 时间戳。 +**功能说明**:返回满足某个条件的连续记录的时间长度,结果作为新的一列追加在每行后面。条件根据参数计算,如果条件为 true 则加上两个记录之间的时间长度(第一个满足条件的记录时间长度记为 0),条件为 false 则重置为-1,如果数据为 NULL,跳过该条数据。 -**返回结果数据类型**:长整型 INT64。 +**参数范围**: + +- oper : LT (小于)、GT(大于)、LE(小于等于)、GE(大于等于)、NE(不等于)、EQ(等于),不区分大小写。 +- val : 数值型 +- unit : 时间长度的单位,范围[1s、1m、1h ],不足一个单位舍去。默认为 1s。 -**应用字段**:字符串常量或是 BINARY/NCHAR 类型的列。 +**返回结果类型**:整型。 -**适用于**:表、超级表。 +**适用数据类型**:数值类型。 -**使用说明**: +**嵌套子查询支持**:不支持应用在子查询上。 -- 输入的日期时间字符串须符合 ISO8601/RFC3339 标准,无法转换的字符串格式将返回 0。 -- 返回的时间戳精度与当前 DATABASE 设置的时间精度一致。 +**适用于**:表和超级表。 -**示例**: +**使用说明**: -```sql -taos> SELECT TO_UNIXTIMESTAMP("2022-02-02T02:00:00.000Z") FROM meters; -to_unixtimestamp("2022-02-02T02:00:00.000Z") | -============================================== - 1643767200000 | +- 该函数可以应用在普通表上,在由 GROUP BY 划分出单独时间线的情况下用于超级表(也即 GROUP BY tbname) +- 不能和窗口操作一起使用,例如 interval/state_window/session_window。 -taos> SELECT TO_UNIXTIMESTAMP(col_binary) FROM meters; - to_unixtimestamp(col_binary) | -======================================== - 1643767200000 | - 1643767200000 | - 1643767200000 | -``` -### TIMETRUNCATE +### TWA -```sql -SELECT TIMETRUNCATE(ts_val | datetime_string | ts_col, time_unit) FROM { tb_name | stb_name } [WHERE clause]; +``` +SELECT TWA(field_name) FROM tb_name WHERE clause; ``` -**功能说明**:将时间戳按照指定时间单位 time_unit 进行截断。 +**功能说明**:时间加权平均函数。统计表中某列在一段时间内的时间加权平均。 -**返回结果数据类型**:TIMESTAMP 时间戳类型。 +**返回数据类型**:双精度浮点数 Double。 -**应用字段**:UNIX 时间戳,日期时间格式的字符串,或者 TIMESTAMP 类型的列。 +**适用数据类型**:数值类型。 **适用于**:表、超级表。 -**使用说明**: -- 支持的时间单位 time_unit 如下: - 1u(微秒),1a(毫秒),1s(秒),1m(分),1h(小时),1d(天)。 -- 返回的时间戳精度与当前 DATABASE 设置的时间精度一致。 +**使用说明**: TWA 函数可以在由 GROUP BY 划分出单独时间线的情况下用于超级表(也即 GROUP BY tbname)。 -**示例**: -```sql -taos> SELECT TIMETRUNCATE(1643738522000, 1h) 
FROM meters; - timetruncate(1643738522000, 1h) | -=================================== - 2022-02-02 02:00:00.000 | -Query OK, 1 row(s) in set (0.001499s) +## 系统信息函数 -taos> SELECT TIMETRUNCATE("2022-02-02 02:02:02", 1h) FROM meters; - timetruncate("2022-02-02 02:02:02", 1h) | -=========================================== - 2022-02-02 02:00:00.000 | -Query OK, 1 row(s) in set (0.003903s) +### DATABASE -taos> SELECT TIMETRUNCATE(ts, 1h) FROM meters; - timetruncate(ts, 1h) | -========================== - 2022-02-02 02:00:00.000 | - 2022-02-02 02:00:00.000 | - 2022-02-02 02:00:00.000 | -Query OK, 3 row(s) in set (0.003903s) +``` +SELECT DATABASE(); ``` -### TIMEDIFF +**说明**:返回当前登录的数据库。如果登录的时候没有指定默认数据库,且没有使用USE命令切换数据库,则返回NULL。 -```sql -SELECT TIMEDIFF(ts_val1 | datetime_string1 | ts_col1, ts_val2 | datetime_string2 | ts_col2 [, time_unit]) FROM { tb_name | stb_name } [WHERE clause]; -``` -**功能说明**:计算两个时间戳之间的差值,并近似到时间单位 time_unit 指定的精度。 +### CLIENT_VERSION -**返回结果数据类型**:长整型 INT64。 +``` +SELECT CLIENT_VERSION(); +``` -**应用字段**:UNIX 时间戳,日期时间格式的字符串,或者 TIMESTAMP 类型的列。 +**说明**:返回客户端版本。 -**适用于**:表、超级表。 +### SERVER_VERSION -**使用说明**: -- 支持的时间单位 time_unit 如下: - 1u(微秒),1a(毫秒),1s(秒),1m(分),1h(小时),1d(天)。 -- 如果时间单位 time_unit 未指定, 返回的时间差值精度与当前 DATABASE 设置的时间精度一致。 +``` +SELECT SERVER_VERSION(); +``` -**示例**: +**说明**:返回服务端版本。 + +### SERVER_STATUS -```sql -taos> SELECT TIMEDIFF(1643738400000, 1643742000000) FROM meters; - timediff(1643738400000, 1643742000000) | -========================================= - 3600000 | -Query OK, 1 row(s) in set (0.002553s) -taos> SELECT TIMEDIFF(1643738400000, 1643742000000, 1h) FROM meters; - timediff(1643738400000, 1643742000000, 1h) | -============================================= - 1 | -Query OK, 1 row(s) in set (0.003726s) - -taos> SELECT TIMEDIFF("2022-02-02 03:00:00", "2022-02-02 02:00:00", 1h) FROM meters; - timediff("2022-02-02 03:00:00", "2022-02-02 02:00:00", 1h) | -============================================================= - 1 | -Query OK, 1 row(s) in set (0.001937s) - -taos> SELECT TIMEDIFF(ts_col1, ts_col2, 1h) FROM meters; - timediff(ts_col1, ts_col2, 1h) | -=================================== - 1 | -Query OK, 1 row(s) in set (0.001937s) ``` +SELECT SERVER_STATUS(); +``` + +**说明**:返回服务端当前的状态。 diff --git a/docs-cn/12-taos-sql/08-interval.md b/docs-cn/12-taos-sql/08-interval.md index 7c796e0046c5a740d393d71861828eb30bb3a5cc..b0619ea5ce3759e9bca1234b76e2a16176511547 100644 --- a/docs-cn/12-taos-sql/08-interval.md +++ b/docs-cn/12-taos-sql/08-interval.md @@ -11,7 +11,7 @@ TDengine 支持按时间段窗口切分方式进行聚合结果查询,比如 INTERVAL 子句用于产生相等时间周期的窗口,SLIDING 用以指定窗口向前滑动的时间。每次执行的查询是一个时间窗口,时间窗口随着时间流动向前滑动。在定义连续查询的时候需要指定时间窗口(time window )大小和每次前向增量时间(forward sliding times)。如图,[t0s, t0e] ,[t1s , t1e], [t2s, t2e] 是分别是执行三次连续查询的时间窗口范围,窗口的前向滑动的时间范围 sliding time 标识 。查询过滤、聚合等操作按照每个时间窗口为独立的单位执行。当 SLIDING 与 INTERVAL 相等的时候,滑动窗口即为翻转窗口。 -![时间窗口示意图](./timewindow-1.webp) +![TDengine Database 时间窗口示意图](./timewindow-1.webp) INTERVAL 和 SLIDING 子句需要配合聚合和选择函数来使用。以下 SQL 语句非法: @@ -33,7 +33,7 @@ _ 从 2.1.5.0 版本开始,INTERVAL 语句允许的最短时间间隔调整为 使用整数(布尔值)或字符串来标识产生记录时候设备的状态量。产生的记录如果具有相同的状态量数值则归属于同一个状态窗口,数值改变后该窗口关闭。如下图所示,根据状态量确定的状态窗口分别是[2019-04-28 14:22:07,2019-04-28 14:22:10]和[2019-04-28 14:22:11,2019-04-28 14:22:12]两个。(状态窗口暂不支持对超级表使用) -![时间窗口示意图](./timewindow-3.webp) +![TDengine Database 时间窗口示意图](./timewindow-3.webp) 使用 STATE_WINDOW 来确定状态窗口划分的列。例如: @@ -45,7 +45,7 @@ SELECT COUNT(*), FIRST(ts), status FROM temp_tb_1 STATE_WINDOW(status); 会话窗口根据记录的时间戳主键的值来确定是否属于同一个会话。如下图所示,如果设置时间戳的连续的间隔小于等于 12 秒,则以下 6 条记录构成 2 个会话窗口,分别是:[2019-04-28 
14:22:10,2019-04-28 14:22:30]和[2019-04-28 14:23:10,2019-04-28 14:23:30]。因为 2019-04-28 14:22:30 与 2019-04-28 14:23:10 之间的时间间隔是 40 秒,超过了连续时间间隔(12 秒)。 -![时间窗口示意图](./timewindow-2.webp) +![TDengine Database 时间窗口示意图](./timewindow-2.webp) 在 tol_value 时间间隔范围内的结果都认为归属于同一个窗口,如果连续的两条记录的时间超过 tol_val,则自动开启下一个窗口。(会话窗口暂不支持对超级表使用) diff --git a/docs-cn/12-taos-sql/09-limit.md b/docs-cn/12-taos-sql/09-limit.md index 3c86a3862174377e6a00d046fb69627c773fe76e..7673e24a83cc1ba5335b11f29803cf9f3eae26e5 100644 --- a/docs-cn/12-taos-sql/09-limit.md +++ b/docs-cn/12-taos-sql/09-limit.md @@ -7,9 +7,9 @@ title: 边界限制 - 数据库名最大长度为 32。 - 表名最大长度为 192,不包括数据库名前缀和分隔符 -- 每行数据最大长度 16k 个字符, 从 2.1.7.0 版本开始,每行数据最大长度 48k 个字符(注意:数据行内每个 BINARY/NCHAR 类型的列还会额外占用 2 个字节的存储位置)。 +- 每行数据最大长度 48KB (注意:数据行内每个 BINARY/NCHAR 类型的列还会额外占用 2 个字节的存储位置)。 - 列名最大长度为 64,最多允许 4096 列,最少需要 2 列,第一列必须是时间戳。注:从 2.1.7.0 版本(不含)以前最多允许 4096 列 -- 标签名最大长度为 64,最多允许 128 个,至少要有 1 个标签,一个表中标签值的总长度不超过 16k 个字符。 +- 标签名最大长度为 64,最多允许 128 个,至少要有 1 个标签,一个表中标签值的总长度不超过 16KB 。 - SQL 语句最大长度 1048576 个字符,也可通过客户端配置参数 maxSQLLength 修改,取值范围 65480 ~ 1048576。 - SELECT 语句的查询结果,最多允许返回 4096 列(语句中的函数调用可能也会占用一些列空间),超限时需要显式指定较少的返回数据列,以避免语句执行报错。注: 2.1.7.0 版本(不含)之前为最多允许 1024 列 - 库的数目,超级表的数目、表的数目,系统不做限制,仅受系统资源限制。 diff --git a/docs-cn/12-taos-sql/12-keywords/index.md b/docs-cn/12-taos-sql/12-keywords.md similarity index 71% rename from docs-cn/12-taos-sql/12-keywords/index.md rename to docs-cn/12-taos-sql/12-keywords.md index 608d4e080967cfd97072706cf0963ae669960be6..5c68e5da7e8c537e7514c5f9cfba43084d72189b 100644 --- a/docs-cn/12-taos-sql/12-keywords/index.md +++ b/docs-cn/12-taos-sql/12-keywords.md @@ -23,17 +23,17 @@ title: TDengine 参数限制与保留关键字 去掉了 `` ‘“`\ `` (单双引号、撇号、反斜杠、空格) - 数据库名:不能包含“.”以及特殊字符,不能超过 32 个字符 -- 表名:不能包含“.”以及特殊字符,与所属数据库名一起,不能超过 192 个字符,每行数据最大长度 16k 个字符 -- 表的列名:不能包含特殊字符,不能超过 64 个字符 +- 表名:不能包含“.”以及特殊字符,与所属数据库名一起,不能超过 192 个字节 ,每行数据最大长度 48KB +- 表的列名:不能包含特殊字符,不能超过 64 个字节 - 数据库名、表名、列名,都不能以数字开头,合法的可用字符集是“英文字符、数字和下划线” - 表的列数:不能超过 1024 列,最少需要 2 列,第一列必须是时间戳(从 2.1.7.0 版本开始,改为最多支持 4096 列) -- 记录的最大长度:包括时间戳 8 byte,不能超过 16KB(每个 BINARY/NCHAR 类型的列还会额外占用 2 个 byte 的存储位置) -- 单条 SQL 语句默认最大字符串长度:1048576 byte,但可通过系统配置参数 maxSQLLength 修改,取值范围 65480 ~ 1048576 byte +- 记录的最大长度:包括时间戳 8 字节,不能超过 48KB(每个 BINARY/NCHAR 类型的列还会额外占用 2 个 字节 的存储位置) +- 单条 SQL 语句默认最大字符串长度:1048576 字节,但可通过系统配置参数 maxSQLLength 修改,取值范围 65480 ~ 1048576 字节 - 数据库副本数:不能超过 3 -- 用户名:不能超过 23 个 byte -- 用户密码:不能超过 15 个 byte +- 用户名:不能超过 23 个 字节 +- 用户密码:不能超过 15 个 字节 - 标签(Tags)数量:不能超过 128 个,可以 0 个 -- 标签的总长度:不能超过 16K byte +- 标签的总长度:不能超过 16KB - 记录条数:仅受存储空间限制 - 表的个数:仅受节点个数限制 - 库的个数:仅受节点个数限制 @@ -85,3 +85,47 @@ title: TDengine 参数限制与保留关键字 | CONNECTIONS | HAVING | NOT | SOFFSET | VNODES | | CONNS | ID | NOTNULL | STABLE | WAL | | COPY | IF | NOW | STABLES | WHERE | +| _C0 | _QSTART | _QSTOP | _QDURATION | _WSTART | +| _WSTOP | _WDURATION | _ROWTS | + +## 特殊说明 +### TBNAME +`TBNAME` 可以视为超级表中一个特殊的标签,代表子表的表名。 + +获取一个超级表所有的子表名及相关的标签信息: + +```mysql +SELECT TBNAME, location FROM meters; +``` + +统计超级表下辖子表数量: + +```mysql +SELECT COUNT(TBNAME) FROM meters; +``` + +以上两个查询均只支持在WHERE条件子句中添加针对标签(TAGS)的过滤条件。例如: +```mysql +taos> SELECT TBNAME, location FROM meters; + tbname | location | +================================================================== + d1004 | California.SanFrancisco | + d1003 | California.SanFrancisco | + d1002 | California.LosAngeles | + d1001 | California.LosAngeles | +Query OK, 4 row(s) in set (0.000881s) + +taos> SELECT COUNT(tbname) FROM meters WHERE groupId > 2; + count(tbname) | +======================== + 2 | +Query OK, 1 row(s) in set (0.001091s) 
+``` +### _QSTART/_QSTOP/_QDURATION +表示查询过滤窗口的起始,结束以及持续时间。 + +### _WSTART/_WSTOP/_WDURATION +窗口切分聚合查询(例如 interval/session window/state window)中表示每个切分窗口的起始,结束以及持续时间。 + +### _c0/_ROWTS +_c0 _ROWTS 等价,表示表或超级表的第一列 diff --git a/docs-cn/12-taos-sql/12-keywords/_category_.yml b/docs-cn/12-taos-sql/12-keywords/_category_.yml deleted file mode 100644 index 67738650a4564477f017542aea81767b3de72922..0000000000000000000000000000000000000000 --- a/docs-cn/12-taos-sql/12-keywords/_category_.yml +++ /dev/null @@ -1 +0,0 @@ -label: 参数限制与保留关键字 \ No newline at end of file diff --git a/docs-cn/12-taos-sql/13-operators.md b/docs-cn/12-taos-sql/13-operators.md new file mode 100644 index 0000000000000000000000000000000000000000..22b78455fb35e9ebe5978b30505819e1a2b678c8 --- /dev/null +++ b/docs-cn/12-taos-sql/13-operators.md @@ -0,0 +1,66 @@ +--- +sidebar_label: 运算符 +title: 运算符 +--- + +## 算术运算符 + +| # | **运算符** | **支持的类型** | **说明** | +| --- | :--------: | -------------- | -------------------------- | +| 1 | +, - | 数值类型 | 表达正数和负数,一元运算符 | +| 2 | +, - | 数值类型 | 表示加法和减法,二元运算符 | +| 3 | \*, / | 数值类型 | 表示乘法和除法,二元运算符 | +| 4 | % | 数值类型 | 表示取余运算,二元运算符 | + +## 位运算符 + +| # | **运算符** | **支持的类型** | **说明** | +| --- | :--------: | -------------- | ------------------ | +| 1 | & | 数值类型 | 按位与,二元运算符 | +| 2 | \| | 数值类型 | 按位或,二元运算符 | + +## JSON 运算符 + +`->` 运算符可以对 JSON 类型的列按键取值。`->` 左侧是列标识符,右侧是键的字符串常量,如 `col->'name'`,返回键 `'name'` 的值。 + +## 集合运算符 + +集合运算符将两个查询的结果合并为一个结果。包含集合运算符的查询称之为复合查询。复合查询中每条查询的选择列表中的相应表达式在数量上必须匹配,且结果类型以第一条查询为准,后续查询的结果类型必须可转换到第一条查询的结果类型,转换规则同 CAST 函数。 + +TDengine 支持 `UNION ALL` 和 `UNION` 操作符。UNION ALL 将查询返回的结果集合并返回,并不去重。UNION 将查询返回的结果集合并并去重后返回。在同一个 SQL 语句中,集合操作符最多支持 100 个。 + +## 比较运算符 + +| # | **运算符** | **支持的类型** | **说明** | +| --- | :---------------: | -------------------------------------------------------------------- | -------------------- | +| 1 | = | 除 BLOB、MEDIUMBLOB 和 JSON 外的所有类型 | 相等 | +| 2 | <\>, != | 除 BLOB、MEDIUMBLOB 和 JSON 外的所有类型,且不可以为表的时间戳主键列 | 不相等 | +| 3 | \>, < | 除 BLOB、MEDIUMBLOB 和 JSON 外的所有类型 | 大于,小于 | +| 4 | \>=, <= | 除 BLOB、MEDIUMBLOB 和 JSON 外的所有类型 | 大于等于,小于等于 | +| 5 | IS [NOT] NULL | 所有类型 | 是否为空值 | +| 6 | [NOT] BETWEEN AND | 除 BOOL、BLOB、MEDIUMBLOB 和 JSON 外的所有类型 | 闭区间比较 | +| 7 | IN | 除 BLOB、MEDIUMBLOB 和 JSON 外的所有类型,且不可以为表的时间戳主键列 | 与列表内的任意值相等 | +| 8 | LIKE | BINARY、NCHAR 和 VARCHAR | 通配符匹配 | +| 9 | MATCH, NMATCH | BINARY、NCHAR 和 VARCHAR | 正则表达式匹配 | +| 10 | CONTAINS | JSON | JSON 中是否存在某键 | + +LIKE 条件使用通配符字符串进行匹配检查,规则如下: + +- '%'(百分号)匹配 0 到任意个字符;'\_'(下划线)匹配单个任意 ASCII 字符。 +- 如果希望匹配字符串中原本就带有的 \_(下划线)字符,那么可以在通配符字符串中写作 \_,即加一个反斜线来进行转义。 +- 通配符字符串最长不能超过 100 字节。不建议使用太长的通配符字符串,否则将有可能严重影响 LIKE 操作的执行性能。 + +MATCH 条件和 NMATCH 条件使用正则表达式进行匹配,规则如下: + +- 支持符合 POSIX 规范的正则表达式,具体规范内容可参见 Regular Expressions。 +- 只能针对子表名(即 tbname)、字符串类型的标签值进行正则表达式过滤,不支持普通列的过滤。 +- 正则匹配字符串长度不能超过 128 字节。可以通过参数 maxRegexStringLen 设置和调整最大允许的正则匹配字符串,该参数是客户端配置参数,需要重启客户端才能生效 + +## 逻辑运算符 + +| # | **运算符** | **支持的类型** | **说明** | +| --- | :--------: | -------------- | --------------------------------------------------------------------------- | +| 1 | AND | BOOL | 逻辑与,如果两个条件均为 TRUE, 则返回 TRUE。如果任一为 FALSE,则返回 FALSE | +| 2 | OR | BOOL | 逻辑或,如果任一条件为 TRUE, 则返回 TRUE。如果两者都是 FALSE,则返回 FALSE | + +TDengine 在计算逻辑条件时,会进行短路径优化,即对于 AND,第一个条件为 FALSE,则不再计算第二个条件,直接返回 FALSE;对于 OR,第一个条件为 TRUE,则不再计算第二个条件,直接返回 TRUE。 diff --git a/docs-cn/12-taos-sql/index.md b/docs-cn/12-taos-sql/index.md index 269bc1d2b5ddfa25c42652d8f639bfe2fb1d42e5..cb01b3a918778abc6c7891c1ff185f1db32d3d36 100644 --- a/docs-cn/12-taos-sql/index.md +++ b/docs-cn/12-taos-sql/index.md @@ -7,8 +7,6 @@ 
description: "TAOS SQL 支持的语法规则、主要查询功能、支持的 SQ TAOS SQL 是用户对 TDengine 进行数据写入和查询的主要工具。TAOS SQL 为了便于用户快速上手,在一定程度上提供与标准 SQL 类似的风格和模式。严格意义上,TAOS SQL 并不是也不试图提供标准的 SQL 语法。此外,由于 TDengine 针对的时序性结构化数据不提供删除功能,因此在 TAO SQL 中不提供数据删除的相关功能。 -TAOS SQL 不支持关键字的缩写,例如 DESCRIBE 不能缩写为 DESC。 - 本章节 SQL 语法遵循如下约定: - <\> 里的内容是用户需要输入的,但不要输入 <\> 本身 @@ -37,4 +35,4 @@ import DocCardList from '@theme/DocCardList'; import {useCurrentSidebarCategory} from '@docusaurus/theme-common'; -``` \ No newline at end of file +``` diff --git a/docs-cn/14-reference/02-rest-api/02-rest-api.mdx b/docs-cn/14-reference/02-rest-api/02-rest-api.mdx index c7680ab3e9e109dbb328711f62881283241444fb..43099319b9c5bb1420c199cfa9f7def0b2c44d3d 100644 --- a/docs-cn/14-reference/02-rest-api/02-rest-api.mdx +++ b/docs-cn/14-reference/02-rest-api/02-rest-api.mdx @@ -16,7 +16,7 @@ RESTful 接口不依赖于任何 TDengine 的库,因此客户端不需要安 在已经安装 TDengine 服务器端的情况下,可以按照如下方式进行验证。 -下面以 Ubuntu 环境中使用 curl 工具(确认已经安装)来验证 RESTful 接口的正常。 +下面以 Ubuntu 环境中使用 curl 工具(确认已经安装)来验证 RESTful 接口的正常,验证前请确认 taosAdapter 服务已开启,在 Linux 系统上此服务默认由 systemd 管理,使用命令 `systemctl start taosadapter` 启动。 下面示例是列出所有的数据库,请把 h1.taosdata.com 和 6041(缺省值)替换为实际运行的 TDengine 服务 FQDN 和端口号: diff --git a/docs-cn/14-reference/03-connector/03-connector.mdx b/docs-cn/14-reference/03-connector/03-connector.mdx index aac358bea0682a9bd0807f10dc0cb2d4ef1d7a7b..7a4a85276ef4bb4ab829250fcf67076962dbb871 100644 --- a/docs-cn/14-reference/03-connector/03-connector.mdx +++ b/docs-cn/14-reference/03-connector/03-connector.mdx @@ -4,7 +4,7 @@ title: 连接器 TDengine 提供了丰富的应用程序开发接口,为了便于用户快速开发自己的应用,TDengine 支持了多种编程语言的连接器,其中官方连接器包括支持 C/C++、Java、Python、Go、Node.js、C# 和 Rust 的连接器。这些连接器支持使用原生接口(taosc)和 REST 接口(部分语言暂不支持)连接 TDengine 集群。社区开发者也贡献了多个非官方连接器,例如 ADO.NET 连接器、Lua 连接器和 PHP 连接器。 -![image-connector](./connector.webp) +![TDengine Database connector architecture](./connector.webp) ## 支持的平台 diff --git a/docs-cn/14-reference/03-connector/cpp.mdx b/docs-cn/14-reference/03-connector/cpp.mdx index aba1d6c717dfec9228f38e89f90cbf1be0021045..aecf9fde12dfae8026d5f838d6467340a891f372 100644 --- a/docs-cn/14-reference/03-connector/cpp.mdx +++ b/docs-cn/14-reference/03-connector/cpp.mdx @@ -114,7 +114,6 @@ TDengine 客户端驱动的安装请参考 [安装指南](/reference/connector# 订阅和消费 ```c -{{#include examples/c/subscribe.c}} ``` diff --git a/docs-cn/14-reference/03-connector/java.mdx b/docs-cn/14-reference/03-connector/java.mdx index 1c24afdc4404a887e83a2664a311e16378ef9283..267757160634b28ab198ae0fd759188cf4ccc5cc 100644 --- a/docs-cn/14-reference/03-connector/java.mdx +++ b/docs-cn/14-reference/03-connector/java.mdx @@ -11,7 +11,7 @@ import TabItem from '@theme/TabItem'; `taos-jdbcdriver` 是 TDengine 的官方 Java 语言连接器,Java 开发人员可以通过它开发存取 TDengine 数据库的应用软件。`taos-jdbcdriver` 实现了 JDBC driver 标准的接口,并提供两种形式的连接器。一种是通过 TDengine 客户端驱动程序(taosc)原生连接 TDengine 实例,支持数据写入、查询、订阅、schemaless 接口和参数绑定接口等功能,一种是通过 taosAdapter 提供的 REST 接口连接 TDengine 实例(2.4.0.0 及更高版本)。REST 连接实现的功能集合和原生连接有少量不同。 -![tdengine-connector](tdengine-jdbc-connector.webp) +![TDengine Database Connector Java](tdengine-jdbc-connector.webp) 上图显示了两种 Java 应用使用连接器访问 TDengine 的两种方式: diff --git a/docs-cn/14-reference/03-connector/node.mdx b/docs-cn/14-reference/03-connector/node.mdx index 12345fa9fe995c41828df07703f0efb61a2e029d..9f2bed9e97cb33aeabfce3d69dc3774931b426c0 100644 --- a/docs-cn/14-reference/03-connector/node.mdx +++ b/docs-cn/14-reference/03-connector/node.mdx @@ -14,7 +14,6 @@ import NodeInfluxLine from "../../07-develop/03-insert-data/_js_line.mdx"; import NodeOpenTSDBTelnet from 
"../../07-develop/03-insert-data/_js_opts_telnet.mdx"; import NodeOpenTSDBJson from "../../07-develop/03-insert-data/_js_opts_json.mdx"; import NodeQuery from "../../07-develop/04-query-data/_js.mdx"; -import NodeAsyncQuery from "../../07-develop/04-query-data/_js_async.mdx"; `td2.0-connector` 和 `td2.0-rest-connector` 是 TDengine 的官方 Node.js 语言连接器。Node.js 开发人员可以通过它开发可以存取 TDengine 集群数据的应用软件。 @@ -189,14 +188,8 @@ let cursor = conn.cursor(); ### 查询数据 -#### 同步查询 - -#### 异步查询 - - - ## 更多示例程序 | 示例程序 | 示例程序描述 | diff --git a/docs-cn/14-reference/04-taosadapter.md b/docs-cn/14-reference/04-taosadapter.md index 5fc9a282815813a87c9c17b84e5ffafc2f4692e7..6e259391d40acfd48d8db8db3246ad2196ce0520 100644 --- a/docs-cn/14-reference/04-taosadapter.md +++ b/docs-cn/14-reference/04-taosadapter.md @@ -24,7 +24,7 @@ taosAdapter 提供以下功能: ## taosAdapter 架构图 -![taosAdapter Architecture](taosAdapter-architecture.webp) +![TDengine Database taosAdapter Architecture](taosAdapter-architecture.webp) ## taosAdapter 部署方法 diff --git a/docs-cn/14-reference/06-taosdump.md b/docs-cn/14-reference/06-taosdump.md index 7131493ec9439225d8047288ed86026c887f0aac..3a9f2e9acd215be102991a1d91fba285ef6315bb 100644 --- a/docs-cn/14-reference/06-taosdump.md +++ b/docs-cn/14-reference/06-taosdump.md @@ -38,7 +38,7 @@ taosdump 有两种安装方式: :::tip - taosdump 1.4.1 之后的版本提供 `-I` 参数,用于解析 avro 文件 schema 和数据,如果指定 `-s` 参数将只解析 schema。 -- taosdump 1.4.2 之后的备份使用 `-B` 参数指定的批次数,默认值为 16384,如果在某些环境下由于网络速度或磁盘性能不足导致 "Error actual dump .. batch .." 可以通过 `-B` 参数挑战为更小的值进行尝试。 +- taosdump 1.4.2 之后的备份使用 `-B` 参数指定的批次数,默认值为 16384,如果在某些环境下由于网络速度或磁盘性能不足导致 "Error actual dump .. batch .." 可以通过 `-B` 参数调整为更小的值进行尝试。 ::: diff --git a/docs-cn/14-reference/07-tdinsight/index.md b/docs-cn/14-reference/07-tdinsight/index.md index d7511fde3b5b92b335d60026e56944b9e2b99398..5990a831b8bc1788deaddfb38f717f2723969362 100644 --- a/docs-cn/14-reference/07-tdinsight/index.md +++ b/docs-cn/14-reference/07-tdinsight/index.md @@ -233,25 +233,25 @@ sudo systemctl enable grafana-server 指向 **Configurations** -> **Data Sources** 菜单,然后点击 **Add data source** 按钮。 -![添加数据源按钮](./assets/howto-add-datasource-button.webp) +![TDengine Database TDinsight 添加数据源按钮](./assets/howto-add-datasource-button.webp) 搜索并选择**TDengine**。 -![添加数据源](./assets/howto-add-datasource-tdengine.webp) +![TDengine Database TDinsight 添加数据源](./assets/howto-add-datasource-tdengine.webp) 配置 TDengine 数据源。 -![数据源配置](./assets/howto-add-datasource.webp) +![TDengine Database TDinsight 数据源配置](./assets/howto-add-datasource.webp) 保存并测试,正常情况下会报告 'TDengine Data source is working'。 -![数据源测试](./assets/howto-add-datasource-test.webp) +![TDengine Database TDinsight 数据源测试](./assets/howto-add-datasource-test.webp) ### 导入仪表盘 指向 **+** / **Create** - **import**(或 `/dashboard/import` url)。 -![导入仪表盘和配置](./assets/import_dashboard.webp) +![TDengine Database TDinsight 导入仪表盘和配置](./assets/import_dashboard.webp) 在 **Import via grafana.com** 位置键入仪表盘 ID `15167` 并 **Load**。 @@ -259,7 +259,7 @@ sudo systemctl enable grafana-server 导入完成后,TDinsight 的完整页面视图如下所示。 -![显示](./assets/TDinsight-full.webp) +![TDengine Database TDinsight 显示](./assets/TDinsight-full.webp) ## TDinsight 仪表盘详细信息 @@ -269,7 +269,7 @@ TDinsight 仪表盘旨在提供 TDengine 相关资源使用情况[dnodes, mnodes ### 集群状态 -![tdinsight-mnodes-overview](./assets/TDinsight-1-cluster-status.webp) +![TDengine Database TDinsight mnodes overview](./assets/TDinsight-1-cluster-status.webp) 这部分包括集群当前信息和状态,告警信息也在此处(从左到右,从上到下)。 @@ -289,7 +289,7 @@ TDinsight 仪表盘旨在提供 TDengine 相关资源使用情况[dnodes, mnodes ### DNodes 状态 
-![tdinsight-mnodes-overview](./assets/TDinsight-2-dnodes.webp) +![TDengine Database TDinsight mnodes overview](./assets/TDinsight-2-dnodes.webp) - **DNodes Status**:`show dnodes` 的简单表格视图。 - **DNodes Lifetime**:从创建 dnode 开始经过的时间。 @@ -298,14 +298,14 @@ TDinsight 仪表盘旨在提供 TDengine 相关资源使用情况[dnodes, mnodes ### MNode 概述 -![tdinsight-mnodes-overview](./assets/TDinsight-3-mnodes.webp) +![TDengine Database TDinsight mnodes overview](./assets/TDinsight-3-mnodes.webp) 1. **MNodes Status**:`show mnodes` 的简单表格视图。 2. **MNodes Number**:类似于`DNodes Number`,MNodes 数量变化。 ### 请求 -![tdinsight-requests](./assets/TDinsight-4-requests.webp) +![TDengine Database TDinsight requests](./assets/TDinsight-4-requests.webp) 1. **Requests Rate(Inserts per Second)**:平均每秒插入次数。 2. **Requests (Selects)**:查询请求数及变化率(count of second)。 @@ -313,7 +313,7 @@ TDinsight 仪表盘旨在提供 TDengine 相关资源使用情况[dnodes, mnodes ### 数据库 -![tdinsight-database](./assets/TDinsight-5-database.webp) +![TDengine Database TDinsight database](./assets/TDinsight-5-database.webp) 数据库使用情况,对变量 `$database` 的每个值即每个数据库进行重复多行展示。 @@ -325,7 +325,7 @@ TDinsight 仪表盘旨在提供 TDengine 相关资源使用情况[dnodes, mnodes ### DNode 资源使用情况 -![dnode-usage](./assets/TDinsight-6-dnode-usage.webp) +![TDengine Database TDinsight dnode-usage](./assets/TDinsight-6-dnode-usage.webp) 数据节点资源使用情况展示,对变量 `$fqdn` 即每个数据节点进行重复多行展示。包括: @@ -346,13 +346,13 @@ TDinsight 仪表盘旨在提供 TDengine 相关资源使用情况[dnodes, mnodes ### 登录历史 -![登录历史](./assets/TDinsight-7-login-history.webp) +![TDengine Database TDinsight 登录历史](./assets/TDinsight-7-login-history.webp) 目前只报告每分钟登录次数。 ### 监控 taosAdapter -![taosadapter](./assets/TDinsight-8-taosadapter.webp) +![TDengine Database TDinsight monitor taosadapter](./assets/TDinsight-8-taosadapter.webp) 支持监控 taosAdapter 请求统计和状态详情。包括: diff --git a/docs-cn/14-reference/13-schemaless/13-schemaless.md b/docs-cn/14-reference/13-schemaless/13-schemaless.md index 4de310c248d7763690acef80cdca1c50f609d63b..f2712f2814593bddd65401cb129c8c58ee55a316 100644 --- a/docs-cn/14-reference/13-schemaless/13-schemaless.md +++ b/docs-cn/14-reference/13-schemaless/13-schemaless.md @@ -82,7 +82,7 @@ st,t1=3,t2=4,t3=t3 c1=3i64,c3="passit",c2=false,c4=4f64 1626006833639000000 :::tip 无模式所有的处理逻辑,仍会遵循 TDengine 对数据结构的底层限制,例如每行数据的总长度不能超过 -16k 字节。这方面的具体限制约束请参见 [TAOS SQL 边界限制](/taos-sql/limit) +48KB。这方面的具体限制约束请参见 [TAOS SQL 边界限制](/taos-sql/limit) ::: diff --git a/docs-cn/20-third-party/01-grafana.mdx b/docs-cn/20-third-party/01-grafana.mdx index f9f7a26aa1632a07406199d76b3ad4ef9f1ec3e0..40b5c0ff4f2de8ff9eeb3afa61728ca7a899f5ea 100644 --- a/docs-cn/20-third-party/01-grafana.mdx +++ b/docs-cn/20-third-party/01-grafana.mdx @@ -18,21 +18,22 @@ TDengine 能够与开源数据可视化系统 [Grafana](https://www.grafana.com/ ## 配置 Grafana -TDengine 的 Grafana 插件托管在 GitHub,可从 下载,当前最新版本为 3.1.4。 - -推荐使用 [`grafana-cli` 命令行工具](https://grafana.com/docs/grafana/latest/administration/cli/) 进行插件安装。 +使用 [`grafana-cli` 命令行工具](https://grafana.com/docs/grafana/latest/administration/cli/) 进行插件[安装](https://grafana.com/grafana/plugins/tdengine-datasource/?tab=installation)。 ```bash -sudo -u grafana grafana-cli \ - --pluginUrl https://github.com/taosdata/grafanaplugin/releases/download/v3.1.7/tdengine-datasource-3.1.7.zip \ - plugins install tdengine-datasource +grafana-cli plugins install tdengine-datasource +# with sudo +sudo -u grafana grafana-cli plugins install tdengine-datasource ``` -或者下载到本地并解压到 Grafana 插件目录。 +或者从 [GitHub](https://github.com/taosdata/grafanaplugin/releases/tag/latest) 或 [Grafana](https://grafana.com/grafana/plugins/tdengine-datasource/?tab=installation) 
下载 .zip 文件到本地并解压到 Grafana 插件目录。命令行下载示例如下: ```bash -GF_VERSION=3.1.7 +GF_VERSION=3.2.2 +# from GitHub wget https://github.com/taosdata/grafanaplugin/releases/download/v$GF_VERSION/tdengine-datasource-$GF_VERSION.zip +# from Grafana +wget -O tdengine-datasource-$GF_VERSION.zip https://grafana.com/api/plugins/tdengine-datasource/versions/$GF_VERSION/download ``` 以 CentOS 7.2 操作系统为例,将插件包解压到 /var/lib/grafana/plugins 目录下,重新启动 grafana 即可。 @@ -41,52 +42,41 @@ wget https://github.com/taosdata/grafanaplugin/releases/download/v$GF_VERSION/td sudo unzip tdengine-datasource-$GF_VERSION.zip -d /var/lib/grafana/plugins/ ``` -:::note -3.1.6 和更早版本未签名,会在 Grafana 7.3+ / 8.x 版本签名检查时失败导致无法加载插件,需要在 grafana.ini 文件中修改配置如下: - -```ini -[plugins] -allow_loading_unsigned_plugins = tdengine-datasource -``` - -::: - -在 Docker 环境下,可以使用如下的环境变量设置自动安装并设置 TDengine 插件: +如果 Grafana 在 Docker 环境下运行,可以使用如下的环境变量设置自动安装 TDengine 数据源插件: ```bash -GF_INSTALL_PLUGINS=https://github.com/taosdata/grafanaplugin/releases/download/v3.1.4/tdengine-datasource-3.1.4.zip;tdengine-datasource -GF_PLUGINS_ALLOW_LOADING_UNSIGNED_PLUGINS=tdengine-datasource +GF_INSTALL_PLUGINS=tdengine-datasource ``` ## 使用 Grafana ### 配置数据源 -用户可以直接通过 http://localhost:3000 的网址,登录 Grafana 服务器(用户名/密码:admin/admin),通过左侧 `Configuration -> Data Sources` 可以添加数据源,如下图所示: +用户可以直接通过 的网址,登录 Grafana 服务器(用户名/密码:admin/admin),通过左侧 `Configuration -> Data Sources` 可以添加数据源,如下图所示: -![img](./add_datasource1.webp) +![TDengine Database Grafana plugin add data source](./add_datasource1.webp) 点击 `Add data source` 可进入新增数据源页面,在查询框中输入 TDengine 可选择添加,如下图所示: -![img](./add_datasource2.webp) +![TDengine Database Grafana plugin add data source](./add_datasource2.webp) 进入数据源配置页面,按照默认提示修改相应配置即可: -![img](./add_datasource3.webp) +![TDengine Database Grafana plugin add data source](./add_datasource3.webp) -- Host: TDengine 集群中提供 REST 服务 (在 2.4 之前由 taosd 提供, 从 2.4 开始由 taosAdapter 提供)的组件所在服务器的 IP 地址与 TDengine REST 服务的端口号(6041),默认 http://localhost:6041。 +- Host: TDengine 集群中提供 REST 服务 (在 2.4 之前由 taosd 提供, 从 2.4 开始由 taosAdapter 提供)的组件所在服务器的 IP 地址与 TDengine REST 服务的端口号(6041),默认 。 - User:TDengine 用户名。 - Password:TDengine 用户密码。 点击 `Save & Test` 进行测试,成功会有如下提示: -![img](./add_datasource4.webp) +![TDengine Database Grafana plugin add data source](./add_datasource4.webp) ### 创建 Dashboard 回到主界面创建 Dashboard,点击 Add Query 进入面板查询页面: -![img](./create_dashboard1.webp) +![TDengine Database Grafana plugin create dashboard](./create_dashboard1.webp) 如上图所示,在 Query 中选中 `TDengine` 数据源,在下方查询框可输入相应 SQL 进行查询,具体说明如下: @@ -96,7 +86,7 @@ GF_PLUGINS_ALLOW_LOADING_UNSIGNED_PLUGINS=tdengine-datasource 按照默认提示查询当前 TDengine 部署所在服务器指定间隔系统内存平均使用量如下: -![img](./create_dashboard2.webp) +![TDengine Database Grafana plugin create dashboard](./create_dashboard2.webp) > 关于如何使用 Grafana 创建相应的监测界面以及更多有关使用 Grafana 的信息,请参考 Grafana 官方的[文档](https://grafana.com/docs/)。 diff --git a/docs-cn/20-third-party/09-emq-broker.md b/docs-cn/20-third-party/09-emq-broker.md index b9d099c145d89c4f8e8a3cfaa994bffa0085e280..833fa97e2e5f9f138718e18bb16aa3e65abca8cc 100644 --- a/docs-cn/20-third-party/09-emq-broker.md +++ b/docs-cn/20-third-party/09-emq-broker.md @@ -45,25 +45,25 @@ MQTT 是流行的物联网数据传输协议,[EMQX](https://github.com/emqx/em 使用浏览器打开网址 http://IP:18083 并登录 EMQX Dashboard。初次安装用户名为 `admin` 密码为:`public` -![img](./emqx/login-dashboard.webp) +![TDengine Database EMQX login dashboard](./emqx/login-dashboard.webp) ### 创建规则(Rule) 选择左侧“规则引擎(Rule Engine)”中的“规则(Rule)”并点击“创建(Create)”按钮: -![img](./emqx/rule-engine.webp) +![TDengine Database EMQX rule 
engine](./emqx/rule-engine.webp) ### 编辑 SQL 字段 -![img](./emqx/create-rule.webp) +![TDengine Database EMQX create rule](./emqx/create-rule.webp) ### 新增“动作(action handler)” -![img](./emqx/add-action-handler.webp) +![TDengine Database EMQX](./emqx/add-action-handler.webp) ### 新增“资源(Resource)” -![img](./emqx/create-resource.webp) +![TDengine Database EMQX create resource](./emqx/create-resource.webp) 选择“发送数据到 Web 服务“并点击“新建资源”按钮: @@ -71,13 +71,13 @@ MQTT 是流行的物联网数据传输协议,[EMQX](https://github.com/emqx/em 选择“发送数据到 Web 服务“并填写 请求 URL 为 运行 taosAdapter 的服务器地址和端口(默认为 6041)。其他属性请保持默认值。 -![img](./emqx/edit-resource.webp) +![TDengine Database EMQX edit resource](./emqx/edit-resource.webp) ### 编辑“动作(action)” 编辑资源配置,增加 Authorization 认证的键/值配对项,相关文档请参考[ TDengine REST API 文档](https://docs.taosdata.com/reference/rest-api/)。在消息体中输入规则引擎替换模板。 -![img](./emqx/edit-action.webp) +![TDengine Database EMQX edit action](./emqx/edit-action.webp) ## 编写模拟测试程序 @@ -164,7 +164,7 @@ MQTT 是流行的物联网数据传输协议,[EMQX](https://github.com/emqx/em 注意:代码中 CLIENT_NUM 在开始测试中可以先设置一个较小的值,避免硬件性能不能完全处理较大并发客户端数量。 -![img](./emqx/client-num.webp) +![TDengine Database EMQX client num](./emqx/client-num.webp) ## 执行测试模拟发送 MQTT 数据 @@ -173,19 +173,19 @@ npm install mqtt mockjs --save --registry=https://registry.npm.taobao.org node mock.js ``` -![img](./emqx/run-mock.webp) +![TDengine Database EMQX run-mock](./emqx/run-mock.webp) ## 验证 EMQX 接收到数据 在 EMQX Dashboard 规则引擎界面进行刷新,可以看到有多少条记录被正确接收到: -![img](./emqx/check-rule-matched.webp) +![TDengine Database EMQX rule matched](./emqx/check-rule-matched.webp) ## 验证数据写入到 TDengine 使用 TDengine CLI 程序登录并查询相应数据库和表,验证数据是否被正确写入到 TDengine 中: -![img](./emqx/check-result-in-taos.webp) +![TDengine Database EMQX result in taos](./emqx/check-result-in-taos.webp) TDengine 详细使用方法请参考 [TDengine 官方文档](https://docs.taosdata.com/)。 EMQX 详细使用方法请参考 [EMQX 官方文档](https://www.emqx.io/docs/zh/v4.4/rule/rule-engine.html)。 diff --git a/docs-cn/20-third-party/11-kafka.md b/docs-cn/20-third-party/11-kafka.md index 058909ca48da8dd1bb627ea4f984f086dd8aaf8e..8369806adcfe1b195348e7d60160609cde9150e8 100644 --- a/docs-cn/20-third-party/11-kafka.md +++ b/docs-cn/20-third-party/11-kafka.md @@ -7,17 +7,17 @@ TDengine Kafka Connector 包含两个插件: TDengine Source Connector 和 TDeng ## 什么是 Kafka Connect? -Kafka Connect 是 Apache Kafka 的一个组件,用于使其它系统,比如数据库、云服务、文件系统等能方便地连接到 Kafka。数据既可以通过 Kafka Connect 从其它系统流向 Kafka, 也可以通过 Kafka Connect 从 Kafka 流向其它系统。从其它系统读数据的插件称为 Source Connector, 写数据到其它系统的插件称为 Sink Connector。Source Connector 和 Sink Connector 都不会直接连接 Kafka Broker,Source Connector 把数据转交给 Kafka Connect。Sink Connector 从 Kafka Connect 接收数据。 +Kafka Connect 是 [Apache Kafka](https://kafka.apache.org/) 的一个组件,用于使其它系统,比如数据库、云服务、文件系统等能方便地连接到 Kafka。数据既可以通过 Kafka Connect 从其它系统流向 Kafka, 也可以通过 Kafka Connect 从 Kafka 流向其它系统。从其它系统读数据的插件称为 Source Connector, 写数据到其它系统的插件称为 Sink Connector。Source Connector 和 Sink Connector 都不会直接连接 Kafka Broker,Source Connector 把数据转交给 Kafka Connect。Sink Connector 从 Kafka Connect 接收数据。 -![](kafka/Kafka_Connect.webp) +![TDengine Database Kafka Connector -- Kafka Connect structure](kafka/Kafka_Connect.webp) TDengine Source Connector 用于把数据实时地从 TDengine 读出来发送给 Kafka Connect。TDengine Sink Connector 用于 从 Kafka Connect 接收数据并写入 TDengine。 -![](kafka/streaming-integration-with-kafka-connect.webp) +![TDengine Database Kafka Connector -- streaming integration with kafka connect](kafka/streaming-integration-with-kafka-connect.webp) ## 什么是 Confluent? -Confluent 在 Kafka 的基础上增加很多扩展功能。包括: +[Confluent](https://www.confluent.io/) 在 Kafka 的基础上增加很多扩展功能。包括: 1. 
Schema Registry 2. REST 代理 @@ -26,7 +26,7 @@ Confluent 在 Kafka 的基础上增加很多扩展功能。包括: 5. 管理和监控 Kafka 的 GUI —— Confluent 控制中心 这些扩展功能有的包含在社区版本的 Confluent 中,有的只有企业版能用。 -![](kafka/confluentPlatform.webp) +![TDengine Database Kafka Connector -- Confluent introduction](kafka/confluentPlatform.webp) Confluent 企业版提供了 `confluent` 命令行工具管理各个组件。 @@ -81,10 +81,10 @@ Development: false git clone https://github.com:taosdata/kafka-connect-tdengine.git cd kafka-connect-tdengine mvn clean package -unzip -d $CONFLUENT_HOME/share/confluent-hub-components/ target/components/packages/taosdata-kafka-connect-tdengine-0.1.0.zip +unzip -d $CONFLUENT_HOME/share/java/ target/components/packages/taosdata-kafka-connect-tdengine-*.zip ``` -以上脚本先 clone 项目源码,然后用 Maven 编译打包。打包完成后在 `target/components/packages/` 目录生成了插件的 zip 包。把这个 zip 包解压到安装插件的路径即可。安装插件的路径在配置文件 `$CONFLUENT_HOME/etc/kafka/connect-standalone.properties` 中。默认的路径为 `$CONFLUENT_HOME/share/confluent-hub-components/`。 +以上脚本先 clone 项目源码,然后用 Maven 编译打包。打包完成后在 `target/components/packages/` 目录生成了插件的 zip 包。把这个 zip 包解压到安装插件的路径即可。上面的示例中使用了内置的插件安装路径: `$CONFLUENT_HOME/share/java/`。 ### 用 confluent-hub 安装 @@ -98,7 +98,7 @@ confluent local services start ``` :::note -一定要先安装插件再启动 Confluent, 否则会出现找不到类的错误。Kafka Connect 的日志(默认路径: /tmp/confluent.xxxx/connect/logs/connect.log)中会输出成功安装的插件,据此可判断插件是否安装成功。 +一定要先安装插件再启动 Confluent, 否则加载插件会失败。 ::: :::tip @@ -125,6 +125,61 @@ Control Center is [UP] 清空数据可执行 `rm -rf /tmp/confluent.106668`。 ::: +### 验证各个组件是否启动成功 + +输入命令: + +``` +confluent local services status +``` + +如果各组件都启动成功,会得到如下输出: + +``` +Connect is [UP] +Control Center is [UP] +Kafka is [UP] +Kafka REST is [UP] +ksqlDB Server is [UP] +Schema Registry is [UP] +ZooKeeper is [UP] +``` + +### 验证插件是否安装成功 + +在 Kafka Connect 组件完全启动后,可用以下命令列出成功加载的插件: + +``` +confluent local services connect plugin list +``` + +如果成功安装,会输出如下: + +```txt {4,9} +Available Connect Plugins: +[ + { + "class": "com.taosdata.kafka.connect.sink.TDengineSinkConnector", + "type": "sink", + "version": "1.0.0" + }, + { + "class": "com.taosdata.kafka.connect.source.TDengineSourceConnector", + "type": "source", + "version": "1.0.0" + }, +...... +``` + +如果插件安装失败,请检查 Kafka Connect 的启动日志是否有异常信息,用以下命令输出日志路径: +``` +echo `cat /tmp/confluent.current`/connect/connect.stdout +``` +该命令的输出类似: `/tmp/confluent.104086/connect/connect.stdout`。 + +与日志文件 `connect.stdout` 同一目录,还有一个文件名为: `connect.properties`。在这个文件的末尾,可以看到最终生效的 `plugin.path`, 它是一系列用逗号分割的路径。如果插件安装失败,很可能是因为实际的安装路径不包含在 `plugin.path` 中。 + + ## TDengine Sink Connector 的使用 TDengine Sink Connector 的作用是同步指定 topic 的数据到 TDengine。用户无需提前创建数据库和超级表。可手动指定目标数据库的名字(见配置参数 connection.database), 也可按一定规则生成(见配置参数 connection.database.prefix)。 @@ -144,7 +199,7 @@ vi sink-demo.properties sink-demo.properties 内容如下: ```ini title="sink-demo.properties" -name=tdengine-sink-demo +name=TDengineSinkConnector connector.class=com.taosdata.kafka.connect.sink.TDengineSinkConnector tasks.max=1 topics=meters @@ -153,6 +208,7 @@ connection.user=root connection.password=taosdata connection.database=power db.schemaless=line +data.precision=ns key.converter=org.apache.kafka.connect.storage.StringConverter value.converter=org.apache.kafka.connect.storage.StringConverter ``` @@ -179,6 +235,7 @@ confluent local services connect connector load TDengineSinkConnector --config . 
"connection.url": "jdbc:TAOS://127.0.0.1:6030", "connection.user": "root", "connector.class": "com.taosdata.kafka.connect.sink.TDengineSinkConnector", + "data.precision": "ns", "db.schemaless": "line", "key.converter": "org.apache.kafka.connect.storage.StringConverter", "tasks.max": "1", @@ -223,10 +280,10 @@ Database changed. taos> select * from meters; ts | current | voltage | phase | groupid | location | =============================================================================================================================================================== - 2022-03-28 09:56:51.249000000 | 11.800000000 | 221.000000000 | 0.280000000 | 2 | California.LosAngeles | - 2022-03-28 09:56:51.250000000 | 13.400000000 | 223.000000000 | 0.290000000 | 2 | California.LosAngeles | - 2022-03-28 09:56:51.249000000 | 10.800000000 | 223.000000000 | 0.290000000 | 3 | California.LosAngeles | - 2022-03-28 09:56:51.250000000 | 11.300000000 | 221.000000000 | 0.350000000 | 3 | California.LosAngeles | + 2022-03-28 09:56:51.249000000 | 11.800000000 | 221.000000000 | 0.280000000 | 2 | California.LosAngeles | + 2022-03-28 09:56:51.250000000 | 13.400000000 | 223.000000000 | 0.290000000 | 2 | California.LosAngeles | + 2022-03-28 09:56:51.249000000 | 10.800000000 | 223.000000000 | 0.290000000 | 3 | California.LosAngeles | + 2022-03-28 09:56:51.250000000 | 11.300000000 | 221.000000000 | 0.350000000 | 3 | California.LosAngeles | Query OK, 4 row(s) in set (0.004208s) ``` @@ -356,21 +413,33 @@ confluent local services connect connector unload TDengineSourceConnector 2. `connection.database.prefix`: 当 connection.database 为 null 时, 目标数据库的前缀。可以包含占位符 '${topic}'。 比如 kafka_${topic}, 对于主题 'orders' 将写入数据库 'kafka_orders'。 默认 null。当为 null 时,目标数据库的名字和主题的名字是一致的。 3. `batch.size`: 分批写入每批记录数。当 Sink Connector 一次接收到的数据大于这个值时将分批写入。 4. `max.retries`: 发生错误时的最大重试次数。默认为 1。 -5. `retry.backoff.ms`: 发送错误时重试的时间间隔。单位毫秒,默认 3000。 -6. `db.schemaless`: 数据格式,必须指定为: line、json、telnet 中的一个。分别代表 InfluxDB 行协议格式、 OpenTSDB JSON 格式、 OpenTSDB Telnet 行协议格式。 +5. `retry.backoff.ms`: 发送错误时重试的时间间隔。单位毫秒,默认为 3000。 +6. `db.schemaless`: 数据格式,可选值为: + 1. line :代表 InfluxDB 行协议格式 + 2. json : 代表 OpenTSDB JSON 格式 + 3. telnet :代表 OpenTSDB Telnet 行协议格式 +7. `data.precision`: 使用 InfluxDB 行协议格式时,时间戳的精度。可选值为: + 1. ms : 表示毫秒 + 2. us : 表示微秒 + 3. ns : 表示纳秒。默认为纳秒。 ### TDengine Source Connector 特有的配置 1. `connection.database`: 源数据库名称,无缺省值。 2. `topic.prefix`: 数据导入 kafka 后 topic 名称前缀。 使用 `topic.prefix` + `connection.database` 名称作为完整 topic 名。默认为空字符串 ""。 -3. `timestamp.initial`: 数据同步起始时间。格式为'yyyy-MM-dd HH:mm:ss'。默认 "1970-01-01 00:00:00"。 -4. `poll.interval.ms`: 拉取数据间隔,单位为 ms。默认 1000。 +3. `timestamp.initial`: 数据同步起始时间。格式为'yyyy-MM-dd HH:mm:ss'。默认为 "1970-01-01 00:00:00"。 +4. `poll.interval.ms`: 拉取数据间隔,单位为 ms。默认为 1000。 5. `fetch.max.rows` : 检索数据库时最大检索条数。 默认为 100。 -6. `out.format`: 数据格式。取值 line 或 json。line 表示 InfluxDB Line 协议格式, json 表示 OpenTSDB JSON 格式。默认 line。 +6. `out.format`: 数据格式。取值 line 或 json。line 表示 InfluxDB Line 协议格式, json 表示 OpenTSDB JSON 格式。默认为 line。 + +## 其他说明 + +1. 插件的安装位置可以自定义,请参考官方文档:https://docs.confluent.io/home/connect/self-managed/install.html#install-connector-manually。 +2. 
本教程的示例程序使用了 Confluent 平台,但是 TDengine Kafka Connector 本身同样适用于独立安装的 Kafka, 且配置方法相同。关于如何在独立安装的 Kafka 环境使用 Kafka Connect 插件, 请参考官方文档: https://kafka.apache.org/documentation/#connect。 ## 问题反馈 -https://github.com/taosdata/kafka-connect-tdengine/issues +无论遇到任何问题,都欢迎在本项目的 Github 仓库反馈: https://github.com/taosdata/kafka-connect-tdengine/issues。 ## 参考 diff --git a/docs-cn/21-tdinternal/01-arch.md b/docs-cn/21-tdinternal/01-arch.md index b9b13468fc7fe60daccba809183b2e5e04b4cc81..433cb4808b60ce73c639a23beef45fb8e1afb7dd 100644 --- a/docs-cn/21-tdinternal/01-arch.md +++ b/docs-cn/21-tdinternal/01-arch.md @@ -11,7 +11,7 @@ TDengine 的设计是基于单个硬件、软件系统不可靠,基于任何 TDengine 分布式架构的逻辑结构图如下: -![TDengine架构示意图](./structure.webp) +![TDengine Database 架构示意图](./structure.webp)
图 1 TDengine架构示意图
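+为便于对照上图,下面给出一个查看集群中各逻辑节点的 SQL 示意(假设已通过 TDengine CLI 连接到集群,实际输出的列与内容因部署环境而异):
+
+```sql
+-- 查看集群中的数据节点(dnode)及其状态
+SHOW DNODES;
+
+-- 查看管理节点(mnode)的分布情况
+SHOW MNODES;
+```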
@@ -63,7 +63,7 @@ TDengine 分布式架构的逻辑结构图如下: 为解释 vnode、mnode、taosc 和应用之间的关系以及各自扮演的角色,下面对写入数据这个典型操作的流程进行剖析。 -![TDengine典型的操作流程](./message.webp) +![TDengine Database 典型的操作流程](./message.webp)
图 2 TDengine 典型的操作流程
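+以一条最简单的写入语句为例(示意,沿用本文档示例中的子表 d1001),应用每执行一次类似下面的 SQL,就会触发一次上图所示的流程:taosc 先获取元数据,再将写入请求转发给相应的 vnode:
+
+```sql
+INSERT INTO d1001 VALUES (NOW(), 10.3, 219, 0.31);
+```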
@@ -135,7 +135,7 @@ TDengine 除 vnode 分片之外,还对时序数据按照时间段进行分区 Master Vnode 遵循下面的写入流程: -![TDengine Master写入流程](./write_master.webp) +![TDengine Database Master写入流程](./write_master.webp)
图 3 TDengine Master 写入流程
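+对应用而言,副本间的主从写入流程是透明的,只需在创建数据库时指定副本数即可(示意,库名 power 为假设的示例):
+
+```sql
+CREATE DATABASE power REPLICA 3;
+```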
@@ -150,7 +150,7 @@ Master Vnode 遵循下面的写入流程: 对于 slave vnode,写入流程是: -![TDengine Slave 写入流程](./write_slave.webp) +![TDengine Database Slave 写入流程](./write_slave.webp)
图 4 TDengine Slave 写入流程
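+可以通过如下 SQL 查看各虚拟节点组(vgroup)中 vnode 的角色与状态,从而确认 master/slave 的分布情况(示意,沿用上文假设的 power 库,输出的具体列因版本而异):
+
+```sql
+USE power;
+SHOW VGROUPS;
+```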
@@ -284,7 +284,7 @@ SELECT COUNT(*) FROM d1001 WHERE ts >= '2017-7-14 00:00:00' AND ts < '2017-7-14 TDengine 对每个数据采集点单独建表,但在实际应用中经常需要对不同的采集点数据进行聚合。为高效的进行聚合操作,TDengine 引入超级表(STable)的概念。超级表用来代表一特定类型的数据采集点,它是包含多张表的表集合,集合里每张表的模式(schema)完全一致,但每张表都带有自己的静态标签,标签可以有多个,可以随时增加、删除和修改。应用可通过指定标签的过滤条件,对一个 STable 下的全部或部分表进行聚合或统计操作,这样大大简化应用的开发。其具体流程如下图所示: -![多表聚合查询原理图](./multi_tables.webp) +![TDengine Database 多表聚合查询原理图](./multi_tables.webp)
图 5 多表聚合查询原理图
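+下面给出一个与上图对应的最小示例(示意,沿用本文档中 meters 超级表的结构):先创建带标签的超级表,再按标签过滤进行多表聚合:
+
+```sql
+CREATE STABLE meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT);
+
+SELECT AVG(voltage) FROM meters WHERE location = 'California.SanFrancisco' GROUP BY groupId;
+```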
diff --git a/docs-cn/25-application/01-telegraf.md b/docs-cn/25-application/01-telegraf.md index 5bfc94c53410f6142b3bc24f696334c334cde933..95df8699ef85b02d6e9dba398c787644fc9089b2 100644 --- a/docs-cn/25-application/01-telegraf.md +++ b/docs-cn/25-application/01-telegraf.md @@ -16,7 +16,7 @@ IT 运维监测数据通常都是对时间特性比较敏感的数据,例如 本文介绍不需要写一行代码,通过简单修改几行配置文件,就可以快速搭建一个基于 TDengine + Telegraf + Grafana 的 IT 运维系统。架构如下图: -![IT-DevOps-Solutions-Telegraf.webp](./IT-DevOps-Solutions-Telegraf.webp) +![TDengine Database IT-DevOps-Solutions-Telegraf](./IT-DevOps-Solutions-Telegraf.webp) ## 安装步骤 @@ -75,7 +75,7 @@ sudo systemctl start telegraf 点击左侧齿轮图标并选择 `Plugins`,应该可以找到 TDengine data source 插件图标。 点击左侧加号图标并选择 `Import`,从 `https://github.com/taosdata/grafanaplugin/blob/master/examples/telegraf/grafana/dashboards/telegraf-dashboard-v0.1.0.json` 下载 dashboard JSON 文件后导入。之后可以看到如下界面的仪表盘: -![IT-DevOps-Solutions-telegraf-dashboard.webp]./IT-DevOps-Solutions-telegraf-dashboard.webp) +![TDengine Database IT-DevOps-Solutions-telegraf-dashboard](./IT-DevOps-Solutions-telegraf-dashboard.webp) ## 总结 diff --git a/docs-cn/25-application/02-collectd.md b/docs-cn/25-application/02-collectd.md index 5966f2d6544c78adb806d51e8a4157ba7dc420e9..78c61bb969092d7040ddcb3d02ce7bd29a784858 100644 --- a/docs-cn/25-application/02-collectd.md +++ b/docs-cn/25-application/02-collectd.md @@ -16,7 +16,7 @@ IT 运维监测数据通常都是对时间特性比较敏感的数据,例如 本文介绍不需要写一行代码,通过简单修改几行配置文件,就可以快速搭建一个基于 TDengine + collectd / statsD + Grafana 的 IT 运维系统。架构如下图: -![IT-DevOps-Solutions-Collectd-StatsD.webp](./IT-DevOps-Solutions-Collectd-StatsD.webp) +![TDengine Database IT-DevOps-Solutions-Collectd-StatsD](./IT-DevOps-Solutions-Collectd-StatsD.webp) ## 安装步骤 @@ -81,12 +81,12 @@ repeater 部分添加 { host:'', port: -### 18. go 语言编写组件编译失败怎样解决? +### 19. go 语言编写组件编译失败怎样解决? TDengine 2.3.0.0 及之后的版本包含一个使用 go 语言开发的 taosAdapter 独立组件,需要单独运行,取代之前 taosd 内置的 httpd ,提供包含原 httpd 功能以及支持多种其他软件(Prometheus、Telegraf、collectd、StatsD 等)的数据接入功能。 使用最新 develop 分支代码编译需要先 `git submodule update --init --recursive` 下载 taosAdapter 仓库代码后再编译。 @@ -184,7 +195,7 @@ go env -w GOPROXY=https://goproxy.cn,direct 如果希望继续使用之前的内置 httpd,可以关闭 taosAdapter 编译,使用 `cmake .. -DBUILD_HTTP=true` 使用原来内置的 httpd。 -### 19. 如何查询数据占用的存储空间大小? +### 20. 如何查询数据占用的存储空间大小? 默认情况下,TDengine 的数据文件存储在 /var/lib/taos ,日志文件存储在 /var/log/taos 。 @@ -193,3 +204,38 @@ go env -w GOPROXY=https://goproxy.cn,direct 若想查看单个数据库占用的大小,可在命令行程序 taos 内指定要查看的数据库后执行 `show vgroups;` ,通过得到的 VGroup id 去 /var/lib/taos/vnode 下查看包含的文件夹大小。 若仅仅想查看指定(超级)表的数据块分布及大小,可查看[_block_dist 函数](https://docs.taosdata.com/taos-sql/select/#_block_dist-%E5%87%BD%E6%95%B0) + +### 21. 客户端连接串如何保证高可用? + +请看为此问题撰写的 [技术博客](https://www.taosdata.com/blog/2021/04/16/2287.html) + +### 22. 时间戳的时区信息是怎样处理的? + +TDengine 中时间戳的时区总是由客户端进行处理,而与服务端无关。具体来说,客户端会对 SQL 语句中的时间戳进行时区转换,转为 UTC 时区(即 Unix 时间戳——Unix Timestamp)再交由服务端进行写入和查询;在读取数据时,服务端也是采用 UTC 时区提供原始数据,客户端收到后再根据本地设置,把时间戳转换为本地系统所要求的时区进行显示。 + +客户端在处理时间戳字符串时,会采取如下逻辑: + +1. 在未做特殊设置的情况下,客户端默认使用所在操作系统的时区设置。 +2. 如果在 taos.cfg 中设置了 timezone 参数,则客户端会以这个配置文件中的设置为准。 +3. 如果在 C/C++/Java/Python 等各种编程语言的 Connector Driver 中,在建立数据库连接时显式指定了 timezone,那么会以这个指定的时区设置为准。例如 Java Connector 的 JDBC URL 中就有 timezone 参数。 +4. 在书写 SQL 语句时,也可以直接使用 Unix 时间戳(例如 `1554984068000`)或带有时区的时间戳字符串,也即以 RFC 3339 格式(例如 `2013-04-12T15:52:01.123+08:00`)或 ISO-8601 格式(例如 `2013-04-12T15:52:01.123+0800`)来书写时间戳,此时这些时间戳的取值将不再受其他时区设置的影响。 + +### 23. TDengine 2.0 都会用到哪些网络端口? 
+ +使用到的网络端口请看文档:[serverport](/reference/config/#serverport) + +需要注意,文档上列举的端口号都是以默认端口 6030 为前提进行说明,如果修改了配置文件中的设置,那么列举的端口都会随之出现变化,管理员可以参考上述的信息调整防火墙设置。 + +### 24. 为什么 RESTful 接口无响应、Grafana 无法添加 TDengine 为数据源、TDengineGUI 选了 6041 端口还是无法连接成功? + +taosAdapter 从 TDengine 2.4.0.0 版本开始成为 TDengine 服务端软件的组成部分,是 TDengine 集群和应用程序之间的桥梁和适配器。在此之前 RESTful 接口等功能是由 taosd 内置的 HTTP 服务提供的,而如今要实现上述功能需要执行:```systemctl start taosadapter``` 命令来启动 taosAdapter 服务。 + +需要说明的是,taosAdapter 的日志路径 path 需要单独配置,默认路径是 /var/log/taos ;日志等级 logLevel 有 8 个等级,默认等级是 info ,配置成 panic 可关闭日志输出。请注意操作系统 / 目录的空间大小,可通过命令行参数、环境变量或配置文件来修改配置,默认配置文件是 /etc/taos/taosadapter.toml 。 + +有关 taosAdapter 组件的详细介绍请看文档:[taosAdapter](https://docs.taosdata.com/reference/taosadapter/) + +### 25. 发生了 OOM 怎么办? + +OOM 是操作系统的保护机制,当操作系统内存(包括 SWAP )不足时,会杀掉某些进程,从而保证操作系统的稳定运行。通常内存不足主要是如下两个原因导致,一是剩余内存小于 vm.min_free_kbytes ;二是程序请求的内存大于剩余内存。还有一种情况是内存充足但程序占用了特殊的内存地址,也会触发 OOM 。 + +TDengine 会预先为每个 VNode 分配好内存,每个 Database 的 VNode 个数受 maxVgroupsPerDb 影响,每个 VNode 占用的内存大小受 Blocks 和 Cache 影响。要防止 OOM,需要在项目建设之初合理规划内存,并合理设置 SWAP ,除此之外查询过量的数据也有可能导致内存暴涨,这取决于具体的查询语句。TDengine 企业版对内存管理做了优化,采用了新的内存分配器,对稳定性有更高要求的用户可以考虑选择企业版。 diff --git a/docs-en/02-intro/index.md b/docs-en/02-intro/index.md index 628e87dd59f3c7a3bb00d93ee21b82550993d9ae..f6766f910f4d7560b782bf02ffa97922523e6167 100644 --- a/docs-en/02-intro/index.md +++ b/docs-en/02-intro/index.md @@ -54,7 +54,7 @@ With TDengine, the total cost of ownership of your time-series data platform can ## Technical Ecosystem This is how TDengine would be situated, in a typical time-series data processing platform: -![TDengine Technical Ecosystem ](eco_system.webp) +![TDengine Database Technical Ecosystem ](eco_system.webp)
Figure 1. TDengine Technical Ecosystem
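+As a sketch of how the analytics and visualization tools in this ecosystem interact with TDengine, such a tool typically just issues standard SQL with a time window (this example assumes the demo database `test` and super table `meters` created by `taosBenchmark`):
+
+```sql
+-- average voltage per 1-minute window over the last hour
+SELECT AVG(voltage) FROM test.meters WHERE ts > NOW - 1h INTERVAL(1m);
+```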
diff --git a/docs-en/05-get-started/_pkg_install.mdx b/docs-en/05-get-started/_pkg_install.mdx index af04d2b70bda7575e57cc49a5aa60f19689113e6..cf10497c96ba1d777e45340b0312d97c127b6fcb 100644 --- a/docs-en/05-get-started/_pkg_install.mdx +++ b/docs-en/05-get-started/_pkg_install.mdx @@ -12,6 +12,6 @@ Between two major release versions, some beta versions may be delivered for user For the details please refer to [Install and Uninstall](/operation/pkg-install)。 -To see the details of versions, please refer to [Download List](https://www.taosdata.com/all-downloads) and [Release Notes](https://github.com/taosdata/TDengine/releases). +To see the details of versions, please refer to [Download List](https://tdengine.com/all-downloads) and [Release Notes](https://github.com/taosdata/TDengine/releases). diff --git a/docs-en/05-get-started/index.md b/docs-en/05-get-started/index.md index 858dd6ac56e3a523220903fc63335dfdc573b752..56958ef3ec1c206ee0cff45c67fd3c3a6fa6753a 100644 --- a/docs-en/05-get-started/index.md +++ b/docs-en/05-get-started/index.md @@ -130,7 +130,7 @@ After TDengine server is running,execute `taosBenchmark` (previously named tao taosBenchmark ``` -This command will create a super table "meters" under database "test". Under "meters", 10000 tables are created with names from "d0" to "d9999". Each table has 10000 rows and each row has four columns (ts, current, voltage, phase). Time stamp is starting from "2017-07-14 10:40:00 000" to "2017-07-14 10:40:09 999". Each table has tags "location" and "groupId". groupId is set 1 to 10 randomly, and location is set to "California.SanFrancisco" or "California.SanDieo". +This command will create a super table "meters" under database "test". Under "meters", 10000 tables are created with names from "d0" to "d9999". Each table has 10000 rows and each row has four columns (ts, current, voltage, phase). Time stamp is starting from "2017-07-14 10:40:00 000" to "2017-07-14 10:40:09 999". Each table has tags "location" and "groupId". groupId is set 1 to 10 randomly, and location is set to "California.SanFrancisco" or "California.SanDiego". This command will insert 100 million rows into the database quickly. Time to insert depends on the hardware configuration, it only takes a dozen seconds for a regular PC server. diff --git a/docs-en/07-develop/01-connect/index.md b/docs-en/07-develop/01-connect/index.md index 21b2149f4451e8e5d388a41f1a0a06b6adc00a96..b9217b828d0d08c4ff1eacd27406d4e3bfba8eac 100644 --- a/docs-en/07-develop/01-connect/index.md +++ b/docs-en/07-develop/01-connect/index.md @@ -1,6 +1,6 @@ --- -sidebar_label: Connection -title: Connect to TDengine +sidebar_label: Connect +title: Connect description: "This document explains how to establish connections to TDengine, and briefly introduces how to install and use TDengine connectors." --- diff --git a/docs-en/07-develop/03-insert-data/01-sql-writing.mdx b/docs-en/07-develop/03-insert-data/01-sql-writing.mdx index ae170a2bef3496c49026e05d7d60399cc88e90a7..397b1a14fd76c1372c79eb88575f2bf21cb62050 100644 --- a/docs-en/07-develop/03-insert-data/01-sql-writing.mdx +++ b/docs-en/07-develop/03-insert-data/01-sql-writing.mdx @@ -1,5 +1,5 @@ --- -sidebar_label: SQL +sidebar_label: Insert Using SQL title: Insert Using SQL --- @@ -52,7 +52,7 @@ For more details about `INSERT` please refer to [INSERT](/taos-sql/insert). :::info -- Inserting in batches can improve performance. Normally, the higher the batch size, the better the performance. 
Please note that a single row can't exceed 16K bytes and each SQL statement can't exceed 1MB.
+- Inserting in batches can improve performance. Normally, the higher the batch size, the better the performance. Please note that a single row can't exceed 48K bytes and each SQL statement can't exceed 1MB.
- Inserting with multiple threads can also improve performance. However, depending on the system resources on the application side and the server side, when the number of inserting threads grows beyond a specific point the performance may drop instead of improving. The proper number of threads needs to be tested in a specific environment to find the best number.
:::
diff --git a/docs-en/07-develop/03-insert-data/index.md b/docs-en/07-develop/03-insert-data/index.md
index ba31a951ff0805b48f90c87ddc635c04978d3cd2..1a71e719a56448e4b535632e570ce8a04d2282bb 100644
--- a/docs-en/07-develop/03-insert-data/index.md
+++ b/docs-en/07-develop/03-insert-data/index.md
@@ -1,5 +1,5 @@
---
-title: Insert
+title: Insert Data
---
TDengine supports multiple protocols of inserting data, including SQL, InfluxDB Line protocol, OpenTSDB Telnet protocol, and OpenTSDB JSON protocol. Data can be inserted row by row, or in batches. Data from one or more collection points can be inserted simultaneously. Data can be inserted with multiple threads, and out-of-order data and historical data can be inserted as well. InfluxDB Line protocol, OpenTSDB Telnet protocol and OpenTSDB JSON protocol are the 3 kinds of schemaless insert protocols supported by TDengine. It's not necessary to create STables and tables in advance if using schemaless protocols, and the schemas can be adjusted automatically based on the data being inserted.
diff --git a/docs-en/07-develop/04-query-data/_category_.yml b/docs-en/07-develop/04-query-data/_category_.yml
index 5912a48fc31ed36235c0d34d8b0909bf3b518aaa..809db34621a63505ceace7ba182e07c698bdbddb 100644
--- a/docs-en/07-develop/04-query-data/_category_.yml
+++ b/docs-en/07-develop/04-query-data/_category_.yml
@@ -1 +1 @@
-label: Select Data
+label: Query Data
diff --git a/docs-en/07-develop/04-query-data/index.mdx b/docs-en/07-develop/04-query-data/index.mdx
index 74562c88232afc2f41fdbe5d4c34d582b0b141bd..a212fa9529215fc24c55c95a166cfc1a407359b2 100644
--- a/docs-en/07-develop/04-query-data/index.mdx
+++ b/docs-en/07-develop/04-query-data/index.mdx
@@ -1,6 +1,6 @@
---
-Sidebar_label: Select
-title: Select
+sidebar_label: Query data
+title: Query data
description: "This chapter introduces major query functionalities and how to perform sync and async query using connectors."
---
diff --git a/docs-en/07-develop/06-subscribe.mdx b/docs-en/07-develop/06-subscribe.mdx
index 66c8f5129018bee2d9da4a343006d7239cfea856..782fcdbaf221419dd231bd10958e26b8f4f856e5 100644
--- a/docs-en/07-develop/06-subscribe.mdx
+++ b/docs-en/07-develop/06-subscribe.mdx
@@ -1,5 +1,5 @@
---
-sidebar_label: Subscription
+sidebar_label: Data Subscription
description: "Lightweight service for data subscription and publishing. Time series data inserted into TDengine continuously can be pushed automatically to subscribing clients."
title: Data Subscription
---
@@ -108,7 +108,7 @@ if (async) {
}
```
-In the above sample code in the else condition, there is an infinite loop. Each time carriage return is entered `taos_consume` is invoked. The return value of `taos_consume` is the selected result set. In the above sample, `print_result` is used to simplify the printing of the result set. Below is the implementation of `print_result`.
+In the above sample code in the else condition, there is an infinite loop. Each time carriage return is entered, `taos_consume` is invoked. The return value of `taos_consume` is the selected result set. In the above sample, `print_result` is used to simplify the printing of the result set. It is similar to `taos_use_result`. Below is the implementation of `print_result`.
```c
void print_result(TAOS_RES* res, int blockFetch) {
@@ -151,7 +151,7 @@ void subscribe_callback(TAOS_SUB* tsub, TAOS_RES *res, void* param, int code) {
taos_unsubscribe(tsub, keep);
```
-The second parameter `keep` is used to specify whether to keep the subscription progress on the client sde. If it is **false**, i.e. **0**, then subscription will be restarted from beginning regardless of the `restart` parameter's value when `taos_subscribe` is invoked again. The subscription progress information is stored in _{DataDir}/subscribe/_ , under which there is a file with the same name as `topic` for each subscription, the subscription will be restarted from the beginning if the corresponding progress file is removed.
+The second parameter `keep` is used to specify whether to keep the subscription progress on the client side. If it is **false**, i.e. **0**, then the subscription will be restarted from the beginning regardless of the `restart` parameter's value when `taos_subscribe` is invoked again. The subscription progress information is stored in _{DataDir}/subscribe/_ , under which there is a file with the same name as `topic` for each subscription (Note: The default value of `DataDir` in the `taos.cfg` file is **/var/lib/taos/**. However, **/var/lib/taos/** does not exist on the Windows server. So you need to change the `DataDir` value to the corresponding existing directory.), the subscription will be restarted from the beginning if the corresponding progress file is removed.
Now let's see the effect of the above sample code, assuming below prerequisites have been done.
diff --git a/docs-en/07-develop/07-cache.md b/docs-en/07-develop/07-cache.md
index 3d42e22eb3eb0369140e2782de5a01b60156423a..743452faff6a2be8466318a7dab61a44e33c3664 100644
--- a/docs-en/07-develop/07-cache.md
+++ b/docs-en/07-develop/07-cache.md
@@ -4,15 +4,15 @@ title: Cache
description: "The latest row of each table is kept in cache to provide high performance query of latest state."
---
-The cache management policy in TDengine is First-In-First-Out (FIFO), which is also known as insert driven cache management policy and different from read driven cache management, i.e. Least-Recent-Used (LRU). It simply stores the latest data in cache and flushes the oldest data in cache to disk when the cache usage reaches a threshold. In IoT use cases, the most cared about data is the latest data, i.e. current state. The cache policy in TDengine is based the nature of IoT data.
+The cache management policy in TDengine is First-In-First-Out (FIFO). FIFO is also known as insert driven cache management policy and it is different from read driven cache management, which is more commonly known as Least-Recently-Used (LRU). FIFO simply stores the latest data in cache and flushes the oldest data in cache to disk when the cache usage reaches a threshold. In IoT use cases, it is the current state, i.e. the latest or most recent data, that is important. The cache policy in TDengine, like much of the design and architecture of TDengine, is based on the nature of IoT data.
-Caching the latest data provides the capability of retrieving data in milliseconds.
With this capability, TDengine can be configured properly to be used as caching system without deploying another separate caching system to simplify the system architecture and minimize the operation cost. The cache will be emptied after TDengine is restarted, TDengine doesn't reload data from disk into cache like a real key-value caching system.
+Caching the latest data provides the capability of retrieving data in milliseconds. With this capability, TDengine can be configured properly to be used as a caching system without deploying another separate caching system. This simplifies the system architecture and minimizes operational costs. The cache is emptied after TDengine is restarted. Unlike a typical key-value caching system, TDengine does not reload data from disk into cache.
-The memory space used by TDengine cache is fixed in size, according to the configuration based on application requirement and system resources. Independent memory pool is allocated for and managed by each vnode (virtual node) in TDengine, there is no sharing of memory pools between vnodes. All the tables belonging to a vnode share all the cache memory of the vnode.
+The memory space used by the TDengine cache is fixed in size and configurable. It should be allocated based on application requirements and system resources. An independent memory pool is allocated for and managed by each vnode (virtual node) in TDengine. There is no sharing of memory pools between vnodes. All the tables belonging to a vnode share all the cache memory of the vnode.
-Memory pool is divided into blocks and data is stored in row format in memory and each block follows FIFO policy. The size of each block is determined by configuration parameter `cache`, the number of blocks for each vnode is determined by `blocks`. For each vnode, the total cache size is `cache * blocks`. A cache block needs to ensure that each table can store at least dozens of records to be efficient.
+The memory pool is divided into blocks and data is stored in row format in memory and each block follows FIFO policy. The size of each block is determined by the configuration parameter `cache` and the number of blocks for each vnode is determined by the parameter `blocks`. For each vnode, the total cache size is `cache * blocks`. To be efficient, a cache block needs to ensure that each table can store at least dozens of records.
-`last_row` function can be used to retrieve the last row of a table or a STable to quickly show the current state of devices on monitoring screen. For example the below SQL statement retrieves the latest voltage of all meters in San Francisco of California.
+The `last_row` function can be used to retrieve the last row of a table or a STable to quickly show the current state of devices on a monitoring screen. For example, the SQL statement below retrieves the latest voltage of all meters in San Francisco, California.
```sql
select last_row(voltage) from meters where location='California.SanFrancisco';
```
diff --git a/docs-en/07-develop/08-udf.md b/docs-en/07-develop/08-udf.md
index 61639e34404477d3bb5785da129a1d922a4d020e..49bc95bd91a4c31d42d2b21ef05d69225f1bd963 100644
--- a/docs-en/07-develop/08-udf.md
+++ b/docs-en/07-develop/08-udf.md
@@ -1,24 +1,31 @@
---
sidebar_label: UDF
-title: User Defined Functions
-description: "Scalar functions and aggregate functions developed by users can be utilized by the query framework to expand the query capability"
+title: User Defined Functions (UDF)
+description: "Scalar functions and aggregate functions developed by users can be utilized by the query framework to expand query capability"
---
-In some use cases, the query capability required by application programs can't be achieved directly by builtin functions. With UDF, the functions developed by users can be utilized by query framework to meet some special requirements. UDF normally takes one column of data as input, but can also support the result of sub query as input.
+In some use cases, built-in functions are not adequate for the query capability required by application programs. With UDF, the functions developed by users can be utilized by the query framework to meet business and application requirements. A UDF normally takes one column of data as input, but can also support the result of a sub-query as input.
-From version 2.2.0.0, UDF programmed in C/C++ language can be supported by TDengine.
+From version 2.2.0.0, UDFs written in C/C++ are supported by TDengine.
-Two kinds of functions can be implemented by UDF: scalar function and aggregate function.
-## Define UDF
+## Types of UDF
+
+Two kinds of functions can be implemented by UDF: scalar functions and aggregate functions.
+
+Scalar functions return multiple rows and aggregate functions return either 0 or 1 row.
+
+In the case of a scalar function, you only have to implement the "normal" function template.
+
+In the case of an aggregate function, in addition to the "normal" function, you also need to implement the "merge" and "finalize" function templates even if the implementation is empty. This will become clear in the sections below.
### Scalar Function
-Below function template can be used to define your own scalar function.
+As mentioned earlier, a scalar UDF only has to implement the "normal" function template. The function template below can be used to define your own scalar function.
`void udfNormalFunc(char* data, short itype, short ibytes, int numOfRows, long long* ts, char* dataOutput, char* interBuf, char* tsOutput, int* numOfOutput, short otype, short obytes, SUdfInit* buf)`
-`udfNormalFunc` is the place holder of function name, a function implemented based on the above template can be used to perform scalar computation on data rows. The parameters are fixed to control the data exchange between UDF and TDengine.
+`udfNormalFunc` is the placeholder for a function name. A function implemented based on the above template can be used to perform scalar computation on data rows. The parameters are fixed to control the data exchange between UDF and TDengine.
- Definitions of the parameters:
@@ -30,20 +37,24 @@ Below function template can be used to define your own scalar function.
- numOfRows: the number of rows in the input data
- ts: the column of timestamp corresponding to the input data
- dataOutput: the buffer for output data, total size is `oBytes * numberOfRows`
- - interBuf:the buffer for intermediate result, its size is specified by `BUFSIZE` parameter when creating a UDF. It's normally used when the intermediate result is not same as the final result, it's allocated and freed by TDengine.
+ - interBuf: the buffer for an intermediate result. Its size is specified by the `BUFSIZE` parameter when creating a UDF. It's normally used when the intermediate result is not the same as the final result. This buffer is allocated and freed by TDengine.
- tsOutput: the column of timestamps corresponding to the output data; it can be used to output timestamp together with the output data if it's not NULL
- numOfOutput: the number of rows in output data
- buf: for the state exchange between UDF and TDengine
- [add_one.c](https://github.com/taosdata/TDengine/blob/develop/tests/script/sh/add_one.c) is one example of the simplest UDF implementations, i.e. one instance of the above `udfNormalFunc` template. It adds one to each value of a column passed in which can be filtered using `where` clause and outputs the result.
+ [add_one.c](https://github.com/taosdata/TDengine/blob/develop/tests/script/sh/add_one.c) is one example of a very simple UDF implementation, i.e. one instance of the above `udfNormalFunc` template. It adds one to each value of a passed-in column, which can be filtered using the `where` clause, and outputs the result.
### Aggregate Function
-Below function template can be used to define your own aggregate function.
+For an aggregate UDF, as mentioned earlier, you must implement a "normal" function template (described above) and also implement the "merge" and "finalize" templates.
-`void abs_max_merge(char* data, int32_t numOfRows, char* dataOutput, int32_t* numOfOutput, SUdfInit* buf)`
+#### Merge Function Template
-`udfMergeFunc` is the place holder of function name, the function implemented with the above template is used to aggregate the intermediate result, only can be used in the aggregate query for STable.
+The function template below can be used to define your own merge function for an aggregate UDF.
+
+`void udfMergeFunc(char* data, int32_t numOfRows, char* dataOutput, int32_t* numOfOutput, SUdfInit* buf)`
+
+`udfMergeFunc` is the placeholder for a function name. The function implemented with the above template is used to aggregate intermediate results and can only be used in the aggregate query for STable.
Definitions of the parameters:
@@ -53,17 +64,11 @@ Definitions of the parameters:
- numOfOutput: number of rows in the output data
- buf: for the state exchange between UDF and TDengine
-[abs_max.c](https://github.com/taosdata/TDengine/blob/develop/tests/script/sh/abs_max.c) is an user defined aggregate function to get the maximum from the absolute value of a column.
-
-The internal processing is that the data affected by the select statement will be divided into multiple row blocks and `udfNormalFunc`, i.e. `abs_max` in this case, is performed on each row block to generate the intermediate of each sub table, then `udfMergeFunc`, i.e. `abs_max_merge` in this case, is performed on the intermediate result of sub tables to aggregate to generate the final or intermediate result of STable. The intermediate result of STable is finally processed by `udfFinalizeFunc` to generate the final result, which contain either 0 or 1 row.
-
-Other typical scenarios, like covariance, can also be achieved by aggregate UDF.
+#### Finalize Function Template
-### Finalize
+The function template below can be used to finalize the result of your own UDF, normally used when interBuf is used.
-Below function template can be used to finalize the result of your own UDF, normally used when interBuf is used.
-
-`void abs_max_finalize(char* dataOutput, char* interBuf, int* numOfOutput, SUdfInit* buf)`
+`void udfFinalizeFunc(char* dataOutput, char* interBuf, int* numOfOutput, SUdfInit* buf)`
`udfFinalizeFunc` is the placeholder of the function name; the definitions of the parameters are as below:
@@ -72,47 +77,64 @@ Below function template can be used to finalize the result of your own UDF, norm
- numOfOutput: number of output data, can only be 0 or 1 for aggregate function
- buf: for state exchange between UDF and TDengine
-## UDF Conventions
+### Example abs_max.c
+
+[abs_max.c](https://github.com/taosdata/TDengine/blob/develop/tests/script/sh/abs_max.c) is an example of a user defined aggregate function to get the maximum from the absolute values of a column.
+
+The internal processing happens as follows. The results of the select statement are divided into multiple row blocks and `udfNormalFunc`, i.e. `abs_max` in this case, is performed on each row block to generate the intermediate results for each sub table. Then `udfMergeFunc`, i.e. `abs_max_merge` in this case, is performed on the intermediate result of sub tables to aggregate and generate the final or intermediate result of STable. The intermediate result of STable is finally processed by `udfFinalizeFunc`, i.e. `abs_max_finalize` in this example, to generate the final result, which contains either 0 or 1 row.
+
+Other typical aggregation functions, such as covariance, can also be implemented using aggregate UDF.
-The naming of 3 kinds of UDF, i.e. udfNormalFunc, udfMergeFunc, and udfFinalizeFunc is required to have same prefix, i.e. the actual name of udfNormalFunc, which means udfNormalFunc doesn't need a suffix following the function name. While udfMergeFunc should be udfNormalFunc followed by `_merge`, udfFinalizeFunc should be udfNormalFunc followed by `_finalize`. The naming convention is part of UDF framework, TDengine follows this convention to invoke corresponding actual functions.\
+## UDF Naming Conventions
-According to the kind of UDF to implement, the functions that need to be implemented are different.
+The naming convention for the 3 kinds of function templates required by UDF is as follows:
+ - udfNormalFunc, udfMergeFunc, and udfFinalizeFunc are required to have the same prefix, i.e. the actual name of udfNormalFunc. The udfNormalFunc doesn't need a suffix following the function name.
+ - udfMergeFunc should be udfNormalFunc followed by `_merge`
+ - udfFinalizeFunc should be udfNormalFunc followed by `_finalize`.
+
+The naming convention is part of TDengine's UDF framework. TDengine follows this convention to invoke the corresponding actual functions.
-- Scalar function:udfNormalFunc is required
-- Aggregate function:udfNormalFunc, udfMergeFunc (if query on STable) and udfFinalizeFunc are required
+Depending on whether you are creating a scalar UDF or aggregate UDF, the functions that you need to implement are different.
-To be more accurate, assuming we want to implement a UDF named "foo". If the function is a scalar function, what we really need to implement is `foo`; if the function is aggregate function, we need to implement `foo`, `foo_merge`, and `foo_finalize`.
For aggregate UDF, even though one of the three functions is not necessary, there must be an empty implementation.
+- Scalar function: udfNormalFunc is required.
+- Aggregate function: udfNormalFunc, udfMergeFunc (if query on STable) and udfFinalizeFunc are required.
+
+For clarity, assuming we want to implement a UDF named "foo":
+- If the function is a scalar function, we only need to implement the "normal" function template and it should be named simply `foo`.
+- If the function is an aggregate function, we need to implement `foo`, `foo_merge`, and `foo_finalize`. Note that for an aggregate UDF, even though one of the three functions is not necessary, there must be an empty implementation.
## Compile UDF
-The source code of UDF in C can't be utilized by TDengine directly. UDF can only be loaded into TDengine after compiling to dynamically linked library.
+The source code of a UDF in C can't be utilized by TDengine directly. A UDF can only be loaded into TDengine after being compiled into a dynamically linked library (DLL).
-For example, the example UDF `add_one.c` mentioned in previous sections need to be compiled into DLL using below command on Linux Shell.
+For example, the example UDF `add_one.c` mentioned earlier can be compiled into a DLL using the command below in a Linux shell.
```bash
gcc -g -O0 -fPIC -shared add_one.c -o add_one.so
```
-The generated DLL file `dd_one.so` can be used later when creating UDF. It's recommended to use GCC not older than 7.5.
+The generated DLL file `add_one.so` can be used later when creating a UDF. It's recommended to use GCC not older than 7.5.
## Create and Use UDF
+When a UDF is created in a TDengine instance, it is available across the databases in that instance.
+
### Create UDF
-SQL command can be executed on the same hos where the generated UDF DLL resides to load the UDF DLL into TDengine, this operation can't be done through REST interface or web console. Once created, all the clients of the current TDengine can use these UDF functions in their SQL commands. UDF are stored in the management node of TDengine. The UDFs loaded in TDengine would be still available after TDengine is restarted.
+A SQL command can be executed on the host where the generated UDF DLL resides to load the UDF DLL into TDengine. This operation cannot be done through the REST interface or web console. Once created, any client of the current TDengine can use these UDF functions in their SQL commands. UDFs are stored in the management node of TDengine. The UDFs loaded in TDengine are still available after TDengine is restarted.
-When creating UDF, it needs to be clarified as either scalar function or aggregate function. If the specified type is wrong, the SQL statements using the function would fail with error. Besides, the input type and output type don't need to be same in UDF, but the input data type and output data type need to be consistent with the UDF definition.
+When creating a UDF, the type of the UDF, i.e. scalar function or aggregate function, must be specified. If the specified type is wrong, the SQL statements using the function would fail with errors. The input type and output type don't need to be the same in a UDF, but the input data type and output data type must be consistent with the UDF definition.
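As a quick sanity check after registration, the functions known to the cluster can be listed from the TDengine CLI. This is a minimal sketch, assuming the `SHOW FUNCTIONS` statement is available in your TDengine version; the formal creation syntax for each kind of UDF follows below.

```sql
-- List the UDFs currently registered in TDengine (sketch; output columns vary by version)
SHOW FUNCTIONS;
```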
- Create Scalar Function
```sql
-CREATE FUNCTION ids(X) AS ids(Y) OUTPUTTYPE typename(Z) [ BUFSIZE B ];
+CREATE FUNCTION userDefinedFunctionName AS "/absolute/path/to/userDefinedFunctionName.so" OUTPUTTYPE outputtype [BUFSIZE B];
```
-- ids(X):the function name to be sued in SQL statement, must be consistent with the function name defined by `udfNormalFunc`
-- ids(Y):the absolute path of the DLL file including the implementation of the UDF, the path needs to be quoted by single or double quotes
-- typename(Z):the output data type, the value is the literal string of the type
-- B:the size of intermediate buffer, in bytes; it's an optional parameter and the range is [0,512]
+- userDefinedFunctionName: the function name to be used in SQL statements, which must be consistent with the function name defined by `udfNormalFunc` and is also the name of the compiled DLL (.so file).
+- path: the absolute path of the DLL file including the name of the shared object file (.so). The path must be quoted with single or double quotes.
+- outputtype: the output data type, the value is the literal string of the supported TDengine data type.
+- B: the size of intermediate buffer, in bytes; it is an optional parameter and the range is [0,512].
For example, below SQL statement can be used to create a UDF from `add_one.so`.
@@ -123,17 +145,17 @@ CREATE FUNCTION add_one AS "/home/taos/udf_example/add_one.so" OUTPUTTYPE INT;
- Create Aggregate Function
```sql
-CREATE AGGREGATE FUNCTION ids(X) AS ids(Y) OUTPUTTYPE typename(Z) [ BUFSIZE B ];
+CREATE AGGREGATE FUNCTION userDefinedFunctionName AS "/absolute/path/to/userDefinedFunctionName.so" OUTPUTTYPE outputtype [ BUFSIZE B ];
```
-- ids(X):the function name to be sued in SQL statement, must be consistent with the function name defined by `udfNormalFunc`
-- ids(Y):the absolute path of the DLL file including the implementation of the UDF, the path needs to be quoted by single or double quotes
-- typename(Z):the output data type, the value is the literal string of the type
+- userDefinedFunctionName: the function name to be used in SQL statements, which must be consistent with the function name defined by `udfNormalFunc` and is also the name of the compiled DLL (.so file).
+- path: the absolute path of the DLL file including the name of the shared object file (.so). The path needs to be quoted by single or double quotes.
+- outputtype: the output data type, the value is the literal string of the type
- B: the size of intermediate buffer, in bytes; it's an optional parameter and the range is [0,512]
For details about how to use the intermediate result, please refer to example program [demo.c](https://github.com/taosdata/TDengine/blob/develop/tests/script/sh/demo.c).
-For example, below SQL statement can be used to create a UDF rom `demo.so`.
+For example, below SQL statement can be used to create a UDF from `demo.so`.
```sql
CREATE AGGREGATE FUNCTION demo AS "/home/taos/udf_example/demo.so" OUTPUTTYPE DOUBLE bufsize 14;
@@ -176,11 +198,11 @@ In current version there are some restrictions for UDF
1. Only Linux is supported when creating and invoking UDF for both client side and server side
2. UDF can't be mixed with builtin functions
3. Only one UDF can be used in a SQL statement
-4. Single column is supported as input for UDF
+4. Only a single column is supported as input for UDF
5. Once created successfully, UDF is persisted in MNode of TDengine
6. UDF can't be created through REST interface
7.
The function name used when creating UDF in SQL must be consistent with the function name defined in the DLL, i.e. the name defined by `udfNormalFunc`
-8. The name name of UDF name should not conflict with any of builtin functions
+8. The name of a UDF should not conflict with any of TDengine's built-in functions
## Examples
diff --git a/docs-en/10-cluster/02-cluster-mgmt.md b/docs-en/10-cluster/02-cluster-mgmt.md
index 9d717be236e3e89114f58fc492223e3ad94fc9ea..674c92e2766a4eb304079140af19c8efea72d55e 100644
--- a/docs-en/10-cluster/02-cluster-mgmt.md
+++ b/docs-en/10-cluster/02-cluster-mgmt.md
@@ -3,16 +3,16 @@ sidebar_label: Operation
title: Manage DNODEs
---
-The previous section [Deployment](/cluster/deploy) introduced how to deploy and start a cluster from scratch. Once a cluster is ready, the dnode status in the cluster can be shown at any time, new dnode can be added to scale out the cluster, an existing dnode can be removed, even load balance can be performed manually.
+The previous section, [Deployment](/cluster/deploy), showed you how to deploy and start a cluster from scratch. Once a cluster is ready, the status of dnode(s) in the cluster can be shown at any time. Dnodes can be managed from the TDengine CLI. New dnode(s) can be added to scale out the cluster, an existing dnode can be removed and you can even perform load balancing manually, if necessary.
:::note
-All the commands to be introduced in this chapter need to be run through TDengine CLI, sometimes it's necessary to use root privilege.
+All the commands introduced in this chapter must be run in the TDengine CLI - `taos`. Note that sometimes it is necessary to use root privilege.
:::
## Show DNODEs
-The below command can be executed in TDengine CLI `taos` to list all dnodes in the cluster, including ID, end point (fqdn:port), status (ready, offline), number of vnodes, number of free vnodes, etc. It's suggested to execute this command to check after adding or removing a dnode.
+The below command can be executed in TDengine CLI `taos` to list all dnodes in the cluster, including ID, end point (fqdn:port), status (ready, offline), number of vnodes, number of free vnodes and so on. We recommend executing this command after adding or removing a dnode.
```sql
SHOW DNODES;
@@ -30,7 +30,7 @@ Query OK, 1 row(s) in set (0.008298s)
## Show VGROUPs
-To utilize system resources efficiently and provide scalability, data sharding is required. The data of each database is divided into multiple shards and stored in multiple vnodes. These vnodes may be located in different dnodes, scaling out can be achieved by adding more vnodes from more dnodes. Each vnode can only be used for a single DB, but one DB can have multiple vnodes. The allocation of vnode is scheduled automatically by mnode according to system resources of the dnodes.
+To utilize system resources efficiently and provide scalability, data sharding is required. The data of each database is divided into multiple shards and stored in multiple vnodes. These vnodes may be located on different dnodes. One way of scaling out is to add more vnodes on dnodes. Each vnode can only be used for a single DB, but one DB can have multiple vnodes. The allocation of vnode is scheduled automatically by mnode based on system resources of the dnodes.
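For illustration, here is a minimal sketch of inspecting vgroup distribution from the TDengine CLI; the database name `test` is illustrative and the actual output depends on the deployment.

```sql
-- Switch to a database and list its vgroups, i.e. how its data is sharded
-- across vnodes and which dnodes host them (sketch)
USE test;
SHOW VGROUPS;
```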
Launch TDengine CLI `taos` and execute the command below:
@@ -87,7 +87,7 @@ taos> show dnodes;
Query OK, 2 row(s) in set (0.001017s)
```
-It can be seen that the status of the new dnode is "offline", once the dnode is started and connects the firstEp of the cluster, execute the command again and get the example output below, from which it can be seen that two dnodes are both in "ready" status.
+It can be seen that the status of the new dnode is "offline". Once the dnode is started and connects to the firstEp of the cluster, you can execute the command again and get the example output below. As can be seen, both dnodes are in "ready" status.
```
taos> show dnodes;
@@ -132,12 +132,12 @@ taos> show dnodes;
Query OK, 1 row(s) in set (0.001137s)
```
-In the above example, when `show dnodes` is executed the first time, two dnodes are shown. Then `drop dnode 2` is executed, after that from the output of executing `show dnodes` again it can be seen that only the dnode with ID 1 is still in the cluster.
+In the above example, when `show dnodes` is executed the first time, two dnodes are shown. After `drop dnode 2` is executed, you can execute `show dnodes` again and it can be seen that only the dnode with ID 1 is still in the cluster.
:::note
-- Once a dnode is dropped, it can't rejoin the cluster. To rejoin, the dnode needs to deployed again after cleaning up the data directory. Normally, before dropping a dnode, the data belonging to the dnode needs to be migrated to other place.
+- Once a dnode is dropped, it can't rejoin the cluster. To rejoin, the dnode needs to be deployed again after cleaning up the data directory. Before dropping a dnode, the data belonging to the dnode MUST be migrated/backed up according to your data retention, data security or other SOPs.
-- Please be noted that `drop dnode` is different from stopping `taosd` process. `drop dnode` just removes the dnode out of TDengine cluster. Only after a dnode is dropped, can the corresponding `taosd` process be stopped.
+- Please note that `drop dnode` is different from stopping the `taosd` process. `drop dnode` just removes the dnode from the TDengine cluster. Only after a dnode is dropped can the corresponding `taosd` process be stopped.
- Once a dnode is dropped, other dnodes in the cluster will be notified of the drop and will not accept the request from the dropped dnode.
- dnodeID is allocated automatically and can't be manually modified. dnodeID is generated in ascending order without duplication.
diff --git a/docs-en/10-cluster/03-ha-and-lb.md b/docs-en/10-cluster/03-ha-and-lb.md
index 6e0c386abe4100ec59f60c1c90b3305e0d187c79..bd718eef9f8dc181628132de831dbca2af59d158 100644
--- a/docs-en/10-cluster/03-ha-and-lb.md
+++ b/docs-en/10-cluster/03-ha-and-lb.md
@@ -7,7 +7,7 @@ title: High Availability and Load Balancing
High availability of vnode and mnode can be achieved through replicas in TDengine.
-The number of vnodes is associated with each DB, there can be multiple DBs in a TDengine cluster. A different number of replicas can be configured for each DB. When creating a database, the parameter `replica` is used to specify the number of replicas, the default value is 1. With single replica, the high availability of the system can't be guaranteed. Whenever one node is down, the data service will be unavailable. The number of dnodes in the cluster must NOT be lower than the number of replicas set for any DB, otherwise the `create table` operation would fail with error "more dnodes are needed".
The SQL statement below is used to create a database named "demo" with 3 replicas. +A TDengine cluster can have multiple databases. Each database has a number of vnodes associated with it. A different number of replicas can be configured for each DB. When creating a database, the parameter `replica` is used to specify the number of replicas. The default value for `replica` is 1. Naturally, a single replica cannot guarantee high availability since if one node is down, the data service is unavailable. Note that the number of dnodes in the cluster must NOT be lower than the number of replicas set for any DB, otherwise the `create table` operation will fail with error "more dnodes are needed". The SQL statement below is used to create a database named "demo" with 3 replicas. ```sql CREATE DATABASE demo replica 3; @@ -15,19 +15,19 @@ CREATE DATABASE demo replica 3; The data in a DB is divided into multiple shards and stored in multiple vgroups. The number of vnodes in each vgroup is determined by the number of replicas set for the DB. The vnodes in each vgroup store exactly the same data. For the purpose of high availability, the vnodes in a vgroup must be located in different dnodes on different hosts. As long as over half of the vnodes in a vgroup are in an online state, the vgroup is able to provide data access. Otherwise the vgroup can't provide data access for reading or inserting data. -There may be data for multiple DBs in a dnode. Once a dnode is down, multiple DBs may be affected. However, it's hard to say the cluster is guaranteed to work properly as long as over half of dnodes are online because vnodes are introduced and there may be complex mapping between vnodes and dnodes. +There may be data for multiple DBs in a dnode. When a dnode is down, multiple DBs may be affected. While in theory, the cluster will provide data access for reading or inserting data if over half the vnodes in vgroups are online, because of the possibly complex mapping between vnodes and dnodes, it is difficult to guarantee that the cluster will work properly if over half of the dnodes are online. ## High Availability of Mnode -Each TDengine cluster is managed by `mnode`, which is a module of `taosd`. For the high availability of mnode, multiple mnodes can be configured using system parameter `numOfMNodes`, the valid time range is [1,3]. To make sure the data consistency between mnodes, the data replication between mnodes is performed in a synchronous way. +Each TDengine cluster is managed by `mnode`, which is a module of `taosd`. For the high availability of mnode, multiple mnodes can be configured using system parameter `numOfMNodes`. The valid range for `numOfMnodes` is [1,3]. To ensure data consistency between mnodes, data replication between mnodes is performed synchronously. -There may be multiple dnodes in a cluster, but only one mnode can be started in each dnode. Which one or ones of the dnodes will be designated as mnodes is automatically determined by TDengine according to the cluster configuration and system resources. Command `show mnodes` can be executed in TDengine `taos` to show the mnodes in the cluster. +There may be multiple dnodes in a cluster, but only one mnode can be started in each dnode. Which one or ones of the dnodes will be designated as mnodes is automatically determined by TDengine according to the cluster configuration and system resources. The command `show mnodes` can be executed in TDengine `taos` to show the mnodes in the cluster. 
```sql SHOW MNODES; ``` -The end point and role/status (master, slave, unsynced, or offline) of all mnodes can be shown by the above command. When the first dnode is started in a cluster, there must be one mnode in this dnode, because there must be at least one mnode otherwise the cluster doesn't work. If `numOfMNodes` is configured to 2, another mnode will be started when the second dnode is launched. +The end point and role/status (master, slave, unsynced, or offline) of all mnodes can be shown by the above command. When the first dnode is started in a cluster, there must be one mnode in this dnode. Without at least one mnode, the cluster cannot work. If `numOfMNodes` is configured to 2, another mnode will be started when the second dnode is launched. For the high availability of mnode, `numOfMnodes` needs to be configured to 2 or a higher value. Because the data consistency between mnodes must be guaranteed, the replica confirmation parameter `quorum` is set to 2 automatically if `numOfMNodes` is set to 2 or higher. @@ -36,15 +36,16 @@ If high availability is important for your system, both vnode and mnode must be ::: -## Load Balance +## Load Balancing -Load balance will be triggered in 3 cases without manual intervention. +Load balancing will be triggered in 3 cases without manual intervention. -- When a new dnode is joined in the cluster, automatic load balancing may be triggered, some data from some dnodes may be transferred to the new dnode automatically. +- When a new dnode joins the cluster, automatic load balancing may be triggered. Some data from other dnodes may be transferred to the new dnode automatically. - When a dnode is removed from the cluster, the data from this dnode will be transferred to other dnodes automatically. - When a dnode is too hot, i.e. too much data has been stored in it, automatic load balancing may be triggered to migrate some vnodes from this dnode to other dnodes. + :::tip -Automatic load balancing is controlled by parameter `balance`, 0 means disabled and 1 means enabled. +Automatic load balancing is controlled by the parameter `balance`, 0 means disabled and 1 means enabled. This is set in the file [taos.cfg](https://docs.tdengine.com/reference/config/#balance). ::: @@ -52,22 +53,22 @@ Automatic load balancing is controlled by parameter `balance`, 0 means disabled When a dnode is offline, it can be detected by the TDengine cluster. There are two cases: -- The dnode becomes online again before the threshold configured in `offlineThreshold` is reached, it is still in the cluster and data replication is started automatically. The dnode can work properly after the data syncup is finished. +- The dnode comes online before the threshold configured in `offlineThreshold` is reached. The dnode is still in the cluster and data replication is started automatically. The dnode can work properly after the data sync is finished. -- If the dnode has been offline over the threshold configured in `offlineThreshold` in `taos.cfg`, the dnode will be removed from the cluster automatically. A system alert will be generated and automatic load balancing will be triggered if `balance` is set to 1. When the removed dnode is restarted and becomes online, it will not join in the cluster automatically, it can only be joined manually by the system operator. +- If the dnode has been offline over the threshold configured in `offlineThreshold` in `taos.cfg`, the dnode will be removed from the cluster automatically. 
A system alert will be generated and automatic load balancing will be triggered if `balance` is set to 1. When the removed dnode is restarted and becomes online, it will not join the cluster automatically. The system administrator has to manually join the dnode to the cluster. :::note -If all the vnodes in a vgroup (or mnodes in mnode group) are in offline or unsynced status, the master node can only be voted after all the vnodes or mnodes in the group become online and can exchange status, then the vgroup (or mnode group) is able to provide service. +If all the vnodes in a vgroup (or mnodes in mnode group) are in offline or unsynced status, the master node can only be voted on, after all the vnodes or mnodes in the group become online and can exchange status. Following this, the vgroup (or mnode group) is able to provide service. ::: ## Arbitrator -If the number of replicas is set to an even number like 2, when half of the vnodes in a vgroup don't work a master node can't be voted. A similar case is also applicable to mnode if the number of mnodes is set to an even number like 2. +The "arbitrator" component is used to address the special case when the number of replicas is set to an even number like 2,4 etc. If half of the vnodes in a vgroup don't work, it is impossible to vote and select a master node. This situation also applies to mnodes if the number of mnodes is set to an even number like 2,4 etc. -To resolve this problem, a new arbitrator component named `tarbitrator`, abbreviated for TDengine Arbitrator, was introduced. Arbitrator simulates a vnode or mnode but it's only responsible for network communication and doesn't handle any actual data access. As long as more than half of the vnode or mnode, including Arbitrator, are available the vnode group or mnode group can provide data insertion or query services normally. +To resolve this problem, a new arbitrator component named `tarbitrator`, an abbreviation of TDengine Arbitrator, was introduced. The `tarbitrator` simulates a vnode or mnode but it's only responsible for network communication and doesn't handle any actual data access. As long as more than half of the vnode or mnode, including Arbitrator, are available the vnode group or mnode group can provide data insertion or query services normally. -Normally, it's suggested to configure a replica number of each DB or system parameter `numOfMNodes` to an odd number. However, if a user is very sensitive to storage space, a replica number of 2 plus arbitrator component can be used to achieve both lower cost of storage space and high availability. +Normally, it's prudent to configure the replica number for each DB or system parameter `numOfMNodes` to be an odd number. However, if a user is very sensitive to storage space, a replica number of 2 plus arbitrator component can be used to achieve both lower cost of storage space and high availability. Arbitrator component is installed with the server package. For details about how to install, please refer to [Install](/operation/pkg-install). The `-p` parameter of `tarbitrator` can be used to specify the port on which it provides service. 
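To make the arbitrator's role concrete, here is a minimal sketch, assuming a two-dnode cluster whose taos.cfg points the `arbitrator` parameter at a running tarbitrator end point; the database name is illustrative.

```sql
-- With an arbitrator available, an even replica count such as 2 can still
-- elect a master when one of the two replicas is offline (sketch)
CREATE DATABASE demo2 REPLICA 2;
```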
diff --git a/docs-en/12-taos-sql/01-data-type.md b/docs-en/12-taos-sql/01-data-type.md
index 86ec941f955516e99e6bb54730a55083bc26ed09..d038219c8ac66db52416001f7a79c71018e2ca33 100644
--- a/docs-en/12-taos-sql/01-data-type.md
+++ b/docs-en/12-taos-sql/01-data-type.md
@@ -1,49 +1,69 @@
---
title: Data Types
-description: "The data types supported by TDengine include timestamp, float, JSON, etc"
+description: "TDengine supports a variety of data types including timestamp, float, JSON and many others."
---
-When using TDengine to store and query data, the most important part of the data is timestamp. Timestamp must be specified when creating and inserting data rows or querying data, timestamp must follow the rules below:
+## TIMESTAMP
-- the format must be `YYYY-MM-DD HH:mm:ss.MS`, the default time precision is millisecond (ms), for example `2017-08-12 18:25:58.128`
-- internal function `now` can be used to get the current timestamp of the client side
-- the current timestamp of the client side is applied when `now` is used to insert data
+When using TDengine to store and query data, the most important part of the data is the timestamp. A timestamp must be specified when creating and inserting data rows. A timestamp must follow the rules below:
+
+- The format must be `YYYY-MM-DD HH:mm:ss.MS`, the default time precision is millisecond (ms), for example `2017-08-12 18:25:58.128`
+- The internal function `now` can be used to get the current timestamp on the client side
+- The current timestamp of the client side is applied when `now` is used to insert data
- Epoch Time: a timestamp can also be a long integer number, which means the number of seconds, milliseconds or nanoseconds, depending on the time precision, from 1970-01-01 00:00:00.000 (UTC/GMT)
-- timestamp can be applied with add/subtract operation, for example `now-2h` means 2 hours back from the time at which query is executed,the unit can be b(nanosecond), u(microsecond), a(millisecond), s(second), m(minute), h(hour), d(day), or w(week). So `select * from t1 where ts > now-2w and ts <= now-1w` means the data between two weeks ago and one week ago. The time unit can also be n (calendar month) or y (calendar year) when specifying the time window for down sampling operation.
+- Add/subtract operations can be carried out on timestamps. For example, `now-2h` means 2 hours prior to the time at which the query is executed. The units of time in operations can be b (nanosecond), u (microsecond), a (millisecond), s (second), m (minute), h (hour), d (day), or w (week). So `select * from t1 where ts > now-2w and ts <= now-1w` means the data between two weeks ago and one week ago. The time unit can also be n (calendar month) or y (calendar year) when specifying the time window for down sampling operations.
-Time precision in TDengine can be set by the `PRECISION` parameter when executing `CREATE DATABASE`, like below, the default time precision is millisecond.
+Time precision in TDengine can be set by the `PRECISION` parameter when executing `CREATE DATABASE`. The default time precision is millisecond. In the statement below, the precision is set to nanoseconds.
```sql
CREATE DATABASE db_name PRECISION 'ns';
```
+## Data Types
+
In TDengine, the data types below can be used when specifying a column or tag.
| # | **type** | **Bytes** | **Description** |
| --- | :-------: | --------- | ------------------------- |
| 1 | TIMESTAMP | 8 | Default precision is millisecond, microsecond and nanosecond are also supported |
-| 2 | INT | 4 | Integer, the value range is [-2^31+1, 2^31-1], while -2^31 is treated as NULL |
-| 3 | BIGINT | 8 | Long integer, the value range is [-2^63+1, 2^63-1], while -2^63 is treated as NULL |
-| 4 | FLOAT | 4 | Floating point number, the effective number of digits is 6-7, the value range is [-3.4E38, 3.4E38] |
-| 5 | DOUBLE | 8 | Double precision floating point number, the effective number of digits is 15-16, the value range is [-1.7E308, 1.7E308] |
-| 6 | BINARY | User Defined | Single-byte string for ASCII visible characters. Length must be specified when defining a column or tag of binary type. The string length can be up to 16374 bytes. The string value must be quoted with single quotes. The literal single quote inside the string must be preceded with back slash like `\'` |
-| 7 | SMALLINT | 2 | Short integer, the value range is [-32767, 32767], while -32768 is treated as NULL |
-| 8 | TINYINT | 1 | Single-byte integer, the value range is [-127, 127], while -128 is treated as NULL |
-| 9 | BOOL | 1 | Bool, the value range is {true, false} |
-| 10 | NCHAR | User Defined| Multiple-Byte string that can include like Chinese characters. Each character of NCHAR type consumes 4 bytes storage. The string value should be quoted with single quotes. Literal single quote inside the string must be preceded with backslash, like `\'`. The length must be specified when defining a column or tag of NCHAR type, for example nchar(10) means it can store at most 10 characters of nchar type and will consume fixed storage of 40 bytes. An error will be reported if the string value exceeds the length defined. |
-| 11 | JSON | | json type can only be used on tag, a tag of json type is excluded with any other tags of any other type |
-
-:::tip
-TDengine is case insensitive and treats any characters in the sql command as lower case by default, case sensitive strings must be quoted with single quotes.
-
-:::
+| 2 | INT | 4 | Integer, the value range is [-2^31, 2^31-1] |
+| 3 | INT UNSIGNED | 4 | Unsigned integer, the value range is [0, 2^32-1] |
+| 4 | BIGINT | 8 | Long integer, the value range is [-2^63, 2^63-1] |
+| 5 | BIGINT UNSIGNED | 8 | Unsigned long integer, the value range is [0, 2^64-1] |
+| 6 | FLOAT | 4 | Floating point number, the effective number of digits is 6-7, the value range is [-3.4E38, 3.4E38] |
+| 7 | DOUBLE | 8 | Double precision floating point number, the effective number of digits is 15-16, the value range is [-1.7E308, 1.7E308] |
+| 8 | BINARY | User Defined | Single-byte string for ASCII visible characters. Length must be specified when defining a column or tag of binary type. The string length can be up to 16374 bytes. The string value must be quoted with single quotes. The literal single quote inside the string must be preceded with back slash like `\'` |
+| 9 | SMALLINT | 2 | Short integer, the value range is [-32768, 32767] |
+| 10 | SMALLINT UNSIGNED | 2 | Unsigned short integer, the value range is [0, 65535] |
+| 11 | TINYINT | 1 | Single-byte integer, the value range is [-128, 127] |
+| 12 | TINYINT UNSIGNED | 1 | Unsigned single-byte integer, the value range is [0, 255] |
+| 13 | BOOL | 1 | Bool, the value range is {true, false} |
+| 14 | NCHAR | User Defined | Multi-byte string that can include multi-byte characters like Chinese characters. Each character of NCHAR type consumes 4 bytes of storage. The string value should be quoted with single quotes. A literal single quote inside the string must be preceded with a backslash, like `\'`. The length must be specified when defining a column or tag of NCHAR type, for example nchar(10) means it can store at most 10 characters of nchar type and will consume fixed storage of 40 bytes. An error will be reported if the string value exceeds the length defined. |
+| 15 | JSON | | JSON type can only be used on tags. A tag of JSON type must be the only tag of its table and cannot be combined with tags of any other type |
+| 16 | VARCHAR | User Defined | Alias of BINARY type |
:::note
-Only ASCII visible characters are suggested to be used in a column or tag of BINARY type. Multiple-byte characters must be stored in NCHAR type.
+- TDengine is case insensitive and treats any characters in the sql command as lower case by default, case sensitive strings must be quoted with single quotes.
+- Only ASCII visible characters are suggested to be used in a column or tag of BINARY type. Multi-byte characters must be stored in NCHAR type.
+- Numeric values in SQL statements will be determined as integer or float type according to whether there is a decimal point or whether scientific notation is used, so attention must be paid to avoid overflow. For example, 9999999999999999999 will be considered as overflow because it exceeds the upper limit of long integer, but 9999999999999999999.0 will be considered as a legal float number.
:::
+## Constants
+TDengine supports constants of multiple data types.
+
+| # | **Syntax** | **Type** | **Description** |
+| --- | :-------: | --------- | -------------------------------------- |
+| 1 | [{+ \| -}]123 | BIGINT | Numeric constants are treated as BIGINT type. The value will be truncated if it exceeds the range of BIGINT type. |
+| 2 | 123.45 | DOUBLE | Floating number constants are treated as DOUBLE type. TDengine determines whether it's a floating number based on if decimal point or scientific notation is used. |
+| 3 | 1.2E3 | DOUBLE | Constants in scientific notation are treated as DOUBLE type. |
+| 4 | 'abc' | BINARY | String constants enclosed by single quotes are treated as BINARY type. Its size is determined as the actual length. Single quote itself can be included by preceding backslash, i.e. `\'`, in a string constant. |
+| 5 | "abc" | BINARY | String constants enclosed by double quotes are treated as BINARY type. Its size is determined as the actual length. Double quote itself can be included by preceding backslash, i.e. `\"`, in a string constant. |
+| 6 | TIMESTAMP {'literal' \| "literal"} | TIMESTAMP | A string constant following `TIMESTAMP` keyword is treated as TIMESTAMP type. The string should be in the format of "YYYY-MM-DD HH:mm:ss.MS". Its time precision is the same as that of the current database being used. |
+| 7 | {TRUE \| FALSE} | BOOL | BOOL type constant. |
+| 8 | {'' \| "" \| '\t' \| "\t" \| ' ' \| " " \| NULL } | -- | NULL constant, it can be used for any type.|
:::note
-Numeric values in SQL statements will be determined as integer or float type according to whether there is decimal point or whether scientific notation is used, so attention must be paid to avoid overflow. For example, 9999999999999999999 will be considered as overflow because it exceeds the upper limit of long integer, but 9999999999999999999.0 will be considered as a legal float number.
+- TDengine determines whether it's a floating number based on whether a decimal point or scientific notation is used.
So whether the value is determined as overflow depends on both the value and the determined type. For example, 9999999999999999999 is determined as overflow because it exceeds the upper limit of BIGINT type, while 9999999999999999999.0 is considered as a valid floating number because it is within the range of DOUBLE type. ::: diff --git a/docs-en/12-taos-sql/02-database.md b/docs-en/12-taos-sql/02-database.md index 98b75b30b3ebebb33ce1afe413554f218092bfeb..80581b2f1bc7ce9cd046c18873d3f22b6804d8cf 100644 --- a/docs-en/12-taos-sql/02-database.md +++ b/docs-en/12-taos-sql/02-database.md @@ -4,7 +4,7 @@ title: Database description: "create and drop database, show or change database parameters" --- -## Create Datable +## Create Database ``` CREATE DATABASE [IF NOT EXISTS] db_name [KEEP keep] [DAYS days] [UPDATE 1]; @@ -12,11 +12,11 @@ CREATE DATABASE [IF NOT EXISTS] db_name [KEEP keep] [DAYS days] [UPDATE 1]; :::info -1. KEEP specifies the number of days for which the data in the database to be created will be kept, the default value is 3650 days, i.e. 10 years. The data will be deleted automatically once its age exceeds this threshold. +1. KEEP specifies the number of days for which the data in the database will be retained. The default value is 3650 days, i.e. 10 years. The data will be deleted automatically once its age exceeds this threshold. 2. UPDATE specifies whether the data can be updated and how the data can be updated. - 1. UPDATE set to 0 means update operation is not allowed, the data with an existing timestamp will be dropped silently. - 2. UPDATE set to 1 means the whole row will be updated, the columns for which no value is specified will be set to NULL - 3. UPDATE set to 2 means updating a part of columns for a row is allowed, the columns for which no value is specified will be kept as no change + 1. UPDATE set to 0 means update operation is not allowed. The update for data with an existing timestamp will be discarded silently and the original record in the database will be preserved as is. + 2. UPDATE set to 1 means the whole row will be updated. The columns for which no value is specified will be set to NULL. + 3. UPDATE set to 2 means updating a subset of columns for a row is allowed. The columns for which no value is specified will be kept unchanged. 3. The maximum length of database name is 33 bytes. 4. The maximum length of a SQL statement is 65,480 bytes. 5. Below are the parameters that can be used when creating a database @@ -35,7 +35,7 @@ CREATE DATABASE [IF NOT EXISTS] db_name [KEEP keep] [DAYS days] [UPDATE 1]; - maxVgroupsPerDb: [Description](/reference/config/#maxvgroupsperdb) - comp: [Description](/reference/config/#comp) - precision: [Description](/reference/config/#precision) -6. Please note that all of the parameters mentioned in this section can be configured in configuration file `taosd.cfg` at server side and used by default, the default parameters can be overriden if they are specified in `create database` statement. +6. Please note that all of the parameters mentioned in this section are configured in configuration file `taos.cfg` on the TDengine server. If not specified in the `create database` statement, the values from taos.cfg are used by default. To override default parameters, they must be specified in the `create database` statement. ::: @@ -52,7 +52,7 @@ USE db_name; ``` :::note -This way is not applicable when using a REST connection +This way is not applicable when using a REST connection. 
In a REST connection the database name must be specified before a table or STable name. For example, to query the STable "meters" in database "test", the query would be "SELECT count(*) from test.meters". ::: @@ -63,13 +63,13 @@ DROP DATABASE [IF EXISTS] db_name; ``` :::note -All data in the database will be deleted too. This command must be used with caution. +All data in the database will be deleted too. This command must be used with extreme caution. Please follow your organization's data integrity, data backup, data security or any other applicable SOPs before using this command. ::: ## Change Database Configuration -Some examples are shown below to demonstrate how to change the configuration of a database. Please note that some configuration parameters can be changed after the database is created, but some others can't, for details of the configuration parameters of database please refer to [Configuration Parameters](/reference/config/). +Some examples are shown below to demonstrate how to change the configuration of a database. Please note that some configuration parameters can be changed after the database is created, but some cannot. For details of the configuration parameters of database please refer to [Configuration Parameters](/reference/config/). ``` ALTER DATABASE db_name COMP 2; @@ -81,7 +81,7 @@ COMP parameter specifies whether the data is compressed and how the data is comp ALTER DATABASE db_name REPLICA 2; ``` -REPLICA parameter specifies the number of replications of the database. +REPLICA parameter specifies the number of replicas of the database. ``` ALTER DATABASE db_name KEEP 365; @@ -124,4 +124,4 @@ SHOW DATABASES; SHOW CREATE DATABASE db_name; ``` -This command is useful when migrating the data from one TDengine cluster to another one. This command can be used to get the CREATE statement, which can be used in another TDengine to create the exact same database. +This command is useful when migrating the data from one TDengine cluster to another. This command can be used to get the CREATE statement, which can be used in another TDengine instance to create the exact same database. diff --git a/docs-en/12-taos-sql/03-table.md b/docs-en/12-taos-sql/03-table.md index 678965893e8b386d9f2842c6e4e650c2d650e080..f065a8e2396583bb7a512446b513ed60056ad55e 100644 --- a/docs-en/12-taos-sql/03-table.md +++ b/docs-en/12-taos-sql/03-table.md @@ -12,10 +12,10 @@ CREATE TABLE [IF NOT EXISTS] tb_name (timestamp_field_name TIMESTAMP, field1_nam :::info -1. The first column of a table must be of TIMESTAMP type, and it will be set as the primary key automatically +1. The first column of a table MUST be of type TIMESTAMP. It is automatically set as the primary key. 2. The maximum length of the table name is 192 bytes. -3. The maximum length of each row is 16k bytes, please note that the extra 2 bytes used by each BINARY/NCHAR column are also counted. +3. The maximum length of each row is 48k bytes, please note that the extra 2 bytes used by each BINARY/NCHAR column are also counted. -4. The name of the subtable can only consist of English characters, digits and underscore, and can't start with a digit. Table names are case insensitive. +4. The name of the subtable can only consist of characters from the English alphabet, digits and underscore. Table names can't start with a digit. Table names are case insensitive. 5. The maximum length in bytes must be specified when using BINARY or NCHAR types. 6.
Escape character "\`" can be used to avoid the conflict between table names and reserved keywords; the above rules will be bypassed when using an escape character on table names, but the upper limit for the name length is still valid. The table names specified using escape character are case sensitive. Only ASCII visible characters can be used with escape character. For example \`aBc\` and \`abc\` are different table names but `abc` and `aBc` are the same table name because they are both converted to `abc` internally. @@ -44,7 +44,7 @@ The tags for which no value is specified will be set to NULL. CREATE TABLE [IF NOT EXISTS] tb_name1 USING stb_name TAGS (tag_value1, ...) [IF NOT EXISTS] tb_name2 USING stb_name TAGS (tag_value2, ...) ...; ``` -This can be used to create a lot of tables in a single SQL statement to accelerate the speed of the creating tables. +This can be used to create a lot of tables in a single SQL statement while making table creation much faster. :::info @@ -111,7 +111,7 @@ If a table is created using a super table as template, the table definition can ALTER TABLE tb_name MODIFY COLUMN field_name data_type(length); ``` -The type of a column is variable length, like BINARY or NCHAR, this can be used to change (or increase) the length of the column. +If the type of a column is variable length, like BINARY or NCHAR, this command can be used to change the length of the column. :::note If a table is created using a super table as template, the table definition can only be changed on the corresponding super table, and the change will be automatically applied to all the subtables created using this super table as template. For tables created in the normal way, the table definition can be changed directly on the table. diff --git a/docs-en/12-taos-sql/04-stable.md b/docs-en/12-taos-sql/04-stable.md index 7354484f754b513ac2b8828ac1e13bc550a29efd..b8a608792ab327a81129d29ddd0ff44d7af6e6c5 100644 --- a/docs-en/12-taos-sql/04-stable.md +++ b/docs-en/12-taos-sql/04-stable.md @@ -9,7 +9,7 @@ Keyword `STable`, abbreviated for super table, is supported since version 2.0.15 ::: -## Crate STable +## Create STable ``` CREATE STable [IF NOT EXISTS] stb_name (timestamp_field_name TIMESTAMP, field1_name data_type1 [, field2_name data_type2 ...]) TAGS (tag1_name tag_type1, tag2_name tag_type2 [, tag3_name tag_type3]); @@ -19,7 +19,7 @@ The SQL statement of creating a STable is similar to that of creating a table, b :::info -1. The tag types specified in TAGS should NOT be timestamp. Since 2.1.3.0 timestamp type can be used in TAGS column, but its value must be fixed and arithmetic operation can't be applied on it. +1. A tag can be of type timestamp, since version 2.1.3.0, but its value must be fixed and arithmetic operations cannot be performed on it. Prior to version 2.1.3.0, tag types specified in TAGS could not be of type timestamp. 2. The tag names specified in TAGS should NOT be the same as other columns. 3. The tag names specified in TAGS should NOT be the same as any reserved keywords. (Please refer to [keywords](/taos-sql/keywords/).) 4. The maximum number of tags specified in TAGS is 128, there must be at least one tag, and the total length of all tag columns should NOT exceed 16KB. @@ -76,7 +76,7 @@ ALTER STable stb_name DROP COLUMN field_name; ALTER STable stb_name MODIFY COLUMN field_name data_type(length); ``` -This command can be used to change (or increase, more specifically) the length of a column of variable length types, like BINARY or NCHAR.
+This command can be used to change (or more specifically, increase) the length of a column of variable length types, like BINARY or NCHAR. ## Change Tags of A STable @@ -94,7 +94,7 @@ This command is used to add a new tag for a STable and specify the tag type. ALTER STable stb_name DROP TAG tag_name; ``` -The tag will be removed automatically from all the subtables created using the super table as template once a tag is removed from a super table. +The tag will be removed automatically from all the subtables, created using the super table as template, once a tag is removed from a super table. ### Change A Tag @@ -102,7 +102,7 @@ The tag will be removed automatically from all the subtables created using the s ALTER STable stb_name CHANGE TAG old_tag_name new_tag_name; ``` -The tag name will be changed automatically for all the subtables created using the super table as template once a tag name is changed for a super table. +The tag name will be changed automatically for all the subtables, created using the super table as template, once a tag name is changed for a super table. ### Change Tag Length @@ -110,7 +110,7 @@ The tag name will be changed automatically for all the subtables created using t ALTER STable stb_name MODIFY TAG tag_name data_type(length); ``` -This command can be used to change (or increase, more specifically) the length of a tag of variable length types, like BINARY or NCHAR. +This command can be used to change (or more specifically, increase) the length of a tag of variable length types, like BINARY or NCHAR. :::note Changing tag values can be applied to only subtables. All other tag operations, like add tag, remove tag, however, can be applied to only STable. If a new tag is added for a STable, the tag will be added with NULL value for all its subtables. diff --git a/docs-en/12-taos-sql/06-select.md b/docs-en/12-taos-sql/06-select.md index d9c39845f8576bb309d159b1c8cb6728a22c9c5d..8a017cf92e40aa4a854dcd531b7df291a9243515 100644 --- a/docs-en/12-taos-sql/06-select.md +++ b/docs-en/12-taos-sql/06-select.md @@ -21,7 +21,7 @@ SELECT select_expr [, select_expr ...] ## Wildcard -Wilcard \* can be used to specify all columns. The result includes only data columns for normal tables. +Wildcard \* can be used to specify all columns. The result includes only data columns for normal tables. ``` taos> SELECT * FROM d1001; @@ -51,14 +51,14 @@ taos> SELECT * FROM meters; Query OK, 9 row(s) in set (0.002022s) ``` -Wildcard can be used with table name as prefix, both below SQL statements have same effects and return all columns. +Wildcard can be used with table name as prefix. Both SQL statements below have the same effect and return all columns. ```SQL SELECT * FROM d1001; SELECT d1001.* FROM d1001; ``` -In JOIN query, however, with or without table name prefix will return different results. \* without table prefix will return all the columns of both tables, but \* with table name as prefix will return only the columns of that table. +In a JOIN query, however, the results are different with or without a table name prefix. \* without table prefix will return all the columns of both tables, but \* with table name as prefix will return only the columns of that table. ``` taos> SELECT * FROM d1001, d1003 WHERE d1001.ts=d1003.ts; @@ -76,7 +76,7 @@ taos> SELECT d1001.* FROM d1001,d1003 WHERE d1001.ts = d1003.ts; Query OK, 1 row(s) in set (0.020443s) ``` -Wilcard \* can be used with some functions, but the result may be different depending on the function being used. 
For example, `count(*)` returns only one column, i.e. the number of rows; `first`, `last` and `last_row` return all columns of the selected row. +Wildcard \* can be used with some functions, but the result may be different depending on the function being used. For example, `count(*)` returns only one column, i.e. the number of rows; `first`, `last` and `last_row` return all columns of the selected row. ``` taos> SELECT COUNT(*) FROM d1001; @@ -96,7 +96,7 @@ Query OK, 1 row(s) in set (0.000849s) ## Tags -Starting from version 2.0.14, tag columns can be selected together with data columns when querying sub tables. Please note that, however, wildcard \* doesn't represent any tag column, that means tag columns must be specified explicitly like the example below. +Starting from version 2.0.14, tag columns can be selected together with data columns when querying sub tables. Please note, however, that wildcard \* cannot be used to represent any tag column. This means that tag columns must be specified explicitly like the example below. ``` taos> SELECT location, groupid, current FROM d1001 LIMIT 2; @@ -109,7 +109,7 @@ Query OK, 2 row(s) in set (0.003112s) ## Get distinct values -`DISTINCT` keyword can be used to get all the unique values of tag columns from a super table, it can also be used to get all the unique values of data columns from a table or subtable. +`DISTINCT` keyword can be used to get all the unique values of tag columns from a super table. It can also be used to get all the unique values of data columns from a table or subtable. ```sql SELECT DISTINCT tag_name [, tag_name ...] FROM stb_name; @@ -118,15 +118,15 @@ SELECT DISTINCT col_name [, col_name ...] FROM tb_name; :::info -1. Configuration parameter `maxNumOfDistinctRes` in `taos.cfg` is used to control the number of rows to output. The minimum configurable value is 100,000, the maximum configurable value is 100,000,000, the default value is 1000,000. If the actual number of rows exceeds the value of this parameter, only the number of rows specified by this parameter will be output. -2. It can't be guaranteed that the results selected by using `DISTINCT` on columns of `FLOAT` or `DOUBLE` are exactly unique because of the precision nature of floating numbers. +1. Configuration parameter `maxNumOfDistinctRes` in `taos.cfg` is used to control the number of rows to output. The minimum configurable value is 100,000, the maximum configurable value is 100,000,000, the default value is 1,000,000. If the actual number of rows exceeds the value of this parameter, only the number of rows specified by this parameter will be output. +2. It can't be guaranteed that the results selected by using `DISTINCT` on columns of `FLOAT` or `DOUBLE` are exactly unique because of the precision errors in floating point numbers. 3. `DISTINCT` can't be used in the sub-query of a nested query statement, and can't be used together with aggregate functions, `GROUP BY` or `JOIN` in the same SQL statement. ::: ## Column Names of Result Set -When using `SELECT`, the column names in the result set will be same as that in the select clause if `AS` is not used. `AS` can be used to rename the column names in the result set. For example +When using `SELECT`, the column names in the result set will be the same as that in the select clause if `AS` is not used. `AS` can be used to rename the column names in the result set.
For example ``` taos> SELECT ts, ts AS primary_key_ts FROM d1001; @@ -161,7 +161,7 @@ SELECT * FROM d1001; ## Special Query -Some special query functionalities can be performed without `FORM` sub-clause. For example, below statement can be used to get the current database in use. +Some special query functions can be invoked without `FROM` sub-clause. For example, the statement below can be used to get the current database in use. ``` taos> SELECT DATABASE(); @@ -181,7 +181,7 @@ taos> SELECT DATABASE(); Query OK, 1 row(s) in set (0.000184s) ``` -Below statement can be used to get the version of client or server. +The statement below can be used to get the version of client or server. ``` taos> SELECT CLIENT_VERSION(); @@ -197,7 +197,7 @@ taos> SELECT SERVER_VERSION(); Query OK, 1 row(s) in set (0.000077s) ``` -Below statement is used to check the server status. One integer, like `1`, is returned if the server status is OK, otherwise an error code is returned. This is compatible with the status check for TDengine from connection pool or 3rd party tools, and can avoid the problem of losing the connection from a connection pool when using the wrong heartbeat checking SQL statement. +The statement below is used to check the server status. An integer, like `1`, is returned if the server status is OK, otherwise an error code is returned. This is compatible with the status check for TDengine from connection pool or 3rd party tools, and can avoid the problem of losing the connection from a connection pool when using the wrong heartbeat checking SQL statement. ``` taos> SELECT SERVER_STATUS(); @@ -284,7 +284,7 @@ taos> SELECT COUNT(tbname) FROM meters WHERE groupId > 2; Query OK, 1 row(s) in set (0.001091s) ``` -- Wildcard \* can be used to get all columns, or specific column names can be specified. Arithmetic operation can be performed on columns of number types, columns can be renamed in the result set. +- Wildcard \* can be used to get all columns, or specific column names can be specified. Arithmetic operation can be performed on columns of numerical types, and columns can be renamed in the result set. - Arithmetic operation on columns can't be used in where clause. For example, `where a*2>6;` is not allowed but `where a>6/2;` can be used instead for the same purpose. - Arithmetic operation on columns can't be used as the objectives of select statement. For example, `select min(2*a) from t;` is not allowed but `select 2*min(a) from t;` can be used instead. - Logical operation can be used in `WHERE` clause to filter numeric values, wildcard can be used to filter string values. @@ -318,13 +318,13 @@ Logical operations in below table can be used in the `where` clause to filter the - Operator `like` is used together with wildcards to match strings - '%' matches 0 or any number of characters, '\_' matches any single ASCII character. - `\_` is used to match the \_ in the string. - - The maximum length of wildcard string is 100 bytes from version 2.1.6.1 (before that the maximum length is 20 bytes). `maxWildCardsLength` in `taos.cfg` can be used to control this threshold. Too long wildcard string may slowdown the execution performance of `LIKE` operator. + - The maximum length of wildcard string is 100 bytes from version 2.1.6.1 (before that the maximum length is 20 bytes). `maxWildCardsLength` in `taos.cfg` can be used to control this threshold. A very long wildcard string may slow down the execution performance of `LIKE` operator. - `AND` keyword can be used to filter multiple columns simultaneously.
AND/OR operation can be performed on single or multiple columns from version 2.3.0.0. However, before 2.3.0.0 `OR` can't be used on multiple columns. - For timestamp column, only one condition can be used; for other columns or tags, `OR` keyword can be used to combine multiple logical operators. For example, `((value > 20 AND value < 30) OR (value < 12))`. - From version 2.3.0.0, multiple conditions can be used on timestamp column, but the result set can only contain single time range. - From version 2.0.17.0, operator `BETWEEN AND` can be used in where clause, for example `WHERE col2 BETWEEN 1.5 AND 3.25` means the filter condition is equal to "1.5 ≤ col2 ≤ 3.25". -- From version 2.1.4.0, operator `IN` can be used in the where clause. For example, `WHERE city IN ('California.SanFrancisco', 'California.SanDiego')`. For bool type, both `{true, false}` and `{0, 1}` are allowed, but integers other than 0 or 1 are not allowed. FLOAT and DOUBLE types are impacted by floating precision, only values that match the condition within the tolerance will be selected. Non-primary key column of timestamp type can be used with `IN`. -- From version 2.3.0.0, regular expression is supported in the where clause with keyword `match` or `nmatch`, the regular expression is case insensitive. +- From version 2.1.4.0, operator `IN` can be used in the where clause. For example, `WHERE city IN ('California.SanFrancisco', 'California.SanDiego')`. For bool type, both `{true, false}` and `{0, 1}` are allowed, but integers other than 0 or 1 are not allowed. FLOAT and DOUBLE types are impacted by floating point precision errors. Only values that match the condition within the tolerance will be selected. Non-primary key column of timestamp type can be used with `IN`. +- From version 2.3.0.0, regular expression is supported in the where clause with keyword `match` or `nmatch`. The regular expression is case insensitive. ## Regular Expression @@ -364,7 +364,7 @@ FROM temp_STable t1, temp_STable t2 WHERE t1.ts = t2.ts AND t1.deviceid = t2.deviceid AND t1.status=0; ``` -Similary, join operation can be performed on the result set of multiple sub queries. +Similarly, join operations can be performed on the result set of multiple sub queries. :::note Restrictions on join operation: @@ -380,7 +380,7 @@ Restrictions on join operation: ## Nested Query -Nested query is also called sub query, that means in a single SQL statement the result of inner query can be used as the data source of the outer query. +Nested query is also called sub query. This means that in a single SQL statement the result of inner query can be used as the data source of the outer query. From 2.2.0.0, unassociated sub query can be used in the `FROM` clause. Unassociated means the sub query doesn't use the parameters in the parent query. More specifically, in the `tb_name_list` of `SELECT` statement, an independent SELECT statement can be used. So a complete nested query looks like: @@ -390,14 +390,14 @@ SELECT ... FROM (SELECT ... FROM ...) ...; :::info -- Only one layer of nesting is allowed, that means no sub query is allowed in a sub query -- The result set returned by the inner query will be used as a "virtual table" by the outer query, the "virtual table" can be renamed using `AS` keyword for easy reference in the outer query. +- Only one layer of nesting is allowed, that means no sub query is allowed within a sub query +- The result set returned by the inner query will be used as a "virtual table" by the outer query. 
The "virtual table" can be renamed using `AS` keyword for easy reference in the outer query. - Sub query is not allowed in continuous query. - JOIN operation is allowed between tables/STables inside both inner and outer queries. Join operation can be performed on the result set of the inner query. - UNION operation is not allowed in either inner query or outer query. -- The functionalities that can be used in the inner query is same as non-nested query. - - `ORDER BY` inside the inner query doesn't make any sense but will slow down the query performance significantly, so please avoid such usage. -- Compared to the non-nested query, the functionalities that can be used in the outer query have such restrictions as: +- The functions that can be used in the inner query are the same as those that can be used in a non-nested query. + - `ORDER BY` inside the inner query is unnecessary and will slow down the query performance significantly. It is best to avoid the use of `ORDER BY` inside the inner query. +- Compared to the non-nested query, the functionality that can be used in the outer query has the following restrictions: - Functions - If the result set returned by the inner query doesn't contain timestamp column, then functions relying on timestamp can't be used in the outer query, like `TOP`, `BOTTOM`, `FIRST`, `LAST`, `DIFF`. - Functions that need to scan the data twice can't be used in the outer query, like `STDDEV`, `PERCENTILE`. @@ -442,8 +442,8 @@ The sum of col1 and col2 for rows later than 2018-06-01 08:00:00.000 and whose c SELECT (col1 + col2) AS 'complex' FROM tb1 WHERE ts > '2018-06-01 08:00:00.000' AND col2 > 1.2 LIMIT 10 OFFSET 5; ``` -The rows in the past 10 minutes and whose col2 is bigger than 3.14 are selected and output to the result file `/home/testoutpu.csv` with below SQL statement: +The rows in the past 10 minutes and whose col2 is bigger than 3.14 are selected and output to the result file `/home/testoutput.csv` with below SQL statement: ```SQL -SELECT COUNT(*) FROM tb1 WHERE ts >= NOW - 10m AND col2 > 3.14 >> /home/testoutpu.csv; +SELECT COUNT(*) FROM tb1 WHERE ts >= NOW - 10m AND col2 > 3.14 >> /home/testoutput.csv; ``` diff --git a/docs-en/12-taos-sql/07-function.md b/docs-en/12-taos-sql/07-function.md index 0d6e7f25649872f514dce21bcba38a3af4ba7a5d..129b7eb0c35b4409e8003855fb4facacb8e0c830 100644 --- a/docs-en/12-taos-sql/07-function.md +++ b/docs-en/12-taos-sql/07-function.md @@ -1,1868 +1,1253 @@ --- title: Functions +toc_max_heading_level: 4 --- -## Aggregate Functions - -Aggregate queries are supported in TDengine by the following aggregate functions and selection functions. - -### COUNT +## Single-Row Functions -``` -SELECT COUNT([*|field_name]) FROM tb_name [WHERE clause]; -``` +Single-row functions return a result row for each row in the query result. -**Description**: Get the number of rows or the number of non-null values in a table or a super table. +### Numeric Functions -**Return value type**: Long integer INT64 +#### ABS -**Applicable column types**: All +```sql +SELECT ABS(field_name) FROM { tb_name | stb_name } [WHERE clause] +``` -**Applicable table types**: table, super table, sub table +**Description**: The absolute value of a specific column. -**More explanation**: +**Return value type**: UBIGINT if the input value is integer; DOUBLE if the input value is FLOAT/DOUBLE.
-- Wildcard (\*) can be used to represent all columns, it's used to get the number of all rows -- The number of non-NULL values will be returned if this function is used on a specific column +**Applicable data types**: Numeric types. -**Examples**: +**Applicable table types**: table, STable. -``` -taos> SELECT COUNT(*), COUNT(voltage) FROM meters; - count(*) | count(voltage) | -================================================ - 9 | 9 | -Query OK, 1 row(s) in set (0.004475s) +**Applicable nested query**: Inner query and Outer query. -taos> SELECT COUNT(*), COUNT(voltage) FROM d1001; - count(*) | count(voltage) | -================================================ - 3 | 3 | -Query OK, 1 row(s) in set (0.001075s) -``` +**More explanations**: +- Can't be used with aggregate functions. -### AVG +#### ACOS -``` -SELECT AVG(field_name) FROM tb_name [WHERE clause]; +```sql +SELECT ACOS(field_name) FROM { tb_name | stb_name } [WHERE clause] ``` -**Description**: Get the average value of a column in a table or STable +**Description**: The anti-cosine of a specific column -**Return value type**: Double precision floating number +**Return value type**: Double if the input value is not NULL; or NULL if the input value is NULL -**Applicable column types**: Data types except for timestamp, binary, nchar and bool +**Applicable data types**: Numeric types. **Applicable table types**: table, STable -**Examples**: - -``` -taos> SELECT AVG(current), AVG(voltage), AVG(phase) FROM meters; - avg(current) | avg(voltage) | avg(phase) | -==================================================================================== - 11.466666751 | 220.444444444 | 0.293333333 | -Query OK, 1 row(s) in set (0.004135s) +**Applicable nested query**: Inner query and Outer query -taos> SELECT AVG(current), AVG(voltage), AVG(phase) FROM d1001; - avg(current) | avg(voltage) | avg(phase) | -==================================================================================== - 11.733333588 | 219.333333333 | 0.316666673 | -Query OK, 1 row(s) in set (0.000943s) -``` +**More explanations**: +- Can't be used with aggregate functions -### TWA +#### ASIN -``` -SELECT TWA(field_name) FROM tb_name WHERE clause; +```sql +SELECT ASIN(field_name) FROM { tb_name | stb_name } [WHERE clause] ``` -**Description**: Time weighted average on a specific column within a time range +**Description**: The anti-sine of a specific column -**Return value type**: Double precision floating number +**Return value type**: Double if the input value is not NULL; or NULL if the input value is NULL -**Applicable column types**: Data types except for timestamp, binary, nchar and bool +**Applicable data types**: Numeric types. **Applicable table types**: table, STable -**More explanations**: +**Applicable nested query**: Inner query and Outer query -- From version 2.1.3.0, function TWA can be used on stable with `GROUP BY`, i.e. timelines generated by `GROUP BY tbname` on a STable. +**More explanations**: +- Can't be used with aggregate functions -### IRATE +#### ATAN -``` -SELECT IRATE(field_name) FROM tb_name WHERE clause; +```sql +SELECT ATAN(field_name) FROM { tb_name | stb_name } [WHERE clause] ``` -**Description**: instantaneous rate on a specific column. The last two samples in the specified time range are used to calculate instantaneous rate. If the last sample value is smaller, then only the last sample value is used instead of the difference between the last two sample values. 
+**Description**: The anti-tangent of a specific column -**Return value type**: Double precision floating number -**Applicable column types**: Data types except for timestamp, binary, nchar and bool +**Return value type**: Double if the input value is not NULL; or NULL if the input value is NULL + +**Applicable data types**: Numeric types. **Applicable table types**: table, STable -**More explanations**: +**Applicable nested query**: Inner query and Outer query -- From version 2.1.3.0, function IRATE can be used on stble with `GROUP BY`, i.e. timelines generated by `GROUP BY tbname` on a STable. +**More explanations**: +- Can't be used with aggregate functions -### SUM +#### CEIL ``` -SELECT SUM(field_name) FROM tb_name [WHERE clause]; +SELECT CEIL(field_name) FROM { tb_name | stb_name } [WHERE clause]; ``` -**Description**: The sum of a specific column in a table or STable +**Description**: The rounded up value of a specific column -**Return value type**: Double precision floating number or long integer +**Return value type**: Same as the column being used -**Applicable column types**: Data types except for timestamp, binary, nchar and bool +**Applicable data types**: Numeric types. **Applicable table types**: table, STable -**Examples**: - -``` -taos> SELECT SUM(current), SUM(voltage), SUM(phase) FROM meters; - sum(current) | sum(voltage) | sum(phase) | -================================================================================ - 103.200000763 | 1984 | 2.640000001 | -Query OK, 1 row(s) in set (0.001702s) -taos> SELECT SUM(current), SUM(voltage), SUM(phase) FROM d1001; - sum(current) | sum(voltage) | sum(phase) | -================================================================================ - 35.200000763 | 658 | 0.950000018 | -Query OK, 1 row(s) in set (0.000980s) -``` +**Applicable nested query**: Inner query and outer query +**More explanations**: +- Arithmetic operation can be performed on the result of `ceil` function +- Can't be used with aggregate functions -### STDDEV +#### COS -``` -SELECT STDDEV(field_name) FROM tb_name [WHERE clause]; +```sql +SELECT COS(field_name) FROM { tb_name | stb_name } [WHERE clause] ``` -**Description**: Standard deviation of a specific column in a table or STable +**Description**: The cosine of a specific column -**Return value type**: Double precision floating number -**Applicable column types**: Data types except for timestamp, binary, nchar and bool +**Return value type**: Double if the input value is not NULL; or NULL if the input value is NULL -**Applicable table types**: table, STable (starting from version 2.0.15.1) +**Applicable data types**: Numeric types. -**Examples**: +**Applicable table types**: table, STable -``` -taos> SELECT STDDEV(current) FROM d1001; - stddev(current) | -============================ - 1.020892909 | -Query OK, 1 row(s) in set (0.000915s) -``` +**Applicable nested query**: Inner query and Outer query -### LEASTSQUARES +**More explanations**: +- Can't be used with aggregate functions + +#### FLOOR ``` -SELECT LEASTSQUARES(field_name, start_val, step_val) FROM tb_name [WHERE clause]; +SELECT FLOOR(field_name) FROM { tb_name | stb_name } [WHERE clause]; ``` -**Description**: The linear regression function of the specified column and the timestamp column (primary key), `start_val` is the initial value and `step_val` is the step value.
- -**Return value type**: A string in the format of "(slope, intercept)" - -**Applicable column types**: Data types except for timestamp, binary, nchar and bool - -**Applicable table types**: table only - -**Examples**: +**Description**: The rounded down value of a specific column -``` -taos> SELECT LEASTSQUARES(current, 1, 1) FROM d1001; - leastsquares(current, 1, 1) | -===================================================== -{slop:1.000000, intercept:9.733334} | -Query OK, 1 row(s) in set (0.000921s) -``` +**More explanations**: The restrictions are the same as those of the `CEIL` function. -### MODE +#### LOG -``` -SELECT MODE(field_name) FROM tb_name [WHERE clause]; +```sql +SELECT LOG(field_name, base) FROM { tb_name | stb_name } [WHERE clause] ``` -**Description**:The value which has the highest frequency of occurrence. NULL is returned if there are multiple values which have highest frequency of occurrence. It can't be used on timestamp column or tags. +**Description**: The logarithm of a specific column with `base` as the radix -**Return value type**:Same as the data type of the column being operated +**Return value type**: Double if the input value is not NULL; or NULL if the input value is NULL -**Applicable column types**:Data types except for timestamp +**Applicable data types**: Numeric types. -**More explanations**:Considering the number of returned result set is unpredictable, it's suggested to limit the number of unique values to 100,000, otherwise error will be returned. +**Applicable table types**: table, STable -**Applicable version**:From version 2.6.0.0 +**Applicable nested query**: Inner query and Outer query -**Examples**: +**More explanations**: +- Can't be used with aggregate functions -``` -taos> select voltage from d002; - voltage | -======================== - 1 | - 1 | - 2 | - 19 | -Query OK, 4 row(s) in set (0.003545s) -taos> select mode(voltage) from d002; - mode(voltage) | -======================== - 1 | -Query OK, 1 row(s) in set (0.019393s) -``` +#### POW -### HYPERLOGLOG +```sql +SELECT POW(field_name, power) FROM { tb_name | stb_name } [WHERE clause] ``` -SELECT HYPERLOGLOG(field_name) FROM { tb_name | stb_name } [WHERE clause]; -``` +**Description**: The power of a specific column with `power` as the exponent -**Description**:The cardinal number of a specific column is returned by using hyperloglog algorithm. +**Return value type**: Double if the input value is not NULL; or NULL if the input value is NULL -**Return value type**:Integer +**Applicable data types**: Numeric types. -**Applicable column types**:Any data type +**Applicable table types**: table, STable -**More explanations**: The benefit of using hyperloglog algorithm is that the memory usage is under control when the data volume is huge. However, when the data volume is very small, the result may be not accurate, it's recommented to use `select count(data) from (select unique(col) as data from table)` in this case.
+**Applicable nested query**: Inner query and Outer query -**Applicable versions**:From version 2.6.0.0 +**More explanations**: +- Can't be used with aggregate functions -**Examples**: +#### ROUND ``` -taos> select dbig from shll; - dbig | -======================== - 1 | - 1 | - 1 | - NULL | - 2 | - 19 | - NULL | - 9 | -Query OK, 8 row(s) in set (0.003755s) - -taos> select hyperloglog(dbig) from shll; - hyperloglog(dbig)| -======================== - 4 | -Query OK, 1 row(s) in set (0.008388s) +SELECT ROUND(field_name) FROM { tb_name | stb_name } [WHERE clause]; ``` -## Selection Functions +**Description**: The rounded value of a specific column. -When any select function is used, timestamp column or tag columns including `tbname` can be specified to show that the selected value are from which rows. +**More explanations**: The restrictions are the same as those of the `CEIL` function. -### MIN +#### SIN -``` -SELECT MIN(field_name) FROM {tb_name | stb_name} [WHERE clause]; +```sql +SELECT SIN(field_name) FROM { tb_name | stb_name } [WHERE clause] ``` -**Description**: The minimum value of a specific column in a table or STable +**Description**: The sine of a specific column -**Return value type**: Same as the data type of the column being operated -**Applicable column types**: Data types except for timestamp, binary, nchar and bool +**Return value type**: Double if the input value is not NULL; or NULL if the input value is NULL -**Applicable table types**: table, STable +**Applicable data types**: Numeric types. -**Examples**: +**Applicable table types**: table, STable -``` -taos> SELECT MIN(current), MIN(voltage) FROM meters; - min(current) | min(voltage) | -====================================== - 10.20000 | 218 | -Query OK, 1 row(s) in set (0.001765s) -taos> SELECT MIN(current), MIN(voltage) FROM d1001; - min(current) | min(voltage) | -====================================== - 10.30000 | 218 | -Query OK, 1 row(s) in set (0.000950s) -``` +**Applicable nested query**: Inner query and Outer query -### MAX +**More explanations**: +- Can't be used with aggregate functions -``` -SELECT MAX(field_name) FROM { tb_name | stb_name } [WHERE clause]; +#### SQRT -``` -**Description**: The maximum value of a specific column of a table or STable +```sql -**Return value type**: Same as the data type of the column being operated +SELECT SQRT(field_name) FROM { tb_name | stb_name } [WHERE clause] -**Applicable column types**: Data types except for timestamp, binary, nchar and bool +``` +**Description**: The square root of a specific column +**Return value type**: Double if the input value is not NULL; or NULL if the input value is NULL +**Applicable data types**: Numeric types.
**Applicable table types**: table, STable -**Examples**: - -``` -taos> SELECT MAX(current), MAX(voltage) FROM meters; - max(current) | max(voltage) | -====================================== - 13.40000 | 223 | -Query OK, 1 row(s) in set (0.001123s) -taos> SELECT MAX(current), MAX(voltage) FROM d1001; - max(current) | max(voltage) | -====================================== - 12.60000 | 221 | -Query OK, 1 row(s) in set (0.000987s) -``` -### FIRST +#### TAN -``` -SELECT FIRST(field_name) FROM { tb_name | stb_name } [WHERE clause]; +```sql +SELECT TAN(field_name) FROM { tb_name | stb_name } [WHERE clause] ``` -**Description**: The first non-null value of a specific column in a table or STable +**Description**: The tangent of a specific column + -**Return value type**: Same as the column being operated +**Return value type**: Double if the input value is not NULL; or NULL if the input value is NULL -**Applicable column types**: Any data type +**Applicable data types**: Numeric types. **Applicable table types**: table, STable -**More explanations**: - -- FIRST(\*) can be used to get the first non-null value of all columns -- NULL will be returned if all the values of the specified column are all NULL -- No result will NOT be returned if all the columns in the result set are all NULL +**Applicable nested query**: Inner query and Outer query -**Examples**: +**More explanations**: +- Can't be used with aggregate functions -``` -taos> SELECT FIRST(*) FROM meters; - first(ts) | first(current) | first(voltage) | first(phase) | -========================================================================================= -2018-10-03 14:38:04.000 | 10.20000 | 220 | 0.23000 | -Query OK, 1 row(s) in set (0.004767s) -taos> SELECT FIRST(current) FROM d1002; - first(current) | -======================= - 10.20000 | -Query OK, 1 row(s) in set (0.001023s) -``` +### String Functions -### LAST +String functions take strings as input and output numbers or strings. -``` -SELECT LAST(field_name) FROM { tb_name | stb_name } [WHERE clause]; -``` +#### CHAR_LENGTH -**Description**: The last non-NULL value of a specific column in a table or STable +``` +SELECT CHAR_LENGTH(str|column) FROM { tb_name | stb_name } [WHERE clause] -**Return value type**: Same as the column being operated +``` -**Applicable column types**: Any data type +**Description**: The length in number of characters of a string **Applicable table types**: table, STable -**More explanations**: +**Return value type**: Integer - -- LAST(\*) can be used to get the last non-NULL value of all columns +**Applicable data types**: VARCHAR or NCHAR -- If the values of a column in the result set are all NULL, NULL is returned for that column; if all columns in the result are all NULL, no result will be returned. +**Applicable nested query**: Inner query and Outer query -- When it's used on a STable, if there are multiple values with the timestamp in the result set, one of them will be returned randomly and it's not guaranteed that the same value is returned if the same query is run multiple times. +**More explanations** +- If the input value is NULL, the output is NULL too
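+
+For illustration, a minimal, hedged example follows (the table `t1` and its NCHAR column `name` are hypothetical, not part of the `meters` schema used elsewhere in these docs):
+
+```sql
+-- CHAR_LENGTH counts characters, while LENGTH counts bytes,
+-- so the two can differ for multi-byte NCHAR data
+SELECT CHAR_LENGTH(name) FROM t1;
+```
+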
- -**Examples**: -``` -taos> SELECT LAST(*) FROM meters; - last(ts) | last(current) | last(voltage) | last(phase) | -======================================================================================== -2018-10-03 14:38:16.800 | 12.30000 | 221 | 0.31000 | -Query OK, 1 row(s) in set (0.001452s) -taos> SELECT LAST(current) FROM d1002; - last(current) | -======================= - 10.30000 | -Query OK, 1 row(s) in set (0.000843s) -``` -### TOP +#### CONCAT -``` -SELECT TOP(field_name, K) FROM { tb_name | stb_name } [WHERE clause]; +```sql +SELECT CONCAT(str1|column1, str2|column2, ...) FROM { tb_name | stb_name } [WHERE clause] ``` -**Description**: The greatest _k_ values of a specific column in a table or STable. If a value has multiple occurrences in the column but counting all of them in will exceed the upper limit _k_, then a part of them will be returned randomly. +**Description**: The concatenation result of two or more strings. The number of strings to be concatenated is at least 2 and at most 8. -**Return value type**: Same as the column being operated +**Return value type**: If all input strings are VARCHAR type, the result is VARCHAR type too. If any one of input strings is NCHAR type, then the result is NCHAR. -**Applicable column types**: Data types except for timestamp, binary, nchar and bool +**Applicable data types**: VARCHAR, NCHAR. At least 2 input strings are required, and at most 8 input strings are allowed. **Applicable table types**: table, STable -**More explanations**: - -- _k_ must be in range [1,100] -- The timestamp associated with the selected values are returned too -- Can't be used with `FILL` - -**Examples**: - -``` -taos> SELECT TOP(current, 3) FROM meters; - ts | top(current, 3) | -================================================= -2018-10-03 14:38:15.000 | 12.60000 | -2018-10-03 14:38:16.600 | 13.40000 | -2018-10-03 14:38:16.800 | 12.30000 | -Query OK, 3 row(s) in set (0.001548s) -taos> SELECT TOP(current, 2) FROM d1001; - ts | top(current, 2) | -================================================= -2018-10-03 14:38:15.000 | 12.60000 | -2018-10-03 14:38:16.800 | 12.30000 | -Query OK, 2 row(s) in set (0.000810s) -``` +**Applicable nested query**: Inner query and Outer query -### BOTTOM +#### CONCAT_WS ``` -SELECT BOTTOM(field_name, K) FROM { tb_name | stb_name } [WHERE clause]; +SELECT CONCAT_WS(separator, str1|column1, str2|column2, ...) FROM { tb_name | stb_name } [WHERE clause] ``` -**Description**: The least _k_ values of a specific column in a table or STable. If a value has multiple occurrences in the column but counting all of them in will exceed the upper limit _k_, then a part of them will be returned randomly. +**Description**: The concatenation result of two or more strings with separator. The number of strings to be concatenated is at least 3 and at most 9. -**Return value type**: Same as the column being operated +**Return value type**: If all input strings are VARCHAR type, the result is VARCHAR type too. If any one of input strings is NCHAR type, then the result is NCHAR. -**Applicable column types**: Data types except for timestamp, binary, nchar and bool +**Applicable data types**: VARCHAR, NCHAR. At least 3 input strings are required, and at most 9 input strings are allowed.
**Applicable table types**: table, STable +**Applicable nested query**: Inner query and Outer query + **More explanations**: -- _k_ must be in range [1,100] -- The timestamp associated with the selected values are returned too -- Can't be used with `FILL` +- If the value of `separator` is NULL, the output is NULL. If the value of `separator` is not NULL but all other inputs are NULL, the output is an empty string. -**Examples**: +#### LENGTH ``` -taos> SELECT BOTTOM(voltage, 2) FROM meters; - ts | bottom(voltage, 2) | -=============================================== -2018-10-03 14:38:15.000 | 218 | -2018-10-03 14:38:16.650 | 218 | -Query OK, 2 row(s) in set (0.001332s) -taos> SELECT BOTTOM(current, 2) FROM d1001; - ts | bottom(current, 2) | -================================================= -2018-10-03 14:38:05.000 | 10.30000 | -2018-10-03 14:38:16.800 | 12.30000 | -Query OK, 2 row(s) in set (0.000793s) +SELECT LENGTH(str|column) FROM { tb_name | stb_name } [WHERE clause] ``` -### PERCENTILE - -``` -SELECT PERCENTILE(field_name, P) FROM { tb_name } [WHERE clause]; -``` +**Description**: The length in bytes of a string -**Description**: The value whose rank in a specific column matches the specified percentage. If such a value matching the specified percentage doesn't exist in the column, an interpolation value will be returned. +**Return value type**: Integer -**Return value type**: Double precision floating point +**Applicable data types**: VARCHAR or NCHAR +**Applicable table types**: table, STable -**Applicable column types**: Data types except for timestamp, binary, nchar and bool +**Applicable nested query**: Inner query and Outer query -**Applicable table types**: table +**More explanations** -**More explanations**: _P_ is in range [0,100], when _P_ is 0, the result is same as using function MIN; when _P_ is 100, the result is same as function MAX. +- If the input value is NULL, the output is NULL too -**Examples**: +#### LOWER ``` -taos> SELECT PERCENTILE(current, 20) FROM d1001; -percentile(current, 20) | -============================ - 11.100000191 | -Query OK, 1 row(s) in set (0.000787s) +SELECT LOWER(str|column) FROM { tb_name | stb_name } [WHERE clause] ``` -### APERCENTILE +**Description**: Convert the input string to lower case -``` -SELECT APERCENTILE(field_name, P[, algo_type]) -FROM { tb_name | stb_name } [WHERE clause] -``` +**Return value type**: Same as input -**Description**: Similar to `PERCENTILE`, but a simulated result is returned +**Applicable data types**: VARCHAR or NCHAR -**Return value type**: Double precision floating point **Applicable table types**: table, STable -**More explanations** +**Applicable nested query**: Inner query and Outer query -- _P_ is in range [0,100], when _P_ is 0, the result is same as using function MIN; when _P_ is 100, the result is same as function MAX. -- **algo_type** can only be input as `default` or `t-digest`, if it's not specified `default` will be used, i.e. `apercentile(column_name, 50)` is same as `apercentile(column_name, 50, "default")`. -- When `t-digest` is used, `t-digest` sampling is used to calculate. It can be used from version 2.2.0.0. +**More explanations** -**Nested query**: It can be used in both the outer query and inner query in a nested query. +- If the input value is NULL, the output is NULL too
- -``` -taos> SELECT APERCENTILE(current, 20) FROM d1001; -apercentile(current, 20) | -============================ - 10.300000191 | -Query OK, 1 row(s) in set (0.000645s) -taos> select apercentile (count, 80, 'default') from stb1; - apercentile (c0, 80, 'default') | -================================== - 601920857.210056424 | -Query OK, 1 row(s) in set (0.012363s) -taos> select apercentile (count, 80, 't-digest') from stb1; - apercentile (c0, 80, 't-digest') | -=================================== - 605869120.966666579 | -Query OK, 1 row(s) in set (0.011639s) -``` +#### LTRIM -### LAST_ROW ``` -SELECT LAST_ROW(field_name) FROM { tb_name | stb_name }; +SELECT LTRIM(str|column) FROM { tb_name | stb_name } [WHERE clause] ``` -**Description**: The last row of a table or STable +**Description**: Remove the leading blanks of a string -**Return value type**: Same as the column being operated +**Return value type**: Same as input -**Applicable column types**: Any data type +**Applicable data types**: VARCHAR or NCHAR **Applicable table types**: table, STable -**More explanations**: - -- When it's used against a STable, multiple rows with the same and largest timestamp may exist, in this case one of them is returned randomly and it's not guaranteed that the result is same if the query is run multiple times. -- Can't be used with `INTERVAL`. +**Applicable nested query**: Inner query and Outer query -**Examples**: +**More explanations** -``` - taos> SELECT LAST_ROW(current) FROM meters; - last_row(current) | - ======================= - 12.30000 | - Query OK, 1 row(s) in set (0.001238s) - taos> SELECT LAST_ROW(current) FROM d1002; - last_row(current) | - ======================= - 10.30000 | - Query OK, 1 row(s) in set (0.001042s) -``` +- If the input value is NULL, the output is NULL too -### INTERP [From version 2.3.1] - -``` -SELECT INTERP(field_name) FROM { tb_name | stb_name } [WHERE where_condition] [ RANGE(timestamp1,timestamp2) ] [EVERY(interval)] [FILL ({ VALUE | PREV | NULL | LINEAR | NEXT})]; -``` -**Description**: The value that matches the specified timestamp range is returned, if existing; or an interpolation value is returned. -**Return value type**: Same as the column being operated -**Applicable column types**: Numeric data types -**Applicable table types**: table, STable, nested query **More explanations** -- `INTERP` is used to get the value that matches the specified time slice from a column. If no such value exists an interpolation value will be returned based on `FILL` parameter. -- The input data of `INTERP` is the value of the specified column, `where` can be used to filter the original data. If no `where` condition is specified then all original data is the input. -- The output time range of `INTERP` is specified by `RANGE(timestamp1,timestamp2)` parameter, with timestamp1<=timestamp2. timestamp1 is the starting point of the output time range and must be specified. timestamp2 is the ending point of the output time range and must be specified. If `RANGE` is not specified, then the timestamp of the first row that matches the filter condition is treated as timestamp1, the timestamp of the last row that matches the filter condition is treated as timestamp2. -- The number of rows in the result set of `INTERP` is determined by the parameter `EVERY`. Starting from timestamp1, one interpolation is performed for every time interval specified `EVERY` parameter.
If `EVERY` parameter is not used, the time windows will be considered as no ending timestamp, i.e. there is only one time window from timestamp1. -- Interpolation is performed based on `FILL` parameter. No interpolation is performed if `FILL` is not used, that means either the original data that matches is returned or nothing is returned. -- `INTERP` can only be used to interpolate in single timeline. So it must be used with `group by tbname` when it's used on a STable. It can't be used with `GROUP BY` when it's used in the inner query of a nested query. -- The result of `INTERP` is not influenced by `ORDER BY TIMESTAMP`, which impacts the output order only. -**Examples**: Based on the `meters` schema used throughout the documents -- Single point linear interpolation between "2017-07-14 18:40:00" and "2017-07-14 18:40:00: -``` - taos> SELECT INTERP(current) FROM t1 RANGE('2017-7-14 18:40:00','2017-7-14 18:40:00') FILL(LINEAR); -``` -- Get original data every 5 seconds, no interpolation, between "2017-07-14 18:00:00" and "2017-07-14 19:00:00: -``` - taos> SELECT INTERP(current) FROM t1 RANGE('2017-7-14 18:00:00','2017-7-14 19:00:00') EVERY(5s); -``` -- Linear interpolation every 5 seconds between "2017-07-14 18:00:00" and "2017-07-14 19:00:00: -``` - taos> SELECT INTERP(current) FROM t1 RANGE('2017-7-14 18:00:00','2017-7-14 19:00:00') EVERY(5s) FILL(LINEAR); -``` -- Backward interpolation every 5 seconds -``` - taos> SELECT INTERP(current) FROM t1 EVERY(5s) FILL(NEXT); -``` -- Linear interpolation every 5 seconds between "2017-07-14 17:00:00" and "2017-07-14 20:00:00" -``` - taos> SELECT INTERP(current) FROM t1 where ts >= '2017-07-14 17:00:00' and ts <= '2017-07-14 20:00:00' RANGE('2017-7-14 18:00:00','2017-7-14 19:00:00') EVERY(5s) FILL(LINEAR); -``` -### INTERP [Prior to version 2.3.1] +#### RTRIM ``` -SELECT INTERP(field_name) FROM { tb_name | stb_name } WHERE ts='timestamp' [FILL ({ VALUE | PREV | NULL | LINEAR | NEXT})]; +SELECT RTRIM(str|column) FROM { tb_name | stb_name } [WHERE clause] ``` -**Description**: The value of a specific column that matches the specified time slice +**Description**: Remove the trailing blanks of a string -**Return value type**: Same as the column being operated +**Return value type**: Same as input -**Applicable column types**: Numeric data type +**Applicable data types**: VARCHAR or NCHAR **Applicable table types**: table, STable -**More explanations**: - -- It can be used from version 2.0.15.0 -- Time slice must be specified. If there is no data matching the specified time slice, interpolation is performed based on `FILL` parameter. Conditions such as tags or `tbname` can be used `Where` clause can be used to filter data. -- The timestamp specified must be within the time range of the data rows of the table or STable. If it is beyond the valid time range, nothing is returned even with `FILL` parameter. +**Applicable nested query**: Inner query and Outer query +**More explanations** -- `INTERP` can be used to query only single time point once. `INTERP` can be used with `EVERY` to get the interpolation value every time interval. +- If the input value is NULL, the output is NULL too
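+
+Since `LTRIM` and `RTRIM` each remove blanks from one side only, the two can be nested to trim both sides. A small, hedged sketch (hypothetical table `t1` with a VARCHAR column `name`):
+
+```sql
+-- strips leading and trailing blanks; blanks inside the string are preserved
+SELECT RTRIM(LTRIM(name)) FROM t1;
+```
+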
-- **Examples**: - -``` - taos> SELECT INTERP(*) FROM meters WHERE ts='2017-7-14 18:40:00.004'; - interp(ts) | interp(current) | interp(voltage) | interp(phase) | - ========================================================================================== - 2017-07-14 18:40:00.004 | 9.84020 | 216 | 0.32222 | - Query OK, 1 row(s) in set (0.002652s) -``` -If there is no data corresponding to the specified timestamp, an interpolation value is returned if interpolation policy is specified by `FILL` parameter; or nothing is returned. -``` - taos> SELECT INTERP(*) FROM meters WHERE tbname IN ('d636') AND ts='2017-7-14 18:40:00.005'; - Query OK, 0 row(s) in set (0.004022s) - - taos> SELECT INTERP(*) FROM meters WHERE tbname IN ('d636') AND ts='2017-7-14 18:40:00.005' FILL(PREV); - interp(ts) | interp(current) | interp(voltage) | interp(phase) | - ========================================================================================== - 2017-07-14 18:40:00.005 | 9.88150 | 217 | 0.32500 | - Query OK, 1 row(s) in set (0.003056s) -``` -Interpolation is performed every 5 milliseconds between `['2017-7-14 18:40:00', '2017-7-14 18:40:00.014']` -``` - taos> SELECT INTERP(current) FROM d636 WHERE ts>='2017-7-14 18:40:00' AND ts<='2017-7-14 18:40:00.014' EVERY(5a); - ts | interp(current) | - ================================================= - 2017-07-14 18:40:00.000 | 10.04179 | - 2017-07-14 18:40:00.010 | 10.16123 | - Query OK, 2 row(s) in set (0.003487s) -``` -### TAIL +#### SUBSTR ``` -SELECT TAIL(field_name, k, offset_val) FROM {tb_name | stb_name} [WHERE clause]; +SELECT SUBSTR(str,pos[,len]) FROM { tb_name | stb_name } [WHERE clause] ``` -**Description**: The next _k_ rows are returned after skipping the last `offset_val` rows, NULL values are not ignored. `offset_val` is optional parameter. When it's not specified, the last _k_ rows are returned. When `offset_val` is used, the effect is same as `order by ts desc LIMIT k OFFSET offset_val`. +**Description**: The sub-string starting from `pos` with length of `len` from the original string `str` -**Parameter value range**: k: [1,100] offset_val: [0,100] +**Return value type**: Same as input -**Return value type**: Same as the column being operated +**Applicable data types**: VARCHAR or NCHAR -**Applicable column types**: Any data type except form timestamp, i.e. the primary key +**Applicable table types**: table, STable -**Applicable versions**: From version 2.6.0.0 +**Applicable nested query**: Inner query and Outer query -**Examples**: +**More explanations**: +- If the input is NULL, the output is NULL +- Parameter `pos` can be a positive or negative integer; if it's positive, the starting position will be counted from the beginning of the string; if it's negative, the starting position will be counted from the end of the string. +- If `len` is not specified, it means from `pos` to the end. -``` -taos> select ts,dbig from tail2; - ts | dbig | -================================================== -2021-10-15 00:31:33.000 | 1 | -2021-10-17 00:31:31.000 | NULL | -2021-12-24 00:31:34.000 | 2 | -2022-01-01 08:00:05.000 | 19 | -2022-01-01 08:00:06.000 | NULL | -2022-01-01 08:00:07.000 | 9 | -Query OK, 6 row(s) in set (0.001952s) -taos> select tail(dbig,2,2) from tail2; -ts | tail(dbig,2,2) | -================================================== -2021-12-24 00:31:34.000 | 2 | -2022-01-01 08:00:05.000 | 19 | -Query OK, 2 row(s) in set (0.002307s) -``` -### UNIQUE +#### UPPER -``` -SELECT UNIQUE(field_name) FROM {tb_name | stb_name} [WHERE clause]; +``` +SELECT UPPER(str|column) FROM { tb_name | stb_name } [WHERE clause] ``` -**Description**: The values that occur the first time in the specified column. The effect is similar to `distinct` keyword, but it can also be used to match tags or timestamp.
+**Return value type**: Same as input -**Return value type**: Same as the column or tag being operated +**Applicable data types**: VARCHAR or NCHAR -**Applicable column types**: Any data types except for timestamp +**Applicable table types**: table, STable -**Applicable versions**: From version 2.6.0.0 +**Applicable nested query**: Inner query and Outer query **More explanations**: -- It can be used against table or STable, but can't be used together with time window, like `interval`, `state_window` or `session_window` . -- Considering the number of result sets is unpredictable, it's suggested to limit the distinct values under 100,000 to control the memory usage, otherwise error will be returned. +- If the input is NULL, the output is NULL +- Parameter `pos` can be an positive or negative integer; If it's positive, the starting position will be counted from the beginning of the string; if it's negative, the starting position will be counted from the end of the string. +- If `len` is not specified, it means from `pos` to the end. -**Examples**: +#### UPPER ``` -taos> select ts,voltage from unique1; - ts | voltage | -================================================== -2021-10-17 00:31:31.000 | 1 | -2022-01-24 00:31:31.000 | 1 | -2021-10-17 00:31:31.000 | 1 | -2021-12-24 00:31:31.000 | 2 | -2022-01-01 08:00:01.000 | 19 | -2021-10-17 00:31:31.000 | NULL | -2022-01-01 08:00:02.000 | NULL | -2022-01-01 08:00:03.000 | 9 | -Query OK, 8 row(s) in set (0.003018s) - -taos> select unique(voltage) from unique1; -ts | unique(voltage) | -================================================== -2021-10-17 00:31:31.000 | 1 | -2021-10-17 00:31:31.000 | NULL | -2021-12-24 00:31:31.000 | 2 | -2022-01-01 08:00:01.000 | 19 | -2022-01-01 08:00:03.000 | 9 | -Query OK, 5 row(s) in set (0.108458s) +SELECT UPPER(str|column) FROM { tb_name | stb_name } [WHERE clause] ``` -## Scalar functions +**Description**: Convert the input string to upper case -### DIFF +**Return value type**: Same as input -```sql -SELECT {DIFF(field_name, ignore_negative) | DIFF(field_name)} FROM tb_name [WHERE clause]; -``` +**Applicable data types**: VARCHAR or NCHAR -**Description**: The different of each row with its previous row for a specific column. `ignore_negative` can be specified as 0 or 1, the default value is 1 if it's not specified. `1` means negative values are ignored. +**Applicable table types**: table, STable -**Return value type**: Same as the column being operated +**Applicable nested query**: Inner query and Outer query -**Applicable column types**: Data types except for timestamp, binary, nchar and bool +**More explanations** -**Applicable table types**: table, STable +- If the input value is NULL, the output is NULL too -**More explanations**: +### Conversion Functions -- The number of result rows is the number of rows subtracted by one, no output for the first row -- From version 2.1.30, `DIFF` can be used on STable with `GROUP by tbname` -- From version 2.6.0, `ignore_negative` parameter is supported +This kind of functions convert from one data type to another one. 
-**Examples**:
+#### CAST

```sql
-taos> SELECT DIFF(current) FROM d1001;
- ts | diff(current) |
-=================================================
-2018-10-03 14:38:15.000 | 2.30000 |
-2018-10-03 14:38:16.800 | -0.30000 |
-Query OK, 2 row(s) in set (0.001162s)
-```
-
-### DERIVATIVE
-
-```
-SELECT DERIVATIVE(field_name, time_interval, ignore_negative) FROM tb_name [WHERE clause];
+SELECT CAST(expression AS type_name) FROM { tb_name | stb_name } [WHERE clause]
```

-**Description**: The derivative of a specific column. The time rage can be specified by parameter `time_interval`, the minimum allowed time range is 1 second (1s); the value of `ignore_negative` can be 0 or 1, 1 means negative values are ignored.
+**Description**: It's used for type casting. The input parameter `expression` can be data columns, constants, scalar functions or arithmetic between them.

-**Return value type**: Double precision floating point
+**Return value type**: The type specified by parameter `type_name`

-**Applicable column types**: Data types except for timestamp, binary, nchar and bool
+**Applicable data types**:

-**Applicable table types**: table, STable
+- Parameter `expression` can be any data type except for JSON
+- The output data type specified by `type_name` can only be one of BIGINT/VARCHAR(N)/TIMESTAMP/NCHAR(N)/BIGINT UNSIGNED

**More explanations**:

-- It is available from version 2.1.3.0, the number of result rows is the number of total rows in the time range subtracted by one, no output for the first row.
-- It can be used together with `GROUP BY tbname` against a STable.
-
-**Examples**:
-
-```
-taos> select derivative(current, 10m, 0) from t1;
- ts | derivative(current, 10m, 0) |
-========================================================
- 2021-08-20 10:11:22.790 | 0.500000000 |
- 2021-08-20 11:11:22.791 | 0.166666620 |
- 2021-08-20 12:11:22.791 | 0.000000000 |
- 2021-08-20 13:11:22.792 | 0.166666620 |
- 2021-08-20 14:11:22.792 | -0.666666667 |
-Query OK, 5 row(s) in set (0.004883s)
-```
-
-### SPREAD
-
-```
-SELECT SPREAD(field_name) FROM { tb_name | stb_name } [WHERE clause];
-```
-
-**Description**: The difference between the max and the min of a specific column
-
-**Return value type**: Double precision floating point
-
-**Applicable column types**: Data types except for binary, nchar, and bool
-
-**Applicable table types**: table, STable
-
-**More explanations**: Can be used on a column of TIMESTAMP type, the result is the time range size.
-
-**Examples**:
-
-```
-taos> SELECT SPREAD(voltage) FROM meters;
- spread(voltage) |
-============================
- 5.000000000 |
-Query OK, 1 row(s) in set (0.001792s)
-
-taos> SELECT SPREAD(voltage) FROM d1001;
- spread(voltage) |
-============================
- 3.000000000 |
-Query OK, 1 row(s) in set (0.000836s)
-```
+- Error will be reported for unsupported type casting
+- NULL will be returned if the input value is NULL
+- Some values of some supported data types may not be cast correctly; below are known issues:
+  1) When casting VARCHAR/NCHAR to BIGINT/BIGINT UNSIGNED, some characters may be treated as illegal, for example "a" may be converted to 0.
+  2) There may be overflow when casting signed integer or TIMESTAMP to unsigned BIGINT
+  3) There may be overflow when casting unsigned BIGINT to BIGINT
+  4) There may be overflow when casting FLOAT/DOUBLE to BIGINT or UNSIGNED BIGINT

-### CEIL
+#### TO_ISO8601

-```
-SELECT CEIL(field_name) FROM { tb_name | stb_name } [WHERE clause];
+```sql
+SELECT TO_ISO8601(ts_val | ts_col) FROM { tb_name | stb_name } [WHERE clause];
```

-**Description**: The round up value of a specific column
+**Description**: The ISO8601 date/time format converted from a UNIX timestamp, plus the timezone of the client side system

-**Return value type**: Same as the column being used
+**Return value type**: VARCHAR

-**Applicable data types**: Data types except for timestamp, binary, nchar, bool
+**Applicable column types**: TIMESTAMP, constant or a column

**Applicable table types**: table, STable

-**Applicable nested query**: Inner query and outer query
-
**More explanations**:

-- Can't be used on any tags of any type
-- Arithmetic operation can be performed on the result of `ceil` function
-- Can't be used with aggregate functions
-
-### FLOOR
-
-```
-SELECT FLOOR(field_name) FROM { tb_name | stb_name } [WHERE clause];
-```
-
-**Description**: The round down value of a specific column
-
-**More explanations**: The restrictions are same as `CEIL` function.
-
-### ROUND
-
-```
-SELECT ROUND(field_name) FROM { tb_name | stb_name } [WHERE clause];
-```
-
-**Description**: The round value of a specific column.
-
-**More explanations**: The restrictions are same as `CEIL` function.
+- If the input is a UNIX timestamp constant, the precision of the returned value is determined by the digits of the input timestamp
+- If the input is a column of TIMESTAMP type, the precision of the returned value is the same as the precision set for the current database in use

-### CSUM
+#### TO_JSON

```sql
- SELECT CSUM(field_name) FROM { tb_name | stb_name } [WHERE clause]
+SELECT TO_JSON(str_literal) FROM { tb_name | stb_name } [WHERE clause];
```

-**Description**: The cumulative sum of each row for a specific column. The number of output rows is same as that of the input rows.
+**Description**: Convert a JSON string to a JSON body.

-**Return value type**: Long integer for integers; Double for floating points. Timestamp is returned for each row.
+**Return value type**: JSON

-**Applicable data types**: Data types except for timestamp, binary, nchar, and bool
+**Applicable column types**: JSON string, in a format like '{ "literal" : literal }'. '{}' is the NULL value. Keys in the string must be string constants; values can be constants of numeric types, bool, string or NULL. Escape characters are not allowed in the JSON string.

**Applicable table types**: table, STable

-**Applicable nested query**: Inner query and Outer query
-
-**More explanations**:
-
-- Can't be used on tags when it's used on STable
-- Arithmetic operation can't be performed on the result of `csum` function
-- Can only be used with aggregate functions
-- `Group by tbname` must be used together on a STable to force the result on a single timeline
+**Applicable nested query**: Inner query and Outer query

-**Applicable versions**: From 2.3.0.x

-### MAVG
+#### TO_UNIXTIMESTAMP

```sql
- SELECT MAVG(field_name, K) FROM { tb_name | stb_name } [WHERE clause]
+SELECT TO_UNIXTIMESTAMP(datetime_string | ts_col) FROM { tb_name | stb_name } [WHERE clause];
```

-**Description**: The moving average of continuous _k_ values of a specific column.
If the number of input rows is less than _k_, nothing is returned. The applicable range is _k_ is [1,1000].
-
-**Return value type**: Double precision floating point
+**Description**: UNIX timestamp converted from a string of date/time format

-**Applicable data types**: Data types except for timestamp, binary, nchar, and bool
+**Return value type**: Long integer

-**Applicable nested query**: Inner query and Outer query
+**Applicable column types**: Constant or column of VARCHAR/NCHAR

**Applicable table types**: table, STable

**More explanations**:

-- Arithmetic operation can't be performed on the result of `MAVG`.
-- Can only be used with data columns, can't be used with tags.
-- Can't be used with aggregate functions.
-- Must be used with `GROUP BY tbname` when it's used on a STable to force the result on each single timeline.
+- The input string must be compatible with the ISO8601/RFC3339 standard, 0 will be returned if the string can't be converted
+- The precision of the returned timestamp is the same as the precision set for the current database in use

-**Applicable versions**: From 2.3.0.x
+### DateTime Functions

-### SAMPLE
+These functions operate on timestamp data. NOW(), TODAY() and TIMEZONE() are executed only once even if they occur multiple times in a single SQL statement.
+
+#### NOW

```sql
- SELECT SAMPLE(field_name, K) FROM { tb_name | stb_name } [WHERE clause]
+SELECT NOW() FROM { tb_name | stb_name } [WHERE clause];
+SELECT select_expr FROM { tb_name | stb_name } WHERE ts_col cond_operator NOW();
+INSERT INTO tb_name VALUES (NOW(), ...);
```

-**Description**: _k_ sampling values of a specific column. The applicable range of _k_ is [1,10000]
+**Description**: The current time of the client side system

-**Return value type**: Same as the column being operated plus the associated timestamp
+**Return value type**: TIMESTAMP

-**Applicable data types**: Any data type except for tags of STable
+**Applicable column types**: TIMESTAMP only

**Applicable table types**: table, STable

-**Applicable nested query**: Inner query and Outer query
-
**More explanations**:

-- Arithmetic operation can't be operated on the result of `SAMPLE` function
-- Must be used with `Group by tbname` when it's used on a STable to force the result on each single timeline
+- Add and Subtract operations can be performed, for example NOW() + 1s, the time unit can be:
+  b(nanosecond), u(microsecond), a(millisecond), s(second), m(minute), h(hour), d(day), w(week)
+- The precision of the returned timestamp is the same as the precision set for the current database in use

-**Applicable versions**: From 2.3.0.x

-### ASIN
+#### TIMEDIFF

```sql
-SELECT ASIN(field_name) FROM { tb_name | stb_name } [WHERE clause]
+SELECT TIMEDIFF(ts_val1 | datetime_string1 | ts_col1, ts_val2 | datetime_string2 | ts_col2 [, time_unit]) FROM { tb_name | stb_name } [WHERE clause];
```

-**Description**: The anti-sine of a specific column
+**Description**: The difference between two timestamps, rounded to the time unit specified by `time_unit`

-**Return value type**: Double if the input value is not NULL; or NULL if the input value is NULL
+**Return value type**: Long Integer

-**Applicable data types**: Data types except for timestamp, binary, nchar, bool
+**Applicable column types**: UNIX timestamp constant, string constant of date/time format, or a column of TIMESTAMP type

**Applicable table types**: table, STable

-**Applicable nested query**: Inner query and Outer query
-
-**Applicable versions**: From 2.6.0.0
**More explanations**:

-- Can't be used with tags
-- Can't be used with aggregate functions
+- Time unit specified by `time_unit` can be:
+  1u(microsecond), 1a(millisecond), 1s(second), 1m(minute), 1h(hour), 1d(day).
+- The precision of the returned timestamp is the same as the precision set for the current database in use

-### ACOS
+#### TIMETRUNCATE

```sql
-SELECT ACOS(field_name) FROM { tb_name | stb_name } [WHERE clause]
+SELECT TIMETRUNCATE(ts_val | datetime_string | ts_col, time_unit) FROM { tb_name | stb_name } [WHERE clause];
```

-**Description**: The anti-cosine of a specific column
-
-**Return value type**: Double if the input value is not NULL; or NULL if the input value is NULL
-
-**Applicable data types**: Data types except for timestamp, binary, nchar, bool
+**Description**: Truncate the input timestamp with unit specified by `time_unit`

-**Applicable table types**: table, STable
+**Return value type**: TIMESTAMP

-**Applicable nested query**: Inner query and Outer query
+**Applicable column types**: UNIX timestamp constant, string constant of date/time format, or a column of timestamp

-**Applicable versions**: From 2.6.0.0
+**Applicable table types**: table, STable

**More explanations**:

-- Can't be used with tags
-- Can't be used with aggregate functions
+- Time unit specified by `time_unit` can be:
+  1u(microsecond), 1a(millisecond), 1s(second), 1m(minute), 1h(hour), 1d(day).
+- The precision of the returned timestamp is the same as the precision set for the current database in use

-### ATAN
+#### TIMEZONE

```sql
-SELECT ATAN(field_name) FROM { tb_name | stb_name } [WHERE clause]
+SELECT TIMEZONE() FROM { tb_name | stb_name } [WHERE clause];
```

-**Description**: anti-tangent of a specific column
-
-**Description**: The anti-cosine of a specific column
+**Description**: The timezone of the client side system

-**Return value type**: Double if the input value is not NULL; or NULL if the input value is NULL
+**Return value type**: VARCHAR

-**Applicable data types**: Data types except for timestamp, binary, nchar, bool
+**Applicable column types**: None

**Applicable table types**: table, STable

-**Applicable nested query**: Inner query and Outer query
-
-**Applicable versions**: From 2.6.0.0
-
-**More explanations**:
-
-- Can't be used with tags
-- Can't be used with aggregate functions
-
-### SIN
+#### TODAY

```sql
-SELECT SIN(field_name) FROM { tb_name | stb_name } [WHERE clause]
+SELECT TODAY() FROM { tb_name | stb_name } [WHERE clause];
+SELECT select_expr FROM { tb_name | stb_name } WHERE ts_col cond_operator TODAY();
+INSERT INTO tb_name VALUES (TODAY(), ...);
```

-**Description**: The sine of a specific column
-
-**Description**: The anti-cosine of a specific column
+**Description**: The timestamp of 00:00:00 of the client side system

-**Return value type**: Double if the input value is not NULL; or NULL if the input value is NULL
+**Return value type**: TIMESTAMP

-**Applicable data types**: Data types except for timestamp, binary, nchar, bool
+**Applicable column types**: TIMESTAMP only

**Applicable table types**: table, STable

-**Applicable nested query**: Inner query and Outer query
+**More explanations**:

-**Applicable versions**: From 2.6.0.0
+- Add and Subtract operations can be performed, for example TODAY() + 1s, the time unit can be:
+  b(nanosecond), u(microsecond), a(millisecond), s(second), m(minute), h(hour), d(day), w(week)
+- The precision of the returned timestamp is the same as the precision set for the current database in use

-**More explanations**:
+## Aggregate Functions
-- Can't be used with tags
-- Can't be used with aggregate functions
+Aggregate functions return a single result row for each group in the query result set. Groups are determined by the `GROUP BY` clause or time window clause if they are used; otherwise the whole result is considered a single group.

-### COS
+### AVG

-```sql
-SELECT COS(field_name) FROM { tb_name | stb_name } [WHERE clause]
+```
+SELECT AVG(field_name) FROM tb_name [WHERE clause];
```

-**Description**: The cosine of a specific column
-
-**Description**: The anti-cosine of a specific column
+**Description**: Get the average value of a column in a table or STable

-**Return value type**: Double if the input value is not NULL; or NULL if the input value is NULL
+**Return value type**: Double precision floating number

-**Applicable data types**: Data types except for timestamp, binary, nchar, bool
+**Applicable column types**: Numeric type

**Applicable table types**: table, STable

-**Applicable nested query**: Inner query and Outer query
+### COUNT

-**Applicable versions**: From 2.6.0.0
+```
+SELECT COUNT([*|field_name]) FROM tb_name [WHERE clause];
+```

-**More explanations**:
+**Description**: Get the number of rows or the number of non-null values in a table or a super table.

-- Can't be used with tags
-- Can't be used with aggregate functions
+**Return value type**: Long integer INT64

-### TAN
+**Applicable column types**: All

-```sql
-SELECT TAN(field_name) FROM { tb_name | stb_name } [WHERE clause]
-```
+**Applicable table types**: table, super table, sub table

-**Description**: The tangent of a specific column
+**More explanation**:

-**Description**: The anti-cosine of a specific column
+- Wildcard (\*) is used to represent all columns. The `COUNT` function is used to get the total number of all rows.
+- The number of non-NULL values will be returned if this function is used on a specific column.

-**Return value type**: Double if the input value is not NULL; or NULL if the input value is NULL
+### ELAPSED

-**Applicable data types**: Data types except for timestamp, binary, nchar, bool
+```mysql
+SELECT ELAPSED(field_name[, time_unit]) FROM { tb_name | stb_name } [WHERE clause] [INTERVAL(interval [, offset]) [SLIDING sliding]];
+```

-**Applicable table types**: table, STable
+**Description**: The `elapsed` function can be used to calculate the continuous time length in which there is valid data. If it's used with an `INTERVAL` clause, the returned result is the calculated time length within each time window. If it's used without an `INTERVAL` clause, the returned result is the calculated time length within the specified time range. Note that the return value of `elapsed` is the number of `time_unit` in the calculated time length.

-**Applicable nested query**: Inner query and Outer query
+**Return value type**: Double

-**Applicable versions**: From 2.6.0.0
+**Applicable Column type**: Timestamp

-**More explanations**:
+**Applicable tables**: table, STable, outer query in a nested query

-- Can't be used with tags
-- Can't be used with aggregate functions
+**Explanations**:

-### POW
+- `field_name` parameter can only be the first column of a table, i.e. the timestamp primary key.
+- The minimum value of `time_unit` is the time precision of the database. If `time_unit` is not specified, the time precision of the database is used as the default time unit.
+- It can be used with `INTERVAL` to get the valid time length of each time window.
Note that the return value is the same as the time window for all time windows except for the first and the last time window.
+- `order by asc/desc` has no effect on the result.
+- `group by tbname` must be used together when `elapsed` is used against a STable.
+- `group by` must NOT be used together when `elapsed` is used against a table or sub table.
+- When used in a nested query, it's only applicable when the inner query outputs an implicit timestamp column as the primary key. For example, `select elapsed(ts) from (select diff(value) from sub1)` is legal usage while `select elapsed(ts) from (select * from sub1)` is not.
+- It can't be used with `leastsquares`, `diff`, `derivative`, `top`, `bottom`, `last_row`, `interp`.

-```sql
-SELECT POW(field_name, power) FROM { tb_name | stb_name } [WHERE clause]
-```
+### LEASTSQUARES

-**Description**: The power of a specific column with `power` as the index
+```
+SELECT LEASTSQUARES(field_name, start_val, step_val) FROM tb_name [WHERE clause];
+```

-**Return value type**: Double if the input value is not NULL; or NULL if the input value is NULL
+**Description**: The linear regression function of the specified column and the timestamp column (primary key), `start_val` is the initial value and `step_val` is the step value.

-**Applicable data types**: Data types except for timestamp, binary, nchar, bool
+**Return value type**: A string in the format of "(slope, intercept)"

-**Applicable table types**: table, STable
+**Applicable column types**: Numeric types

-**Applicable nested query**: Inner query and Outer query
+**Applicable table types**: table only

-**Applicable versions**: From 2.6.0.0
+### MODE

-**More explanations**:
+```
+SELECT MODE(field_name) FROM tb_name [WHERE clause];
+```

-- Can't be used with tags
-- Can't be used with aggregate functions
+**Description**: The value that has the highest frequency of occurrence. NULL is returned if there are multiple values that have the highest frequency of occurrence. It can't be used on the timestamp column.

-### LOG
+**Return value type**: Same as the data type of the column being operated upon

-```sql
-SELECT LOG(field_name, base) FROM { tb_name | stb_name } [WHERE clause]
-```
+**Applicable column types**: Data types except for timestamp

-**Description**: The log of a specific with `base` as the radix
+**More explanations**: Considering that the size of the returned result set is unpredictable, it's suggested to limit the number of unique values to 100,000, otherwise an error will be returned.

-**Return value type**: Double if the input value is not NULL; or NULL if the input value is NULL
+### SPREAD

-**Applicable data types**: Data types except for timestamp, binary, nchar, bool
+```
+SELECT SPREAD(field_name) FROM { tb_name | stb_name } [WHERE clause];
+```

-**Applicable table types**: table, STable
+**Description**: The difference between the max and the min of a specific column

-**Applicable nested query**: Inner query and Outer query
+**Return value type**: Double precision floating point

-**Applicable versions**: From 2.6.0.0
+**Applicable column types**: Numeric types

-**More explanations**:
+**Applicable table types**: table, STable

-- Can't be used with tags
-- Can't be used with aggregate functions
+**More explanations**: Can be used on a column of TIMESTAMP type, in which case the result is the time range size.
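+For illustration, the two query shapes below (reusing the sample STable `meters` and sub table `d1001` with a `voltage` column that appear in examples elsewhere in this document; output omitted) compute the spread over a whole STable and over a single sub table:
+
+```sql
+SELECT SPREAD(voltage) FROM meters;
+SELECT SPREAD(voltage) FROM d1001;
+```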
-### ABS
+### STDDEV

-```sql
-SELECT ABS(field_name) FROM { tb_name | stb_name } [WHERE clause]
+```
+SELECT STDDEV(field_name) FROM tb_name [WHERE clause];
```

-**Description**: The absolute of a specific column
+**Description**: Standard deviation of a specific column in a table or STable

-**Return value type**: UBIGINT if the input value is integer; DOUBLE if the input value is FLOAT/DOUBLE
+**Return value type**: Double precision floating number

-**Applicable data types**: Data types except for timestamp, binary, nchar, bool
+**Applicable column types**: Numeric types

**Applicable table types**: table, STable

-**Applicable nested query**: Inner query and Outer query
+### SUM

-**Applicable versions**: From 2.6.0.0
+```
+SELECT SUM(field_name) FROM tb_name [WHERE clause];
+```

-**More explanations**:
+**Description**: The sum of a specific column in a table or STable

-- Can't be used with tags
-- Can't be used with aggregate functions
+**Return value type**: Double precision floating number or long integer

-### SQRT
+**Applicable column types**: Numeric types
+
+**Applicable table types**: table, STable
+
+### HYPERLOGLOG

-```sql
-SELECT SQRT(field_name) FROM { tb_name | stb_name } [WHERE clause]
+```
+SELECT HYPERLOGLOG(field_name) FROM { tb_name | stb_name } [WHERE clause];
```

-**Description**: The square root of a specific column
+**Description**: The cardinal number (number of distinct values) of a specific column, estimated with the hyperloglog algorithm.

-**Return value type**: Double if the input value is not NULL; or NULL if the input value is NULL
+**Return value type**: Integer

-**Applicable data types**: Data types except for timestamp, binary, nchar, bool
+**Applicable column types**: Any data type

-**Applicable table types**: table, STable
+**More explanations**: The benefit of using the hyperloglog algorithm is that the memory usage is under control when the data volume is huge. However, when the data volume is very small, the result may not be accurate; it's recommended to use `select count(data) from (select unique(col) as data from table)` in this case.

-**Applicable nested query**: Inner query and Outer query
+### HISTOGRAM

-**Applicable versions**: From 2.6.0.0
+```
+SELECT HISTOGRAM(field_name, bin_type, bin_description, normalized) FROM tb_name [WHERE clause];
+```

-**More explanations**:
+**Description**: Returns the count of data points in user-specified ranges.

-- Can't be used with tags
-- Can't be used with aggregate functions
+**Return value type**: Double or INT64, depending on the `normalized` parameter setting.

-### CAST
+**Applicable column type**: Numerical types.

-```sql
-SELECT CAST(expression AS type_name) FROM { tb_name | stb_name } [WHERE clause]
-```
+**Applicable table types**: table, STable

-**Description**: It's used for type casting. The input parameter `expression` can be data columns, constants, scalar functions or arithmetic between them. Can't be used with tags, and can only be used in `select` clause.
+**Explanations**:

-**Return value type**: The type specified by parameter `type_name`
+1. bin_type: parameter to indicate the bucket type, valid inputs are: "user_input", "linear_bin", "log_bin".
+2. bin_description: parameter to describe how to generate buckets, and can be in the following JSON formats for each bin_type respectively:

-**Applicable data types**:
+ - "user_input": "[1, 3, 5, 7]": User specified bin values.
-- Parameter `expression` can be any data type except for JSON, more specifically it can be any of BOOL/TINYINT/SMALLINT/INT/BIGINT/FLOAT/DOUBLE/BINARY(M)/TIMESTAMP/NCHAR(M)/TINYINT UNSIGNED/SMALLINT UNSIGNED/INT UNSIGNED/BIGINT UNSIGNED
-- The output data type specified by `type_name` can only be one of BIGINT/BINARY(N)/TIMESTAMP/NCHAR(N)/BIGINT UNSIGNED

+ - "linear_bin": "{"start": 0.0, "width": 5.0, "count": 5, "infinity": true}"
+   "start" - bin starting point.
+   "width" - bin offset.
+   "count" - number of bins generated.
+   "infinity" - whether to add (-inf, inf) as start/end points in the generated set of bins.
+   The above "linear_bin" descriptor generates a set of bins: [-inf, 0.0, 5.0, 10.0, 15.0, 20.0, +inf].

-**Applicable versions**: From 2.6.0.0

+ - "log_bin": "{"start":1.0, "factor": 2.0, "count": 5, "infinity": true}"
+   "start" - bin starting point.
+   "factor" - exponential factor of bin offset.
+   "count" - number of bins generated.
+   "infinity" - whether to add (-inf, inf) as start/end points in the generated range of bins.
+   The above "log_bin" descriptor generates a set of bins: [-inf, 1.0, 2.0, 4.0, 8.0, 16.0, +inf].

-**More explanations**:
+3. normalized: setting to 1/0 to turn on/off result normalization.

-- Error will be reported for unsupported type casting
-- NULL will be returned if the input value is NULL
-- Some values of some supported data types may not be casted, below are known issues:
- 1)When casting BINARY/NCHAR to BIGINT/BIGINT UNSIGNED, some characters may be treated as illegal, for example "a" may be converted to 0.
- 2)There may be overflow when casting singed integer or TIMESTAMP to unsigned BIGINT
- 3)There may be overflow when casting unsigned BIGINT to BIGINT
- 4)There may be overflow when casting FLOAT/DOUBLE to BIGINT or UNSIGNED BIGINT
+## Selector Functions

-### CONCAT
+Selector functions choose one or more rows in the query result set to return according to the semantics. You can specify that the ts column and other columns, including tbname and tags, are output, so that you can easily know which rows the selected values belong to.

-```sql
-SELECT CONCAT(str1|column1, str2|column2, ...) FROM { tb_name | stb_name } [WHERE clause]
+### APERCENTILE
+
+```
+SELECT APERCENTILE(field_name, P[, algo_type])
+FROM { tb_name | stb_name } [WHERE clause]
```

-**Description**: The concatenation result of two or more strings, the number of strings to be concatenated is at least 2 and at most 8
+**Description**: Similar to `PERCENTILE`, but an approximated result is returned

-**Return value type**: Same as the columns being operated, BINARY or NCHAR; or NULL if all the input are NULL
+**Return value type**: Double precision floating point

-**Applicable data types**: The input data must be in either all BINARY or in all NCHAR; can't be used on tag columns
+**Applicable column types**: Numeric types

**Applicable table types**: table, STable

-**Applicable nested query**: Inner query and Outer query

**More explanations**

+- _P_ is in range [0,100]; when _P_ is 0, the result is the same as using function MIN; when _P_ is 100, the result is the same as function MAX.
+- **algo_type** can only be input as `default` or `t-digest`; if it's not specified, `default` will be used, i.e. `apercentile(column_name, 50)` is the same as `apercentile(column_name, 50, "default")`.
+- When `t-digest` is specified, t-digest sampling is used for the calculation.

-**Applicable versions**: From 2.6.0.0
+**Nested query**: It can be used in both the outer query and inner query in a nested query.
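+A minimal sketch of both forms, assuming the sample STable `meters` with a numeric column `current` used elsewhere in this document; per the rules above, the first statement is equivalent to passing "default":
+
+```sql
+SELECT APERCENTILE(current, 50) FROM meters;
+SELECT APERCENTILE(current, 50, "t-digest") FROM meters;
+```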
-### CONCAT_WS
+### BOTTOM

```
-SELECT CONCAT_WS(separator, str1|column1, str2|column2, ...) FROM { tb_name | stb_name } [WHERE clause]
+SELECT BOTTOM(field_name, K) FROM { tb_name | stb_name } [WHERE clause];
```

-**Description**: The concatenation result of two or more strings with separator, the number of strings to be concatenated is at least 3 and at most 9
+**Description**: The least _k_ values of a specific column in a table or STable. If a value has multiple occurrences in the column and including all of them would exceed the upper limit _k_, then a subset of them is returned randomly.

-**Return value type**: Same as the columns being operated, BINARY or NCHAR; or NULL if all the input are NULL
+**Return value type**: Same as the column being operated upon

-**Applicable data types**: The input data must be in either all BINARY or in all NCHAR; can't be used on tag columns
+**Applicable column types**: Numeric types

**Applicable table types**: table, STable

-**Applicable nested query**: Inner query and Outer query
-
-**Applicable versions**: From 2.6.0.0
-
**More explanations**:

-- If the value of `separator` is NULL, the output is NULL. If the value of `separator` is not NULL but other input are all NULL, the output is empty string.
+- _k_ must be in range [1,100]
+- The timestamps associated with the selected values are returned too
+- Can't be used with `FILL`

-### LENGTH
+### FIRST

```
-SELECT LENGTH(str|column) FROM { tb_name | stb_name } [WHERE clause]
+SELECT FIRST(field_name) FROM { tb_name | stb_name } [WHERE clause];
```

-**Description**: The length in bytes of a string
+**Description**: The first non-null value of a specific column in a table or STable

-**Return value type**: Integer
+**Return value type**: Same as the column being operated upon

-**Applicable data types**: BINARY or NCHAR, can't be used on tags
+**Applicable column types**: Any data type

**Applicable table types**: table, STable

-**Applicable nested query**: Inner query and Outer query
-
-**Applicable versions**: From 2.6.0.0
-
-**More explanations**
+**More explanations**:

-- If the input value is NULL, the output is NULL too
+- FIRST(\*) can be used to get the first non-null value of all columns
+- NULL will be returned if all the values of the specified column are NULL
+- No result will be returned if all the columns in the result set are NULL

-### CHAR_LENGTH
+### INTERP

```
-SELECT CHAR_LENGTH(str|column) FROM { tb_name | stb_name } [WHERE clause]
+SELECT INTERP(field_name) FROM { tb_name | stb_name } [WHERE where_condition] [ RANGE(timestamp1,timestamp2) ] [EVERY(interval)] [FILL ({ VALUE | PREV | NULL | LINEAR | NEXT})];
```

-**Description**: The length in number of characters of a string
-
-**Return value type**: Integer
-
-**Applicable data types**: BINARY or NCHAR, can't be used on tags
+**Description**: The value that matches the specified timestamp range is returned, if it exists; otherwise an interpolation value is returned.

-**Applicable table types**: table, STable
+**Return value type**: Same as the column being operated upon

-**Applicable nested query**: Inner query and Outer query
+**Applicable column types**: Numeric data types

-**Applicable versions**: From 2.6.0.0
+**Applicable table types**: table, STable, nested query

**More explanations**

-- If the input value is NULL, the output is NULL too
+- `INTERP` is used to get the value that matches the specified time slice from a column. If no such value exists, an interpolation value will be returned based on the `FILL` parameter.
+- The input data of `INTERP` is the value of the specified column, and a `where` clause can be used to filter the original data. If no `where` condition is specified, then all original data is the input.
+- The output time range of `INTERP` is specified by the `RANGE(timestamp1,timestamp2)` parameter, with timestamp1 <= timestamp2. timestamp1 is the starting point of the output time range and must be specified. timestamp2 is the ending point of the output time range and must be specified. If `RANGE` is not specified, then the timestamp of the first row that matches the filter condition is treated as timestamp1, and the timestamp of the last row that matches the filter condition is treated as timestamp2.
+- The number of rows in the result set of `INTERP` is determined by the parameter `EVERY`. Starting from timestamp1, one interpolation is performed for every time interval specified by the `EVERY` parameter. If the `EVERY` parameter is not used, there is only one time window, starting from timestamp1 with no ending timestamp.
+- Interpolation is performed based on the `FILL` parameter. No interpolation is performed if `FILL` is not used, which means either the original data that matches is returned or nothing is returned.
+- `INTERP` can only be used to interpolate in a single timeline. So it must be used with `group by tbname` when it's used on a STable. It can't be used with `GROUP BY` when it's used in the inner query of a nested query.
+- The result of `INTERP` is not influenced by `ORDER BY TIMESTAMP`, which impacts the output order only.

-### LOWER
+### LAST

```
-SELECT LOWER(str|column) FROM { tb_name | stb_name } [WHERE clause]
+SELECT LAST(field_name) FROM { tb_name | stb_name } [WHERE clause];
```

-**Description**: Convert the input string to lower case
+**Description**: The last non-NULL value of a specific column in a table or STable

-**Return value type**: Same as input
+**Return value type**: Same as the column being operated upon

-**Applicable data types**: BINARY or NCHAR, can't be used on tags
+**Applicable column types**: Any data type

**Applicable table types**: table, STable

-**Applicable nested query**: Inner query and Outer query
-
-**Applicable versions**: From 2.6.0.0
-
-**More explanations**
+**More explanations**:

-- If the input value is NULL, the output is NULL too
+- LAST(\*) can be used to get the last non-NULL value of all columns
+- If the values of a column in the result set are all NULL, NULL is returned for that column; if all columns in the result are all NULL, no result will be returned.
+- When it's used on a STable, if there are multiple values with the same and largest timestamp in the result set, one of them will be returned randomly and it's not guaranteed that the same value is returned if the same query is run multiple times.
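+A short sketch of typical usage, assuming the sample table `d1001` and STable `meters` with a `current` column from earlier examples in this document:
+
+```sql
+SELECT LAST(current) FROM d1001;
+SELECT LAST(*) FROM meters;
+```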
-### UPPER +### LAST_ROW ``` -SELECT UPPER(str|column) FROM { tb_name | stb_name } [WHERE clause] +SELECT LAST_ROW(field_name) FROM { tb_name | stb_name }; ``` -**Description**: Convert the input string to upper case +**Description**: The last row of a table or STable -**Return value type**: Same as input +**Return value type**: Same as the column being operated upon -**Applicable data types**: BINARY or NCHAR, can't be used on tags +**Applicable column types**: Any data type **Applicable table types**: table, STable -**Applicable nested query**: Inner query and Outer query - -**Applicable versions**: From 2.6.0.0 - -**More explanations** +**More explanations**: -- If the input value is NULL, the output is NULL too +- When it's used against a STable, multiple rows with the same and largest timestamp may exist, in this case one of them is returned randomly and it's not guaranteed that the result is same if the query is run multiple times. +- Can't be used with `INTERVAL`. -### LTRIM +### MAX ``` -SELECT LTRIM(str|column) FROM { tb_name | stb_name } [WHERE clause] +SELECT MAX(field_name) FROM { tb_name | stb_name } [WHERE clause]; ``` -**Description**: Remove the left leading blanks of a string +**Description**: The maximum value of a specific column of a table or STable -**Return value type**: Same as input +**Return value type**: Same as the data type of the column being operated upon -**Applicable data types**: BINARY or NCHAR, can't be used on tags +**Applicable column types**: Numeric types **Applicable table types**: table, STable -**Applicable nested query**: Inner query and Outer query - -**Applicable versions**: From 2.6.0.0 - -**More explanations** - -- If the input value is NULL, the output is NULL too - -### RTRIM +### MIN ``` -SELECT RTRIM(str|column) FROM { tb_name | stb_name } [WHERE clause] +SELECT MIN(field_name) FROM {tb_name | stb_name} [WHERE clause]; ``` -**Description**: Remove the right tailing blanks of a string +**Description**: The minimum value of a specific column in a table or STable -**Return value type**: Same as input +**Return value type**: Same as the data type of the column being operated upon -**Applicable data types**: BINARY or NCHAR, can't be used on tags +**Applicable column types**: Numeric types **Applicable table types**: table, STable -**Applicable nested query**: Inner query and Outer query - -**Applicable versions**: From 2.6.0.0 +### PERCENTILE -**More explanations** +``` +SELECT PERCENTILE(field_name, P) FROM { tb_name } [WHERE clause]; +``` -- If the input value is NULL, the output is NULL too +**Description**: The value whose rank in a specific column matches the specified percentage. If such a value matching the specified percentage doesn't exist in the column, an interpolation value will be returned. -### SUBSTR +**Return value type**: Double precision floating point -``` -SELECT SUBSTR(str,pos[,len]) FROM { tb_name | stb_name } [WHERE clause] -``` +**Applicable column types**: Numeric types -**Description**: The sub-string starting from `pos` with length of `len` from the original string `str` +**Applicable table types**: table -**Return value type**: Same as input +**More explanations**: _P_ is in range [0,100], when _P_ is 0, the result is same as using function MIN; when _P_ is 100, the result is same as function MAX. 
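+A hedged illustration, assuming the sample sub table `d1001` with a numeric column `current` used elsewhere in this document (note that `PERCENTILE` is applicable to tables only, so a STable name would not be valid here):
+
+```sql
+SELECT PERCENTILE(current, 95) FROM d1001;
+```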
-**Applicable data types**: BINARY or NCHAR, can't be used on tags
+### TAIL

-**Applicable table types**: table, STable

+```
+SELECT TAIL(field_name, k, offset_val) FROM {tb_name | stb_name} [WHERE clause];
+```

-**Applicable nested query**: Inner query and Outer query

+**Description**: The next _k_ rows are returned after skipping the last `offset_val` rows, NULL values are not ignored. `offset_val` is an optional parameter. When it's not specified, the last _k_ rows are returned. When `offset_val` is used, the effect is the same as `order by ts desc LIMIT k OFFSET offset_val`.

-**Applicable versions**: From 2.6.0.0

+**Parameter value range**: k: [1,100] offset_val: [0,100]

-**More explanations**:

+**Return value type**: Same as the column being operated upon

-- If the input is NULL, the output is NULL
-- Parameter `pos` can be an positive or negative integer; If it's positive, the starting position will be counted from the beginning of the string; if it's negative, the starting position will be counted from the end of the string.
-- If `len` is not specified, it means from `pos` to the end.

+**Applicable column types**: Any data type except for timestamp, i.e. the primary key

-### Arithmetic Operations
+### TOP

```
-SELECT field_name [+|-|*|/|%][Value|field_name] FROM { tb_name | stb_name } [WHERE clause];
+SELECT TOP(field_name, K) FROM { tb_name | stb_name } [WHERE clause];
```

-**Description**: The sum, difference, product, quotient, or remainder between one or more columns
+**Description**: The greatest _k_ values of a specific column in a table or STable. If a value has multiple occurrences in the column and including all of them would exceed the upper limit _k_, then a subset of them is returned randomly.

-**Return value type**: Double precision floating point
+**Return value type**: Same as the column being operated upon

-**Applicable column types**: Data types except for timestamp, binary, nchar, bool
+**Applicable column types**: Numeric types

**Applicable table types**: table, STable

**More explanations**:

-- Arithmetic operations can be performed on two or more columns, `()` can be used to control the precedence
-- NULL doesn't participate the operation, if one of the operands is NULL then result is NULL
+- _k_ must be in range [1,100]
+- The timestamps associated with the selected values are returned too
+- Can't be used with `FILL`

-**Examples**:
+### UNIQUE

```
-taos> SELECT current + voltage * phase FROM d1001;
-(current+(voltage*phase)) |
-============================
- 78.190000713 |
- 84.540003240 |
- 80.810000718 |
-Query OK, 3 row(s) in set (0.001046s)
+SELECT UNIQUE(field_name) FROM {tb_name | stb_name} [WHERE clause];
```

-### STATECOUNT
-
-```
-SELECT STATECOUNT(field_name, oper, val) FROM { tb_name | stb_name } [WHERE clause];
-```
-
+**Description**: The values that occur the first time in the specified column. The effect is similar to the `distinct` keyword, but it can also be used to match tags or timestamp.
+**Applicable column types**: Any data types except for timestamp -**Applicable parameter values**: +**More explanations**: -- oper : Can be one of LT (lower than), GT (greater than), LE (lower than or euqal to), GE (greater than or equal to), NE (not equal to), EQ (equal to), the value is case insensitive -- val : Numeric types +- It can be used against table or STable, but can't be used together with time window, like `interval`, `state_window` or `session_window` . +- Considering the number of result sets is unpredictable, it's suggested to limit the distinct values under 100,000 to control the memory usage, otherwise error will be returned. -**Return value type**: Integer +## Time-Series Specific Functions -**Applicable data types**: Data types excpet for timestamp, binary, nchar, bool +TDengine provides a set of time-series specific functions to better meet the requirements in querying time-series data. In general databases, similar functionalities can only be achieved with much more complex syntax and much worse performance. TDengine provides these functionalities in builtin functions so that the burden on user side is minimized. -**Applicable table types**: table, STable +### CSUM -**Applicable nested query**: Outer query only +```sql + SELECT CSUM(field_name) FROM { tb_name | stb_name } [WHERE clause] +``` -**Applicable versions**: From 2.6.0.0 +**Description**: The cumulative sum of each row for a specific column. The number of output rows is same as that of the input rows. -**More explanations**: +**Return value type**: Long integer for integers; Double for floating points. Timestamp is returned for each row. -- Must be used together with `GROUP BY tbname` when it's used on a STable to force the result into each single timeline] -- Can't be used with window operation, like interval/state_window/session_window +**Applicable data types**: Numeric types -**Examples**: +**Applicable table types**: table, STable -``` -taos> select ts,dbig from statef2; - ts | dbig | -======================================================== -2021-10-15 00:31:33.000000000 | 1 | -2021-10-17 00:31:31.000000000 | NULL | -2021-12-24 00:31:34.000000000 | 2 | -2022-01-01 08:00:05.000000000 | 19 | -2022-01-01 08:00:06.000000000 | NULL | -2022-01-01 08:00:07.000000000 | 9 | -Query OK, 6 row(s) in set (0.002977s) +**Applicable nested query**: Inner query and Outer query -taos> select stateCount(dbig,GT,2) from statef2; -ts | dbig | statecount(dbig,gt,2) | -================================================================================ -2021-10-15 00:31:33.000000000 | 1 | -1 | -2021-10-17 00:31:31.000000000 | NULL | NULL | -2021-12-24 00:31:34.000000000 | 2 | -1 | -2022-01-01 08:00:05.000000000 | 19 | 1 | -2022-01-01 08:00:06.000000000 | NULL | NULL | -2022-01-01 08:00:07.000000000 | 9 | 2 | -Query OK, 6 row(s) in set (0.002791s) -``` +**More explanations**: +- Arithmetic operation can't be performed on the result of `csum` function +- Can only be used with aggregate functions +- `Group by tbname` must be used together on a STable to force the result on a single timeline -### STATEDURATION +### DERIVATIVE ``` -SELECT stateDuration(field_name, oper, val, unit) FROM { tb_name | stb_name } [WHERE clause]; +SELECT DERIVATIVE(field_name, time_interval, ignore_negative) FROM tb_name [WHERE clause]; ``` -**Description**: The length of time range in which all rows satisfy the specified condition for a specific column. The result is shown as an extra column for each row. 
The length for the first row that satisfies the condition is 0. Next, if the condition is evaluated as true for a row, the time interval between current row and its previous row is added up to the time range; otherwise the time range length is reset to -1. If the value of the column is NULL, the corresponding row is skipped.
-
-**Applicable parameter values**:
-
-- oper : Can be one of LT (lower than), GT (greater than), LE (lower than or euqal to), GE (greater than or equal to), NE (not equal to), EQ (equal to), the value is case insensitive
-- val : Numeric types
-- unit: The unit of time interval, can be [1s, 1m, 1h], default is 1s
+**Description**: The derivative of a specific column. The time range can be specified by parameter `time_interval`, the minimum allowed time range is 1 second (1s); the value of `ignore_negative` can be 0 or 1, 1 means negative values are ignored.

-**Return value type**: Integer
+**Return value type**: Double precision floating point

-**Applicable data types**: Data types excpet for timestamp, binary, nchar, bool
+**Applicable column types**: Numeric types

**Applicable table types**: table, STable

-**Applicable nested query**: Outer query only
-
-**Applicable versions**: From 2.6.0.0
-
**More explanations**:

-- Must be used together with `GROUP BY tbname` when it's used on a STable to force the result into each single timeline]
-- Can't be used with window operation, like interval/state_window/session_window
+- The number of result rows is the number of total rows in the time range subtracted by one, no output for the first row.
+- It can be used together with `GROUP BY tbname` against a STable.

-**Examples**:
+### DIFF

```
-taos> select ts,dbig from statef2;
- ts | dbig |
-========================================================
-2021-10-15 00:31:33.000000000 | 1 |
-2021-10-17 00:31:31.000000000 | NULL |
-2021-12-24 00:31:34.000000000 | 2 |
-2022-01-01 08:00:05.000000000 | 19 |
-2022-01-01 08:00:06.000000000 | NULL |
-2022-01-01 08:00:07.000000000 | 9 |
-Query OK, 6 row(s) in set (0.002407s)
-
-taos> select stateDuration(dbig,GT,2) from statef2;
-ts | dbig | stateduration(dbig,gt,2) |
-===================================================================================
-2021-10-15 00:31:33.000000000 | 1 | -1 |
-2021-10-17 00:31:31.000000000 | NULL | NULL |
-2021-12-24 00:31:34.000000000 | 2 | -1 |
-2022-01-01 08:00:05.000000000 | 19 | 0 |
-2022-01-01 08:00:06.000000000 | NULL | NULL |
-2022-01-01 08:00:07.000000000 | 9 | 2 |
-Query OK, 6 row(s) in set (0.002613s)
-```
+```sql
+SELECT {DIFF(field_name, ignore_negative) | DIFF(field_name)} FROM tb_name [WHERE clause];
```

-## Time Functions
+**Description**: The difference of each row from its previous row for a specific column. `ignore_negative` can be specified as 0 or 1, the default value is 1 if it's not specified. `1` means negative values are ignored.

-From version 2.6.0.0, below time related functions can be used in TDengine.
+**Return value type**: Same as the column being operated upon
+**Applicable column types**: Numeric types

-### NOW
+**Applicable table types**: table, STable

-```sql
-SELECT NOW() FROM { tb_name | stb_name } [WHERE clause];
-SELECT select_expr FROM { tb_name | stb_name } WHERE ts_col cond_operatior NOW();
-INSERT INTO tb_name VALUES (NOW(), ...);
+**More explanations**:
+
+- The number of result rows is the number of rows subtracted by one, no output for the first row
+- It can be used on STable with `GROUP by tbname`
+
+### IRATE
+
+```
+SELECT IRATE(field_name) FROM tb_name WHERE clause;
```

-**Description**: The current time of the client side system
+**Description**: Instantaneous rate on a specific column. The last two samples in the specified time range are used to calculate the instantaneous rate. If the last sample value is smaller, then only the last sample value is used instead of the difference between the last two sample values.

-**Return value type**: TIMESTAMP
+**Return value type**: Double precision floating number

-**Applicable column types**: TIMESTAMP only
+**Applicable column types**: Numeric types

**Applicable table types**: table, STable

**More explanations**:

-- Add and Subtract operation can be performed, for example NOW() + 1s, the time unit can be:
- b(nanosecond), u(microsecond), a(millisecond)), s(second), m(minute), h(hour), d(day), w(week)
-- The precision of the returned timestamp is same as the precision set for the current data base in use
+- It can be used on a STable with `GROUP BY`, i.e. timelines generated by `GROUP BY tbname` on a STable.

-**Examples**:
+### MAVG

```sql
-taos> SELECT NOW() FROM meters;
- now() |
-==========================
- 2022-02-02 02:02:02.456 |
-Query OK, 1 row(s) in set (0.002093s)
-
-taos> SELECT NOW() + 1h FROM meters;
- now() + 1h |
-==========================
- 2022-02-02 03:02:02.456 |
-Query OK, 1 row(s) in set (0.002093s)
-
-taos> SELECT COUNT(voltage) FROM d1001 WHERE ts < NOW();
- count(voltage) |
-=============================
- 5 |
-Query OK, 5 row(s) in set (0.004475s)
-
-taos> INSERT INTO d1001 VALUES (NOW(), 10.2, 219, 0.32);
-Query OK, 1 of 1 row(s) in database (0.002210s)
+ SELECT MAVG(field_name, K) FROM { tb_name | stb_name } [WHERE clause]
```

-### TODAY
-
-```sql
-SELECT TODAY() FROM { tb_name | stb_name } [WHERE clause];
-SELECT select_expr FROM { tb_name | stb_name } WHERE ts_col cond_operatior TODAY()];
-INSERT INTO tb_name VALUES (TODAY(), ...);
-```
+**Description**: The moving average of continuous _k_ values of a specific column. If the number of input rows is less than _k_, nothing is returned. The applicable range of _k_ is [1,1000].

-**Description**: The timestamp of 00:00:00 of the client side system
+**Return value type**: Double precision floating point

-**Return value type**: TIMESTAMP
+**Applicable data types**: Numeric types

-**Applicable column types**: TIMESTAMP only
+**Applicable nested query**: Inner query and Outer query

**Applicable table types**: table, STable

**More explanations**:

-- Add and Subtract operation can be performed, for example NOW() + 1s, the time unit can be:
- b(nanosecond), u(microsecond), a(millisecond)), s(second), m(minute), h(hour), d(day), w(week)
-- The precision of the returned timestamp is same as the precision set for the current data base in use
+- Arithmetic operation can't be performed on the result of `MAVG`.
+- Can't be used with aggregate functions.
+- Must be used with `GROUP BY tbname` when it's used on a STable to force the result on each single timeline.
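+A sketch of typical usage under the restrictions above, assuming the sample table `d1001` and STable `meters` with a `current` column used elsewhere in this document; the second statement forces one timeline per sub table:
+
+```sql
+SELECT MAVG(current, 10) FROM d1001;
+SELECT MAVG(current, 10) FROM meters GROUP BY tbname;
+```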
-**Examples**: +### SAMPLE ```sql -taos> SELECT TODAY() FROM meters; - today() | -========================== - 2022-02-02 00:00:00.000 | -Query OK, 1 row(s) in set (0.002093s) - -taos> SELECT TODAY() + 1h FROM meters; - today() + 1h | -========================== - 2022-02-02 01:00:00.000 | -Query OK, 1 row(s) in set (0.002093s) - -taos> SELECT COUNT(voltage) FROM d1001 WHERE ts < TODAY(); - count(voltage) | -============================= - 5 | -Query OK, 5 row(s) in set (0.004475s) - -taos> INSERT INTO d1001 VALUES (TODAY(), 10.2, 219, 0.32); -Query OK, 1 of 1 row(s) in database (0.002210s) + SELECT SAMPLE(field_name, K) FROM { tb_name | stb_name } [WHERE clause] ``` -### TIMEZONE +**Description**: _k_ sampling values of a specific column. The applicable range of _k_ is [1,10000] -```sql -SELECT TIMEZONE() FROM { tb_name | stb_name } [WHERE clause]; -``` +**Return value type**: Same as the column being operated plus the associated timestamp -**Description**: The timezone of the client side system +**Applicable data types**: Any data type except for tags of STable -**Return value type**: BINARY +**Applicable table types**: table, STable -**Applicable column types**: None +**Applicable nested query**: Inner query and Outer query -**Applicable table types**: table, STable +**More explanations**: + +- Arithmetic operation can't be operated on the result of `SAMPLE` function +- Must be used with `Group by tbname` when it's used on a STable to force the result on each single timeline -**Examples**: +### STATECOUNT -```sql -taos> SELECT TIMEZONE() FROM meters; - timezone() | -================================= - UTC (UTC, +0000) | -Query OK, 1 row(s) in set (0.002093s) +``` +SELECT STATECOUNT(field_name, oper, val) FROM { tb_name | stb_name } [WHERE clause]; ``` -### TO_ISO8601 +**Description**: The number of continuous rows satisfying the specified conditions for a specific column. The result is shown as an extra column for each row. If the specified condition is evaluated as true, the number is increased by 1; otherwise the number is reset to -1. If the input value is NULL, then the corresponding row is skipped. 
-```sql -SELECT TO_ISO8601(ts_val | ts_col) FROM { tb_name | stb_name } [WHERE clause]; -``` +**Applicable parameter values**: -**Description**: The ISO8601 date/time format converted from a UNIX timestamp, plus the timezone of the client side system +- oper : Can be one of LT (lower than), GT (greater than), LE (lower than or euqal to), GE (greater than or equal to), NE (not equal to), EQ (equal to), the value is case insensitive +- val : Numeric types -**Return value type**: BINARY +**Return value type**: Integer -**Applicable column types**: TIMESTAMP, constant or a column +**Applicable data types**: Numeric types **Applicable table types**: table, STable -**More explanations**: +**Applicable nested query**: Outer query only -- If the input is UNIX timestamp constant, the precision of the returned value is determined by the digits of the input timestamp -- If the input is a column of TIMESTAMP type, The precision of the returned value is same as the precision set for the current data base in use +**More explanations**: -**Examples**: +- Must be used together with `GROUP BY tbname` when it's used on a STable to force the result into each single timeline] +- Can't be used with window operation, like interval/state_window/session_window -```sql -taos> SELECT TO_ISO8601(1643738400) FROM meters; - to_iso8601(1643738400) | -============================== - 2022-02-02T02:00:00+0800 | +### STATEDURATION -taos> SELECT TO_ISO8601(ts) FROM meters; - to_iso8601(ts) | -============================== - 2022-02-02T02:00:00+0800 | - 2022-02-02T02:00:00+0800 | - 2022-02-02T02:00:00+0800 | +``` +SELECT stateDuration(field_name, oper, val, unit) FROM { tb_name | stb_name } [WHERE clause]; ``` -### TO_UNIXTIMESTAMP +**Description**: The length of time range in which all rows satisfy the specified condition for a specific column. The result is shown as an extra column for each row. The length for the first row that satisfies the condition is 0. Next, if the condition is evaluated as true for a row, the time interval between current row and its previous row is added up to the time range; otherwise the time range length is reset to -1. If the value of the column is NULL, the corresponding row is skipped. 
-```sql -SELECT TO_UNIXTIMESTAMP(datetime_string | ts_col) FROM { tb_name | stb_name } [WHERE clause]; -``` +**Applicable parameter values**: -**Description**: UNIX timestamp converted from a string of date/time format +- oper : Can be one of LT (lower than), GT (greater than), LE (lower than or euqal to), GE (greater than or equal to), NE (not equal to), EQ (equal to), the value is case insensitive +- val : Numeric types +- unit: The unit of time interval, can be [1s, 1m, 1h], default is 1s -**Return value type**: Long integer +**Return value type**: Integer -**Applicable column types**: Constant or column of BINARY/NCHAR +**Applicable data types**: Numeric types **Applicable table types**: table, STable -**More explanations**: +**Applicable nested query**: Outer query only -- The input string must be compatible with ISO8601/RFC3339 standard, 0 will be returned if the string can't be converted -- The precision of the returned timestamp is same as the precision set for the current data base in use +**More explanations**: -**Examples**: +- Must be used together with `GROUP BY tbname` when it's used on a STable to force the result into each single timeline] +- Can't be used with window operation, like interval/state_window/session_window -```sql -taos> SELECT TO_UNIXTIMESTAMP("2022-02-02T02:00:00.000Z") FROM meters; -to_unixtimestamp("2022-02-02T02:00:00.000Z") | -============================================== - 1643767200000 | +### TWA -taos> SELECT TO_UNIXTIMESTAMP(col_binary) FROM meters; - to_unixtimestamp(col_binary) | -======================================== - 1643767200000 | - 1643767200000 | - 1643767200000 | ``` - -### TIMETRUNCATE - -```sql -SELECT TIMETRUNCATE(ts_val | datetime_string | ts_col, time_unit) FROM { tb_name | stb_name } [WHERE clause]; +SELECT TWA(field_name) FROM tb_name WHERE clause; ``` -**Description**: Truncate the input timestamp with unit specified by `time_unit`\ +**Description**: Time weighted average on a specific column within a time range -**Return value type**: TIMESTAMP\ +**Return value type**: Double precision floating number -**Applicable column types**: UNIX timestamp constant, string constant of date/time format, or a column of timestamp +**Applicable column types**: Numeric types **Applicable table types**: table, STable **More explanations**: -- Time unit specified by `time_unit` can be: - 1u(microsecond),1a(millisecond),1s(second),1m(minute),1h(hour),1d(day). -- The precision of the returned timestamp is same as the precision set for the current data base in use - -**Examples**: +- It can be used on stable with `GROUP BY`, i.e. timelines generated by `GROUP BY tbname` on a STable. 
-```sql -taos> SELECT TIMETRUNCATE(1643738522000, 1h) FROM meters; - timetruncate(1643738522000, 1h) | -=================================== - 2022-02-02 02:00:00.000 | -Query OK, 1 row(s) in set (0.001499s) +## System Information Functions -taos> SELECT TIMETRUNCATE("2022-02-02 02:02:02", 1h) FROM meters; - timetruncate("2022-02-02 02:02:02", 1h) | -=========================================== - 2022-02-02 02:00:00.000 | -Query OK, 1 row(s) in set (0.003903s) +### DATABASE -taos> SELECT TIMETRUNCATE(ts, 1h) FROM meters; - timetruncate(ts, 1h) | -========================== - 2022-02-02 02:00:00.000 | - 2022-02-02 02:00:00.000 | - 2022-02-02 02:00:00.000 | -Query OK, 3 row(s) in set (0.003903s) ``` - -### TIMEDIFF - -```sql -SELECT TIMEDIFF(ts_val1 | datetime_string1 | ts_col1, ts_val2 | datetime_string2 | ts_col2 [, time_unit]) FROM { tb_name | stb_name } [WHERE clause]; +SELECT DATABASE(); ``` -**Description**: The difference between two timestamps, and rounded to the time unit specified by `time_unit` +**Description**: Returns the current database in use. If the user doesn't specify a database at login and doesn't use the `USE` SQL command to switch databases, this function returns NULL. -**Return value type**: Long Integer +### CLIENT_VERSION -**Applicable column types**: UNIX timestamp constant, string constant of date/time format, or a column of TIMESTAMP type +``` +SELECT CLIENT_VERSION(); +``` -**Applicable table types**: table, STable +**Description**: Returns the client version. -**More explanations**: +### SERVER_VERSION -- Time unit specified by `time_unit` can be: - 1u(microsecond),1a(millisecond),1s(second),1m(minute),1h(hour),1d(day). -- The precision of the returned timestamp is same as the precision set for the current data base in use +``` +SELECT SERVER_VERSION(); +``` -**Examples**: +**Description**: Returns the server version. + +### SERVER_STATUS -```sql -taos> SELECT TIMEDIFF(1643738400000, 1643742000000) FROM meters; - timediff(1643738400000, 1643742000000) | -========================================= - 3600000 | -Query OK, 1 row(s) in set (0.002553s) -taos> SELECT TIMEDIFF(1643738400000, 1643742000000, 1h) FROM meters; - timediff(1643738400000, 1643742000000, 1h) | -============================================= - 1 | -Query OK, 1 row(s) in set (0.003726s) - -taos> SELECT TIMEDIFF("2022-02-02 03:00:00", "2022-02-02 02:00:00", 1h) FROM meters; - timediff("2022-02-02 03:00:00", "2022-02-02 02:00:00", 1h) | -============================================================= - 1 | -Query OK, 1 row(s) in set (0.001937s) - -taos> SELECT TIMEDIFF(ts_col1, ts_col2, 1h) FROM meters; - timediff(ts_col1, ts_col2, 1h) | -=================================== - 1 | -Query OK, 1 row(s) in set (0.001937s) ``` +SELECT SERVER_STATUS(); +``` + +**Description**: Returns the server's status. diff --git a/docs-en/12-taos-sql/08-interval.md b/docs-en/12-taos-sql/08-interval.md index bf0904458ce5601fa0b9f611f3fcba6106dc5084..acfb0de0e1521fd8c6a068497a3df7a17941524c 100644 --- a/docs-en/12-taos-sql/08-interval.md +++ b/docs-en/12-taos-sql/08-interval.md @@ -3,36 +3,36 @@ sidebar_label: Interval title: Aggregate by Time Window --- -Aggregate by time window is supported in TDengine. For example, each temperature sensor reports the temperature every second, the average temperature every 10 minutes can be retrieved by query with time window. -Window related clauses are used to divide the data set to be queried into subsets and then aggregate.
There are three kinds of windows, time window, status window, and session window. There are two kinds of time windows, sliding window and flip time window. +Aggregation by time window is supported in TDengine. For example, in the case where temperature sensors report the temperature every second, the average temperature for every 10 minutes can be retrieved by performing a query with a time window. +Window related clauses are used to divide the data set to be queried into subsets and then aggregation is performed across the subsets. There are three kinds of windows: time window, status window, and session window. There are two kinds of time windows: sliding window and flip time/tumbling window. ## Time Window -`INTERVAL` clause is used to generate time windows of the same time interval, `SLIDING` is used to specify the time step for which the time window moves forward. The query is performed on one time window each time, and the time window moves forward with time. When defining continuous query both the size of time window and the step of forward sliding time need to be specified. As shown in the figure blow, [t0s, t0e] ,[t1s , t1e], [t2s, t2e] are respectively the time ranges of three time windows on which continuous queries are executed. The time step for which time window moves forward is marked by `sliding time`. Query, filter and aggregate operations are executed on each time window respectively. When the time step specified by `SLIDING` is same as the time interval specified by `INTERVAL`, the sliding time window is actually a flip time window. +The `INTERVAL` clause is used to generate time windows of the same time interval. The `SLIDING` parameter is used to specify the time step for which the time window moves forward. The query is performed on one time window each time, and the time window moves forward with time. When defining a continuous query, both the size of the time window and the step of forward sliding time need to be specified. As shown in the figure below, [t0s, t0e], [t1s, t1e], [t2s, t2e] are respectively the time ranges of three time windows on which continuous queries are executed. The time step for which the time window moves forward is marked by `sliding time`. Query, filter and aggregate operations are executed on each time window respectively. When the time step specified by `SLIDING` is the same as the time interval specified by `INTERVAL`, the sliding time window is actually a flip time/tumbling window. -![Time Window](./timewindow-1.webp) +![TDengine Database Time Window](./timewindow-1.webp) -`INTERVAL` and `SLIDING` should be used with aggregate functions and select functions. Below SQL statement is illegal because no aggregate or selection function is used with `INTERVAL`. +`INTERVAL` and `SLIDING` should be used with aggregate functions and select functions. The SQL statement below is illegal because no aggregate or selection function is used with `INTERVAL`. ``` SELECT * FROM temp_tb_1 INTERVAL(1m); ``` -The time step specified by `SLIDING` can't exceed the time interval specified by `INTERVAL`. Below SQL statement is illegal because the time length specified by `SLIDING` exceeds that specified by `INTERVAL`. +The time step specified by `SLIDING` cannot exceed the time interval specified by `INTERVAL`. The SQL statement below is illegal because the time length specified by `SLIDING` exceeds that specified by `INTERVAL`.
``` SELECT COUNT(*) FROM temp_tb_1 INTERVAL(1m) SLIDING(2m); ``` -When the time length specified by `SLIDING` is the same as that specified by `INTERVAL`, the sliding window is actually a flip window. The minimum time range specified by `INTERVAL` is 10 milliseconds (10a) prior to version 2.1.5.0. From version 2.1.5.0, the minimum time range by `INTERVAL` can be 1 microsecond (1u). However, if the DB precision is millisecond, the minimum time range is 1 millisecond (1a). Please note that the `timezone` parameter should be configured to be the same value in the `taos.cfg` configuration file on client side and server side. +When the time length specified by `SLIDING` is the same as that specified by `INTERVAL`, the sliding window is actually a flip/tumbling window. The minimum time range specified by `INTERVAL` is 10 milliseconds (10a) prior to version 2.1.5.0. Since version 2.1.5.0, the minimum time range specified by `INTERVAL` can be 1 microsecond (1u). However, if the DB precision is millisecond, the minimum time range is 1 millisecond (1a). Please note that the `timezone` parameter should be configured to be the same value in the `taos.cfg` configuration file on client side and server side. ## Status Window -In case of using integer, bool, or string to represent the device status at a moment, the continuous rows with same status belong to same status window. Once the status changes, the status window closes. As shown in the following figure, there are two status windows according to status, [2019-04-28 14:22:07,2019-04-28 14:22:10] and [2019-04-28 14:22:11,2019-04-28 14:22:12]. Status window is not applicable to STable for now. +In case of using integer, bool, or string to represent the status of a device at any given moment, continuous rows with the same status belong to a status window. Once the status changes, the status window closes. As shown in the following figure, there are two status windows according to status, [2019-04-28 14:22:07,2019-04-28 14:22:10] and [2019-04-28 14:22:11,2019-04-28 14:22:12]. Status window is not applicable to STable for now. -![Status Window](./timewindow-3.webp) +![TDengine Database Status Window](./timewindow-3.webp) -`STATE_WINDOW` is used to specify the column based on which to define status window, for example: +`STATE_WINDOW` is used to specify the column on which the status window will be based. For example: ``` SELECT COUNT(*), FIRST(ts), status FROM temp_tb_1 STATE_WINDOW(status); @@ -44,9 +44,9 @@ SELECT COUNT(*), FIRST(ts), status FROM temp_tb_1 STATE_WINDOW(status); SELECT COUNT(*), FIRST(ts) FROM temp_tb_1 SESSION(ts, tol_val); ``` -The primary key, i.e. timestamp, is used to determine which session window the row belongs to. If the time interval between two adjacent rows is within the time range specified by `tol_val`, they belong to the same session window; otherwise they belong to two different time windows. As shown in the figure below, if the limit of time interval for the session window is specified as 12 seconds, then the 6 rows in the figure constitutes 2 time windows, [2019-04-28 14:22:10,2019-04-28 14:22:30] and [2019-04-28 14:23:10,2019-04-28 14:23:30], because the time difference between 2019-04-28 14:22:30 and 2019-04-28 14:23:10 is 40 seconds, which exceeds the time interval limit of 12 seconds. +The primary key, i.e. timestamp, is used to determine which session window a row belongs to.
If the time interval between two adjacent rows is within the time range specified by `tol_val`, they belong to the same session window; otherwise they belong to two different session windows. As shown in the figure below, if the limit of time interval for the session window is specified as 12 seconds, then the 6 rows in the figure constitute 2 session windows, [2019-04-28 14:22:10,2019-04-28 14:22:30] and [2019-04-28 14:23:10,2019-04-28 14:23:30], because the time difference between 2019-04-28 14:22:30 and 2019-04-28 14:23:10 is 40 seconds, which exceeds the time interval limit of 12 seconds. -![Session Window](./timewindow-2.webp) +![TDengine Database Session Window](./timewindow-2.webp) If the time interval between two continuous rows are within the time interval specified by `tol_value` they belong to the same session window; otherwise a new session window is started automatically. Session window is not supported on STable for now. @@ -73,7 +73,7 @@ SELECT function_list FROM stb_name ### Restrictions -- Aggregate functions and select functions can be used in `function_list`, with each function having only one output, for example COUNT, AVG, SUM, STDDEV, LEASTSQUARES, PERCENTILE, MIN, MAX, FIRST, LAST. Functions having multiple output can't be used, for example DIFF or arithmetic operations. +- Aggregate functions and select functions can be used in `function_list`, with each function having only one output. For example: COUNT, AVG, SUM, STDDEV, LEASTSQUARES, PERCENTILE, MIN, MAX, FIRST, LAST. Functions having multiple outputs, such as DIFF or arithmetic operations, can't be used. - `LAST_ROW` can't be used together with window aggregate. - Scalar functions, like CEIL/FLOOR, can't be used with window aggregate. - `WHERE` clause can be used to specify the starting and ending time and other filter conditions @@ -87,8 +87,8 @@ SELECT function_list FROM stb_name :::info -1. Huge volume of interpolation output may be returned using `FILL`, so it's recommended to specify the time range when using `FILL`. The maximum interpolation values that can be returned in single query is 10,000,000. -2. The result set is in ascending order of timestamp in aggregate by time window aggregate. +1. A huge volume of interpolation output may be returned using `FILL`, so it's recommended to specify the time range when using `FILL`. The maximum number of interpolation values that can be returned in a single query is 10,000,000. +2. The result set is in ascending order of timestamp when you aggregate by time window. 3. If aggregate by window is used on STable, the aggregate function is performed on all the rows matching the filter conditions. If `GROUP BY` is not used in the query, the result set will be returned in ascending order of timestamp; otherwise the result set is not exactly in the order of ascending timestamp in each group. ::: @@ -97,13 +97,13 @@ Aggregate by time window is also used in continuous query, please refer to [Cont ## Examples -The table of intelligent meters can be created by the SQL statement below: +A table of intelligent meters can be created by the SQL statement below: ```sql CREATE TABLE meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT); ``` -The average current, maximum current and median of current in every 10 minutes for the past 24 hours can be calculated using the below SQL statement, with missing values filled with the previous non-NULL values.
+The average current, maximum current and median of current in every 10 minutes for the past 24 hours can be calculated using the SQL statement below, with missing values filled with the previous non-NULL values. ``` SELECT AVG(current), MAX(current), APERCENTILE(current, 50) FROM meters diff --git a/docs-en/12-taos-sql/09-limit.md b/docs-en/12-taos-sql/09-limit.md index b987cbcb7886dd35d4fbfefb945d8f36f8d4f399..db55cdd69e7bd29ca66ee15b61f28991568d9556 100644 --- a/docs-en/12-taos-sql/09-limit.md +++ b/docs-en/12-taos-sql/09-limit.md @@ -4,8 +4,8 @@ title: Limits & Restrictions ## Naming Rules -1. Only English characters, digits and underscore are allowed -2. Can't start with a digit +1. Only characters from the English alphabet, digits and underscore are allowed +2. Names cannot start with a digit 3. Case insensitive without escape character "\`" 4. Identifier with escape character "\`" To support more flexible table or column names, a new escape character "\`" is introduced. For more details please refer to [escape](/taos-sql/escape). @@ -16,38 +16,38 @@ The legal character set is `[a-zA-Z0-9!?$%^&*()_–+={[}]:;@~#|<,>.?/]`. ## General Limits -- Maximum length of database name is 32 bytes -- Maximum length of table name is 192 bytes, excluding the database name prefix and the separator -- Maximum length of each data row is 48K bytes from version 2.1.7.0 , before which the limit is 16K bytes. Please note that the upper limit includes the extra 2 bytes consumed by each column of BINARY/NCHAR type. -- Maximum of column name is 64. +- Maximum length of database name is 32 bytes. +- Maximum length of table name is 192 bytes, excluding the database name prefix and the separator. +- Maximum length of each data row is 48K bytes since version 2.1.7.0, before which the limit was 16K bytes. Please note that the upper limit includes the extra 2 bytes consumed by each column of BINARY/NCHAR type. +- Maximum length of column name is 64. - Maximum number of columns is 4096. There must be at least 2 columns, and the first column must be timestamp. - Maximum length of tag name is 64. - Maximum number of tags is 128. There must be at least 1 tag. The total length of tag values should not exceed 16K bytes. -- Maximum length of singe SQL statement is 1048576, i.e. 1 MB bytes. It can be configured in the parameter `maxSQLLength` in the client side, the applicable range is [65480, 1048576]. -- At most 4096 columns (or 1024 prior to 2.1.7.0) can be returned by `SELECT`, functions in the query statement may constitute columns. Error will be returned if the limit is exceeded. -- Maximum numbers of databases, STables, tables are only depending on the system resources. +- Maximum length of a single SQL statement is 1048576 bytes, i.e. 1 MB. It can be configured in the parameter `maxSQLLength` on the client side; the applicable range is [65480, 1048576]. +- At most 4096 columns (or 1024 prior to 2.1.7.0) can be returned by `SELECT`. Functions in the query statement may constitute columns. An error is returned if the limit is exceeded. +- Maximum numbers of databases, STables, tables are dependent only on the system resources. - Maximum of database name is 32 bytes, and it can't include "." or special characters. -- Maximum replica number of database is 3 -- Maximum length of user name is 23 bytes -- Maximum length of password is 15 bytes -- Maximum number of rows depends on the storage space only. -- Maximum number of tables depends on the number of nodes only. -- Maximum number of databases depends on the number of nodes only.
-- Maximum number of vnodes for single database is 64. +- Maximum number of replicas for a database is 3. +- Maximum length of user name is 23 bytes. +- Maximum length of password is 15 bytes. +- Maximum number of rows depends only on the storage space. +- Maximum number of tables depends only on the number of nodes. +- Maximum number of databases depends only on the number of nodes. +- Maximum number of vnodes for a single database is 64. ## Restrictions of `GROUP BY` -`GROUP BY` can be performed on tags and `TBNAME`. It can be performed on data columns too, with one restriction that only one column and the number of unique values on that column is lower than 100,000. Please note that `GROUP BY` can't be performed on float or double types. +`GROUP BY` can be performed on tags and `TBNAME`. It can be performed on data columns too, with the only restriction being that it can be performed on only one data column, whose number of unique values must be lower than 100,000. Please note that `GROUP BY` cannot be performed on float or double types. ## Restrictions of `IS NOT NULL` -`IS NOT NULL` can be used on any data type of columns. The non-empty string evaluation expression, i.e. `<\>""` can only be used on non-numeric data types. +`IS NOT NULL` can be used on columns of any data type. The non-empty string evaluation expression, i.e. `<> ""`, can only be used on non-numeric data types. ## Restrictions of `ORDER BY` - Only one `order by` is allowed for normal table and subtable. - At most two `order by` are allowed for STable, and the second one must be `ts`. -- `order by tag` must be used with `group by tag` on same tag, this rule is also applicable to `tbname`. +- `order by tag` must be used with `group by tag` on the same tag. This rule is also applicable to `tbname`. - `order by column` must be used with `group by column` or `top/bottom` on same column. This rule is applicable to table and STable. - `order by ts` is applicable to table and STable. - If `order by ts` is used with `group by`, the result set is sorted using `ts` in each group. @@ -56,7 +56,7 @@ The legal character set is `[a-zA-Z0-9!?$%^&*()_–+={[}]:;@~#|<,>.?/]`. ### Name Restrictions of Table/Column -The name of a table or column can only be composed of ASCII characters, digits and underscore, while it can't start with a digit. The maximum length is 192 bytes. Names are case insensitive. The name mentioned in this rule doesn't include the database name prefix and the separator. +The name of a table or column can only be composed of ASCII characters, digits and underscore, and it cannot start with a digit. The maximum length is 192 bytes. Names are case insensitive. The name mentioned in this rule doesn't include the database name prefix and the separator. ### Name Restrictions After Escaping diff --git a/docs-en/12-taos-sql/10-json.md b/docs-en/12-taos-sql/10-json.md index abe6649330618eb3df45f5bed03335a65f93a434..7460a5e0ba3ce78ee7744569cda460c477cac19c 100644 --- a/docs-en/12-taos-sql/10-json.md +++ b/docs-en/12-taos-sql/10-json.md @@ -4,7 +4,7 @@ title: JSON Type ## Syntax -1. Tag of JSON type +1. Tag of type JSON ```sql create STable s1 (ts timestamp, v1 int) tags (info json); @@ -12,7 +12,7 @@ title: JSON Type create table s1_1 using s1 tags ('{"k1": "v1"}'); ``` -2. -> Operator of JSON +2. "->" Operator of JSON ```sql select * from s1 where info->'k1' = 'v1'; @@ -20,7 +20,7 @@ title: JSON Type select info->'k1' from s1; ``` -3. contains Operator of JSON +3.
"contains" Operator of JSON ```sql select * from s1 where info contains 'k2'; @@ -30,7 +30,7 @@ title: JSON Type ## Applicable Operations -1. When JSON data type is used in `where`, `match/nmatch/between and/like/and/or/is null/is no null` can be used but `in` can't be used. +1. When a JSON data type is used in `where`, `match/nmatch/between and/like/and/or/is null/is no null` can be used but `in` can't be used. ```sql select * from s1 where info->'k1' match 'v*'; @@ -42,9 +42,9 @@ title: JSON Type select * from s1 where info->'k1' is not null; ``` -2. Tag of JSON type can be used in `group by`, `order by`, `join`, `union all` and sub query, for example `group by json->'key'` +2. A tag of JSON type can be used in `group by`, `order by`, `join`, `union all` and sub query; for example `group by json->'key'` -3. `Distinct` can be used with tag of JSON type +3. `Distinct` can be used with a tag of type JSON ```sql select distinct info->'k1' from s1; @@ -52,9 +52,9 @@ title: JSON Type 4. Tag Operations - The value of JSON tag can be altered. Please note that the full JSON will be overriden when doing this. + The value of a JSON tag can be altered. Please note that the full JSON will be overriden when doing this. - The name of JSON tag can be altered. A tag of JSON type can't be added or removed. The column length of a JSON tag can't be changed. + The name of a JSON tag can be altered. A tag of JSON type can't be added or removed. The column length of a JSON tag can't be changed. ## Other Restrictions @@ -64,17 +64,17 @@ title: JSON Type - JSON format: - - The input string for JSON can be empty, i.e. "", "\t", or NULL, but can't be non-NULL string, bool or array. - - object can be {}, and the whole JSON is empty if so. Key can be "", and it's ignored if so. - - value can be int, double, string, boll or NULL, can't be array. Nesting is not allowed, that means value can't be another JSON. + - The input string for JSON can be empty, i.e. "", "\t", or NULL, but it can't be non-NULL string, bool or array. + - object can be {}, and the entire JSON is empty if so. Key can be "", and it's ignored if so. + - value can be int, double, string, bool or NULL, and it can't be an array. Nesting is not allowed which means that the value of a key can't be JSON. - If one key occurs twice in JSON, only the first one is valid. - Escape characters are not allowed in JSON. -- NULL is returned if querying a key that doesn't exist in JSON. +- NULL is returned when querying a key that doesn't exist in JSON. - If a tag of JSON is the result of inner query, it can't be parsed and queried in the outer query. -For example, the below SQL statements are not supported. +For example, the SQL statements below are not supported. ```sql; select jtag->'key' from (select jtag from STable); diff --git a/docs-en/12-taos-sql/12-keywords.md b/docs-en/12-taos-sql/12-keywords.md index fa750300b71251e1172dba13f91d05822f9ac1f4..ed0c96b4e4d94dd70da1c3778f4129bd34daed62 100644 --- a/docs-en/12-taos-sql/12-keywords.md +++ b/docs-en/12-taos-sql/12-keywords.md @@ -46,3 +46,45 @@ There are about 200 keywords reserved by TDengine, they can't be used as the nam | CONNECTIONS | HAVING | NOT | SOFFSET | VNODES | | CONNS | ID | NOTNULL | STable | WAL | | COPY | IF | NOW | STableS | WHERE | +| _C0 | _QSTART | _QSTOP | _QDURATION | _WSTART | +| _WSTOP | _WDURATION | _ROWTS | + +## Explanations +### TBNAME +`TBNAME` can be considered as a special tag, which represents the name of the subtable, in a STable. 
+ +Get the table names and tag values of all subtables in a STable: +```mysql +SELECT TBNAME, location FROM meters; +``` + +Count the number of subtables in a STable: +```mysql +SELECT COUNT(TBNAME) FROM meters; +``` + +In the above two query statements, only filters on tags can be used in the WHERE clause. +```mysql +taos> SELECT TBNAME, location FROM meters; + tbname | location | +================================================================== + d1004 | California.SanFrancisco | + d1003 | California.SanFrancisco | + d1002 | California.LosAngeles | + d1001 | California.LosAngeles | +Query OK, 4 row(s) in set (0.000881s) + +taos> SELECT COUNT(tbname) FROM meters WHERE groupId > 2; + count(tbname) | +======================== + 2 | +Query OK, 1 row(s) in set (0.001091s) +``` +### _QSTART/_QSTOP/_QDURATION +The start, stop and duration of a query time window. + +### _WSTART/_WSTOP/_WDURATION +The start, stop and duration of an aggregate query by time window, such as interval, session window and state window. + +### _c0/_ROWTS +_c0 is equal to _ROWTS; both represent the first column of a table or STable. diff --git a/docs-en/12-taos-sql/13-operators.md b/docs-en/12-taos-sql/13-operators.md new file mode 100644 index 0000000000000000000000000000000000000000..0ca9ec49430a66384400bc41cd08562b3d5d28c7 --- /dev/null +++ b/docs-en/12-taos-sql/13-operators.md @@ -0,0 +1,66 @@ +--- +sidebar_label: Operators +title: Operators +--- + +## Arithmetic Operators + +| # | **Operator** | **Data Types** | **Description** | +| --- | :----------: | -------------- | --------------------------------------------------------- | +| 1 | +, - | Numeric Types | Representing positive or negative numbers, unary operator | +| 2 | +, - | Numeric Types | Addition and subtraction, binary operator | +| 3 | \*, / | Numeric Types | Multiplication and division, binary operator | +| 4 | % | Numeric Types | Taking the remainder, binary operator | + +## Bitwise Operators + +| # | **Operator** | **Data Types** | **Description** | +| --- | :----------: | -------------- | ----------------------------- | +| 1 | & | Numeric Types | Bitwise AND, binary operator | +| 2 | \| | Numeric Types | Bitwise OR, binary operator | + +## JSON Operator + +The `->` operator can be used to get the value of a key in a column of JSON type. The left operand is the column name and the right operand is a string constant. For example, `col->'name'` returns the value of the key `'name'`. + +## Set Operator + +Set operators are used to combine the results of two queries into a single result. A query including set operators is called a combined query. In a combined query, the number of columns in each result set must be the same, and the column types are determined by the first query's result; the types in the following queries' results must be convertible to the types of the first query's result, following the same conversion rules as the `CAST` function. + +TDengine provides 2 set operators: `UNION ALL` and `UNION`. `UNION ALL` combines the results without removing duplicate data. `UNION` combines the results and removes duplicate data rows. In a single SQL statement, at most 100 set operators can be used.
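+As a brief illustration of set operators, the sketch below assumes the `meters` subtables `d1001` and `d1002` described in this documentation; since both share the same schema, the column counts and types of the two results match:
+
+```sql
+-- Combine rows from two subtables, keeping duplicates
+SELECT ts, current FROM d1001
+UNION ALL
+SELECT ts, current FROM d1002;
+```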
+ +## Comparison Operator + +| # | **Operator** | **Data Types** | **Description** | +| --- | :---------------: | ------------------------------------------------------------------- | ----------------------------------------------- | +| 1 | = | Except for BLOB, MEDIUMBLOB and JSON | Equal | +| 2 | <\>, != | Except for BLOB, MEDIUMBLOB, JSON and primary key of timestamp type | Not equal | +| 3 | \>, < | Except for BLOB, MEDIUMBLOB and JSON | Greater than, less than | +| 4 | \>=, <= | Except for BLOB, MEDIUMBLOB and JSON | Greater than or equal to, less than or equal to | +| 5 | IS [NOT] NULL | Any types | Is NULL or NOT | +| 6 | [NOT] BETWEEN AND | Except for BLOB, MEDIUMBLOB and JSON | In a value range or not | +| 7 | IN | Except for BLOB, MEDIUMBLOB, JSON and primary key of timestamp type | In a list of values or not | +| 8 | LIKE | BINARY, NCHAR and VARCHAR | Wildcard matching | +| 9 | MATCH, NMATCH | BINARY, NCHAR and VARCHAR | Regular expression matching | +| 10 | CONTAINS | JSON | If a key exists in JSON | + +The `LIKE` operator uses wildcards to match a string. The rules are: + +- '%' matches 0 to any number of characters; '\_' matches any single ASCII character. +- `\_` can be used to match a literal `_` in the string, i.e. the backslash `\` is used as the escape character. +- A wildcard string is 100 bytes at most. The longer the wildcard string, the worse the performance of the LIKE operator. + +The `MATCH` and `NMATCH` operators use regular expressions to match a string. The rules are: + +- Regular expressions of POSIX standard are supported. +- Only `tbname`, i.e. the table name of subtables, and tag columns of string types can be matched with a regular expression; data columns are not supported. +- A regular expression string is 128 bytes at most. This limit can be adjusted by setting the parameter `maxRegexStringLen`, which is a client-side configuration and requires a client restart to take effect. + +## Logical Operators + +| # | **Operator** | **Data Types** | **Description** | +| --- | :----------: | -------------- | ---------------------------------------------------------------------------------------- | +| 1 | AND | BOOL | Logical AND, return TRUE if both conditions are TRUE; return FALSE if any one is FALSE. | +| 2 | OR | BOOL | Logical OR, return TRUE if any condition is TRUE; return FALSE if both are FALSE | + +TDengine uses short-circuit optimization when performing logical operations. For the AND operator, if the first condition is evaluated to FALSE, the second one is not evaluated. For the OR operator, if the first condition is evaluated to TRUE, the second one is not evaluated. diff --git a/docs-en/12-taos-sql/index.md b/docs-en/12-taos-sql/index.md index 32850e8c4b0a816cae94563079c79b94c8611bd5..33656338a7bba38dc55cf536bdba8e95309c5acf 100644 --- a/docs-en/12-taos-sql/index.md +++ b/docs-en/12-taos-sql/index.md @@ -3,11 +3,9 @@ title: TDengine SQL description: "The syntax supported by TDengine SQL " --- -This section explains the syntax to operating databases, tables, STables, inserting data, selecting data, functions and some tips that can be used in TDengine SQL. It would be easier to understand with some fundamental knowledge of SQL. +This section explains the syntax of SQL to perform operations on databases, tables and STables, insert data, select data and use functions. We also provide some tips that can be used in TDengine SQL. If you have previous experience with SQL, this section will be fairly easy to understand.
If you do not have previous experience with SQL, you'll come to appreciate the simplicity and power of SQL. -TDengine SQL is the major interface for users to write data into or query from TDengine. For users to easily use, syntax similar to standard SQL is provided. However, please note that TDengine SQL is not standard SQL. For instance, TDengine doesn't provide the functionality of deleting time series data, thus corresponding statements are not provided in TDengine SQL. - -TDengine SQL doesn't support abbreviation for keywords, for example `DESCRIBE` can't be abbreviated as `DESC`. +TDengine SQL is the major interface for users to write data into or query from TDengine. For ease of use, the syntax is similar to that of standard SQL. However, please note that TDengine SQL is not standard SQL. For instance, TDengine doesn't provide a delete function for time series data and so corresponding statements are not provided in TDengine SQL. Syntax Specifications used in this chapter: @@ -16,7 +14,7 @@ Syntax Specifications used in this chapter: - | means one of a few options, excluding | itself. - … means the item prior to it can be repeated multiple times. -To better demonstrate the syntax, usage and rules of TAOS SQL, hereinafter it's assumed that there is a data set of meters. Assuming each meter collects 3 data measurements: current, voltage, phase. The data model is shown below: +To better demonstrate the syntax, usage and rules of TAOS SQL, hereinafter it's assumed that there is a data set collected from electric meters. Each meter collects 3 data measurements: current, voltage, phase. The data model is shown below: ```sql taos> DESCRIBE meters; @@ -30,4 +28,4 @@ taos> DESCRIBE meters; groupid | INT | 4 | TAG | ``` -The data set includes the data collected by 4 meters, the corresponding table name is d1001, d1002, d1003, d1004 respectively based on the data model of TDengine. +The data set includes data collected by 4 meters; the corresponding table names are d1001, d1002, d1003 and d1004, based on the data model of TDengine. diff --git a/docs-en/13-operation/01-pkg-install.md b/docs-en/13-operation/01-pkg-install.md index 8dd6de34280ee3702bc955d00dfb24fcb73e940e..c098002962d62aa0acc7a94462c052303cb2ed90 100644 --- a/docs-en/13-operation/01-pkg-install.md +++ b/docs-en/13-operation/01-pkg-install.md @@ -6,7 +6,7 @@ description: Install, Uninstall, Start, Stop and Upgrade import Tabs from "@theme/Tabs"; import TabItem from "@theme/TabItem"; -TDengine community version provides dev and rpm packages for users to choose based on the system environment. deb supports Debian, Ubuntu and systems derived from them. rpm supports CentOS, RHEL, SUSE and systems derived from them. Furthermore, tar.gz package is provided for enterprise customers. +TDengine community version provides deb and rpm packages for users to choose from, based on their system environment. The deb package supports Debian, Ubuntu and derivative systems. The rpm package supports CentOS, RHEL, SUSE and derivative systems. Furthermore, a tar.gz package is provided for TDengine Enterprise customers. ## Install @@ -124,7 +124,7 @@ taoskeeper is installed, enable it by `systemctl enable taoskeeper` ``` :::info -Some configuration will be prompted for users to provide when install.sh is executing, the interactive mode can be disabled by executing `./install.sh -e no`. `./install -h` can show all parameters and detailed explanation. +Users will be prompted to enter some configuration information when install.sh is executing.
The interactive mode can be disabled by executing `./install.sh -e no`. `./install.sh -h` can show all parameters with detailed explanation. ::: @@ -132,7 +132,7 @@ Some configuration will be prompted for users to provide when install.sh is exec :::note -When installing on the first node in the cluster, when "Enter FQDN:" is prompted, nothing needs to be provided. When installing on following nodes, when "Enter FQDN:" is prompted, the end point of the first dnode in the cluster can be input if it is already up; or just ignore it and configure later after installation is done. +When installing on the first node in the cluster, at the "Enter FQDN:" prompt, nothing needs to be provided. When installing on subsequent nodes, at the "Enter FQDN:" prompt, you must enter the end point of the first dnode in the cluster if it is already up. You can also just ignore it and configure it later after installation is finished. ::: @@ -181,14 +181,14 @@ taosKeeper is removed successfully! :::note -- It's strongly suggested not to use multiple kinds of installation packages on a single host TDengine -- After deb package is installed, if the installation directory is removed manually so that uninstall or reinstall can't succeed, it can be resolved by cleaning up TDengine package information as in the command below and then reinstalling. +- We strongly recommend against using multiple kinds of TDengine installation packages on a single host. +- After the deb package is installed, if the installation directory is removed manually, uninstall or reinstall will not work. This issue can be resolved by using the command below, which cleans up TDengine package information. You can then reinstall if needed. ```bash $ sudo rm -f /var/lib/dpkg/info/tdengine* ``` -- After rpm package is installed, if the installation directory is removed manually so that uninstall or reinstall can't succeed, it can be resolved by cleaning up TDengine package information as in the command below and then reinstalling. +- After the rpm package is installed, if the installation directory is removed manually, uninstall or reinstall will not work. This issue can be resolved by using the command below, which cleans up TDengine package information. You can then reinstall if needed. ```bash $ sudo rpm -e --noscripts tdengine @@ -219,7 +219,7 @@ lrwxrwxrwx 1 root root 13 Feb 22 09:34 log -> /var/log/taos/ During the installation process: - Configuration directory, data directory, and log directory are created automatically if they don't exist -- The default configuration file is located at /etc/taos/taos.cfg, which is a copy of /usr/local/taos/cfg/taos.cfg if not existing +- The default configuration file is located at /etc/taos/taos.cfg, which is a copy of /usr/local/taos/cfg/taos.cfg - The default data directory is /var/lib/taos, which is a soft link to /usr/local/taos/data - The default log directory is /var/log/taos, which is a soft link to /usr/local/taos/log - The executables at /usr/local/taos/bin are linked to /usr/bin @@ -228,7 +228,7 @@ During the installation process: :::note -- When TDengine is uninstalled, the configuration /etc/taos/taos.cfg, data directory /var/lib/taos, log directory /var/log/taos are kept. They can be deleted manually with caution because data can't be recovered +- When TDengine is uninstalled, the configuration /etc/taos/taos.cfg, data directory /var/lib/taos, log directory /var/log/taos are kept. They can be deleted manually with caution, because data can't be recovered.
Please follow your data integrity, security and backup SOPs before deleting any data. - When reinstalling TDengine, if the default configuration file /etc/taos/taos.cfg exists, it will be kept and the configuration file in the installation package will be renamed to taos.cfg.orig and stored at /usr/local/taos/cfg to be used as configuration sample. Otherwise the configuration file in the installation package will be installed to /etc/taos/taos.cfg and used. ## Start and Stop @@ -263,18 +263,19 @@ Active: inactive (dead) There are two aspects in upgrade operation: upgrade installation package and upgrade a running server. -Upgrading package should follow the steps mentioned previously to first uninstall the old version then install the new version. +To upgrade a package, follow the steps mentioned previously to first uninstall the old version then install the new version. -Upgrading a running server is much more complex. First please check the version number of the old version and the new version. The version number of TDengine consists of 4 sections, only if the first 3 section match can the old version be upgraded to the new version. The steps of upgrading a running server are as below: +Upgrading a running server is much more complex. First please check the version number of the old version and the new version. The version number of TDengine consists of 4 sections; only if the first 3 sections match can the old version be upgraded to the new version. The steps of upgrading a running server are as below: - Stop inserting data -- Make sure all data are persisted into disk +- Make sure all data is persisted to disk +- Run some simple queries (such as the total rows in STables and tables; note down the values, and follow best practices and relevant SOPs) - Stop the cluster of TDengine - Uninstall old version and install new version - Start the cluster of TDengine - Make some simple queries to make sure no data loss - Make some simple data insertion to make sure the cluster works well - Restore business data +- Execute simple queries, such as the ones executed prior to installing the new package, to make sure there is no data loss +- Run some simple data insertion statements to make sure the cluster works well +- Restore business services :::warning diff --git a/docs-en/13-operation/02-planning.mdx b/docs-en/13-operation/02-planning.mdx index 4b8ed1f1b893446a521425b9eb1f6ec32b112505..c1baf92dbfa8d93f83174c05c2ea631d1a469739 100644 --- a/docs-en/13-operation/02-planning.mdx +++ b/docs-en/13-operation/02-planning.mdx @@ -2,17 +2,17 @@ title: Resource Planning --- -The computing and storage resources need to be planned if using TDengine to build an IoT platform. How to plan the CPU, memory and disk required will be described in this chapter. +It is important to plan computing and storage resources if using TDengine to build an IoT, time-series or Big Data platform. How to plan the CPU, memory and disk resources required will be described in this chapter. ## Memory Requirement of Server Side -The number of vgroups created for each database is the same as the number of CPU cores by default and can be configured by parameter `maxVgroupsPerDb`, each vnode in a vgroup stores one replica. Each vnode consumes a fixed size of memory, i.e. `blocks` \* `cache`. Besides, some memory is required for tag values associated with each table. A fixed amount of memory is required for each cluster.
So, the memory required for each DB can be calculated using the formula below: +By default, the number of vgroups created for each database is the same as the number of CPU cores. This can be configured by the parameter `maxVgroupsPerDb`. Each vnode in a vgroup stores one replica. Each vnode consumes a fixed amount of memory, i.e. `blocks` \* `cache`. In addition, some memory is required for tag values associated with each table. A fixed amount of memory is required for each cluster. So, the memory required for each DB can be calculated using the formula below: ``` Database Memory Size = maxVgroupsPerDb * replica * (blocks * cache + 10MB) + numOfTables * (tagSizePerTable + 0.5KB) ``` -For example, assuming the default value of `maxVgroupPerDB` is 64, the default value of `cache` 16M, the default value of `blocks` is 6, there are 100,000 tables in a DB, the replica number is 1, total length of tag values is 256 bytes, the total memory required for this DB is: 64 \* 1 \* (16 \* 6 + 10) + 100000 \* (0.25 + 0.5) / 1000 = 6792M. +For example, assuming the default value of `maxVgroupsPerDb` is 64, the default value of `cache` is 16M, the default value of `blocks` is 6, there are 100,000 tables in a DB, the replica number is 1, total length of tag values is 256 bytes, the total memory required for this DB is: 64 \* 1 \* (16 \* 6 + 10) + 100000 \* (0.25 + 0.5) / 1000 = 6792M. In the real operation of TDengine, we are more concerned about the memory used by each TDengine server process `taosd`. @@ -22,10 +22,10 @@ In the real operation of TDengine, we are more concerned about the memory used b In the above formula: -1. "vnode_memory" of a `taosd` process is the memory used by all vnodes hosted by this `taosd` process. It can be roughly calculated by firstly adding up the total memory of all DBs whose memory usage can be derived according to the formula mentioned previously then dividing by number of dnodes and multiplying the number of replicas. +1. "vnode_memory" of a `taosd` process is the memory used by all vnodes hosted by this `taosd` process. It can be roughly calculated by first adding up the total memory of all DBs, whose memory usage can be derived according to the formula for Database Memory Size mentioned above, then dividing by the number of dnodes and multiplying by the number of replicas. ``` - vnode_memory = sum(Database memory) / number_of_dnodes * replica + vnode_memory = (sum(Database Memory Size) / number_of_dnodes) * replica ``` 2. "mnode_memory" of a `taosd` process is the memory consumed by a mnode. If there is one (and only one) mnode hosted in a `taosd` process, the memory consumed by "mnode" is "0.2KB \* the total number of tables in the cluster". @@ -56,8 +56,8 @@ So, at least 3GB needs to be reserved for such a client. The CPU resources required depend on two aspects: -- **Data Insertion** Each dnode of TDengine can process at least 10,000 insertion requests in one second, while each insertion request can have multiple rows. The computing resource consumed between inserting 1 row one time and inserting 10 rows one time is very small. So, the more the rows to insert one time, the higher the efficiency. Inserting in bach also exposes requirements for the client side which needs to cache rows and insert in batch once the cached rows reaches a threshold. -- **Data Query** High efficiency query is provided in TDengine, but it's hard to estimate the CPU resource required because the queries used in different use cases and the frequency of queries vary significantly.
It can only be verified with the query statements, query frequency, data size to be queried, etc provided by user. +- **Data Insertion** Each dnode of TDengine can process at least 10,000 insertion requests in one second, while each insertion request can have multiple rows. The difference in computing resources consumed between inserting 1 row at a time and inserting 10 rows at a time is very small. So, the more rows that can be inserted at one time, the higher the efficiency. Inserting in batch also imposes requirements on the client side, which needs to cache rows and insert them in batch once the number of cached rows reaches a threshold. +- **Data Query** High efficiency query is provided in TDengine, but it's hard to estimate the CPU resource required because the queries used in different use cases and the frequency of queries vary significantly. It can only be verified with the query statements, query frequency, data size to be queried, and other requirements provided by users. In short, the CPU resource required for data insertion can be estimated but it's hard to do so for query use cases. In real operation, it's suggested to control CPU usage below 50%. If this threshold is exceeded, it's a reminder for system operator to add more nodes in the cluster to expand resources. @@ -71,12 +71,12 @@ Raw DataSize = numOfTables * rowSizePerTable * rowsPerTable For example, there are 10,000,000 meters, while each meter collects data every 15 minutes and the data size of each collection is 128 bytes, so the raw data size of one year is: 10000000 \* 128 \* 24 \* 60 / 15 \* 365 = 44.8512(TB). Assuming compression ratio is 5, the actual disk size is: 44.851 / 5 = 8.97024(TB). -Parameter `keep` can be used to set how long the data will be kept on disk. To further reduce storage cost, multiple storage levels can be enabled in TDengine, with the coldest data stored on the cheapest storage device, and this is transparent to application programs. +Parameter `keep` can be used to set how long the data will be kept on disk. To further reduce storage cost, multiple storage levels can be enabled in TDengine, with the coldest data stored on the cheapest storage device. This is completely transparent to application programs. -To increase the performance, multiple disks can be setup for parallel data reading or data inserting. Please note that an expensive disk array is not necessary because replications are used in TDengine to provide high availability. +To increase performance, multiple disks can be set up for parallel data reading or data inserting. Please note that an expensive disk array is not necessary because replications are used in TDengine to provide high availability. ## Number of Hosts -A host can be either physical or virtual. The total memory, total CPU, total disk required can be estimated according to the formulas mentioned previously. Then, according to the system resources that a single host can provide, assuming all hosts have the same resources, the number of hosts can be derived easily. +A host can be either physical or virtual. The total memory, total CPU, total disk required can be estimated according to the formulae mentioned previously. Then, according to the system resources that a single host can provide, assuming all hosts have the same resources, the number of hosts can be derived easily. **Quick Estimation for CPU, Memory and Disk** Please refer to [Resource Estimate](https://www.taosdata.com/config/config.html).
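+As a worked cross-check of the disk-size example in the Disk Requirement section above (using decimal units, as in the text):
+
+```
+rows per meter per year = 24 * 60 / 15 * 365 = 35040
+Raw DataSize = 10000000 * 128 * 35040 bytes = 44.8512 TB
+Disk Size = 44.8512 TB / 5 = 8.97024 TB
+```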
diff --git a/docs-en/13-operation/03-tolerance.md b/docs-en/13-operation/03-tolerance.md index 9f74760278cd34a50c232f528549e90842631e18..d4d48d7fcdc2c990b6ea0821e2347c70a809ed79 100644 --- a/docs-en/13-operation/03-tolerance.md +++ b/docs-en/13-operation/03-tolerance.md @@ -7,26 +7,26 @@ title: Fault Tolerance & Disaster Recovery TDengine uses **WAL**, i.e. Write Ahead Log, to achieve fault tolerance and high reliability. -When a data block is received by TDengine, the original data block is first written into WAL. The log in WAL will be deleted only after the data has been written into data files in the database. Data can be recovered from WAL in case the server is stopped abnormally due to any reason and then restarted. +When a data block is received by TDengine, the original data block is first written into WAL. The log in WAL will be deleted only after the data has been written into data files in the database. Data can be recovered from WAL in case the server is stopped abnormally for any reason and then restarted. There are 2 configuration parameters related to WAL: - walLevel: - - 0:wal is disabled; - - 1:wal is enabled without fsync; - - 2:wal is enabled with fsync. -- fsync:only valid when walLevel is set to 2, it specifies the interval of invoking fsync. If set to 0, it means fsync is invoked immediately once WAL is written. + - 0: wal is disabled + - 1: wal is enabled without fsync + - 2: wal is enabled with fsync +- fsync: This parameter is only valid when walLevel is set to 2. It specifies the interval, in milliseconds, of invoking fsync. If set to 0, it means fsync is invoked immediately once WAL is written. -To achieve absolutely no data loss, walLevel needs to be set to 2 and fsync needs to be set to 1. The penalty is the performance of data ingestion downgrades. However, if the concurrent threads of data insertion on the client side can reach a big enough number, for example 50, the data ingestion performance would be still good enough, our verification shows that the drop is only 30% compared to fsync is set to 3,000 milliseconds. +To achieve absolutely no data loss, walLevel should be set to 2 and fsync should be set to 1. There is a performance penalty to the data ingestion rate. However, if the concurrent data insertion threads on the client side can reach a big enough number, for example 50, the data ingestion performance will still be good enough. Our verification shows that the drop is only 30% compared with a configuration where fsync is set to 3,000 milliseconds. ## Disaster Recovery -TDengine uses replications to provide high availability and disaster recovery capability. +TDengine uses replication to provide high availability and disaster recovery capability. -TDengine cluster is managed by mnode. To make sure the high availability of mnode, multiple replicas can be configured by the system parameter `numOfMnodes`. The data replication between mnode replicas is performed in a synchronous way to guarantee the metadata consistency. +A TDengine cluster is managed by mnode. To ensure the high availability of mnode, multiple replicas can be configured by the system parameter `numOfMnodes`. The data replication between mnode replicas is performed in a synchronous way to guarantee metadata consistency. -The number of replicas for the time series data in TDengine is associated with each database, there can be a lot of databases in a cluster while each database can be configured with a different number of replicas.
When creating a database, parameter `replica` is used to configure the number of replications. To achieve high availability, `replica` needs to be higher than 1. +The number of replicas for time series data in TDengine is associated with each database. There can be many databases in a cluster and each database can be configured with a different number of replicas. When creating a database, parameter `replica` is used to configure the number of replications. To achieve high availability, `replica` needs to be higher than 1. The number of dnodes in a TDengine cluster must NOT be lower than the number of replicas for any database, otherwise it would fail when trying to create a table. -As long as the dnodes of a TDengine cluster are deployed on different physical machines and the replica number is set to bigger than 1, high availability can be achieved without any other assistance. If dnodes of TDengine cluster are deployed in geographically different data centers, disaster recovery can be achieved too. +As long as the dnodes of a TDengine cluster are deployed on different physical machines and the replica number is higher than 1, high availability can be achieved without any other assistance. For disaster recovery, dnodes of a TDengine cluster should be deployed in geographically different data centers. diff --git a/docs-en/13-operation/08-export.md b/docs-en/13-operation/08-export.md index fa9625a7c5f6b0e6706d726bff410cee647286bb..5780de42faeaedbc1c985ad2aa2f52fe56c76971 100644 --- a/docs-en/13-operation/08-export.md +++ b/docs-en/13-operation/08-export.md @@ -2,11 +2,13 @@ title: Data Export --- -There are two ways of exporting data from a TDengine cluster, one is SQL statement in TDengine CLI, the other one is `taosdump`. +There are two ways of exporting data from a TDengine cluster: +- Using a SQL statement in TDengine CLI +- Using the `taosdump` tool ## Export Using SQL -If you want to export the data of a table or a STable, please execute below SQL statement in TDengine CLI. +If you want to export the data of a table or a STable, please execute the SQL statement below in the TDengine CLI. ```sql select * from <tb_name> >> data.csv; ``` The data of table or STable specified by `tb_name` will be exported into a file named data.csv. ## Export Using taosdump -With `taosdump`, you can choose to export the data of all databases, a database, a table or a STable, you can also choose export the data within a time range, or even only export the schema definition of a table. For the details of using `taosdump` please refer to [Tool for exporting and importing data: taosdump](/reference/taosdump). +With `taosdump`, you can choose to export the data of all databases, a database, a table or a STable. You can also choose to export the data within a time range, or even export only the schema definition of a table. For the details of using `taosdump` please refer to [Tool for exporting and importing data: taosdump](/reference/taosdump). diff --git a/docs-en/13-operation/09-status.md b/docs-en/13-operation/09-status.md index ca8974bb8f4efec4c6d7c87c60b3ca67ad35c613..51396524ea281ae665c9fdf61d2e6e6202995537 100644 --- a/docs-en/13-operation/09-status.md +++ b/docs-en/13-operation/09-status.md @@ -3,7 +3,7 @@ sidebar_label: Connections & Tasks title: Manage Connections and Query Tasks --- -A system operator can use TDengine CLI to show the connections, ongoing queries, stream computing, and can close connection or stop ongoing query task or stream computing.
+A system operator can use the TDengine CLI to show connections, ongoing queries, stream computing, and can close connections or stop ongoing query tasks or stream computing. ## Show Connections ```sql SHOW CONNECTIONS; ``` One column of the output of the above SQL command is "ip:port", which is the end point of the client. -## Close Connections Forcedly +## Force Close Connections ```sql KILL CONNECTION <connection-id>; ``` @@ -27,9 +27,9 @@ In the above SQL command, `connection-id` is from the first column of the output SHOW QUERIES; ``` -The first column of the output is query ID, which is composed of the corresponding connection ID and the sequence number of the current query task started on this connection, in format of "connection-id:query-no". +The first column of the output is query ID, which is composed of the corresponding connection ID and the sequence number of the current query task started on this connection. The format is "connection-id:query-no". -## Close Queries Forcedly +## Force Close Queries ```sql KILL QUERY <query-id>; ``` @@ -43,9 +43,9 @@ In the above SQL command, `query-id` is from the first column of the output of ` SHOW STREAMS; ``` -The first column of the output is stream ID, which is composed of the connection ID and the sequence number of the current stream started on this connection, in the format of "connection-id:stream-no". +The first column of the output is stream ID, which is composed of the connection ID and the sequence number of the current stream started on this connection. The format is "connection-id:stream-no". -## Close Continuous Query Forcedly +## Force Close Continuous Query ```sql KILL STREAM <stream-id>; ``` diff --git a/docs-en/13-operation/10-monitor.md b/docs-en/13-operation/10-monitor.md index 615f79ca73f25115f5b4f19863c0f152f4fecf69..a4679983f2bc77bb4e438f5d43fa1b8beb39b120 100644 --- a/docs-en/13-operation/10-monitor.md +++ b/docs-en/13-operation/10-monitor.md @@ -2,13 +2,13 @@ title: TDengine Monitoring --- -After TDengine is started, a database named `log` for monitoring is created automatically. The information about CPU, memory, disk, bandwidth, number of requests, disk I/O speed, slow query is written into `log` database on the basis of a predefined interval. Additionally, some important system operations, like logon, create user, drop database, and alerts and warnings generated in TDengine are written into the `log` database too. A system operator can view the data in `log` database from TDengine CLI or from a web console. +After TDengine is started, a database named `log` is created automatically to help with monitoring. Information including CPU, memory and disk usage, bandwidth, number of requests, disk I/O speed and slow queries is written into the `log` database at a predefined interval. Additionally, some important system operations, like logon, create user, drop database, and alerts and warnings generated in TDengine are written into the `log` database too. A system operator can view the data in `log` database from TDengine CLI or from a web console. The collection of the monitoring information is enabled by default, but can be disabled by parameter `monitor` in the configuration file. ## TDinsight -TDinsight is a complete solution which uses the monitor database `log` mentioned previously and Grafana to monitor a TDengine cluster. +TDinsight is a complete solution which uses the monitoring database `log` mentioned previously, and Grafana, to monitor a TDengine cluster. From version 2.3.3.0, more monitoring data has been added in the `log` database.
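+For example, the monitoring tables collected so far can be listed from the TDengine CLI (a quick sanity check; the exact table names in the `log` database vary across versions):
+
+```sql
+USE log;
+SHOW TABLES;
+```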
Please refer to [TDinsight Grafana Dashboard](https://grafana.com/grafana/dashboards/15167) to learn more details about using TDinsight to monitor TDengine. diff --git a/docs-en/13-operation/17-diagnose.md b/docs-en/13-operation/17-diagnose.md index 53d808ef511b72acbf7cff22dc8c0d5a5b05408e..2b474fddba4af5ba0c29103cd8ab1249d10d055b 100644 --- a/docs-en/13-operation/17-diagnose.md +++ b/docs-en/13-operation/17-diagnose.md @@ -4,13 +4,13 @@ title: Problem Diagnostics ## Network Connection Diagnostics -When the client is unable to access the server, the network connection between the client side and the server side needs to be checked to find out the root cause and resolve problems. +When a TDengine client is unable to access a TDengine server, the network connection between the client side and the server side must be checked to find the root cause and resolve problems. -The diagnostic for network connection can be executed between Linux and Linux or between Linux and Windows. +Diagnostics for network connections can be executed between Linux and Linux or between Linux and Windows. Diagnostic steps: -1. If the port range to be diagnosed are being occupied by a `taosd` server process, please first stop `taosd. +1. If the port range to be diagnosed is being occupied by a `taosd` server process, please first stop `taosd`. 2. On the server side, execute command `taos -n server -P <port> -l <pktlen>` to monitor the port range starting from the port specified by `-P` parameter with the role of "server". 3. On the client side, execute command `taos -n client -h <fqdn of server> -P <port> -l <pktlen>` to send a testing package to the specified server and port. @@ -65,13 +65,13 @@ Output of the client side for the example is below: 12/21 14:50:22.721274 0x7fc95d859200 UTL successed to test UDP port:6011 ``` -The output needs to be checked carefully for the system operator to find out the root cause and solve the problem. +The output needs to be checked carefully for the system operator to find the root cause and resolve the problem. ## Startup Status and RPC Diagnostic -`taos -n startup -h <fqdn of server>` can be used to check the startup status of a `taosd` process. This is a comman task for a system operator to do to determine whether `taosd` has been started successfully, especially in case of cluster. +`taos -n startup -h <fqdn of server>` can be used to check the startup status of a `taosd` process. This is a common task which should be performed by a system operator, especially in the case of a cluster, to determine whether `taosd` has been started successfully. -`taos -n rpc -h <fqdn of server>` can be used to check whether the port of a started `taosd` can be accessed or not. If `taosd` process doesn't respond or is working abnormally, this command can be used to initiate a rpc communication with the specified fqdn to determine whether it's a network problem or `taosd` is abnormal. +`taos -n rpc -h <fqdn of server>` can be used to check whether the port of a started `taosd` can be accessed or not. If `taosd` process doesn't respond or is working abnormally, this command can be used to initiate an rpc communication with the specified fqdn to determine whether it's a network problem or whether `taosd` is abnormal. ## Sync and Arbitrator Diagnostic @@ -80,13 +80,13 @@ taos -n sync -P 6040 -h <fqdn of server> taos -n sync -P 6042 -h <fqdn of server> ``` -The above commands can be executed on Linux Shell to check whether the port for sync is working well and whether the sync module on the server side is working well. Additionally, `-P 6042` is used to check whether the arbitrator is configured properly and is working well.
+The above commands can be executed in a Linux shell to check whether the port for sync is working well and whether the sync module on the server side is working well. Additionally, `-P 6042` is used to check whether the arbitrator is configured properly and is working well. ## Network Speed Diagnostic `taos -n speed -h <fqdn of server> -P 6030 -N 10 -l 10000000 -S TCP` -From version 2.2.0.0, the above command can be executed on Linux Shell to test the network speed, it sends uncompressed package to a running `taosd` server process or a simulated server process started by `taos -n server` to test the network speed. Parameters can be used when testing network speed are as below: +From version 2.2.0.0 onwards, the above command can be executed in a Linux shell to test network speed. The command sends uncompressed packages to a running `taosd` server process or a simulated server process started by `taos -n server` to test the network speed. The parameters that can be used when testing network speed are as below: -n: When set to "speed", it means testing network speed. -h: The FQDN or IP of the server process to be connected to; if not set, the FQDN configured in `taos.cfg` is used. @@ -99,23 +99,23 @@ From version 2.2.0.0, the above command can be executed on Linux Shell to test t `taos -n fqdn -h <fqdn of server>` -From version 2.2.0.0, the above command can be executed on Linux Shell to test the resolution speed of FQDN. It can be used to try to resolve a FQDN to an IP address and record the time spent in this process. The parameters that can be used for this purpose are as below: +From version 2.2.0.0 onward, the above command can be executed in a Linux shell to test the resolution speed of FQDN. It can be used to try to resolve an FQDN to an IP address and record the time spent in this process. The parameters that can be used for this purpose are as below: -n: When set to "fqdn", it means testing the speed of resolving FQDN. -h: The FQDN to be resolved. If not set, the `FQDN` parameter in `taos.cfg` is used by default. ## Server Log -The parameter `debugFlag` is used to control the log level of the `taosd` server process. The default value is 131, for debug purpose it needs to be escalated to 135 or 143. +The parameter `debugFlag` is used to control the log level of the `taosd` server process. The default value is 131. For debugging and tracing, it needs to be set to either 135 or 143 respectively. -Once this parameter is set to 135 or 143, the log file grows very quickly especially when there is a huge volume of data insertion and data query requests. If all the logs are stored together, some important information may be missed very easily, so on server side important information is stored at different place from other logs. +Once this parameter is set to 135 or 143, the log file grows very quickly especially when there is a huge volume of data insertion and data query requests. If all the logs are stored together, some important information may be missed very easily, and so, on the server side, important information is stored in a different place from other logs. - The log at level of INFO, WARNING and ERROR is stored in `taosinfo` so that it is easy to find important information - The log at level of DEBUG (135) and TRACE (143) and other information not handled by `taosinfo` are stored in `taosdlog` ## Client Log -An independent log file, named as "taoslog+<seq num>" is generated for each client program, i.e. a client process.
The default value of `debugFlag` is also 131 and only logs at level of INFO/ERROR/WARNING are recorded, for debugging purposes it needs to be changed to 135 or 143 so that logs at DEBUG or TRACE level can be recorded. +An independent log file, named as "taoslog+<seq num>" is generated for each client program, i.e. a client process. The default value of `debugFlag` is also 131 and only logs at level of INFO/ERROR/WARNING are recorded. As stated above, for debugging and tracing, it needs to be changed to 135 or 143 respectively, so that logs at DEBUG or TRACE level can be recorded. The maximum length of a single log file is controlled by parameter `numOfLogLines` and only 2 log files are kept for each `taosd` server process. diff --git a/docs-en/13-operation/index.md b/docs-en/13-operation/index.md index a9801c0390f294d6b39b1219cc4055149871ef9c..c64749c40e26f091e4a25e0238827ebceff4b069 100644 --- a/docs-en/13-operation/index.md +++ b/docs-en/13-operation/index.md @@ -2,7 +2,7 @@ title: Administration --- -This chapter is mainly written for system administrators, covering download, install/uninstall, data import/export, system monitoring, user management, connection management, etc. Capacity planning and system optimization are also covered. +This chapter is mainly written for system administrators. It covers download, install/uninstall, data import/export, system monitoring, user management, connection management, capacity planning and system optimization. ```mdx-code-block import DocCardList from '@theme/DocCardList'; diff --git a/docs-en/14-reference/02-rest-api/02-rest-api.mdx b/docs-en/14-reference/02-rest-api/02-rest-api.mdx index f405d551e530a37a5221e71a824f605fba0c0db9..990af861961e9daf4ac775462e21d6d9852d17c1 100644 --- a/docs-en/14-reference/02-rest-api/02-rest-api.mdx +++ b/docs-en/14-reference/02-rest-api/02-rest-api.mdx @@ -2,23 +2,23 @@ title: REST API --- -To support the development of various types of platforms, TDengine provides an API that conforms to the REST principle, namely REST API. To minimize the learning cost, different from the other database REST APIs, TDengine directly requests the SQL command contained in the request BODY through HTTP POST to operate the database and only requires a URL. +To support the development of various types of applications and platforms, TDengine provides an API that conforms to REST principles, namely the REST API. To minimize the learning cost, unlike REST APIs for other database engines, TDengine allows insertion of SQL commands in the BODY of an HTTP POST request to operate the database. :::note -One difference from the native connector is that the REST interface is stateless, so the `USE db_name` command has no effect. All references to table names and super table names need to specify the database name prefix. (Since version 2.2.0.0, it is supported to specify db_name in RESTful URL. If the database name prefix is not specified in the SQL command, the `db_name` specified in the URL will be used. Since version 2.4.0.0, REST service is provided by taosAdapter by default. And it requires that the `db_name` must be specified in the URL.) +One difference from the native connector is that the REST interface is stateless and so the `USE db_name` command has no effect. All references to table names and super table names need to specify the database name in the prefix. (Since version 2.2.0.0, TDengine supports specification of the db_name in RESTful URL.
If the database name prefix is not specified in the SQL command, the `db_name` specified in the URL will be used. Since version 2.4.0.0, REST service is provided by taosAdapter by default and it requires that the `db_name` must be specified in the URL.) ::: ## Installation -The REST interface does not rely on any TDengine native library, so the client application does not need to install any TDengine libraries. The client application's development language supports the HTTP protocol is enough. +The REST interface does not rely on any TDengine native library, so the client application does not need to install any TDengine libraries. The client application's development language only needs to support the HTTP protocol. ## Verification If the TDengine server is already installed, it can be verified as follows: -The following is an Ubuntu environment using the `curl` tool (to confirm that it is installed) to verify that the REST interface is working. +The following example is in an Ubuntu environment and uses the `curl` tool to verify that the REST interface is working. Note that the `curl` tool may need to be installed in your environment. -The following example lists all databases, replacing `h1.taosdata.com` and `6041` (the default port) with the actual running TDengine service FQDN and port number. +The following example lists all databases on the host h1.taosdata.com. To use it in your environment, replace `h1.taosdata.com` and `6041` (the default port) with the actual running TDengine service FQDN and port number. ```html curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'show databases;' h1.taosdata.com:6041/rest/sql @@ -89,7 +89,7 @@ For example, `http://h1.taos.com:6041/rest/sql/test` is a URL to `h1.taos.com:60 TDengine supports both Basic authentication and custom authentication mechanisms, and subsequent versions will provide a standard secure digital signature mechanism for authentication. -- The custom authentication information is as follows (Let's introduce token later) +- The custom authentication information is as follows. More details about "token" later. ``` Authorization: Taosd <TOKEN> @@ -136,7 +136,7 @@ The return result is in JSON format, as follows: Description: -- status: tell if the operation result is success or failure. +- status: tells you whether the operation result is success or failure. - head: the definition of the table, or just one column "affected_rows" if no result set is returned. (As of version 2.0.17.0, it is recommended not to rely on the head return value to determine the data column type but rather use column_meta. In later versions, the head item may be removed from the return value.) - column_meta: this item is added to the return value to indicate the data type of each column in the data with version 2.0.17.0 and later versions. Each column is described by three values: column name, column type, and type length. For example, `["current",6,4]` means that the column name is "current", the column type is 6, which is the float type, and the type length is 4, which is the float type with 4 bytes. If the column type is binary or nchar, the type length indicates the maximum length of content stored in the column, not the length of the specific data in this return value. When the column type is nchar, the type length indicates the number of Unicode characters that can be saved, not bytes. - data: The exact data returned, presented row by row, or just [[affected_rows]] if no result set is returned.
The order of the data columns in each row of data is the same as that of the data columns described in column_meta. diff --git a/docs-en/14-reference/03-connector/03-connector.mdx b/docs-en/14-reference/03-connector/03-connector.mdx index 38eba73d0983951901a26eee3962e89007f6d30a..44685579005c2cebd5e0194a10d457cd1199051e 100644 --- a/docs-en/14-reference/03-connector/03-connector.mdx +++ b/docs-en/14-reference/03-connector/03-connector.mdx @@ -4,7 +4,7 @@ title: Connector TDengine provides a rich set of APIs (application development interface). To facilitate users to develop their applications quickly, TDengine supports connectors for multiple programming languages, including official connectors for C/C++, Java, Python, Go, Node.js, C#, and Rust. These connectors support connecting to TDengine clusters using both native interfaces (taosc) and REST interfaces (not supported in a few languages yet). Community developers have also contributed several unofficial connectors, such as the ADO.NET connector, the Lua connector, and the PHP connector. -![image-connector](./connector.webp) +![TDengine Database image-connector](./connector.webp) ## Supported platforms diff --git a/docs-en/14-reference/03-connector/cpp.mdx b/docs-en/14-reference/03-connector/cpp.mdx index 4b388d32a9050645e268bb267d16e9a5b8aa4bda..d549413012d1f17edf4711ae51a56ba5696fcbe3 100644 --- a/docs-en/14-reference/03-connector/cpp.mdx +++ b/docs-en/14-reference/03-connector/cpp.mdx @@ -4,7 +4,7 @@ sidebar_label: C/C++ title: C/C++ Connector --- -C/C++ developers can use TDengine's client driver and the C/C++ connector, to develop their applications to connect to TDengine clusters for data writing, querying, and other functions. To use it, you need to include the TDengine header file _taos.h_, which lists the function prototypes of the provided APIs; the application also needs to link to the corresponding dynamic libraries on the platform where it is located. +C/C++ developers can use TDengine's client driver and the C/C++ connector to develop their applications to connect to TDengine clusters for data writing, querying, and other functions. To use the C/C++ connector you must include the TDengine header file _taos.h_, which lists the function prototypes of the provided APIs. The application also needs to link to the corresponding dynamic libraries on the platform where it is located. ```c #include <taos.h> @@ -26,7 +26,7 @@ Please refer to [list of supported platforms](/reference/connector#supported-pla ## Supported versions -The version number of the TDengine client driver and the version number of the TDengine server require one-to-one correspondence and recommend using the same version of client driver as what the TDengine server version is. Although a lower version of the client driver is compatible to work with a higher version of the server, if the first three version numbers are the same (i.e., only the fourth version number is different), but it is not recommended. It is strongly discouraged to use a higher version of the client driver to access a lower version of the TDengine server. +The version number of the TDengine client driver and the version number of the TDengine server should be the same. A lower version of the client driver is compatible with a higher version of the server, if the first three version numbers are the same (i.e., only the fourth version number is different). For example, if the client version is x.y.z.1 and the server version is x.y.z.2, the client and server are compatible.
But in general we do not recommend using a lower client version with a newer server version. It is also strongly discouraged to use a higher version of the client driver to access a lower version of the TDengine server. ## Installation steps @@ -55,7 +55,7 @@ In the above example code, `taos_connect()` establishes a connection to port 603 :::note -- If not specified, when the return value of the API is an integer, _0_ means success, the others are error codes representing the reason for failure, and when the return value is a pointer, _NULL_ means failure. +- If not specified, when the return value of the API is an integer, _0_ means success. All others are error codes representing the reason for failure. When the return value is a pointer, _NULL_ means failure. - All error codes and their corresponding causes are described in the `taoserror.h` file. ::: @@ -114,7 +114,6 @@ This section shows sample code for standard access methods to TDengine clusters Subscribe and consume ```c -{{#include examples/c/subscribe.c}} ``` @@ -140,13 +139,12 @@ The base API is used to do things like create database connections and provide a - `void taos_cleanup()` - Clean up the runtime environment and should be called before the application exits. + Cleans up the runtime environment and should be called before the application exits. - ` int taos_options(TSDB_OPTION option, const void * arg, ...) ` Set client options, currently supports region setting (`TSDB_OPTION_LOCALE`), character set -(`TSDB_OPTION_CHARSET`), time zone -(`TSDB_OPTION_TIMEZONE`), configuration file path (`TSDB_OPTION_CONFIGDIR`) . The region setting, character set, and time zone default to the current settings of the operating system. +(`TSDB_OPTION_CHARSET`), time zone (`TSDB_OPTION_TIMEZONE`), configuration file path (`TSDB_OPTION_CONFIGDIR`). The region setting, character set, and time zone default to the current settings of the operating system. - `char *taos_get_client_info()` @@ -159,7 +157,7 @@ The base API is used to do things like create database connections and provide a - host: FQDN of any node in the TDengine cluster - user: user name - pass: password - - db: database name, if the user does not provide, it can also be connected correctly, the user can create a new database through this connection, if the user provides the database name, it means that the database user has already created, the default use of the database + - db: the database name. Even if the user does not provide this, the connection will still work correctly. The user can create a new database through this connection. If the user provides the database name, it means that the database has already been created and the connection can be used for regular operations on the database. - port: the port the taosd program is listening on. NULL indicates a failure. The application needs to save the returned parameters for subsequent use. @@ -187,7 +185,7 @@ The APIs described in this subsection are all synchronous interfaces. After bein - `TAOS_RES* taos_query(TAOS *taos, const char *sql)` - Executes an SQL command, either a DQL, DML, or DDL statement. The `taos` parameter is a handle obtained with `taos_connect()`. You can't tell if the result failed by whether the return value is `NULL`, but by parsing the error code in the result set with the `taos_errno()` function. + Executes an SQL command, either a DQL, DML, or DDL statement. The `taos` parameter is a handle obtained with `taos_connect()`. If the return value is `NULL`, this does not necessarily indicate a failure.
You can get the error code, if any, by calling the `taos_errno()` function on the result set. - `int taos_result_precision(TAOS_RES *res)` @@ -231,7 +229,7 @@ typedef struct taosField { - ` void taos_free_result(TAOS_RES *res)` - Frees the query result set and the associated resources. Be sure to call this API to free the resources after the query is completed. Otherwise, it may lead to a memory leak in the application. However, note that the application will crash if you call a function like `taos_consume()` to get the query results after freeing the resources. + Frees the query result set and the associated resources. Be sure to call this API to free the resources after the query is completed. Failing to call this may lead to a memory leak in the application. However, note that the application will crash if you call a function like `taos_consume()` to get the query results after freeing the resources. - `char *taos_errstr(TAOS_RES *res)` @@ -242,7 +240,7 @@ typedef struct taosField { Get the reason for the last API call failure. The return value is the error code. :::note -TDengine version 2.0 and above recommends that each thread of a database application create a separate connection or a connection pool based on threads. It is not recommended to pass the connection (TAOS\*) structure to different threads for shared use in the application. Queries, writes, etc., issued based on TAOS structures are multi-thread safe, but state quantities such as "USE statement" may interfere between threads. In addition, the C connector can dynamically create new database-oriented connections on demand (this procedure is not visible to the user), and it is recommended that `taos_close()` be called only at the final exit of the program to close the connection. +TDengine version 2.0 and above recommends that each thread of a database application create a separate connection or a connection pool based on threads. It is not recommended to pass the connection (TAOS\*) structure to different threads for shared use in the application. Queries, writes, and other operations that are issued based on TAOS structures are multi-thread safe, but state quantities such as the "USE statement" may interfere between threads. In addition, the C connector can dynamically create new database-oriented connections on demand (this procedure is not visible to the user), and it is recommended that `taos_close()` be called only at the final exit of the program to close the connection. ::: @@ -274,12 +272,12 @@ All TDengine's asynchronous APIs use a non-blocking call pattern. Applications c ### Parameter Binding API -In addition to direct calls to `taos_query()` to perform queries, TDengine also provides a set of `bind` APIs that supports parameter binding, similar in style to MySQL, and currently only supports using a question mark `? ` to represent the parameter to be bound. +In addition to direct calls to `taos_query()` to perform queries, TDengine also provides a set of `bind` APIs that supports parameter binding, similar in style to MySQL. TDengine currently only supports using a question mark `?` to represent the parameter to be bound. -Starting with versions 2.1.1.0 and 2.1.2.0, TDengine has significantly improved the bind APIs to support for data writing (INSERT) scenarios. This avoids the resource consumption of SQL syntax parsing when writing data through the parameter binding interface, thus significantly improving write performance in most cases. A typical operation, in this case, is as follows.
+Starting with versions 2.1.1.0 and 2.1.2.0, TDengine has significantly improved the bind APIs to support data writing (INSERT) scenarios. This avoids the resource consumption of SQL syntax parsing when writing data through the parameter binding interface, thus significantly improving write performance in most cases. A typical operation, in this case, is as follows. 1. call `taos_stmt_init()` to create the parameter binding object. -2. call `taos_stmt_prepare()` to parse the INSERT statement. 3. +2. call `taos_stmt_prepare()` to parse the INSERT statement. 3. call `taos_stmt_set_tbname()` to set the table name if it is reserved in the INSERT statement but not the TAGS. 4. call `taos_stmt_set_tbname_tags()` to set the table name and TAGS values if the table name and TAGS are reserved in the INSERT statement (for example, if the INSERT statement takes an automatic table build). 5. call `taos_stmt_bind_param_batch()` to set the value of VALUES in multiple columns, or call `taos_stmt_bind_param()` to set the value of VALUES in a single row. @@ -383,7 +381,7 @@ In addition to writing data using the SQL method or the parameter binding API, w **return value** TAOS_RES structure, application can get error message by using `taos_errstr()` and also error code by using `taos_errno()`. In some cases, the returned TAOS_RES is `NULL`, and it is still possible to call `taos_errno()` to safely get the error code information. - The returned TAOS_RES needs to be freed by the caller. Otherwise, a memory leak will occur. + The returned TAOS_RES needs to be freed by the caller in order to avoid memory leaks. **Description** The protocol type is enumerated and contains the following three formats. @@ -416,13 +414,13 @@ The Subscription API currently supports subscribing to one or more tables and co This function is responsible for starting the subscription service, returning the subscription object on success and `NULL` on failure, with the following parameters. - - taos: the database connection that has been established - - restart: if the subscription already exists, whether to restart or continue the previous subscription - - topic: the topic of the subscription (i.e., the name). This parameter is the unique identifier of the subscription - - sql: the query statement of the subscription, this statement can only be _select_ statement, only the original data should be queried, only the data can be queried in time order - - fp: the callback function when the query result is received (the function prototype will be introduced later), only used when called asynchronously. This parameter should be passed `NULL` when called synchronously - - param: additional parameter when calling the callback function, the system API will pass it to the callback function as it is, without any processing - - interval: polling period in milliseconds. The callback function will be called periodically according to this parameter when called asynchronously. not recommended to set this parameter too small To avoid impact on system performance when called synchronously. If the interval between two calls to `taos_consume()` is less than this period, the API will block until the interval exceeds this period. + - taos: the database connection that has been established. + - restart: if the subscription already exists, whether to restart or continue the previous subscription. + - topic: the topic of the subscription (i.e., the name). This parameter is the unique identifier of the subscription. 
+ - sql: the query statement of the subscription which can only be a _select_ statement. Only the original data should be queried, and data can only be queried in temporal order. + - fp: the callback function invoked when the query result is received; only used when called asynchronously. This parameter should be passed `NULL` when called synchronously. The function prototype is described below. + - param: additional parameter when calling the callback function. The system API will pass it to the callback function as is, without any processing. + - interval: polling period in milliseconds. The callback function will be called periodically according to this parameter when called asynchronously. The interval should not be too small to avoid impact on system performance when called synchronously. If the interval between two calls to `taos_consume()` is less than this period, the API will block until the interval exceeds this period. - ` typedef void (*TAOS_SUBSCRIBE_CALLBACK)(TAOS_SUB* tsub, TAOS_RES *res, void* param, int code)` diff --git a/docs-en/14-reference/03-connector/csharp.mdx b/docs-en/14-reference/03-connector/csharp.mdx index 2969392a0594ff0705e88bede5be90fb9dfd646d..5eb322cf9125fe036349de22ceea5988de46e404 100644 --- a/docs-en/14-reference/03-connector/csharp.mdx +++ b/docs-en/14-reference/03-connector/csharp.mdx @@ -179,9 +179,9 @@ namespace TDengineExample 1. "Unable to establish connection", "Unable to resolve FQDN" - Usually, it cause by the FQDN configuration is incorrect, you can refer to [How to understand TDengine's FQDN (Chinese)](https://www.taosdata.com/blog/2021/07/29/2741.html) to solve it. 2. + Usually, it's caused by an incorrect FQDN configuration. Please refer to this section in the [FAQ](https://docs.tdengine.com/2.4/train-faq/faq/#2-how-to-handle-unable-to-establish-connection) to troubleshoot. -Unhandled exception. System.DllNotFoundException: Unable to load DLL 'taos' or one of its dependencies: The specified module cannot be found. +2. Unhandled exception. System.DllNotFoundException: Unable to load DLL 'taos' or one of its dependencies: The specified module cannot be found. This is usually because the program did not find the dependent client driver. The solution is to copy `C:\TDengine\driver\taos.dll` to the `C:\Windows\System32\` directory on Windows, and create the following soft link on Linux `ln -s /usr/local/taos/driver/libtaos.so.x.x.x.x /usr/lib/libtaos.so` will work. diff --git a/docs-en/14-reference/03-connector/go.mdx b/docs-en/14-reference/03-connector/go.mdx index fd5930f07ff7184bd8dd5ff19cd3860f9718eaf9..c1e85ae4eb1d1d7ccfb70b2b4f38cebaf6cbf06c 100644 --- a/docs-en/14-reference/03-connector/go.mdx +++ b/docs-en/14-reference/03-connector/go.mdx @@ -15,9 +15,9 @@ import GoOpenTSDBTelnet from "../../07-develop/03-insert-data/_go_opts_telnet.md import GoOpenTSDBJson from "../../07-develop/03-insert-data/_go_opts_json.mdx" import GoQuery from "../../07-develop/04-query-data/_go.mdx" -`driver-go` is the official Go language connector for TDengine, which implements the interface to the Go language [database/sql](https://golang.org/pkg/database/sql/) package. Go developers can use it to develop applications that access TDengine cluster data. +`driver-go` is the official Go language connector for TDengine. It implements the [database/sql](https://golang.org/pkg/database/sql/) package, the generic Go language interface to SQL databases. Go developers can use it to develop applications that access TDengine cluster data.
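As a hedged sketch of that `database/sql` integration (the import path and driver name follow driver-go v2 conventions; the DSN host, port, and credentials are placeholders for your deployment):

```go
package main

import (
	"database/sql"
	"fmt"

	_ "github.com/taosdata/driver-go/v2/taosSql" // registers the "taosSql" native driver
)

func main() {
	// DSN layout follows the driver's user:password@tcp(host:port)/dbname convention;
	// all concrete values here are placeholders.
	db, err := sql.Open("taosSql", "root:taosdata@tcp(localhost:6030)/")
	if err != nil {
		panic(err)
	}
	defer db.Close()

	var version string
	if err := db.QueryRow("select server_version()").Scan(&version); err != nil {
		panic(err)
	}
	fmt.Println("server version:", version)
}
```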
-`driver-go` provides two ways to establish connections. One is **native connection**, which connects to TDengine instances natively through the TDengine client driver (taosc), supporting data writing, querying, subscriptions, schemaless writing, and bind interface. The other is the **REST connection**, which connects to TDengine instances via the REST interface provided by taosAdapter. The set of features implemented by the REST connection differs slightly from the native connection. +`driver-go` provides two ways to establish connections. One is **native connection**, which connects to TDengine instances natively through the TDengine client driver (taosc), supporting data writing, querying, subscriptions, schemaless writing, and bind interface. The other is the **REST connection**, which connects to TDengine instances via the REST interface provided by taosAdapter. The set of features implemented by the REST connection differs slightly from those implemented by the native connection. This article describes how to install `driver-go` and connect to TDengine clusters and perform basic operations such as data query and data writing through `driver-go`. @@ -213,7 +213,7 @@ func main() { Since the REST interface is stateless, the `use db` syntax will not work. You need to put the db name into the SQL command, e.g. `create table if not exists tb1 (ts timestamp, a int)` to `create table if not exists test.tb1 (ts timestamp, a int)` otherwise it will report the error `[0x217] Database not specified or available`. -You can also put the db name in the DSN by changing `root:taosdata@http(localhost:6041)/` to `root:taosdata@http(localhost:6041)/test`. This method is supported by taosAdapter in TDengine 2.4.0.5. is supported since TDengine 2.4.0.5. Executing the `create database` statement when the specified db does not exist will not report an error while executing other queries or writing against that db will report an error. +You can also put the db name in the DSN by changing `root:taosdata@http(localhost:6041)/` to `root:taosdata@http(localhost:6041)/test`. This method is supported by taosAdapter since TDengine 2.4.0.5. Executing the `create database` statement when the specified db does not exist will not report an error, while executing other queries or writing against that db will report an error. The complete example is as follows. @@ -289,7 +289,7 @@ func main() { 6. `readBufferSize` parameter has no significant effect after being increased - If you increase `readBufferSize` will reduce the number of `syscall` calls when fetching results. If the query result is smaller, modifying this parameter will not improve significantly. If you increase the parameter value too much, the bottleneck will be parsing JSON data. If you need to optimize the query speed, you must adjust the value according to the actual situation to achieve the best query result. + Increasing `readBufferSize` will reduce the number of `syscall` calls when fetching results. If the query result is small, modifying this parameter will not improve performance significantly. If you increase the parameter value too much, the bottleneck will be parsing JSON data. If you need to optimize the query speed, you must adjust the value based on the actual situation to achieve the best query performance. 7.
`disableCompression` parameter is set to `false` when the query efficiency is reduced diff --git a/docs-en/14-reference/03-connector/java.mdx b/docs-en/14-reference/03-connector/java.mdx index 530798af1143d2e611369579a945de295d248ab0..33d715c2e218fd6db4f61882f2a7a92baa80f5a2 100644 --- a/docs-en/14-reference/03-connector/java.mdx +++ b/docs-en/14-reference/03-connector/java.mdx @@ -9,19 +9,19 @@ description: TDengine Java based on JDBC API and provide both native and REST co import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem'; -'taos-jdbcdriver' is TDengine's official Java language connector, which allows Java developers to develop applications that access the TDengine database. 'taos-jdbcdriver' implements the interface of the JDBC driver standard and provides two forms of connectors. One is to connect to a TDengine instance natively through the TDengine client driver (taosc), which supports functions including data writing, querying, subscription, schemaless writing, and bind interface. And the other is to connect to a TDengine instance through the REST interface provided by taosAdapter (2.4.0.0 and later). REST connections implement has a slight differences to compare the set of features implemented and native connections. +'taos-jdbcdriver' is TDengine's official Java language connector, which allows Java developers to develop applications that access the TDengine database. 'taos-jdbcdriver' implements the interface of the JDBC driver standard and provides two forms of connectors. One is to connect to a TDengine instance natively through the TDengine client driver (taosc), which supports functions including data writing, querying, subscription, schemaless writing, and bind interface. And the other is to connect to a TDengine instance through the REST interface provided by taosAdapter (2.4.0.0 and later). The implementations of the REST connection and the native connection have slight differences in features. -![tdengine-connector](tdengine-jdbc-connector.webp) +![TDengine Database tdengine-connector](tdengine-jdbc-connector.webp) The preceding diagram shows two ways for a Java app to access TDengine via connector: - JDBC native connection: Java applications use TSDBDriver on physical node 1 (pnode1) to call client-driven directly (`libtaos.so` or `taos.dll`) APIs to send writing and query requests to taosd instances located on physical node 2 (pnode2). -- JDBC REST connection: The Java application encapsulates the SQL as a REST request via RestfulDriver, sends it to the REST server of physical node 2 (taosAdapter), requests TDengine server through the REST server, and returns the result. +- JDBC REST connection: The Java application encapsulates the SQL as a REST request via RestfulDriver, sends it to the REST server (taosAdapter) on physical node 2. taosAdapter forwards the request to TDengine server and returns the result. -Using REST connection, which does not rely on TDengine client drivers.It can be cross-platform more convenient and flexible but introduce about 30% lower performance than native connection. +The REST connection, which does not rely on TDengine client drivers, is more convenient and flexible, in addition to being cross-platform. However, the performance is about 30% lower than that of the native connection. :::info -TDengine's JDBC driver implementation is as consistent as possible with the relational database driver.
Still, there are differences in the use scenarios and technical characteristics of TDengine and relational object databases, so 'taos-jdbcdriver' also has some differences from traditional JDBC drivers. You need to pay attention to the following points when using: +TDengine's JDBC driver implementation is as consistent as possible with the relational database driver. Still, there are differences in the use scenarios and technical characteristics of TDengine and relational object databases. So 'taos-jdbcdriver' also has some differences from traditional JDBC drivers. It is important to keep the following points in mind: - TDengine does not currently support delete operations for individual data records. - Transactional operations are not currently supported. @@ -88,7 +88,7 @@ Add following dependency in the `pom.xml` file of your Maven project: -You can build Java connector from source code after clone TDengine project: +You can build the Java connector from source code after cloning the TDengine project: ```shell git clone https://github.com/taosdata/TDengine.git cd TDengine/src/connector/jdbc mvn clean install -Dmaven.test.skip=true ``` -After compilation, a jar package of taos-jdbcdriver-2.0.XX-dist .jar is generated in the target directory, and the compiled jar file is automatically placed in the local Maven repository. +After compilation, a jar package named taos-jdbcdriver-2.0.XX-dist.jar is generated in the target directory, and the compiled jar file is automatically placed in the local Maven repository. @@ -186,7 +186,7 @@ Connection conn = DriverManager.getConnection(jdbcUrl); In the above example, a RestfulDriver with a JDBC REST connection is used to establish a connection to a database named `test` with hostname `taosdemo.com` on port `6041`. The URL specifies the user name as `root` and the password as `taosdata`. -There is no dependency on the client driver when Using a JDBC REST connection. Compared to a JDBC native connection, only the following are required: 1. +There is no dependency on the client driver when using a JDBC REST connection. Compared to a JDBC native connection, only the following are required: 1. driverClass specified as "com.taosdata.jdbc.rs.RestfulDriver". 2. jdbcUrl starting with "jdbc:TAOS-RS://". @@ -209,7 +209,7 @@ The configuration parameters in the URL are as follows. INSERT INTO test.t1 USING test.weather (ts, temperature) TAGS('California.SanFrancisco') VALUES(now, 24.6); ``` -- Starting from taos-jdbcdriver-2.0.36 and TDengine 2.2.0.0, if dbname is specified in the URL, JDBC REST connections will use `/rest/sql/dbname` as the URL for REST requests by default, and there is no need to specify dbname in SQL. For example, if the URL is `jdbc:TAOS-RS://127.0.0.1:6041/test`, then the SQL can be executed: insert into t1 using weather(ts, temperature) tags('California.SanFrancisco') values(now, 24.6); +- Starting from taos-jdbcdriver-2.0.36 and TDengine 2.2.0.0, if dbname is specified in the URL, JDBC REST connections will use `/rest/sql/dbname` as the URL for REST requests by default, and there is no need to specify dbname in SQL. For example, if the URL is `jdbc:TAOS-RS://127.0.0.1:6041/test`, then the SQL can be executed: insert into t1 using weather(ts, temperature) tags('California.SanFrancisco') values(now, 24.6); ::: @@ -271,7 +271,7 @@ If the configuration parameters are duplicated in the URL, Properties, or client 2. Properties connProps 3.
the configuration file taos.cfg of the TDengine client driver when using a native connection -For example, if you specify the password as `taosdata` in the URL and specify the password as `taosdemo` in the Properties simultaneously. In this case, JDBC will use the password in the URL to establish the connection. +For example, if you specify the password as `taosdata` in the URL and specify the password as `taosdemo` in the Properties simultaneously, JDBC will use the password in the URL to establish the connection. ## Usage examples @@ -323,7 +323,7 @@ while(resultSet.next()){ } ``` -> The query is consistent with operating a relational database. When using subscripts to get the contents of the returned fields, starting from 1, it is recommended to use the field names to get them. +> The query is consistent with operating a relational database. When using subscripts to get the contents of the returned fields, you have to start from 1. However, we recommend using the field names to get the values of the fields in the result set. ### Handling exceptions @@ -623,7 +623,7 @@ public void setNString(int columnIndex, ArrayList<String> list, int size) throws ### Schemaless Writing -Starting with version 2.2.0.0, TDengine has added the ability to schemaless writing. It is compatible with InfluxDB's Line Protocol, OpenTSDB's telnet line protocol, and OpenTSDB's JSON format protocol. See [schemaless writing](/reference/schemaless/) for details. +Starting with version 2.2.0.0, TDengine has added the ability to perform schemaless writing. It is compatible with InfluxDB's Line Protocol, OpenTSDB's telnet line protocol, and OpenTSDB's JSON format protocol. See [schemaless writing](/reference/schemaless/) for details. **Note**. @@ -666,16 +666,16 @@ The TDengine Java Connector supports subscription functionality with the followi #### Create subscriptions ```java -TSDBSubscribe sub = ((TSDBConnection)conn).subscribe("topic", "select * from meters", false); +TSDBSubscribe sub = ((TSDBConnection)conn).subscribe("topicname", "select * from meters", false); ``` The three parameters of the `subscribe()` method have the following meanings. -- topic: the subscribed topic (i.e., name). This parameter is the unique identifier of the subscription -- sql: the query statement of the subscription, this statement can only be `select` statement, only the original data should be queried, and you can query only the data in the positive time order +- topicname: the name of the subscribed topic. This parameter is the unique identifier of the subscription. +- sql: the query statement of the subscription. This statement can only be a `select` statement. Only original data can be queried, and you can query the data only in temporal order. - restart: if the subscription already exists, whether to restart or continue the previous subscription -The above example will use the SQL command `select * from meters` to create a subscription named `topic`. If the subscription exists, it will continue the progress of the previous query instead of consuming all the data from the beginning. +The above example will use the SQL command `select * from meters` to create a subscription named `topicname`. If the subscription exists, it will continue the progress of the previous query instead of consuming all the data from the beginning.
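A compact polling sketch built on this API might look as follows. It assumes an established connection and the `meters` schema used above; the JDBC URL, credentials, and poll count are placeholders, and class and method names follow the taos-jdbcdriver docs, so treat this as illustrative rather than canonical:

```java
import java.sql.Connection;
import java.sql.DriverManager;

import com.taosdata.jdbc.TSDBConnection;
import com.taosdata.jdbc.TSDBResultSet;
import com.taosdata.jdbc.TSDBSubscribe;

public class SubscribeSketch {
    public static void main(String[] args) throws Exception {
        // Placeholder URL and credentials; the `test` database is assumed to contain `meters`.
        Connection conn = DriverManager.getConnection(
                "jdbc:TAOS://localhost:6030/test?user=root&password=taosdata");
        TSDBSubscribe sub = ((TSDBConnection) conn).subscribe(
                "topicname", "select * from meters", false);
        for (int i = 0; i < 10; i++) {            // poll a fixed number of times
            TSDBResultSet rs = sub.consume();     // rows appended since the last consume()
            while (rs.next()) {
                System.out.println(rs.getTimestamp(1) + "\t" + rs.getFloat(2));
            }
            Thread.sleep(1000);                   // roughly one poll per second
        }
        sub.close(true);                          // true: keep subscription progress on the server
        conn.close();
    }
}
```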
#### Subscribe to consume data diff --git a/docs-en/14-reference/03-connector/node.mdx b/docs-en/14-reference/03-connector/node.mdx index 3d30148e8ed9d8f98d135fa0fa72809f1115231a..8f586acde4848af71efcb23358be1f8486cedb8e 100644 --- a/docs-en/14-reference/03-connector/node.mdx +++ b/docs-en/14-reference/03-connector/node.mdx @@ -14,7 +14,6 @@ import NodeInfluxLine from "../../07-develop/03-insert-data/_js_line.mdx"; import NodeOpenTSDBTelnet from "../../07-develop/03-insert-data/_js_opts_telnet.mdx"; import NodeOpenTSDBJson from "../../07-develop/03-insert-data/_js_opts_json.mdx"; import NodeQuery from "../../07-develop/04-query-data/_js.mdx"; -import NodeAsyncQuery from "../../07-develop/04-query-data/_js_async.mdx"; `td2.0-connector` and `td2.0-rest-connector` are the official Node.js language connectors for TDengine. Node.js developers can develop applications to access TDengine instance data. @@ -189,14 +188,8 @@ let cursor = conn.cursor(); ### Query data -#### Synchronous queries - -#### asynchronous query - - - ## More Sample Programs | Sample Programs | Sample Program Description | @@ -232,7 +225,7 @@ See [video tutorial](https://www.taosdata.com/blog/2020/11/11/1957.html) for the 2. "Unable to establish connection", "Unable to resolve FQDN" - Usually, root cause is the FQDN is not configured correctly. You can refer to [How to understand TDengine's FQDN (In Chinese)](https://www.taosdata.com/blog/2021/07/29/2741.html). + Usually, the root cause is an incorrect FQDN configuration. You can refer to this section in the [FAQ](https://docs.tdengine.com/2.4/train-faq/faq/#2-how-to-handle-unable-to-establish-connection) to troubleshoot. ## Important Updates diff --git a/docs-en/14-reference/03-connector/python.mdx b/docs-en/14-reference/03-connector/python.mdx index 2b238173e04e3e13de36b5ac4d91d0cda290ca72..69eec2388d460754493d2b775f14ab4bbf129799 100644 --- a/docs-en/14-reference/03-connector/python.mdx +++ b/docs-en/14-reference/03-connector/python.mdx @@ -11,18 +11,18 @@ import TabItem from "@theme/TabItem"; `taospy` is the official Python connector for TDengine. `taospy` provides a rich set of APIs that makes it easy for Python applications to access TDengine. `taospy` wraps both the [native interface](/reference/connector/cpp) and [REST interface](/reference/rest-api) of TDengine, which correspond to the `taos` and `taosrest` modules of the `taospy` package, respectively. In addition to wrapping the native and REST interfaces, `taospy` also provides a set of programming interfaces that conforms to the [Python Data Access Specification (PEP 249)](https://peps.python.org/pep-0249/). It is easy to integrate `taospy` with many third-party tools, such as [SQLAlchemy](https://www.sqlalchemy.org/) and [pandas](https://pandas.pydata.org/). -The connection to the server directly using the native interface provided by the client driver is referred to hereinafter as a "native connection"; the connection to the server using the REST interface provided by taosAdapter is referred to hereinafter as a "REST connection". +The direct connection to the server using the native interface provided by the client driver is referred to hereinafter as a "native connection"; the connection to the server using the REST interface provided by taosAdapter is referred to hereinafter as a "REST connection". The source code for the Python connector is hosted on [GitHub](https://github.com/taosdata/taos-connector-python). 
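For orientation, a minimal sketch of the two connection styles side by side; host, port, and credentials are placeholders, and the module and cursor APIs follow taospy's documented PEP 249 surface, so check your installed version:

```python
import taos      # native connection -- needs the TDengine client driver installed
import taosrest  # REST connection -- needs a reachable taosAdapter

# Native connection through the client driver.
conn = taos.connect(host="localhost", user="root", password="taosdata", port=6030)
cur = conn.cursor()
cur.execute("show databases")
print(cur.fetchall())
conn.close()

# REST connection through taosAdapter.
rest = taosrest.connect(url="http://localhost:6041", user="root", password="taosdata")
rcur = rest.cursor()
rcur.execute("show databases")
print(rcur.fetchall())
rest.close()
```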
## Supported Platforms -- The native connection [supported platforms](/reference/connector/#supported-platforms) is the same as the one supported by the TDengine client. +- The [supported platforms](/reference/connector/#supported-platforms) for the native connection are the same as the ones supported by the TDengine client. - REST connections are supported on all platforms that can run Python. ## Version selection -We recommend using the latest version of `taospy`, regardless what the version of TDengine is. +We recommend using the latest version of `taospy`, regardless of the version of TDengine. ## Supported features @@ -53,7 +53,7 @@ Earlier TDengine client software includes the Python connector. If the Python co ::: -#### to install `taospy` +#### To install `taospy` @@ -139,7 +139,7 @@ The FQDN above can be the FQDN of any dnode in the cluster, and the PORT is the -For REST connections and making sure the cluster is up, make sure the taosAdapter component is up. This can be tested using the following `curl ` command. +For REST connections, make sure the cluster and the taosAdapter component are running. This can be tested using the following `curl` command. ``` curl -u root:taosdata http://<FQDN>:<PORT>/rest/sql -d "select server_version()" @@ -312,7 +312,7 @@ For a more detailed description of the `sql()` method, please refer to [RestClie ### Exception handling -All database operations will be thrown directly if an exception occurs. The application is responsible for exception handling. For example: +All errors from database operations are thrown directly as exceptions and the error message from the database is passed up the exception stack. The application is responsible for exception handling. For example: ```python {{#include docs-examples/python/handle_exception.py}} ``` ### About nanoseconds -Due to the current imperfection of Python's nanosecond support (see link below), the current implementation returns integers at nanosecond precision instead of the `datetime` type produced by `ms and `us`, which application developers will need to handle on their own. And it is recommended to use pandas' to_datetime(). The Python Connector may modify the interface in the future if Python officially supports nanoseconds in full. +Due to the current imperfection of Python's nanosecond support (see link below), the current implementation returns integers at nanosecond precision instead of the `datetime` type produced by `ms` and `us`, which application developers will need to handle on their own. We recommend using pandas' to_datetime() for this (see the sketch at the end of this section). The Python Connector may modify the interface in the future if Python officially supports nanoseconds in full. 1. https://stackoverflow.com/questions/10611328/parsing-datetime-strings-containing-nanoseconds 2. https://www.python.org/dev/peps/pep-0564/ ## Frequently Asked Questions -Welcome to [ask questions or report questions] (https://github.com/taosdata/taos-connector-python/issues). +Welcome to [ask questions or report issues](https://github.com/taosdata/taos-connector-python/issues).
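As referenced in the nanosecond note above, a hedged sketch of the pandas conversion; the sample integers are made up and stand in for values the connector returns for a nanosecond-precision database:

```python
import pandas as pd

# Integers at nanosecond precision, as the connector returns them for an "ns" database.
raw_ns = [1609459200123456789, 1609459200123456790]  # made-up sample values
print(pd.to_datetime(raw_ns, unit="ns"))
```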
## Important Update diff --git a/docs-en/14-reference/03-connector/rust.mdx b/docs-en/14-reference/03-connector/rust.mdx index 2c8fe68c1ca8b091b8d685d8e20942a02ab2c5e8..cd54f35982ec13fc3c9160145fa002fb6f1d094b 100644 --- a/docs-en/14-reference/03-connector/rust.mdx +++ b/docs-en/14-reference/03-connector/rust.mdx @@ -30,7 +30,7 @@ REST connections are supported on all platforms that can run Rust. Please refer to [version support list](/reference/connector#version-support). -The Rust Connector is still under rapid development and is not guaranteed to be backward compatible before 1.0. Recommend to use TDengine version 2.4 or higher to avoid known issues. +The Rust Connector is still under rapid development and is not guaranteed to be backward compatible before 1.0. We recommend using TDengine version 2.4 or higher to avoid known issues. ## Installation @@ -206,7 +206,7 @@ let conn: Taos = cfg.connect(); ### Connection pooling -In complex applications, recommand to enable connection pool. Connection pool for [libtaos] is implemented using [r2d2]. +In complex applications, we recommend enabling connection pools. The connection pool for [libtaos] is implemented using [r2d2]. As follows, a connection pool with default parameters can be generated. @@ -269,7 +269,7 @@ The [Taos] structure is the connection manager in [libtaos] and provides two mai Note that Rust asynchronous functions and an asynchronous runtime are required. -[Taos] provides partial Rust methodization of SQL to reduce the frequency of `format!` code blocks. +[Taos] provides a few Rust methods that encapsulate SQL to reduce the frequency of `format!` code blocks. - `.describe(table: &str)`: Executes `DESCRIBE` and returns a Rust data structure. - `.create_database(database: &str)`: Executes the `CREATE DATABASE` statement. @@ -279,7 +279,7 @@ In addition, this structure is also the entry point for [Parameter Binding](#Par ### Bind Interface -Similar to the C interface, Rust provides the bind interface's wraping. First, create a bind object [Stmt] for a SQL command from the [Taos] object. +Similar to the C interface, Rust provides a wrapper for the bind interface. First, create a bind object [Stmt] for a SQL command from the [Taos] object. ```rust let mut stmt: Stmt = taos.stmt("insert into ? values(?, ?)")?; ``` diff --git a/docs-en/14-reference/04-taosadapter.md b/docs-en/14-reference/04-taosadapter.md index de42e8a883d8b195b9d342f761e39458e557dfac..3264124655e7040e1d94b43500a0b582d95cb5a1 100644 --- a/docs-en/14-reference/04-taosadapter.md +++ b/docs-en/14-reference/04-taosadapter.md @@ -24,21 +24,21 @@ taosAdapter provides the following features. ## taosAdapter architecture diagram -![taosAdapter Architecture](taosAdapter-architecture.webp) +![TDengine Database taosAdapter Architecture](taosAdapter-architecture.webp) ## taosAdapter Deployment Method ### Install taosAdapter -taosAdapter has been part of TDengine server software since TDengine v2.4.0.0. If you use the TDengine server, you don't need additional steps to install taosAdapter. You can download taosAdapter from [TAOSData official website](https://taosdata.com/en/all-downloads/) to download the TDengine server installation package (taosAdapter is included in v2.4.0.0 and later version). If you need to deploy taosAdapter separately on another server other than the TDengine server, you should install the full TDengine on that server to install taosAdapter.
If you need to build taosAdapter from source code, you can refer to the [Building taosAdapter]( https://github.com/taosdata/taosadapter/blob/develop/BUILD.md) documentation. +taosAdapter has been part of TDengine server software since TDengine v2.4.0.0. If you use the TDengine server, you don't need additional steps to install taosAdapter. You can download the TDengine server installation package from the [TDengine official website](https://tdengine.com/all-downloads/) (taosAdapter is included in v2.4.0.0 and later versions). If you need to deploy taosAdapter separately on another server other than the TDengine server, you should install the full TDengine server package on that server to install taosAdapter. If you need to build taosAdapter from source code, you can refer to the [Building taosAdapter]( https://github.com/taosdata/taosadapter/blob/develop/BUILD.md) documentation. -### start/stop taosAdapter +### Start/Stop taosAdapter On Linux systems, the taosAdapter service is managed by `systemd` by default. You can use the command `systemctl start taosadapter` to start the taosAdapter service and use the command `systemctl stop taosadapter` to stop the taosAdapter service. ### Remove taosAdapter -Use the command `rmtaos` to remove the TDengine server software if you use tar.gz package or use package management command like rpm or apt to remove the TDengine server, including taosAdapter. +Use the command `rmtaos` to remove the TDengine server software if you installed it using the tar.gz package. If you installed using a .deb or .rpm package, use the corresponding command for your package manager, like apt or rpm, to remove the TDengine server, including taosAdapter. ### Upgrade taosAdapter @@ -153,8 +153,7 @@ See [example/config/taosadapter.toml](https://github.com/taosdata/taosadapter/bl ## Feature List -- Compatible with RESTful interfaces - [https://www.taosdata.com/cn/documentation/connector#restful](https://www.taosdata.com/cn/documentation/connector#restful) +- Compatible with RESTful interfaces [REST API](/reference/rest-api/) - Compatible with InfluxDB v1 write interface [https://docs.influxdata.com/influxdb/v2.0/reference/api/influxdb-1x/write/](https://docs.influxdata.com/influxdb/v2.0/reference/api/influxdb-1x/write/) - Compatible with OpenTSDB JSON and telnet format writes @@ -187,7 +186,7 @@ You can use any client that supports the http protocol to write data to or query ### InfluxDB -You can use any client that supports the http protocol to access the Restful interface address `http://<fqdn>:6041/` to write data in InfluxDB compatible format to TDengine. The EndPoint is as follows: +You can use any client that supports the http protocol to access the RESTful interface address `http://<fqdn>:6041/` to write data in InfluxDB compatible format to TDengine. The EndPoint is as follows: ```text /influxdb/v1/write @@ -204,7 +203,7 @@ Note: InfluxDB token authorization is not supported at present. Only Basic autho ### OpenTSDB -You can use any client that supports the http protocol to access the Restful interface address `http://<fqdn>:6041/` to write data in OpenTSDB compatible format to TDengine. +You can use any client that supports the http protocol to access the RESTful interface address `http://<fqdn>:6041/` to write data in OpenTSDB compatible format to TDengine.
```text /opentsdb/v1/put/json/:db @@ -241,7 +240,7 @@ node_export is an exporter of hardware and OS metrics exposed by the \*NIX kerne ## Memory usage optimization methods -taosAdapter will monitor its memory usage during operation and adjust it with two thresholds. Valid values range from -1 to 100 integers in percent of the system's physical memory. +taosAdapter will monitor its memory usage during operation and adjust it with two thresholds. Valid values are integers between 1 and 100, and represent a percentage of the system's physical memory. - pauseQueryMemoryThreshold - pauseAllMemoryThreshold @@ -277,7 +276,7 @@ Corresponding configuration parameter monitor.pauseQueryMemoryThreshold memory threshold for no more queries Environment variable `TAOS_MONITOR_PAUSE_QUERY_MEMORY_THRESHOLD` (default 70) ``` -You can adjust it according to the specific application scenario and operation strategy, and it is recommended to use operation monitoring software to monitor system memory status timely. The load balancer can also check the taosAdapter running status through this interface. +You should adjust this parameter based on your specific application scenario and operation strategy. We recommend using monitoring software to monitor system memory status. The load balancer can also check the taosAdapter running status through this interface. ## taosAdapter Monitoring Metrics @@ -326,7 +325,7 @@ You can also adjust the level of the taosAdapter log output by setting the `--lo ## How to migrate from older TDengine versions to taosAdapter -In TDengine server 2.2.x.x or earlier, the TDengine server process (taosd) contains an embedded HTTP service. As mentioned earlier, taosAdapter is a standalone software managed using `systemd` and has its process ID. And there are some configuration parameters and behaviors that are different between the two. See the following table for details. +In TDengine server 2.2.x.x or earlier, the TDengine server process (taosd) contains an embedded HTTP service. As mentioned earlier, taosAdapter is a standalone software managed using `systemd` and has its own process ID. There are some configuration parameters and behaviors that are different between the two. See the following table for details. | **#** | **embedded httpd** | **taosAdapter** | **comment** | | ----- | ------------------- | ------------------------------------ | -------------------------------------------------------------------------------------------------------------------------------------------- | diff --git a/docs-en/14-reference/05-taosbenchmark.md b/docs-en/14-reference/05-taosbenchmark.md index 1e2b0b99f652bca0d775bebe28378600470f8661..b029f3d3eea0b010354dac1eb3ffecbc872e597f 100644 --- a/docs-en/14-reference/05-taosbenchmark.md +++ b/docs-en/14-reference/05-taosbenchmark.md @@ -7,7 +7,7 @@ description: "taosBenchmark (once called taosdemo ) is a tool for testing the pe ## Introduction -taosBenchmark (formerly taosdemo ) is a tool for testing the performance of TDengine products. taosBenchmark can test the performance of TDengine's insert, query, and subscription functions and simulate large amounts of data generated by many devices. 
taosBenchmark can flexibly control the number and type of databases, supertables, tag columns, number and type of data columns, and sub-tables, and types of databases, super tables, the number and types of data columns, the number of sub-tables, the amount of data per sub-table, the time interval for inserting data, the number of working threads, whether and how to insert disordered data, and so on. The installer provides taosdemo as a soft link to taosBenchmark for compatibility with past users. +taosBenchmark (formerly taosdemo) is a tool for testing the performance of TDengine products. taosBenchmark can test the performance of TDengine's insert, query, and subscription functions and simulate large amounts of data generated by many devices. taosBenchmark can flexibly control the number and types of databases, super tables, tag columns, and data columns, the number of sub-tables, the amount of data per sub-table, the time interval for inserting data, the number of working threads, and whether and how to insert disordered data. The installer provides taosdemo as a soft link to taosBenchmark for compatibility and for the convenience of past users. ## Installation @@ -21,7 +21,7 @@ There are two ways to install taosBenchmark: ### Configuration and running methods -taosBenchmark supports two configuration methods: [Command-line arguments](#Command-line arguments in detailed) and [JSON configuration file](#Configuration file arguments in detailed). These two methods are mutually exclusive, and with only one command-line parameter, users can use `-f ` to specify a configuration file when using a configuration file. When running taosBenchmark with command-line arguments and controlling its behavior, users should use other parameters for configuration rather than `-f` parameter. In addition, taosBenchmark offers a special way of running without parameters. +taosBenchmark supports two configuration methods: [Command-line arguments](#Command-line arguments in detailed) and [JSON configuration file](#Configuration file arguments in detailed). These two methods are mutually exclusive. Users can use `-f <json file>` to specify a configuration file. When running taosBenchmark with command-line arguments to control its behavior, users should use other parameters for configuration, but not the `-f` parameter. In addition, taosBenchmark offers a special way of running without parameters. taosBenchmark supports complete performance testing of TDengine. taosBenchmark supports the TDengine functions in three categories: write, query, and subscribe. These three functions are mutually exclusive, and users can select only one of them each time taosBenchmark runs. It is important to note that the type of functionality to be tested is not configurable when using the command-line configuration method, which can only test writing performance. To test the query and subscription performance of the TDengine, you must use the configuration file method and specify the function type to test via the parameter `filetype` in the configuration file. 
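To make the configuration file method concrete, the following is a minimal, hedged sketch of an insert test driven by `-f`. Only `filetype` and the `super_tables` parameters are taken from this page; the surrounding layout (the `databases` list and `dbinfo` object) and all concrete values are assumptions to be checked against the configuration file reference below:

```bash
# Sketch only: the JSON nesting and values here are illustrative.
cat > insert-test.json <<'EOF'
{
  "filetype": "insert",
  "databases": [{
    "dbinfo": { "name": "test" },
    "super_tables": [{
      "name": "meters",
      "child_table_prefix": "d",
      "insert_mode": "taosc",
      "timestamp_step": 1,
      "disorder_ratio": 0
    }]
  }]
}
EOF

# -f is mutually exclusive with all other command-line parameters
taosBenchmark -f insert-test.json
```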
@@ -35,7 +35,7 @@ Execute the following commands to quickly experience taosBenchmark's default con taosBenchmark ``` -When run without parameters, taosBenchmark connects to the TDengine cluster specified in `/etc/taos` by default and creates a database named test in TDengine, a super table named `meters` under the test database, and 10,000 tables under the super table with 10,000 records written to each table. Note that if there is already a test database, this table is not used. Note that if there is already a test database, this command will delete it first and create a new test database. +When run without parameters, taosBenchmark connects to the TDengine cluster specified in `/etc/taos` by default and creates a database named `test`, a super table named `meters` under the test database, and 10,000 tables under the super table with 10,000 records written to each table. Note that if there is already a database named "test", this command will delete it first and create a new database. ### Run with command-line configuration parameters @@ -45,7 +45,7 @@ The `-f ` argument cannot be used when running taosBenchmark with com taosBenchmark -I stmt -n 200 -t 100 ``` -The above command, `taosBenchmark` will create a database named `test`, create a super table `meters` in it, create 100 sub-tables in the super table and insert 200 records for each sub-table using parameter binding. +Using the above command, `taosBenchmark` will create a database named `test`, create a super table `meters` in it, create 100 sub-tables in the super table and insert 200 records for each sub-table using parameter binding. ### Run with the configuration file @@ -95,10 +95,10 @@ taosBenchmark -f ## Command-line argument in detailed - **-f/--file <json file>** : - specify the configuration file to use. This file includes All parameters. And users should not use this parameter with other parameters on the command-line. There is no default value. + specify the configuration file to use. This file includes all parameters. Users should not use this parameter with other parameters on the command-line. There is no default value. - **-c/--config-dir ** : - specify the directory where the TDengine cluster configuration file. the default path is `/etc/taos`. + specify the directory where the TDengine cluster configuration file is located. The default path is `/etc/taos`. - **-h/--host ** : Specify the FQDN of the TDengine server to connect to. The default value is localhost. @@ -272,13 +272,13 @@ The parameters for creating super tables are configured in `super_tables` in the - **child_table_prefix** : The prefix of the child table name, mandatory configuration item, no default value. -- **escape_character**: specify the super table and child table names containing escape characters. By default is "no". The value can be "yes" or "no". +- **escape_character**: specify whether the super table and child table names contain escape characters. The value can be "yes" or "no". The default is "no". - **auto_create_table**: only when insert_mode is taosc, rest, stmt, and childtable_exists is "no". "yes" means taosBenchmark will automatically create non-existent tables when inserting data; "no" means that taosBenchmark will create all tables before inserting. -- **batch_create_tbl_num** : the number of tables per batch when creating sub-tables, default is 10. 
Note: the actual number of batches may not be the same as this value when the executed SQL statement is larger than the maximum length supported, it will be automatically truncated and re-executed to continue creating. +- **batch_create_tbl_num** : the number of tables per batch when creating sub-tables, default is 10. Note: the actual number of batches may not be the same as this value. If the executed SQL statement is larger than the maximum length supported, it will be automatically truncated and re-executed to continue creating. -- **data_source**: specify the source of data-generating. Default is taosBenchmark randomly generated. Users can configure it as "rand" and "sample". When "sample" is used, taosBenchmark will use the data in the file specified by the `sample_file` parameter. +- **data_source**: specify the source of the generated data. The default is "rand", i.e. data randomly generated by taosBenchmark. Users can configure it as "rand" or "sample". When "sample" is used, taosBenchmark will use the data in the file specified by the `sample_file` parameter. - **insert_mode**: insertion mode with options taosc, rest, stmt, sml, sml-rest, corresponding to normal write, restful interface write, parameter binding interface write, schemaless interface write, restful schemaless interface write (provided by taosAdapter). The default value is taosc. @@ -300,15 +300,15 @@ The parameters for creating super tables are configured in `super_tables` in the - **partial_col_num**: If this value is a positive number n, only the first n columns are written to, only if insert_mode is taosc and rest, or all columns if n is 0. -- **disorder_ratio** : Specifies the percentage probability of disordered data in the value range [0,50]. The default is 0, which means there is no disorder data. +- **disorder_ratio** : Specifies the percentage probability of disordered (i.e. out-of-order) data in the value range [0,50]. The default is 0, which means there is no disordered data. -- **disorder_range** : Specifies the timestamp fallback range for the disordered data. The generated disorder timestamp is the timestamp that should be used in the non-disorder case minus a random value in this range. Valid only if the percentage of disordered data specified by `-O/--disorder` is greater than 0. +- **disorder_range** : Specifies the timestamp fallback range for the disordered data. The disordered timestamp is generated by subtracting a random value in this range from the timestamp that would be used in the non-disorder case. Valid only if the percentage of disordered data specified by `-O/--disorder` is greater than 0. -- **timestamp_step**: The timestamp step for inserting data in each child table, in units consistent with the `precision` of the database, the default value is 1. +- **timestamp_step**: The timestamp step for inserting data in each child table, in units consistent with the `precision` of the database. For example, if the `precision` is milliseconds, the timestamp step will be in milliseconds. The default value is 1. - **start_timestamp** : The timestamp start value of each sub-table, the default value is now. -- **sample_format**: The type of the sample data file, now only "csv" is supported. +- **sample_format**: The type of the sample data file; for now only "csv" is supported. - **sample_file**: Specify a CSV format file as the data source. It only works when data_source is a sample. 
If the number of rows in the CSV file is less than or equal to prepared_rand, then taosBenchmark will read the CSV file data cyclically until it is the same as prepared_rand; otherwise, taosBenchmark will read only the first prepared_rand rows. The final number of rows of data generated is the smaller of the two. @@ -341,7 +341,7 @@ The configuration parameters for specifying super table tag columns and data col - **create_table_thread_count** : The number of threads to build the table, default is 8. -- **connection_pool_size** : The number of pre-established connections to the TDengine server. If not configured, it is the same number of threads specified. +- **connection_pool_size** : The number of pre-established connections to the TDengine server. If not configured, it is the same as the number of threads specified. - **result_file** : The path to the result output file, the default value is ./output.txt. diff --git a/docs-en/14-reference/06-taosdump.md b/docs-en/14-reference/06-taosdump.md index 973999704b595ea9b742f1ef759f973aa1f05649..5403e40925f633ce62795cc6037fc8c8f7aad07a 100644 --- a/docs-en/14-reference/06-taosdump.md +++ b/docs-en/14-reference/06-taosdump.md @@ -1,25 +1,25 @@ --- title: taosdump -description: "taosdump is a tool application that supports backing up data from a running TDengine cluster and restoring the backed up data to the same or another running TDengine cluster." +description: "taosdump is a tool that supports backing up data from a running TDengine cluster and restoring the backed up data to the same or another running TDengine cluster." --- ## Introduction -taosdump is a tool application that supports backing up data from a running TDengine cluster and restoring the backed up data to the same or another running TDengine cluster. +taosdump is a tool that supports backing up data from a running TDengine cluster and restoring the backed up data to the same or another running TDengine cluster. taosdump can back up a database, a super table, or a normal table as a logical data unit, or back up data records in the database, super tables, and normal tables. When using taosdump, you can specify the directory path for data backup. If you do not specify a directory, taosdump will back up the data to the current directory by default. -Suppose the specified location already has data files. In that case, taosdump will prompt the user and exit immediately to avoid data overwriting which means that the same path can only be used for one backup. -Please be careful if you see a prompt for this. +If the specified location already has data files, taosdump will prompt the user and exit immediately to avoid data overwriting. This means that the same path can only be used for one backup. + +Please be careful if you see a prompt for this and please ensure that you follow best practices and relevant SOPs for data integrity, backup and data security. -taosdump is a logical backup tool and should not be used to back up any raw data, environment settings, Users should not use taosdump to back up raw data, environment settings, hardware information, server configuration, or cluster topology. taosdump uses [Apache AVRO](https://avro.apache.org/) as the data file format to store backup data. ## Installation There are two ways to install taosdump: -- Install the taosTools official installer. 
Please find taosTools from [All download links](https://www.tdengine.com/all-downloads) page and download and install it. - Compile taos-tools separately and install it. Please refer to the [taos-tools](https://github.com/taosdata/taos-tools) repository for details. @@ -28,14 +28,14 @@ There are two ways to install taosdump: ### taosdump backup data 1. backing up all databases: specify `-A` or `-all-databases` parameter. -2. backup multiple specified databases: use `-D db1,db2,... ` parameters; 3. +2. backup multiple specified databases: use `-D db1,db2,... ` parameters; 3. back up some super or normal tables in the specified database: use `-dbname stbname1 stbname2 tbname1 tbname2 ... ` parameters. Note that the first parameter of this input sequence is the database name, and only one database is supported. The second and subsequent parameters are the names of super or normal tables in that database, separated by spaces. 4. back up the system log database: TDengine clusters usually contain a system database named `log`. The data in this database is the data that TDengine runs itself, and the taosdump will not back up the log database by default. If users need to back up the log database, users can use the `-a` or `-allow-sys` command-line parameter. -5. Loose mode backup: taosdump version 1.4.1 onwards provides `-n` and `-L` parameters for backing up data without using escape characters and "loose" mode, which can reduce the number of backups if table names, column names, tag names do not use This can reduce the backup data time and backup data footprint if table names, column names, and tag names do not use `escape character`. If you are unsure about using `-n` and `-L` conditions, please use the default parameters for "strict" mode backup. See the [official documentation](/taos-sql/escape) for a description of escaped characters. +5. Loose mode backup: taosdump version 1.4.1 onwards provides the `-n` and `-L` parameters for backing up data without using escape characters, i.e. "loose" mode. If table names, column names, and tag names do not use escape characters, this can reduce backup time and the backup data footprint. If you are unsure about using `-n` and `-L` conditions, please use the default parameters for "strict" mode backup. See the [official documentation](/taos-sql/escape) for a description of escaped characters. :::tip - taosdump versions after 1.4.1 provide the `-I` argument for parsing Avro file schema and data. If users specify `-s`, taosdump will only parse the schema. -- Backups after taosdump 1.4.2 use the batch count specified by the `-B` parameter. The default value is 16384. If, in some environments, low network speed or disk performance causes "Error actual dump ... batch ..." can be tried by challenging the `-B` parameter to a smaller value. +- Backups after taosdump 1.4.2 use the batch count specified by the `-B` parameter. The default value is 16384. If, in some environments, low network speed or disk performance causes "Error actual dump ... batch ...", then try changing the `-B` parameter to a smaller value. ::: @@ -44,7 +44,7 @@ There are two ways to install taosdump: Restore the data file in the specified path: use the `-i` parameter plus the path to the data file. You should not use the same directory to backup different data sets, and you should not backup the same data set multiple times in the same path. Otherwise, the backup data will be overwritten or duplicated. 
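Putting the backup and restore parameters described above together, a typical round trip might look like the following sketch (paths and database names are placeholders):

```bash
# Back up two databases into a fresh directory; taosdump prompts and exits
# if the target directory already contains backup files.
taosdump -D db1,db2 -o /data/taosdump/2022-06-01

# Restore from that directory; -B lowers the write batch size if a
# "WAL size exceeds limit" error occurs (see the tip below).
taosdump -i /data/taosdump/2022-06-01 -B 4096
```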
:::tip -taosdump internally uses TDengine stmt binding API for writing recovery data and currently uses 16384 as one write batch for better data recovery performance. If there are more columns in the backup data, it may cause a "WAL size exceeds limit" error. You can try to adjust to a smaller value by using the `-B` parameter. +taosdump internally uses TDengine stmt binding API for writing recovery data with a default batch size of 16384 for better data recovery performance. If there are more columns in the backup data, it may cause a "WAL size exceeds limit" error. You can try to adjust the batch size to a smaller value by using the `-B` parameter. ::: @@ -59,7 +59,7 @@ Usage: taosdump [OPTION...] dbname [tbname ...] or: taosdump [OPTION...] -i inpath or: taosdump [OPTION...] -o outpath - -h, --host=HOST Server host dumping data from. Default is + -h, --host=HOST Server host from which to dump data. Default is localhost. -p, --password User password to connect to server. Default is taosdata. @@ -72,10 +72,10 @@ Usage: taosdump [OPTION...] dbname [tbname ...] -r, --resultFile=RESULTFILE DumpOut/In Result file path and name. -a, --allow-sys Allow to dump system database -A, --all-databases Dump all databases. - -D, --databases=DATABASES Dump inputted databases. Use comma to separate - databases' name. + -D, --databases=DATABASES Dump listed databases. Use comma to separate + database names. -N, --without-property Dump database without its properties. - -s, --schemaonly Only dump tables' schema. + -s, --schemaonly Only dump table schemas. -y, --answer-yes Input yes for prompt. It will skip data file checking! -d, --avro-codec=snappy Choose an avro codec among null, deflate, snappy, @@ -98,7 +98,7 @@ Usage: taosdump [OPTION...] dbname [tbname ...] and try. The workable value is related to the length of the row and type of table schema. -I, --inspect inspect avro file content and print on screen - -L, --loose-mode Using loose mode if the table name and column name + -L, --loose-mode Use loose mode if the table name and column name use letters and numbers only. Default is NOT. -n, --no-escape No escape char '`'. Default is using it. -T, --thread-num=THREAD_NUM Number of thread for dump in file. Default is diff --git a/docs-en/14-reference/07-tdinsight/index.md b/docs-en/14-reference/07-tdinsight/index.md index dc337bf9fff2a9b60ea2f1c5110185a8ac683098..cebfafa225e6e8de75ff84bb51fa664784177910 100644 --- a/docs-en/14-reference/07-tdinsight/index.md +++ b/docs-en/14-reference/07-tdinsight/index.md @@ -5,11 +5,11 @@ sidebar_label: TDinsight TDinsight is a solution for monitoring TDengine using the builtin native monitoring database and [Grafana]. -After TDengine starts, it will automatically create a monitoring database `log`. TDengine will automatically write many metrics in specific intervals into the `log` database. The metrics may include the server's CPU, memory, hard disk space, network bandwidth, number of requests, disk read/write speed, slow queries, other information like important system operations (user login, database creation, database deletion, etc.), and error alarms. With [Grafana] and [TDengine Data Source Plugin](https://github.com/taosdata/grafanaplugin/releases), TDinsight can visualize cluster status, node information, insertion and query requests, resource usage, etc., and also vnode, dnode, and mnode status, and exception alerts. Developers monitoring TDengine cluster operation status in real-time can be very convinient. 
This article will guide users to install the Grafana server, automatically install the TDengine data source plug-in, and deploy the TDinsight visualization panel through `TDinsight.sh` installation script. +After TDengine starts, it will automatically create a monitoring database `log`. TDengine will automatically write many metrics in specific intervals into the `log` database. The metrics may include the server's CPU, memory, hard disk space, network bandwidth, number of requests, disk read/write speed, slow queries, other information like important system operations (user login, database creation, database deletion, etc.), and error alarms. With [Grafana] and [TDengine Data Source Plugin](https://github.com/taosdata/grafanaplugin/releases), TDinsight can visualize cluster status, node information, insertion and query requests, resource usage, vnode, dnode, and mnode status, exception alerts and many other metrics. This is very convenient for developers who want to monitor TDengine cluster status in real-time. This article will guide users to install the Grafana server, automatically install the TDengine data source plug-in, and deploy the TDinsight visualization panel using the `TDinsight.sh` installation script. ## System Requirements -To deploy TDinsight, a single-node TDengine server or a multi-nodes TDengine cluster and a [Grafana] server are required. This dashboard requires TDengine 2.3.3.0 and above, with the `log` database enabled (`monitor = 1`). +To deploy TDinsight, a single-node TDengine server or a multi-node TDengine cluster and a [Grafana] server are required. This dashboard requires TDengine 2.3.3.0 and above, with the `log` database enabled (`monitor = 1`). ## Installing Grafana @@ -17,7 +17,7 @@ We recommend using the latest [Grafana] version 7 or 8 here. You can install Gra ### Installing Grafana on Debian or Ubuntu -For Debian or Ubuntu operating systems, we recommend the Grafana image repository and Use the following command to install from scratch. +For Debian or Ubuntu operating systems, we recommend using the Grafana image repository and the following command to install from scratch. ```bash sudo apt-get install -y apt-transport-https @@ -61,7 +61,7 @@ sudo yum install \ ## Automated deployment of TDinsight -We provide an installation script [`TDinsight.sh`](https://github.com/taosdata/grafanaplugin/releases/latest/download/TDinsight.sh) script to allow users to configure the installation automatically and quickly. +We provide an installation script [`TDinsight.sh`](https://github.com/taosdata/grafanaplugin/releases/latest/download/TDinsight.sh) to allow users to configure the installation automatically and quickly. You can download the script via `wget` or other tools: ```bash chmod +x TDinsight.sh ./TDinsight.sh ``` -This script will automatically download the latest [Grafana TDengine data source plugin](https://github.com/taosdata/grafanaplugin/releases/latest) and [TDinsight dashboard](https://grafana.com/grafana/dashboards/15167) with configurable parameters from the command-line options to the [Grafana Provisioning](https://grafana.com/docs/grafana/latest/administration/provisioning/) configuration file to automate deployment and updates, etc. With the alert setting options provided by this script, you can also get built-in support for AliCloud SMS alert notifications. 
+This script will automatically download the latest [Grafana TDengine data source plugin](https://github.com/taosdata/grafanaplugin/releases/latest) and [TDinsight dashboard](https://grafana.com/grafana/dashboards/15167), write the parameters configured via command-line options into the [Grafana Provisioning](https://grafana.com/docs/grafana/latest/administration/provisioning/) configuration file, and automate deployment and updates. With the alert setting options provided by this script, you can also get built-in support for AliCloud SMS alert notifications. Assume you use TDengine and Grafana's default services on the same host. Run `./TDinsight.sh` and open the Grafana browser window to see the TDinsight dashboard. @@ -233,33 +233,33 @@ The default username/password is `admin`. Grafana will require a password change Point to the **Configurations** -> **Data Sources** menu, and click the **Add data source** button. -![Add data source button](./assets/howto-add-datasource-button.webp) +![TDengine Database TDinsight Add data source button](./assets/howto-add-datasource-button.webp) Search for and select **TDengine**. -![Add datasource](./assets/howto-add-datasource-tdengine.webp) +![TDengine Database TDinsight Add datasource](./assets/howto-add-datasource-tdengine.webp) Configure the TDengine datasource. -![Datasource Configuration](./assets/howto-add-datasource.webp) +![TDengine Database TDinsight Datasource Configuration](./assets/howto-add-datasource.webp) Save and test. It will report 'TDengine Data source is working' under normal circumstances. -![datasource test](./assets/howto-add-datasource-test.webp) +![TDengine Database TDinsight datasource test](./assets/howto-add-datasource-test.webp) ### Importing dashboards Point to **+** / **Create** - **import** (or `/dashboard/import` url). -![Import Dashboard and Configuration](./assets/import_dashboard.webp) +![TDengine Database TDinsight Import Dashboard and Configuration](./assets/import_dashboard.webp) Type the dashboard ID `15167` in the **Import via grafana.com** location and **Load**. -![Import via grafana.com](./assets/import-dashboard-15167.webp) +![TDengine Database TDinsight Import via grafana.com](./assets/import-dashboard-15167.webp) Once the import is complete, the full page view of TDinsight is shown below. -![show](./assets/TDinsight-full.webp) +![TDengine Database TDinsight show](./assets/TDinsight-full.webp) ## TDinsight dashboard details @@ -269,7 +269,7 @@ Details of the metrics are as follows. ### Cluster Status -![tdinsight-mnodes-overview](./assets/TDinsight-1-cluster-status.webp) +![TDengine Database TDinsight mnodes overview](./assets/TDinsight-1-cluster-status.webp) This section contains the current information and status of the cluster, the alert information is also here (from left to right, top to bottom). @@ -289,7 +289,7 @@ This section contains the current information and status of the cluster, the ale ### DNodes Status -![tdinsight-mnodes-overview](./assets/TDinsight-2-dnodes.webp) +![TDengine Database TDinsight mnodes overview](./assets/TDinsight-2-dnodes.webp) - **DNodes Status**: simple table view of `show dnodes`. - **DNodes Lifetime**: the time elapsed since the dnode was created. @@ -298,14 +298,14 @@ This section contains the current information and status of the cluster, the ale ### MNode Overview -![tdinsight-mnodes-overview](./assets/TDinsight-3-mnodes.webp) +![TDengine Database TDinsight mnodes overview](./assets/TDinsight-3-mnodes.webp) -1. **MNodes Status**: a simple table view of `show mnodes`. 
2. +1. **MNodes Status**: a simple table view of `show mnodes`. 2. **MNodes Number**: similar to `DNodes Number`, the number of MNodes changes. ### Request -![tdinsight-requests](./assets/TDinsight-4-requests.webp) +![TDengine Database TDinsight tdinsight requests](./assets/TDinsight-4-requests.webp) 1. **Requests Rate(Inserts per Second)**: average number of inserts per second. 2. **Requests (Selects)**: number of query requests and change rate (count per second). @@ -313,46 +313,46 @@ This section contains the current information and status of the cluster, the ale ### Database -![tdinsight-database](./assets/TDinsight-5-database.webp) +![TDengine Database TDinsight database](./assets/TDinsight-5-database.webp) Database usage, repeated for each value of the variable `$database` i.e. multiple rows per database. -1. **STables**: number of super tables. 2. -2. **Total Tables**: number of all tables. 3. -3. **Sub Tables**: the number of all super table sub-tables. 4. +1. **STables**: number of super tables. +2. **Total Tables**: number of all tables. +3. **Sub Tables**: the number of all super table subtables. 4. **Tables**: graph of all normal table numbers over time. 5. **Tables Number Foreach VGroups**: The number of tables contained in each VGroup. ### DNode Resource Usage -![dnode-usage](./assets/TDinsight-6-dnode-usage.webp) +![TDengine Database TDinsight dnode usage](./assets/TDinsight-6-dnode-usage.webp) Data node resource usage display with repeated multiple rows for the variable `$fqdn` i.e., each data node. It includes: 1. **Uptime**: the time elapsed since the dnode was created. -2. **Has MNodes?**: whether the current dnode is a mnode. 3. -3. **CPU Cores**: the number of CPU cores. 4. -4. **VNodes Number**: the number of VNodes in the current dnode. 5. -5. **VNodes Masters**: the number of vnodes in the master role. 6. +2. **Has MNodes?**: whether the current dnode is a mnode. +3. **CPU Cores**: the number of CPU cores. +4. **VNodes Number**: the number of VNodes in the current dnode. +5. **VNodes Masters**: the number of vnodes in the master role. 6. **Current CPU Usage of taosd**: CPU usage rate of taosd processes. 7. **Current Memory Usage of taosd**: memory usage of taosd processes. 8. **Disk Used**: The total disk usage percentage of the taosd data directory. -9. **CPU Usage**: Process and system CPU usage. 10. +9. **CPU Usage**: Process and system CPU usage. 10. **RAM Usage**: Time series view of RAM usage metrics. 11. **Disk Used**: Disks used at each level of multi-level storage (default is level0). 12. **Disk Increasing Rate per Minute**: Percentage increase or decrease in disk usage per minute. -13. **Disk IO**: Disk IO rate. 14. +13. **Disk IO**: Disk IO rate. 14. **Net IO**: Network IO, the aggregate network IO rate in addition to the local network. ### Login History -![Login History](./assets/TDinsight-7-login-history.webp) +![TDengine Database TDinsight Login History](./assets/TDinsight-7-login-history.webp) Currently, only the number of logins per minute is reported. ### Monitoring taosAdapter -![taosadapter](./assets/TDinsight-8-taosadapter.webp) +![TDengine Database TDinsight monitor taosadapter](./assets/TDinsight-8-taosadapter.webp) Support monitoring taosAdapter request statistics and status details. It includes: @@ -376,7 +376,7 @@ TDinsight installed via the `TDinsight.sh` script can be cleaned up using the co To completely uninstall TDinsight during a manual installation, you need to clean up the following. 1. the TDinsight Dashboard in Grafana. -2. 
the Data Source in Grafana. 3. +2. the Data Source in Grafana. 3. remove the `tdengine-datasource` plugin from the plugin installation directory. ## Integrated Docker Example diff --git a/docs-en/14-reference/08-taos-shell.md b/docs-en/14-reference/08-taos-shell.md index fe5e5f2bc29509a4b96646253732076c7a6ee7ea..002b515093258152e85dd9d7437e424dfa98c874 100644 --- a/docs-en/14-reference/08-taos-shell.md +++ b/docs-en/14-reference/08-taos-shell.md @@ -1,14 +1,14 @@ --- -title: TDengine Command Line (CLI) -sidebar_label: TDengine CLI +title: TDengine Command Line Interface (CLI) +sidebar_label: Command Line Interface description: Instructions and tips for using the TDengine CLI --- -The TDengine command-line application (hereafter referred to as `TDengine CLI`) is the most simplest way for users to manipulate and interact with TDengine instances. +The TDengine command-line interface (hereafter referred to as `TDengine CLI`) is the simplest way for users to manipulate and interact with TDengine instances. ## Installation -If executed on the TDengine server-side, there is no need for additional installation steps to install TDengine CLI as it is already included and installed automatically. To run TDengine CLI on the environment which no TDengine server running, the TDengine client installation package needs to be installed first. For details, please refer to [connector](/reference/connector/). +If executed on the TDengine server-side, there is no need for additional installation steps to install TDengine CLI as it is already included and installed automatically. To run TDengine CLI in an environment where no TDengine server is running, the TDengine client installation package needs to be installed first. For details, please refer to [connector](/reference/connector/). ## Execution diff --git a/docs-en/14-reference/11-docker/index.md b/docs-en/14-reference/11-docker/index.md index 4ca84be369e14b3223e8609e06c9ebc4e35eaa2d..b7e60ab3e7f04a6078950977a563382a3524ebaa 100644 --- a/docs-en/14-reference/11-docker/index.md +++ b/docs-en/14-reference/11-docker/index.md @@ -13,7 +13,7 @@ The TDengine image starts with the HTTP service activated by default, using the docker run -d --name tdengine -p 6041:6041 tdengine/tdengine ``` -The above command starts a container named "tdengine" and maps the HTTP service end 6041 to the host port 6041. You can verify that the HTTP service provided in this container is available using the following command. +The above command starts a container named "tdengine" and maps the HTTP service port 6041 to the host port 6041. You can verify that the HTTP service provided in this container is available using the following command. ```shell curl -u root:taosdata -d "show databases" localhost:6041/rest/sql @@ -34,7 +34,7 @@ taos> show databases; Query OK, 1 row(s) in set (0.002843s) ``` -The TDengine server running in the container uses the container's hostname to establish a connection. Using TDengine CLI or various connectors (such as JDBC-JNI) to access the TDengine inside the container from outside the container is more complicated. So the above is the simplest way to access the TDengine service in the container and is suitable for some simple scenarios. Please refer to the next section if you want to access the TDengine service in the container from containerized using TDengine CLI or various connectors in some complex scenarios. +The TDengine server running in the container uses the container's hostname to establish a connection. 
Using TDengine CLI or various connectors (such as JDBC-JNI) to access the TDengine inside the container from outside the container is more complicated. So the above is the simplest way to access the TDengine service in the container and is suitable for some simple scenarios. Please refer to the next section if you want to access the TDengine service in the container from outside the container using TDengine CLI or various connectors for complex scenarios. ## Start TDengine on the host network @@ -42,7 +42,7 @@ The TDengine server running in the container uses the container's hostname to es docker run -d --name tdengine --network host tdengine/tdengine ``` -The above command starts TDengine on the host network and uses the host's FQDN to establish a connection instead of the container's hostname. It works too, like using `systemctl` to start TDengine on the host. If the TDengine client is already installed on the host, you can access it directly with the following command. +The above command starts TDengine on the host network and uses the host's FQDN to establish a connection instead of the container's hostname. It is the equivalent of using `systemctl` to start TDengine on the host. If the TDengine client is already installed on the host, you can access it directly with the following command. ```shell $ taos @@ -315,13 +315,13 @@ password: taosdata taoslog-td2: ``` - :::note +:::note - The `VERSION` environment variable is used to set the tdengine image tag - `TAOS_FIRST_EP` must be set on the newly created instance so that it can join the TDengine cluster; if there is a high availability requirement, `TAOS_SECOND_EP` needs to be used at the same time - `TAOS_REPLICA` is used to set the default number of database replicas. Its value range is [1,3] - We recommend setting with `TAOS_ARBITRATOR` to use arbitrator in a two-nodes environment. - ::: - + We recommend setting `TAOS_ARBITRATOR` to use an arbitrator in a two-node environment. + + ::: 2. Start the cluster @@ -382,7 +382,7 @@ password: taosdata Suppose you want to deploy multiple taosAdapters to improve throughput and provide high availability. In that case, the recommended configuration method uses a reverse proxy such as Nginx to offer a unified access entry. For specific configuration methods, please refer to the official documentation of Nginx. Here is an example: ```docker - ersion: "3" + version: "3" networks: inter: diff --git a/docs-en/14-reference/12-config/index.md b/docs-en/14-reference/12-config/index.md index 1a84f1539938ed8456d1c21c6def97d89305914d..8ad9a474a02c5cc52559ccdc5910ad9d7b6264ae 100644 --- a/docs-en/14-reference/12-config/index.md +++ b/docs-en/14-reference/12-config/index.md @@ -65,7 +65,7 @@ taos --dump-config | ------------- | ------------------------------------------------------------------------ | | Applicable | Server Only | | Meaning | The FQDN of the host where `taosd` will be started. It can be IP address | -| Default Value | The first hostname configured for the hos | +| Default Value | The first hostname configured for the host | | Note | It should be within 96 bytes | ### serverPort @@ -78,7 +78,7 @@ taos --dump-config | Note | REST service is provided by `taosd` before 2.4.0.0 but by `taosAdapter` after 2.4.0.0, the default port of REST service is 6041 | :::note -TDengine uses continuous 13 ports, both TCP and TCP, from the port specified by `serverPort`. These ports need to be kept as open if firewall is enabled. Below table describes the ports used by TDengine in details. 
+TDengine uses 13 continuous ports, both TCP and UDP, starting with the port specified by `serverPort`. You should ensure, in your firewall rules, that these ports are kept open. The table below describes the ports used by TDengine in detail. ::: @@ -182,8 +182,8 @@ TDengine uses continuous 13 ports, both TCP and TCP, from the port specified by | ------------- | -------------------------------------------- | | Applicable | Server Only | | Meaning | The maximum number of distinct rows returned | -| Value Range | [100,000 - 100, 000, 000] | -| Default Value | 100, 000 | +| Value Range | [100,000 - 100,000,000] | +| Default Value | 100,000 | | Note | After version 2.3.0.0 | ## Locale Parameters @@ -197,7 +197,7 @@ TDengine uses continuous 13 ports, both TCP and TCP, from the port specified by | Default Value | TimeZone configured in the host | :::info -To handle the data insertion and data query from multiple timezones, Unix Timestamp is used and stored TDengine. The timestamp generated from any timezones at same time is same in Unix timestamp. To make sure the time on client side can be converted to Unix timestamp correctly, the timezone must be set properly. +To handle the data insertion and data query from multiple timezones, Unix Timestamp is used and stored in TDengine. A timestamp generated in any timezone at the same moment corresponds to the same Unix timestamp. To make sure the time on client side can be converted to Unix timestamp correctly, the timezone must be set properly. On Linux systems, TDengine clients automatically obtain the timezone from the host. Alternatively, the timezone can be configured explicitly in configuration file `taos.cfg` like below. @@ -209,7 +209,7 @@ timezone Asia/Shanghai The above examples are all proper configurations for the timezone of UTC+8. On Windows systems, however, `timezone Asia/Shanghai` is not supported; it must be set as `timezone UTC-8`. -The setting for timezone impacts the strings not in Unix timestamp, keywords or functions related to date/time, for example +The setting for timezone impacts strings that are not in Unix timestamp format and keywords or functions related to date/time. For example: ```sql SELECT count(*) FROM table_name WHERE TS<'2019-04-11 12:01:08'; @@ -227,7 +227,7 @@ If the timezone is UTC, it's equal to SELECT count(*) FROM table_name WHERE TS<1554984068000; ``` -To avoid the problems of using time strings, Unix timestamp can be used directly. Furthermore, time strings with timezone can be used in SQL statement, for example "2013-04-12T15:52:01.123+08:00" in RFC3339 format or "2013-04-12T15:52:01.123+0800" in ISO-8601 format, they are not influenced by timezone setting when converted to Unix timestamp. +To avoid the problems of using time strings, Unix timestamp can be used directly. Furthermore, time strings with timezone can be used in SQL statements. For example "2013-04-12T15:52:01.123+08:00" in RFC3339 format or "2013-04-12T15:52:01.123+0800" in ISO-8601 format are not influenced by timezone setting when converted to Unix timestamp. ::: @@ -240,11 +240,11 @@ To avoid the problems of using time strings, Unix timestamp can be used directly | Default Value | Locale configured in host | :::info -A specific type "nchar" is provided in TDengine to store non-ASCII characters such as Chinese, Japanese, Korean. The characters to be stored in nchar type are firstly encoded in UCS4-LE before sending to server side. To store non-ASCII characters correctly, the encoding format of the client side needs to be set properly. 
+A specific type "nchar" is provided in TDengine to store non-ASCII characters such as Chinese, Japanese, and Korean. The characters to be stored in nchar type are firstly encoded in UCS4-LE before sending to server side. To store non-ASCII characters correctly, the encoding format of the client side needs to be set properly. The characters input on the client side are encoded using the default system encoding, which is UTF-8 on Linux, or GB18030 or GBK on some systems in Chinese, POSIX in docker, CP936 on Windows in Chinese. The encoding of the operating system in use must be set correctly so that the characters in nchar type can be converted to UCS4-LE. -The locale definition standard on Linux is: \_., for example, in "zh_CN.UTF-8", "zh" means Chinese, "CN" means China mainland, "UTF-8" means charset. On Linux andMac OSX, the charset can be set by locale in the system. On Windows system another configuration parameter `charset` must be used to configure charset because the locale used on Windows is not POSIX standard. Of course, `charset` can also be used on Linux to specify the charset. +The locale definition standard on Linux is: \_., for example, in "zh_CN.UTF-8", "zh" means Chinese, "CN" means China mainland, "UTF-8" means charset. On Linux and Mac OSX, the charset can be set by locale in the system. On Windows system another configuration parameter `charset` must be used to configure charset because the locale used on Windows is not POSIX standard. Of course, `charset` can also be used on Linux to specify the charset. ::: @@ -263,7 +263,7 @@ On Linux, if `charset` is not set in `taos.cfg`, when `taos` is started, the cha locale zh_CN.UTF-8 ``` -Besides, on Linux system, if the charset contained in `locale` is not consistent with that set by `charset`, the one who comes later in the configuration file is used. +On a Linux system, if the charset contained in `locale` is not consistent with that set by `charset`, the later setting in the configuration file takes precedence. ```title="Effective charset is GBK" locale zh_CN.UTF-8 @@ -778,8 +778,8 @@ To prevent system resource from being exhausted by multiple concurrent streams, ## HTTP Parameters :::note -HTTP server had been provided by `taosd` prior to version 2.4.0.0, now is provided by `taosAdapter` after version 2.4.0.0. -The parameters described in this section are only application in versions prior to 2.4.0.0. If you are using any version from 2.4.0.0, please refer to [taosAdapter]](/reference/taosadapter/). +HTTP service was provided by `taosd` prior to version 2.4.0.0 and is provided by `taosAdapter` after version 2.4.0.0. +The parameters described in this section are only application in versions prior to 2.4.0.0. If you are using any version from 2.4.0.0, please refer to [taosAdapter](/reference/taosadapter/). ::: diff --git a/docs-en/14-reference/12-directory.md b/docs-en/14-reference/12-directory.md index dbdba2b715bb41baf9b70dce91a3065e585d0434..304e3bcb434ee9a6ba338577a4d1ba546b548e3f 100644 --- a/docs-en/14-reference/12-directory.md +++ b/docs-en/14-reference/12-directory.md @@ -32,7 +32,7 @@ All executable files of TDengine are in the _/usr/local/taos/bin_ directory by d - _taosd-dump-cfg.gdb_: script to facilitate debugging of taosd's gdb execution. :::note -taosdump after version 2.4.0.0 require taosTools as a standalone installation. A few version taosBenchmark is include in taosTools too. +taosdump after version 2.4.0.0 require taosTools as a standalone installation. 
A new version of taosBenchmark is included in taosTools too. ::: :::tip diff --git a/docs-en/14-reference/13-schemaless/13-schemaless.md b/docs-en/14-reference/13-schemaless/13-schemaless.md index d9ce9b434dd14a89d243b2ed629f3fde64e6aba0..acbbb1cd3c5a7c50e226644f2de9e0e77274c6dd 100644 --- a/docs-en/14-reference/13-schemaless/13-schemaless.md +++ b/docs-en/14-reference/13-schemaless/13-schemaless.md @@ -1,19 +1,19 @@ --- title: Schemaless Writing -description: "The Schemaless write method eliminates the need to create super tables/sub tables in advance and automatically creates the storage structure corresponding to the data as it is written to the interface." +description: "The Schemaless write method eliminates the need to create super tables/sub tables in advance and automatically creates the storage structure corresponding to the data, as it is written to the interface." --- -In IoT applications, many data items are often collected for intelligent control, business analysis, device monitoring, etc. Due to the version upgrade of the application logic, or the hardware adjustment of the device itself, the data collection items may change more frequently. To facilitate the data logging work in such cases, TDengine starting from version 2.2.0.0, it provides a series of interfaces to the schemaless writing method, which eliminates the need to create super tables/sub tables in advance and automatically creates the storage structure corresponding to the data as the data is written to the interface. And when necessary, Schemaless writing will automatically add the required columns to ensure that the data written by the user is stored correctly. +In IoT applications, data is collected for many purposes such as intelligent control, business analysis, device monitoring and so on. Due to changes in business or functional requirements or changes in device hardware, the application logic and even the data collected may change. To provide the flexibility needed in such cases and in a rapidly changing IoT landscape, TDengine, starting from version 2.2.0.0, provides a series of interfaces for the schemaless writing method. These interfaces eliminate the need to create super tables and subtables in advance by automatically creating the storage structure corresponding to the data as the data is written to the interface. When necessary, schemaless writing will automatically add the required columns to ensure that the data written by the user is stored correctly. -The schemaless writing method creates super tables and their corresponding sub-tables completely indistinguishable from the super tables and sub-tables created directly via SQL. You can write data directly to them via SQL statements. Note that the names of tables created by schemaless writing are based on fixed mapping rules for tag values, so they are not explicitly ideographic and lack readability. +The schemaless writing method creates super tables and their corresponding subtables. These are completely indistinguishable from the super tables and subtables created directly via SQL. You can write data directly to them via SQL statements. Note that the names of tables created by schemaless writing are based on fixed mapping rules for tag values, so they are not explicitly ideographic and they lack readability. ## Schemaless Writing Line Protocol -TDengine's schemaless writing line protocol supports to be compatible with InfluxDB's Line Protocol, OpenTSDB's telnet line protocol, and OpenTSDB's JSON format protocol. 
However, when using these three protocols, you need to specify in the API the standard of the parsing protocol to be used for the input content. +TDengine's schemaless writing line protocol supports InfluxDB's Line Protocol, OpenTSDB's telnet line protocol, and OpenTSDB's JSON format protocol. However, when using these three protocols, you need to specify in the API the standard of the parsing protocol to be used for the input content. For the standard writing protocols of InfluxDB and OpenTSDB, please refer to the documentation of each protocol. The following is a description of TDengine's extensions, which are based on InfluxDB's line protocol and allow users to control the (super table) schema with finer granularity. -With the following formatting conventions, Schemaless writing uses a single string to express a data row (multiple rows can be passed into the writing API at once to enable bulk writing). +With the following formatting conventions, schemaless writing uses a single string to express a data row (multiple rows can be passed into the writing API at once to enable bulk writing). ```json measurement,tag_set field_set timestamp ``` @@ -23,7 +23,7 @@ where : - measurement will be used as the data table name. It will be separated from tag_set by a comma. - tag_set will be used as tag data in the format `<tag_key>=<tag_value>,<tag_key>=<tag_value>`, i.e. multiple tags' data can be separated by a comma. It is separated from field_set by a space. -- field_set will be used as normal column data in the format of `=,=`, again using a comma to separate multiple normal columns of data. It is separated from the timestamp by space. +- field_set will be used as normal column data in the format of `<field_key>=<field_value>,<field_key>=<field_value>`, again using a comma to separate multiple normal columns of data. It is separated from the timestamp by a space. - The timestamp is the primary key corresponding to the data in this row. All data in tag_set is automatically converted to the NCHAR data type and does not require double quotes ("). @@ -32,7 +32,7 @@ In the schemaless writing data line protocol, each data item in the field_set ne - If there are English double quotes on both sides, it indicates the BINARY(32) type. For example, `"abc"`. - If there are double quotes on both sides and an L prefix, it means NCHAR(32) type. For example, `L"error message"`. -- Spaces, equal signs (=), commas (,), and double quotes (") need to be escaped with a backslash (\) in front. (All refer to the ASCII character) +- Spaces, equal signs (=), commas (,), and double quotes (") need to be escaped with a backslash (\\) in front. (All refer to the ASCII character) - Numeric types will be distinguished from data types by the suffix. | **Serial number** | **Postfix** | **Mapping type** | **Size (bytes)** | @@ -58,26 +58,25 @@ Note that if the wrong case is used when describing the data type suffix, or if Schemaless writes process row data according to the following principles. -1. You can use the following rules to generate the sub-table names: first, combine the measurement name and the key and value of the label into the next string: +1. You can use the following rules to generate the subtable names: first, combine the measurement name and the key and value of the label into the following string: ```json "measurement,tag_key1=tag_value1,tag_key2=tag_value2" ``` Note that tag_key1, tag_key2 are not the original order of the tags entered by the user but the result of using the tag names in ascending order of the strings. Therefore, tag_key1 is not the first tag entered in the line protocol. 
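As the next paragraph describes, the sorted string is then hashed with MD5 and prefixed to form the subtable name. A rough shell illustration of the scheme, using hypothetical measurement and tag names (the real name generation happens inside TDengine):

```bash
# Illustration only: reproduce the naming rule by hand.
key='measurement,tag_key1=tag_value1,tag_key2=tag_value2'  # tag keys already sorted ascending
md5_val=$(printf '%s' "$key" | md5sum | cut -d ' ' -f 1)
echo "t_${md5_val}"   # fixed "t_" prefix + MD5 of the sorted string
```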
-The string's MD5 hash value "md5_val" is calculated after the ranking is completed. The calculation result is then combined with the string to generate the table name: "t_md5_val". "t*" is a fixed prefix that every table generated by this mapping relationship has. 2. +The string's MD5 hash value "md5_val" is calculated after the ranking is completed. The calculation result is then combined with the string to generate the table name: "t_md5_val". "t_" is a fixed prefix that every table generated by this mapping relationship has. 2. If the super table obtained by parsing the line protocol does not exist, this super table is created. -If the sub-table obtained by the parse line protocol does not exist, Schemaless creates the sub-table according to the sub-table name determined in steps 1 or 2. 4. +If the subtable obtained by parsing the line protocol does not exist, Schemaless creates the subtable according to the subtable name determined in steps 1 or 2. 4. If the specified tag or regular column in the data row does not exist, the corresponding tag or regular column is added to the super table (only incremental). 5. If there are some tag columns or regular columns in the super table that are not specified to take values in a data row, then the values of these columns are set to NULL. 6. For BINARY or NCHAR columns, if the length of the value provided in a data row exceeds the column type limit, the maximum length of characters allowed to be stored in the column is automatically increased (only incremented and not decremented) to ensure complete preservation of the data. -7. If the specified data sub-table already exists, and the specified tag column takes a value different from the saved value this time, the value in the latest data row overwrites the old tag column take value. +7. If the specified data subtable already exists, and the specified tag column takes a value different from the saved value this time, the value in the latest data row overwrites the old tag column value. 8. Errors encountered throughout the processing will interrupt the writing process and return an error code. :::tip -All processing logic of schemaless will still follow TDengine's underlying restrictions on data structures, such as the total length of each row of data cannot exceed -16k bytes. See [TAOS SQL Boundary Limits](/taos-sql/limit) for specific constraints in this area. +All processing logic of schemaless will still follow TDengine's underlying restrictions on data structures, such as the total length of each row of data cannot exceed 48k bytes. See [TAOS SQL Boundary Limits](/taos-sql/limit) for specific constraints in this area. ::: ## Time resolution recognition @@ -87,7 +86,7 @@ Three specified modes are supported in the schemaless writing process, as follow | **Serial** | **Value** | **Description** | | -------- | ------------------- | ------------------------------- | | 1 | SML_LINE_PROTOCOL | InfluxDB Line Protocol | -| 2 | SML_TELNET_PROTOCOL | OpenTSDB Text Line Protocol | | 2 | SML_TELNET_PROTOCOL | OpenTSDB Text Line Protocol +| 2 | SML_TELNET_PROTOCOL | OpenTSDB Text Line Protocol | | 3 | SML_JSON_PROTOCOL | JSON protocol format | In the SML_LINE_PROTOCOL parsing mode, the user is required to specify the time resolution of the input timestamp. The available time resolutions are shown in the following table. 
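One place where this choice of time resolution surfaces in practice is taosAdapter's InfluxDB-compatible write endpoint, which, per the InfluxDB v1 API it mirrors, accepts a `precision` query parameter. A hedged sketch, assuming a database named `test` and millisecond timestamps:

```bash
# Sketch only: the trailing timestamp is in milliseconds, so the request
# declares precision=ms instead of the default nanoseconds.
curl -u root:taosdata \
  -d 'meters,location=LosAngeles current=11.8 1626006833639' \
  "http://localhost:6041/influxdb/v1/write?db=test&precision=ms"
```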
@@ -106,8 +105,11 @@ In SML_TELNET_PROTOCOL and SML_JSON_PROTOCOL modes, the time precision is determ ## Data schema mapping rules -This section describes how data for line protocols are mapped to data with a schema. The data measurement in each line protocol is mapped to -The tag name in tag_set is the name of the tag in the data schema, and the name in field_set is the column's name. The following data is used as an example to illustrate the mapping rules. +This section describes how data for line protocols are mapped to data with a schema. The data measurement in each line protocol is mapped as follows: +- The tag name in tag_set is the name of the tag in the data schema +- The name in field_set is the column's name. + +The following data is used as an example to illustrate the mapping rules. ```json st,t1=3,t2=4,t3=t3 c1=3i64,c3="passit",c2=false,c4=4f64 1626006833639000000 @@ -139,7 +141,7 @@ st,t1=3,t2=4,t3=t3 c1=3i64,c5="pass" 1626006833639000000 st,t1=3,t2=4,t3=t3 c1=3i64,c5="passit" 1626006833640000000 ``` -The first line of the line protocol parsing will declare column c5 is a BINARY(4) field, the second line data write will extract column c5 is still a BINARY column. Still, its width is 6, then you need to increase the width of the BINARY field to be able to accommodate the new string. +The first line of the line protocol declares column c5 as a BINARY(4) field. The second write still parses column c5 as a BINARY column, but its width is now 6, so the width of the BINARY field must be increased to accommodate the new string. ```json st,t1=3,t2=4,t3=t3 c1=3i64 1626006833639000000 diff --git a/docs-en/14-reference/_collectd.mdx b/docs-en/14-reference/_collectd.mdx index 1f57d883eec9feadc3cc460bf968b0dd43fedfe8..ce88328098a181de48dcaa080ef45f228b20bf1c 100644 --- a/docs-en/14-reference/_collectd.mdx +++ b/docs-en/14-reference/_collectd.mdx @@ -25,7 +25,7 @@ The default database name written by taosAdapter is `collectd`. You can also mod #collectd collectd uses a plugin mechanism to write the collected monitoring data to different data storage software in various forms. TDengine supports both direct collection plugins and write_tsdb plugins. -#### is configured to receive data from the direct collection plugin +#### Configure the direct collection plugin Modify the relevant configuration items in the collectd configuration file (default location /etc/collectd/collectd.conf). @@ -62,7 +62,7 @@ LoadPlugin write_tsdb ``` -Where fills in the server's domain name or IP address running taosAdapter. Fill in the data that taosAdapter uses to receive the collectd write_tsdb plugin (default is 6047). +Where `<host>` is the domain name or IP address of the server running taosAdapter, and `<port>` is the port that taosAdapter uses to receive collectd write_tsdb plugin data (default is 6047). ```text LoadPlugin write_tsdb diff --git a/docs-en/14-reference/_tcollector.mdx b/docs-en/14-reference/_tcollector.mdx index 85794d54007b70acf205b1bbc897cec1d0c4f824..42b021410e3862c4fa328d8dae40dcac1456e929 100644 --- a/docs-en/14-reference/_tcollector.mdx +++ b/docs-en/14-reference/_tcollector.mdx @@ -17,7 +17,7 @@ password = "taosdata" ... ``` -The taosAdapter writes to the database with the default name `tcollector`. You can also modify the taosAdapter configuration file dbs entry to specify a different name. user and password fill in the actual TDengine configuration values. After changing the configuration file, you need to restart the taosAdapter.
+The taosAdapter writes to the database with the default name `tcollector`. You can also modify the taosAdapter configuration file dbs entry to specify a different name. Fill in the actual user and password for TDengine. After changing the configuration file, you need to restart the taosAdapter. - You can also enable taosAdapter to receive tcollector data by using the taosAdapter command-line parameters or setting environment variables. @@ -25,7 +25,7 @@ The taosAdapter writes to the database with the default name `tcollector`. You c To use TCollector, you need to download its [source code](https://github.com/OpenTSDB/tcollector). Its configuration items are in its source code. Note: TCollector differs significantly from version to version, so here is an example of the latest code for the current master branch (git commit: 37ae920). -Modify the contents of the `collectors/etc/config.py` and `tcollector.py` files. Change the address of the OpenTSDB host to the domain name or IP address of the server where taosAdapter is deployed, and change the port to the port that taosAdapter supports TCollector on (default is 6049). +Modify the contents of the `collectors/etc/config.py` and `tcollector.py` files. Change the address of the OpenTSDB host to the domain name or IP address of the server where taosAdapter is deployed, and change the port to the port on which taosAdapter supports TCollector (default is 6049). An example of the git diff output of the source code changes: diff --git a/docs-en/14-reference/index.md b/docs-en/14-reference/index.md index 89f675902d01ba2d2c1b322408c372429d6bda1c..f350eebfc1a1ca2feaedc18c4b4fa798742e31b4 100644 --- a/docs-en/14-reference/index.md +++ b/docs-en/14-reference/index.md @@ -2,11 +2,11 @@ title: Reference --- -The reference guide is the detailed introduction to TDengine, various TDengine's connectors in different languages, and the tools that come with it. +The reference guide is a detailed introduction to TDengine, including various TDengine connectors in different languages, and the tools that come with TDengine. ```mdx-code-block import DocCardList from '@theme/DocCardList'; import {useCurrentSidebarCategory} from '@docusaurus/theme-common'; -``` \ No newline at end of file +``` diff --git a/docs-en/20-third-party/01-grafana.mdx b/docs-en/20-third-party/01-grafana.mdx index 7239710e0aebdd95977d9b73a5a1a9fccd656542..1a84e02c665d2e49deca35a20b137b205736def5 100644 --- a/docs-en/20-third-party/01-grafana.mdx +++ b/docs-en/20-third-party/01-grafana.mdx @@ -3,13 +3,14 @@ sidebar_label: Grafana title: Grafana --- -TDengine can be quickly integrated with the open-source data visualization system [Grafana](https://www.grafana.com/) to build a data monitoring and alerting system. The whole process does not require any code development. And you can visualize the contents of the data tables in TDengine on a DashBoard. +TDengine can be quickly integrated with the open-source data visualization system [Grafana](https://www.grafana.com/) to build a data monitoring and alerting system. The whole process does not require any code development. You can visualize the contents of the data tables in TDengine on a dashboard. You can learn more about using the TDengine plugin on [GitHub](https://github.com/taosdata/grafanaplugin/blob/master/README.md). ## Prerequisites In order for Grafana to add the TDengine data source successfully, the following preparations are required: + 1. The TDengine cluster is deployed and functioning properly 2.
taosAdapter is installed and running properly. Please refer to the taosAdapter manual for details. @@ -19,21 +20,22 @@ TDengine currently supports Grafana versions 7.0 and above. Users can go to the ## Configuring Grafana -You can download The Grafana plugin for TDengine from . The current latest version is 3.1.4. - -Recommend using the [``grafana-cli`` command-line tool](https://grafana.com/docs/grafana/latest/administration/cli/) for plugin installation. +Follow the installation steps in [Grafana](https://grafana.com/grafana/plugins/tdengine-datasource/?tab=installation) with the [``grafana-cli`` command-line tool](https://grafana.com/docs/grafana/latest/administration/cli/) for plugin installation. -``bash -sudo -u grafana grafana-cli \ - --pluginUrl https://github.com/taosdata/grafanaplugin/releases/download/v3.1.4/tdengine-datasource-3.1.4.zip \ - plugins install tdengine-datasource +```bash +grafana-cli plugins install tdengine-datasource +# with sudo +sudo -u grafana grafana-cli plugins install tdengine-datasource ``` -Or download it locally and extract it to the Grafana plugin directory. +Alternatively, you can manually download the .zip file from [GitHub](https://github.com/taosdata/grafanaplugin/releases/tag/latest) or [Grafana](https://grafana.com/grafana/plugins/tdengine-datasource/?tab=installation) and unpack it into your Grafana plugins directory. ```bash -GF_VERSION=3.1.4 +GF_VERSION=3.2.2 +# from GitHub wget https://github.com/taosdata/grafanaplugin/releases/download/v$GF_VERSION/tdengine-datasource-$GF_VERSION.zip +# from Grafana +wget -O tdengine-datasource-$GF_VERSION.zip https://grafana.com/api/plugins/tdengine-datasource/versions/$GF_VERSION/download ``` Take CentOS 7.2 for example: extract the plugin package to the /var/lib/grafana/plugins directory and restart Grafana. @@ -42,18 +44,10 @@ Take CentOS 7.2 for example, extract the plugin package to /var/lib/grafana/plug sudo unzip tdengine-datasource-$GF_VERSION.zip -d /var/lib/grafana/plugins/ ``` -Grafana versions 7.3+ / 8.x do signature checks on plugins, so you also need to add the following line to the grafana.ini file to use the plugin correctly. - -```ini -[plugins] -allow_loading_unsigned_plugins = tdengine-datasource -``` - -The TDengine plugin can be automatically installed and set up using the following environment variable settings in a Docker environment. +If Grafana is running in a Docker environment, the TDengine plugin can be automatically installed and set up using the following environment variable settings: ```bash -GF_INSTALL_PLUGINS=https://github.com/taosdata/grafanaplugin/releases/download/v3.1.4/tdengine-datasource-3.1.4.zip;tdengine- datasource -GF_PLUGINS_ALLOW_LOADING_UNSIGNED_PLUGINS=tdengine-datasource +GF_INSTALL_PLUGINS=tdengine-datasource ``` ## Using Grafana @@ -62,39 +56,39 @@ GF_PLUGINS_ALLOW_LOADING_UNSIGNED_PLUGINS=tdengine-datasource Users can log in to the Grafana server (username/password: admin/admin) directly through the URL `http://localhost:3000` and add a datasource through `Configuration -> Data Sources` on the left side, as shown in the following figure. -![img](./grafana/add_datasource1.webp) +![TDengine Database TDinsight plugin add datasource 1](./grafana/add_datasource1.webp) Click `Add data source` to enter the Add data source page, and enter TDengine in the query box to add it, as shown in the following figure.
-![img](./grafana/add_datasource2.webp) +![TDengine Database TDinsight plugin add datasource 2](./grafana/add_datasource2.webp) Enter the datasource configuration page, and follow the default prompts to modify the corresponding configuration. -![img](./grafana/add_datasource3.webp) +![TDengine Database TDinsight plugin add database 3](./grafana/add_datasource3.webp) - Host: IP address of the server where the components of the TDengine cluster provide REST service (offered by taosd before 2.4 and by taosAdapter since 2.4) and the port number of the TDengine REST service (6041), by default use `http://localhost:6041`. - User: TDengine user name. - Password: TDengine user password. -Click `Save & Test` to test. Follows are a success. +Click `Save & Test` to test. You should see a success message if the test worked. -![img](./grafana/add_datasource4.webp) +![TDengine Database TDinsight plugin add database 4](./grafana/add_datasource4.webp) ### Create Dashboard -Go back to the main interface to create the Dashboard, click Add Query to enter the panel query page: +Go back to the main interface to create a dashboard and click Add Query to enter the panel query page: -![img](./grafana/create_dashboard1.webp) +![TDengine Database TDinsight plugin create dashboard 1](./grafana/create_dashboard1.webp) As shown above, select the `TDengine` data source in the `Query` and enter the corresponding SQL in the query box below to run the query. -- INPUT SQL: enter the statement to be queried (the result set of the SQL statement should be two columns and multiple rows), for example: `select avg(mem_system) from log.dn where ts >= $from and ts < $to interval($interval)`, where, from, to and interval are built-in variables of the TDengine plugin, indicating the range and time interval of queries fetched from the Grafana plugin panel. In addition to the built-in variables, ` custom template variables are also supported. +- INPUT SQL: enter the statement to be queried (the result set of the SQL statement should be two columns and multiple rows), for example: `select avg(mem_system) from log.dn where ts >= $from and ts < $to interval($interval)`, where from, to and interval are built-in variables of the TDengine plugin, indicating the range and time interval of queries fetched from the Grafana plugin panel. In addition to the built-in variables, custom template variables are also supported. - ALIAS BY: This allows you to set the current query alias. - GENERATE SQL: Clicking this button will automatically replace the corresponding variables and generate the final executed statement. Follow the default prompt to query the average system memory usage for the specified interval on the server where the current TDengine deployment is located as follows. -![img](./grafana/create_dashboard2.webp) +![TDengine Database TDinsight plugin create dashboard 2](./grafana/create_dashboard2.webp) > For more information on how to use Grafana to create the appropriate monitoring interface and for more details on using Grafana, refer to the official Grafana [documentation](https://grafana.com/docs/). diff --git a/docs-en/20-third-party/03-telegraf.md b/docs-en/20-third-party/03-telegraf.md index 0d563c9ff36268ac27e18e21fefed789789dc1a7..6a7aac322f9def880f58d7ed0adcc4a8f3687ed1 100644 --- a/docs-en/20-third-party/03-telegraf.md +++ b/docs-en/20-third-party/03-telegraf.md @@ -5,7 +5,7 @@ title: Telegraf writing import Telegraf from "../14-reference/_telegraf.mdx" -Telegraf is a viral metrics collection open-source software.
Telegraf can collect the operation information of various components without writing any scripts to collect regularly, reducing the difficulty of data acquisition. +Telegraf is a popular open-source metrics collection software. Telegraf can collect the operation information of various components without having to write any scripts for periodic collection, reducing the difficulty of data acquisition. Telegraf's data can be written to TDengine by simply adding the output configuration of Telegraf to the URL corresponding to taosAdapter and modifying several configuration items. The presence of Telegraf data in TDengine can take advantage of TDengine's efficient storage query performance and clustering capabilities for time-series data. diff --git a/docs-en/20-third-party/05-collectd.md b/docs-en/20-third-party/05-collectd.md index 609e55842ab35cdc2d394663f5450f908e49f7f7..db62f2ecd1afb4936466ca0243a7e14ff294f8b6 100644 --- a/docs-en/20-third-party/05-collectd.md +++ b/docs-en/20-third-party/05-collectd.md @@ -6,7 +6,7 @@ title: collectd writing import CollectD from "../14-reference/_collectd.mdx" -collectd is a daemon used to collect system performance metric data. collectd provides various storage mechanisms to store different values. It periodically counts system performance statistics number while the system is running and storing information. You can use this information to help identify current system performance bottlenecks and predict future system load. +collectd is a daemon used to collect system performance metric data. collectd provides various storage mechanisms to store different values. It periodically counts system performance statistics while the system is running and stores the information. You can use this information to help identify current system performance bottlenecks and predict future system load. You can write the data collected by collectd to TDengine by simply modifying the configuration of collectd to the domain name (or IP address) and corresponding port of the server running taosAdapter. It can take full advantage of TDengine's efficient storage query performance and clustering capability for time-series data. diff --git a/docs-en/20-third-party/06-statsd.md b/docs-en/20-third-party/06-statsd.md index bf4b6c7ab5dac4114cad0d650b2aeb026a67581c..40e927b9fd1d2eca9d454a987ac51d533eb75005 100644 --- a/docs-en/20-third-party/06-statsd.md +++ b/docs-en/20-third-party/06-statsd.md @@ -7,7 +7,7 @@ import StatsD from "../14-reference/_statsd.mdx" StatsD is a simple daemon for aggregating application metrics, which has evolved rapidly in recent years into a unified protocol for collecting application performance metrics. -You can write StatsD data to TDengine by simply modifying in the configuration file of StatsD with the domain name (or IP address) of the server running taosAdapter and the corresponding port. It can take full advantage of TDengine's efficient storage query performance and clustering capabilities for time-series data. +You can write StatsD data to TDengine by simply modifying the configuration file of StatsD with the domain name (or IP address) of the server running taosAdapter and the corresponding port. It can take full advantage of TDengine's efficient storage query performance and clustering capabilities for time-series data.
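As a quick sanity check of such a setup, you can emit one test metric to StatsD and then query TDengine for it. This is only a sketch under stated assumptions: StatsD listening on its default UDP port 8125 on the local host, with its backend already pointed at taosAdapter as described above.

```bash
# hedged sketch: send one test counter to a local StatsD instance
# (8125 is StatsD's default UDP port; adjust host/port to your setup)
echo "foo:1|c" | nc -u -w0 127.0.0.1 8125
```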
## Prerequisites diff --git a/docs-en/20-third-party/07-icinga2.md b/docs-en/20-third-party/07-icinga2.md index ba9cde8cea7504ac9df871d5f6aa42cc5c94d895..b27196dfe313b468eeb73ff4b114d9d955618c3e 100644 --- a/docs-en/20-third-party/07-icinga2.md +++ b/docs-en/20-third-party/07-icinga2.md @@ -5,7 +5,7 @@ title: icinga2 writing import Icinga2 from "../14-reference/_icinga2.mdx" -icinga2 is an open-source software monitoring host and network initially developed from the Nagios network monitoring application. Currently, icinga2 is distributed under the GNU GPL v2 license. +icinga2 is an open-source host and network monitoring software, initially developed from the Nagios network monitoring application. Currently, icinga2 is distributed under the GNU GPL v2 license. You can write the data collected by icinga2 to TDengine by simply modifying the icinga2 configuration to point to the taosAdapter server and the corresponding port, taking advantage of TDengine's efficient storage and query performance and clustering capabilities for time-series data. diff --git a/docs-en/20-third-party/09-emq-broker.md b/docs-en/20-third-party/09-emq-broker.md index 560c6463b59b00a362023d6cfa44cf833419a9ea..d3eafebc14e8ddc29b03abf8785a6c0a013ef014 100644 --- a/docs-en/20-third-party/09-emq-broker.md +++ b/docs-en/20-third-party/09-emq-broker.md @@ -3,7 +3,7 @@ sidebar_label: EMQX Broker title: EMQX Broker writing --- -MQTT is a popular IoT data transfer protocol, [EMQX](https://github.com/emqx/emqx) is an open-source MQTT Broker software, without any code, only need to use "rules" in EMQX Dashboard to do simple configuration. You can write MQTT data directly to TDengine. EMQX supports saving data to TDengine by sending it to web services and provides a native TDengine driver for direct saving in the Enterprise Edition. Please refer to the [EMQX official documentation](https://www.emqx.io/docs/en/v4.4/rule/rule-engine.html) for details on how to use it. tdengine). +MQTT is a popular IoT data transfer protocol. [EMQX](https://github.com/emqx/emqx) is an open-source MQTT Broker software. You can write MQTT data directly to TDengine without any code. You only need to set up "rules" in EMQX Dashboard to create a simple configuration. EMQX supports saving data to TDengine by sending data to a web service and provides a native TDengine driver for direct saving in the Enterprise Edition. Please refer to the [EMQX official documentation](https://www.emqx.io/docs/en/v4.4/rule/rule-engine.html) for details on how to use it. ## Prerequisites @@ -44,25 +44,25 @@ Since the configuration interface of EMQX differs from version to version, here Use your browser to open the URL `http://IP:18083` and log in to EMQX Dashboard. The initial installation username is `admin` and the password is: `public`. -![img](./emqx/login-dashboard.webp) +![TDengine Database EMQX login dashboard](./emqx/login-dashboard.webp) ### Creating Rule Select "Rule" in the "Rule Engine" on the left and click the "Create" button:
-![img](./emqx/rule-engine.webp) +![TDengine Database EMQX rule engine](./emqx/rule-engine.webp) ### Edit SQL fields -![img](./emqx/create-rule.webp) +![TDengine Database EMQX create rule](./emqx/create-rule.webp) ### Add "action handler" -![img](./emqx/add-action-handler.webp) +![TDengine Database EMQX add action handler](./emqx/add-action-handler.webp) ### Add "Resource" -![img](./emqx/create-resource.webp) +![TDengine Database EMQX create resource](./emqx/create-resource.webp) Select "Data to Web Service" and click the "New Resource" button. @@ -70,13 +70,13 @@ Select "Data to Web Service" and click the "New Resource" button. Select "Data to Web Service" and fill in the request URL as the address and port of the server running taosAdapter (default is 6041). Leave the other properties at their default values. -![img](./emqx/edit-resource.webp) +![TDengine Database EMQX edit resource](./emqx/edit-resource.webp) ### Edit "action" Edit the resource configuration to add the key/value pairing for Authorization. Please refer to the [TDengine REST API documentation](https://docs.taosdata.com/reference/rest-api/) for details on authorization. Enter the rule engine replacement template in the message body. -![img](./emqx/edit-action.webp) +![TDengine Database EMQX edit action](./emqx/edit-action.webp) ## Compose program to mock data @@ -163,7 +163,7 @@ Edit the resource configuration to add the key/value pairing for Authorization. Note: `CLIENT_NUM` in the code can be set to a smaller value at the beginning of the test to avoid hardware performance not being able to handle a larger number of concurrent clients. -![img](./emqx/client-num.webp) +![TDengine Database EMQX client num](./emqx/client-num.webp) ## Execute tests to simulate sending MQTT data @@ -172,19 +172,19 @@ npm install mqtt mockjs --save ---registry=https://registry.npm.taobao.org node mock.js ``` -![img](./emqx/run-mock.webp) +![TDengine Database EMQX run mock](./emqx/run-mock.webp) ## Verify that EMQX is receiving data Refresh the EMQX Dashboard rules engine interface to see how many records were received correctly: -![img](./emqx/check-rule-matched.webp) +![TDengine Database EMQX rule matched](./emqx/check-rule-matched.webp) ## Verify that data is written to TDengine Use the TDengine CLI program to log in and query the appropriate databases and tables to verify that the data is being written to TDengine correctly: -![img](./emqx/check-result-in-taos.webp) +![TDengine Database EMQX result in taos](./emqx/check-result-in-taos.webp) Please refer to the [TDengine official documentation](https://docs.taosdata.com/) for more details on how to use TDengine, and the [EMQX official documentation](https://www.emqx.io/docs/en/v4.4/rule/rule-engine.html) for details on how to use EMQX. diff --git a/docs-en/20-third-party/11-kafka.md b/docs-en/20-third-party/11-kafka.md index 2da9a86b7d3def338497c9c0f3481918b566aaed..6720af8bf81ea2f4fce415a54847453f578ababf 100644 --- a/docs-en/20-third-party/11-kafka.md +++ b/docs-en/20-third-party/11-kafka.md @@ -7,17 +7,17 @@ TDengine Kafka Connector contains two plugins: TDengine Source Connector and TDe ## What is Kafka Connect? -Kafka Connect is a component of Apache Kafka that enables other systems, such as databases, cloud services, file systems, etc., to connect to Kafka easily. Data can flow from other software to Kafka via Kafka Connect and Kafka to other systems via Kafka Connect.
Plugins that read data from other software are called Source Connectors, and plugins that write data to other software are called Sink Connectors. Neither Source Connector nor Sink Connector will directly connect to Kafka Broker, and Source Connector transfers data to Kafka Connect. Sink Connector receives data from Kafka Connect. +Kafka Connect is a component of [Apache Kafka](https://kafka.apache.org/) that enables other systems, such as databases, cloud services, file systems, etc., to connect to Kafka easily. Data can flow from other software to Kafka via Kafka Connect and Kafka to other systems via Kafka Connect. Plugins that read data from other software are called Source Connectors, and plugins that write data to other software are called Sink Connectors. Neither Source Connector nor Sink Connector will directly connect to Kafka Broker, and Source Connector transfers data to Kafka Connect. Sink Connector receives data from Kafka Connect. -![](kafka/Kafka_Connect.webp) +![TDengine Database Kafka Connector -- Kafka Connect](kafka/Kafka_Connect.webp) TDengine Source Connector is used to read data from TDengine in real-time and send it to Kafka Connect. Users can use The TDengine Sink Connector to receive data from Kafka Connect and write it to TDengine. -![](kafka/streaming-integration-with-kafka-connect.webp) +![TDengine Database Kafka Connector -- streaming integration with kafka connect](kafka/streaming-integration-with-kafka-connect.webp) ## What is Confluent? -Confluent adds many extensions to Kafka. include: +[Confluent](https://www.confluent.io/) adds many extensions to Kafka, including: 1. Schema Registry 2. REST Proxy @@ -26,7 +26,7 @@ Confluent adds many extensions to Kafka. include: 5. GUI for managing and monitoring Kafka - Confluent Control Center Some of these extensions are available in the community version of Confluent. Some are only available in the enterprise version. -![](kafka/confluentPlatform.webp) +![TDengine Database Kafka Connector -- Confluent platform](kafka/confluentPlatform.webp) Confluent Enterprise Edition provides the `confluent` command-line tool to manage various components. @@ -79,10 +79,10 @@ Development: false git clone https://github.com/taosdata/kafka-connect-tdengine.git cd kafka-connect-tdengine mvn clean package -unzip -d $CONFLUENT_HOME/share/confluent-hub-components/ target/components/packages/taosdata-kafka-connect-tdengine-0.1.0.zip +unzip -d $CONFLUENT_HOME/share/java/ target/components/packages/taosdata-kafka-connect-tdengine-*.zip ``` -The above script first clones the project source code and then compiles and packages it with Maven. After the package is complete, the zip package of the plugin is generated in the `target/components/packages/` directory. Unzip this zip package to the path where the plugin is installed. The path to install the plugin is in the configuration file `$CONFLUENT_HOME/etc/kafka/connect-standalone.properties`. The default path is `$CONFLUENT_HOME/share/confluent-hub-components/`. +The above script first clones the project source code and then compiles and packages it with Maven. After the package is complete, the zip package of the plugin is generated in the `target/components/packages/` directory. Unzip this zip package to the plugin path. We used `$CONFLUENT_HOME/share/java/` above because it's a built-in plugin path. ### Install with confluent-hub @@ -96,7 +96,7 @@ confluent local services start ``` :::note -Be sure to install the plugin before starting Confluent. Otherwise, there will be a class not found error.
The log of Kafka Connect (default path: /tmp/confluent.xxxx/connect/logs/connect.log) will output the successfully installed plugin, which users can use to determine whether the plugin is installed successfully. +Be sure to install the plugin before starting Confluent. Otherwise, Kafka Connect will fail to discover the plugins. ::: :::tip @@ -123,6 +123,59 @@ Control Center is [UP] To clear data, execute `rm -rf /tmp/confluent.106668`. ::: +### Check Confluent Services Status + +Use the command below to check the status of all services: + +``` +confluent local services status +``` + +The expected output is: +``` +Connect is [UP] +Control Center is [UP] +Kafka is [UP] +Kafka REST is [UP] +ksqlDB Server is [UP] +Schema Registry is [UP] +ZooKeeper is [UP] +``` + +### Check Successfully Loaded Plugins + +After Kafka Connect has started completely, you can use the command below to check whether the plugins are installed successfully: +``` +confluent local services connect plugin list +``` + +The output should contain `TDengineSinkConnector` and `TDengineSourceConnector`, as below: + +``` +Available Connect Plugins: +[ + { + "class": "com.taosdata.kafka.connect.sink.TDengineSinkConnector", + "type": "sink", + "version": "1.0.0" + }, + { + "class": "com.taosdata.kafka.connect.source.TDengineSourceConnector", + "type": "source", + "version": "1.0.0" + }, +...... +``` + +If not, please check the log file of Kafka Connect. To view the log file path, please execute: + +``` +echo `cat /tmp/confluent.current`/connect/connect.stdout +``` +It should produce a path like: `/tmp/confluent.104086/connect/connect.stdout` + +Besides the log file `connect.stdout`, there is a file named `connect.properties`. At the end of this file, you can see the effective `plugin.path`, which is a series of paths joined by commas. If Kafka Connect cannot find the plugins, it is probably because the installation path is not included in `plugin.path`. + ## The use of TDengine Sink Connector The role of the TDengine Sink Connector is to synchronize the data of the specified topic to TDengine. Users do not need to create databases and super tables in advance. The name of the target database can be specified manually (see the configuration parameter connection.database), or it can be generated according to specific rules (see the configuration parameter connection.database.prefix). @@ -142,7 +195,7 @@ vi sink-demo.properties sink-demo.properties' content is as follows: ```ini title="sink-demo.properties" -name=tdengine-sink-demo +name=TDengineSinkConnector connector.class=com.taosdata.kafka.connect.sink.TDengineSinkConnector tasks.max=1 topics=meters @@ -151,6 +204,7 @@ connection.user=root connection.password=taosdata connection.database=power db.schemaless=line +data.precision=ns key.converter=org.apache.kafka.connect.storage.StringConverter value.converter=org.apache.kafka.connect.storage.StringConverter ``` @@ -177,6 +231,7 @@ If the above command is executed successfully, the output is as follows: "connection.url": "jdbc:TAOS://127.0.0.1:6030", "connection.user": "root", "connector.class": "com.taosdata.kafka.connect.sink.TDengineSinkConnector", + "data.precision": "ns", "db.schemaless": "line", "key.converter": "org.apache.kafka.connect.storage.StringConverter", "tasks.max": "1", @@ -221,14 +276,14 @@ Database changed.
taos> select * from meters; ts | current | voltage | phase | groupid | location | =============================================================================================================================================================== - 2022-03-28 09:56:51.249000000 | 11.800000000 | 221.000000000 | 0.280000000 | 2 | California.LoSangeles | - 2022-03-28 09:56:51.250000000 | 13.400000000 | 223.000000000 | 0.290000000 | 2 | California.LoSangeles | - 2022-03-28 09:56:51.249000000 | 10.800000000 | 223.000000000 | 0.290000000 | 3 | California.LoSangeles | - 2022-03-28 09:56:51.250000000 | 11.300000000 | 221.000000000 | 0.350000000 | 3 | California.LoSangeles | + 2022-03-28 09:56:51.249000000 | 11.800000000 | 221.000000000 | 0.280000000 | 2 | California.LosAngeles | + 2022-03-28 09:56:51.250000000 | 13.400000000 | 223.000000000 | 0.290000000 | 2 | California.LosAngeles | + 2022-03-28 09:56:51.249000000 | 10.800000000 | 223.000000000 | 0.290000000 | 3 | California.LosAngeles | + 2022-03-28 09:56:51.250000000 | 11.300000000 | 221.000000000 | 0.350000000 | 3 | California.LosAngeles | Query OK, 4 row(s) in set (0.004208s) ``` -If you see the above data, the synchronization is successful. If not, check the logs of Kafka Connect. For detailed description of configuration parameters, see [Configuration Reference](#Configuration Reference). +If you see the above data, the synchronization is successful. If not, check the logs of Kafka Connect. For a detailed description of configuration parameters, see [Configuration Reference](#configuration-reference). ## The use of TDengine Source Connector @@ -356,6 +411,7 @@ The following configuration items apply to TDengine Sink Connector and TDengine 4. `max.retries`: The maximum number of retries when an error occurs. Defaults to 1. 5. `retry.backoff.ms`: The time interval for retry when sending an error. The unit is milliseconds. The default is 3000. 6. `db.schemaless`: Data format, could be one of `line`, `json`, and `telnet`. Represent InfluxDB line protocol format, OpenTSDB JSON format, and OpenTSDB Telnet line protocol format. +7. `data.precision`: The time precision when using InfluxDB line protocol format data; it can be one of `ms`, `us`, and `ns`. The default is `ns`. ### TDengine Source Connector specific configuration @@ -366,7 +422,13 @@ The following configuration items apply to TDengine Sink Connector and TDengine 5. `fetch.max.rows`: The maximum number of rows retrieved when retrieving the database. Default is 100. 6. `out.format`: The data format. The value could be line or json. The line represents the InfluxDB Line protocol format, and json represents the OpenTSDB JSON format. Default is `line`. -## feedback + +## Other notes + +1. To install the plugin to a custom location, refer to https://docs.confluent.io/home/connect/self-managed/install.html#install-connector-manually. +2. To use Kafka Connect without Confluent, refer to https://kafka.apache.org/documentation/#connect.
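For a quick end-to-end check of the sink path described above, you can append one more record to the source topic and re-run the query. This is a hedged sketch: the broker address is an assumption, and the console producer invocation follows the stock Kafka/Confluent tooling rather than anything specified in this document.

```bash
# hedged sketch: push one line-protocol record into the demo's `meters` topic
# (localhost:9092 is an assumed broker address)
echo "meters,location=California.LosAngeles,groupid=2 current=11.9,voltage=221,phase=0.30 1648432611250000000" \
  | kafka-console-producer --bootstrap-server localhost:9092 --topic meters
```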
+ +## Feedback https://github.com/taosdata/kafka-connect-tdengine/issues diff --git a/docs-en/21-tdinternal/01-arch.md b/docs-en/21-tdinternal/01-arch.md index 2c430908e410c7ae8e6f09a3f7e2d059f906fda5..4d8bed4d2d6b3a0404e10213aeab599767325cc2 100644 --- a/docs-en/21-tdinternal/01-arch.md +++ b/docs-en/21-tdinternal/01-arch.md @@ -5,38 +5,38 @@ title: Architecture ## Cluster and Primary Logic Unit -The design of TDengine is based on the assumption that any hardware or software system is not 100% reliable and that no single node can provide sufficient computing and storage resources to process massive data. Therefore, TDengine has been designed in a distributed and high-reliability architecture since day one of the development, so that hardware failure or software failure of any single even multiple servers will not affect the availability and reliability of the system. At the same time, through node virtualization and automatic load-balancing technology, TDengine can make the most efficient use of computing and storage resources in heterogeneous clusters to reduce hardware resources significantly. +The design of TDengine is based on the assumption that any hardware or software system is not 100% reliable and that no single node can provide sufficient computing and storage resources to process massive data. Therefore, since day one, TDengine has been designed as a natively distributed system, with high-reliability architecture. Hardware failure or software failure of a single, or even multiple servers will not affect the availability and reliability of the system. At the same time, through node virtualization and automatic load-balancing technology, TDengine can make the most efficient use of computing and storage resources in heterogeneous clusters to reduce hardware resource needs significantly. ### Primary Logic Unit -Logical structure diagram of TDengine distributed architecture as following: +The logical structure diagram of TDengine's distributed architecture is as follows: -![TDengine architecture diagram](structure.webp) +![TDengine Database architecture diagram](structure.webp)
Figure 1: TDengine architecture diagram
A complete TDengine system runs on one or more physical nodes. Logically, it includes data node (dnode), TDengine client driver (TAOSC) and application (app). There are one or more data nodes in the system, which form a cluster. The application interacts with the TDengine cluster through TAOSC's API. The following is a brief introduction to each logical unit. **Physical node (pnode)**: A pnode is a computer that runs independently and has its own computing, storage and network capabilities. It can be a physical machine, virtual machine, or Docker container installed with OS. The physical node is identified by its configured FQDN (Fully Qualified Domain Name). TDengine relies entirely on FQDN for network communication. If you don't know about FQDN, please check [wikipedia](https://en.wikipedia.org/wiki/Fully_qualified_domain_name). -**Data node (dnode):** A dnode is a running instance of the TDengine server-side execution code taosd on a physical node. A working system must have at least one data node. A dnode contains zero to multiple logical virtual nodes (VNODE), zero or at most one logical management node (mnode). The unique identification of a dnode in the system is determined by the instance's End Point (EP). EP is a combination of FQDN (Fully Qualified Domain Name) of the physical node where the dnode is located and the network port number (Port) configured by the system. By configuring different ports, a physical node (a physical machine, virtual machine or container) can run multiple instances or have multiple data nodes. +**Data node (dnode):** A dnode is a running instance of the TDengine server-side execution code taosd on a physical node (pnode). A working system must have at least one data node. A dnode contains zero to multiple logical virtual nodes (VNODE) and zero or at most one logical management node (mnode). The unique identification of a dnode in the system is determined by the instance's End Point (EP). EP is a combination of FQDN (Fully Qualified Domain Name) of the physical node where the dnode is located and the network port number (Port) configured by the system. By configuring different ports, a physical node (a physical machine, virtual machine or container) can run multiple instances or have multiple data nodes. -**Virtual node (vnode)**: To better support data sharding, load balancing and prevent data from overheating or skewing, data nodes are virtualized into multiple virtual nodes (vnode, V2, V3, V4, etc. in the figure). Each vnode is a relatively independent work unit, which is the basic unit of time-series data storage and has independent running threads, memory space and persistent storage path. A vnode contains a certain number of tables (data collection points). When a new table is created, the system checks whether a new vnode needs to be created. The number of vnodes that can be created on a data node depends on the hardware capacities of the physical node where the data node is located. A vnode belongs to only one DB, but a DB can have multiple vnodes. In addition to the stored time-series data, a vnode also stores the schema and tag values of the included tables. A virtual node is uniquely identified in the system by the EP of the data node and the VGroup ID to which it belongs and is created and managed by the management node. +**Virtual node (vnode)**: To better support data sharding, load balancing and prevent data from overheating or skewing, data nodes are virtualized into multiple virtual nodes (vnode, V2, V3, V4, etc. in the figure). 
Each vnode is a relatively independent work unit, which is the basic unit of time-series data storage and has independent running threads, memory space and persistent storage path. A vnode contains a certain number of tables (data collection points). When a new table is created, the system checks whether a new vnode needs to be created. The number of vnodes that can be created on a data node depends on the capacity of the hardware of the physical node where the data node is located. A vnode belongs to only one DB, but a DB can have multiple vnodes. In addition to the stored time-series data, a vnode also stores the schema and tag values of the included tables. A virtual node is uniquely identified in the system by the EP of the data node and the VGroup ID to which it belongs and is created and managed by the management node. -**Management node (mnode)**: A virtual logical unit responsible for monitoring and maintaining the running status of all data nodes and load balancing among nodes (M in the figure). At the same time, the management node is also responsible for the storage and management of metadata (including users, databases, tables, static tags, etc.), so it is also called Meta Node. Multiple (up to 5) mnodes can be configured in a TDengine cluster, and they are automatically constructed into a virtual management node group (M0, M1, M2 in the figure). The master/slave mechanism is adopted for the mnode group and the data synchronization is carried out in a strongly consistent way. Any data update operation can only be executed on the master. The creation of mnode cluster is completed automatically by the system without manual intervention. There is at most one mnode on each dnode, which is uniquely identified by the EP of the data node to which it belongs. Each dnode automatically obtains the EP of the dnode where all mnodes in the whole cluster are located through internal messaging interaction. +**Management node (mnode)**: A virtual logical unit responsible for monitoring and maintaining the running status of all data nodes and load balancing among nodes (M in the figure). At the same time, the management node is also responsible for the storage and management of metadata (including users, databases, tables, static tags, etc.), so it is also called Meta Node. Multiple (up to 5) mnodes can be configured in a TDengine cluster, and they are automatically constructed into a virtual management node group (M0, M1, M2 in the figure). The master/slave mechanism is adopted for the mnode group and the data synchronization is carried out in a strongly consistent way. Any data update operation can only be executed on the master. The creation of mnode cluster is completed automatically by the system without manual intervention. There is at most one mnode on each dnode, which is uniquely identified by the EP of the data node to which it belongs. Each dnode automatically obtains the EP of the dnode where all mnodes in the whole cluster are located, through internal messaging interaction. -**Virtual node group (VGroup)**: Vnodes on different data nodes can form a virtual node group to ensure the high availability of the system. The virtual node group is managed in a master/slave mechanism. Write operations can only be performed on the master vnode, and then replicated to slave vnodes, thus ensuring that one single replica of data is copied on multiple physical nodes. The number of virtual nodes in a vgroup equals the number of data replicas. 
If the number of replicas of a DB is N, the system must have at least N data nodes. The number of replicas can be specified by the parameter `“replica”` when creating DB, and the default is 1. Using the multi-replication feature of TDengine, the same high data reliability can be achieved without the need for expensive storage devices such as disk arrays. Virtual node group is created and managed by the management node, and the management node assigns a system unique ID, aka VGroup ID. If two virtual nodes have the same vnode group ID, means that they belong to the same group and the data is backed up to each other. The number of virtual nodes in a virtual node group can be dynamically changed, allowing only one, that is, no data replication. VGroup ID is never changed. Even if a virtual node group is deleted, its ID will not be reused. +**Virtual node group (VGroup)**: Vnodes on different data nodes can form a virtual node group to ensure the high availability of the system. The virtual node group is managed in a master/slave mechanism. Write operations can only be performed on the master vnode, and then replicated to slave vnodes, thus ensuring that one single replica of data is copied on multiple physical nodes. The number of virtual nodes in a vgroup equals the number of data replicas. If the number of replicas of a DB is N, the system must have at least N data nodes. The number of replicas can be specified by the parameter `“replica”` when creating a DB, and the default is 1. Using the multi-replication feature of TDengine, the same high data reliability can be achieved without the need for expensive storage devices such as disk arrays. Virtual node groups are created and managed by the management node, and the management node assigns a system unique ID, aka VGroup ID. If two virtual nodes have the same vnode group ID, it means that they belong to the same group and the data is backed up to each other. The number of virtual nodes in a virtual node group can be dynamically changed; it can be reduced to one, in which case there is no data replication. VGroup ID is never changed. Even if a virtual node group is deleted, its ID will not be reused. -**TAOSC**: TAOSC is the driver provided by TDengine to applications, which is responsible for dealing with the interaction between application and cluster, and provides the native interface of C/C++ language, which is embedded in JDBC, C #, Python, Go, Node.js language connection libraries. Applications interact with the whole cluster through TAOSC instead of directly connecting to data nodes in the cluster. This module is responsible for obtaining and caching metadata; forwarding requests for insertion, query, etc. to the correct data node; when returning the results to the application, TAOSC also needs to be responsible for the final level of aggregation, sorting, filtering and other operations. For JDBC, C/C++/C #/Python/Go/Node.js interfaces, this module runs on the physical node where the application is located. At the same time, in order to support the fully distributed RESTful interface, TAOSC has a running instance on each dnode of TDengine cluster. +**TAOSC**: TAOSC is the driver provided by TDengine to applications. It is responsible for dealing with the interaction between application and cluster, and provides the native interface for the C/C++ language. It is also embedded in the JDBC, C#, Python, Go, Node.js language connection libraries.
Applications interact with the whole cluster through TAOSC instead of directly connecting to data nodes in the cluster. This module is responsible for obtaining and caching metadata; forwarding requests for insertion, query, etc. to the correct data node; when returning the results to the application, TAOSC also needs to be responsible for the final level of aggregation, sorting, filtering and other operations. For JDBC, C/C++/C#/Python/Go/Node.js interfaces, this module runs on the physical node where the application is located. At the same time, in order to support the fully distributed RESTful interface, TAOSC has a running instance on each dnode of TDengine cluster. ### Node Communication -**Communication mode**: The communication among each data node of TDengine system, and among the client driver and each data node is carried out through TCP/UDP. Considering an IoT scenario, the data writing packets are generally not large, so TDengine uses UDP in addition to TCP for transmission, because UDP is more efficient and is not limited by the number of connections. TDengine implements its own timeout, retransmission, confirmation and other mechanisms to ensure reliable transmission of UDP. For packets with a data volume of less than 15K, UDP is adopted for transmission, and TCP is automatically adopted for transmission of packets with a data volume of more than 15K or query operations. At the same time, TDengine will automatically compress/decompress the data, digital sign/authenticate the data according to the configuration and data packet. For data replication among data nodes, only TCP is used for data transportation. +**Communication mode**: The communication among each data node of TDengine system, and among the client driver and each data node is carried out through TCP/UDP. Considering an IoT scenario, the data writing packets are generally not large, so TDengine uses UDP in addition to TCP for transmission, because UDP is more efficient and is not limited by the number of connections. TDengine implements its own timeout, retransmission, confirmation and other mechanisms to ensure reliable transmission of UDP. For packets with a data volume of less than 15K, UDP is adopted for transmission, and TCP is automatically adopted for transmission of packets with a data volume of more than 15K or query operations. At the same time, TDengine will automatically compress/decompress the data, digitally sign/authenticate the data according to the configuration and data packet. For data replication among data nodes, only TCP is used for data transportation. **FQDN configuration:** A data node has one or more FQDNs, which can be specified in the system configuration file taos.cfg with the parameter “fqdn”. If it is not specified, the system will automatically use the hostname of the computer as its FQDN. If the node is not configured with FQDN, you can directly set the configuration parameter “fqdn” of the node to its IP address. However, IP is not recommended because IP address may be changed, and once it changes, the cluster will not work properly. The EP (End Point) of a data node consists of FQDN + Port. With FQDN, it is necessary to ensure the DNS service is running, or hosts files on nodes are configured properly. **Port configuration**: The external port of a data node is determined by the system configuration parameter “serverPort” in TDengine, and the port for internal communication of cluster is serverPort+5. 
The data replication operation among data nodes in the cluster also occupies a TCP port, which is serverPort+10. In order to support multithreading and efficient processing of UDP data, each internal and external UDP connection needs to occupy 5 consecutive ports. Therefore, the total port range of a data node will be serverPort to serverPort + 10, for a total of 11 TCP/UDP ports. To run the system, make sure that the firewall keeps these ports open. Each data node can be configured with a different serverPort. -**Cluster external connection**: TDengine cluster can accommodate one single, multiple or even thousands of data nodes. The application only needs to initiate a connection to any data node in the cluster. The network parameter required for connection is the End Point (FQDN plus configured port number) of a data node. When starting the application taos through CLI, the FQDN of the data node can be specified through the option `-h`, and the configured port number can be specified through `-p`. If the port is not configured, the system configuration parameter “serverPort” of TDengine will be adopted. +**Cluster external connection**: TDengine cluster can accommodate a single, multiple or even thousands of data nodes. The application only needs to initiate a connection to any data node in the cluster. The network parameter required for connection is the End Point (FQDN plus configured port number) of a data node. When starting the application taos through CLI, the FQDN of the data node can be specified through the option `-h`, and the configured port number can be specified through `-p`. If the port is not configured, the system configuration parameter “serverPort” of TDengine will be adopted. **Inter-cluster communication**: Data nodes connect with each other through TCP/UDP. When a data node starts, it will obtain the EP information of the dnode where the mnode is located, and then establish a connection with the mnode in the system to exchange information. There are three steps to obtain EP information of the mnode: @@ -44,31 +44,33 @@ A complete TDengine system runs on one or more physical nodes. Logically, it inc 2. Check the system configuration file taos.cfg to obtain node configuration parameters “firstEp” and “secondEp” (the node specified by these two parameters can be a normal node without mnode, in this case, the node will try to redirect to the mnode node when connected). If these two configuration parameters do not exist or do not exist in taos.cfg, or are invalid, skip to the third step; 3. Set your own EP as a mnode EP and run it independently. After obtaining the mnode EP list, the data node initiates the connection. It will successfully join the working cluster after connection. If not successful, it will try the next item in the mnode EP list. If all attempts are made, but the connection still fails, sleep for a few seconds before trying again. -**The choice of MNODE**: TDengine logically has a management node, but there is no separated execution code. The server-side only has a set of execution code taosd. So which data node will be the management node? This is determined automatically by the system without any manual intervention. The principle is as follows: when a data node starts, it will check its End Point and compare it with the obtained mnode EP List. If its EP exists in it, the data node shall start the mnode module and become a mnode. If your own EP is not in the mnode EP List, the mnode module will not start. 
During the system operation, due to load balancing, downtime and other reasons, mnode may migrate to the new dnode, while totally transparent without manual intervention. The modification of configuration parameters is the decision made by mnode itself according to resources usage. +**The choice of MNODE**: TDengine logically has a management node, but there is no separate execution code. The server-side only has one set of execution code, taosd. So which data node will be the management node? This is determined automatically by the system without any manual intervention. The principle is as follows: when a data node starts, it will check its End Point and compare it with the obtained mnode EP List. If its EP exists in it, the data node shall start the mnode module and become a mnode. If your own EP is not in the mnode EP List, the mnode module will not start. During the system operation, due to load balancing, downtime and other reasons, mnode may migrate to the new dnode, totally transparently and without manual intervention. The modification of configuration parameters is the decision made by mnode itself according to resources usage. -**Add new data nodes:** After the system has a data node, it has become a working system. There are two steps to add a new node into the cluster. Step1: Connect to the existing working data node using TDengine CLI, and then add the End Point of the new data node with the command "create dnode"; Step 2: In the system configuration parameter file taos.cfg of the new data node, set the “firstEp” and “secondEp” parameters to the EP of any two data nodes in the existing cluster. Please refer to the detailed user tutorial for detailed steps. In this way, the cluster will be established step by step. +**Add new data nodes:** After the system has a data node, it has become a working system. There are two steps to add a new node into the cluster. +- Step 1: Connect to the existing working data node using TDengine CLI, and then add the End Point of the new data node with the command "create dnode" +- Step 2: In the system configuration parameter file taos.cfg of the new data node, set the “firstEp” and “secondEp” parameters to the EP of any two data nodes in the existing cluster. Please refer to the user tutorial for detailed steps. In this way, the cluster will be established step by step. -**Redirection**: No matter about dnode or TAOSC, the connection to the mnode shall be initiated first, but the mnode is automatically created and maintained by the system, so the user does not know which dnode is running the mnode. TDengine only requires a connection to any working dnode in the system. Because any running dnode maintains the currently running mnode EP List, when receiving a connecting request from the newly started dnode or TAOSC, if it’s not a mnode by self, it will reply to the mnode EP List back. After receiving this list, TAOSC or the newly started dnode will try to establish the connection again. When the mnode EP List changes, each data node quickly obtains the latest list and notifies TAOSC through messaging interaction among nodes. +**Redirection**: Regardless of dnode or TAOSC, the connection to the mnode is initiated first. The mnode is automatically created and maintained by the system, so the user does not know which dnode is running the mnode. TDengine only requires a connection to any working dnode in the system.
Because any running dnode maintains the currently running mnode EP List, when receiving a connecting request from the newly started dnode or TAOSC, if it’s not an mnode itself, it will reply with the mnode EP List. After receiving this list, TAOSC or the newly started dnode will try to establish the connection again. When the mnode EP List changes, each data node quickly obtains the latest list and notifies TAOSC through messaging interaction among nodes. ### A Typical Data Writing Process To explain the relationship between vnode, mnode, TAOSC and application and their respective roles, the following is an analysis of a typical data writing process. -![typical process of TDengine](message.webp) +![typical process of TDengine Database](message.webp)
Figure 2: Typical process of TDengine
1. Application initiates a request to insert data through JDBC, ODBC, or other APIs. -2. TAOSC checks if meta data existing for the table in the cache. If so, go straight to Step 4. If not, TAOSC sends a get meta-data request to mnode. +2. TAOSC checks the cache to see if meta data exists for the table. If it does, it goes straight to Step 4. If not, TAOSC sends a get meta-data request to mnode. 3. Mnode returns the meta-data of the table to TAOSC. Meta-data contains the schema of the table, and also the vgroup information to which the table belongs (the vnode ID and the End Point of the dnode where the table belongs. If the number of replicas is N, there will be N groups of End Points). If TAOSC does not receive a response from the mnode for a long time, and there are multiple mnodes, TAOSC will send a request to the next mnode. 4. TAOSC initiates an insert request to master vnode. 5. After vnode inserts the data, it gives a reply to TAOSC, indicating that the insertion is successful. If TAOSC doesn't get a response from vnode for a long time, TAOSC will treat this node as offline. In this case, if there are multiple replicas of the inserted database, TAOSC will issue an insert request to the next vnode in vgroup. 6. TAOSC notifies APP that writing is successful. -For Step 2 and 3, when TAOSC starts, it does not know the End Point of mnode, so it will directly initiate a request to the configured serving End Point of the cluster. If the dnode that receives the request does not have a mnode configured, it will inform the mnode EP list in a reply message, so that TAOSC will re-issue a request to obtain meta-data to the EP of another new mnode. +For Steps 2 and 3, when TAOSC starts, it does not know the End Point of the mnode, so it will directly initiate a request to the configured serving End Point of the cluster. If the dnode that receives the request does not have an mnode configured, it will reply with the mnode EP list, so that TAOSC can re-issue the meta-data request to the EP of another mnode. -For Step 4 and 5, without caching, TAOSC can't recognize the master in the virtual node group, so assumes that the first vnode is the master and sends a request to it. If this vnode is not the master, it will reply to the actual master as a new target where TAOSC shall send a request to. Once the reply of successful insertion is obtained, TAOSC will cache the information of master node. +For Steps 4 and 5, without caching, TAOSC can't recognize the master in the virtual node group, so it assumes that the first vnode is the master and sends a request to it. If this vnode is not the master, it will reply with the actual master as the new target to which TAOSC shall send the request. Once a response of successful insertion is obtained, TAOSC will cache the information of the master node. -The above is the process of inserting data, and the processes of querying and computing are the same. TAOSC encapsulates and hides all these complicated processes, and it is transparent to applications. +The above describes the process of inserting data. The processes of querying and computing are the same. TAOSC encapsulates and hides all these complicated processes, and it is transparent to applications. Through TAOSC caching mechanism, mnode needs to be accessed only when a table is accessed for the first time, so mnode will not become a system bottleneck. However, because schema and vgroup may change (such as load balancing), TAOSC will interact with mnode regularly to automatically update the cache.
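As a concrete trigger for the flow above, any single insert issued through an API goes through these steps; the values here are illustrative, and `d1001` is the sample table used later in this document.

```mysql
-- one-row insert; TAOSC resolves the table's vgroup from cached meta-data
-- (Steps 2-3) and forwards the request to the master vnode (Steps 4-5)
INSERT INTO d1001 VALUES (NOW, 10.3, 219, 0.31);
```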
@@ -76,24 +78,24 @@ Through TAOSC caching mechanism, mnode needs to be accessed only when a table is ### Storage Model -The data stored by TDengine include collected time-series data, metadata related to database and tables, tag data, etc. These data are specifically divided into three parts: +The data stored by TDengine includes collected time-series data, metadata related to databases and tables, tag data, etc. All of the data is specifically divided into three parts: -- Time-series data: stored in vnode and composed of data, head and last files. The amount of data is large and query amount depends on the application scenario. Out-of-order writing is allowed, but delete operation is not supported for the time being, and update operation is only allowed when database “update” parameter is set to 1. By adopting the model with **one table for each data collection point**, the data of a given time period is continuously stored, and the writing against one single table is a simple appending operation. Multiple records can be read at one time, thus ensuring the insert and query operation of a single data collection point with the best performance. -- Tag data: meta files stored in vnode. Four standard operations of create, read, update and delete are supported. The amount of data is not large. If there are N tables, there are N records, so all can be stored in memory. To make tag filtering efficient, TDengine supports multi-core and multi-threaded concurrent queries. As long as the computing resources are sufficient, even in face of millions of tables, the tag filtering results will return in milliseconds. -- Metadata: stored in mnode, including system node, user, DB, Table Schema and other information. Four standard operations of create, delete, update and read are supported. The amount of these data are not large and can be stored in memory, moreover, the query amount is not large because of the client cache. Therefore, TDengine uses centralized storage management, however, there will be no performance bottleneck. +- Time-series data: stored in vnode and composed of data, head and last files. The amount of data is large and query amount depends on the application scenario. Out-of-order writing is allowed, but delete operation is not supported for the time being, and update operation is only allowed when database “update” parameter is set to 1. By adopting the model with **one table for each data collection point**, the data of a given time period is continuously stored, and the writing against one single table is a simple appending operation. Multiple records can be read at one time, thus ensuring the best performance for both insert and query operations of a single data collection point. +- Tag data: meta files stored in vnode. Four standard operations of create, read, update and delete are supported. The amount of data is not large. If there are N tables, there are N records, so all can be stored in memory. To make tag filtering efficient, TDengine supports multi-core and multi-threaded concurrent queries. As long as the computing resources are sufficient, even with millions of tables, the tag filtering results will return in milliseconds. +- Metadata: stored in mnode and includes system node, user, DB, table schema and other information. Four standard operations of create, delete, update and read are supported. The amount of this data is not large and can be stored in memory. Moreover, the number of queries is not large because of the client cache.
Even though TDengine uses centralized storage management, because of the architecture, there is no performance bottleneck. -Compared with the typical NoSQL storage model, TDengine stores tag data and time-series data completely separately, which has two major advantages: +Compared with the typical NoSQL storage model, TDengine stores tag data and time-series data completely separately. This has two major advantages: -- Reduce the redundancy of tag data storage significantly: general NoSQL database or time-series database adopts K-V storage, in which Key includes a timestamp, a device ID and various tags. Each record carries these duplicated tags, so storage space is wasted. Moreover, if the application needs to add, modify or delete tags on historical data, it has to traverse the data and rewrite them again, which is extremely expensive to operate. -- Aggregate data efficiently between multiple tables: when aggregating data between multiple tables, it first finds out the tables which satisfy the filtering conditions, and then find out the corresponding data blocks of these tables to greatly reduce the data sets to be scanned, thus greatly improving the aggregation efficiency. Moreover, tag data is managed and maintained in a full-memory structure, and tag data queries in tens of millions can return in milliseconds. +- Reduces the redundancy of tag data storage significantly. A typical NoSQL or time-series database adopts K-V (key-value) storage, in which the key includes a timestamp, a device ID and various tags. Each record carries these duplicated tags, so storage space is wasted. Moreover, if the application needs to add, modify or delete tags on historical data, it has to traverse the data and rewrite it, which is an extremely expensive operation. +- Aggregates data efficiently between multiple tables: when aggregating data across multiple tables, the system first finds the tables which satisfy the filtering conditions, and then finds the corresponding data blocks of these tables. This greatly reduces the data sets to be scanned, which in turn improves the aggregation efficiency. Moreover, tag data is managed and maintained in a full-memory structure, and tag data queries over tens of millions of tables can return in milliseconds. ### Data Sharding -For large-scale data management, to achieve scale-out, it is generally necessary to adopt the Partitioning or Sharding strategy. TDengine implements data sharding via vnode, and time-series data partitioning via one data file for a time range. +For large-scale data management, to achieve scale-out, it is generally necessary to adopt a Partitioning or Sharding strategy. TDengine implements data sharding via vnode, and time-series data partitioning via one data file for a time range. VNode (Virtual Data Node) is responsible for providing writing, query and computing functions for collected time-series data. To facilitate load balancing, data recovery and support heterogeneous environments, TDengine splits a data node into multiple vnodes according to its computing and storage resources. The management of these vnodes is done automatically by TDengine and is completely transparent to the application.
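To make the storage model above concrete, here is a minimal sketch of the one-table-per-collection-point design; the STable name, columns and tags are illustrative assumptions (they echo the `d1001` sample table used later in this document), not definitions from this section.

```mysql
-- tags are declared once per table and stored in vnode meta files,
-- not repeated in every time-series record (names/types are illustrative)
CREATE STABLE meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT)
  TAGS (location BINARY(64), group_id INT);
CREATE TABLE d1001 USING meters TAGS ('California.SanFrancisco', 2);
```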
-For a single data collection point, regardless of the amount of data, a vnode (or vnode group, if the number of replicas is greater than 1) has enough computing resource and storage resource to process (if a 16-byte record is generated per second, the original data generated in one year will be less than 0.5 G), so TDengine stores all the data of a table (a data collection point) in one vnode instead of distributing the data to two or more dnodes. Moreover, a vnode can store data from multiple data collection points (tables), and the upper limit of the tables’ quantity for a vnode is one million. By design, all tables in a vnode belong to the same DB. On a data node, unless specially configured, the number of vnodes owned by a DB will not exceed the number of system cores. +For a single data collection point, regardless of the amount of data, a vnode (or vnode group, if the number of replicas is greater than 1) has enough computing and storage resources to process it (if a 16-byte record is generated per second, the original data generated in one year is 16 bytes/s × 86,400 s/day × 365 days ≈ 5 × 10⁸ bytes, i.e. less than 0.5 GB). So TDengine stores all the data of a table (a data collection point) in one vnode instead of distributing the data to two or more dnodes. Moreover, a vnode can store data from multiple data collection points (tables), and the upper limit of the tables’ quantity for a vnode is one million. By design, all tables in a vnode belong to the same DB. On a data node, unless specially configured, the number of vnodes owned by a DB will not exceed the number of system cores. When creating a DB, the system does not allocate resources immediately. However, when creating a table, the system will check if there is an allocated vnode with free tablespace. If so, the table will be created in the vacant vnode immediately. If not, the system will create a new vnode on a dnode from the cluster according to the current workload, and then create the table there. If there are multiple replicas of a DB, the system does not create only one vnode, but a vgroup (virtual data node group). The system has no limit on the number of vnodes, which is just limited by the computing and storage resources of physical nodes. @@ -101,43 +103,43 @@ The meta data of each table (including schema, tags, etc.) is also stored in vno ### Data Partitioning -In addition to vnode sharding, TDengine partitions the time-series data by time range. Each data file contains only one time range of time-series data, and the length of the time range is determined by DB's configuration parameter `“days”`. This method of partitioning by time rang is also convenient to efficiently implement the data retention policy. As long as the data file exceeds the specified number of days (system configuration parameter `“keep”`), it will be automatically deleted. Moreover, different time ranges can be stored in different paths and storage media, so as to facilitate the tiered-storage. Cold/hot data can be stored in different storage media to reduce the storage cost. +In addition to vnode sharding, TDengine partitions the time-series data by time range. Each data file contains only one time range of time-series data, and the length of the time range is determined by the database configuration parameter `“days”`. This method of partitioning by time range is also convenient to efficiently implement data retention policies. As long as the data file exceeds the specified number of days (system configuration parameter `“keep”`), it will be automatically deleted. Moreover, different time ranges can be stored in different paths and storage media, so as to facilitate tiered-storage. Cold/hot data can be stored in different storage media to significantly reduce storage costs.
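Both knobs are set when the database is created; in the sketch below the database name and values are hypothetical examples, not recommendations.

```mysql
-- 10-day data files, retained for one year (illustrative values)
CREATE DATABASE power DAYS 10 KEEP 365;
```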
Moreover, different time ranges can be stored in different paths and storage media, so as to facilitate tiered-storage. Cold/hot data can be stored in different storage media to significantly reduce storage costs. In general, **TDengine splits big data by vnode and time range in two dimensions** to manage the data efficiently with horizontal scalability. ### Load Balancing -Each dnode regularly reports its status (including hard disk space, memory size, CPU, network, number of virtual nodes, etc.) to the mnode (virtual management node), so mnode knows the status of the entire cluster. Based on the overall status, when the mnode finds a dnode is overloaded, it will migrate one or more vnodes to other dnodes. During the process, TDengine services keep running and the data insertion, query and computing operations are not affected. +Each dnode regularly reports its status (including hard disk space, memory size, CPU, network, number of virtual nodes, etc.) to the mnode (virtual management node) so that the mnode knows the status of the entire cluster. Based on the overall status, when the mnode finds a dnode is overloaded, it will migrate one or more vnodes to other dnodes. During the process, TDengine services keep running and the data insertion, query and computing operations are not affected. -If the mnode has not received the dnode status for a period of time, the dnode will be treated as offline. When offline lasts a certain period of time (configured by parameter `“offlineThreshold”`), the dnode will be forcibly removed from the cluster by mnode. If the number of replicas of vnodes on this dnode is greater than one, the system will automatically create new replicas on other dnodes to ensure the replica number. If there are other mnodes on this dnode and the number of mnodes replicas is greater than one, the system will automatically create new mnodes on other dnodes to ensure the replica number. +If the mnode has not received the dnode status for a period of time, the dnode will be treated as offline. If the dnode stays offline beyond the time configured by parameter `“offlineThreshold”`, the dnode will be forcibly removed from the cluster by mnode. If the number of replicas of vnodes on this dnode is greater than one, the system will automatically create new replicas on other dnodes to ensure the replica number. If there are other mnodes on this dnode and the number of mnodes replicas is greater than one, the system will automatically create new mnodes on other dnodes to ensure the replica number. -When new data nodes are added to the cluster, with new computing and storage resources are added, the system will automatically start the load balancing process. +When new data nodes are added to the cluster, with new computing and storage resources, the system will automatically start the load balancing process. The load balancing process does not require any manual intervention, and it is transparent to the application. **Note: load balancing is controlled by parameter “balance”, which determines to turn on/off automatic load balancing.** ## Data Writing and Replication Process -If a database has N replicas, thus a virtual node group has N virtual nodes, but only one as Master and all others are slaves. When the application writes a new record to system, only the Master vnode can accept the writing request. If a slave vnode receives a writing request, the system will notifies TAOSC to redirect. +If a database has N replicas, a virtual node group has N virtual nodes. 
## Data Writing and Replication Process -If a database has N replicas, thus a virtual node group has N virtual nodes, but only one as Master and all others are slaves. When the application writes a new record to system, only the Master vnode can accept the writing request. If a slave vnode receives a writing request, the system will notifies TAOSC to redirect. +If a database has N replicas, a virtual node group has N virtual nodes, but only one is the Master; all others are slaves. When the application writes a new record to the system, only the Master vnode can accept the write request. If a slave vnode receives a write request, the system will notify TAOSC to redirect it. ### Master vnode Writing Process Master Vnode uses a writing process as follows: -![TDengine Master Writing Process](write_master.webp) +![TDengine Database Master Writing Process](write_master.webp)
Figure 3: TDengine Master writing process
1. Master vnode receives the application data insertion request, verifies, and moves to next step; 2. If the system configuration parameter `“walLevel”` is greater than 0, vnode will write the original request packet into database log file WAL. If walLevel is set to 2 and fsync is set to 0, TDengine will make WAL data written immediately to ensure that even system goes down, all data can be recovered from database log file; 3. If there are multiple replicas, vnode will forward data packet to slave vnodes in the same virtual node group, and the forwarded packet has a version number with data; 4. Write into memory and add the record to “skip list”; -5. Master vnode returns a confirmation message to the application, indicating a successful writing. +5. Master vnode returns a confirmation message to the application, indicating a successful write. 6. If any of Step 2, 3 or 4 fails, the error will directly return to the application. ### Slave vnode Writing Process For a slave vnode, the write process as follows: -![TDengine Slave Writing Process](write_slave.webp) +![TDengine Database Slave Writing Process](write_slave.webp)
Figure 4: TDengine Slave Writing Process
1. Slave vnode receives a data insertion request forwarded by Master vnode; @@ -146,19 +148,19 @@ For a slave vnode, the write process as follows: Compared with Master vnode, slave vnode has no forwarding or reply confirmation step, means two steps less. But writing into memory and WAL is exactly the same. -### Remote Disaster Recovery and IDC Migration +### Remote Disaster Recovery and IDC (Internet Data Center) Migration -As above Master and Slave processes discussed, TDengine adopts asynchronous replication for data synchronization. This method can greatly improve the writing performance, with no obvious impact from network delay. By configuring IDC and rack number for each physical node, it can be ensured that for a virtual node group, virtual nodes are composed of physical nodes from different IDC and different racks, thus implementing remote disaster recovery without other tools. +As discussed above, TDengine writes using Master and Slave processes. TDengine adopts asynchronous replication for data synchronization. This method can greatly improve write performance, with no obvious impact from network delay. By configuring IDC and rack number for each physical node, it can be ensured that for a virtual node group, virtual nodes are composed of physical nodes from different IDC and different racks, thus implementing remote disaster recovery without other tools. -On the other hand, TDengine supports dynamic modification of the replicas number. Once the number of replicas increases, the newly added virtual nodes will immediately enter the data synchronization process. After synchronization completed, added virtual nodes can provide services. In the synchronization process, master and other synchronized virtual nodes keep serving. With this feature, TDengine can provide IDC migration without service interruption. It is only necessary to add new physical nodes to the existing IDC cluster, and then remove old physical nodes after the data synchronization is completed. +On the other hand, TDengine supports dynamic modification of the replica number. Once the number of replicas increases, the newly added virtual nodes will immediately enter the data synchronization process. After synchronization is complete, added virtual nodes can provide services. In the synchronization process, master and other synchronized virtual nodes keep serving. With this feature, TDengine can provide IDC migration without service interruption. It is only necessary to add new physical nodes to the existing IDC cluster, and then remove old physical nodes after the data synchronization is completed. -However, the asynchronous replication has a tiny time window where data can be lost. The specific scenario is as follows: +However, asynchronous replication has a very-low-probability scenario in which data may be lost. The specific scenario is as follows: -1. Master vnode has finished its 5-step operations, confirmed the success of writing to APP, and then went down; +1. Master vnode finishes its 5-step operations, confirms the success of the write to the APP, and then goes down; 2. Slave vnode receives the write request, then processing fails before writing to the log in Step 2; 3. Slave vnode will become the new master, thus losing one record. -In theory, for asynchronous replication, there is no guarantee to prevent data loss. However, this window is extremely small, only if mater and slave fail at the same time, and just confirm the successful write to the application before.
+In theory, for asynchronous replication, there is no guarantee to prevent data loss. However, this is an extremely low probability scenario as described above. Note: Remote disaster recovery and no-downtime IDC migration are only supported by Enterprise Edition. **Hint: This function is not available yet** @@ -171,43 +173,43 @@ When a vnode starts, the roles (master, slave) are uncertain, and the data is in 1. If there’s only one replica, it’s always master 2. When all replicas are online, the one with latest version is master 3. Over half of online nodes are virtual nodes, and some virtual node is slave, it will automatically become master -4. For 2 and 3, if multiple virtual nodes meet the requirement, the first vnode in virtual node group list will be selected as master +4. For 2 and 3, if multiple virtual nodes meet the requirement, the first vnode in the virtual node group list will be selected as master. ### Synchronous Replication For scenarios with strong data consistency requirements, asynchronous data replication is not applicable, because there is a small probability of data loss. So, TDengine provides a synchronous replication mechanism for users. When creating a database, in addition to specifying the number of replicas, user also needs to specify a new parameter “quorum”. If quorum is greater than one, it means that every time the Master forwards a message to the replica, it needs to wait for “quorum-1” reply confirms before informing the application that data has been successfully written in slave. If “quorum-1” reply confirms are not received within a certain period of time, the master vnode will return an error to the application. -With synchronous replication, performance of system will decrease and latency will increase. Because metadata needs strong consistent, the default for data synchronization between mnodes is synchronous replication. +With synchronous replication, performance of the system will decrease and latency will increase. Because metadata needs strong consistency, the default for data synchronization between mnodes is synchronous replication.
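For illustration, quorum is specified when the database is created; the database name and values below are hypothetical examples.

```mysql
-- 3 replicas; the master waits for quorum-1 = 1 slave confirmation
-- before acknowledging each write (names and values are illustrative)
CREATE DATABASE finance REPLICA 3 QUORUM 2;
```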
## Caching and Persistence ### Caching -TDengine adopts a time-driven cache management strategy (First-In-First-Out, FIFO), also known as a Write-driven Cache Management Mechanism. This strategy is different from the read-driven data caching mode (Least-Recent-Used, LRU), which directly put the most recently written data in the system buffer. When the buffer reaches a threshold, the earliest data are written to disk in batches. Generally speaking, for the use of IoT data, users are most concerned about the newly generated data, that is, the current status. TDengine takes full advantage of this feature to put the most recently arrived (current state) data in the buffer. +TDengine adopts a time-driven cache management strategy (First-In-First-Out, FIFO), also known as a Write-driven Cache Management Mechanism. Unlike the read-driven data caching mode (Least-Recently-Used, LRU), this strategy puts the most recently written data directly into the system buffer. When the buffer reaches a threshold, the earliest data are written to disk in batches. Generally speaking, for the use of IoT data, users are most concerned about the most recently generated data, that is, the current status. TDengine takes full advantage of this feature to put the most recently arrived (current state) data in the buffer. -TDengine provides millisecond-level data collecting capability to users through query functions. Putting the recently arrived data directly in the buffer can respond to users' analysis query for the latest piece or batch of data more quickly, and provide faster database query response capability as a whole. In this sense, **TDengine can be used as a data cache by setting appropriate configuration parameters without deploying Redis or other additional cache systems**, which can effectively simplify the system architecture and reduce the operation costs. It should be noted that after the TDengine is restarted, the buffer of the system will be emptied, the previously cached data will be written to disk in batches, and the previously cached data will not be reloaded into the buffer as so in a proprietary key-value cache system. +TDengine provides millisecond-level data collecting capability to users through query functions. Putting the recently arrived data directly in the buffer can respond to users' analysis query for the latest piece or batch of data more quickly, and provide faster database query response capability as a whole. In this sense, **TDengine can be used as a data cache by setting appropriate configuration parameters without deploying Redis or other additional cache systems**. This can effectively simplify the system architecture and reduce operational costs. It should be noted that after TDengine is restarted, the buffer of the system will be emptied, the previously cached data will be written to disk in batches, and the previously cached data will not be reloaded into the buffer. In this sense, TDengine's cache differs from proprietary key-value cache systems. Each vnode has its own independent memory, and it is composed of multiple memory blocks of fixed size, and different vnodes are completely isolated. When writing data, similar to the writing of logs, data is sequentially added to memory, but each vnode maintains its own skip list for quick search. When more than one third of the memory block are used, the disk writing operation will start, and the subsequent writing operation is carried out in a new memory block. By this design, one third of the memory blocks in a vnode keep the latest data, so as to achieve the purpose of caching and quick search. The number of memory blocks of a vnode is determined by the configuration parameter “blocks”, and the size of memory blocks is determined by the configuration parameter “cache”. ### Persistent Storage -TDengine uses a data-driven method to write the data from buffer into hard disk for persistent storage. When the cached data in vnode reaches a certain volume, TDengine will also pull up the disk-writing thread to write the cached data into persistent storage in order not to block subsequent data writing. TDengine will open a new database log file when the data is written, and delete the old database log file after written successfully to avoid unlimited log growth. +TDengine uses a data-driven method to write the data from buffer into hard disk for persistent storage. When the cached data in vnode reaches a certain volume, TDengine will pull up the disk-writing thread to write the cached data into persistent storage so that subsequent data writing is not blocked. TDengine will open a new database log file when the data is written, and delete the old database log file after successful persistence, to avoid unlimited log growth.
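The WAL behavior described here (and in step 2 of the master vnode writing process) is governed by the `walLevel` and `fsync` parameters mentioned earlier; a taos.cfg sketch with illustrative values follows.

```
# taos.cfg: WAL durability (illustrative values)
walLevel 2     # 0: no WAL; 1: write WAL; 2: write WAL and fsync
fsync 3000     # fsync period in ms; 0 means fsync on every write
```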
-To make full use of the characteristics of time-series data, TDengine splits the data stored in persistent storage by a vnode into multiple files, each file only saves data for a fixed number of days, which is determined by the system configuration parameter `“days”`. By so, for the given start and end date of a query, you can locate the data files to open immediately without any index, thus greatly speeding up reading operations. +To make full use of the characteristics of time-series data, TDengine splits the data stored in persistent storage by a vnode into multiple files, each file only saves data for a fixed number of days, which is determined by the system configuration parameter `“days”`. Thus, for the given start and end dates of a query, you can locate the data files to open immediately without any index. This greatly speeds up read operations. For time-series data, there is generally a retention policy, which is determined by the system configuration parameter `“keep”`. Data files exceeding this set number of days will be automatically deleted by the system to free up storage space. Given “days” and “keep” parameters, the total number of data files in a vnode is: keep/days. The total number of data files should not be too large or too small. 10 to 100 is appropriate. Based on this principle, reasonable days can be set. In the current version, parameter “keep” can be modified, but parameter “days” cannot be modified once it is set. -In each data file, the data of a table is stored by blocks. A table can have one or more data file blocks. In a file block, data is stored in columns, occupying a continuous storage space, thus greatly improving the reading speed. The size of file block is determined by the system parameter `“maxRows”` (the maximum number of records per block), and the default value is 4096. This value should not be too large or too small. If it is too large, the data locating in search will cost longer; if too small, the index of data block is too large, and the compression efficiency will be low with slower reading speed. +In each data file, the data of a table is stored in blocks. A table can have one or more data file blocks. In a file block, data is stored in columns, occupying a continuous storage space, thus greatly improving the reading speed. The size of file block is determined by the system parameter `“maxRows”` (the maximum number of records per block), and the default value is 4096. This value should not be too large or too small. If it is too large, data location for queries will take longer. If it is too small, the index of data block is too large, and the compression efficiency will be low with slower reading speed. -Each data file (with a .data postfix) has a corresponding index file (with a .head postfix). The index file has summary information of a data block for each table, recording the offset of each data block in the data file, start and end time of data and other information, so as to lead system quickly locate the data to be found. Each data file also has a corresponding last file (with a .last postfix), which is designed to prevent data block fragmentation when written in disk. If the number of written records from a table does not reach the system configuration parameter `“minRows”` (minimum number of records per block), it will be stored in the last file first. When write to disk next time, the newly written records will be merged with the records in last file and then written into data file.
+Each data file (with a .data postfix) has a corresponding index file (with a .head postfix). The index file has summary information of a data block for each table, recording the offset of each data block in the data file, start and end time of data and other information which allows the system to locate the data to be found very quickly. Each data file also has a corresponding last file (with a .last postfix), which is designed to prevent data block fragmentation when written in disk. If the number of written records from a table does not reach the system configuration parameter `“minRows”` (minimum number of records per block), it will be stored in the last file first. At the next write operation to the disk, the newly written records will be merged with the records in the last file and then written into the data file. -When data is written to disk, it is decided whether to compress the data according to system configuration parameter `“comp”`. TDengine provides three compression options: no compression, one-stage compression and two-stage compression, corresponding to comp values of 0, 1 and 2 respectively. One-stage compression is carried out according to the type of data. Compression algorithms include delta-delta coding, simple 8B method, zig-zag coding, LZ4 and other algorithms. Two-stage compression is based on one-stage compression and compressed by general compression algorithm, which has higher compression ratio. +When data is written to disk, the system decides whether to compress the data based on the system configuration parameter `“comp”`. TDengine provides three compression options: no compression, one-stage compression and two-stage compression, corresponding to comp values of 0, 1 and 2 respectively. One-stage compression is carried out according to the type of data. Compression algorithms include delta-delta coding, simple 8B method, zig-zag coding, LZ4 and other algorithms. Two-stage compression is based on one-stage compression and compressed by a general compression algorithm, which has a higher compression ratio. ### Tiered Storage -By default, TDengine saves all data in /var/lib/taos directory, and the data files of each vnode are saved in a different directory under this directory. In order to expand the storage space, minimize the bottleneck of file reading and improve the data throughput rate, TDengine can configure the system parameter “dataDir” to allow multiple mounted hard disks to be used by system at the same time. In addition, TDengine also provides the function of tiered data storage, i.e. storage on different storage media according to the time stamps of data files. For example, the latest data is stored on SSD, the data for more than one week is stored on local hard disk, and the data for more than four weeks is stored on network storage device, thus reducing the storage cost and ensuring efficient data access. The movement of data on different storage media is automatically done by the system and completely transparent to applications. Tiered storage of data is also configured through the system parameter “dataDir”. +By default, TDengine saves all data in the /var/lib/taos directory, and the data files of each vnode are saved in a different directory under this directory. In order to expand the storage space, minimize the bottleneck of file reading and improve the data throughput rate, TDengine can configure the system parameter “dataDir” to allow multiple mounted hard disks to be used by the system at the same time.
In addition, TDengine also provides the function of tiered data storage, i.e. storage on different storage media according to the time stamps of data files. For example, the latest data is stored on SSD, the data older than a week is stored on local hard disk, and data older than four weeks is stored on network storage device. This reduces storage costs and ensures efficient data access. The movement of data on different storage media is automatically done by the system and is completely transparent to applications. Tiered storage of data is also configured through the system parameter “dataDir”. dataDir format is as follows: ``` @@ -216,7 +218,7 @@ dataDir data_path [tier_level] Where data_path is the folder path of mount point and tier_level is the media storage-tier. The higher the media storage-tier, means the older the data file. Multiple hard disks can be mounted at the same storage-tier, and data files on the same storage-tier are distributed on all hard disks within the tier. TDengine supports up to 3 tiers of storage, so tier_level values are 0, 1, and 2. When configuring dataDir, there must be only one mount path without specifying tier_level, which is called special mount disk (path). The mount path defaults to level 0 storage media and contains special file links, which cannot be removed, otherwise it will have a devastating impact on the written data. -Suppose a physical node with six mountable hard disks/mnt/disk1,/mnt/disk2, …,/mnt/disk6, where disk1 and disk2 need to be designated as level 0 storage media, disk3 and disk4 are level 1 storage media, and disk5 and disk6 are level 2 storage media. Disk1 is a special mount disk, you can configure it in/etc/taos/taos.cfg as follows: +Suppose there is a physical node with six mountable hard disks /mnt/disk1, /mnt/disk2, …, /mnt/disk6, where disk1 and disk2 need to be designated as level 0 storage media, disk3 and disk4 are level 1 storage media, and disk5 and disk6 are level 2 storage media. Disk1 is the special mount disk; you can configure it in /etc/taos/taos.cfg as follows: ``` dataDir /mnt/disk1/taos @@ -233,11 +235,11 @@ Note: Tiered Storage is only supported in Enterprise Edition ## Data Query -TDengine provides a variety of query processing functions for tables and STables. In addition to common aggregation queries, TDengine also provides window queries and statistical aggregation functions for time-series data. The query processing of TDengine needs the collaboration of client, vnode and mnode. +TDengine provides a variety of query processing functions for tables and STables. In addition to common aggregation queries, TDengine also provides window queries and statistical aggregation functions for time-series data. Query processing in TDengine needs the collaboration of client, vnode and mnode.
According to the End Point information in metadata information, the query request is serialized and sent to the data node (dnode) where the table is located. After receiving the query, the dnode identifies the virtual node (vnode) pointed to and forwards the message to the query execution queue of the vnode. The query execution thread of vnode establishes the basic query execution environment, immediately returns the query request and starts executing the query at the same time. @@ -245,9 +247,9 @@ When client obtains query result, the worker thread in query execution queue of ### Aggregation by Time Axis, Downsampling, Interpolation -The remarkable feature that time-series data is different from ordinary data is that each record has a timestamp, so aggregating data with timestamps on the time axis is an important and distinct feature from common databases. From this point of view, it is similar to the window query of stream computing engine. +Time-series data is different from ordinary data in that each record has a timestamp. So aggregating data by timestamps on the time axis is an important and distinct feature of time-series databases which is different from that of common databases. It is similar to the window query of stream computing engines. -The keyword `interval` is introduced into TDengine to split fixed length time windows on time axis, and the data are aggregated based on time windows, and the data within window range are aggregated as needed. For example: +The keyword `interval` is introduced into TDengine to split fixed length time windows on the time axis. The data is aggregated based on time windows, and the data within time window ranges is aggregated as needed. For example: ```mysql select count(*) from d1001 interval(1h); @@ -265,21 +267,21 @@ For the data collected by device D1001, the number of records per hour is counte ### Multi-table Aggregation Query -TDengine creates a separate table for each data collection point, but in practical applications, it is often necessary to aggregate data from different data collection points. In order to perform aggregation operations efficiently, TDengine introduces the concept of STable. STable is used to represent a specific type of data collection point. It is a table set containing multiple tables. The schema of each table in the set is the same, but each table has its own static tag. The tags can be multiple and be added, deleted and modified at any time. Applications can aggregate or statistically operate all or a subset of tables under a STABLE by specifying tag filters, thus greatly simplifying the development of applications. The process is shown in the following figure: +TDengine creates a separate table for each data collection point, but in practical applications, it is often necessary to aggregate data from different data collection points. In order to perform aggregation operations efficiently, TDengine introduces the concept of STable (super table). STable is used to represent a specific type of data collection point. It is a table set containing multiple tables. The schema of each table in the set is the same, but each table has its own static tag. There can be multiple tags which can be added, deleted and modified at any time. Applications can aggregate or statistically operate on all or a subset of tables under a STABLE by specifying tag filters. This greatly simplifies the development of applications. 
The process is shown in the following figure: -![Diagram of multi-table aggregation query](multi_tables.webp) +![TDengine Database Diagram of multi-table aggregation query](multi_tables.webp)
Figure 5: Diagram of multi-table aggregation query
1. Application sends a query condition to system; 2. TAOSC sends the STable name to Meta Node(management node); 3. Management node sends the vnode list owned by the STable back to TAOSC; 4. TAOSC sends the computing request together with tag filters to multiple data nodes corresponding to these vnodes; -5. Each vnode first finds out the set of tables within its own node that meet the tag filters from memory, then scans the stored time-series data, completes corresponding aggregation calculations, and returns result to TAOSC; +5. Each vnode first finds the set of tables within its own node that meet the tag filters from memory, then scans the stored time-series data, completes corresponding aggregation calculations, and returns the result to TAOSC; 6. TAOSC finally aggregates the results returned by multiple data nodes and send them back to application. -Since TDengine stores tag data and time-series data separately in vnode, by filtering tag data in memory, the set of tables that need to participate in aggregation operation is first found, which greatly reduces the volume of data scanned and improves aggregation speed. At the same time, because the data is distributed in multiple vnodes/dnodes, the aggregation operation is carried out concurrently in multiple vnodes, which further improves the aggregation speed. Aggregation functions for ordinary tables and most operations are applicable to STables. The syntax is exactly the same. Please see TAOS SQL for details. +Since TDengine stores tag data and time-series data separately in vnode, by filtering tag data in memory, the set of tables that need to participate in the aggregation operation is found first, which reduces the volume of data to be scanned and improves aggregation speed. At the same time, because the data is distributed in multiple vnodes/dnodes, the aggregation operation is carried out concurrently in multiple vnodes, which further improves the aggregation speed. Aggregation functions for ordinary tables and most operations are applicable to STables. The syntax is exactly the same. Please see TAOS SQL for details. ### Precomputation -In order to effectively improve the performance of query processing, based-on the unchangeable feature of IoT data, statistical information of data stored in data block is recorded in the head of data block, including max value, min value, and sum. We call it a precomputing unit. If the query processing involves all the data of a whole data block, the pre-calculated results are directly used, and no need to read the data block contents at all. Since the amount of pre-calculated data is much smaller than the actual size of data block stored on disk, for query processing with disk IO as bottleneck, the use of pre-calculated results can greatly reduce the pressure of reading IO and accelerate the query process. The precomputation mechanism is similar to the index BRIN (Block Range Index) of PostgreSQL. +In order to effectively improve the performance of query processing, and based on the immutable nature of IoT data, statistical information about the data stored in a data block is recorded in the head of the data block, including max value, min value, and sum. We call this a precomputing unit. If the query processing involves all the data of a whole data block, the pre-calculated results are used directly, and there is no need to read the data block contents at all.
Since the amount of pre-calculated data is much smaller than the actual size of a data block stored on disk, for query processing with disk IO as the bottleneck, the use of pre-calculated results can greatly reduce the pressure of reading IO and accelerate the query process. The precomputation mechanism is similar to the BRIN (Block Range Index) of PostgreSQL. diff --git a/docs-en/25-application/01-telegraf.md b/docs-en/25-application/01-telegraf.md index 07ab289ac2bbf44c219535fe128db69b34465c01..d30a23fe1b942e1411e8b5f1320e1c54ae2b407f 100644 --- a/docs-en/25-application/01-telegraf.md +++ b/docs-en/25-application/01-telegraf.md @@ -5,18 +5,18 @@ title: Quickly Build IT DevOps Visualization System with TDengine + Telegraf + G ## Background -TDengine is a big data platform designed and optimized for IoT (Internet of Things), Vehicle Telematics, Industrial Internet, IT DevOps, etc. by TAOSData. Since it opened its source code in July 2019, it has won the favor of a large number of time-series data developers with its innovative data modeling design, convenient installation, easy-to-use programming interface, and powerful data writing and query performance. +TDengine is a big data platform designed and optimized for IoT (Internet of Things), Vehicle Telemetry, Industrial Internet, IT DevOps and other applications. Since it was open-sourced in July 2019, it has won the favor of a large number of time-series data developers with its innovative data modeling design, convenient installation, easy-to-use programming interface, and powerful data writing and query performance. IT DevOps metric data usually are time sensitive, for example: - System resource metrics: CPU, memory, IO, bandwidth, etc. - Software system metrics: health status, number of connections, number of requests, number of timeouts, number of errors, response time, service type, and other business-related metrics. -Current mainstream IT DevOps system usually include a data collection module, a data persistent module, and a visualization module; Telegraf and Grafana are one of the most popular data collection modules and visualization modules, respectively. The data persistent module is available in a wide range of options, with OpenTSDB or InfluxDB being the most popular. TDengine, as an emerging time-series big data platform, has the advantages of high performance, high reliability, easy management and easy maintenance. +Current mainstream IT DevOps systems usually include a data collection module, a data persistence module, and a visualization module; Telegraf and Grafana are among the most popular data collection and visualization modules, respectively. The data persistence module is available in a wide range of options, with OpenTSDB or InfluxDB being the most popular. TDengine, as an emerging time-series big data platform, has the advantages of high performance, high reliability, easy management and easy maintenance. -This article introduces how to quickly build a TDengine + Telegraf + Grafana based IT DevOps visualization system without writing even a single line of code and by simply modifying a few lines of configuration files. The architecture is as follows. +This article introduces how to quickly build a TDengine + Telegraf + Grafana based IT DevOps visualization system without writing even a single line of code and by simply modifying a few lines in configuration files. The architecture is as follows.
-![IT-DevOps-Solutions-Telegraf.webp](./IT-DevOps-Solutions-Telegraf.webp) +![TDengine Database IT-DevOps-Solutions-Telegraf](./IT-DevOps-Solutions-Telegraf.webp) ## Installation steps @@ -73,11 +73,11 @@ sudo systemctl start telegraf Log in to the Grafana interface using a web browser at `IP:3000`, with the system's initial username and password being `admin/admin`. Click on the gear icon on the left and select `Plugins`, you should find the TDengine data source plugin icon. -Click on the plus icon on the left and select `Import` to get the data from `https://github.com/taosdata/grafanaplugin/blob/master/examples/telegraf/grafana/dashboards/telegraf-dashboard- v0.1.0.json`, download the dashboard JSON file and import it. You will then see the dashboard in the following screen. +Click on the plus icon on the left and select `Import` to get the data from `https://github.com/taosdata/grafanaplugin/blob/master/examples/telegraf/grafana/dashboards/telegraf-dashboard-v0.1.0.json`, download the dashboard JSON file and import it. You will then see the dashboard in the following screen. -![IT-DevOps-Solutions-telegraf-dashboard.webp](./IT-DevOps-Solutions-telegraf-dashboard.webp) +![TDengine Database IT-DevOps-Solutions-telegraf-dashboard](./IT-DevOps-Solutions-telegraf-dashboard.webp) ## Wrap-up -The above demonstrates how to quickly build a IT DevOps visualization system. Thanks to the new schemaless protocol parsing feature in TDengine version 2.4.0.0 and the powerful ecological software adaptation capability, users can build an efficient and easy-to-use IT DevOps visualization system in just a few minutes. +The above demonstrates how to quickly build an IT DevOps visualization system. Thanks to the new schemaless protocol parsing feature in TDengine version 2.4.0.0 and the ability to integrate easily with a large software ecosystem, users can build an efficient and easy-to-use IT DevOps visualization system in just a few minutes. Please refer to the official documentation and product implementation cases for other features. diff --git a/docs-en/25-application/02-collectd.md b/docs-en/25-application/02-collectd.md index 0ddea2855497f1dfdfce7a2aa6749e0c5ba1b9ff..1733ed1b1af8c9375c3773d1ca86831396499a78 100644 --- a/docs-en/25-application/02-collectd.md +++ b/docs-en/25-application/02-collectd.md @@ -5,19 +5,19 @@ title: Quickly build an IT DevOps visualization system using TDengine + collectd ## Background -TDengine is a big data platform designed and optimized for IoT (Internet of Things), Vehicle Telematics, Industrial Internet, IT DevOps, etc. by TAOSData. Since it opened its source code in July 2019, it has won the favor of a large number of time-series data developers with its innovative data modeling design, convenient installation, easy-to-use programming interface, and powerful data writing and query performance. +TDengine is a big data platform designed and optimized for IoT (Internet of Things), Vehicle Telemetry, Industrial Internet, IT DevOps and other applications. Since it was open-sourced in July 2019, it has won the favor of a large number of time-series data developers with its innovative data modeling design, convenient installation, easy-to-use programming interface, and powerful data writing and query performance. IT DevOps metric data usually are time sensitive, for example: - System resource metrics: CPU, memory, IO, bandwidth, etc.
- Software system metrics: health status, number of connections, number of requests, number of timeouts, number of errors, response time, service type, and other business-related metrics. -The current mainstream IT DevOps visualization system usually contains a data collection module, a data persistent module, and a visual display module. collectd/StatsD, as an old-fashion open source data collection tool, has a wide user base. However, collectd/StatsD has limited functionality, and often needs to be combined with Telegraf, Grafana, and a time-series database to build a complete monitoring system. +The current mainstream IT DevOps visualization system usually contains a data collection module, a data persistence module, and a visual display module. collectd/StatsD, as an old-fashioned open source data collection tool, has a wide user base. However, collectd/StatsD has limited functionality, and often needs to be combined with Telegraf, Grafana, and a time-series database to build a complete monitoring system. The new version of TDengine supports multiple data protocols and can accept data from collectd and StatsD directly, and provides Grafana dashboard for graphical display. -This article introduces how to quickly build an IT DevOps visualization system based on TDengine + collectd / StatsD + Grafana without writing even a single line of code but by simply modifying a few lines of configuration files. The architecture is shown in the following figure. +This article introduces how to quickly build an IT DevOps visualization system based on TDengine + collectd / StatsD + Grafana without writing even a single line of code but by simply modifying a few lines in configuration files. The architecture is shown in the following figure. -![IT-DevOps-Solutions-Collectd-StatsD.webp](./IT-DevOps-Solutions-Collectd-StatsD.webp) +![TDengine Database IT-DevOps-Solutions-Collectd-StatsD](./IT-DevOps-Solutions-Collectd-StatsD.webp) ## Installation Steps @@ -83,22 +83,22 @@ Click on the gear icon on the left and select `Plugins`, you should find the TDe Download the dashboard json from `https://github.com/taosdata/grafanaplugin/blob/master/examples/collectd/grafana/dashboards/collect-metrics-with-tdengine-v0.1.0.json`, click the plus icon on the left and select Import, follow the instructions to import the JSON file. After that, you can see the dashboard in the following screen. -![IT-DevOps-Solutions-collectd-dashboard.webp](./IT-DevOps-Solutions-collectd-dashboard.webp) +![TDengine Database IT-DevOps-Solutions-collectd-dashboard](./IT-DevOps-Solutions-collectd-dashboard.webp) #### import collectd dashboard Download the dashboard json file from `https://github.com/taosdata/grafanaplugin/blob/master/examples/collectd/grafana/dashboards/collect-metrics-with-tdengine-v0.1.0.json`, click the plus icon on the left side and select `Import`, and follow the interface prompts to select the JSON file to import. After that, you can see the dashboard with the following interface. -![IT-DevOps-Solutions-collectd-dashboard.webp](./IT-DevOps-Solutions-collectd-dashboard.webp) +![TDengine Database IT-DevOps-Solutions-collectd-dashboard](./IT-DevOps-Solutions-collectd-dashboard.webp) #### Importing the StatsD dashboard Download the dashboard json from `https://github.com/taosdata/grafanaplugin/blob/master/examples/statsd/dashboards/statsd-with-tdengine-v0.1.0.json`. Click on the plus icon on the left and select `Import`, and follow the interface prompts to import the JSON file.
You will then see the dashboard in the following screen. -![IT-DevOps-Solutions-statsd-dashboard.webp](./IT-DevOps-Solutions-statsd-dashboard.webp) +![TDengine Database IT-DevOps-Solutions-statsd-dashboard](./IT-DevOps-Solutions-statsd-dashboard.webp) ## Wrap-up -TDengine, as an emerging time-series big data platform, has the advantages of high performance, high reliability, easy management and easy maintenance. Thanks to the new schemaless protocol parsing function in TDengine version 2.4.0.0 and the powerful ecological software adaptation capability, users can build an efficient and easy-to-use IT DevOps visualization system or adapt to an existing system in just a few minutes. +TDengine, as an emerging time-series big data platform, has the advantages of high performance, high reliability, easy management and easy maintenance. Thanks to the new schemaless protocol parsing feature in TDengine version 2.4.0.0 and its ability to integrate easily with a large software ecosystem, users can build an efficient and easy-to-use IT DevOps visualization system, or adapt an existing system, in just a few minutes. For TDengine's powerful data writing and querying performance and other features, please refer to the official documentation and successful product implementation cases. diff --git a/docs-en/25-application/03-immigrate.md b/docs-en/25-application/03-immigrate.md index 68d8a2b8cc25c80b8a647332df66874bee344715..4d47aec1d76014ba63f6be91004abcc3934769f7 100644 --- a/docs-en/25-application/03-immigrate.md +++ b/docs-en/25-application/03-immigrate.md @@ -3,10 +3,9 @@ sidebar_label: OpenTSDB Migration to TDengine title: Best Practices for Migrating OpenTSDB Applications to TDengine --- -As a distributed, scalable, HBase-based distributed time-series database software, thanks to its first-mover advantage, OpenTSDB has been introduced and widely used in DevOps by people. However, using new technologies like cloud computing, microservices, and containerization technology with rapid development. Enterprise-level services are becoming more and more diverse. The architecture is becoming more complex. +As a distributed, scalable time-series database platform based on HBase, and thanks to its first-mover advantage, OpenTSDB is widely used for monitoring in DevOps. However, as new technologies like cloud computing, microservices, and containerization have developed rapidly, enterprise-level services are becoming more and more diverse and the architecture is becoming more complex. -From this situation, it increasingly plagues to use of OpenTSDB as a DevOps backend storage for monitoring by performance issues and delayed feature upgrades. The resulting increase in application deployment costs and reduced operational efficiency. -These problems are becoming increasingly severe as the system scales up. +As a result, as a DevOps backend for monitoring, OpenTSDB is plagued by performance issues and delayed feature upgrades. This has resulted in increased application deployment costs and reduced operational efficiency. These problems become increasingly severe as the system tries to scale up. To meet the fast-growing IoT big data market and technical needs, TAOSData developed an innovative big-data processing product, **TDengine**. @@ -14,14 +13,14 @@ After learning the advantages of many traditional relational databases and NoSQL Compared with OpenTSDB, TDengine has the following distinctive features. -- Performance of data writing and querying far exceeds that of OpenTSDB.
-- Efficient compression mechanism for time-series data, which compresses less than 1/5 of the storage space on disk. -The installation and deployment are straightforward. A single installation package can complete the installation and deployment and does not rely on other third-party software. The entire installation and deployment process in a few seconds; -The built-in functions cover all of OpenTSDB's query functions. And support more time-series data query functions, scalar functions, and aggregation functions. And support advanced query functions such as multiple time-window aggregations, join query, expression operation, multiple group aggregation, user-defined sorting, and user-defined functions. Adopting SQL-like syntax rules is more straightforward and has no learning cost. +- Data writing and querying performance far exceeds that of OpenTSDB. +- Efficient compression mechanism for time-series data, which compresses data to less than 1/5 of its original size on disk. +- The installation and deployment are straightforward. A single installation package can complete the installation and deployment and does not rely on other third-party software. The entire installation and deployment process takes a few seconds. +- The built-in functions cover all of OpenTSDB's query functions and TDengine supports more time-series data query functions, scalar functions, and aggregation functions. TDengine also supports advanced query functions such as multiple time-window aggregations, join query, expression operation, multiple group aggregation, user-defined sorting, and user-defined functions. With a SQL-like query language, querying is more straightforward and has no learning cost (see the short sketch at the end of this section). - Supports up to 128 tags, with a total tag length of 16 KB. - In addition to the REST interface, it also provides interfaces to Java, Python, C, Rust, Go, C# and other languages. It supports a variety of enterprise-class standard connector protocols such as JDBC. -If we migrate the applications originally running on OpenTSDB to TDengine, we will effectively reduce the compute and storage resource consumption and the number of deployed servers. And will also significantly reduce the operation and maintenance costs, making operation and maintenance management more straightforward and more accessible, and considerably reducing the total cost of ownership. Like OpenTSDB, TDengine has also been open-sourced, including the stand-alone version and the cluster version source code. So there is no need to be concerned about the vendor-lock problem. +Migrating applications originally running on OpenTSDB to TDengine effectively reduces compute and storage resource consumption and the number of deployed servers. It also significantly reduces operation and maintenance costs, makes operation and maintenance management more straightforward and more accessible, and considerably reduces the total cost of ownership. Like OpenTSDB, TDengine has also been open-sourced. Both the stand-alone version and the cluster version are open-sourced and there is no need to be concerned about the vendor lock-in problem. We will explain how to migrate OpenTSDB applications to TDengine quickly, securely, and reliably without coding, using the most typical DevOps scenarios. Subsequent chapters will go into more depth to facilitate migration for non-DevOps systems.
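To give a feel for the SQL-like syntax mentioned in the feature list above, here is a minimal sketch of a typical DevOps downsampling query in TDengine. The super table name, column name and time range are hypothetical placeholders rather than part of the original document, and should be replaced by your own schema.

```sql
-- Hypothetical super table: cpu (ts TIMESTAMP, val DOUBLE) with a `host` tag.
-- Average value per 1-minute window over the last hour, the kind of query an
-- OpenTSDB downsampler would otherwise perform:
SELECT AVG(val) FROM cpu WHERE ts >= now - 1h INTERVAL(1m);
```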
@@ -32,9 +31,9 @@ We will explain how to migrate OpenTSDB applications to TDengine quickly, secure The following figure (Figure 1) shows the system's overall architecture for a typical DevOps application scenario. **Figure 1. Typical architecture in a DevOps scenario** -![IT-DevOps-Solutions-Immigrate-OpenTSDB-Arch](./IT-DevOps-Solutions-Immigrate-OpenTSDB-Arch.webp "Figure 1. Typical architecture in a DevOps scenario") +![TDengine Database IT-DevOps-Solutions-Immigrate-OpenTSDB-Arch](./IT-DevOps-Solutions-Immigrate-OpenTSDB-Arch.webp "Figure 1. Typical architecture in a DevOps scenario") -In this application scenario, there are Agent tools deployed in the application environment to collect machine metrics, network metrics, and application metrics. Data collectors to aggregate information collected by agents, systems for persistent data storage and management, and tools for monitoring data visualization (e.g., Grafana, etc.). +In this application scenario, there are Agent tools deployed in the application environment to collect machine metrics, network metrics, and application metrics. There are also data collectors to aggregate information collected by agents, systems for persistent data storage and management, and tools for data visualization (e.g., Grafana). The agents deployed in the application nodes are responsible for providing operational metrics from different sources to collectd/StatsD, and collectd/StatsD is responsible for pushing the aggregated data to the OpenTSDB cluster system; the data is then visualized using the kanban board software, Grafana. @@ -44,15 +43,15 @@ The agents deployed in the application nodes are responsible for providing opera First of all, please install TDengine. Download the latest stable version of TDengine from the official website and install it. For help with using various installation packages, please refer to the blog ["Installation and Uninstallation of TDengine Multiple Installation Packages"](https://www.taosdata.com/blog/2019/08/09/566.html). -Note that once the installation is complete, do not start the `taosd` service immediately, but after properly configuring the parameters. +Note that once the installation is complete, do not start the `taosd` service before properly configuring the parameters. - **Adjusting the data collector configuration** TDengine version 2.4 and later versions include `taosAdapter`. taosAdapter is a stateless, rapidly elastic, and scalable component. taosAdapter supports InfluxDB's Line Protocol and OpenTSDB's telnet/JSON writing protocol specification, providing rich data access capabilities, effectively saving user migration costs and reducing the difficulty of user migration. -Users can flexibly deploy taosAdapter instances according to their requirements to rapidly improve the throughput of data writes in conjunction with the needs of scenarios and provide guarantees for data writes in different application scenarios. +Users can flexibly deploy taosAdapter instances, based on their requirements, to improve data writing throughput and provide guarantees for data writes in different application scenarios. -Through taosAdapter, users can directly push the data collected by `collectd` or `StatsD` to TDengine to achieve seamless migration of application scenarios, which is very easy and convenient. taosAdapter also supports Telegraf, Icinga, TCollector, and node_exporter data. For more details, please refer to [taosAdapter](/reference/taosadapter/).
+Through taosAdapter, users can directly write the data collected by `collectd` or `StatsD` to TDengine to achieve easy, convenient and seamless migration in application scenarios. taosAdapter also supports Telegraf, Icinga, TCollector, and node_exporter data. For more details, please refer to [taosAdapter](/reference/taosadapter/). If using collectd, modify the configuration file in its default location `/etc/collectd/collectd.conf` to point to the IP address and port of the node where taosAdapter is deployed. For example, assuming the taosAdapter IP address is 192.168.1.130 and port 6046, configure it as follows. @@ -66,29 +65,29 @@ LoadPlugin write_tsdb ``` -You can use collectd and push the data to taosAdapter utilizing the push to OpenTSDB plugin. taosAdapter will call the API to write the data to TDengine, thus completing the writing of the data. If you are using StatsD, adjust the profile information accordingly. +You can use collectd and push the data to taosAdapter utilizing the write_tsdb plugin. taosAdapter will call the API to write the data to TDengine. If you are using StatsD, adjust the profile information accordingly. - **Tuning the Dashboard system** -After writing the data to TDengine properly, you can adapt Grafana to visualize the data written to TDengine. To obtain and use the Grafana plugin provided by TDengine, please refer to [Links to other tools](/third-party/grafana). +After writing the data to TDengine, you can configure Grafana to visualize the data written to TDengine. To obtain and use the Grafana plugin provided by TDengine, please refer to [Links to other tools](/third-party/grafana). TDengine provides two sets of Dashboard templates by default, and users only need to import the templates from the Grafana directory into Grafana to activate their use. **Importing Grafana Templates** Figure 2. -![](./IT-DevOps-Solutions-Immigrate-OpenTSDB-Dashboard.webp "Figure 2. Importing a Grafana Template") +![TDengine Database IT-DevOps-Solutions-Immigrate-OpenTSDB-Dashboard](./IT-DevOps-Solutions-Immigrate-OpenTSDB-Dashboard.webp "Figure 2. Importing a Grafana Template") -After the above steps, you completed the migration to replace OpenTSDB with TDengine. You can see that the whole process is straightforward, there is no need to write any code, and only some configuration files need to be adjusted to meet the migration work. +With the above steps completed, you have finished replacing OpenTSDB with TDengine. You can see that the whole process is straightforward, there is no need to write any code, and only some configuration files need to be changed. ### 3. Post-migration architecture -After completing the migration, the figure below (Figure 3) shows the system's overall architecture. The whole process of the acquisition side, the data writing, and the monitoring and presentation side are all kept stable, except for a few configuration adjustments, which do not involve any critical changes or alterations. OpenTSDB to TDengine migration action, using TDengine more powerful processing power and query performance. +After completing the migration, the figure below (Figure 3) shows the system's overall architecture. The whole process of the acquisition side, the data writing, and the monitoring and presentation side are all kept stable. There are a few configuration adjustments, which do not involve any critical changes or alterations. Migrating from OpenTSDB to TDengine gives you access to TDengine's more powerful processing and query performance.
-In most DevOps scenarios, if you have a small OpenTSDB cluster (3 or fewer nodes) for providing the storage layer of DevOps and rely on OpenTSDB to give a data persistence layer and query capabilities, you can safely replace OpenTSDB with TDengine. TDengine will save more compute and storage resources. With the same compute resource allocation, a single TDengine can meet the service capacity provided by 3 to 5 OpenTSDB nodes. If the scale is more prominent, then TDengine clustering is required. - -Suppose your application is particularly complex, or the application domain is not a DevOps scenario. You can continue reading subsequent chapters for a more comprehensive and in-depth look at the advanced topics of migrating an OpenTSDB application to TDengine. +In most DevOps scenarios, if you have a small OpenTSDB cluster (3 or fewer nodes) which provides a storage and data persistence layer in addition to query capability, you can safely replace OpenTSDB with TDengine. TDengine will save compute and storage resources. With the same compute resource allocation, a single TDengine can meet the service capacity provided by 3 to 5 OpenTSDB nodes. TDengine clustering may be required depending on the scale of the application. **Figure 3. System architecture after migration** -![IT-DevOps-Solutions-Immigrate-TDengine-Arch](./IT-DevOps-Solutions-Immigrate-TDengine-Arch.webp "Figure 3. System architecture after migration completion") +![TDengine Database IT-DevOps-Solutions-Immigrate-TDengine-Arch](./IT-DevOps-Solutions-Immigrate-TDengine-Arch.webp "Figure 3. System architecture after migration completion") + +The following chapters provide a more comprehensive and in-depth look at the advanced topics of migrating an OpenTSDB application to TDengine. This will be useful if your application is particularly complex and is not a DevOps application. ## Migration evaluation and strategy for other scenarios @@ -96,26 +95,25 @@ Suppose your application is particularly complex, or the application domain is n This chapter describes the differences between OpenTSDB and TDengine at the system functionality level. After reading this chapter, you can fully evaluate whether you can migrate some complex OpenTSDB-based applications to TDengine, and what you should pay attention to after migration. -TDengine currently only supports Grafana for visual kanban rendering, so if your application uses front-end kanban boards other than Grafana (e.g., [TSDash](https://github.com/facebook/tsdash), [Status Wolf](https://github.com/box/StatusWolf), etc.). You cannot directly migrate those front-end kanbans to TDengine, and the front-end kanban will need to be ported to Grafana to work correctly. +TDengine currently only supports Grafana for visual kanban rendering, so if your application uses front-end kanban boards other than Grafana (e.g., [TSDash](https://github.com/facebook/tsdash), [Status Wolf](https://github.com/box/StatusWolf), etc.) you cannot directly migrate those front-end kanbans to TDengine. The front-end kanban will need to be ported to Grafana to work correctly. -TDengine version 2.3.0.x only supports collectd and StatsD as data collection aggregation software but will provide more data collection aggregation software in the future. If you use other data aggregators on the collection side, your application needs to be ported to these two data aggregation systems to write data correctly.
+TDengine version 2.3.0.x only supports collectd and StatsD as data collection and aggregation software, but future versions will provide support for more data collection and aggregation software. If you use other data aggregators on the collection side, your application needs to be ported to these two data aggregation systems to write data correctly. In addition to the two data aggregator software protocols mentioned above, TDengine also supports writing data directly via InfluxDB's line protocol and OpenTSDB's data writing protocol, JSON format. You can rewrite the logic on the data push side to write data using the line protocols supported by TDengine. -In addition, if your application uses the following features of OpenTSDB, you need to understand the following considerations before migrating your application to TDengine. +In addition, if your application uses the following features of OpenTSDB, you need to take into account the following considerations before migrating your application to TDengine. 1. `/api/stats`: If your application uses this feature to monitor the service status of OpenTSDB, and you have built the relevant logic to link the processing in your application, then this part of the status reading and fetching logic needs to be re-adapted to TDengine. TDengine provides a new mechanism for handling cluster state monitoring to meet the monitoring and maintenance needs of your application. -2. `/api/tree`: If you rely on this feature of OpenTSDB for the hierarchical organization and maintenance of timelines, you cannot migrate it directly to TDengine, which uses a database -> super table -> sub-table hierarchy to organize and maintain timelines, with all timelines belonging to the same super table in the same system hierarchy, but it is possible to simulate a logical multi-level structure of the application through the unique construction of different tag values. -3. `Rollup And PreAggregates`: The use of Rollup and PreAggregates requires the application to decide where to access the Rollup results and, in some scenarios, to access the actual results. The opacity of this structure makes the application processing logic extraordinarily complex and not portable at all. We think this strategy is a compromise when the time-series database does not. -TDengine does not support automatic downsampling of multiple timelines and preaggregation (for a range of periods) for the time being. Still, thanks to its high-performance query processing logic can provide very high-performance query responses without relying on Rollup and preaggregation (for a range of periods), making your application query processing logic much more straightforward. -The logic is much simpler. -4. `Rate`: TDengine provides two functions to calculate the rate of change of values, namely `Derivative` (the result is consistent with the Derivative behavior of InfluxDB) and `IRate` (the result is compatible with the IRate function in Prometheus). However, the results of these two functions are slightly different from Rate, but the functions are more powerful overall. In addition, TDengine supports all the calculation functions provided by OpenTSDB, and TDengine's query functions are much more potent than those supported by OpenTSDB, which can significantly simplify the processing logic of your application. +2.
`/api/tree`: If you rely on this feature of OpenTSDB for the hierarchical organization and maintenance of timelines, you cannot migrate it directly to TDengine, which uses a database -> super table -> sub-table hierarchy to organize and maintain timelines, with all timelines belonging to the same super table in the same system hierarchy. But it is possible to simulate a logical multi-level structure of the application through the unique construction of different tag values. +3. `Rollup And PreAggregates`: The use of Rollup and PreAggregates requires the application to decide where to access the Rollup results and, in some scenarios, to access the actual results. The opacity of this structure makes the application processing logic extraordinarily complex and not portable at all. +While TDengine does not currently support automatic downsampling of multiple timelines and preaggregation (for a range of periods), thanks to its high-performance query processing logic, it can provide very high-performance query responses without relying on Rollup and preaggregation (for a range of periods). This makes your application query processing logic much more straightforward. +4. `Rate`: TDengine provides two functions to calculate the rate of change of values, namely `Derivative` (the result is consistent with the Derivative behavior of InfluxDB) and `IRate` (the result is compatible with the IRate function in Prometheus). However, the results of these two functions are slightly different from that of Rate, but the TDengine functions are more powerful (see the sketch below). In addition, TDengine supports all the calculation functions provided by OpenTSDB. TDengine's query functions are much more powerful than those supported by OpenTSDB, which can significantly simplify the processing logic of your application.
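As a rough sketch of the two rate functions mentioned in item 4, shown against a hypothetical sub-table: the table and column names below are placeholders, and availability and exact signatures should be checked against the TDengine version in use.

```sql
-- Rate of change of `val` expressed per second; the third argument is set to 1
-- so that negative deltas (e.g. counter resets) are ignored:
SELECT DERIVATIVE(val, 1s, 1) FROM cpu_vm130_cpu_user;

-- Instantaneous rate of change computed from the last two samples,
-- compatible in spirit with the Prometheus irate() function:
SELECT IRATE(val) FROM cpu_vm130_cpu_user;
```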
-Through the above introduction, I believe you should be able to understand the changes brought about by the migration of OpenTSDB to TDengine. And this information will also help you correctly determine whether you would migrate your application to TDengine to experience the powerful and convenient time-series data processing capability provided by TDengine. +With the above introduction, we believe you should be able to understand the changes brought about by the migration of OpenTSDB to TDengine. This information will also help you correctly determine whether you should migrate your application to TDengine to experience the powerful and convenient time-series data processing capability provided by TDengine. ### 2. Migration strategy suggestion -First, the OpenTSDB-based system migration involves data schema design, system scale estimation, and data write end transformation, data streaming, and application adaptation; after that, the two systems will run in parallel for a while and then migrate the historical data to TDengine. Of course, if your application has some functions that strongly depend on the above OpenTSDB features and you do not want to stop using them, you can migrate the historical data to TDengine. -You can consider keeping the original OpenTSDB system running while starting TDengine to provide the primary services. +OpenTSDB-based system migration involves data schema design, system scale estimation, data write transformation, data streaming, and application changes. The two systems should run in parallel for a while, after which the historical data can be migrated to TDengine. +If your application has some functions that strongly depend on the above OpenTSDB features and you do not want to stop using them, you can consider keeping the original OpenTSDB system running while using TDengine to provide the primary services. ## Data model design @@ -129,16 +127,19 @@ Let us now assume a DevOps scenario where we use collectd to collect the underly | 2 | swap | value | double | host | swap_type | swap_type_instance | source | n/a | | 3 | disk | value | double | host | disk_point | disk_instance | disk_type | source | -TDengine requires the data stored to have a data schema, i.e., you need to create a super table and specify the schema of the super table before writing the data. For data schema creation, you have two ways to do this: 1) Take advantage of TDengine's native data writing support for OpenTSDB by calling the TDengine API to write (text line or JSON format) -and automate the creation of single-value models. This approach does not require significant adjustments to the data writing application, nor does it require converting the written data format. +TDengine requires the data stored to have a data schema, i.e., you need to create a super table and specify the schema of the super table before writing the data. For data schema creation, you have two ways to do this: +1) Take advantage of TDengine's native data writing support for OpenTSDB by calling the TDengine API to write (text line or JSON format) and automate the creation of single-value models. This approach does not require significant adjustments to the data writing application, nor does it require converting the written data format. At the C level, TDengine provides the `taos_schemaless_insert()` function to write data in OpenTSDB format directly (in earlier versions this function was named `taos_insert_lines()`). Please refer to the sample code `schemaless.c` in the installation package directory as a reference. -(2) based on a complete understanding of TDengine's data model, to establish the mapping relationship between OpenTSDB and TDengine's data model adjustment manually. Considering that OpenTSDB is a single-value mapping model, recommended using the single-value model in TDengine. TDengine can support both multi-value and single-value models. +2) Based on a thorough understanding of TDengine's data model, establish a mapping between OpenTSDB and TDengine's data model. Considering that OpenTSDB is a single-value mapping model, we recommend using the single-value model in TDengine for simplicity. But keep in mind that TDengine supports both multi-value and single-value models. - **Single-valued model**. -The steps are as follows: use the name of the metrics as the name of the TDengine super table, which build with two basic data columns - timestamp and value, and the label of the super table is equivalent to the label information of the metrics, and the number of labels is equal to the number of labels of the metrics. The names of sub-tables are named with fixed rules: `metric + '_' + tags1_value + '_' + tag2_value + '_' + tag3_value ...` as the sub-table name. +The steps are as follows: +- Use the name of the metric as the name of the TDengine super table +- Build the super table with two basic data columns: timestamp and value. The tags of the super table are equivalent to the label information of the metric, and the number of tags is equal to the number of labels of the metric. +- Name sub-tables using a fixed rule: `metric + '_' + tag1_value + '_' + tag2_value + '_' + tag3_value ...` (as sketched below). Create 3 super tables in TDengine.
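The actual CREATE statements are elided from this diff. Purely as a sketch of the single-value model just described, the `memory` metric could map to TDengine as follows; the tag names follow the pattern of the metrics table above, and the column and tag types are assumptions.

```sql
-- One super table per metric: two data columns (timestamp, value) plus one tag
-- per OpenTSDB label:
CREATE STABLE memory (ts TIMESTAMP, val DOUBLE)
  TAGS (host BINARY(64), memory_type BINARY(32), memory_type_instance BINARY(32), source BINARY(32));

-- A sub-table named by the fixed rule metric + '_' + tag values:
CREATE TABLE memory_vm130_memory_buffered
  USING memory TAGS ('vm130', 'memory', 'buffered', 'collectd');
```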
@@ -158,13 +159,13 @@ The final system will have about 340 sub-tables and three super-tables. Note tha - **Multi-value model** -Suppose you want to take advantage of TDengine's multi-value modeling capabilities. In that case, you need first to meet the requirements that different collection quantities have the same collection frequency and can reach the **data write side simultaneously via a message queue**, thus ensuring writing multiple metrics at once using SQL statements. The metric's name is used as the name of the super table to create a multi-column model of data that has the same collection frequency and can arrive simultaneously. The names of the sub-tables are named using a fixed rule. Each of the above metrics contains only one measurement value, so converting it into a multi-value model is impossible. +To take advantage of TDengine's multi-value modeling capabilities, you first need to meet the requirement that different collection quantities have the same collection frequency and can reach the **data write side simultaneously via a message queue**, thus ensuring that multiple metrics can be written at once using a single SQL statement. The metric's name is used as the name of the super table to create a multi-column model of data that has the same collection frequency and can arrive simultaneously. The sub-tables are named using a fixed rule. However, each of the above metrics contains only one measurement value, so they cannot be converted into a multi-value model here. ## Data triage and application adaptation -Subscribe data from the message queue and start the adapted writer to write the data. +Subscribe to the message queue and start writing data to TDengine. -After writing the data starts for a while, you can use SQL statements to check whether the amount of data written meets the expected writing requirements. Use the following SQL statement to count the amount of data. +After data has been written for a while, you can use SQL statements to check whether the amount of data written meets the expected writing requirements. Use the following SQL statement to count the amount of data. ```sql select count(*) from memory @@ -184,7 +185,7 @@ To facilitate historical data migration, we provide a plug-in for the data synch For the specific usage of DataX and how to use DataX to write data to TDengine, please refer to [DataX-based TDengine Data Migration Tool](https://www.taosdata.com/blog/2021/10/26/3156.html). -After migrating via DataX, we found that we can significantly improve the efficiency of migrating historical data by starting multiple processes and migrating numerous metrics simultaneously. The following are some records of the migration process. I wish to use these for application migration as a reference. +After migrating via DataX, we found that we can significantly improve the efficiency of migrating historical data by starting multiple processes and migrating numerous metrics simultaneously. The following are some records of the migration process. We provide these as a reference for application migration. | Number of datax instances (number of concurrent processes) | Migration record speed (pieces/second) | | ----------------------------- | ---------------------- | @@ -202,13 +203,13 @@ Suppose you need to use the multi-value model for data writing.
In that case, yo Manual migration of data requires attention to the following two issues: -1) When storing the exported data on the disk, the disk needs to have enough storage space to accommodate the exported data files fully. Adopting the partial import mode to avoid the shortage of disk file storage after the total amount of data is exported. Preferentially export the timelines belonging to the same super table. Then the exported data files are imported into the TDengine system. +1) When storing the exported data on the disk, the disk needs to have enough storage space to accommodate the exported data files fully. To avoid running out of disk space, you can adopt a partial import mode in which you preferentially export the timelines belonging to the same super table and then import only those files into TDengine, as sketched below. -2) Under the full load of the system, if there are enough remaining computing and IO resources, establish a multi-threaded importing to maximize the efficiency of data migration. Considering the vast load that data parsing brings to the CPU, it is necessary to control the maximum number of parallel tasks to avoid the overall overload of the system triggered by importing historical data. +2) Under the full load of the system, if there are enough remaining computing and IO resources, establish a multi-threaded import to maximize the efficiency of data migration. Considering the vast load that data parsing brings to the CPU, it is necessary to control the maximum number of parallel tasks to avoid overloading the system when importing historical data.
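As a minimal sketch of the import step in issue 1), assuming the exported timelines have already been converted to CSV files whose columns match the target sub-table schema; the file path and table name below are hypothetical.

```sql
-- Load an exported CSV file into an existing sub-table using INSERT ... FILE:
INSERT INTO memory_vm130_memory_buffered FILE '/data/export/memory_vm130_memory_buffered.csv';
```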
Due to the ease of operation of TDengine itself, there is no need to perform index maintenance and data format change processing in the entire process. The whole process only needs to be executed sequentially. -When wholly importing the historical data into TDengine, the two systems run simultaneously and then switch the query request to TDengine to achieve seamless application switching. +While importing historical data into TDengine, the two systems should run simultaneously. Once all the data is migrated, switch the query request to TDengine to achieve seamless application switching. ## Appendix 1: OpenTSDB query function correspondence table @@ -222,12 +223,12 @@ Example: SELECT avg(val) FROM (SELECT first(val) FROM super_table WHERE ts >= startTime and ts <= endTime INTERVAL(20s) Fill(linear)) INTERVAL(20s) ``` -Remark: +Remarks: 1. The value in Interval needs to be the same as the interval value in the outer query. -2. The interpolation processing in TDengine needs to use subqueries to assist in the completion. As shown above, it is enough to specify the interpolation type in the inner query. Since the interpolation of the values in OpenTSDB uses linear interpolation, use fill( in the interpolation clause. linear) to declare the interpolation type. The following functions with the exact interpolation calculation requirements are processed by this method. -3. The parameter 20s in Interval indicates that the inner query will generate results according to a time window of 20 seconds. In an actual query, it needs to adjust to the time interval between different records. It ensures that producing interpolation results equivalent to the original data. -4. Due to the particular interpolation strategy and mechanism of OpenTSDB, the method of the first interpolation and then calculation in the aggregate query (Aggregate) makes the calculation results impossible to be utterly consistent with TDengine. But in the case of downsampling (Downsample), TDengine and OpenTSDB can obtain consistent results (since OpenTSDB performs aggregation and downsampling queries). +2. Interpolation processing in TDengine uses subqueries to assist in completion. As shown above, it is enough to specify the interpolation type in the inner query. Since OpenTSDB uses linear interpolation, use `fill(linear)` to declare the interpolation type in TDengine. Some of the functions mentioned below have exactly the same interpolation calculation requirements. +3. The parameter 20s in Interval indicates that the inner query will generate results according to a time window of 20 seconds. In an actual query, it needs to adjust to the time interval between different records. It ensures that interpolation results are equivalent to the original data. +4. Due to the particular interpolation strategy and mechanism of OpenTSDB, i.e., interpolation followed by aggregate calculation, it is impossible for the results to be completely consistent with those of TDengine. But in the case of downsampling (Downsample), TDengine and OpenTSDB can obtain consistent results (since OpenTSDB performs aggregation and downsampling queries). ### Count @@ -261,7 +262,7 @@ Select apercentile(col1, 50, "t-digest") from table_name Remark: -1. During the approximate query processing, OpenTSDB uses the t-digest algorithm by default, so in order to obtain the same calculation result, the algorithm used needs to be specified in the `apercentile()` function. TDengine can support two different approximation processing algorithms, declared by "default" and "t-digest" respectively. +1. When calculating approximate percentiles, OpenTSDB uses the t-digest algorithm by default. In order to obtain the same calculation results in TDengine, the algorithm used needs to be specified in the `apercentile()` function. TDengine supports two different percentile calculation algorithms named "default" and "t-digest" respectively. ### First @@ -379,35 +380,34 @@ We still use the hypothetical environment from Chapter 4. There are three measur ### Storage resource estimation Assuming that the number of sensor devices that generate data and need to be stored is `n`, the frequency of data generation is `t` per second, and the length of each record is `L` bytes, the scale of data generated per day is `86400 * n * t * L` bytes. Assuming the compression ratio is `C`, the daily data size is `(86400 * n * t * L)/C` bytes. The storage resources are estimated to accommodate the data scale for 1.5 years. In the production environment, the compression ratio C of TDengine is generally between 5 and 7. -With additional 20% redundancy, you can calculate the required storage resources: +With additional 20% redundancy, you can calculate the required storage resources: ```matlab (86400 * n * t * L) * (365 * 1.5) * (1+20%)/C ``` - -Combined with the above calculation formula, bring the parameters into the formula, and the raw data scale generated every year is 11.8TB without considering the label information. Note that since tag information is associated with each timeline in TDengine, not every record. The scale of the amount of data to be recorded is somewhat reduced relative to the generated data, and this part of label data can be ignored as a whole. Assuming a compression ratio of 5, the size of the retained data ends up being 2.56 TB. +Substituting in the above formula, the raw data generated every year is 11.8TB without considering the label information.
Note that tag information is associated with each timeline in TDengine, not every record. The amount of data to be recorded is somewhat reduced relative to the generated data, and label data can be ignored as a whole. Assuming a compression ratio of 5, the size of the retained data ends up being 2.56 TB. ### Storage Device Selection Considerations -The hard disk should be capable of better random read performance. Considering using an SSD as much as possible is a better choice. A disk with better random read performance is a great help to improve the system's query performance and improve the query response performance as a whole system. To obtain better query performance, the performance index of the single-threaded random read IOPS of the hard disk device should not be lower than 1000, and it is better to reach 5000 IOPS or more. Recommend to use `fio` utility software to evaluate the running performance (please refer to Appendix 1 for specific usage) for the random IO read of the current device to confirm whether it can meet the requirements of random read of large files. +A disk with better random read performance, such as an SSD, improves the system's query performance and hence the query response performance of the whole system. To obtain better query performance, the single-threaded random read IOPS of the hard disk device should not be lower than 1000, and it is better to reach 5000 IOPS or more. We recommend using the `fio` utility (please refer to Appendix 1 for specific usage) to evaluate the random read IO performance of the current device, to confirm whether it can meet the requirements of random reads of large files. Hard disk writing performance has little effect on TDengine. The TDengine writing process adopts the append write mode, so as long as it has good sequential write performance, both SAS hard disks and SSDs in the general sense can well meet TDengine's requirements for disk write performance. ### Computational resource estimates -Due to the particularity of IoT data, after the frequency of data generation is consistent, the writing process of TDengine maintains a relatively fixed amount of resource consumption (computing and storage). According to the [TDengine Operation and Maintenance Guide](/operation/) description, the system consumes less than 1 CPU core at 22,000 writes per second. +Due to the characteristics of IoT data, when the frequency of data generation is consistent, the writing process of TDengine maintains a relatively fixed amount of resource consumption (computing and storage). According to the [TDengine Operation and Maintenance Guide](/operation/) description, the system consumes less than 1 CPU core at 22,000 writes per second. -In estimating the CPU resources consumed by the query, assuming that the application requires the database to provide 10,000 QPS, the CPU time consumed by each query is about 1 ms. The query provided by each core per second is 1,000 QPS, which satisfies 10,000 QPS. The query request requires at least 10 cores. +In estimating the CPU resources consumed by queries, assume that the application requires the database to provide 10,000 QPS and that the CPU time consumed by each query is about 1 ms. Each core can then provide about 1,000 QPS, so satisfying 10,000 QPS requires at least 10 cores.
For the system as a whole to have less than 50% CPU load, the entire cluster needs twice as many cores, i.e., 20 cores. ### Memory resource estimation -The database allocates 16MB\*3 buffer memory for each Vnode by default. If the cluster system includes 22 CPU cores, TDengine will create 22 Vnodes (virtual nodes) by default. Each Vnode contains 1000 tables, which can accommodate all the tables. Then it takes about 1.5 hours to write a block, which triggers the drop, and no adjustment is required. A total of 22 Vnodes require about 1GB of memory cache. Considering the memory needed for the query, assuming that the memory overhead of each query is about 50MB, the memory required for 500 queries concurrently is about 25GB. +The database allocates 16MB\*3 buffer memory for each Vnode by default. If the cluster system includes 22 CPU cores, TDengine will create 22 Vnodes (virtual nodes) by default. Each Vnode contains 1000 tables, which is more than enough to accommodate all the tables in our hypothetical scenario. Then it takes about 1.5 hours to write a block, which triggers persistence to disk without requiring any adjustment. A total of 22 Vnodes require about 1GB of memory cache. Considering the memory needed for the query, assuming that the memory overhead of each query is about 50MB, the memory required for 500 queries concurrently is about 25GB. In summary, using a single 16-core 32GB machine or a cluster of 2 8-core 16GB machines is enough. ## Appendix 3: Cluster Deployment and Startup -TDengine provides a wealth of help documents to explain many aspects of cluster installation and deployment. Here is the list of corresponding document for your reference. +TDengine provides a wealth of help documents to explain many aspects of cluster installation and deployment. Here is the list of documents for your reference. ### Cluster Deployment @@ -421,7 +421,7 @@ To ensure that the system can obtain the necessary information for regular opera FQDN, firstEp, secondEP, dataDir, logDir, tmpDir, serverPort. For the specific meaning and setting requirements of each parameter, please refer to the document "[TDengine Cluster Installation and Management](/cluster/)" -Follow the same steps to set parameters on the nodes that need running, start the taosd service, and then add Dnodes to the cluster. +Follow the same steps to set parameters on the other nodes, start the taosd service, and then add Dnodes to the cluster. Finally, start `taos` and execute the `show dnodes` command. If you can see all the nodes that have joined the cluster, the cluster building process was successfully completed. For specific operation procedures and precautions, please refer to the document "[TDengine Cluster Installation and Management](/cluster/)". diff --git a/docs-en/27-train-faq/01-faq.md b/docs-en/27-train-faq/01-faq.md index 439775170937ef11fc964914232b2739d688b26f..e182e25b9e98bad11b9c90146400e3720605489e 100644 --- a/docs-en/27-train-faq/01-faq.md +++ b/docs-en/27-train-faq/01-faq.md @@ -5,38 +5,38 @@ title: Frequently Asked Questions ## Submit an Issue -If the tips in FAQ don't help much, please submit an issue on [GitHub](https://github.com/taosdata/TDengine) to describe your problem description, including TDengine version, hardware and OS information, the steps to reproduce the problem, etc. It would be very helpful if you package the contents in `/var/log/taos` and `/etc/taos` and upload.
These two are the default directories used by TDengine, if they have been changed in your configuration, please use according to the actual configuration. It's recommended to firstly set `debugFlag` to 135 in `taos.cfg`, restart `taosd`, then reproduce the problem and collect logs. If you don't want to restart, an alternative way of setting `debugFlag` is executing `alter dnode debugFlag 135` command in TDengine CLI `taos`. During normal running, however, please make sure `debugFlag` is set to 131. +If the tips in FAQ don't help much, please submit an issue on [GitHub](https://github.com/taosdata/TDengine) to describe your problem. In your description please include the TDengine version, hardware and OS information, the steps to reproduce the problem and any other relevant information. It would be very helpful if you can package the contents in `/var/log/taos` and `/etc/taos` and upload. These two are the default directories used by TDengine. If you have changed the default directories in your configuration, please package the files in your configured directories. We recommend setting `debugFlag` to 135 in `taos.cfg`, restarting `taosd`, then reproducing the problem and collecting the logs. If you don't want to restart, an alternative way of setting `debugFlag` is executing the `alter dnode debugFlag 135` command in TDengine CLI `taos`. During normal running, however, please make sure `debugFlag` is set to 131. ## Frequently Asked Questions ### 1. How to upgrade to TDengine 2.0 from older version? -version 2.x is not compatible with version 1.x regarding configuration file and data file, please do following before upgrading: +Version 2.x is not compatible with version 1.x. With regard to the configuration and data files, please perform the following steps before upgrading. Please follow data integrity, security, backup and other relevant SOPs and best practices before removing/deleting any data. -1. Delete configuration files: `sudo rm -rf /etc/taos/taos.cfg` +1. Delete configuration files: `sudo rm -rf /etc/taos/taos.cfg` 2. Delete log files: `sudo rm -rf /var/log/taos/` 3. Delete data files if the data doesn't need to be kept: `sudo rm -rf /var/lib/taos/` -4. Install latests 2.x version +4. Install the latest 2.x version -5. If the data needs to be kept and migrated to newer version, please contact professional service of TDengine for assistance +5. If the data needs to be kept and migrated to the newer version, please contact professional service at TDengine for assistance. ### 2. How to handle "Unable to establish connection"? -When the client is unable to connect to the server, you can try following ways to find out why. +When the client is unable to connect to the server, you can try the following ways to troubleshoot and resolve the problem. 1. Check the network - - Check if the hosts where the client and server are running can be accessible to each other, for example by `ping` command. - - Check if the TCP/UDP on port 6030-6042 are open for access if firewall is enabled. It's better to firstly disable firewall for diagnostics. - - Check if the FQDN and serverPort are configured correctly in `taos.cfg` used by the server side - - Check if the `firstEp` is set properly in the `taos.cfg` used by the client side + - Check if the hosts where the client and server are running are accessible to each other, for example by the `ping` command. + - Check if the TCP/UDP on port 6030-6042 are open for access if firewall is enabled.
If possible, disable the firewall for diagnostics, but please ensure that you are following security and other relevant protocols. + - Check if the FQDN and serverPort are configured correctly in `taos.cfg` used by the server side. + - Check if the `firstEp` is set properly in the `taos.cfg` used by the client side. 2. Make sure the client version and server version are the same. 3. On server side, check the running status of `taosd` by executing `systemctl status taosd`. If your server is started using another way instead of `systemctl`, use the proper method to check whether the server process is running normally. -4. If using connector of Python, Java, Go, Rust, C#, node.JS on Linux to connect toe the server, please make sure `libtaos.so` is in directory `/usr/local/taos/driver` and `/usr/local/taos/driver` is in system lib search environment variable `LD_LIBRARY_PATH`. +4. If using a connector for Python, Java, Go, Rust, C# or node.JS on Linux to connect to the server, please make sure `libtaos.so` is in directory `/usr/local/taos/driver` and `/usr/local/taos/driver` is in the system lib search environment variable `LD_LIBRARY_PATH`. -5. If using connector on Windows, please make sure `C:\TDengine\driver\taos.dll` is in your system lib search path, it's suggested to put `taos.dll` under `C:\Windows\System32`. +5. If using a connector on Windows, please make sure `C:\TDengine\driver\taos.dll` is in your system lib search path. We recommend putting `taos.dll` under `C:\Windows\System32`. 6. Some advanced network diagnostics tools @@ -45,7 +45,7 @@ When the client is unable to connect to the server, you can try following ways t Check whether a TCP port on server side is open: `nc -l {port}` Check whether a TCP port on client side is open: `nc {hostIP} {port}` - - On Windows system `Net-TestConnection -ComputerName {fqdn} -Port {port}` on PowerShell can be used to check whether the port on serer side is open for access. + - On Windows systems `Test-NetConnection -ComputerName {fqdn} -Port {port}` in PowerShell can be used to check whether the port on server side is open for access. 7. TDengine CLI `taos` can also be used to check network, please refer to [TDengine CLI](/reference/taos-shell). diff --git a/docs-en/27-train-faq/03-docker.md b/docs-en/27-train-faq/03-docker.md index 3f560bcfef6119480b5499649cee1602656dbd6f..afee13c1377b0b4331d6f7ec20251d1aa2db81a1 100644 --- a/docs-en/27-train-faq/03-docker.md +++ b/docs-en/27-train-faq/03-docker.md @@ -3,15 +3,15 @@ sidebar_label: TDengine in Docker title: Deploy TDengine in Docker --- -Even though it's not recommended to deploy TDengine using docker in production system, docker is still very useful in development environment, especially when your host is not Linux. From version 2.0.14.0, the official image of TDengine can support X86-64, X86, arm64, and rm32 . +We do not recommend deploying TDengine using Docker in a production system. However, Docker is still very useful in a development environment, especially when your host is not Linux. From version 2.0.14.0, the official image of TDengine supports X86-64, X86, arm64, and arm32. -In this chapter a simple step by step guide of using TDengine in docker is introduced. +In this chapter we introduce a simple step-by-step guide to using TDengine in Docker. ## Install Docker -The installation of docker please refer to [Get Docker](https://docs.docker.com/get-docker/). +To install Docker please refer to [Get Docker](https://docs.docker.com/get-docker/).
-After docker is installed, you can check whether Docker is installed properly by displaying Docker version. +After Docker is installed, you can check whether Docker is installed properly by displaying the Docker version. ```bash $ docker -v @@ -27,7 +27,7 @@ $ docker run -d -p 6030-6049:6030-6049 -p 6030-6049:6030-6049/udp tdengine/tdeng 526aa188da767ae94b244226a2b2eec2b5f17dd8eff592893d9ec0cd0f3a1ccd ``` -In the above command, a docker container is started to run TDengine server, the port range 6030-6049 of the container is mapped to host port range 6030-6049. If port range 6030-6049 has been occupied on the host, please change to an available host port range. Regarding the requirements about ports on the host, please refer to [Port Configuration](/reference/config/#serverport). +In the above command, a docker container is started to run the TDengine server; the port range 6030-6049 of the container is mapped to host port range 6030-6049. If port range 6030-6049 has been occupied on the host, please change to an available host port range. For port requirements on the host, please refer to [Port Configuration](/reference/config/#serverport). - **docker run**: Launch a docker container - **-d**: the container will run in background mode @@ -95,7 +95,7 @@ In TDengine CLI, SQL commands can be executed to create/drop databases, tables, ### Access TDengine from host -If `-p` used to map ports properly between host and container, it's also able to access TDengine in container from the host as long as `firstEp` is configured correctly for the client on host. +If option `-p` is used to map ports properly between host and container, it's also possible to access TDengine in the container from the host, as long as `firstEp` is configured correctly for the client on the host. ``` $ taos @@ -118,7 +118,7 @@ Output is like below: {"status":"succ","head":["name","created_time","ntables","vgroups","replica","quorum","days","keep0,keep1,keep(D)","cache(MB)","blocks","minrows","maxrows","wallevel","fsync","comp","cachelast","precision","update","status"],"column_meta":[["name",8,32],["created_time",9,8],["ntables",4,4],["vgroups",4,4],["replica",3,2],["quorum",3,2],["days",3,2],["keep0,keep1,keep(D)",8,24],["cache(MB)",4,4],["blocks",4,4],["minrows",4,4],["maxrows",4,4],["wallevel",2,1],["fsync",4,4],["comp",2,1],["cachelast",2,1],["precision",8,3],["update",2,1],["status",8,10]],"data":[["test","2021-08-18 06:01:11.021",10000,4,1,1,10,"3650,3650,3650",16,6,100,4096,1,3000,2,0,"ms",0,"ready"],["log","2021-08-18 05:51:51.065",4,1,1,1,10,"30,30,30",1,3,100,4096,1,3000,2,0,"us",0,"ready"]],"rows":2} -For details of REST API please refer to [REST API]](/reference/rest-api/). +For details of REST API please refer to [REST API](/reference/rest-api/). ### Run TDengine server and taosAdapter inside container @@ -265,13 +265,13 @@ Below is an example output: $ taos> select groupid, location from test.d0; groupid | location | ================================= - 0 | California.SanDieo | + 0 | California.SanDiego | Query OK, 1 row(s) in set (0.003490s) ``` ### Access TDengine from 3rd party tools -A lot of 3rd party tools can be used to write data into TDengine through `taosAdapter` , for details please refer to [3rd party tools](/third-party/). +A lot of 3rd party tools can be used to write data into TDengine through `taosAdapter`. For details please refer to [3rd party tools](/third-party/).
There is nothing different on the 3rd party side when accessing a TDengine server inside a container, as long as the end point is specified correctly: the end point should be the FQDN and the mapped port of the host. diff --git a/docs-examples/c/async_query_example.c b/docs-examples/c/async_query_example.c index 262757f02b5c52f2d4402d363663db80bb38a54d..b370420b124a21b05f8e0b4041fb1461b1e2478a 100644 --- a/docs-examples/c/async_query_example.c +++ b/docs-examples/c/async_query_example.c @@ -182,14 +182,14 @@ int main() { // query callback ... // ts current voltage phase location groupid // numOfRow = 8 -// 1538548685000 10.300000 219 0.310000 beijing.chaoyang 2 -// 1538548695000 12.600000 218 0.330000 beijing.chaoyang 2 -// 1538548696800 12.300000 221 0.310000 beijing.chaoyang 2 -// 1538548696650 10.300000 218 0.250000 beijing.chaoyang 3 -// 1538548685500 11.800000 221 0.280000 beijing.haidian 2 -// 1538548696600 13.400000 223 0.290000 beijing.haidian 2 -// 1538548685000 10.800000 223 0.290000 beijing.haidian 3 -// 1538548686500 11.500000 221 0.350000 beijing.haidian 3 +// 1538548685500 11.800000 221 0.280000 california.losangeles 2 +// 1538548696600 13.400000 223 0.290000 california.losangeles 2 +// 1538548685000 10.800000 223 0.290000 california.losangeles 3 +// 1538548686500 11.500000 221 0.350000 california.losangeles 3 +// 1538548685000 10.300000 219 0.310000 california.sanfrancisco 2 +// 1538548695000 12.600000 218 0.330000 california.sanfrancisco 2 +// 1538548696800 12.300000 221 0.310000 california.sanfrancisco 2 +// 1538548696650 10.300000 218 0.250000 california.sanfrancisco 3 // numOfRow = 0 // no more data, close the connection. // ANCHOR_END: demo \ No newline at end of file diff --git a/docs-examples/c/insert_example.c b/docs-examples/c/insert_example.c index ca12be9314efbda707dbd05449c746794c209743..ce8fdc5b9372aec7b02d3c9254ec25c4c4f62adc 100644 --- a/docs-examples/c/insert_example.c +++ b/docs-examples/c/insert_example.c @@ -36,10 +36,10 @@ int main() { executeSQL(taos, "CREATE DATABASE power"); executeSQL(taos, "USE power"); executeSQL(taos, "CREATE STABLE meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT)"); - executeSQL(taos, "INSERT INTO d1001 USING meters TAGS(Beijing.Chaoyang, 2) VALUES ('2018-10-03 14:38:05.000', 10.30000, 219, 0.31000) ('2018-10-03 14:38:15.000', 12.60000, 218, 0.33000) ('2018-10-03 14:38:16.800', 12.30000, 221, 0.31000)" - "d1002 USING meters TAGS(Beijing.Chaoyang, 3) VALUES ('2018-10-03 14:38:16.650', 10.30000, 218, 0.25000)" - "d1003 USING meters TAGS(Beijing.Haidian, 2) VALUES ('2018-10-03 14:38:05.500', 11.80000, 221, 0.28000) ('2018-10-03 14:38:16.600', 13.40000, 223, 0.29000)" - "d1004 USING meters TAGS(Beijing.Haidian, 3) VALUES ('2018-10-03 14:38:05.000', 10.80000, 223, 0.29000) ('2018-10-03 14:38:06.500', 11.50000, 221, 0.35000)"); + executeSQL(taos, "INSERT INTO d1001 USING meters TAGS(California.SanFrancisco, 2) VALUES ('2018-10-03 14:38:05.000', 10.30000, 219, 0.31000) ('2018-10-03 14:38:15.000', 12.60000, 218, 0.33000) ('2018-10-03 14:38:16.800', 12.30000, 221, 0.31000)" + "d1002 USING meters TAGS(California.SanFrancisco, 3) VALUES ('2018-10-03 14:38:16.650', 10.30000, 218, 0.25000)" + "d1003 USING meters TAGS(California.LosAngeles, 2) VALUES ('2018-10-03 14:38:05.500', 11.80000, 221, 0.28000) ('2018-10-03 14:38:16.600', 13.40000, 223, 0.29000)" + "d1004 USING meters TAGS(California.LosAngeles, 3) VALUES ('2018-10-03 14:38:05.000', 10.80000, 223, 0.29000) ('2018-10-03 14:38:06.500',
11.50000, 221, 0.35000)"); taos_close(taos); taos_cleanup(); } diff --git a/docs-examples/c/json_protocol_example.c b/docs-examples/c/json_protocol_example.c index 182fd201308facc80c76f36cfa57580784d70413..9d276127a64c3d74322e30587ab2e319c29cbf65 100644 --- a/docs-examples/c/json_protocol_example.c +++ b/docs-examples/c/json_protocol_example.c @@ -29,11 +29,11 @@ int main() { executeSQL(taos, "USE test"); char *line = "[{\"metric\": \"meters.current\", \"timestamp\": 1648432611249, \"value\": 10.3, \"tags\": {\"location\": " - "\"Beijing.Chaoyang\", \"groupid\": 2}},{\"metric\": \"meters.voltage\", \"timestamp\": 1648432611249, " - "\"value\": 219, \"tags\": {\"location\": \"Beijing.Haidian\", \"groupid\": 1}},{\"metric\": \"meters.current\", " - "\"timestamp\": 1648432611250, \"value\": 12.6, \"tags\": {\"location\": \"Beijing.Chaoyang\", \"groupid\": " + "\"California.SanFrancisco\", \"groupid\": 2}},{\"metric\": \"meters.voltage\", \"timestamp\": 1648432611249, " + "\"value\": 219, \"tags\": {\"location\": \"California.LosAngeles\", \"groupid\": 1}},{\"metric\": \"meters.current\", " + "\"timestamp\": 1648432611250, \"value\": 12.6, \"tags\": {\"location\": \"California.SanFrancisco\", \"groupid\": " "2}},{\"metric\": \"meters.voltage\", \"timestamp\": 1648432611250, \"value\": 221, \"tags\": {\"location\": " - "\"Beijing.Haidian\", \"groupid\": 1}}]"; + "\"California.LosAngeles\", \"groupid\": 1}}]"; char *lines[] = {line}; TAOS_RES *res = taos_schemaless_insert(taos, lines, 1, TSDB_SML_JSON_PROTOCOL, TSDB_SML_TIMESTAMP_NOT_CONFIGURED); diff --git a/docs-examples/c/line_example.c b/docs-examples/c/line_example.c index 8dd4b1a5075369625645959da0476b76b9fbf290..ce39f8d9df744082a450ce246529bf56adebd1e0 100644 --- a/docs-examples/c/line_example.c +++ b/docs-examples/c/line_example.c @@ -27,10 +27,10 @@ int main() { executeSQL(taos, "DROP DATABASE IF EXISTS test"); executeSQL(taos, "CREATE DATABASE test"); executeSQL(taos, "USE test"); - char *lines[] = {"meters,location=Beijing.Haidian,groupid=2 current=11.8,voltage=221,phase=0.28 1648432611249", - "meters,location=Beijing.Haidian,groupid=2 current=13.4,voltage=223,phase=0.29 1648432611250", - "meters,location=Beijing.Haidian,groupid=3 current=10.8,voltage=223,phase=0.29 1648432611249", - "meters,location=Beijing.Haidian,groupid=3 current=11.3,voltage=221,phase=0.35 1648432611250"}; + char *lines[] = {"meters,location=California.LosAngeles,groupid=2 current=11.8,voltage=221,phase=0.28 1648432611249", + "meters,location=California.LosAngeles,groupid=2 current=13.4,voltage=223,phase=0.29 1648432611250", + "meters,location=California.LosAngeles,groupid=3 current=10.8,voltage=223,phase=0.29 1648432611249", + "meters,location=California.LosAngeles,groupid=3 current=11.3,voltage=221,phase=0.35 1648432611250"}; TAOS_RES *res = taos_schemaless_insert(taos, lines, 4, TSDB_SML_LINE_PROTOCOL, TSDB_SML_TIMESTAMP_MILLI_SECONDS); if (taos_errno(res) != 0) { printf("failed to insert schema-less data, reason: %s\n", taos_errstr(res)); diff --git a/docs-examples/c/multi_bind_example.c b/docs-examples/c/multi_bind_example.c index fe11df9caad3e216fbd0b1ff2f40a54fe3ba86e5..02e6568e9e88ac8703a4993ed406e770d23c2438 100644 --- a/docs-examples/c/multi_bind_example.c +++ b/docs-examples/c/multi_bind_example.c @@ -52,7 +52,7 @@ void insertData(TAOS *taos) { checkErrorCode(stmt, code, "failed to execute taos_stmt_prepare"); // bind table name and tags TAOS_BIND tags[2]; - char *location = "Beijing.Chaoyang"; + char *location = "California.SanFrancisco"; int groupId 
= 2; tags[0].buffer_type = TSDB_DATA_TYPE_BINARY; tags[0].buffer_length = strlen(location); diff --git a/docs-examples/c/query_example.c b/docs-examples/c/query_example.c index f88b2467ceb3d9bbeaf6b3beb6a24befd3e398c6..fcae95bcd45a282eaa3ae911b4115e6300c6af8e 100644 --- a/docs-examples/c/query_example.c +++ b/docs-examples/c/query_example.c @@ -139,5 +139,5 @@ int main() { // output: // ts current voltage phase location groupid -// 1648432611249 10.300000 219 0.310000 Beijing.Chaoyang 2 -// 1648432611749 12.600000 218 0.330000 Beijing.Chaoyang 2 \ No newline at end of file +// 1648432611249 10.300000 219 0.310000 California.SanFrancisco 2 +// 1648432611749 12.600000 218 0.330000 California.SanFrancisco 2 \ No newline at end of file diff --git a/docs-examples/c/stmt_example.c b/docs-examples/c/stmt_example.c index fab1506f953ef68050e4318406fa2ba1a0202929..28dae5f9d5ea2faec0aa3c0a784d39e252651c65 100644 --- a/docs-examples/c/stmt_example.c +++ b/docs-examples/c/stmt_example.c @@ -59,7 +59,7 @@ void insertData(TAOS *taos) { checkErrorCode(stmt, code, "failed to execute taos_stmt_prepare"); // bind table name and tags TAOS_BIND tags[2]; - char* location = "Beijing.Chaoyang"; + char* location = "California.SanFrancisco"; int groupId = 2; tags[0].buffer_type = TSDB_DATA_TYPE_BINARY; tags[0].buffer_length = strlen(location); diff --git a/docs-examples/c/telnet_line_example.c b/docs-examples/c/telnet_line_example.c index 913d433f6aec07b3bce115d45536ffa4b45a0481..da62da4ba492856b0d73a564c1bf9cdd60b5b742 100644 --- a/docs-examples/c/telnet_line_example.c +++ b/docs-examples/c/telnet_line_example.c @@ -28,14 +28,14 @@ int main() { executeSQL(taos, "CREATE DATABASE test"); executeSQL(taos, "USE test"); char *lines[] = { - "meters.current 1648432611249 10.3 location=Beijing.Chaoyang groupid=2", - "meters.current 1648432611250 12.6 location=Beijing.Chaoyang groupid=2", - "meters.current 1648432611249 10.8 location=Beijing.Haidian groupid=3", - "meters.current 1648432611250 11.3 location=Beijing.Haidian groupid=3", - "meters.voltage 1648432611249 219 location=Beijing.Chaoyang groupid=2", - "meters.voltage 1648432611250 218 location=Beijing.Chaoyang groupid=2", - "meters.voltage 1648432611249 221 location=Beijing.Haidian groupid=3", - "meters.voltage 1648432611250 217 location=Beijing.Haidian groupid=3", + "meters.current 1648432611249 10.3 location=California.SanFrancisco groupid=2", + "meters.current 1648432611250 12.6 location=California.SanFrancisco groupid=2", + "meters.current 1648432611249 10.8 location=California.LosAngeles groupid=3", + "meters.current 1648432611250 11.3 location=California.LosAngeles groupid=3", + "meters.voltage 1648432611249 219 location=California.SanFrancisco groupid=2", + "meters.voltage 1648432611250 218 location=California.SanFrancisco groupid=2", + "meters.voltage 1648432611249 221 location=California.LosAngeles groupid=3", + "meters.voltage 1648432611250 217 location=California.LosAngeles groupid=3", }; TAOS_RES *res = taos_schemaless_insert(taos, lines, 8, TSDB_SML_TELNET_PROTOCOL, TSDB_SML_TIMESTAMP_NOT_CONFIGURED); if (taos_errno(res) != 0) { diff --git a/docs-examples/csharp/AsyncQueryExample.cs b/docs-examples/csharp/AsyncQueryExample.cs index fe30d21efe82e8d1dc414bd4723227ca93bc944f..3dabbebd1630a207af2e1b1b11cc4ba15bdd94a9 100644 --- a/docs-examples/csharp/AsyncQueryExample.cs +++ b/docs-examples/csharp/AsyncQueryExample.cs @@ -224,15 +224,15 @@ namespace TDengineExample } //output: -//Connect to TDengine success -//8 rows async retrieved - -//1538548685000 | 
10.3 | 219 | 0.31 | beijing.chaoyang | 2 | -//1538548695000 | 12.6 | 218 | 0.33 | beijing.chaoyang | 2 | -//1538548696800 | 12.3 | 221 | 0.31 | beijing.chaoyang | 2 | -//1538548696650 | 10.3 | 218 | 0.25 | beijing.chaoyang | 3 | -//1538548685500 | 11.8 | 221 | 0.28 | beijing.haidian | 2 | -//1538548696600 | 13.4 | 223 | 0.29 | beijing.haidian | 2 | -//1538548685000 | 10.8 | 223 | 0.29 | beijing.haidian | 3 | -//1538548686500 | 11.5 | 221 | 0.35 | beijing.haidian | 3 | -//async retrieve complete. \ No newline at end of file +// Connect to TDengine success +// 8 rows async retrieved + +// 1538548685500 | 11.8 | 221 | 0.28 | california.losangeles | 2 | +// 1538548696600 | 13.4 | 223 | 0.29 | california.losangeles | 2 | +// 1538548685000 | 10.8 | 223 | 0.29 | california.losangeles | 3 | +// 1538548686500 | 11.5 | 221 | 0.35 | california.losangeles | 3 | +// 1538548685000 | 10.3 | 219 | 0.31 | california.sanfrancisco | 2 | +// 1538548695000 | 12.6 | 218 | 0.33 | california.sanfrancisco | 2 | +// 1538548696800 | 12.3 | 221 | 0.31 | california.sanfrancisco | 2 | +// 1538548696650 | 10.3 | 218 | 0.25 | california.sanfrancisco | 3 | +// async retrieve complete. \ No newline at end of file diff --git a/docs-examples/csharp/InfluxDBLineExample.cs b/docs-examples/csharp/InfluxDBLineExample.cs index 7aad08825209db568d61e5963ec7a00034ab7ca7..7b4453f4ac0b14dd76d166e395bdacb46a5d3fbc 100644 --- a/docs-examples/csharp/InfluxDBLineExample.cs +++ b/docs-examples/csharp/InfluxDBLineExample.cs @@ -9,10 +9,10 @@ namespace TDengineExample IntPtr conn = GetConnection(); PrepareDatabase(conn); string[] lines = { - "meters,location=Beijing.Haidian,groupid=2 current=11.8,voltage=221,phase=0.28 1648432611249", - "meters,location=Beijing.Haidian,groupid=2 current=13.4,voltage=223,phase=0.29 1648432611250", - "meters,location=Beijing.Haidian,groupid=3 current=10.8,voltage=223,phase=0.29 1648432611249", - "meters,location=Beijing.Haidian,groupid=3 current=11.3,voltage=221,phase=0.35 1648432611250" + "meters,location=California.LosAngeles,groupid=2 current=11.8,voltage=221,phase=0.28 1648432611249", + "meters,location=California.LosAngeles,groupid=2 current=13.4,voltage=223,phase=0.29 1648432611250", + "meters,location=California.LosAngeles,groupid=3 current=10.8,voltage=223,phase=0.29 1648432611249", + "meters,location=California.LosAngeles,groupid=3 current=11.3,voltage=221,phase=0.35 1648432611250" }; IntPtr res = TDengine.SchemalessInsert(conn, lines, lines.Length, (int)TDengineSchemalessProtocol.TSDB_SML_LINE_PROTOCOL, (int)TDengineSchemalessPrecision.TSDB_SML_TIMESTAMP_MILLI_SECONDS); if (TDengine.ErrorNo(res) != 0) diff --git a/docs-examples/csharp/OptsJsonExample.cs b/docs-examples/csharp/OptsJsonExample.cs index d774a325afa1a8d93eb858f23dcd97dd29f8653d..2c41acc5c9628befda7eb4ad5c30af5b921de948 100644 --- a/docs-examples/csharp/OptsJsonExample.cs +++ b/docs-examples/csharp/OptsJsonExample.cs @@ -8,10 +8,10 @@ namespace TDengineExample { IntPtr conn = GetConnection(); PrepareDatabase(conn); - string[] lines = { "[{\"metric\": \"meters.current\", \"timestamp\": 1648432611249, \"value\": 10.3, \"tags\": {\"location\": \"Beijing.Chaoyang\", \"groupid\": 2}}," + - " {\"metric\": \"meters.voltage\", \"timestamp\": 1648432611249, \"value\": 219, \"tags\": {\"location\": \"Beijing.Haidian\", \"groupid\": 1}}, " + - "{\"metric\": \"meters.current\", \"timestamp\": 1648432611250, \"value\": 12.6, \"tags\": {\"location\": \"Beijing.Chaoyang\", \"groupid\": 2}}," + - " {\"metric\": \"meters.voltage\", \"timestamp\": 
1648432611250, \"value\": 221, \"tags\": {\"location\": \"Beijing.Haidian\", \"groupid\": 1}}]" + string[] lines = { "[{\"metric\": \"meters.current\", \"timestamp\": 1648432611249, \"value\": 10.3, \"tags\": {\"location\": \"California.SanFrancisco\", \"groupid\": 2}}," + + " {\"metric\": \"meters.voltage\", \"timestamp\": 1648432611249, \"value\": 219, \"tags\": {\"location\": \"California.LosAngeles\", \"groupid\": 1}}, " + + "{\"metric\": \"meters.current\", \"timestamp\": 1648432611250, \"value\": 12.6, \"tags\": {\"location\": \"California.SanFrancisco\", \"groupid\": 2}}," + + " {\"metric\": \"meters.voltage\", \"timestamp\": 1648432611250, \"value\": 221, \"tags\": {\"location\": \"California.LosAngeles\", \"groupid\": 1}}]" }; IntPtr res = TDengine.SchemalessInsert(conn, lines, 1, (int)TDengineSchemalessProtocol.TSDB_SML_JSON_PROTOCOL, (int)TDengineSchemalessPrecision.TSDB_SML_TIMESTAMP_NOT_CONFIGURED); diff --git a/docs-examples/csharp/OptsTelnetExample.cs b/docs-examples/csharp/OptsTelnetExample.cs index 81608c32213fa0618a2ca6e0769aacf8e9c8e64d..bb752db1afbbb2ef68df9ca25314c8b91cd9a266 100644 --- a/docs-examples/csharp/OptsTelnetExample.cs +++ b/docs-examples/csharp/OptsTelnetExample.cs @@ -9,14 +9,14 @@ namespace TDengineExample IntPtr conn = GetConnection(); PrepareDatabase(conn); string[] lines = { - "meters.current 1648432611249 10.3 location=Beijing.Chaoyang groupid=2", - "meters.current 1648432611250 12.6 location=Beijing.Chaoyang groupid=2", - "meters.current 1648432611249 10.8 location=Beijing.Haidian groupid=3", - "meters.current 1648432611250 11.3 location=Beijing.Haidian groupid=3", - "meters.voltage 1648432611249 219 location=Beijing.Chaoyang groupid=2", - "meters.voltage 1648432611250 218 location=Beijing.Chaoyang groupid=2", - "meters.voltage 1648432611249 221 location=Beijing.Haidian groupid=3", - "meters.voltage 1648432611250 217 location=Beijing.Haidian groupid=3", + "meters.current 1648432611249 10.3 location=California.SanFrancisco groupid=2", + "meters.current 1648432611250 12.6 location=California.SanFrancisco groupid=2", + "meters.current 1648432611249 10.8 location=California.LosAngeles groupid=3", + "meters.current 1648432611250 11.3 location=California.LosAngeles groupid=3", + "meters.voltage 1648432611249 219 location=California.SanFrancisco groupid=2", + "meters.voltage 1648432611250 218 location=California.SanFrancisco groupid=2", + "meters.voltage 1648432611249 221 location=California.LosAngeles groupid=3", + "meters.voltage 1648432611250 217 location=California.LosAngeles groupid=3", }; IntPtr res = TDengine.SchemalessInsert(conn, lines, lines.Length, (int)TDengineSchemalessProtocol.TSDB_SML_TELNET_PROTOCOL, (int)TDengineSchemalessPrecision.TSDB_SML_TIMESTAMP_NOT_CONFIGURED); if (TDengine.ErrorNo(res) != 0) diff --git a/docs-examples/csharp/QueryExample.cs b/docs-examples/csharp/QueryExample.cs index f00e391100c7ce42177e2987f5b0b32dc02262c4..97f0c456d412e2ed608c345ba87469d3f5ccfc15 100644 --- a/docs-examples/csharp/QueryExample.cs +++ b/docs-examples/csharp/QueryExample.cs @@ -158,5 +158,5 @@ namespace TDengineExample // Connect to TDengine success // fieldCount=6 // ts current voltage phase location groupid -// 1648432611249 10.3 219 0.31 Beijing.Chaoyang 2 -// 1648432611749 12.6 218 0.33 Beijing.Chaoyang 2 \ No newline at end of file +// 1648432611249 10.3 219 0.31 California.SanFrancisco 2 +// 1648432611749 12.6 218 0.33 California.SanFrancisco 2 \ No newline at end of file diff --git a/docs-examples/csharp/SQLInsertExample.cs 
b/docs-examples/csharp/SQLInsertExample.cs index fa2e2a50daf06f4d948479e7f5b0df82c517f809..d5462c1062e01fd5c93bac983696d0350117ad92 100644 --- a/docs-examples/csharp/SQLInsertExample.cs +++ b/docs-examples/csharp/SQLInsertExample.cs @@ -15,10 +15,10 @@ namespace TDengineExample CheckRes(conn, res, "failed to change database"); res = TDengine.Query(conn, "CREATE STABLE power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT)"); CheckRes(conn, res, "failed to create stable"); - var sql = "INSERT INTO d1001 USING meters TAGS(Beijing.Chaoyang, 2) VALUES ('2018-10-03 14:38:05.000', 10.30000, 219, 0.31000) ('2018-10-03 14:38:15.000', 12.60000, 218, 0.33000) ('2018-10-03 14:38:16.800', 12.30000, 221, 0.31000) " + - "d1002 USING power.meters TAGS(Beijing.Chaoyang, 3) VALUES('2018-10-03 14:38:16.650', 10.30000, 218, 0.25000) " + - "d1003 USING power.meters TAGS(Beijing.Haidian, 2) VALUES('2018-10-03 14:38:05.500', 11.80000, 221, 0.28000)('2018-10-03 14:38:16.600', 13.40000, 223, 0.29000) " + - "d1004 USING power.meters TAGS(Beijing.Haidian, 3) VALUES('2018-10-03 14:38:05.000', 10.80000, 223, 0.29000)('2018-10-03 14:38:06.500', 11.50000, 221, 0.35000)"; + var sql = "INSERT INTO d1001 USING meters TAGS(California.SanFrancisco, 2) VALUES ('2018-10-03 14:38:05.000', 10.30000, 219, 0.31000) ('2018-10-03 14:38:15.000', 12.60000, 218, 0.33000) ('2018-10-03 14:38:16.800', 12.30000, 221, 0.31000) " + + "d1002 USING power.meters TAGS(California.SanFrancisco, 3) VALUES('2018-10-03 14:38:16.650', 10.30000, 218, 0.25000) " + + "d1003 USING power.meters TAGS(California.LosAngeles, 2) VALUES('2018-10-03 14:38:05.500', 11.80000, 221, 0.28000)('2018-10-03 14:38:16.600', 13.40000, 223, 0.29000) " + + "d1004 USING power.meters TAGS(California.LosAngeles, 3) VALUES('2018-10-03 14:38:05.000', 10.80000, 223, 0.29000)('2018-10-03 14:38:06.500', 11.50000, 221, 0.35000)"; res = TDengine.Query(conn, sql); CheckRes(conn, res, "failed to insert data"); int affectedRows = TDengine.AffectRows(res); diff --git a/docs-examples/csharp/StmtInsertExample.cs b/docs-examples/csharp/StmtInsertExample.cs index d6e00dd4ac54ab8dbfc33b93896d19fc585e7642..6ade424b95d64529b7a40a782de13e3106d0c78a 100644 --- a/docs-examples/csharp/StmtInsertExample.cs +++ b/docs-examples/csharp/StmtInsertExample.cs @@ -21,7 +21,7 @@ namespace TDengineExample CheckStmtRes(res, "failed to prepare stmt"); // 2. bind table name and tags - TAOS_BIND[] tags = new TAOS_BIND[2] { TaosBind.BindBinary("Beijing.Chaoyang"), TaosBind.BindInt(2) }; + TAOS_BIND[] tags = new TAOS_BIND[2] { TaosBind.BindBinary("California.SanFrancisco"), TaosBind.BindInt(2) }; res = TDengine.StmtSetTbnameTags(stmt, "d1001", tags); CheckStmtRes(res, "failed to bind table name and tags"); diff --git a/docs-examples/go/connect/cgoexample/main.go b/docs-examples/go/connect/cgoexample/main.go index 8b9aba4ce4217c00605bc8796c788f3dd52805e6..ba7ed0f728a1cd546dbc3199ce4c0dc854ebee91 100644 --- a/docs-examples/go/connect/cgoexample/main.go +++ b/docs-examples/go/connect/cgoexample/main.go @@ -20,4 +20,4 @@ func main() { // use // var taosDSN = "root:taosdata@tcp(localhost:6030)/dbName" -// if you want to connect to a default database. +// if you want to connect to a specified database named "dbName".
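Editor's note: the hunks above and below consistently rename the sample tag values from Beijing.Chaoyang and Beijing.Haidian to California.SanFrancisco and California.LosAngeles across the C#, Go, Java, Node.js, PHP, and Python examples. For readers following along, here is a hedged Python sketch of the renamed insert, using the `taos` connector that the later docs-examples/python files import; the host and credentials are the illustrative defaults used throughout these examples, and the sketch is not part of the patch.

```python
# A short sketch of the renamed sample insert, mirroring the SQL in the
# surrounding hunks. Host, user, and password are illustrative defaults.
import taos

conn = taos.connect(host="localhost", user="root", password="taosdata")
try:
    conn.execute("CREATE DATABASE IF NOT EXISTS power")
    conn.execute("USE power")
    conn.execute(
        "CREATE STABLE IF NOT EXISTS meters "
        "(ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) "
        "TAGS (location BINARY(64), groupId INT)"
    )
    # The tag value is now California.SanFrancisco instead of Beijing.Chaoyang.
    affected = conn.execute(
        "INSERT INTO d1001 USING meters TAGS('California.SanFrancisco', 2) "
        "VALUES ('2018-10-03 14:38:05.000', 10.30000, 219, 0.31000)"
    )
    print("affected rows:", affected)
finally:
    conn.close()
```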
diff --git a/docs-examples/go/connect/restexample/main.go b/docs-examples/go/connect/restexample/main.go index 9c05e7eed80dee4ae7e6b20637d265f388d7438d..1efc98b988c183c4c680884057bf2a72a9dd19e9 100644 --- a/docs-examples/go/connect/restexample/main.go +++ b/docs-examples/go/connect/restexample/main.go @@ -18,6 +18,6 @@ func main() { defer taos.Close() } -// use +// use // var taosDSN = "root:taosdata@http(localhost:6041)/dbName" -// if you want to connect to a default database. +// if you want to connect to a specified database named "dbName". diff --git a/docs-examples/go/insert/json/main.go b/docs-examples/go/insert/json/main.go index 47d9e9984adc05896fb9954ad3deffde3764b836..6be375270e32a5091c015f88de52c9dda2246b59 100644 --- a/docs-examples/go/insert/json/main.go +++ b/docs-examples/go/insert/json/main.go @@ -25,10 +25,10 @@ func main() { defer conn.Close() prepareDatabase(conn) - payload := `[{"metric": "meters.current", "timestamp": 1648432611249, "value": 10.3, "tags": {"location": "Beijing.Chaoyang", "groupid": 2}}, - {"metric": "meters.voltage", "timestamp": 1648432611249, "value": 219, "tags": {"location": "Beijing.Haidian", "groupid": 1}}, - {"metric": "meters.current", "timestamp": 1648432611250, "value": 12.6, "tags": {"location": "Beijing.Chaoyang", "groupid": 2}}, - {"metric": "meters.voltage", "timestamp": 1648432611250, "value": 221, "tags": {"location": "Beijing.Haidian", "groupid": 1}}]` + payload := `[{"metric": "meters.current", "timestamp": 1648432611249, "value": 10.3, "tags": {"location": "California.SanFrancisco", "groupid": 2}}, + {"metric": "meters.voltage", "timestamp": 1648432611249, "value": 219, "tags": {"location": "California.LosAngeles", "groupid": 1}}, + {"metric": "meters.current", "timestamp": 1648432611250, "value": 12.6, "tags": {"location": "California.SanFrancisco", "groupid": 2}}, + {"metric": "meters.voltage", "timestamp": 1648432611250, "value": 221, "tags": {"location": "California.LosAngeles", "groupid": 1}}]` err = conn.OpenTSDBInsertJsonPayload(payload) if err != nil { diff --git a/docs-examples/go/insert/line/main.go b/docs-examples/go/insert/line/main.go index bbc41468fe5f13d3e6f896445bb88f3eba584d0f..c17e1a5270850e6a8b497e0dbec4ae714ee1e2d6 100644 --- a/docs-examples/go/insert/line/main.go +++ b/docs-examples/go/insert/line/main.go @@ -25,10 +25,10 @@ func main() { defer conn.Close() prepareDatabase(conn) var lines = []string{ - "meters,location=Beijing.Haidian,groupid=2 current=11.8,voltage=221,phase=0.28 1648432611249", - "meters,location=Beijing.Haidian,groupid=2 current=13.4,voltage=223,phase=0.29 1648432611250", - "meters,location=Beijing.Haidian,groupid=3 current=10.8,voltage=223,phase=0.29 1648432611249", - "meters,location=Beijing.Haidian,groupid=3 current=11.3,voltage=221,phase=0.35 1648432611250", + "meters,location=California.LosAngeles,groupid=2 current=11.8,voltage=221,phase=0.28 1648432611249", + "meters,location=California.LosAngeles,groupid=2 current=13.4,voltage=223,phase=0.29 1648432611250", + "meters,location=California.LosAngeles,groupid=3 current=10.8,voltage=223,phase=0.29 1648432611249", + "meters,location=California.LosAngeles,groupid=3 current=11.3,voltage=221,phase=0.35 1648432611250", } err = conn.InfluxDBInsertLines(lines, "ms") diff --git a/docs-examples/go/insert/sql/main.go b/docs-examples/go/insert/sql/main.go index 91386855334c1930af721e0b4f43395c6a6d8e82..6cd5f860e65f4fffd139668f69cc1772f5310eae 100644 --- a/docs-examples/go/insert/sql/main.go +++ b/docs-examples/go/insert/sql/main.go @@ -19,10 +19,10 @@ func
createStable(taos *sql.DB) { } func insertData(taos *sql.DB) { - sql := `INSERT INTO power.d1001 USING power.meters TAGS(Beijing.Chaoyang, 2) VALUES ('2018-10-03 14:38:05.000', 10.30000, 219, 0.31000) ('2018-10-03 14:38:15.000', 12.60000, 218, 0.33000) ('2018-10-03 14:38:16.800', 12.30000, 221, 0.31000) - power.d1002 USING power.meters TAGS(Beijing.Chaoyang, 3) VALUES ('2018-10-03 14:38:16.650', 10.30000, 218, 0.25000) - power.d1003 USING power.meters TAGS(Beijing.Haidian, 2) VALUES ('2018-10-03 14:38:05.500', 11.80000, 221, 0.28000) ('2018-10-03 14:38:16.600', 13.40000, 223, 0.29000) - power.d1004 USING power.meters TAGS(Beijing.Haidian, 3) VALUES ('2018-10-03 14:38:05.000', 10.80000, 223, 0.29000) ('2018-10-03 14:38:06.500', 11.50000, 221, 0.35000)` + sql := `INSERT INTO power.d1001 USING power.meters TAGS(California.SanFrancisco, 2) VALUES ('2018-10-03 14:38:05.000', 10.30000, 219, 0.31000) ('2018-10-03 14:38:15.000', 12.60000, 218, 0.33000) ('2018-10-03 14:38:16.800', 12.30000, 221, 0.31000) + power.d1002 USING power.meters TAGS(California.SanFrancisco, 3) VALUES ('2018-10-03 14:38:16.650', 10.30000, 218, 0.25000) + power.d1003 USING power.meters TAGS(California.LosAngeles, 2) VALUES ('2018-10-03 14:38:05.500', 11.80000, 221, 0.28000) ('2018-10-03 14:38:16.600', 13.40000, 223, 0.29000) + power.d1004 USING power.meters TAGS(California.LosAngeles, 3) VALUES ('2018-10-03 14:38:05.000', 10.80000, 223, 0.29000) ('2018-10-03 14:38:06.500', 11.50000, 221, 0.35000)` result, err := taos.Exec(sql) if err != nil { fmt.Println("failed to insert, err:", err) diff --git a/docs-examples/go/insert/stmt/main.go b/docs-examples/go/insert/stmt/main.go index c50200ebb427c4c64c2737cb8fe4c3d287551a34..7093fdf1e52bc5a14fc92cec995fd81e70717d9f 100644 --- a/docs-examples/go/insert/stmt/main.go +++ b/docs-examples/go/insert/stmt/main.go @@ -37,7 +37,7 @@ func main() { checkErr(err, "failed to create prepare statement") // bind table name and tags - tagParams := param.NewParam(2).AddBinary([]byte("Beijing.Chaoyang")).AddInt(2) + tagParams := param.NewParam(2).AddBinary([]byte("California.SanFrancisco")).AddInt(2) err = stmt.SetTableNameWithTags("d1001", tagParams) checkErr(err, "failed to execute SetTableNameWithTags") diff --git a/docs-examples/go/insert/telnet/main.go b/docs-examples/go/insert/telnet/main.go index 879e6d5cece74fd0b7c815dd34614dca3c9d4544..91fafbe71adbf60d9341b903f5a25708b7011852 100644 --- a/docs-examples/go/insert/telnet/main.go +++ b/docs-examples/go/insert/telnet/main.go @@ -25,14 +25,14 @@ func main() { defer conn.Close() prepareDatabase(conn) var lines = []string{ - "meters.current 1648432611249 10.3 location=Beijing.Chaoyang groupid=2", - "meters.current 1648432611250 12.6 location=Beijing.Chaoyang groupid=2", - "meters.current 1648432611249 10.8 location=Beijing.Haidian groupid=3", - "meters.current 1648432611250 11.3 location=Beijing.Haidian groupid=3", - "meters.voltage 1648432611249 219 location=Beijing.Chaoyang groupid=2", - "meters.voltage 1648432611250 218 location=Beijing.Chaoyang groupid=2", - "meters.voltage 1648432611249 221 location=Beijing.Haidian groupid=3", - "meters.voltage 1648432611250 217 location=Beijing.Haidian groupid=3", + "meters.current 1648432611249 10.3 location=California.SanFrancisco groupid=2", + "meters.current 1648432611250 12.6 location=California.SanFrancisco groupid=2", + "meters.current 1648432611249 10.8 location=California.LosAngeles groupid=3", + "meters.current 1648432611250 11.3 location=California.LosAngeles groupid=3", + "meters.voltage 
1648432611249 219 location=California.SanFrancisco groupid=2", + "meters.voltage 1648432611250 218 location=California.SanFrancisco groupid=2", + "meters.voltage 1648432611249 221 location=California.LosAngeles groupid=3", + "meters.voltage 1648432611250 217 location=California.LosAngeles groupid=3", } err = conn.OpenTSDBInsertTelnetLines(lines) diff --git a/docs-examples/java/src/main/java/com/taos/example/JNIConnectExample.java b/docs-examples/java/src/main/java/com/taos/example/JNIConnectExample.java index c6ce2ef9785a010daa55ad29415f81711760cd57..84292f7e8682dbb8171c807da74a603f4ae8256e 100644 --- a/docs-examples/java/src/main/java/com/taos/example/JNIConnectExample.java +++ b/docs-examples/java/src/main/java/com/taos/example/JNIConnectExample.java @@ -22,4 +22,4 @@ public class JNIConnectExample { // use // String jdbcUrl = "jdbc:TAOS://localhost:6030/dbName?user=root&password=taosdata"; -// if you want to connect to a default database. \ No newline at end of file +// if you want to connect to a specified database named "dbName". \ No newline at end of file diff --git a/docs-examples/java/src/main/java/com/taos/example/JSONProtocolExample.java b/docs-examples/java/src/main/java/com/taos/example/JSONProtocolExample.java index cb83424576a4fd7dfa09ea297294ed77b66bd12d..c8e649482fbd747cdc238daa9e7a237cf63295b6 100644 --- a/docs-examples/java/src/main/java/com/taos/example/JSONProtocolExample.java +++ b/docs-examples/java/src/main/java/com/taos/example/JSONProtocolExample.java @@ -23,10 +23,10 @@ public class JSONProtocolExample { } private static String getJSONData() { - return "[{\"metric\": \"meters.current\", \"timestamp\": 1648432611249, \"value\": 10.3, \"tags\": {\"location\": \"Beijing.Chaoyang\", \"groupid\": 2}}," + - " {\"metric\": \"meters.voltage\", \"timestamp\": 1648432611249, \"value\": 219, \"tags\": {\"location\": \"Beijing.Haidian\", \"groupid\": 1}}, " + - "{\"metric\": \"meters.current\", \"timestamp\": 1648432611250, \"value\": 12.6, \"tags\": {\"location\": \"Beijing.Chaoyang\", \"groupid\": 2}}," + - " {\"metric\": \"meters.voltage\", \"timestamp\": 1648432611250, \"value\": 221, \"tags\": {\"location\": \"Beijing.Haidian\", \"groupid\": 1}}]"; + return "[{\"metric\": \"meters.current\", \"timestamp\": 1648432611249, \"value\": 10.3, \"tags\": {\"location\": \"California.SanFrancisco\", \"groupid\": 2}}," + + " {\"metric\": \"meters.voltage\", \"timestamp\": 1648432611249, \"value\": 219, \"tags\": {\"location\": \"California.LosAngeles\", \"groupid\": 1}}, " + + "{\"metric\": \"meters.current\", \"timestamp\": 1648432611250, \"value\": 12.6, \"tags\": {\"location\": \"California.SanFrancisco\", \"groupid\": 2}}," + + " {\"metric\": \"meters.voltage\", \"timestamp\": 1648432611250, \"value\": 221, \"tags\": {\"location\": \"California.LosAngeles\", \"groupid\": 1}}]"; } public static void main(String[] args) throws SQLException { diff --git a/docs-examples/java/src/main/java/com/taos/example/LineProtocolExample.java b/docs-examples/java/src/main/java/com/taos/example/LineProtocolExample.java index 8a2eabe0a91f7966cc3cc6b7dfeeb71b71b88d92..990922b7a516bd32a7e299f5743bd1b5e321868a 100644 --- a/docs-examples/java/src/main/java/com/taos/example/LineProtocolExample.java +++ b/docs-examples/java/src/main/java/com/taos/example/LineProtocolExample.java @@ -12,11 +12,11 @@ import java.sql.Statement; public class LineProtocolExample { // format: measurement,tag_set field_set timestamp private static String[] lines = { - "meters,location=Beijing.Haidian,groupid=2
current=11.8,voltage=221,phase=0.28 1648432611249000", // micro + "meters,location=California.LosAngeles,groupid=2 current=11.8,voltage=221,phase=0.28 1648432611249000", // micro // seconds - "meters,location=Beijing.Haidian,groupid=2 current=13.4,voltage=223,phase=0.29 1648432611249500", - "meters,location=Beijing.Haidian,groupid=3 current=10.8,voltage=223,phase=0.29 1648432611249300", - "meters,location=Beijing.Haidian,groupid=3 current=11.3,voltage=221,phase=0.35 1648432611249800", + "meters,location=California.LosAngeles,groupid=2 current=13.4,voltage=223,phase=0.29 1648432611249500", + "meters,location=California.LosAngeles,groupid=3 current=10.8,voltage=223,phase=0.29 1648432611249300", + "meters,location=California.LosAngeles,groupid=3 current=11.3,voltage=221,phase=0.35 1648432611249800", }; private static Connection getConnection() throws SQLException { diff --git a/docs-examples/java/src/main/java/com/taos/example/RestInsertExample.java b/docs-examples/java/src/main/java/com/taos/example/RestInsertExample.java index de89f26cbe38f9343d60aeb8d3e9ce7f67c2e764..af97fe4373ca964260e5614f133f359e229b0e15 100644 --- a/docs-examples/java/src/main/java/com/taos/example/RestInsertExample.java +++ b/docs-examples/java/src/main/java/com/taos/example/RestInsertExample.java @@ -16,28 +16,28 @@ public class RestInsertExample { private static List getRawData() { return Arrays.asList( - "d1001,2018-10-03 14:38:05.000,10.30000,219,0.31000,Beijing.Chaoyang,2", - "d1001,2018-10-03 14:38:15.000,12.60000,218,0.33000,Beijing.Chaoyang,2", - "d1001,2018-10-03 14:38:16.800,12.30000,221,0.31000,Beijing.Chaoyang,2", - "d1002,2018-10-03 14:38:16.650,10.30000,218,0.25000,Beijing.Chaoyang,3", - "d1003,2018-10-03 14:38:05.500,11.80000,221,0.28000,Beijing.Haidian,2", - "d1003,2018-10-03 14:38:16.600,13.40000,223,0.29000,Beijing.Haidian,2", - "d1004,2018-10-03 14:38:05.000,10.80000,223,0.29000,Beijing.Haidian,3", - "d1004,2018-10-03 14:38:06.500,11.50000,221,0.35000,Beijing.Haidian,3" + "d1001,2018-10-03 14:38:05.000,10.30000,219,0.31000,California.SanFrancisco,2", + "d1001,2018-10-03 14:38:15.000,12.60000,218,0.33000,California.SanFrancisco,2", + "d1001,2018-10-03 14:38:16.800,12.30000,221,0.31000,California.SanFrancisco,2", + "d1002,2018-10-03 14:38:16.650,10.30000,218,0.25000,California.SanFrancisco,3", + "d1003,2018-10-03 14:38:05.500,11.80000,221,0.28000,California.LosAngeles,2", + "d1003,2018-10-03 14:38:16.600,13.40000,223,0.29000,California.LosAngeles,2", + "d1004,2018-10-03 14:38:05.000,10.80000,223,0.29000,California.LosAngeles,3", + "d1004,2018-10-03 14:38:06.500,11.50000,221,0.35000,California.LosAngeles,3" ); } /** * The generated SQL is: - * INSERT INTO power.d1001 USING power.meters TAGS(Beijing.Chaoyang, 2) VALUES('2018-10-03 14:38:05.000',10.30000,219,0.31000) - * power.d1001 USING power.meters TAGS(Beijing.Chaoyang, 2) VALUES('2018-10-03 14:38:15.000',12.60000,218,0.33000) - * power.d1001 USING power.meters TAGS(Beijing.Chaoyang, 2) VALUES('2018-10-03 14:38:16.800',12.30000,221,0.31000) - * power.d1002 USING power.meters TAGS(Beijing.Chaoyang, 3) VALUES('2018-10-03 14:38:16.650',10.30000,218,0.25000) - * power.d1003 USING power.meters TAGS(Beijing.Haidian, 2) VALUES('2018-10-03 14:38:05.500',11.80000,221,0.28000) - * power.d1003 USING power.meters TAGS(Beijing.Haidian, 2) VALUES('2018-10-03 14:38:16.600',13.40000,223,0.29000) - * power.d1004 USING power.meters TAGS(Beijing.Haidian, 3) VALUES('2018-10-03 14:38:05.000',10.80000,223,0.29000) - * power.d1004 USING power.meters TAGS(Beijing.Haidian, 
3) VALUES('2018-10-03 14:38:06.500',11.50000,221,0.35000) + * INSERT INTO power.d1001 USING power.meters TAGS(California.SanFrancisco, 2) VALUES('2018-10-03 14:38:05.000',10.30000,219,0.31000) + * power.d1001 USING power.meters TAGS(California.SanFrancisco, 2) VALUES('2018-10-03 14:38:15.000',12.60000,218,0.33000) + * power.d1001 USING power.meters TAGS(California.SanFrancisco, 2) VALUES('2018-10-03 14:38:16.800',12.30000,221,0.31000) + * power.d1002 USING power.meters TAGS(California.SanFrancisco, 3) VALUES('2018-10-03 14:38:16.650',10.30000,218,0.25000) + * power.d1003 USING power.meters TAGS(California.LosAngeles, 2) VALUES('2018-10-03 14:38:05.500',11.80000,221,0.28000) + * power.d1003 USING power.meters TAGS(California.LosAngeles, 2) VALUES('2018-10-03 14:38:16.600',13.40000,223,0.29000) + * power.d1004 USING power.meters TAGS(California.LosAngeles, 3) VALUES('2018-10-03 14:38:05.000',10.80000,223,0.29000) + * power.d1004 USING power.meters TAGS(California.LosAngeles, 3) VALUES('2018-10-03 14:38:06.500',11.50000,221,0.35000) */ private static String getSQL() { StringBuilder sb = new StringBuilder("INSERT INTO "); diff --git a/docs-examples/java/src/main/java/com/taos/example/RestQueryExample.java b/docs-examples/java/src/main/java/com/taos/example/RestQueryExample.java index b1a1d224c6d9af2b83ac039726dcdb49a33ec2b0..a3581a1f4733e8bf3e3f561bb6cab5a725d8a1c0 100644 --- a/docs-examples/java/src/main/java/com/taos/example/RestQueryExample.java +++ b/docs-examples/java/src/main/java/com/taos/example/RestQueryExample.java @@ -51,5 +51,5 @@ public class RestQueryExample { // possible output: // avg(voltage) location -// 222.0 Beijing.Haidian -// 219.0 Beijing.Chaoyang +// 222.0 California.LosAngeles +// 219.0 California.SanFrancisco diff --git a/docs-examples/java/src/main/java/com/taos/example/StmtInsertExample.java b/docs-examples/java/src/main/java/com/taos/example/StmtInsertExample.java index 2a7ccebf41cae1a22d7516966e2c6ffb10011b64..bbcc92b22f67c31384b0fb7a082975eaac2ff2bc 100644 --- a/docs-examples/java/src/main/java/com/taos/example/StmtInsertExample.java +++ b/docs-examples/java/src/main/java/com/taos/example/StmtInsertExample.java @@ -30,14 +30,14 @@ public class StmtInsertExample { private static List getRawData() { return Arrays.asList( - "d1001,2018-10-03 14:38:05.000,10.30000,219,0.31000,Beijing.Chaoyang,2", - "d1001,2018-10-03 14:38:15.000,12.60000,218,0.33000,Beijing.Chaoyang,2", - "d1001,2018-10-03 14:38:16.800,12.30000,221,0.31000,Beijing.Chaoyang,2", - "d1002,2018-10-03 14:38:16.650,10.30000,218,0.25000,Beijing.Chaoyang,3", - "d1003,2018-10-03 14:38:05.500,11.80000,221,0.28000,Beijing.Haidian,2", - "d1003,2018-10-03 14:38:16.600,13.40000,223,0.29000,Beijing.Haidian,2", - "d1004,2018-10-03 14:38:05.000,10.80000,223,0.29000,Beijing.Haidian,3", - "d1004,2018-10-03 14:38:06.500,11.50000,221,0.35000,Beijing.Haidian,3" + "d1001,2018-10-03 14:38:05.000,10.30000,219,0.31000,California.SanFrancisco,2", + "d1001,2018-10-03 14:38:15.000,12.60000,218,0.33000,California.SanFrancisco,2", + "d1001,2018-10-03 14:38:16.800,12.30000,221,0.31000,California.SanFrancisco,2", + "d1002,2018-10-03 14:38:16.650,10.30000,218,0.25000,California.SanFrancisco,3", + "d1003,2018-10-03 14:38:05.500,11.80000,221,0.28000,California.LosAngeles,2", + "d1003,2018-10-03 14:38:16.600,13.40000,223,0.29000,California.LosAngeles,2", + "d1004,2018-10-03 14:38:05.000,10.80000,223,0.29000,California.LosAngeles,3", + "d1004,2018-10-03 14:38:06.500,11.50000,221,0.35000,California.LosAngeles,3" ); } diff --git 
a/docs-examples/java/src/main/java/com/taos/example/TelnetLineProtocolExample.java b/docs-examples/java/src/main/java/com/taos/example/TelnetLineProtocolExample.java index 1431eccf16dabaac20f60ae7e971ef49707ba509..4c9368288df74f829121aeab5b925d1d083d29f0 100644 --- a/docs-examples/java/src/main/java/com/taos/example/TelnetLineProtocolExample.java +++ b/docs-examples/java/src/main/java/com/taos/example/TelnetLineProtocolExample.java @@ -11,14 +11,14 @@ import java.sql.Statement; public class TelnetLineProtocolExample { // format: <metric> <timestamp> <value> <tagk_1>=<tagv_1>[ <tagk_n>=<tagv_n>] - private static String[] lines = { "meters.current 1648432611249 10.3 location=Beijing.Chaoyang groupid=2", - "meters.current 1648432611250 12.6 location=Beijing.Chaoyang groupid=2", - "meters.current 1648432611249 10.8 location=Beijing.Haidian groupid=3", - "meters.current 1648432611250 11.3 location=Beijing.Haidian groupid=3", - "meters.voltage 1648432611249 219 location=Beijing.Chaoyang groupid=2", - "meters.voltage 1648432611250 218 location=Beijing.Chaoyang groupid=2", - "meters.voltage 1648432611249 221 location=Beijing.Haidian groupid=3", - "meters.voltage 1648432611250 217 location=Beijing.Haidian groupid=3", + private static String[] lines = { "meters.current 1648432611249 10.3 location=California.SanFrancisco groupid=2", + "meters.current 1648432611250 12.6 location=California.SanFrancisco groupid=2", + "meters.current 1648432611249 10.8 location=California.LosAngeles groupid=3", + "meters.current 1648432611250 11.3 location=California.LosAngeles groupid=3", + "meters.voltage 1648432611249 219 location=California.SanFrancisco groupid=2", + "meters.voltage 1648432611250 218 location=California.SanFrancisco groupid=2", + "meters.voltage 1648432611249 221 location=California.LosAngeles groupid=3", + "meters.voltage 1648432611250 217 location=California.LosAngeles groupid=3", }; private static Connection getConnection() throws SQLException { diff --git a/docs-examples/java/src/test/java/com/taos/test/TestAll.java b/docs-examples/java/src/test/java/com/taos/test/TestAll.java index 92fe14a49d5f5ea5d7ea5f1d809867b3de0cc9d2..42db24485afec05298159f7b0c3a4e15835d98ed 100644 --- a/docs-examples/java/src/test/java/com/taos/test/TestAll.java +++ b/docs-examples/java/src/test/java/com/taos/test/TestAll.java @@ -23,16 +23,16 @@ public class TestAll { String jdbcUrl = "jdbc:TAOS://localhost:6030?user=root&password=taosdata"; try (Connection conn = DriverManager.getConnection(jdbcUrl)) { try (Statement stmt = conn.createStatement()) { - String sql = "INSERT INTO power.d1001 USING power.meters TAGS(Beijing.Chaoyang, 2) VALUES('2018-10-03 14:38:05.000',10.30000,219,0.31000)\n" + - " power.d1001 USING power.meters TAGS(Beijing.Chaoyang, 2) VALUES('2018-10-03 15:38:15.000',12.60000,218,0.33000)\n" + - " power.d1001 USING power.meters TAGS(Beijing.Chaoyang, 2) VALUES('2018-10-03 15:38:16.800',12.30000,221,0.31000)\n" + - " power.d1002 USING power.meters TAGS(Beijing.Chaoyang, 3) VALUES('2018-10-03 15:38:16.650',10.30000,218,0.25000)\n" + - " power.d1003 USING power.meters TAGS(Beijing.Haidian, 2) VALUES('2018-10-03 15:38:05.500',11.80000,221,0.28000)\n" + - " power.d1003 USING power.meters TAGS(Beijing.Haidian, 2) VALUES('2018-10-03 15:38:16.600',13.40000,223,0.29000)\n" + - " power.d1004 USING power.meters TAGS(Beijing.Haidian, 3) VALUES('2018-10-03 15:38:05.000',10.80000,223,0.29000)\n" + - " power.d1004 USING power.meters TAGS(Beijing.Haidian, 3) VALUES('2018-10-03 15:38:06.000',10.80000,223,0.29000)\n" + - " power.d1004 USING power.meters TAGS(Beijing.Haidian, 3)
VALUES('2018-10-03 15:38:07.000',10.80000,223,0.29000)\n" + - " power.d1004 USING power.meters TAGS(Beijing.Haidian, 3) VALUES('2018-10-03 15:38:08.500',11.50000,221,0.35000)"; + String sql = "INSERT INTO power.d1001 USING power.meters TAGS(California.SanFrancisco, 2) VALUES('2018-10-03 14:38:05.000',10.30000,219,0.31000)\n" + + " power.d1001 USING power.meters TAGS(California.SanFrancisco, 2) VALUES('2018-10-03 15:38:15.000',12.60000,218,0.33000)\n" + + " power.d1001 USING power.meters TAGS(California.SanFrancisco, 2) VALUES('2018-10-03 15:38:16.800',12.30000,221,0.31000)\n" + + " power.d1002 USING power.meters TAGS(California.SanFrancisco, 3) VALUES('2018-10-03 15:38:16.650',10.30000,218,0.25000)\n" + + " power.d1003 USING power.meters TAGS(California.LosAngeles, 2) VALUES('2018-10-03 15:38:05.500',11.80000,221,0.28000)\n" + + " power.d1003 USING power.meters TAGS(California.LosAngeles, 2) VALUES('2018-10-03 15:38:16.600',13.40000,223,0.29000)\n" + + " power.d1004 USING power.meters TAGS(California.LosAngeles, 3) VALUES('2018-10-03 15:38:05.000',10.80000,223,0.29000)\n" + + " power.d1004 USING power.meters TAGS(California.LosAngeles, 3) VALUES('2018-10-03 15:38:06.000',10.80000,223,0.29000)\n" + + " power.d1004 USING power.meters TAGS(California.LosAngeles, 3) VALUES('2018-10-03 15:38:07.000',10.80000,223,0.29000)\n" + + " power.d1004 USING power.meters TAGS(California.LosAngeles, 3) VALUES('2018-10-03 15:38:08.500',11.50000,221,0.35000)"; stmt.execute(sql); } diff --git a/docs-examples/node/nativeexample/influxdb_line_example.js b/docs-examples/node/nativeexample/influxdb_line_example.js index a9fc6d11df0b335b92bb3292baaa017cb4bc42ea..2050bee54506a3ee6fe7d89de97b3b41334dd4a6 100644 --- a/docs-examples/node/nativeexample/influxdb_line_example.js +++ b/docs-examples/node/nativeexample/influxdb_line_example.js @@ -13,10 +13,10 @@ function createDatabase() { function insertData() { const lines = [ - "meters,location=Beijing.Haidian,groupid=2 current=11.8,voltage=221,phase=0.28 1648432611249", - "meters,location=Beijing.Haidian,groupid=2 current=13.4,voltage=223,phase=0.29 1648432611250", - "meters,location=Beijing.Haidian,groupid=3 current=10.8,voltage=223,phase=0.29 1648432611249", - "meters,location=Beijing.Haidian,groupid=3 current=11.3,voltage=221,phase=0.35 1648432611250", + "meters,location=California.LosAngeles,groupid=2 current=11.8,voltage=221,phase=0.28 1648432611249", + "meters,location=California.LosAngeles,groupid=2 current=13.4,voltage=223,phase=0.29 1648432611250", + "meters,location=California.LosAngeles,groupid=3 current=10.8,voltage=223,phase=0.29 1648432611249", + "meters,location=California.LosAngeles,groupid=3 current=11.3,voltage=221,phase=0.35 1648432611250", ]; cursor.schemalessInsert( lines, diff --git a/docs-examples/node/nativeexample/insert_example.js b/docs-examples/node/nativeexample/insert_example.js index 85a353f889176655654d8c39c9a905054d3b6622..ade9d83158362cbf00a856b43a973de31def7601 100644 --- a/docs-examples/node/nativeexample/insert_example.js +++ b/docs-examples/node/nativeexample/insert_example.js @@ -11,10 +11,10 @@ try { cursor.execute( "CREATE STABLE meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT)" ); - var sql = `INSERT INTO power.d1001 USING power.meters TAGS(Beijing.Chaoyang, 2) VALUES ('2018-10-03 14:38:05.000', 10.30000, 219, 0.31000) ('2018-10-03 14:38:15.000', 12.60000, 218, 0.33000) ('2018-10-03 14:38:16.800', 12.30000, 221, 0.31000) -power.d1002 USING power.meters 
TAGS(Beijing.Chaoyang, 3) VALUES ('2018-10-03 14:38:16.650', 10.30000, 218, 0.25000) -power.d1003 USING power.meters TAGS(Beijing.Haidian, 2) VALUES ('2018-10-03 14:38:05.500', 11.80000, 221, 0.28000) ('2018-10-03 14:38:16.600', 13.40000, 223, 0.29000) -power.d1004 USING power.meters TAGS(Beijing.Haidian, 3) VALUES ('2018-10-03 14:38:05.000', 10.80000, 223, 0.29000) ('2018-10-03 14:38:06.500', 11.50000, 221, 0.35000)`; + var sql = `INSERT INTO power.d1001 USING power.meters TAGS(California.SanFrancisco, 2) VALUES ('2018-10-03 14:38:05.000', 10.30000, 219, 0.31000) ('2018-10-03 14:38:15.000', 12.60000, 218, 0.33000) ('2018-10-03 14:38:16.800', 12.30000, 221, 0.31000) +power.d1002 USING power.meters TAGS(California.SanFrancisco, 3) VALUES ('2018-10-03 14:38:16.650', 10.30000, 218, 0.25000) +power.d1003 USING power.meters TAGS(California.LosAngeles, 2) VALUES ('2018-10-03 14:38:05.500', 11.80000, 221, 0.28000) ('2018-10-03 14:38:16.600', 13.40000, 223, 0.29000) +power.d1004 USING power.meters TAGS(California.LosAngeles, 3) VALUES ('2018-10-03 14:38:05.000', 10.80000, 223, 0.29000) ('2018-10-03 14:38:06.500', 11.50000, 221, 0.35000)`; cursor.execute(sql); } finally { cursor.close(); diff --git a/docs-examples/node/nativeexample/multi_bind_example.js b/docs-examples/node/nativeexample/multi_bind_example.js index d52581ec8e10c6edfbc8fc8f7ca78512b5c93d74..6ef8b30c097393fef8c6a2837f8683c736b363f1 100644 --- a/docs-examples/node/nativeexample/multi_bind_example.js +++ b/docs-examples/node/nativeexample/multi_bind_example.js @@ -25,7 +25,7 @@ function insertData() { // bind table name and tags let tagBind = new taos.TaosBind(2); - tagBind.bindBinary("Beijing.Chaoyang"); + tagBind.bindBinary("California.SanFrancisco"); tagBind.bindInt(2); cursor.stmtSetTbnameTags("d1001", tagBind.getBind()); diff --git a/docs-examples/node/nativeexample/opentsdb_json_example.js b/docs-examples/node/nativeexample/opentsdb_json_example.js index 6d436a8e9ebe0230bba22064e8fb6c180c14b5d1..2d78444a3f805bc77ab5e11925a28dd18fe221fe 100644 --- a/docs-examples/node/nativeexample/opentsdb_json_example.js +++ b/docs-examples/node/nativeexample/opentsdb_json_example.js @@ -17,25 +17,25 @@ function insertData() { metric: "meters.current", timestamp: 1648432611249, value: 10.3, - tags: { location: "Beijing.Chaoyang", groupid: 2 }, + tags: { location: "California.SanFrancisco", groupid: 2 }, }, { metric: "meters.voltage", timestamp: 1648432611249, value: 219, - tags: { location: "Beijing.Haidian", groupid: 1 }, + tags: { location: "California.LosAngeles", groupid: 1 }, }, { metric: "meters.current", timestamp: 1648432611250, value: 12.6, - tags: { location: "Beijing.Chaoyang", groupid: 2 }, + tags: { location: "California.SanFrancisco", groupid: 2 }, }, { metric: "meters.voltage", timestamp: 1648432611250, value: 221, - tags: { location: "Beijing.Haidian", groupid: 1 }, + tags: { location: "California.LosAngeles", groupid: 1 }, }, ]; diff --git a/docs-examples/node/nativeexample/opentsdb_telnet_example.js b/docs-examples/node/nativeexample/opentsdb_telnet_example.js index 01e79c2dcacd923cd708d1d228959a628d0ff26a..7f80f558838e18f07ad79e580e7d08638b74e940 100644 --- a/docs-examples/node/nativeexample/opentsdb_telnet_example.js +++ b/docs-examples/node/nativeexample/opentsdb_telnet_example.js @@ -13,14 +13,14 @@ function createDatabase() { function insertData() { const lines = [ - "meters.current 1648432611249 10.3 location=Beijing.Chaoyang groupid=2", - "meters.current 1648432611250 12.6 location=Beijing.Chaoyang groupid=2", - 
"meters.current 1648432611249 10.8 location=Beijing.Haidian groupid=3", - "meters.current 1648432611250 11.3 location=Beijing.Haidian groupid=3", - "meters.voltage 1648432611249 219 location=Beijing.Chaoyang groupid=2", - "meters.voltage 1648432611250 218 location=Beijing.Chaoyang groupid=2", - "meters.voltage 1648432611249 221 location=Beijing.Haidian groupid=3", - "meters.voltage 1648432611250 217 location=Beijing.Haidian groupid=3", + "meters.current 1648432611249 10.3 location=California.SanFrancisco groupid=2", + "meters.current 1648432611250 12.6 location=California.SanFrancisco groupid=2", + "meters.current 1648432611249 10.8 location=California.LosAngeles groupid=3", + "meters.current 1648432611250 11.3 location=California.LosAngeles groupid=3", + "meters.voltage 1648432611249 219 location=California.SanFrancisco groupid=2", + "meters.voltage 1648432611250 218 location=California.SanFrancisco groupid=2", + "meters.voltage 1648432611249 221 location=California.LosAngeles groupid=3", + "meters.voltage 1648432611250 217 location=California.LosAngeles groupid=3", ]; cursor.schemalessInsert( lines, diff --git a/docs-examples/node/nativeexample/param_bind_example.js b/docs-examples/node/nativeexample/param_bind_example.js index 9117f46c3eeabd9009b72fa9d4a8503e65884242..c7e04c71a0d19ff8666f3d43fe09109009741266 100644 --- a/docs-examples/node/nativeexample/param_bind_example.js +++ b/docs-examples/node/nativeexample/param_bind_example.js @@ -24,7 +24,7 @@ function insertData() { // bind table name and tags let tagBind = new taos.TaosBind(2); - tagBind.bindBinary("Beijing.Chaoyang"); + tagBind.bindBinary("California.SanFrancisco"); tagBind.bindInt(2); cursor.stmtSetTbnameTags("d1001", tagBind.getBind()); diff --git a/docs-examples/php/connect.php b/docs-examples/php/connect.php index 5af77b9768e5c5ac4b774b433479a4ac8902beda..b825b447805a3923248042d2cdff79c51bdcdbe3 100644 --- a/docs-examples/php/connect.php +++ b/docs-examples/php/connect.php @@ -4,7 +4,7 @@ use TDengine\Connection; use TDengine\Exception\TDengineException; try { - // 实例化 + // instantiate $host = 'localhost'; $port = 6030; $username = 'root'; @@ -12,9 +12,9 @@ try { $dbname = null; $connection = new Connection($host, $port, $username, $password, $dbname); - // 连接 + // connect $connection->connect(); } catch (TDengineException $e) { - // 连接失败捕获异常 + // throw exception throw $e; } diff --git a/docs-examples/php/insert.php b/docs-examples/php/insert.php index 0d9cfc4843a2ec3e72d0ad128fa4c2650d6b9cf6..6e38fa0c46d31aa0a939d471ccbd255cfa453a16 100644 --- a/docs-examples/php/insert.php +++ b/docs-examples/php/insert.php @@ -4,7 +4,7 @@ use TDengine\Connection; use TDengine\Exception\TDengineException; try { - // 实例化 + // instantiate $host = 'localhost'; $port = 6030; $username = 'root'; @@ -12,22 +12,22 @@ try { $dbname = 'power'; $connection = new Connection($host, $port, $username, $password, $dbname); - // 连接 + // connect $connection->connect(); - // 插入 + // insert $connection->query('CREATE DATABASE if not exists power'); $connection->query('CREATE STABLE if not exists meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT)'); $resource = $connection->query(<<<'SQL' - INSERT INTO power.d1001 USING power.meters TAGS(Beijing.Chaoyang, 2) VALUES ('2018-10-03 14:38:05.000', 10.30000, 219, 0.31000) ('2018-10-03 14:38:15.000', 12.60000, 218, 0.33000) ('2018-10-03 14:38:16.800', 12.30000, 221, 0.31000) - power.d1002 USING power.meters TAGS(Beijing.Chaoyang, 3) VALUES ('2018-10-03 
14:38:16.650', 10.30000, 218, 0.25000) - power.d1003 USING power.meters TAGS(Beijing.Haidian, 2) VALUES ('2018-10-03 14:38:05.500', 11.80000, 221, 0.28000) ('2018-10-03 14:38:16.600', 13.40000, 223, 0.29000) - power.d1004 USING power.meters TAGS(Beijing.Haidian, 3) VALUES ('2018-10-03 14:38:05.000', 10.80000, 223, 0.29000) ('2018-10-03 14:38:06.500', 11.50000, 221, 0.35000) + INSERT INTO power.d1001 USING power.meters TAGS(California.SanFrancisco, 2) VALUES ('2018-10-03 14:38:05.000', 10.30000, 219, 0.31000) ('2018-10-03 14:38:15.000', 12.60000, 218, 0.33000) ('2018-10-03 14:38:16.800', 12.30000, 221, 0.31000) + power.d1002 USING power.meters TAGS(California.SanFrancisco, 3) VALUES ('2018-10-03 14:38:16.650', 10.30000, 218, 0.25000) + power.d1003 USING power.meters TAGS(California.LosAngeles, 2) VALUES ('2018-10-03 14:38:05.500', 11.80000, 221, 0.28000) ('2018-10-03 14:38:16.600', 13.40000, 223, 0.29000) + power.d1004 USING power.meters TAGS(California.LosAngeles, 3) VALUES ('2018-10-03 14:38:05.000', 10.80000, 223, 0.29000) ('2018-10-03 14:38:06.500', 11.50000, 221, 0.35000) SQL); - // 影响行数 + // get affected rows var_dump($resource->affectedRows()); } catch (TDengineException $e) { - // 捕获异常 + // throw exception throw $e; } diff --git a/docs-examples/php/insert_stmt.php b/docs-examples/php/insert_stmt.php index 5d4b4809d215d781807c21172982feff2171fe07..99a9a6aef3f69a8880316355e17396e06ca985c9 100644 --- a/docs-examples/php/insert_stmt.php +++ b/docs-examples/php/insert_stmt.php @@ -4,7 +4,7 @@ use TDengine\Connection; use TDengine\Exception\TDengineException; try { - // 实例化 + // instantiate $host = 'localhost'; $port = 6030; $username = 'root'; @@ -12,18 +12,18 @@ try { $dbname = 'power'; $connection = new Connection($host, $port, $username, $password, $dbname); - // 连接 + // connect $connection->connect(); - // 插入 + // insert $connection->query('CREATE DATABASE if not exists power'); $connection->query('CREATE STABLE if not exists meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT)'); $stmt = $connection->prepare('INSERT INTO ? USING meters TAGS(?, ?) 
VALUES(?, ?, ?, ?)'); - // 设置表名和标签 + // set table name and tags $stmt->setTableNameTags('d1001', [ // the supported format is the same as for parameter binding - [TDengine\TSDB_DATA_TYPE_BINARY, 'Beijing.Chaoyang'], + [TDengine\TSDB_DATA_TYPE_BINARY, 'California.SanFrancisco'], [TDengine\TSDB_DATA_TYPE_INT, 2], ]); @@ -41,9 +41,9 @@ try { ]); $resource = $stmt->execute(); - // 影响行数 + // get affected rows var_dump($resource->affectedRows()); } catch (TDengineException $e) { - // 捕获异常 + // throw exception throw $e; } diff --git a/docs-examples/php/query.php b/docs-examples/php/query.php index 4e86a2cec7426887686049977a8647e786ac2744..2607940ea06a70eaa30e4c165c05bd72aa89857c 100644 --- a/docs-examples/php/query.php +++ b/docs-examples/php/query.php @@ -4,7 +4,7 @@ use TDengine\Connection; use TDengine\Exception\TDengineException; try { - // 实例化 + // instantiate $host = 'localhost'; $port = 6030; $username = 'root'; @@ -12,12 +12,12 @@ try { $dbname = 'power'; $connection = new Connection($host, $port, $username, $password, $dbname); - // 连接 + // connect $connection->connect(); $resource = $connection->query('SELECT ts, current FROM meters LIMIT 2'); var_dump($resource->fetch()); } catch (TDengineException $e) { - // 捕获异常 + // throw exception throw $e; } diff --git a/docs-examples/python/bind_param_example.py b/docs-examples/python/bind_param_example.py index 503a2eb5dd91a3516f87a4d3c1c3218cb6505236..6a67434f876f159cf32069a55e9527ca19034640 100644 --- a/docs-examples/python/bind_param_example.py +++ b/docs-examples/python/bind_param_example.py @@ -2,14 +2,14 @@ import taos from datetime import datetime # note: lines have already been sorted by table name -lines = [('d1001', '2018-10-03 14:38:05.000', 10.30000, 219, 0.31000, 'Beijing.Chaoyang', 2), - ('d1001', '2018-10-03 14:38:15.000', 12.60000, 218, 0.33000, 'Beijing.Chaoyang', 2), - ('d1001', '2018-10-03 14:38:16.800', 12.30000, 221, 0.31000, 'Beijing.Chaoyang', 2), - ('d1002', '2018-10-03 14:38:16.650', 10.30000, 218, 0.25000, 'Beijing.Chaoyang', 3), - ('d1003', '2018-10-03 14:38:05.500', 11.80000, 221, 0.28000, 'Beijing.Haidian', 2), - ('d1003', '2018-10-03 14:38:16.600', 13.40000, 223, 0.29000, 'Beijing.Haidian', 2), - ('d1004', '2018-10-03 14:38:05.000', 10.80000, 223, 0.29000, 'Beijing.Haidian', 3), - ('d1004', '2018-10-03 14:38:06.500', 11.50000, 221, 0.35000, 'Beijing.Haidian', 3)] +lines = [('d1001', '2018-10-03 14:38:05.000', 10.30000, 219, 0.31000, 'California.SanFrancisco', 2), + ('d1001', '2018-10-03 14:38:15.000', 12.60000, 218, 0.33000, 'California.SanFrancisco', 2), + ('d1001', '2018-10-03 14:38:16.800', 12.30000, 221, 0.31000, 'California.SanFrancisco', 2), + ('d1002', '2018-10-03 14:38:16.650', 10.30000, 218, 0.25000, 'California.SanFrancisco', 3), + ('d1003', '2018-10-03 14:38:05.500', 11.80000, 221, 0.28000, 'California.LosAngeles', 2), + ('d1003', '2018-10-03 14:38:16.600', 13.40000, 223, 0.29000, 'California.LosAngeles', 2), + ('d1004', '2018-10-03 14:38:05.000', 10.80000, 223, 0.29000, 'California.LosAngeles', 3), + ('d1004', '2018-10-03 14:38:06.500', 11.50000, 221, 0.35000, 'California.LosAngeles', 3)] def get_ts(ts: str): diff --git a/docs-examples/python/conn_native_pandas.py b/docs-examples/python/conn_native_pandas.py index 314759f7662c7bf4c9df2c8b3396ad3101c91cd4..56942ef57085766cd128b03cabb7a357587eab16 100644 --- a/docs-examples/python/conn_native_pandas.py +++ b/docs-examples/python/conn_native_pandas.py @@ -13,7 +13,7 @@ print(df.head(3)) # output: # RangeIndex(start=0, stop=8, step=1) # -# ts current voltage phase location groupid -# 0 2018-10-03
14:38:05.000 10.3 219 0.31 beijing.chaoyang 2 -# 1 2018-10-03 14:38:15.000 12.6 218 0.33 beijing.chaoyang 2 -# 2 2018-10-03 14:38:16.800 12.3 221 0.31 beijing.chaoyang 2 +# ts current ... location groupid +# 0 2018-10-03 14:38:05.500 11.8 ... california.losangeles 2 +# 1 2018-10-03 14:38:16.600 13.4 ... california.losangeles 2 +# 2 2018-10-03 14:38:05.000 10.8 ... california.losangeles 3 diff --git a/docs-examples/python/conn_rest_pandas.py b/docs-examples/python/conn_rest_pandas.py index 143e4275fa4eda685766297e4b90cba3935a574d..0164080cd5a05e72dce40b1d111ea423623ff9b2 100644 --- a/docs-examples/python/conn_rest_pandas.py +++ b/docs-examples/python/conn_rest_pandas.py @@ -11,9 +11,9 @@ print(type(df.ts[0])) print(df.head(3)) # output: -# # RangeIndex(start=0, stop=8, step=1) -# ts current ... location groupid -# 0 2018-10-03 14:38:05+08:00 10.3 ... beijing.chaoyang 2 -# 1 2018-10-03 14:38:15+08:00 12.6 ... beijing.chaoyang 2 -# 2 2018-10-03 14:38:16.800000+08:00 12.3 ... beijing.chaoyang 2 +# +# ts current ... location groupid +# 0 2018-10-03 06:38:05.500000+00:00 11.8 ... california.losangeles 2 +# 1 2018-10-03 06:38:16.600000+00:00 13.4 ... california.losangeles 2 +# 2 2018-10-03 06:38:05+00:00 10.8 ... california.losangeles 3 diff --git a/docs-examples/python/connect_rest_examples.py b/docs-examples/python/connect_rest_examples.py index a043d506b965bc31179dbb6f38749d196ab338ff..3303eb0e194ac28e9486ab153183c3b1f0b639f2 100644 --- a/docs-examples/python/connect_rest_examples.py +++ b/docs-examples/python/connect_rest_examples.py @@ -16,10 +16,10 @@ cursor.execute("CREATE DATABASE power") cursor.execute("CREATE STABLE power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT)") # insert data -cursor.execute("""INSERT INTO power.d1001 USING power.meters TAGS(Beijing.Chaoyang, 2) VALUES ('2018-10-03 14:38:05.000', 10.30000, 219, 0.31000) ('2018-10-03 14:38:15.000', 12.60000, 218, 0.33000) ('2018-10-03 14:38:16.800', 12.30000, 221, 0.31000) - power.d1002 USING power.meters TAGS(Beijing.Chaoyang, 3) VALUES ('2018-10-03 14:38:16.650', 10.30000, 218, 0.25000) - power.d1003 USING power.meters TAGS(Beijing.Haidian, 2) VALUES ('2018-10-03 14:38:05.500', 11.80000, 221, 0.28000) ('2018-10-03 14:38:16.600', 13.40000, 223, 0.29000) - power.d1004 USING power.meters TAGS(Beijing.Haidian, 3) VALUES ('2018-10-03 14:38:05.000', 10.80000, 223, 0.29000) ('2018-10-03 14:38:06.500', 11.50000, 221, 0.35000)""") +cursor.execute("""INSERT INTO power.d1001 USING power.meters TAGS(California.SanFrancisco, 2) VALUES ('2018-10-03 14:38:05.000', 10.30000, 219, 0.31000) ('2018-10-03 14:38:15.000', 12.60000, 218, 0.33000) ('2018-10-03 14:38:16.800', 12.30000, 221, 0.31000) + power.d1002 USING power.meters TAGS(California.SanFrancisco, 3) VALUES ('2018-10-03 14:38:16.650', 10.30000, 218, 0.25000) + power.d1003 USING power.meters TAGS(California.LosAngeles, 2) VALUES ('2018-10-03 14:38:05.500', 11.80000, 221, 0.28000) ('2018-10-03 14:38:16.600', 13.40000, 223, 0.29000) + power.d1004 USING power.meters TAGS(California.LosAngeles, 3) VALUES ('2018-10-03 14:38:05.000', 10.80000, 223, 0.29000) ('2018-10-03 14:38:06.500', 11.50000, 221, 0.35000)""") print("inserted row count:", cursor.rowcount) # query data @@ -38,8 +38,7 @@ for row in data: # inserted row count: 8 # queried row count: 3 # ['ts', 'current', 'voltage', 'phase', 'location', 'groupid'] -# [datetime.datetime(2018, 10, 3, 14, 38, 5, tzinfo=datetime.timezone(datetime.timedelta(seconds=28800), '+08:00')), 10.3, 219, 
0.31, 'beijing.chaoyang', 2]
-# [datetime.datetime(2018, 10, 3, 14, 38, 15, tzinfo=datetime.timezone(datetime.timedelta(seconds=28800), '+08:00')), 12.6, 218, 0.33, 'beijing.chaoyang', 2]
-# [datetime.datetime(2018, 10, 3, 14, 38, 16, 800000, tzinfo=datetime.timezone(datetime.timedelta(seconds=28800), '+08:00')), 12.3, 221, 0.31, 'beijing.chaoyang', 2]
-
+# [datetime.datetime(2018, 10, 3, 14, 38, 5, 500000, tzinfo=datetime.timezone(datetime.timedelta(seconds=28800), '+08:00')), 11.8, 221, 0.28, 'california.losangeles', 2]
+# [datetime.datetime(2018, 10, 3, 14, 38, 16, 600000, tzinfo=datetime.timezone(datetime.timedelta(seconds=28800), '+08:00')), 13.4, 223, 0.29, 'california.losangeles', 2]
+# [datetime.datetime(2018, 10, 3, 14, 38, 5, tzinfo=datetime.timezone(datetime.timedelta(seconds=28800), '+08:00')), 10.8, 223, 0.29, 'california.losangeles', 3]
 # ANCHOR_END: basic
diff --git a/docs-examples/python/json_protocol_example.py b/docs-examples/python/json_protocol_example.py
index 5bb4d629bccf3d79e74b381d6259de86d6522315..58b38f3ff667bcbbd902434d3409441a4d2c5b45 100644
--- a/docs-examples/python/json_protocol_example.py
+++ b/docs-examples/python/json_protocol_example.py
@@ -3,12 +3,12 @@ import json
 import taos
 from taos import SmlProtocol, SmlPrecision
-lines = [{"metric": "meters.current", "timestamp": 1648432611249, "value": 10.3, "tags": {"location": "Beijing.Chaoyang", "groupid": 2}},
+lines = [{"metric": "meters.current", "timestamp": 1648432611249, "value": 10.3, "tags": {"location": "California.SanFrancisco", "groupid": 2}},
 {"metric": "meters.voltage", "timestamp": 1648432611249, "value": 219,
- "tags": {"location": "Beijing.Haidian", "groupid": 1}},
+ "tags": {"location": "California.LosAngeles", "groupid": 1}},
 {"metric": "meters.current", "timestamp": 1648432611250, "value": 12.6,
- "tags": {"location": "Beijing.Chaoyang", "groupid": 2}},
- {"metric": "meters.voltage", "timestamp": 1648432611250, "value": 221, "tags": {"location": "Beijing.Haidian", "groupid": 1}}]
+ "tags": {"location": "California.SanFrancisco", "groupid": 2}},
+ {"metric": "meters.voltage", "timestamp": 1648432611250, "value": 221, "tags": {"location": "California.LosAngeles", "groupid": 1}}]
 def get_connection():
diff --git a/docs-examples/python/line_protocol_example.py b/docs-examples/python/line_protocol_example.py
index 02baeb2104f9f48984b4d34afb5e67af641d4e32..735e8e7eb8aed1a8133de7a6de50bd50d076c472 100644
--- a/docs-examples/python/line_protocol_example.py
+++ b/docs-examples/python/line_protocol_example.py
@@ -1,10 +1,10 @@
 import taos
 from taos import SmlProtocol, SmlPrecision
-lines = ["meters,location=Beijing.Haidian,groupid=2 current=11.8,voltage=221,phase=0.28 1648432611249000",
- "meters,location=Beijing.Haidian,groupid=2 current=13.4,voltage=223,phase=0.29 1648432611249500",
- "meters,location=Beijing.Haidian,groupid=3 current=10.8,voltage=223,phase=0.29 1648432611249300",
- "meters,location=Beijing.Haidian,groupid=3 current=11.3,voltage=221,phase=0.35 1648432611249800",
+lines = ["meters,location=California.LosAngeles,groupid=2 current=11.8,voltage=221,phase=0.28 1648432611249000",
+ "meters,location=California.LosAngeles,groupid=2 current=13.4,voltage=223,phase=0.29 1648432611249500",
+ "meters,location=California.LosAngeles,groupid=3 current=10.8,voltage=223,phase=0.29 1648432611249300",
+ "meters,location=California.LosAngeles,groupid=3 current=11.3,voltage=221,phase=0.35 1648432611249800",
 ]
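[Editor's note: the two schemaless examples above use the Python binding; a C sketch of the same ingestion is below. It assumes the C client's taos_schemaless_insert() entry point and TSDB_SML_* constants, which mirror the Python SmlProtocol/SmlPrecision enums; database selection and connection setup are trimmed.]

#include <stdio.h>
#include "taos.h"

/* assumes `taos` is a connected handle with the target database selected */
static void sml_insert_demo(TAOS *taos) {
  char *lines[] = {
      "meters,location=California.LosAngeles,groupid=2 current=11.8,voltage=221,phase=0.28 1648432611249000",
      "meters,location=California.LosAngeles,groupid=3 current=10.8,voltage=223,phase=0.29 1648432611249300",
  };
  // protocol/precision must match the payload: InfluxDB line protocol with
  // microsecond timestamps, as in the Python example above
  TAOS_RES *res = taos_schemaless_insert(taos, lines, 2, TSDB_SML_LINE_PROTOCOL,
                                         TSDB_SML_TIMESTAMP_MICRO_SECONDS);
  if (taos_errno(res) != 0) {
    printf("schemaless insert failed: %s\n", taos_errstr(res));
  }
  taos_free_result(res);
}

diff --git a/docs-examples/python/multi_bind_example.py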
b/docs-examples/python/multi_bind_example.py index 1714121d72705ab8d619a41f3463af4aa3193871..205ba69fb267ae1781415e4f0995b41f908ceb17 100644 --- a/docs-examples/python/multi_bind_example.py +++ b/docs-examples/python/multi_bind_example.py @@ -3,10 +3,10 @@ from datetime import datetime # ANCHOR: bind_batch table_tags = { - "d1001": ('Beijing.Chaoyang', 2), - "d1002": ('Beijing.Chaoyang', 3), - "d1003": ('Beijing.Haidian', 2), - "d1004": ('Beijing.Haidian', 3) + "d1001": ('California.SanFrancisco', 2), + "d1002": ('California.SanFrancisco', 3), + "d1003": ('California.LosAngeles', 2), + "d1004": ('California.LosAngeles', 3) } table_values = { diff --git a/docs-examples/python/native_insert_example.py b/docs-examples/python/native_insert_example.py index 94d4888a8f5330b9e39d5ae051fcb68f9825505f..3b6b73cb2236c8d9d11019349f99f79135a5c1d6 100644 --- a/docs-examples/python/native_insert_example.py +++ b/docs-examples/python/native_insert_example.py @@ -1,13 +1,13 @@ import taos -lines = ["d1001,2018-10-03 14:38:05.000,10.30000,219,0.31000,Beijing.Chaoyang,2", - "d1004,2018-10-03 14:38:05.000,10.80000,223,0.29000,Beijing.Haidian,3", - "d1003,2018-10-03 14:38:05.500,11.80000,221,0.28000,Beijing.Haidian,2", - "d1004,2018-10-03 14:38:06.500,11.50000,221,0.35000,Beijing.Haidian,3", - "d1002,2018-10-03 14:38:16.650,10.30000,218,0.25000,Beijing.Chaoyang,3", - "d1001,2018-10-03 14:38:15.000,12.60000,218,0.33000,Beijing.Chaoyang,2", - "d1001,2018-10-03 14:38:16.800,12.30000,221,0.31000,Beijing.Chaoyang,2", - "d1003,2018-10-03 14:38:16.600,13.40000,223,0.29000,Beijing.Haidian,2"] +lines = ["d1001,2018-10-03 14:38:05.000,10.30000,219,0.31000,California.SanFrancisco,2", + "d1004,2018-10-03 14:38:05.000,10.80000,223,0.29000,California.LosAngeles,3", + "d1003,2018-10-03 14:38:05.500,11.80000,221,0.28000,California.LosAngeles,2", + "d1004,2018-10-03 14:38:06.500,11.50000,221,0.35000,California.LosAngeles,3", + "d1002,2018-10-03 14:38:16.650,10.30000,218,0.25000,California.SanFrancisco,3", + "d1001,2018-10-03 14:38:15.000,12.60000,218,0.33000,California.SanFrancisco,2", + "d1001,2018-10-03 14:38:16.800,12.30000,221,0.31000,California.SanFrancisco,2", + "d1003,2018-10-03 14:38:16.600,13.40000,223,0.29000,California.LosAngeles,2"] def get_connection() -> taos.TaosConnection: @@ -25,10 +25,10 @@ def create_stable(conn: taos.TaosConnection): # The generated SQL is: -# INSERT INTO d1001 USING meters TAGS(Beijing.Chaoyang, 2) VALUES ('2018-10-03 14:38:05.000', 10.30000, 219, 0.31000) ('2018-10-03 14:38:15.000', 12.60000, 218, 0.33000) ('2018-10-03 14:38:16.800', 12.30000, 221, 0.31000) -# d1002 USING meters TAGS(Beijing.Chaoyang, 3) VALUES ('2018-10-03 14:38:16.650', 10.30000, 218, 0.25000) -# d1003 USING meters TAGS(Beijing.Haidian, 2) VALUES ('2018-10-03 14:38:05.500', 11.80000, 221, 0.28000) ('2018-10-03 14:38:16.600', 13.40000, 223, 0.29000) -# d1004 USING meters TAGS(Beijing.Haidian, 3) VALUES ('2018-10-03 14:38:05.000', 10.80000, 223, 0.29000) ('2018-10-03 14:38:06.500', 11.50000, 221, 0.35000) +# INSERT INTO d1001 USING meters TAGS(California.SanFrancisco, 2) VALUES ('2018-10-03 14:38:05.000', 10.30000, 219, 0.31000) ('2018-10-03 14:38:15.000', 12.60000, 218, 0.33000) ('2018-10-03 14:38:16.800', 12.30000, 221, 0.31000) +# d1002 USING meters TAGS(California.SanFrancisco, 3) VALUES ('2018-10-03 14:38:16.650', 10.30000, 218, 0.25000) +# d1003 USING meters TAGS(California.LosAngeles, 2) VALUES ('2018-10-03 14:38:05.500', 11.80000, 221, 0.28000) ('2018-10-03 14:38:16.600', 13.40000, 223, 0.29000) +# d1004 USING 
meters TAGS(California.LosAngeles, 3) VALUES ('2018-10-03 14:38:05.000', 10.80000, 223, 0.29000) ('2018-10-03 14:38:06.500', 11.50000, 221, 0.35000)
 def get_sql():
 global lines
diff --git a/docs-examples/python/query_example.py b/docs-examples/python/query_example.py
index 6d33c49c968d9210b475931b5d8cecca0ceff3e3..8afd7f07358d7e9c9a3677ee04f8eb92aae6856b 100644
--- a/docs-examples/python/query_example.py
+++ b/docs-examples/python/query_example.py
@@ -12,10 +12,10 @@ def query_api_demo(conn: taos.TaosConnection):
 # field count: 7
-# meta of files[1]: {name: ts, type: 9, bytes: 8}
+# meta of fields[1]: {name: ts, type: 9, bytes: 8}
 # ======================Iterate on result=========================
-# ('d1001', datetime.datetime(2018, 10, 3, 14, 38, 5), 10.300000190734863, 219, 0.3100000023841858, 'Beijing.Chaoyang', 2)
-# ('d1001', datetime.datetime(2018, 10, 3, 14, 38, 15), 12.600000381469727, 218, 0.33000001311302185, 'Beijing.Chaoyang', 2)
+# ('d1003', datetime.datetime(2018, 10, 3, 14, 38, 5, 500000), 11.800000190734863, 221, 0.2800000011920929, 'california.losangeles', 2)
+# ('d1003', datetime.datetime(2018, 10, 3, 14, 38, 16, 600000), 13.399999618530273, 223, 0.28999999165534973, 'california.losangeles', 2)
 # ANCHOR_END: iter
 # ANCHOR: fetch_all
@@ -29,8 +29,8 @@ def fetch_all_demo(conn: taos.TaosConnection):
 # row count: 2
 # ===============all data===================
-# [{'ts': datetime.datetime(2018, 10, 3, 14, 38, 5), 'current': 10.300000190734863},
-# {'ts': datetime.datetime(2018, 10, 3, 14, 38, 15), 'current': 12.600000381469727}]
+# [{'ts': datetime.datetime(2018, 10, 3, 14, 38, 5, 500000), 'current': 11.800000190734863},
+# {'ts': datetime.datetime(2018, 10, 3, 14, 38, 16, 600000), 'current': 13.399999618530273}]
 # ANCHOR_END: fetch_all
 if __name__ == '__main__':
diff --git a/docs-examples/python/telnet_line_protocol_example.py b/docs-examples/python/telnet_line_protocol_example.py
index 072835109ee238940e6fe5880b72b2b04e0157fa..d812e186af86be6811ee7774f10458e46df1f39f 100644
--- a/docs-examples/python/telnet_line_protocol_example.py
+++ b/docs-examples/python/telnet_line_protocol_example.py
@@ -2,14 +2,14 @@ import taos
 from taos import SmlProtocol, SmlPrecision
 # format: <metric> <timestamp> <value> <tagk_1>=<tagv_1>[ <tagk_n>=<tagv_n>]
-lines = ["meters.current 1648432611249 10.3 location=Beijing.Chaoyang groupid=2",
- "meters.current 1648432611250 12.6 location=Beijing.Chaoyang groupid=2",
- "meters.current 1648432611249 10.8 location=Beijing.Haidian groupid=3",
- "meters.current 1648432611250 11.3 location=Beijing.Haidian groupid=3",
- "meters.voltage 1648432611249 219 location=Beijing.Chaoyang groupid=2",
- "meters.voltage 1648432611250 218 location=Beijing.Chaoyang groupid=2",
- "meters.voltage 1648432611249 221 location=Beijing.Haidian groupid=3",
- "meters.voltage 1648432611250 217 location=Beijing.Haidian groupid=3",
+lines = ["meters.current 1648432611249 10.3 location=California.SanFrancisco groupid=2",
+ "meters.current 1648432611250 12.6 location=California.SanFrancisco groupid=2",
+ "meters.current 1648432611249 10.8 location=California.LosAngeles groupid=3",
+ "meters.current 1648432611250 11.3 location=California.LosAngeles groupid=3",
+ "meters.voltage 1648432611249 219 location=California.SanFrancisco groupid=2",
+ "meters.voltage 1648432611250 218 location=California.SanFrancisco groupid=2",
+ "meters.voltage 1648432611249 221 location=California.LosAngeles groupid=3",
+ "meters.voltage 1648432611250 217 location=California.LosAngeles groupid=3",
 ]
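[Editor's note: query_example.py above iterates a result set row by row; the C analogue uses taos_query() plus the fetch helpers that also appear verbatim in the subscribe.c example removed further down in this patch. A minimal sketch, assuming an already connected handle:]

#include <stdio.h>
#include "taos.h"

static void query_demo(TAOS *taos) {
  TAOS_RES *res = taos_query(taos, "SELECT ts, current FROM meters LIMIT 2");
  if (taos_errno(res) != 0) {
    printf("query failed: %s\n", taos_errstr(res));
    taos_free_result(res);
    return;
  }
  int         num_fields = taos_num_fields(res);
  TAOS_FIELD *fields     = taos_fetch_fields(res);
  TAOS_ROW    row;
  while ((row = taos_fetch_row(res)) != NULL) {
    char buf[4096] = {0};
    taos_print_row(buf, row, fields, num_fields);  // format one row into buf
    puts(buf);
  }
  taos_free_result(res);
}

diff --git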
a/docs-examples/rust/nativeexample/examples/stmt_example.rs b/docs-examples/rust/nativeexample/examples/stmt_example.rs index a791a4135984a33dded145e8175d7ade57de8d77..190f8c1ef6d50a8e9c925178c1a9d31c22e3d4df 100644 --- a/docs-examples/rust/nativeexample/examples/stmt_example.rs +++ b/docs-examples/rust/nativeexample/examples/stmt_example.rs @@ -12,7 +12,7 @@ async fn main() -> Result<(), Error> { stmt.set_tbname_tags( "d1001", [ - Field::Binary(BString::from("Beijing.Chaoyang")), + Field::Binary(BString::from("California.SanFrancisco")), Field::Int(2), ], )?; diff --git a/docs-examples/rust/restexample/examples/insert_example.rs b/docs-examples/rust/restexample/examples/insert_example.rs index d7acc98d096fb3cd6bea22d6c5f6f0f5caea50af..9261536f627c297fc707708f88f57eed647dbf3e 100644 --- a/docs-examples/rust/restexample/examples/insert_example.rs +++ b/docs-examples/rust/restexample/examples/insert_example.rs @@ -5,10 +5,10 @@ async fn main() -> Result<(), Error> { let taos = TaosCfg::default().connect().expect("fail to connect"); taos.create_database("power").await?; taos.exec("CREATE STABLE power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT)").await?; - let sql = "INSERT INTO power.d1001 USING power.meters TAGS(Beijing.Chaoyang, 2) VALUES ('2018-10-03 14:38:05.000', 10.30000, 219, 0.31000) ('2018-10-03 14:38:15.000', 12.60000, 218, 0.33000) ('2018-10-03 14:38:16.800', 12.30000, 221, 0.31000) - power.d1002 USING power.meters TAGS(Beijing.Chaoyang, 3) VALUES ('2018-10-03 14:38:16.650', 10.30000, 218, 0.25000) - power.d1003 USING power.meters TAGS(Beijing.Haidian, 2) VALUES ('2018-10-03 14:38:05.500', 11.80000, 221, 0.28000) ('2018-10-03 14:38:16.600', 13.40000, 223, 0.29000) - power.d1004 USING power.meters TAGS(Beijing.Haidian, 3) VALUES ('2018-10-03 14:38:05.000', 10.80000, 223, 0.29000) ('2018-10-03 14:38:06.500', 11.50000, 221, 0.35000)"; + let sql = "INSERT INTO power.d1001 USING power.meters TAGS(California.SanFrancisco, 2) VALUES ('2018-10-03 14:38:05.000', 10.30000, 219, 0.31000) ('2018-10-03 14:38:15.000', 12.60000, 218, 0.33000) ('2018-10-03 14:38:16.800', 12.30000, 221, 0.31000) + power.d1002 USING power.meters TAGS(California.SanFrancisco, 3) VALUES ('2018-10-03 14:38:16.650', 10.30000, 218, 0.25000) + power.d1003 USING power.meters TAGS(California.LosAngeles, 2) VALUES ('2018-10-03 14:38:05.500', 11.80000, 221, 0.28000) ('2018-10-03 14:38:16.600', 13.40000, 223, 0.29000) + power.d1004 USING power.meters TAGS(California.LosAngeles, 3) VALUES ('2018-10-03 14:38:05.000', 10.80000, 223, 0.29000) ('2018-10-03 14:38:06.500', 11.50000, 221, 0.35000)"; let result = taos.query(sql).await?; println!("{:?}", result); Ok(()) diff --git a/docs-examples/rust/schemalessexample/examples/influxdb_line_example.rs b/docs-examples/rust/schemalessexample/examples/influxdb_line_example.rs index e93888cc83d12f3bec7370a66e8a85d38cec42ad..64d1a3c9ac6037c16e3e1c3be0258e19cce632a0 100644 --- a/docs-examples/rust/schemalessexample/examples/influxdb_line_example.rs +++ b/docs-examples/rust/schemalessexample/examples/influxdb_line_example.rs @@ -5,10 +5,10 @@ fn main() { let taos = TaosCfg::default().connect().expect("fail to connect"); taos.raw_query("CREATE DATABASE test").unwrap(); taos.raw_query("USE test").unwrap(); - let lines = ["meters,location=Beijing.Haidian,groupid=2 current=11.8,voltage=221,phase=0.28 1648432611249", - "meters,location=Beijing.Haidian,groupid=2 current=13.4,voltage=223,phase=0.29 1648432611250", - 
"meters,location=Beijing.Haidian,groupid=3 current=10.8,voltage=223,phase=0.29 1648432611249", - "meters,location=Beijing.Haidian,groupid=3 current=11.3,voltage=221,phase=0.35 1648432611250"]; + let lines = ["meters,location=California.LosAngeles,groupid=2 current=11.8,voltage=221,phase=0.28 1648432611249", + "meters,location=California.LosAngeles,groupid=2 current=13.4,voltage=223,phase=0.29 1648432611250", + "meters,location=California.LosAngeles,groupid=3 current=10.8,voltage=223,phase=0.29 1648432611249", + "meters,location=California.LosAngeles,groupid=3 current=11.3,voltage=221,phase=0.35 1648432611250"]; let affected_rows = taos .schemaless_insert( &lines, diff --git a/docs-examples/rust/schemalessexample/examples/opentsdb_json_example.rs b/docs-examples/rust/schemalessexample/examples/opentsdb_json_example.rs index 1d66bd1f2b1bcbe82dc3ee3e8e25ea4c521c81f0..e61691596704c8aaf979081429802df6e5aa86f9 100644 --- a/docs-examples/rust/schemalessexample/examples/opentsdb_json_example.rs +++ b/docs-examples/rust/schemalessexample/examples/opentsdb_json_example.rs @@ -6,10 +6,10 @@ fn main() { taos.raw_query("CREATE DATABASE test").unwrap(); taos.raw_query("USE test").unwrap(); let lines = [ - r#"[{"metric": "meters.current", "timestamp": 1648432611249, "value": 10.3, "tags": {"location": "Beijing.Chaoyang", "groupid": 2}}, - {"metric": "meters.voltage", "timestamp": 1648432611249, "value": 219, "tags": {"location": "Beijing.Haidian", "groupid": 1}}, - {"metric": "meters.current", "timestamp": 1648432611250, "value": 12.6, "tags": {"location": "Beijing.Chaoyang", "groupid": 2}}, - {"metric": "meters.voltage", "timestamp": 1648432611250, "value": 221, "tags": {"location": "Beijing.Haidian", "groupid": 1}}]"#, + r#"[{"metric": "meters.current", "timestamp": 1648432611249, "value": 10.3, "tags": {"location": "California.SanFrancisco", "groupid": 2}}, + {"metric": "meters.voltage", "timestamp": 1648432611249, "value": 219, "tags": {"location": "California.LosAngeles", "groupid": 1}}, + {"metric": "meters.current", "timestamp": 1648432611250, "value": 12.6, "tags": {"location": "California.SanFrancisco", "groupid": 2}}, + {"metric": "meters.voltage", "timestamp": 1648432611250, "value": 221, "tags": {"location": "California.LosAngeles", "groupid": 1}}]"#, ]; let affected_rows = taos diff --git a/docs-examples/rust/schemalessexample/examples/opentsdb_telnet_example.rs b/docs-examples/rust/schemalessexample/examples/opentsdb_telnet_example.rs index 18d7500714d9e41b1bebd490199d296ead3dc7c4..c8cab7655a24806e5c7659af80e83da383539c55 100644 --- a/docs-examples/rust/schemalessexample/examples/opentsdb_telnet_example.rs +++ b/docs-examples/rust/schemalessexample/examples/opentsdb_telnet_example.rs @@ -6,14 +6,14 @@ fn main() { taos.raw_query("CREATE DATABASE test").unwrap(); taos.raw_query("USE test").unwrap(); let lines = [ - "meters.current 1648432611249 10.3 location=Beijing.Chaoyang groupid=2", - "meters.current 1648432611250 12.6 location=Beijing.Chaoyang groupid=2", - "meters.current 1648432611249 10.8 location=Beijing.Haidian groupid=3", - "meters.current 1648432611250 11.3 location=Beijing.Haidian groupid=3", - "meters.voltage 1648432611249 219 location=Beijing.Chaoyang groupid=2", - "meters.voltage 1648432611250 218 location=Beijing.Chaoyang groupid=2", - "meters.voltage 1648432611249 221 location=Beijing.Haidian groupid=3", - "meters.voltage 1648432611250 217 location=Beijing.Haidian groupid=3", + "meters.current 1648432611249 10.3 location=California.SanFrancisco groupid=2", + "meters.current 
1648432611250 12.6 location=California.SanFrancisco groupid=2", + "meters.current 1648432611249 10.8 location=California.LosAngeles groupid=3", + "meters.current 1648432611250 11.3 location=California.LosAngeles groupid=3", + "meters.voltage 1648432611249 219 location=California.SanFrancisco groupid=2", + "meters.voltage 1648432611250 218 location=California.SanFrancisco groupid=2", + "meters.voltage 1648432611249 221 location=California.LosAngeles groupid=3", + "meters.voltage 1648432611250 217 location=California.LosAngeles groupid=3", ]; let affected_rows = taos .schemaless_insert( diff --git a/example/CMakeLists.txt b/example/CMakeLists.txt deleted file mode 100644 index 365b1b7172f394111c5e75b113a9ce1e1ce8822b..0000000000000000000000000000000000000000 --- a/example/CMakeLists.txt +++ /dev/null @@ -1,49 +0,0 @@ -add_executable(tmq "") -add_executable(tstream "") -add_executable(demoapi "") - -target_sources(tmq - PRIVATE - "src/tmq.c" -) - -target_sources(tstream - PRIVATE - "src/tstream.c" -) - -target_sources(demoapi - PRIVATE - "src/demoapi.c" -) - -target_link_libraries(tmq - taos_static -) - -target_link_libraries(tstream - taos_static -) - -target_link_libraries(demoapi - taos_static -) - -target_include_directories(tmq - PUBLIC "${TD_SOURCE_DIR}/include/os" - PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/inc" -) - -target_include_directories(tstream - PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/inc" -) - -target_include_directories(demoapi - PUBLIC "${TD_SOURCE_DIR}/include/client" - PUBLIC "${TD_SOURCE_DIR}/include/os" - PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/inc" -) - -SET_TARGET_PROPERTIES(tmq PROPERTIES OUTPUT_NAME tmq) -SET_TARGET_PROPERTIES(tstream PROPERTIES OUTPUT_NAME tstream) -SET_TARGET_PROPERTIES(demoapi PROPERTIES OUTPUT_NAME demoapi) diff --git a/examples/c/CMakeLists.txt b/examples/c/CMakeLists.txt index 17a9257c499c6a1efd24fb23b47a9e9835ad7ade..4a9007acecaa679dc716c5665eea7f0cd1e34dbb 100644 --- a/examples/c/CMakeLists.txt +++ b/examples/c/CMakeLists.txt @@ -3,20 +3,70 @@ PROJECT(TDengine) IF (TD_LINUX) INCLUDE_DIRECTORIES(. ${TD_SOURCE_DIR}/src/inc ${TD_SOURCE_DIR}/src/client/inc ${TD_SOURCE_DIR}/inc) AUX_SOURCE_DIRECTORY(. 
SRC) - ADD_EXECUTABLE(demo apitest.c) - TARGET_LINK_LIBRARIES(demo taos_static trpc tutil pthread ) - ADD_EXECUTABLE(sml schemaless.c) - TARGET_LINK_LIBRARIES(sml taos_static trpc tutil pthread ) - ADD_EXECUTABLE(subscribe subscribe.c) - TARGET_LINK_LIBRARIES(subscribe taos_static trpc tutil pthread ) - ADD_EXECUTABLE(epoll epoll.c) - TARGET_LINK_LIBRARIES(epoll taos_static trpc tutil pthread lua) + # ADD_EXECUTABLE(demo apitest.c) + #TARGET_LINK_LIBRARIES(demo taos_static trpc tutil pthread ) + #ADD_EXECUTABLE(sml schemaless.c) + #TARGET_LINK_LIBRARIES(sml taos_static trpc tutil pthread ) + #ADD_EXECUTABLE(subscribe subscribe.c) + #TARGET_LINK_LIBRARIES(subscribe taos_static trpc tutil pthread ) + #ADD_EXECUTABLE(epoll epoll.c) + #TARGET_LINK_LIBRARIES(epoll taos_static trpc tutil pthread lua) + + add_executable(tmq "") + add_executable(stream_demo "") + add_executable(demoapi "") + + target_sources(tmq + PRIVATE + "tmq.c" + ) + + target_sources(stream_demo + PRIVATE + "stream_demo.c" + ) + + target_sources(demoapi + PRIVATE + "demoapi.c" + ) + + target_link_libraries(tmq + taos_static + ) + + target_link_libraries(stream_demo + taos_static + ) + + target_link_libraries(demoapi + taos_static + ) + + target_include_directories(tmq + PUBLIC "${TD_SOURCE_DIR}/include/os" + PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/inc" + ) + + target_include_directories(stream_demo + PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/inc" + ) + + target_include_directories(demoapi + PUBLIC "${TD_SOURCE_DIR}/include/client" + PUBLIC "${TD_SOURCE_DIR}/include/os" + PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/inc" + ) + + SET_TARGET_PROPERTIES(tmq PROPERTIES OUTPUT_NAME tmq) + SET_TARGET_PROPERTIES(stream_demo PROPERTIES OUTPUT_NAME stream_demo) + SET_TARGET_PROPERTIES(demoapi PROPERTIES OUTPUT_NAME demoapi) ENDIF () IF (TD_DARWIN) INCLUDE_DIRECTORIES(. ${TD_SOURCE_DIR}/src/inc ${TD_SOURCE_DIR}/src/client/inc ${TD_SOURCE_DIR}/inc) AUX_SOURCE_DIRECTORY(. 
SRC) - ADD_EXECUTABLE(demo demo.c) - TARGET_LINK_LIBRARIES(demo taos_static trpc tutil pthread lua) - ADD_EXECUTABLE(epoll epoll.c) - TARGET_LINK_LIBRARIES(epoll taos_static trpc tutil pthread lua) + #ADD_EXECUTABLE(demo demo.c) + #TARGET_LINK_LIBRARIES(demo taos_static trpc tutil pthread lua) + #ADD_EXECUTABLE(epoll epoll.c) + #TARGET_LINK_LIBRARIES(epoll taos_static trpc tutil pthread lua) ENDIF () diff --git a/example/src/demoapi.c b/examples/c/demoapi.c similarity index 100% rename from example/src/demoapi.c rename to examples/c/demoapi.c diff --git a/example/src/tstream.c b/examples/c/stream_demo.c similarity index 100% rename from example/src/tstream.c rename to examples/c/stream_demo.c diff --git a/examples/c/subscribe.c b/examples/c/subscribe.c deleted file mode 100644 index 66d64d295ce5c2700088842dd2c3ce013225f3bd..0000000000000000000000000000000000000000 --- a/examples/c/subscribe.c +++ /dev/null @@ -1,263 +0,0 @@ -// sample code for TDengine subscribe/consume API -// to compile: gcc -o subscribe subscribe.c -ltaos - -#include -#include -#include -#include -#include "../../../include/client/taos.h" // include TDengine header file - -int nTotalRows; - -void print_result(TAOS_RES* res, int blockFetch) { - TAOS_ROW row = NULL; - int num_fields = taos_num_fields(res); - TAOS_FIELD* fields = taos_fetch_fields(res); - int nRows = 0; - - if (blockFetch) { - nRows = taos_fetch_block(res, &row); - //for (int i = 0; i < nRows; i++) { - // taos_print_row(buf, row + i, fields, num_fields); - // puts(buf); - //} - } else { - while ((row = taos_fetch_row(res))) { - char buf[4096] = {0}; - taos_print_row(buf, row, fields, num_fields); - puts(buf); - nRows++; - } - } - - nTotalRows += nRows; - printf("%d rows consumed.\n", nRows); -} - - -void subscribe_callback(TAOS_SUB* tsub, TAOS_RES *res, void* param, int code) { - print_result(res, *(int*)param); -} - - -void check_row_count(int line, TAOS_RES* res, int expected) { - int actual = 0; - TAOS_ROW row; - while ((row = taos_fetch_row(res))) { - actual++; - } - if (actual != expected) { - printf("line %d: row count mismatch, expected: %d, actual: %d\n", line, expected, actual); - } else { - printf("line %d: %d rows consumed as expected\n", line, actual); - } -} - - -void do_query(TAOS* taos, const char* sql) { - TAOS_RES* res = taos_query(taos, sql); - taos_free_result(res); -} - - -void run_test(TAOS* taos) { - do_query(taos, "drop database if exists test;"); - - usleep(100000); - do_query(taos, "create database test;"); - usleep(100000); - do_query(taos, "use test;"); - - usleep(100000); - do_query(taos, "create table meters(ts timestamp, a int) tags(area int);"); - - do_query(taos, "create table t0 using meters tags(0);"); - do_query(taos, "create table t1 using meters tags(1);"); - do_query(taos, "create table t2 using meters tags(2);"); - do_query(taos, "create table t3 using meters tags(3);"); - do_query(taos, "create table t4 using meters tags(4);"); - do_query(taos, "create table t5 using meters tags(5);"); - do_query(taos, "create table t6 using meters tags(6);"); - do_query(taos, "create table t7 using meters tags(7);"); - do_query(taos, "create table t8 using meters tags(8);"); - do_query(taos, "create table t9 using meters tags(9);"); - - do_query(taos, "insert into t0 values('2020-01-01 00:00:00.000', 0);"); - do_query(taos, "insert into t0 values('2020-01-01 00:01:00.000', 0);"); - do_query(taos, "insert into t0 values('2020-01-01 00:02:00.000', 0);"); - do_query(taos, "insert into t1 values('2020-01-01 00:00:00.000', 0);"); - 
do_query(taos, "insert into t1 values('2020-01-01 00:01:00.000', 0);"); - do_query(taos, "insert into t1 values('2020-01-01 00:02:00.000', 0);"); - do_query(taos, "insert into t1 values('2020-01-01 00:03:00.000', 0);"); - do_query(taos, "insert into t2 values('2020-01-01 00:00:00.000', 0);"); - do_query(taos, "insert into t2 values('2020-01-01 00:01:00.000', 0);"); - do_query(taos, "insert into t2 values('2020-01-01 00:01:01.000', 0);"); - do_query(taos, "insert into t2 values('2020-01-01 00:01:02.000', 0);"); - do_query(taos, "insert into t3 values('2020-01-01 00:01:02.000', 0);"); - do_query(taos, "insert into t4 values('2020-01-01 00:01:02.000', 0);"); - do_query(taos, "insert into t5 values('2020-01-01 00:01:02.000', 0);"); - do_query(taos, "insert into t6 values('2020-01-01 00:01:02.000', 0);"); - do_query(taos, "insert into t7 values('2020-01-01 00:01:02.000', 0);"); - do_query(taos, "insert into t8 values('2020-01-01 00:01:02.000', 0);"); - do_query(taos, "insert into t9 values('2020-01-01 00:01:02.000', 0);"); - - // super tables subscription - usleep(1000000); - - TAOS_SUB* tsub = taos_subscribe(taos, 0, "test", "select * from meters;", NULL, NULL, 0); - TAOS_RES* res = taos_consume(tsub); - check_row_count(__LINE__, res, 18); - - res = taos_consume(tsub); - check_row_count(__LINE__, res, 0); - - do_query(taos, "insert into t0 values('2020-01-01 00:02:00.001', 0);"); - do_query(taos, "insert into t8 values('2020-01-01 00:01:03.000', 0);"); - res = taos_consume(tsub); - check_row_count(__LINE__, res, 2); - - do_query(taos, "insert into t2 values('2020-01-01 00:01:02.001', 0);"); - do_query(taos, "insert into t1 values('2020-01-01 00:03:00.001', 0);"); - res = taos_consume(tsub); - check_row_count(__LINE__, res, 2); - - do_query(taos, "insert into t1 values('2020-01-01 00:03:00.002', 0);"); - res = taos_consume(tsub); - check_row_count(__LINE__, res, 1); - - // keep progress information and restart subscription - taos_unsubscribe(tsub, 1); - do_query(taos, "insert into t0 values('2020-01-01 00:04:00.000', 0);"); - tsub = taos_subscribe(taos, 1, "test", "select * from meters;", NULL, NULL, 0); - res = taos_consume(tsub); - check_row_count(__LINE__, res, 24); - - // keep progress information and continue previous subscription - taos_unsubscribe(tsub, 1); - tsub = taos_subscribe(taos, 0, "test", "select * from meters;", NULL, NULL, 0); - res = taos_consume(tsub); - check_row_count(__LINE__, res, 0); - - // don't keep progress information and continue previous subscription - taos_unsubscribe(tsub, 0); - tsub = taos_subscribe(taos, 0, "test", "select * from meters;", NULL, NULL, 0); - res = taos_consume(tsub); - check_row_count(__LINE__, res, 24); - - // single meter subscription - - taos_unsubscribe(tsub, 0); - tsub = taos_subscribe(taos, 0, "test", "select * from t0;", NULL, NULL, 0); - res = taos_consume(tsub); - check_row_count(__LINE__, res, 5); - - res = taos_consume(tsub); - check_row_count(__LINE__, res, 0); - - do_query(taos, "insert into t0 values('2020-01-01 00:04:00.001', 0);"); - res = taos_consume(tsub); - check_row_count(__LINE__, res, 1); - - taos_unsubscribe(tsub, 0); -} - - -int main(int argc, char *argv[]) { - const char* host = "127.0.0.1"; - const char* user = "root"; - const char* passwd = "taosdata"; - const char* sql = "select * from meters;"; - const char* topic = "test-multiple"; - int async = 1, restart = 0, keep = 1, test = 0, blockFetch = 0; - - for (int i = 1; i < argc; i++) { - if (strncmp(argv[i], "-h=", 3) == 0) { - host = argv[i] + 3; - continue; - } - 
if (strncmp(argv[i], "-u=", 3) == 0) { - user = argv[i] + 3; - continue; - } - if (strncmp(argv[i], "-p=", 3) == 0) { - passwd = argv[i] + 3; - continue; - } - if (strcmp(argv[i], "-sync") == 0) { - async = 0; - continue; - } - if (strcmp(argv[i], "-restart") == 0) { - restart = 1; - continue; - } - if (strcmp(argv[i], "-single") == 0) { - sql = "select * from t0;"; - topic = "test-single"; - continue; - } - if (strcmp(argv[i], "-nokeep") == 0) { - keep = 0; - continue; - } - if (strncmp(argv[i], "-sql=", 5) == 0) { - sql = argv[i] + 5; - topic = "test-custom"; - continue; - } - if (strcmp(argv[i], "-test") == 0) { - test = 1; - continue; - } - if (strcmp(argv[i], "-block-fetch") == 0) { - blockFetch = 1; - continue; - } - } - - TAOS* taos = taos_connect(host, user, passwd, "", 0); - if (taos == NULL) { - printf("failed to connect to db, reason:%s\n", taos_errstr(taos)); - exit(1); - } - - if (test) { - run_test(taos); - taos_close(taos); - exit(0); - } - - taos_select_db(taos, "test"); - TAOS_SUB* tsub = NULL; - if (async) { - // create an asynchronized subscription, the callback function will be called every 1s - tsub = taos_subscribe(taos, restart, topic, sql, subscribe_callback, &blockFetch, 1000); - } else { - // create an synchronized subscription, need to call 'taos_consume' manually - tsub = taos_subscribe(taos, restart, topic, sql, NULL, NULL, 0); - } - - if (tsub == NULL) { - printf("failed to create subscription.\n"); - exit(0); - } - - if (async) { - getchar(); - } else while(1) { - TAOS_RES* res = taos_consume(tsub); - if (res == NULL) { - printf("failed to consume data."); - break; - } else { - print_result(res, blockFetch); - getchar(); - } - } - - printf("total rows consumed: %d\n", nTotalRows); - taos_unsubscribe(tsub, keep); - taos_close(taos); - - return 0; -} diff --git a/example/src/tmq.c b/examples/c/tmq.c similarity index 84% rename from example/src/tmq.c rename to examples/c/tmq.c index 913096ee90294cf65ba81d605ed3e7d4f2fa803c..2e8aa21da7a2bdd83e4a995beccb99ac40228a48 100644 --- a/example/src/tmq.c +++ b/examples/c/tmq.c @@ -24,6 +24,7 @@ static void msg_process(TAOS_RES* msg) { char buf[1024]; /*memset(buf, 0, 1024);*/ printf("topic: %s\n", tmq_get_topic_name(msg)); + printf("db: %s\n", tmq_get_db_name(msg)); printf("vg: %d\n", tmq_get_vgroup_id(msg)); while (1) { TAOS_ROW row = taos_fetch_row(msg); @@ -106,7 +107,7 @@ int32_t create_topic() { } taos_free_result(pRes); - /*pRes = taos_query(pConn, "create topic topic_ctb_column as abc1");*/ + /*pRes = taos_query(pConn, "create topic topic_ctb_column as database abc1");*/ pRes = taos_query(pConn, "create topic topic_ctb_column as select ts, c1, c2, c3 from st1"); if (taos_errno(pRes) != 0) { printf("failed to create topic topic_ctb_column, reason:%s\n", taos_errstr(pRes)); @@ -165,7 +166,6 @@ tmq_t* build_consumer() { tmq_conf_set(conf, "group.id", "tg2"); tmq_conf_set(conf, "td.connect.user", "root"); tmq_conf_set(conf, "td.connect.pass", "taosdata"); - /*tmq_conf_set(conf, "td.connect.db", "abc1");*/ tmq_conf_set(conf, "msg.with.table.name", "true"); tmq_conf_set(conf, "enable.auto.commit", "false"); tmq_conf_set_auto_commit_cb(conf, tmq_commit_cb_print, NULL); @@ -191,20 +191,18 @@ void basic_consume_loop(tmq_t* tmq, tmq_list_t* topics) { return; } int32_t cnt = 0; - /*clock_t startTime = clock();*/ while (running) { TAOS_RES* tmqmessage = tmq_consumer_poll(tmq, 0); if (tmqmessage) { cnt++; + msg_process(tmqmessage); + /*if (cnt >= 2) break;*/ /*printf("get data\n");*/ - /*msg_process(tmqmessage);*/ 
taos_free_result(tmqmessage);
 /*} else {*/
 /*break;*/
 }
 }
- /*clock_t endTime = clock();*/
- /*printf("log cnt: %d %f s\n", cnt, (double)(endTime - startTime) / CLOCKS_PER_SEC);*/
 err = tmq_consumer_close(tmq);
 if (err)
@@ -253,39 +251,6 @@ void sync_consume_loop(tmq_t* tmq, tmq_list_t* topics) {
 fprintf(stderr, "%% Consumer closed\n");
 }
-void perf_loop(tmq_t* tmq, tmq_list_t* topics) {
- tmq_resp_err_t err;
-
- if ((err = tmq_subscribe(tmq, topics))) {
- fprintf(stderr, "%% Failed to start consuming topics: %s\n", tmq_err2str(err));
- printf("subscribe err\n");
- return;
- }
- int32_t batchCnt = 0;
- int32_t skipLogNum = 0;
- clock_t startTime = clock();
- while (running) {
- TAOS_RES* tmqmessage = tmq_consumer_poll(tmq, 500);
- if (tmqmessage) {
- batchCnt++;
- /*skipLogNum += tmqGetSkipLogNum(tmqmessage);*/
- /*msg_process(tmqmessage);*/
- taos_free_result(tmqmessage);
- } else {
- break;
- }
- }
- clock_t endTime = clock();
- printf("log batch cnt: %d, skip log cnt: %d, time used:%f s\n", batchCnt, skipLogNum,
- (double)(endTime - startTime) / CLOCKS_PER_SEC);
-
- err = tmq_consumer_close(tmq);
- if (err)
- fprintf(stderr, "%% Failed to close consumer: %s\n", tmq_err2str(err));
- else
- fprintf(stderr, "%% Consumer closed\n");
-}
-
 int main(int argc, char* argv[]) {
 if (argc > 1) {
 printf("env init\n");
@@ -296,7 +261,6 @@ int main(int argc, char* argv[]) {
 }
 tmq_t* tmq = build_consumer();
 tmq_list_t* topic_list = build_topic_list();
- /*perf_loop(tmq, topic_list);*/
- /*basic_consume_loop(tmq, topic_list);*/
- sync_consume_loop(tmq, topic_list);
+ basic_consume_loop(tmq, topic_list);
+ /*sync_consume_loop(tmq, topic_list);*/
 }
diff --git a/include/client/taos.h b/include/client/taos.h
index 0b8c67aa794363ff851c69e5848978c78c6a4abc..b65091f52bdd218138891970f079158033cb2d69 100644
--- a/include/client/taos.h
+++ b/include/client/taos.h
@@ -85,6 +85,14 @@ typedef struct taosField {
 int32_t bytes;
 } TAOS_FIELD;
+typedef struct TAOS_FIELD_E {
+ char name[65];
+ int8_t type;
+ uint8_t precision;
+ uint8_t scale;
+ int32_t bytes;
+} TAOS_FIELD_E;
+
 #ifdef WINDOWS
 #define DLL_EXPORT __declspec(dllexport)
 #else
@@ -134,7 +142,10 @@ DLL_EXPORT TAOS_STMT *taos_stmt_init(TAOS *taos);
 DLL_EXPORT int taos_stmt_prepare(TAOS_STMT *stmt, const char *sql, unsigned long length);
 DLL_EXPORT int taos_stmt_set_tbname_tags(TAOS_STMT *stmt, const char *name, TAOS_MULTI_BIND *tags);
 DLL_EXPORT int taos_stmt_set_tbname(TAOS_STMT *stmt, const char *name);
+DLL_EXPORT int taos_stmt_set_tags(TAOS_STMT *stmt, TAOS_MULTI_BIND *tags);
 DLL_EXPORT int taos_stmt_set_sub_tbname(TAOS_STMT *stmt, const char *name);
+DLL_EXPORT int taos_stmt_get_tag_fields(TAOS_STMT *stmt, int *fieldNum, TAOS_FIELD_E **fields);
+DLL_EXPORT int taos_stmt_get_col_fields(TAOS_STMT *stmt, int *fieldNum, TAOS_FIELD_E **fields);
 DLL_EXPORT int taos_stmt_is_insert(TAOS_STMT *stmt, int *insert);
 DLL_EXPORT int taos_stmt_num_params(TAOS_STMT *stmt, int *nums);
@@ -230,7 +241,7 @@ DLL_EXPORT const char *tmq_err2str(tmq_resp_err_t);
 DLL_EXPORT tmq_resp_err_t tmq_subscribe(tmq_t *tmq, const tmq_list_t *topic_list);
 DLL_EXPORT tmq_resp_err_t tmq_unsubscribe(tmq_t *tmq);
 DLL_EXPORT tmq_resp_err_t tmq_subscription(tmq_t *tmq, tmq_list_t **topics);
-DLL_EXPORT TAOS_RES *tmq_consumer_poll(tmq_t *tmq, int64_t wait_time);
+DLL_EXPORT TAOS_RES *tmq_consumer_poll(tmq_t *tmq, int64_t timeout);
 DLL_EXPORT tmq_resp_err_t tmq_consumer_close(tmq_t *tmq);
 DLL_EXPORT tmq_resp_err_t tmq_commit_sync(tmq_t *tmq, const tmq_topic_vgroup_list_t *offsets);
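[Editor's note: the taos.h hunk above adds TAOS_FIELD_E and the taos_stmt_get_tag_fields()/taos_stmt_get_col_fields() getters. A hedged sketch of how a client might dump an insert statement's tag schema with them; the patch does not show how the returned array is released, so that step is omitted here.]

#include <stdio.h>
#include "taos.h"

// assumes `stmt` was prepared with an INSERT ... USING ... statement and a
// target table has already been set, so the tag schema is resolvable
static void print_tag_fields(TAOS_STMT *stmt) {
  int           num    = 0;
  TAOS_FIELD_E *fields = NULL;
  if (taos_stmt_get_tag_fields(stmt, &num, &fields) != 0) {
    printf("failed to get tag fields\n");
    return;
  }
  for (int i = 0; i < num; i++) {
    printf("tag %d: name=%s type=%d precision=%u scale=%u bytes=%d\n", i,
           fields[i].name, fields[i].type, fields[i].precision,
           fields[i].scale, fields[i].bytes);
  }
  // taos_stmt_get_col_fields() reports the value columns the same way
}

DLL_EXPORT void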
tmq_commit_async(tmq_t *tmq, const tmq_topic_vgroup_list_t *offsets, tmq_commit_cb *cb, void *param); @@ -258,6 +269,7 @@ DLL_EXPORT void tmq_conf_set_auto_commit_cb(tmq_conf_t *conf, tmq_comm /* -------------------------TMQ MSG HANDLE INTERFACE---------------------- */ DLL_EXPORT const char *tmq_get_topic_name(TAOS_RES *res); +DLL_EXPORT const char *tmq_get_db_name(TAOS_RES *res); DLL_EXPORT int32_t tmq_get_vgroup_id(TAOS_RES *res); DLL_EXPORT const char *tmq_get_table_name(TAOS_RES *res); diff --git a/include/common/taosdef.h b/include/common/taosdef.h index d39c7a121593e6feeb5cfbf104d07642bdbfaff7..516df71b0b886872fc1676bb058c9dc91ea9c3cb 100644 --- a/include/common/taosdef.h +++ b/include/common/taosdef.h @@ -97,6 +97,7 @@ extern char *qtypeStr[]; #undef TD_DEBUG_PRINT_ROW #undef TD_DEBUG_PRINT_TSDB_LOAD_DCOLS +#undef TD_DEBUG_PRINT_TAG #ifdef __cplusplus } diff --git a/include/common/tcommon.h b/include/common/tcommon.h index 45745403f3eec74cea7febbadf06b029c226e9a2..88fa0e728f397006759e296cf1e3533816ee540f 100644 --- a/include/common/tcommon.h +++ b/include/common/tcommon.h @@ -105,12 +105,14 @@ typedef struct SColumnInfoData { } SColumnInfoData; typedef struct SQueryTableDataCond { - STimeWindow twindow; + //STimeWindow twindow; int32_t order; // desc|asc order to iterate the data block int32_t numOfCols; SColumnInfo *colList; bool loadExternalRows; // load external rows or not int32_t type; // data block load type: + int32_t numOfTWindows; + STimeWindow *twindows; } SQueryTableDataCond; void* blockDataDestroy(SSDataBlock* pBlock); diff --git a/include/common/tdatablock.h b/include/common/tdatablock.h index b6af1ee7a6c486e4cd307d3458286f61ce162174..66b81efc5b32b961de01fce1dbe5a5a6cee808ef 100644 --- a/include/common/tdatablock.h +++ b/include/common/tdatablock.h @@ -227,10 +227,13 @@ int32_t blockDataTrimFirstNRows(SSDataBlock* pBlock, size_t n); SSDataBlock* createOneDataBlock(const SSDataBlock* pDataBlock, bool copyData); +void blockCompressEncode(const SSDataBlock* pBlock, char* data, int32_t* dataLen, int32_t numOfCols, int8_t needCompress); +const char* blockCompressDecode(SSDataBlock* pBlock, int32_t numOfCols, int32_t numOfRows, const char* pData); + void blockDebugShowData(const SArray* dataBlocks); int32_t buildSubmitReqFromDataBlock(SSubmitReq** pReq, const SArray* pDataBlocks, STSchema* pTSchema, int32_t vgId, - tb_uid_t uid, tb_uid_t suid); + tb_uid_t suid); SSubmitReq* tdBlockToSubmit(const SArray* pBlocks, const STSchema* pSchema, bool createTb, int64_t suid, const char* stbFullName, int32_t vgId); @@ -246,57 +249,8 @@ static FORCE_INLINE int32_t blockCompressColData(SColumnInfoData* pColRes, int32 colSize + COMP_OVERFLOW_BYTES, compressed, NULL, 0); } -static FORCE_INLINE void blockCompressEncode(const SSDataBlock* pBlock, char* data, int32_t* dataLen, int32_t numOfCols, - int8_t needCompress) { - int32_t* actualLen = (int32_t*)data; - data += sizeof(int32_t); - - uint64_t* groupId = (uint64_t*)data; - data += sizeof(uint64_t); - - int32_t* colSizes = (int32_t*)data; - data += numOfCols * sizeof(int32_t); - - *dataLen = (numOfCols * sizeof(int32_t) + sizeof(uint64_t) + sizeof(int32_t)); - - int32_t numOfRows = pBlock->info.rows; - for (int32_t col = 0; col < numOfCols; ++col) { - SColumnInfoData* pColRes = (SColumnInfoData*)taosArrayGet(pBlock->pDataBlock, col); - - // copy the null bitmap - if (IS_VAR_DATA_TYPE(pColRes->info.type)) { - size_t metaSize = numOfRows * sizeof(int32_t); - memcpy(data, pColRes->varmeta.offset, metaSize); - data += metaSize; - (*dataLen) += 
metaSize; - } else { - int32_t len = BitmapLen(numOfRows); - memcpy(data, pColRes->nullbitmap, len); - data += len; - (*dataLen) += len; - } - - if (needCompress) { - colSizes[col] = blockCompressColData(pColRes, numOfRows, data, needCompress); - data += colSizes[col]; - (*dataLen) += colSizes[col]; - } else { - colSizes[col] = colDataGetLength(pColRes, numOfRows); - (*dataLen) += colSizes[col]; - memmove(data, pColRes->pData, colSizes[col]); - data += colSizes[col]; - } - - colSizes[col] = htonl(colSizes[col]); - } - - *actualLen = *dataLen; - *groupId = pBlock->info.groupId; -} - #ifdef __cplusplus } #endif #endif /*_TD_COMMON_EP_H_*/ - diff --git a/include/common/tdataformat.h b/include/common/tdataformat.h index ef931ed3b1c52b7bdc9d12da77f3bdc8ad1f7837..073d796717013feccf61867078215503c443bc37 100644 --- a/include/common/tdataformat.h +++ b/include/common/tdataformat.h @@ -18,6 +18,7 @@ #include "os.h" #include "talgo.h" +#include "tarray.h" #include "tencode.h" #include "ttypes.h" #include "tutil.h" @@ -29,6 +30,7 @@ extern "C" { typedef struct SSchema SSchema; typedef struct STColumn STColumn; typedef struct STSchema STSchema; +typedef struct SValue SValue; typedef struct SColVal SColVal; typedef struct STSRow2 STSRow2; typedef struct STSRowBuilder STSRowBuilder; @@ -39,32 +41,38 @@ typedef struct STag STag; int32_t tTSchemaCreate(int32_t sver, SSchema *pSchema, int32_t nCols, STSchema **ppTSchema); void tTSchemaDestroy(STSchema *pTSchema); -// SColVal -#define ColValNONE ((SColVal){.type = COL_VAL_NONE, .nData = 0, .pData = NULL}) -#define ColValNULL ((SColVal){.type = COL_VAL_NULL, .nData = 0, .pData = NULL}) -#define ColValDATA(nData, pData) ((SColVal){.type = COL_VAL_DATA, .nData = (nData), .pData = (pData)}) - // STSRow2 +#define COL_VAL_NONE(CID) ((SColVal){.cid = (CID), .isNone = 1}) +#define COL_VAL_NULL(CID) ((SColVal){.cid = (CID), .isNull = 1}) +#define COL_VAL_VALUE(CID, V) ((SColVal){.cid = (CID), .value = (V)}) + +int32_t tTSRowNew(STSRowBuilder *pBuilder, SArray *pArray, STSchema *pTSchema, STSRow2 **ppRow); +int32_t tTSRowClone(const STSRow2 *pRow, STSRow2 **ppRow); +void tTSRowFree(STSRow2 *pRow); +void tTSRowGet(STSRow2 *pRow, STSchema *pTSchema, int32_t iCol, SColVal *pColVal); +int32_t tTSRowToArray(STSRow2 *pRow, STSchema *pTSchema, SArray **ppArray); int32_t tPutTSRow(uint8_t *p, STSRow2 *pRow); int32_t tGetTSRow(uint8_t *p, STSRow2 *pRow); -int32_t tTSRowDup(const STSRow2 *pRow, STSRow2 **ppRow); -void tTSRowFree(STSRow2 *pRow); -int32_t tTSRowGet(const STSRow2 *pRow, STSchema *pTSchema, int32_t iCol, SColVal *pColVal); // STSRowBuilder -int32_t tTSRowBuilderInit(STSRowBuilder *pBuilder, int32_t sver, int32_t nCols, SSchema *pSchema); -void tTSRowBuilderClear(STSRowBuilder *pBuilder); -void tTSRowBuilderReset(STSRowBuilder *pBuilder); -int32_t tTSRowBuilderPut(STSRowBuilder *pBuilder, int32_t cid, uint8_t *pData, uint32_t nData); -int32_t tTSRowBuilderGetRow(STSRowBuilder *pBuilder, const STSRow2 **ppRow); +#define tsRowBuilderInit() ((STSRowBuilder){0}) +#define tsRowBuilderClear(B) \ + do { \ + if ((B)->pBuf) { \ + taosMemoryFree((B)->pBuf); \ + } \ + } while (0) // STag -int32_t tTagNew(STagVal *pTagVals, int16_t nTag, STag **ppTag); +int32_t tTagNew(SArray *pArray, int32_t version, int8_t isJson, STag **ppTag); void tTagFree(STag *pTag); -int32_t tTagSet(STag *pTag, SSchema *pSchema, int32_t nCols, int iCol, uint8_t *pData, uint32_t nData, STag **ppTag); -void tTagGet(STag *pTag, int16_t cid, int8_t type, uint8_t **ppData, uint32_t *nData); +bool 
tTagGet(const STag *pTag, STagVal *pTagVal); +char *tTagValToData(const STagVal *pTagVal, bool isJson); int32_t tEncodeTag(SEncoder *pEncoder, const STag *pTag); int32_t tDecodeTag(SDecoder *pDecoder, STag **ppTag); +int32_t tTagToValArray(const STag *pTag, SArray **ppArray); +void debugPrintSTag(STag *pTag, const char *tag, int32_t ln); // TODO: remove +void debugCheckTags(STag *pTag); // TODO: remove // STRUCT ================= struct STColumn { @@ -87,7 +95,9 @@ struct STSchema { #define TSROW_HAS_NONE ((uint8_t)0x1) #define TSROW_HAS_NULL ((uint8_t)0x2U) #define TSROW_HAS_VAL ((uint8_t)0x4U) -#define TSROW_KV_ROW ((uint8_t)0x10U) +#define TSROW_KV_SMALL ((uint8_t)0x10U) +#define TSROW_KV_MID ((uint8_t)0x20U) +#define TSROW_KV_BIG ((uint8_t)0x40U) struct STSRow2 { TSKEY ts; uint8_t flags; @@ -97,33 +107,65 @@ struct STSRow2 { }; struct STSRowBuilder { - STSchema *pTSchema; - int32_t szBitMap1; - int32_t szBitMap2; - int32_t szKVBuf; - uint8_t *pKVBuf; - int32_t szTPBuf; - uint8_t *pTPBuf; - int32_t iCol; - int32_t vlenKV; - int32_t vlenTP; - STSRow2 row; + STSRow2 tsRow; + int32_t szBuf; + uint8_t *pBuf; +}; + +struct SValue { + union { + int8_t i8; // TSDB_DATA_TYPE_BOOL||TSDB_DATA_TYPE_TINYINT + uint8_t u8; // TSDB_DATA_TYPE_UTINYINT + int16_t i16; // TSDB_DATA_TYPE_SMALLINT + uint16_t u16; // TSDB_DATA_TYPE_USMALLINT + int32_t i32; // TSDB_DATA_TYPE_INT + uint32_t u32; // TSDB_DATA_TYPE_UINT + int64_t i64; // TSDB_DATA_TYPE_BIGINT + uint64_t u64; // TSDB_DATA_TYPE_UBIGINT + TSKEY ts; // TSDB_DATA_TYPE_TIMESTAMP + float f; // TSDB_DATA_TYPE_FLOAT + double d; // TSDB_DATA_TYPE_DOUBLE + struct { + uint32_t nData; + uint8_t *pData; + }; + }; }; -typedef enum { COL_VAL_NONE = 0, COL_VAL_NULL = 1, COL_VAL_DATA = 2 } EColValT; struct SColVal { - EColValT type; - uint32_t nData; - uint8_t *pData; + int16_t cid; + int8_t isNone; + int8_t isNull; + SValue value; }; +#pragma pack(push, 1) struct STagVal { - int16_t cid; - int8_t type; - uint32_t nData; - uint8_t *pData; + union { + int16_t cid; + char *pKey; + }; + int8_t type; + union { + int64_t i64; + struct { + uint32_t nData; + uint8_t *pData; + }; + }; }; +#define TD_TAG_JSON ((int8_t)0x40) // distinguish JSON string and JSON value with the highest bit +#define TD_TAG_LARGE ((int8_t)0x20) +struct STag { + int8_t flags; + int16_t len; + int16_t nTag; + int32_t ver; + int8_t idx[]; +}; +#pragma pack(pop) + #if 1 //================================================================================================================================================ // Imported since 3.0 and use bitmap to demonstrate None/Null/Norm, while use Null/Norm below 3.0 without of bitmap. 
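[Editor's note: with the old SKVRow/tTagSet interface removed further down, tags are now built by filling an SArray of STagVal and calling tTagNew(), per the declarations above. A speculative sketch follows; the helper name, the tag version value, and the use of the i64 member for fixed-width values are assumptions inferred from this header.]

#include "tarray.h"        // pulled into tdataformat.h by this patch
#include "tdataformat.h"

// hypothetical helper: pack a single INT tag (column id 2, value 9) into an STag
static int32_t buildDemoTag(STag **ppTag) {
  SArray *pArray = taosArrayInit(1, sizeof(STagVal));
  if (pArray == NULL) return -1;

  STagVal val = {0};
  val.cid = 2;                    // tag column id
  val.type = TSDB_DATA_TYPE_INT;
  val.i64 = 9;                    // fixed-width values travel in the i64 member
  taosArrayPush(pArray, &val);

  // version 1, not a JSON tag; tTagNew() packs the array into the STag layout
  int32_t code = tTagNew(pArray, 1, 0, ppTag);
  taosArrayDestroy(pArray);
  return code;                    // caller releases *ppTag with tTagFree()
}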
#define TD_SUPPORT_BITMAP @@ -366,109 +408,6 @@ SDataCols *tdFreeDataCols(SDataCols *pCols); int32_t tdMergeDataCols(SDataCols *target, SDataCols *source, int32_t rowsToMerge, int32_t *pOffset, bool update, TDRowVerT maxVer); -// ----------------- K-V data row structure -/* |<-------------------------------------- len -------------------------------------------->| - * |<----- header ----->|<--------------------------- body -------------------------------->| - * +----------+----------+---------------------------------+---------------------------------+ - * | uint16_t | int16_t | | | - * +----------+----------+---------------------------------+---------------------------------+ - * | len | ncols | cols index | data part | - * +----------+----------+---------------------------------+---------------------------------+ - */ -typedef void *SKVRow; - -typedef struct { - int16_t colId; - uint16_t offset; -} SColIdx; - -#define TD_KV_ROW_HEAD_SIZE (sizeof(uint16_t) + sizeof(int16_t)) - -#define kvRowLen(r) (*(uint16_t *)(r)) -#define kvRowNCols(r) (*(int16_t *)POINTER_SHIFT(r, sizeof(uint16_t))) -#define kvRowSetLen(r, len) kvRowLen(r) = (len) -#define kvRowSetNCols(r, n) kvRowNCols(r) = (n) -#define kvRowColIdx(r) (SColIdx *)POINTER_SHIFT(r, TD_KV_ROW_HEAD_SIZE) -#define kvRowValues(r) POINTER_SHIFT(r, TD_KV_ROW_HEAD_SIZE + sizeof(SColIdx) * kvRowNCols(r)) -#define kvRowCpy(dst, r) memcpy((dst), (r), kvRowLen(r)) -#define kvRowColVal(r, colIdx) POINTER_SHIFT(kvRowValues(r), (colIdx)->offset) -#define kvRowColIdxAt(r, i) (kvRowColIdx(r) + (i)) -#define kvRowFree(r) taosMemoryFreeClear(r) -#define kvRowEnd(r) POINTER_SHIFT(r, kvRowLen(r)) -#define kvRowValLen(r) (kvRowLen(r) - TD_KV_ROW_HEAD_SIZE - sizeof(SColIdx) * kvRowNCols(r)) -#define kvRowTKey(r) (*(TKEY *)(kvRowValues(r))) -#define kvRowKey(r) tdGetKey(kvRowTKey(r)) -#define kvRowKeys(r) POINTER_SHIFT(r, *(uint16_t *)POINTER_SHIFT(r, TD_KV_ROW_HEAD_SIZE + sizeof(int16_t))) -#define kvRowDeleted(r) TKEY_IS_DELETED(kvRowTKey(r)) - -SKVRow tdKVRowDup(SKVRow row); -int32_t tdSetKVRowDataOfCol(SKVRow *orow, int16_t colId, int8_t type, void *value); -int32_t tdEncodeKVRow(void **buf, SKVRow row); -void *tdDecodeKVRow(void *buf, SKVRow *row); -void tdSortKVRowByColIdx(SKVRow row); - -static FORCE_INLINE int32_t comparTagId(const void *key1, const void *key2) { - if (*(int16_t *)key1 > ((SColIdx *)key2)->colId) { - return 1; - } else if (*(int16_t *)key1 < ((SColIdx *)key2)->colId) { - return -1; - } else { - return 0; - } -} - -static FORCE_INLINE void *tdGetKVRowValOfCol(const SKVRow row, int16_t colId) { - void *ret = taosbsearch(&colId, kvRowColIdx(row), kvRowNCols(row), sizeof(SColIdx), comparTagId, TD_EQ); - if (ret == NULL) return NULL; - return kvRowColVal(row, (SColIdx *)ret); -} - -static FORCE_INLINE void *tdGetKVRowIdxOfCol(SKVRow row, int16_t colId) { - return taosbsearch(&colId, kvRowColIdx(row), kvRowNCols(row), sizeof(SColIdx), comparTagId, TD_EQ); -} - -// ----------------- K-V data row builder -typedef struct { - int16_t tCols; - int16_t nCols; - SColIdx *pColIdx; - uint16_t alloc; - uint16_t size; - void *buf; -} SKVRowBuilder; - -int32_t tdInitKVRowBuilder(SKVRowBuilder *pBuilder); -void tdDestroyKVRowBuilder(SKVRowBuilder *pBuilder); -void tdResetKVRowBuilder(SKVRowBuilder *pBuilder); -SKVRow tdGetKVRowFromBuilder(SKVRowBuilder *pBuilder); - -static FORCE_INLINE int32_t tdAddColToKVRow(SKVRowBuilder *pBuilder, col_id_t colId, const void *value, int32_t tlen) { - if (pBuilder->nCols >= pBuilder->tCols) { - pBuilder->tCols *= 2; - 
SColIdx *pColIdx = (SColIdx *)taosMemoryRealloc((void *)(pBuilder->pColIdx), sizeof(SColIdx) * pBuilder->tCols); - if (pColIdx == NULL) return -1; - pBuilder->pColIdx = pColIdx; - } - - pBuilder->pColIdx[pBuilder->nCols].colId = colId; - pBuilder->pColIdx[pBuilder->nCols].offset = pBuilder->size; - - pBuilder->nCols++; - - if (tlen > pBuilder->alloc - pBuilder->size) { - while (tlen > pBuilder->alloc - pBuilder->size) { - pBuilder->alloc *= 2; - } - void *buf = taosMemoryRealloc(pBuilder->buf, pBuilder->alloc); - if (buf == NULL) return -1; - pBuilder->buf = buf; - } - - memcpy(POINTER_SHIFT(pBuilder->buf, pBuilder->size), value, tlen); - pBuilder->size += tlen; - - return 0; -} #endif #ifdef __cplusplus diff --git a/include/common/tglobal.h b/include/common/tglobal.h index 2a4ef565dd1c6b6742446adee2daf953665b99e5..30ae6c2adb49a811803d04309f43f3068065269c 100644 --- a/include/common/tglobal.h +++ b/include/common/tglobal.h @@ -45,6 +45,8 @@ extern bool tsEnableSlaveQuery; extern bool tsPrintAuth; extern int64_t tsTickPerMin[3]; +extern int32_t tsCountAlwaysReturnValue; + // multi-process extern int32_t tsMultiProcess; extern int32_t tsMnodeShmSize; @@ -102,7 +104,6 @@ extern int32_t tsMaxStreamComputDelay; extern int32_t tsStreamCompStartDelay; extern int32_t tsRetryStreamCompDelay; extern float tsStreamComputDelayRatio; // the delayed computing ration of the whole time window -extern int32_t tsProjectExecInterval; extern int64_t tsMaxRetentWindow; // build info diff --git a/include/common/tmsg.h b/include/common/tmsg.h index 647da79e563a9d697c556b73c6aad2b701516fb2..e0dcfbd54442854c991f03510b1e7ba78fc9032b 100644 --- a/include/common/tmsg.h +++ b/include/common/tmsg.h @@ -244,7 +244,7 @@ typedef struct { const void* pMsg; } SSubmitMsgIter; -int32_t tInitSubmitMsgIter(SSubmitReq* pMsg, SSubmitMsgIter* pIter); +int32_t tInitSubmitMsgIter(const SSubmitReq* pMsg, SSubmitMsgIter* pIter); int32_t tGetSubmitMsgNext(SSubmitMsgIter* pIter, SSubmitBlk** pPBlock); int32_t tInitSubmitBlkIter(SSubmitMsgIter* pMsgIter, SSubmitBlk* pBlock, SSubmitBlkIter* pIter); STSRow* tGetSubmitBlkNext(SSubmitBlkIter* pIter); @@ -287,7 +287,7 @@ typedef struct SSchema { char name[TSDB_COL_NAME_LEN]; } SSchema; -#define COL_IS_SET(FLG) ((FLG) & (COL_SET_VAL | COL_SET_NULL) != 0) +#define COL_IS_SET(FLG) (((FLG) & (COL_SET_VAL | COL_SET_NULL)) != 0) #define COL_CLR_SET(FLG) ((FLG) &= (~(COL_SET_VAL | COL_SET_NULL))) #define IS_BSMA_ON(s) (((s)->flags & 0x01) == COL_SMA_ON) @@ -479,12 +479,8 @@ int32_t tDecodeSEpSet(SDecoder* pDecoder, SEpSet* pEp); int32_t taosEncodeSEpSet(void** buf, const SEpSet* pEp); void* taosDecodeSEpSet(const void* buf, SEpSet* pEp); -typedef struct { - SEpSet epSet; -} SMEpSet; - -int32_t tSerializeSMEpSet(void* buf, int32_t bufLen, SMEpSet* pReq); -int32_t tDeserializeSMEpSet(void* buf, int32_t buflen, SMEpSet* pReq); +int32_t tSerializeSEpSet(void* buf, int32_t bufLen, const SEpSet* pEpset); +int32_t tDeserializeSEpSet(void* buf, int32_t buflen, SEpSet* pEpset); typedef struct { int8_t connType; @@ -575,13 +571,6 @@ int32_t tSerializeSGetUserAuthRsp(void* buf, int32_t bufLen, SGetUserAuthRsp* pR int32_t tDeserializeSGetUserAuthRsp(void* buf, int32_t bufLen, SGetUserAuthRsp* pRsp); void tFreeSGetUserAuthRsp(SGetUserAuthRsp* pRsp); -typedef struct { - int16_t colId; // column id - int16_t colIndex; // column index in colList if it is a normal column or index in tagColList if a tag - int16_t flag; // denote if it is a tag or a normal column - char name[TSDB_DB_FNAME_LEN]; -} SColIndex; - typedef 
struct { int16_t lowerRelOptr; int16_t upperRelOptr; @@ -656,6 +645,9 @@ typedef struct { typedef struct { int32_t code; + char tbFName[TSDB_TABLE_FNAME_LEN]; + int32_t sversion; + int32_t tversion; } SQueryTableRsp; int32_t tSerializeSQueryTableRsp(void* buf, int32_t bufLen, SQueryTableRsp* pRsp); @@ -780,6 +772,7 @@ typedef struct { int8_t cacheLastRow; int32_t numOfRetensions; SArray* pRetensions; + int8_t schemaless; } SDbCfgRsp; int32_t tSerializeSDbCfgRsp(void* buf, int32_t bufLen, const SDbCfgRsp* pRsp); @@ -792,19 +785,24 @@ typedef struct { int32_t tSerializeSQnodeListReq(void* buf, int32_t bufLen, SQnodeListReq* pReq); int32_t tDeserializeSQnodeListReq(void* buf, int32_t bufLen, SQnodeListReq* pReq); +typedef struct SQueryNodeAddr { + int32_t nodeId; // vgId or qnodeId + SEpSet epSet; +} SQueryNodeAddr; + +typedef struct { + SQueryNodeAddr addr; + uint64_t load; +} SQueryNodeLoad; + typedef struct { - SArray* addrsList; // SArray + SArray* qnodeList; // SArray } SQnodeListRsp; int32_t tSerializeSQnodeListRsp(void* buf, int32_t bufLen, SQnodeListRsp* pRsp); int32_t tDeserializeSQnodeListRsp(void* buf, int32_t bufLen, SQnodeListRsp* pRsp); void tFreeSQnodeListRsp(SQnodeListRsp* pRsp); -typedef struct SQueryNodeAddr { - int32_t nodeId; // vgId or qnodeId - SEpSet epSet; -} SQueryNodeAddr; - typedef struct { SArray* pArray; // Array of SUseDbRsp } SUseDbBatchRsp; @@ -927,6 +925,20 @@ typedef struct { int32_t syncState; } SMnodeLoad; +typedef struct { + int32_t dnodeId; + int64_t numOfProcessedQuery; + int64_t numOfProcessedCQuery; + int64_t numOfProcessedFetch; + int64_t numOfProcessedDrop; + int64_t numOfProcessedHb; + int64_t cacheDataSize; + int64_t numOfQueryInQueue; + int64_t numOfFetchInQueue; + int64_t timeInQueryQueue; + int64_t timeInFetchQueue; +} SQnodeLoad; + typedef struct { int32_t sver; // software version int64_t dnodeVer; // dnode table version in sdb @@ -938,6 +950,7 @@ typedef struct { int32_t numOfSupportVnodes; char dnodeEp[TSDB_EP_LEN]; SMnodeLoad mload; + SQnodeLoad qload; SClusterCfg clusterCfg; SArray* pVloads; // array of SVnodeLoad } SStatusReq; @@ -982,7 +995,6 @@ typedef struct { typedef struct { int32_t vgId; - int32_t dnodeId; char db[TSDB_DB_FNAME_LEN]; int64_t dbUid; int32_t vgVersion; @@ -1005,15 +1017,14 @@ typedef struct { int8_t compression; int8_t strict; int8_t cacheLastRow; + int8_t isTsma; + int8_t standby; int8_t replica; int8_t selfIndex; SReplica replicas[TSDB_MAX_REPLICA]; int32_t numOfRetensions; SArray* pRetensions; // SRetention - - // for tsma - int8_t isTsma; - + void* pTsma; } SCreateVnodeReq; int32_t tSerializeSCreateVnodeReq(void* buf, int32_t bufLen, SCreateVnodeReq* pReq); @@ -1051,8 +1062,8 @@ typedef struct { int8_t walLevel; int8_t strict; int8_t cacheLastRow; - int8_t replica; int8_t selfIndex; + int8_t replica; SReplica replicas[TSDB_MAX_REPLICA]; } SAlterVnodeReq; @@ -1110,6 +1121,14 @@ typedef struct { SSchema* pSchemas; } STableMetaRsp; +typedef struct { + STableMetaRsp* pMeta; +} SMAlterStbRsp; + +int32_t tEncodeSMAlterStbRsp(SEncoder *pEncoder, const SMAlterStbRsp *pRsp); +int32_t tDecodeSMAlterStbRsp(SDecoder *pDecoder, SMAlterStbRsp *pRsp); +void tFreeSMAlterStbRsp(SMAlterStbRsp* pRsp); + int32_t tSerializeSTableMetaRsp(void* buf, int32_t bufLen, STableMetaRsp* pRsp); int32_t tDeserializeSTableMetaRsp(void* buf, int32_t bufLen, STableMetaRsp* pRsp); void tFreeSTableMetaRsp(STableMetaRsp* pRsp); @@ -1440,8 +1459,10 @@ typedef struct { int32_t code; } STaskDropRsp; -#define STREAM_TRIGGER_AT_ONCE 1 -#define 
STREAM_TRIGGER_WINDOW_CLOSE 2 +#define STREAM_TRIGGER_AT_ONCE_SMA 0 +#define STREAM_TRIGGER_AT_ONCE 1 +#define STREAM_TRIGGER_WINDOW_CLOSE 2 +#define STREAM_TRIGGER_WINDOW_CLOSE_SMA 3 typedef struct { char name[TSDB_TABLE_FNAME_LEN]; @@ -1473,15 +1494,22 @@ typedef struct { int64_t streamId; } SMVCreateStreamRsp, SMSCreateStreamRsp; +enum { + TOPIC_SUB_TYPE__DB = 1, + TOPIC_SUB_TYPE__TABLE, + TOPIC_SUB_TYPE__COLUMN, +}; + typedef struct { char name[TSDB_TOPIC_FNAME_LEN]; // accout.topic int8_t igExists; - int8_t withTbName; - int8_t withSchema; - int8_t withTag; + int8_t subType; char* sql; - char* ast; - char subscribeDbName[TSDB_DB_NAME_LEN]; + char subDbName[TSDB_DB_FNAME_LEN]; + union { + char* ast; + char subStbName[TSDB_TABLE_FNAME_LEN]; + }; } SCMCreateTopicReq; int32_t tSerializeSCMCreateTopicReq(void* buf, int32_t bufLen, const SCMCreateTopicReq* pReq); @@ -1664,6 +1692,10 @@ typedef struct { int32_t tSerializeSMDropCgroupReq(void* buf, int32_t bufLen, SMDropCgroupReq* pReq); int32_t tDeserializeSMDropCgroupReq(void* buf, int32_t bufLen, SMDropCgroupReq* pReq); +typedef struct { + int8_t reserved; +} SMDropCgroupRsp; + typedef struct { char name[TSDB_TABLE_FNAME_LEN]; int8_t alterType; @@ -1725,9 +1757,9 @@ int32_t tDecodeSVDropStbReq(SDecoder* pCoder, SVDropStbReq* pReq); #define TD_CREATE_IF_NOT_EXISTS 0x1 typedef struct SVCreateTbReq { int32_t flags; + char* name; tb_uid_t uid; int64_t ctime; - char* name; int32_t ttl; int8_t type; union { @@ -1744,6 +1776,15 @@ typedef struct SVCreateTbReq { int tEncodeSVCreateTbReq(SEncoder* pCoder, const SVCreateTbReq* pReq); int tDecodeSVCreateTbReq(SDecoder* pCoder, SVCreateTbReq* pReq); +static FORCE_INLINE void tdDestroySVCreateTbReq(SVCreateTbReq* req) { + taosMemoryFreeClear(req->name); + if (req->type == TSDB_CHILD_TABLE) { + taosMemoryFreeClear(req->ctb.pTag); + } else if (req->type == TSDB_NORMAL_TABLE) { + taosMemoryFreeClear(req->ntb.schemaRow.pSchema); + } +} + typedef struct { int32_t nReqs; union { @@ -1841,7 +1882,8 @@ int32_t tEncodeSVAlterTbReq(SEncoder* pEncoder, const SVAlterTbReq* pReq); int32_t tDecodeSVAlterTbReq(SDecoder* pDecoder, SVAlterTbReq* pReq); typedef struct { - int32_t code; + int32_t code; + STableMetaRsp* pMeta; } SVAlterTbRsp; int32_t tEncodeSVAlterTbRsp(SEncoder* pEncoder, const SVAlterTbRsp* pRsp); @@ -1934,6 +1976,7 @@ typedef struct { int8_t killConnection; int8_t align[3]; SEpSet epSet; + SArray* pQnodeList; } SQueryHbRspBasic; typedef struct { @@ -2013,7 +2056,10 @@ static FORCE_INLINE void tFreeClientKv(void* pKv) { static FORCE_INLINE void tFreeClientHbRsp(void* pRsp) { SClientHbRsp* rsp = (SClientHbRsp*)pRsp; - taosMemoryFreeClear(rsp->query); + if (rsp->query) { + taosArrayDestroy(rsp->query->pQnodeList); + taosMemoryFreeClear(rsp->query); + } if (rsp->info) taosArrayDestroyEx(rsp->info, tFreeClientKv); } @@ -2141,11 +2187,6 @@ static FORCE_INLINE void* taosDecodeSMqMsg(void* buf, SMqHbMsg* pMsg) { return buf; } -enum { - TOPIC_SUB_TYPE__DB = 1, - TOPIC_SUB_TYPE__TABLE, -}; - typedef struct { SMsgHead head; int64_t leftForVer; @@ -2165,10 +2206,8 @@ typedef struct { int64_t newConsumerId; char subKey[TSDB_SUBSCRIBE_KEY_LEN]; int8_t subType; - int8_t withTbName; - int8_t withSchema; - int8_t withTag; char* qmsg; + int64_t suid; } SMqRebVgReq; static FORCE_INLINE int32_t tEncodeSMqRebVgReq(void** buf, const SMqRebVgReq* pReq) { @@ -2179,11 +2218,10 @@ static FORCE_INLINE int32_t tEncodeSMqRebVgReq(void** buf, const SMqRebVgReq* pR tlen += taosEncodeFixedI64(buf, pReq->newConsumerId); tlen += 
taosEncodeString(buf, pReq->subKey); tlen += taosEncodeFixedI8(buf, pReq->subType); - tlen += taosEncodeFixedI8(buf, pReq->withTbName); - tlen += taosEncodeFixedI8(buf, pReq->withSchema); - tlen += taosEncodeFixedI8(buf, pReq->withTag); - if (pReq->subType == TOPIC_SUB_TYPE__TABLE) { + if (pReq->subType == TOPIC_SUB_TYPE__COLUMN) { tlen += taosEncodeString(buf, pReq->qmsg); + } else if (pReq->subType == TOPIC_SUB_TYPE__TABLE) { + tlen += taosEncodeFixedI64(buf, pReq->suid); } return tlen; } @@ -2195,11 +2233,10 @@ static FORCE_INLINE void* tDecodeSMqRebVgReq(const void* buf, SMqRebVgReq* pReq) buf = taosDecodeFixedI64(buf, &pReq->newConsumerId); buf = taosDecodeStringTo(buf, pReq->subKey); buf = taosDecodeFixedI8(buf, &pReq->subType); - buf = taosDecodeFixedI8(buf, &pReq->withTbName); - buf = taosDecodeFixedI8(buf, &pReq->withSchema); - buf = taosDecodeFixedI8(buf, &pReq->withTag); - if (pReq->subType == TOPIC_SUB_TYPE__TABLE) { + if (pReq->subType == TOPIC_SUB_TYPE__COLUMN) { buf = taosDecodeString(buf, &pReq->qmsg); + } else if (pReq->subType == TOPIC_SUB_TYPE__TABLE) { + buf = taosDecodeFixedI64(buf, &pReq->suid); } return (void*)buf; } @@ -2263,6 +2300,7 @@ typedef struct { int8_t intervalUnit; // MACRO: TIME_UNIT_XXX int8_t slidingUnit; // MACRO: TIME_UNIT_XXX int8_t timezoneInt; // sma data expired if timezone changes. + int32_t dstVgId; char indexName[TSDB_INDEX_NAME_LEN]; int32_t exprLen; int32_t tagsFilterLen; @@ -2309,19 +2347,19 @@ typedef struct { STSma* tSma; } STSmaWrapper; -static FORCE_INLINE void tdDestroyTSma(STSma* pSma) { +static FORCE_INLINE void tDestroyTSma(STSma* pSma) { if (pSma) { taosMemoryFreeClear(pSma->expr); taosMemoryFreeClear(pSma->tagsFilter); } } -static FORCE_INLINE void tdDestroyTSmaWrapper(STSmaWrapper* pSW, bool deepCopy) { +static FORCE_INLINE void tDestroyTSmaWrapper(STSmaWrapper* pSW, bool deepCopy) { if (pSW) { if (pSW->tSma) { if (deepCopy) { for (uint32_t i = 0; i < pSW->number; ++i) { - tdDestroyTSma(pSW->tSma + i); + tDestroyTSma(pSW->tSma + i); } } taosMemoryFreeClear(pSW->tSma); @@ -2329,8 +2367,8 @@ static FORCE_INLINE void tdDestroyTSmaWrapper(STSmaWrapper* pSW, bool deepCopy) } } -static FORCE_INLINE void* tdFreeTSmaWrapper(STSmaWrapper* pSW, bool deepCopy) { - tdDestroyTSmaWrapper(pSW, deepCopy); +static FORCE_INLINE void* tFreeTSmaWrapper(STSmaWrapper* pSW, bool deepCopy) { + tDestroyTSmaWrapper(pSW, deepCopy); taosMemoryFreeClear(pSW); return NULL; } @@ -2357,6 +2395,17 @@ static int32_t tDecodeTSmaWrapper(SDecoder* pDecoder, STSmaWrapper* pReq) { return 0; } +typedef struct { + int64_t tsmaIndexUid; + STimeWindow queryWindow; +} SVGetTsmaExpWndsReq; + +typedef struct { + int64_t tsmaIndexUid; + int32_t numExpWnds; + TSKEY* expWndsStartTs; +} SVGetTsmaExpWndsRsp; + typedef struct { int idx; } SMCreateFullTextReq; @@ -2405,7 +2454,7 @@ typedef struct { int32_t epoch; uint64_t reqId; int64_t consumerId; - int64_t waitTime; + int64_t timeout; int64_t currentOffset; } SMqPollReq; @@ -2432,7 +2481,7 @@ static FORCE_INLINE void* tDecodeSMqSubVgEp(void* buf, SMqSubVgEp* pVgEp) { typedef struct { char topic[TSDB_TOPIC_FNAME_LEN]; - int8_t isSchemaAdaptive; + char db[TSDB_DB_FNAME_LEN]; SArray* vgs; // SArray SSchemaWrapper schema; } SMqSubTopicEp; @@ -2440,7 +2489,7 @@ typedef struct { static FORCE_INLINE int32_t tEncodeSMqSubTopicEp(void** buf, const SMqSubTopicEp* pTopicEp) { int32_t tlen = 0; tlen += taosEncodeString(buf, pTopicEp->topic); - tlen += taosEncodeFixedI8(buf, pTopicEp->isSchemaAdaptive); + tlen += taosEncodeString(buf, 
pTopicEp->db); int32_t sz = taosArrayGetSize(pTopicEp->vgs); tlen += taosEncodeFixedI32(buf, sz); for (int32_t i = 0; i < sz; i++) { @@ -2453,7 +2502,7 @@ static FORCE_INLINE int32_t tEncodeSMqSubTopicEp(void** buf, const SMqSubTopicEp static FORCE_INLINE void* tDecodeSMqSubTopicEp(void* buf, SMqSubTopicEp* pTopicEp) { buf = taosDecodeStringTo(buf, pTopicEp->topic); - buf = taosDecodeFixedI8(buf, &pTopicEp->isSchemaAdaptive); + buf = taosDecodeStringTo(buf, pTopicEp->db); int32_t sz; buf = taosDecodeFixedI32(buf, &sz); pTopicEp->vgs = taosArrayInit(sz, sizeof(SMqSubVgEp)); @@ -2531,6 +2580,12 @@ static FORCE_INLINE void* tDecodeSMqDataBlkRsp(const void* buf, SMqDataBlkRsp* p buf = taosDecodeFixedI8(buf, &pRsp->withTbName); buf = taosDecodeFixedI8(buf, &pRsp->withSchema); buf = taosDecodeFixedI8(buf, &pRsp->withTag); + if (pRsp->withTbName) { + pRsp->blockTbName = taosArrayInit(pRsp->blockNum, sizeof(void*)); + } + if (pRsp->withSchema) { + pRsp->blockSchema = taosArrayInit(pRsp->blockNum, sizeof(void*)); + } for (int32_t i = 0; i < pRsp->blockNum; i++) { int32_t bLen = 0; @@ -2540,20 +2595,14 @@ static FORCE_INLINE void* tDecodeSMqDataBlkRsp(const void* buf, SMqDataBlkRsp* p taosArrayPush(pRsp->blockDataLen, &bLen); taosArrayPush(pRsp->blockData, &data); if (pRsp->withSchema) { - pRsp->blockSchema = taosArrayInit(pRsp->blockNum, sizeof(void*)); SSchemaWrapper* pSW = (SSchemaWrapper*)taosMemoryMalloc(sizeof(SSchemaWrapper)); buf = taosDecodeSSchemaWrapper(buf, pSW); taosArrayPush(pRsp->blockSchema, &pSW); - } else { - pRsp->blockSchema = NULL; } if (pRsp->withTbName) { - pRsp->blockTbName = taosArrayInit(pRsp->blockNum, sizeof(void*)); char* name = NULL; buf = taosDecodeString(buf, &name); taosArrayPush(pRsp->blockTbName, &name); - } else { - pRsp->blockTbName = NULL; } } } @@ -2620,6 +2669,23 @@ typedef struct { int32_t tEncodeSVSubmitReq(SEncoder* pCoder, const SVSubmitReq* pReq); int32_t tDecodeSVSubmitReq(SDecoder* pCoder, SVSubmitReq* pReq); +// TDMT_VND_DELETE +typedef struct { + TSKEY sKey; + TSKEY eKey; + + // super table + char* stbName; + + // child/normal + char* tbName; +} SVDeleteReq; + +typedef struct { + int32_t code; + // TODO +} SVDeleteRsp; + #pragma pack(pop) #ifdef __cplusplus diff --git a/include/common/tmsgcb.h b/include/common/tmsgcb.h index 9fa657a2a6ad78fdd70ed1b4e2ed816b06780351..e99377f9b4b27871506d1739520060b8caa51417 100644 --- a/include/common/tmsgcb.h +++ b/include/common/tmsgcb.h @@ -38,7 +38,7 @@ typedef enum { QUEUE_MAX, } EQueueType; -typedef int32_t (*PutToQueueFp)(void* pMgmt, SRpcMsg* pMsg); +typedef int32_t (*PutToQueueFp)(void* pMgmt, EQueueType qtype, SRpcMsg* pMsg); typedef int32_t (*GetQueueSizeFp)(void* pMgmt, int32_t vgId, EQueueType qtype); typedef int32_t (*SendReqFp)(const SEpSet* pEpSet, SRpcMsg* pMsg); typedef void (*SendRspFp)(SRpcMsg* pMsg); @@ -50,7 +50,7 @@ typedef void (*ReportStartup)(const char* name, const char* desc); typedef struct { void* mgmt; void* clientRpc; - PutToQueueFp queueFps[QUEUE_MAX]; + PutToQueueFp putToQueueFp; GetQueueSizeFp qsizeFp; SendReqFp sendReqFp; SendRspFp sendRspFp; diff --git a/include/common/tmsgdef.h b/include/common/tmsgdef.h index 4687062b180b719cbfa0aea18948d52face10de6..fe9ffda69cb82f4c0d2113218fb44b4889158730 100644 --- a/include/common/tmsgdef.h +++ b/include/common/tmsgdef.h @@ -70,106 +70,102 @@ enum { // Requests handled by DNODE TD_NEW_MSG_SEG(TDMT_DND_MSG) TD_DEF_MSG_TYPE(TDMT_DND_CREATE_MNODE, "dnode-create-mnode", NULL, NULL) - TD_DEF_MSG_TYPE(TDMT_DND_ALTER_MNODE, "dnode-alter-mnode", NULL, 
NULL) TD_DEF_MSG_TYPE(TDMT_DND_DROP_MNODE, "dnode-drop-mnode", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_DND_CREATE_QNODE, "dnode-create-qnode", NULL, NULL) - TD_DEF_MSG_TYPE(TDMT_DND_ALTER_QNODE, "dnode-alter-qnode", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_DND_DROP_QNODE, "dnode-drop-qnode", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_DND_CREATE_SNODE, "dnode-create-snode", NULL, NULL) - TD_DEF_MSG_TYPE(TDMT_DND_ALTER_SNODE, "dnode-alter-snode", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_DND_DROP_SNODE, "dnode-drop-snode", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_DND_CREATE_BNODE, "dnode-create-bnode", NULL, NULL) - TD_DEF_MSG_TYPE(TDMT_DND_ALTER_BNODE, "dnode-alter-bnode", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_DND_DROP_BNODE, "dnode-drop-bnode", NULL, NULL) - TD_DEF_MSG_TYPE(TDMT_DND_CREATE_VNODE, "dnode-create-vnode", NULL, NULL) - TD_DEF_MSG_TYPE(TDMT_DND_DROP_VNODE, "dnode-drop-vnode", NULL, NULL) - TD_DEF_MSG_TYPE(TDMT_DND_CONFIG_DNODE, "dnode-config-dnode", NULL, NULL) - TD_DEF_MSG_TYPE(TDMT_DND_SERVER_STATUS, "dnode-server-status", NULL, NULL) - TD_DEF_MSG_TYPE(TDMT_DND_NET_TEST, "dnode-net-test", NULL, NULL) - + TD_DEF_MSG_TYPE(TDMT_DND_CREATE_VNODE, "create-vnode", NULL, NULL) + TD_DEF_MSG_TYPE(TDMT_DND_DROP_VNODE, "drop-vnode", NULL, NULL) + TD_DEF_MSG_TYPE(TDMT_DND_SERVER_STATUS, "server-status", NULL, NULL) + TD_DEF_MSG_TYPE(TDMT_DND_NET_TEST, "net-test", NULL, NULL) + TD_DEF_MSG_TYPE(TDMT_DND_CONFIG_DNODE, "config-dnode", NULL, NULL) + // Requests handled by MNODE TD_NEW_MSG_SEG(TDMT_MND_MSG) - TD_DEF_MSG_TYPE(TDMT_MND_CONNECT, "mnode-connect", NULL, NULL) - TD_DEF_MSG_TYPE(TDMT_MND_CREATE_ACCT, "mnode-create-acct", NULL, NULL) - TD_DEF_MSG_TYPE(TDMT_MND_ALTER_ACCT, "mnode-alter-acct", NULL, NULL) - TD_DEF_MSG_TYPE(TDMT_MND_DROP_ACCT, "mnode-drop-acct", NULL, NULL) - TD_DEF_MSG_TYPE(TDMT_MND_CREATE_USER, "mnode-create-user", NULL, NULL) - TD_DEF_MSG_TYPE(TDMT_MND_ALTER_USER, "mnode-alter-user", NULL, NULL) - TD_DEF_MSG_TYPE(TDMT_MND_DROP_USER, "mnode-drop-user", NULL, NULL) - TD_DEF_MSG_TYPE(TDMT_MND_GET_USER_AUTH, "mnode-get-user-auth", NULL, NULL) - TD_DEF_MSG_TYPE(TDMT_MND_CREATE_DNODE, "mnode-create-dnode", NULL, NULL) - TD_DEF_MSG_TYPE(TDMT_MND_CONFIG_DNODE, "mnode-config-dnode", NULL, NULL) - TD_DEF_MSG_TYPE(TDMT_MND_ALTER_DNODE, "mnode-alter-dnode", NULL, NULL) - TD_DEF_MSG_TYPE(TDMT_MND_DROP_DNODE, "mnode-drop-dnode", NULL, NULL) - TD_DEF_MSG_TYPE(TDMT_MND_CREATE_MNODE, "mnode-create-mnode", NULL, NULL) - TD_DEF_MSG_TYPE(TDMT_MND_ALTER_MNODE, "mnode-alter-mnode", NULL, NULL) - TD_DEF_MSG_TYPE(TDMT_MND_DROP_MNODE, "mnode-drop-mnode", NULL, NULL) - TD_DEF_MSG_TYPE(TDMT_MND_CREATE_QNODE, "mnode-create-qnode", NULL, NULL) - TD_DEF_MSG_TYPE(TDMT_MND_ALTER_QNODE, "mnode-alter-qnode", NULL, NULL) - TD_DEF_MSG_TYPE(TDMT_MND_DROP_QNODE, "mnode-drop-qnode", NULL, NULL) - TD_DEF_MSG_TYPE(TDMT_MND_CREATE_SNODE, "mnode-create-snode", NULL, NULL) - TD_DEF_MSG_TYPE(TDMT_MND_ALTER_SNODE, "mnode-alter-snode", NULL, NULL) - TD_DEF_MSG_TYPE(TDMT_MND_DROP_SNODE, "mnode-drop-snode", NULL, NULL) - TD_DEF_MSG_TYPE(TDMT_MND_CREATE_BNODE, "mnode-create-bnode", NULL, NULL) - TD_DEF_MSG_TYPE(TDMT_MND_ALTER_BNODE, "mnode-alter-bnode", NULL, NULL) - TD_DEF_MSG_TYPE(TDMT_MND_DROP_BNODE, "mnode-drop-bnode", NULL, NULL) - TD_DEF_MSG_TYPE(TDMT_MND_CREATE_DB, "mnode-create-db", NULL, NULL) - TD_DEF_MSG_TYPE(TDMT_MND_DROP_DB, "mnode-drop-db", NULL, NULL) - TD_DEF_MSG_TYPE(TDMT_MND_USE_DB, "mnode-use-db", NULL, NULL) - TD_DEF_MSG_TYPE(TDMT_MND_ALTER_DB, "mnode-alter-db", NULL, NULL) - TD_DEF_MSG_TYPE(TDMT_MND_SYNC_DB, "mnode-sync-db", NULL, NULL) - 
TD_DEF_MSG_TYPE(TDMT_MND_COMPACT_DB, "mnode-compact-db", NULL, NULL) - TD_DEF_MSG_TYPE(TDMT_MND_CREATE_FUNC, "mnode-create-func", NULL, NULL) - TD_DEF_MSG_TYPE(TDMT_MND_RETRIEVE_FUNC, "mnode-retrieve-func", NULL, NULL) - TD_DEF_MSG_TYPE(TDMT_MND_DROP_FUNC, "mnode-drop-func", NULL, NULL) - TD_DEF_MSG_TYPE(TDMT_MND_CREATE_STB, "mnode-create-stb", NULL, NULL) - TD_DEF_MSG_TYPE(TDMT_MND_ALTER_STB, "mnode-alter-stb", NULL, NULL) - TD_DEF_MSG_TYPE(TDMT_MND_DROP_STB, "mnode-drop-stb", NULL, NULL) - TD_DEF_MSG_TYPE(TDMT_MND_CREATE_SMA, "mnode-create-sma", NULL, NULL) - TD_DEF_MSG_TYPE(TDMT_MND_DROP_SMA, "mnode-drop-sma", NULL, NULL) - TD_DEF_MSG_TYPE(TDMT_MND_TABLE_META, "mnode-table-meta", NULL, NULL) - TD_DEF_MSG_TYPE(TDMT_MND_VGROUP_LIST, "mnode-vgroup-list", NULL, NULL) - TD_DEF_MSG_TYPE(TDMT_MND_QNODE_LIST, "mnode-qnode-list", NULL, NULL) - TD_DEF_MSG_TYPE(TDMT_MND_KILL_QUERY, "mnode-kill-query", NULL, NULL) - TD_DEF_MSG_TYPE(TDMT_MND_KILL_CONN, "mnode-kill-conn", NULL, NULL) - TD_DEF_MSG_TYPE(TDMT_MND_HEARTBEAT, "mnode-heartbeat", SClientHbBatchReq, SClientHbBatchRsp) - TD_DEF_MSG_TYPE(TDMT_MND_SHOW, "mnode-show", NULL, NULL) - TD_DEF_MSG_TYPE(TDMT_MND_SYSTABLE_RETRIEVE, "mnode-systable-retrieve", NULL, NULL) - TD_DEF_MSG_TYPE(TDMT_MND_STATUS, "mnode-status", NULL, NULL) - TD_DEF_MSG_TYPE(TDMT_MND_TRANS_TIMER, "mnode-trans-tmr", NULL, NULL) - TD_DEF_MSG_TYPE(TDMT_MND_KILL_TRANS, "mnode-kill-trans", NULL, NULL) - TD_DEF_MSG_TYPE(TDMT_MND_TELEM_TIMER, "mnode-telem-tmr", SMTimerReq, SMTimerReq) - TD_DEF_MSG_TYPE(TDMT_MND_GRANT, "mnode-grant", NULL, NULL) - TD_DEF_MSG_TYPE(TDMT_MND_AUTH, "mnode-auth", NULL, NULL) - TD_DEF_MSG_TYPE(TDMT_MND_CREATE_TOPIC, "mnode-create-topic", SMCreateTopicReq, SMCreateTopicRsp) - TD_DEF_MSG_TYPE(TDMT_MND_ALTER_TOPIC, "mnode-alter-topic", NULL, NULL) - TD_DEF_MSG_TYPE(TDMT_MND_DROP_TOPIC, "mnode-drop-topic", NULL, NULL) - TD_DEF_MSG_TYPE(TDMT_MND_DROP_CGROUP, "mnode-drop-cgroup", NULL, NULL) - TD_DEF_MSG_TYPE(TDMT_MND_SUBSCRIBE, "mnode-subscribe", SCMSubscribeReq, SCMSubscribeRsp) - TD_DEF_MSG_TYPE(TDMT_MND_MQ_ASK_EP, "mnode-mq-ask-ep", SMqAskEpReq, SMqAskEpRsp) - TD_DEF_MSG_TYPE(TDMT_MND_MQ_TIMER, "mnode-mq-tmr", SMTimerReq, NULL) - TD_DEF_MSG_TYPE(TDMT_MND_MQ_CONSUMER_LOST, "mnode-mq-consumer-lost", SMqConsumerLostMsg, NULL) - TD_DEF_MSG_TYPE(TDMT_MND_MQ_CONSUMER_RECOVER, "mnode-mq-consumer-recover", SMqConsumerRecoverMsg, NULL) - TD_DEF_MSG_TYPE(TDMT_MND_MQ_DO_REBALANCE, "mnode-mq-do-rebalance", SMqDoRebalanceMsg, NULL) - TD_DEF_MSG_TYPE(TDMT_MND_MQ_COMMIT_OFFSET, "mnode-mq-commit-offset", SMqCMCommitOffsetReq, SMqCMCommitOffsetRsp) - TD_DEF_MSG_TYPE(TDMT_MND_CREATE_STREAM, "mnode-create-stream", SCMCreateStreamReq, SCMCreateStreamRsp) - TD_DEF_MSG_TYPE(TDMT_MND_ALTER_STREAM, "mnode-alter-stream", NULL, NULL) - TD_DEF_MSG_TYPE(TDMT_MND_DROP_STREAM, "mnode-drop-stream", NULL, NULL) - TD_DEF_MSG_TYPE(TDMT_MND_CREATE_INDEX, "mnode-create-index", NULL, NULL) - TD_DEF_MSG_TYPE(TDMT_MND_DROP_INDEX, "mnode-drop-index", NULL, NULL) - TD_DEF_MSG_TYPE(TDMT_MND_GET_DB_CFG, "mnode-get-db-cfg", NULL, NULL) - TD_DEF_MSG_TYPE(TDMT_MND_GET_INDEX, "mnode-get-index", NULL, NULL) - TD_DEF_MSG_TYPE(TDMT_MND_APPLY_MSG, "mnode-apply-msg", NULL, NULL) + TD_DEF_MSG_TYPE(TDMT_MND_CONNECT, "connect", NULL, NULL) + TD_DEF_MSG_TYPE(TDMT_MND_CREATE_ACCT, "create-acct", NULL, NULL) + TD_DEF_MSG_TYPE(TDMT_MND_ALTER_ACCT, "alter-acct", NULL, NULL) + TD_DEF_MSG_TYPE(TDMT_MND_DROP_ACCT, "drop-acct", NULL, NULL) + TD_DEF_MSG_TYPE(TDMT_MND_CREATE_USER, "create-user", NULL, NULL) + 
TD_DEF_MSG_TYPE(TDMT_MND_ALTER_USER, "alter-user", NULL, NULL) + TD_DEF_MSG_TYPE(TDMT_MND_DROP_USER, "drop-user", NULL, NULL) + TD_DEF_MSG_TYPE(TDMT_MND_GET_USER_AUTH, "get-user-auth", NULL, NULL) + TD_DEF_MSG_TYPE(TDMT_MND_CREATE_DNODE, "create-dnode", NULL, NULL) + TD_DEF_MSG_TYPE(TDMT_MND_CONFIG_DNODE, "config-dnode", NULL, NULL) + TD_DEF_MSG_TYPE(TDMT_MND_ALTER_DNODE, "alter-dnode", NULL, NULL) + TD_DEF_MSG_TYPE(TDMT_MND_DROP_DNODE, "drop-dnode", NULL, NULL) + TD_DEF_MSG_TYPE(TDMT_MND_CREATE_MNODE, "create-mnode", NULL, NULL) + TD_DEF_MSG_TYPE(TDMT_MND_ALTER_MNODE, "alter-mnode", NULL, NULL) + TD_DEF_MSG_TYPE(TDMT_MND_DROP_MNODE, "drop-mnode", NULL, NULL) + TD_DEF_MSG_TYPE(TDMT_MND_CREATE_QNODE, "create-qnode", NULL, NULL) + TD_DEF_MSG_TYPE(TDMT_MND_ALTER_QNODE, "alter-qnode", NULL, NULL) + TD_DEF_MSG_TYPE(TDMT_MND_DROP_QNODE, "drop-qnode", NULL, NULL) + TD_DEF_MSG_TYPE(TDMT_MND_CREATE_SNODE, "create-snode", NULL, NULL) + TD_DEF_MSG_TYPE(TDMT_MND_ALTER_SNODE, "alter-snode", NULL, NULL) + TD_DEF_MSG_TYPE(TDMT_MND_DROP_SNODE, "drop-snode", NULL, NULL) + TD_DEF_MSG_TYPE(TDMT_MND_CREATE_BNODE, "create-bnode", NULL, NULL) + TD_DEF_MSG_TYPE(TDMT_MND_ALTER_BNODE, "alter-bnode", NULL, NULL) + TD_DEF_MSG_TYPE(TDMT_MND_DROP_BNODE, "drop-bnode", NULL, NULL) + TD_DEF_MSG_TYPE(TDMT_MND_CREATE_DB, "create-db", NULL, NULL) + TD_DEF_MSG_TYPE(TDMT_MND_DROP_DB, "drop-db", NULL, NULL) + TD_DEF_MSG_TYPE(TDMT_MND_USE_DB, "use-db", NULL, NULL) + TD_DEF_MSG_TYPE(TDMT_MND_ALTER_DB, "alter-db", NULL, NULL) + TD_DEF_MSG_TYPE(TDMT_MND_SYNC_DB, "sync-db", NULL, NULL) + TD_DEF_MSG_TYPE(TDMT_MND_COMPACT_DB, "compact-db", NULL, NULL) + TD_DEF_MSG_TYPE(TDMT_MND_CREATE_FUNC, "create-func", NULL, NULL) + TD_DEF_MSG_TYPE(TDMT_MND_RETRIEVE_FUNC, "retrieve-func", NULL, NULL) + TD_DEF_MSG_TYPE(TDMT_MND_DROP_FUNC, "drop-func", NULL, NULL) + TD_DEF_MSG_TYPE(TDMT_MND_CREATE_STB, "create-stb", NULL, NULL) + TD_DEF_MSG_TYPE(TDMT_MND_ALTER_STB, "alter-stb", NULL, NULL) + TD_DEF_MSG_TYPE(TDMT_MND_DROP_STB, "drop-stb", NULL, NULL) + TD_DEF_MSG_TYPE(TDMT_MND_CREATE_SMA, "create-sma", NULL, NULL) + TD_DEF_MSG_TYPE(TDMT_MND_DROP_SMA, "drop-sma", NULL, NULL) + TD_DEF_MSG_TYPE(TDMT_MND_TABLE_META, "table-meta", NULL, NULL) + TD_DEF_MSG_TYPE(TDMT_MND_VGROUP_LIST, "vgroup-list", NULL, NULL) + TD_DEF_MSG_TYPE(TDMT_MND_QNODE_LIST, "qnode-list", NULL, NULL) + TD_DEF_MSG_TYPE(TDMT_MND_KILL_QUERY, "kill-query", NULL, NULL) + TD_DEF_MSG_TYPE(TDMT_MND_KILL_CONN, "kill-conn", NULL, NULL) + TD_DEF_MSG_TYPE(TDMT_MND_HEARTBEAT, "heartbeat", SClientHbBatchReq, SClientHbBatchRsp) + TD_DEF_MSG_TYPE(TDMT_MND_SHOW, "show", NULL, NULL) + TD_DEF_MSG_TYPE(TDMT_MND_SYSTABLE_RETRIEVE, "systable-retrieve", NULL, NULL) + TD_DEF_MSG_TYPE(TDMT_MND_STATUS, "status", NULL, NULL) + TD_DEF_MSG_TYPE(TDMT_MND_TRANS_TIMER, "trans-tmr", NULL, NULL) + TD_DEF_MSG_TYPE(TDMT_MND_KILL_TRANS, "kill-trans", NULL, NULL) + TD_DEF_MSG_TYPE(TDMT_MND_TELEM_TIMER, "telem-tmr", SMTimerReq, SMTimerReq) + TD_DEF_MSG_TYPE(TDMT_MND_GRANT, "grant", NULL, NULL) + TD_DEF_MSG_TYPE(TDMT_MND_AUTH, "auth", NULL, NULL) + TD_DEF_MSG_TYPE(TDMT_MND_CREATE_TOPIC, "create-topic", SMCreateTopicReq, SMCreateTopicRsp) + TD_DEF_MSG_TYPE(TDMT_MND_ALTER_TOPIC, "alter-topic", NULL, NULL) + TD_DEF_MSG_TYPE(TDMT_MND_DROP_TOPIC, "drop-topic", NULL, NULL) + TD_DEF_MSG_TYPE(TDMT_MND_SUBSCRIBE, "subscribe", SCMSubscribeReq, SCMSubscribeRsp) + TD_DEF_MSG_TYPE(TDMT_MND_MQ_ASK_EP, "mq-ask-ep", SMqAskEpReq, SMqAskEpRsp) + TD_DEF_MSG_TYPE(TDMT_MND_MQ_TIMER, "mq-tmr", SMTimerReq, NULL) + 
TD_DEF_MSG_TYPE(TDMT_MND_MQ_CONSUMER_LOST, "mq-consumer-lost", SMqConsumerLostMsg, NULL) + TD_DEF_MSG_TYPE(TDMT_MND_MQ_CONSUMER_RECOVER, "mq-consumer-recover", SMqConsumerRecoverMsg, NULL) + TD_DEF_MSG_TYPE(TDMT_MND_MQ_DO_REBALANCE, "mq-do-rebalance", SMqDoRebalanceMsg, NULL) + TD_DEF_MSG_TYPE(TDMT_MND_MQ_DROP_CGROUP, "mq-drop-cgroup", SMqDropCGroupReq, SMqDropCGroupRsp) + TD_DEF_MSG_TYPE(TDMT_MND_MQ_COMMIT_OFFSET, "mq-commit-offset", SMqCMCommitOffsetReq, SMqCMCommitOffsetRsp) + TD_DEF_MSG_TYPE(TDMT_MND_CREATE_STREAM, "create-stream", SCMCreateStreamReq, SCMCreateStreamRsp) + TD_DEF_MSG_TYPE(TDMT_MND_ALTER_STREAM, "alter-stream", NULL, NULL) + TD_DEF_MSG_TYPE(TDMT_MND_DROP_STREAM, "drop-stream", NULL, NULL) + TD_DEF_MSG_TYPE(TDMT_MND_CREATE_INDEX, "create-index", NULL, NULL) + TD_DEF_MSG_TYPE(TDMT_MND_DROP_INDEX, "drop-index", NULL, NULL) + TD_DEF_MSG_TYPE(TDMT_MND_GET_DB_CFG, "get-db-cfg", NULL, NULL) + TD_DEF_MSG_TYPE(TDMT_MND_GET_INDEX, "get-index", NULL, NULL) + TD_DEF_MSG_TYPE(TDMT_MND_APPLY_MSG, "apply-msg", NULL, NULL) // Requests handled by VNODE TD_NEW_MSG_SEG(TDMT_VND_MSG) - TD_DEF_MSG_TYPE(TDMT_VND_SUBMIT, "vnode-submit", SSubmitReq, SSubmitRsp) - TD_DEF_MSG_TYPE(TDMT_VND_QUERY, "vnode-query", NULL, NULL) - TD_DEF_MSG_TYPE(TDMT_VND_FETCH, "vnode-fetch", NULL, NULL) - TD_DEF_MSG_TYPE(TDMT_VND_CREATE_TABLE, "vnode-create-table", NULL, NULL) - TD_DEF_MSG_TYPE(TDMT_VND_ALTER_TABLE, "vnode-alter-table", NULL, NULL) - TD_DEF_MSG_TYPE(TDMT_VND_DROP_TABLE, "vnode-drop-table", NULL, NULL) - TD_DEF_MSG_TYPE(TDMT_VND_UPDATE_TAG_VAL, "vnode-update-tag-val", NULL, NULL) + TD_DEF_MSG_TYPE(TDMT_VND_SUBMIT, "submit", SSubmitReq, SSubmitRsp) + TD_DEF_MSG_TYPE(TDMT_VND_QUERY, "query", NULL, NULL) + TD_DEF_MSG_TYPE(TDMT_VND_FETCH, "fetch", NULL, NULL) + TD_DEF_MSG_TYPE(TDMT_VND_CREATE_TABLE, "create-table", NULL, NULL) + TD_DEF_MSG_TYPE(TDMT_VND_ALTER_TABLE, "alter-table", NULL, NULL) + TD_DEF_MSG_TYPE(TDMT_VND_DROP_TABLE, "drop-table", NULL, NULL) + TD_DEF_MSG_TYPE(TDMT_VND_UPDATE_TAG_VAL, "update-tag-val", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_VND_TABLE_META, "vnode-table-meta", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_VND_TABLES_META, "vnode-tables-meta", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_VND_CREATE_STB, "vnode-create-stb", SVCreateStbReq, NULL) @@ -181,8 +177,6 @@ enum { TD_DEF_MSG_TYPE(TDMT_VND_MQ_DISCONNECT, "vnode-mq-disconnect", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_VND_MQ_VG_CHANGE, "vnode-mq-vg-change", SMqRebVgReq, SMqRebVgRsp) TD_DEF_MSG_TYPE(TDMT_VND_MQ_VG_DELETE, "vnode-mq-vg-delete", SMqVDeleteReq, SMqVDeleteRsp) - TD_DEF_MSG_TYPE(TDMT_VND_RES_READY, "vnode-res-ready", NULL, NULL) - TD_DEF_MSG_TYPE(TDMT_VND_TASKS_STATUS, "vnode-tasks-status", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_VND_CANCEL_TASK, "vnode-cancel-task", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_VND_DROP_TASK, "vnode-drop-task", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_VND_CREATE_TOPIC, "vnode-create-topic", NULL, NULL) @@ -207,6 +201,7 @@ enum { TD_DEF_MSG_TYPE(TDMT_VND_CANCEL_SMA, "vnode-cancel-sma", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_VND_DROP_SMA, "vnode-drop-sma", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_VND_SUBMIT_RSMA, "vnode-submit-rsma", SSubmitReq, SSubmitRsp) + TD_DEF_MSG_TYPE(TDMT_VND_GET_TSMA_EXP_WNDS, "vnode-get-tsma-expired-windows", SVGetTsmaExpWndsReq, SVGetTsmaExpWndsRsp) TD_DEF_MSG_TYPE(TDMT_VND_SYNC_TIMEOUT, "vnode-sync-timeout", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_VND_SYNC_PING, "vnode-sync-ping", NULL, NULL) @@ -223,9 +218,11 @@ enum { TD_DEF_MSG_TYPE(TDMT_VND_SYNC_APPLY_MSG, "vnode-sync-apply-msg", NULL, NULL) 
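/*
 * A minimal, self-contained sketch of the X-macro pattern that TD_NEW_MSG_SEG /
 * TD_DEF_MSG_TYPE rely on (the TD_MSG_NUMBER_ guard visible at the end of this
 * enum hints at it). All names below are illustrative, not the real tmsgdef.h
 * expansion. The point: the quoted strings ("create-user", "mq-tmr", ...) are
 * only ever looked up by code for logging, never compared, which is why this
 * patch can strip the "mnode-"/"dnode-" prefixes freely; message codes, by
 * contrast, are derived from list position, so removing an entry renumbers
 * everything after it within its segment.
 */
#include <stdio.h>

#define DEMO_MSG_LIST(X)     \
  X(DEMO_CONNECT, "connect") \
  X(DEMO_SUBMIT, "submit")   \
  X(DEMO_FETCH, "fetch")

/* First expansion of the list: positional message codes. */
enum {
#define AS_ENUM(code, name) code,
  DEMO_MSG_LIST(AS_ENUM)
#undef AS_ENUM
  DEMO_MAX
};

/* Second expansion of the same list: display names for logs/monitoring. */
static const char *demoMsgName[] = {
#define AS_NAME(code, name) name,
  DEMO_MSG_LIST(AS_NAME)
#undef AS_NAME
};

int main(void) {
  printf("%d -> %s\n", DEMO_SUBMIT, demoMsgName[DEMO_SUBMIT]); /* 1 -> submit */
  return 0;
}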
TD_DEF_MSG_TYPE(TDMT_VND_SYNC_CONFIG_CHANGE, "vnode-sync-config-change", NULL, NULL) - TD_DEF_MSG_TYPE(TDMT_VND_SYNC_VNODE, "vnode-sync-vnode", NULL, NULL) - TD_DEF_MSG_TYPE(TDMT_VND_ALTER_VNODE, "vnode-alter-vnode", NULL, NULL) - TD_DEF_MSG_TYPE(TDMT_VND_COMPACT_VNODE, "vnode-compact-vnode", NULL, NULL) + TD_DEF_MSG_TYPE(TDMT_VND_ALTER_CONFIG, "vnode-alter-config", NULL, NULL) + TD_DEF_MSG_TYPE(TDMT_VND_ALTER_REPLICA, "vnode-alter-replica", NULL, NULL) + TD_DEF_MSG_TYPE(TDMT_VND_COMPACT, "vnode-compact", NULL, NULL) + + TD_DEF_MSG_TYPE(TDMT_VND_DELETE, "vnode-delete-data", SVDeleteReq, SVDeleteRsp) // Requests handled by QNODE TD_NEW_MSG_SEG(TDMT_QND_MSG) @@ -254,6 +251,7 @@ enum { TD_DEF_MSG_TYPE(TDMT_MON_BM_INFO, "monitor-binfo", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_MON_VM_LOAD, "monitor-vload", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_MON_MM_LOAD, "monitor-mload", NULL, NULL) + TD_DEF_MSG_TYPE(TDMT_MON_QM_LOAD, "monitor-qload", NULL, NULL) #if defined(TD_MSG_NUMBER_) TDMT_MAX diff --git a/include/common/ttokendef.h b/include/common/ttokendef.h index 2fc524eeac39eefba6ce87c39d7bf4746fd83de1..c3b0e54f3da416ccc2e2b2dbd6c05ec356a50a30 100644 --- a/include/common/ttokendef.h +++ b/include/common/ttokendef.h @@ -127,134 +127,131 @@ #define TK_BLOB 109 #define TK_VARBINARY 110 #define TK_DECIMAL 111 -#define TK_DELAY 112 -#define TK_FILE_FACTOR 113 -#define TK_NK_FLOAT 114 -#define TK_ROLLUP 115 -#define TK_TTL 116 -#define TK_SMA 117 -#define TK_SHOW 118 -#define TK_DATABASES 119 -#define TK_TABLES 120 -#define TK_STABLES 121 -#define TK_MNODES 122 -#define TK_MODULES 123 -#define TK_QNODES 124 -#define TK_FUNCTIONS 125 -#define TK_INDEXES 126 -#define TK_ACCOUNTS 127 -#define TK_APPS 128 -#define TK_CONNECTIONS 129 -#define TK_LICENCE 130 -#define TK_GRANTS 131 -#define TK_QUERIES 132 -#define TK_SCORES 133 -#define TK_TOPICS 134 -#define TK_VARIABLES 135 -#define TK_BNODES 136 -#define TK_SNODES 137 -#define TK_CLUSTER 138 -#define TK_TRANSACTIONS 139 -#define TK_LIKE 140 -#define TK_INDEX 141 -#define TK_FULLTEXT 142 -#define TK_FUNCTION 143 -#define TK_INTERVAL 144 -#define TK_TOPIC 145 -#define TK_AS 146 -#define TK_CGROUP 147 -#define TK_WITH 148 -#define TK_SCHEMA 149 -#define TK_DESC 150 -#define TK_DESCRIBE 151 -#define TK_RESET 152 -#define TK_QUERY 153 -#define TK_CACHE 154 -#define TK_EXPLAIN 155 -#define TK_ANALYZE 156 -#define TK_VERBOSE 157 -#define TK_NK_BOOL 158 -#define TK_RATIO 159 -#define TK_COMPACT 160 -#define TK_VNODES 161 -#define TK_IN 162 -#define TK_OUTPUTTYPE 163 -#define TK_AGGREGATE 164 -#define TK_BUFSIZE 165 -#define TK_STREAM 166 -#define TK_INTO 167 -#define TK_TRIGGER 168 -#define TK_AT_ONCE 169 -#define TK_WINDOW_CLOSE 170 -#define TK_WATERMARK 171 -#define TK_KILL 172 -#define TK_CONNECTION 173 -#define TK_TRANSACTION 174 -#define TK_MERGE 175 -#define TK_VGROUP 176 -#define TK_REDISTRIBUTE 177 -#define TK_SPLIT 178 -#define TK_SYNCDB 179 -#define TK_NULL 180 -#define TK_NK_QUESTION 181 -#define TK_NK_ARROW 182 -#define TK_ROWTS 183 -#define TK_TBNAME 184 -#define TK_QSTARTTS 185 -#define TK_QENDTS 186 -#define TK_WSTARTTS 187 -#define TK_WENDTS 188 -#define TK_WDURATION 189 -#define TK_CAST 190 -#define TK_NOW 191 -#define TK_TODAY 192 -#define TK_TIMEZONE 193 -#define TK_COUNT 194 -#define TK_FIRST 195 -#define TK_LAST 196 -#define TK_LAST_ROW 197 -#define TK_BETWEEN 198 -#define TK_IS 199 -#define TK_NK_LT 200 -#define TK_NK_GT 201 -#define TK_NK_LE 202 -#define TK_NK_GE 203 -#define TK_NK_NE 204 -#define TK_MATCH 205 -#define TK_NMATCH 206 -#define TK_CONTAINS 
207 -#define TK_JOIN 208 -#define TK_INNER 209 -#define TK_SELECT 210 -#define TK_DISTINCT 211 -#define TK_WHERE 212 -#define TK_PARTITION 213 -#define TK_BY 214 -#define TK_SESSION 215 -#define TK_STATE_WINDOW 216 -#define TK_SLIDING 217 -#define TK_FILL 218 -#define TK_VALUE 219 -#define TK_NONE 220 -#define TK_PREV 221 -#define TK_LINEAR 222 -#define TK_NEXT 223 -#define TK_GROUP 224 -#define TK_HAVING 225 -#define TK_ORDER 226 -#define TK_SLIMIT 227 -#define TK_SOFFSET 228 -#define TK_LIMIT 229 -#define TK_OFFSET 230 -#define TK_ASC 231 -#define TK_NULLS 232 -#define TK_ID 233 -#define TK_NK_BITNOT 234 -#define TK_INSERT 235 -#define TK_VALUES 236 -#define TK_IMPORT 237 -#define TK_NK_SEMI 238 -#define TK_FILE 239 +#define TK_FILE_FACTOR 112 +#define TK_NK_FLOAT 113 +#define TK_ROLLUP 114 +#define TK_TTL 115 +#define TK_SMA 116 +#define TK_SHOW 117 +#define TK_DATABASES 118 +#define TK_TABLES 119 +#define TK_STABLES 120 +#define TK_MNODES 121 +#define TK_MODULES 122 +#define TK_QNODES 123 +#define TK_FUNCTIONS 124 +#define TK_INDEXES 125 +#define TK_ACCOUNTS 126 +#define TK_APPS 127 +#define TK_CONNECTIONS 128 +#define TK_LICENCE 129 +#define TK_GRANTS 130 +#define TK_QUERIES 131 +#define TK_SCORES 132 +#define TK_TOPICS 133 +#define TK_VARIABLES 134 +#define TK_BNODES 135 +#define TK_SNODES 136 +#define TK_CLUSTER 137 +#define TK_TRANSACTIONS 138 +#define TK_LIKE 139 +#define TK_INDEX 140 +#define TK_FULLTEXT 141 +#define TK_FUNCTION 142 +#define TK_INTERVAL 143 +#define TK_TOPIC 144 +#define TK_AS 145 +#define TK_CONSUMER 146 +#define TK_GROUP 147 +#define TK_DESC 148 +#define TK_DESCRIBE 149 +#define TK_RESET 150 +#define TK_QUERY 151 +#define TK_CACHE 152 +#define TK_EXPLAIN 153 +#define TK_ANALYZE 154 +#define TK_VERBOSE 155 +#define TK_NK_BOOL 156 +#define TK_RATIO 157 +#define TK_COMPACT 158 +#define TK_VNODES 159 +#define TK_IN 160 +#define TK_OUTPUTTYPE 161 +#define TK_AGGREGATE 162 +#define TK_BUFSIZE 163 +#define TK_STREAM 164 +#define TK_INTO 165 +#define TK_TRIGGER 166 +#define TK_AT_ONCE 167 +#define TK_WINDOW_CLOSE 168 +#define TK_WATERMARK 169 +#define TK_KILL 170 +#define TK_CONNECTION 171 +#define TK_TRANSACTION 172 +#define TK_MERGE 173 +#define TK_VGROUP 174 +#define TK_REDISTRIBUTE 175 +#define TK_SPLIT 176 +#define TK_SYNCDB 177 +#define TK_NULL 178 +#define TK_NK_QUESTION 179 +#define TK_NK_ARROW 180 +#define TK_ROWTS 181 +#define TK_TBNAME 182 +#define TK_QSTARTTS 183 +#define TK_QENDTS 184 +#define TK_WSTARTTS 185 +#define TK_WENDTS 186 +#define TK_WDURATION 187 +#define TK_CAST 188 +#define TK_NOW 189 +#define TK_TODAY 190 +#define TK_TIMEZONE 191 +#define TK_COUNT 192 +#define TK_FIRST 193 +#define TK_LAST 194 +#define TK_LAST_ROW 195 +#define TK_BETWEEN 196 +#define TK_IS 197 +#define TK_NK_LT 198 +#define TK_NK_GT 199 +#define TK_NK_LE 200 +#define TK_NK_GE 201 +#define TK_NK_NE 202 +#define TK_MATCH 203 +#define TK_NMATCH 204 +#define TK_CONTAINS 205 +#define TK_JOIN 206 +#define TK_INNER 207 +#define TK_SELECT 208 +#define TK_DISTINCT 209 +#define TK_WHERE 210 +#define TK_PARTITION 211 +#define TK_BY 212 +#define TK_SESSION 213 +#define TK_STATE_WINDOW 214 +#define TK_SLIDING 215 +#define TK_FILL 216 +#define TK_VALUE 217 +#define TK_NONE 218 +#define TK_PREV 219 +#define TK_LINEAR 220 +#define TK_NEXT 221 +#define TK_HAVING 222 +#define TK_ORDER 223 +#define TK_SLIMIT 224 +#define TK_SOFFSET 225 +#define TK_LIMIT 226 +#define TK_OFFSET 227 +#define TK_ASC 228 +#define TK_NULLS 229 +#define TK_ID 230 +#define TK_NK_BITNOT 231 +#define TK_INSERT 232 
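/*
 * The renumbering above is mechanical fallout: TK_DELAY is gone and the
 * CGROUP/WITH/SCHEMA run is replaced by TK_CONSUMER/TK_GROUP, so every later
 * token code shifts down by a few. These defines presumably mirror the values
 * emitted by the grammar generator, so this header and the generated parser
 * must be regenerated together; any table still keyed by the old numbers would
 * silently mis-tokenize. A hypothetical sketch of the consuming side -- the
 * struct, table, and function names are illustrative, but the three values
 * match the new defines in this hunk:
 */
#include <stdint.h>
#include <string.h>
#include <strings.h> /* strncasecmp */

typedef struct SDemoKeyword {
  const char *name;
  uint16_t    type; /* must equal the TK_* value from the regenerated header */
} SDemoKeyword;

static const SDemoKeyword kDemoKeywords[] = {
    {"INSERT", 232}, /* TK_INSERT, previously 235 */
    {"VALUES", 233}, /* TK_VALUES, previously 236 */
    {"IMPORT", 234}, /* TK_IMPORT, previously 237 */
};

/* Returns the token code for a keyword, or 0 so the caller can treat the
 * span as an ordinary identifier. */
static uint16_t demoLookupKeyword(const char *z, int32_t n) {
  for (size_t i = 0; i < sizeof(kDemoKeywords) / sizeof(kDemoKeywords[0]); ++i) {
    if (strlen(kDemoKeywords[i].name) == (size_t)n &&
        strncasecmp(z, kDemoKeywords[i].name, (size_t)n) == 0) {
      return kDemoKeywords[i].type;
    }
  }
  return 0;
}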
+#define TK_VALUES 233 +#define TK_IMPORT 234 +#define TK_NK_SEMI 235 +#define TK_FILE 236 #define TK_NK_SPACE 300 #define TK_NK_COMMENT 301 diff --git a/include/common/ttypes.h b/include/common/ttypes.h index 31cdb28690caeb6610d4b5e4ec6307952a0760aa..16c59465cc1f71d1f9e3cc6a2e65d83939247322 100644 --- a/include/common/ttypes.h +++ b/include/common/ttypes.h @@ -30,7 +30,7 @@ typedef uint64_t TDRowVerT; typedef int16_t col_id_t; typedef int8_t col_type_t; typedef int32_t col_bytes_t; -typedef int32_t schema_ver_t; +typedef int32_t schema_ver_t; typedef int32_t func_id_t; #pragma pack(push, 1) @@ -49,8 +49,9 @@ typedef struct { #define varDataCopy(dst, v) memcpy((dst), (void *)(v), varDataTLen(v)) #define varDataLenByData(v) (*(VarDataLenT *)(((char *)(v)) - VARSTR_HEADER_SIZE)) #define varDataSetLen(v, _len) (((VarDataLenT *)(v))[0] = (VarDataLenT)(_len)) -#define IS_VAR_DATA_TYPE(t) (((t) == TSDB_DATA_TYPE_VARCHAR) || ((t) == TSDB_DATA_TYPE_NCHAR) || ((t) == TSDB_DATA_TYPE_JSON)) -#define IS_STR_DATA_TYPE(t) (((t) == TSDB_DATA_TYPE_VARCHAR) || ((t) == TSDB_DATA_TYPE_NCHAR)) +#define IS_VAR_DATA_TYPE(t) \ + (((t) == TSDB_DATA_TYPE_VARCHAR) || ((t) == TSDB_DATA_TYPE_NCHAR) || ((t) == TSDB_DATA_TYPE_JSON)) +#define IS_STR_DATA_TYPE(t) (((t) == TSDB_DATA_TYPE_VARCHAR) || ((t) == TSDB_DATA_TYPE_NCHAR)) #define varDataNetLen(v) (htons(((VarDataLenT *)(v))[0])) #define varDataNetTLen(v) (sizeof(VarDataLenT) + varDataNetLen(v)) @@ -142,52 +143,56 @@ typedef struct { } \ } while (0) -#define NUM_TO_STRING(_inputType, _input, _outputBytes, _output) \ - do { \ - switch (_inputType) { \ - case TSDB_DATA_TYPE_TINYINT: \ - snprintf(_output, (int32_t)(_outputBytes), "%d", *(int8_t *)(_input)); \ - break; \ - case TSDB_DATA_TYPE_UTINYINT: \ - snprintf(_output, (int32_t)(_outputBytes), "%d", *(uint8_t *)(_input)); \ - break; \ - case TSDB_DATA_TYPE_SMALLINT: \ - snprintf(_output, (int32_t)(_outputBytes), "%d", *(int16_t *)(_input)); \ - break; \ - case TSDB_DATA_TYPE_USMALLINT: \ - snprintf(_output, (int32_t)(_outputBytes), "%d", *(uint16_t *)(_input)); \ - break; \ - case TSDB_DATA_TYPE_TIMESTAMP: \ - case TSDB_DATA_TYPE_BIGINT: \ - snprintf(_output, (int32_t)(_outputBytes), "%" PRId64, *(int64_t *)(_input)); \ - break; \ - case TSDB_DATA_TYPE_UBIGINT: \ - snprintf(_output, (int32_t)(_outputBytes), "%" PRIu64, *(uint64_t *)(_input)); \ - break; \ - case TSDB_DATA_TYPE_FLOAT: \ - snprintf(_output, (int32_t)(_outputBytes), "%f", *(float *)(_input)); \ - break; \ - case TSDB_DATA_TYPE_DOUBLE: \ - snprintf(_output, (int32_t)(_outputBytes), "%f", *(double *)(_input)); \ - break; \ - case TSDB_DATA_TYPE_UINT: \ - snprintf(_output, (int32_t)(_outputBytes), "%u", *(uint32_t *)(_input)); \ - break; \ - default: \ - snprintf(_output, (int32_t)(_outputBytes), "%d", *(int32_t *)(_input)); \ - break; \ - } \ +#define NUM_TO_STRING(_inputType, _input, _outputBytes, _output) \ + do { \ + switch (_inputType) { \ + case TSDB_DATA_TYPE_TINYINT: \ + snprintf(_output, (int32_t)(_outputBytes), "%d", *(int8_t *)(_input)); \ + break; \ + case TSDB_DATA_TYPE_UTINYINT: \ + snprintf(_output, (int32_t)(_outputBytes), "%d", *(uint8_t *)(_input)); \ + break; \ + case TSDB_DATA_TYPE_SMALLINT: \ + snprintf(_output, (int32_t)(_outputBytes), "%d", *(int16_t *)(_input)); \ + break; \ + case TSDB_DATA_TYPE_USMALLINT: \ + snprintf(_output, (int32_t)(_outputBytes), "%d", *(uint16_t *)(_input)); \ + break; \ + case TSDB_DATA_TYPE_TIMESTAMP: \ + case TSDB_DATA_TYPE_BIGINT: \ + snprintf(_output, (int32_t)(_outputBytes), "%" PRId64, *(int64_t 
*)(_input)); \ + break; \ + case TSDB_DATA_TYPE_UBIGINT: \ + snprintf(_output, (int32_t)(_outputBytes), "%" PRIu64, *(uint64_t *)(_input)); \ + break; \ + case TSDB_DATA_TYPE_FLOAT: \ + snprintf(_output, (int32_t)(_outputBytes), "%f", *(float *)(_input)); \ + break; \ + case TSDB_DATA_TYPE_DOUBLE: \ + snprintf(_output, (int32_t)(_outputBytes), "%f", *(double *)(_input)); \ + break; \ + case TSDB_DATA_TYPE_UINT: \ + snprintf(_output, (int32_t)(_outputBytes), "%u", *(uint32_t *)(_input)); \ + break; \ + default: \ + snprintf(_output, (int32_t)(_outputBytes), "%d", *(int32_t *)(_input)); \ + break; \ + } \ } while (0) + //TODO: use varchar(0) to represent NULL type +#define IS_VAR_NULL_TYPE(_t, _b) ((_t) == TSDB_DATA_TYPE_VARCHAR && (_b) == 0) #define IS_NULL_TYPE(_t) ((_t) == TSDB_DATA_TYPE_NULL) + #define IS_SIGNED_NUMERIC_TYPE(_t) ((_t) >= TSDB_DATA_TYPE_TINYINT && (_t) <= TSDB_DATA_TYPE_BIGINT) #define IS_UNSIGNED_NUMERIC_TYPE(_t) ((_t) >= TSDB_DATA_TYPE_UTINYINT && (_t) <= TSDB_DATA_TYPE_UBIGINT) #define IS_FLOAT_TYPE(_t) ((_t) == TSDB_DATA_TYPE_FLOAT || (_t) == TSDB_DATA_TYPE_DOUBLE) #define IS_INTEGER_TYPE(_t) ((IS_SIGNED_NUMERIC_TYPE(_t)) || (IS_UNSIGNED_NUMERIC_TYPE(_t))) #define IS_NUMERIC_TYPE(_t) ((IS_SIGNED_NUMERIC_TYPE(_t)) || (IS_UNSIGNED_NUMERIC_TYPE(_t)) || (IS_FLOAT_TYPE(_t))) -#define IS_MATHABLE_TYPE(_t) (IS_NUMERIC_TYPE(_t) || (_t) == (TSDB_DATA_TYPE_BOOL) || (_t) == (TSDB_DATA_TYPE_TIMESTAMP)) +#define IS_MATHABLE_TYPE(_t) \ + (IS_NUMERIC_TYPE(_t) || (_t) == (TSDB_DATA_TYPE_BOOL) || (_t) == (TSDB_DATA_TYPE_TIMESTAMP)) #define IS_VALID_TINYINT(_t) ((_t) >= INT8_MIN && (_t) <= INT8_MAX) #define IS_VALID_SMALLINT(_t) ((_t) >= INT16_MIN && (_t) <= INT16_MAX) @@ -244,7 +249,7 @@ typedef struct tDataTypeDescriptor { int16_t type; int16_t nameLen; int32_t bytes; - char *name; + char * name; int64_t minValue; int64_t maxValue; int32_t (*compFunc)(const char *const input, int32_t inputSize, const int32_t nelements, char *const output, @@ -277,4 +282,4 @@ void *getDataMax(int32_t type); } #endif -#endif /*_TD_COMMON_TTYPE_H_*/ +#endif /*_TD_COMMON_TTYPE_H_*/ diff --git a/include/dnode/mnode/mnode.h b/include/dnode/mnode/mnode.h index ddd6f1c05f99766aaaf16762ebcfb60fcb1b34ef..ab090940f218abe745fff2bfea170c9b6abf9248 100644 --- a/include/dnode/mnode/mnode.h +++ b/include/dnode/mnode/mnode.h @@ -81,7 +81,7 @@ int32_t mndGetLoad(SMnode *pMnode, SMnodeLoad *pLoad); * @param pMsg The request msg. * @return int32_t 0 for success, -1 for failure. */ -int32_t mndProcessMsg(SRpcMsg *pMsg); +int32_t mndProcessRpcMsg(SRpcMsg *pMsg); int32_t mndProcessSyncMsg(SRpcMsg *pMsg); /** diff --git a/include/dnode/qnode/qnode.h b/include/dnode/qnode/qnode.h index 1ab101f705ac3f71fad134c200a22f903e4a8e86..7d342c4ba12fba1edb74cd7ce3d093e1dea037b3 100644 --- a/include/dnode/qnode/qnode.h +++ b/include/dnode/qnode/qnode.h @@ -25,17 +25,6 @@ extern "C" { /* ------------------------ TYPES EXPOSED ------------------------ */ typedef struct SQnode SQnode; -typedef struct { - int64_t numOfStartTask; - int64_t numOfStopTask; - int64_t numOfRecvedFetch; - int64_t numOfSentHb; - int64_t numOfSentFetch; - int64_t numOfTaskInQueue; - int64_t numOfFetchInQueue; - int64_t numOfErrors; -} SQnodeLoad; - typedef struct { SMsgCb msgCb; } SQnodeOpt; @@ -71,10 +60,10 @@ int32_t qndGetLoad(SQnode *pQnode, SQnodeLoad *pLoad); * @param pQnode The qnode object. 
* @param pMsg The request message */ -int32_t qndProcessQueryMsg(SQnode *pQnode, SRpcMsg *pMsg); +int32_t qndProcessQueryMsg(SQnode *pQnode, int64_t ts, SRpcMsg *pMsg); #ifdef __cplusplus } #endif -#endif /*_TD_QNODE_H_*/ \ No newline at end of file +#endif /*_TD_QNODE_H_*/ diff --git a/include/libs/catalog/catalog.h b/include/libs/catalog/catalog.h index 5b746015e33af1ed9feb7a92dec1d62030453b2b..f0e642bc9af8060d0b6bc0380f2c85284f307642 100644 --- a/include/libs/catalog/catalog.h +++ b/include/libs/catalog/catalog.h @@ -52,23 +52,31 @@ typedef struct SUserAuthInfo { AUTH_TYPE type; } SUserAuthInfo; +typedef struct SDbInfo { + int32_t vgVer; + int32_t tbNum; + int64_t dbId; +} SDbInfo; + typedef struct SCatalogReq { - SArray *pTableMeta; // element is SNAME SArray *pDbVgroup; // element is db full name + SArray *pDbCfg; // element is db full name + SArray *pDbInfo; // element is db full name + SArray *pTableMeta; // element is SNAME SArray *pTableHash; // element is SNAME SArray *pUdf; // element is udf name - SArray *pDbCfg; // element is db full name SArray *pIndex; // element is index name SArray *pUser; // element is SUserAuthInfo bool qNodeRequired; // valid qnode } SCatalogReq; typedef struct SMetaData { - SArray *pTableMeta; // SArray SArray *pDbVgroup; // SArray*> + SArray *pDbCfg; // SArray + SArray *pDbInfo; // SArray + SArray *pTableMeta; // SArray SArray *pTableHash; // SArray SArray *pUdfList; // SArray - SArray *pDbCfg; // SArray SArray *pIndex; // SArray SArray *pUser; // SArray SArray *pQnodeList; // SArray @@ -175,7 +183,7 @@ int32_t catalogGetTableMeta(SCatalog* pCatalog, void * pTransporter, const SEpSe */ int32_t catalogGetSTableMeta(SCatalog* pCatalog, void * pTransporter, const SEpSet* pMgmtEps, const SName* pTableName, STableMeta** pTableMeta); -int32_t catalogUpdateSTableMeta(SCatalog* pCatalog, STableMetaRsp *rspMsg); +int32_t catalogUpdateTableMeta(SCatalog* pCatalog, STableMetaRsp *rspMsg); /** @@ -269,6 +277,8 @@ int32_t catalogChkAuth(SCatalog* pCtg, void *pRpc, const SEpSet* pMgmtEps, const int32_t catalogUpdateUserAuthInfo(SCatalog* pCtg, SGetUserAuthRsp* pAuth); +int32_t catalogUpdateVgEpSet(SCatalog* pCtg, const char* dbFName, int32_t vgId, SEpSet *epSet); + int32_t ctgdLaunchAsyncCall(SCatalog* pCtg, void *pTrans, const SEpSet* pMgmtEps, uint64_t reqId); diff --git a/include/libs/executor/dataSinkMgt.h b/include/libs/executor/dataSinkMgt.h index 339743f153968a2ae6910ac68735bbf295925041..2cc9caca6fa4d8e4dd4bd6a8d7b490e7baaf2c34 100644 --- a/include/libs/executor/dataSinkMgt.h +++ b/include/libs/executor/dataSinkMgt.h @@ -32,6 +32,10 @@ extern "C" { struct SDataSink; struct SSDataBlock; +typedef struct SDataSinkStat { + uint64_t cachedSize; +} SDataSinkStat; + typedef struct SDataSinkMgtCfg { uint32_t maxDataBlockNum; // todo: this should be numOfRows? uint32_t maxDataBlockNumPerQuery; @@ -62,6 +66,8 @@ typedef struct SOutputData { */ int32_t dsCreateDataSinker(const SDataSinkNode* pDataSink, DataSinkHandle* pHandle); +int32_t dsDataSinkGetCacheSize(SDataSinkStat *pStat); + /** * Put the result set returned by the executor into datasinker. * @param handle @@ -88,6 +94,8 @@ void dsGetDataLength(DataSinkHandle handle, int32_t* pLen, bool* pQueryEnd); */ int32_t dsGetDataBlock(DataSinkHandle handle, SOutputData* pOutput); +int32_t dsGetCacheSize(DataSinkHandle handle, uint64_t *pSize); + /** * After dsGetStatus returns DS_NEED_SCHEDULE, the caller need to put this into the work queue. 
* @param ahandle diff --git a/include/libs/executor/executor.h b/include/libs/executor/executor.h index 9cafb4ee04543f1978f68c982a5208fcde2c25a4..288248422b8288b98d8f0fccaef040186294cb76 100644 --- a/include/libs/executor/executor.h +++ b/include/libs/executor/executor.h @@ -61,7 +61,7 @@ qTaskInfo_t qCreateStreamExecTaskInfo(void* msg, void* streamReadHandle); * @param type * @return */ -int32_t qSetStreamInput(qTaskInfo_t tinfo, const void* input, int32_t type); +int32_t qSetStreamInput(qTaskInfo_t tinfo, const void* input, int32_t type, bool assignUid); /** * Set multiple input data blocks for the stream scan. @@ -71,7 +71,7 @@ int32_t qSetStreamInput(qTaskInfo_t tinfo, const void* input, int32_t type); * @param type * @return */ -int32_t qSetMultiStreamInput(qTaskInfo_t tinfo, const void* pBlocks, size_t numOfBlocks, int32_t type); +int32_t qSetMultiStreamInput(qTaskInfo_t tinfo, const void* pBlocks, size_t numOfBlocks, int32_t type, bool assignUid); /** * Update the table id list, add or remove. @@ -156,18 +156,6 @@ int64_t qGetQueriedTableUid(qTaskInfo_t tinfo); */ int32_t qGetQualifiedTableIdList(void* pTableList, const char* tagCond, int32_t tagCondLen, SArray* pTableIdList); -/** - * Create the table group according to the group by tags info - * @param pTableIdList - * @param skey - * @param groupInfo - * @param groupByIndex - * @param numOfIndex - * @return - */ -// int32_t qCreateTableGroupByGroupExpr(SArray* pTableIdList, TSKEY skey, STableGroupInfo groupInfo, SColIndex* -// groupByIndex, int32_t numOfIndex); - /** * Update the table id list of a given query. * @param uid child table uid diff --git a/include/libs/function/function.h b/include/libs/function/function.h index 21b73090554cc951aac82b4d9adb1cb7d847bff2..e8cb363e08fa65385d36762face331f5de5cf1eb 100644 --- a/include/libs/function/function.h +++ b/include/libs/function/function.h @@ -61,56 +61,9 @@ typedef struct SFileBlockInfo { #define TSDB_BLOCK_DIST_STEP_ROWS 8 #define MAX_INTERVAL_TIME_WINDOW 1000000 // maximum allowed time windows in final results -#define FUNCTION_TYPE_SCALAR 1 -#define FUNCTION_TYPE_AGG 2 - #define TOP_BOTTOM_QUERY_LIMIT 100 #define FUNCTIONS_NAME_MAX_LENGTH 16 -#define FUNCTION_INVALID_ID -1 -#define FUNCTION_COUNT 0 -#define FUNCTION_SUM 1 -#define FUNCTION_AVG 2 -#define FUNCTION_MIN 3 -#define FUNCTION_MAX 4 -#define FUNCTION_STDDEV 5 -#define FUNCTION_PERCT 6 -#define FUNCTION_APERCT 7 -#define FUNCTION_FIRST 8 -#define FUNCTION_LAST 9 -#define FUNCTION_LAST_ROW 10 -#define FUNCTION_TOP 11 -#define FUNCTION_BOTTOM 12 -#define FUNCTION_SPREAD 13 -#define FUNCTION_TWA 14 -#define FUNCTION_LEASTSQR 15 - -#define FUNCTION_TS 16 -#define FUNCTION_TS_DUMMY 17 -#define FUNCTION_TAG_DUMMY 18 -#define FUNCTION_TS_COMP 19 - -#define FUNCTION_TAG 20 -#define FUNCTION_PRJ 21 - -#define FUNCTION_TAGPRJ 22 -#define FUNCTION_ARITHM 23 -#define FUNCTION_DIFF 24 - -#define FUNCTION_FIRST_DST 25 -#define FUNCTION_LAST_DST 26 -#define FUNCTION_STDDEV_DST 27 -#define FUNCTION_INTERP 28 - -#define FUNCTION_RATE 29 -#define FUNCTION_IRATE 30 -#define FUNCTION_TID_TAG 31 -#define FUNCTION_DERIVATIVE 32 -#define FUNCTION_BLKINFO 33 - - -#define FUNCTION_COV 38 - typedef struct SResultRowEntryInfo { bool initialized:1; // output buffer has been initialized bool complete:1; // query has completed @@ -180,10 +133,9 @@ typedef struct SqlFunctionCtx { char *pOutput; // final result output buffer, point to sdata->data int32_t numOfParams; SFunctParam *param; // input parameter, e.g., top(k, 20), the number of results 
for top query is kept in param - int64_t *ptsList; // corresponding timestamp array list + int64_t *ptsList; // corresponding timestamp array list, todo remove it SColumnInfoData *pTsOutput; // corresponding output buffer for timestamp of each result, e.g., top/bottom*/ int32_t offset; - SVariant tag; struct SResultRowEntryInfo *resultInfo; SSubsidiaryResInfo subsidiaries; SPoint1 start; @@ -210,9 +162,6 @@ enum { typedef struct tExprNode { int32_t nodeType; union { - SSchema *pSchema;// column node - struct SVariant *pVal; // value node - struct {// function node char functionName[FUNCTIONS_NAME_MAX_LENGTH]; // todo refactor int32_t functionId; @@ -255,47 +204,23 @@ struct SScalarParam { int32_t getResultDataInfo(int32_t dataType, int32_t dataBytes, int32_t functionId, int32_t param, SResultDataInfo* pInfo, int16_t extLength, bool isSuperTable); -bool qIsValidUdf(SArray* pUdfInfo, const char* name, int32_t len, int32_t* functionId); - void resetResultRowEntryResult(SqlFunctionCtx* pCtx, int32_t num); void cleanupResultRowEntry(struct SResultRowEntryInfo* pCell); int32_t getNumOfResult(SqlFunctionCtx* pCtx, int32_t num, SSDataBlock* pResBlock); bool isRowEntryCompleted(struct SResultRowEntryInfo* pEntry); bool isRowEntryInitialized(struct SResultRowEntryInfo* pEntry); -/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// -// fill api -struct SFillInfo; -struct SFillColInfo; - typedef struct SPoint { int64_t key; void * val; } SPoint; -//void taosFillSetStartInfo(struct SFillInfo* pFillInfo, int32_t numOfRows, TSKEY endKey); -//void taosResetFillInfo(struct SFillInfo* pFillInfo, TSKEY startTimestamp); -//void taosFillSetInputDataBlock(struct SFillInfo* pFillInfo, const struct SSDataBlock* pInput); -//struct SFillColInfo* createFillColInfo(SExprInfo* pExpr, int32_t numOfOutput, const SValueNode* val); -//bool taosFillHasMoreResults(struct SFillInfo* pFillInfo); -// -//struct SFillInfo* taosCreateFillInfo(int32_t order, TSKEY skey, int32_t numOfTags, int32_t capacity, int32_t numOfCols, -// SInterval* pInterval, int32_t fillType, -// struct SFillColInfo* pCol, const char* id); -// -//void* taosDestroyFillInfo(struct SFillInfo *pFillInfo); -//int64_t taosFillResultDataBlock(struct SFillInfo* pFillInfo, void** output, int32_t capacity); -//int64_t getFillInfoStart(struct SFillInfo *pFillInfo); - int32_t taosGetLinearInterpolationVal(SPoint* point, int32_t outputType, SPoint* point1, SPoint* point2, int32_t inputType); /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // udf api struct SUdfInfo; -void qAddUdfInfo(uint64_t id, struct SUdfInfo* pUdfInfo); -void qRemoveUdfInfo(uint64_t id, struct SUdfInfo* pUdfInfo); - /** * create udfd proxy, called once in process that call doSetupUdf/callUdfxxx/doTeardownUdf * @return error code diff --git a/include/libs/function/functionMgt.h b/include/libs/function/functionMgt.h index 3d86adb573cd27dfce3b93409b96a11b47b7aaf5..f3e28936afc1b1556502eacd08f6b1e699abc198 100644 --- a/include/libs/function/functionMgt.h +++ b/include/libs/function/functionMgt.h @@ -23,6 +23,9 @@ extern "C" { #include "function.h" #include "querynodes.h" +#define FUNC_AGGREGATE_UDF_ID 5001 +#define FUNC_SCALAR_UDF_ID 5002 + typedef enum EFunctionType { // aggregate function FUNCTION_TYPE_APERCENTILE = 1, @@ -126,21 +129,12 @@ typedef enum EFunctionType { struct SqlFunctionCtx; struct SResultRowEntryInfo; struct STimeWindow; -struct SCatalog; - 
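/*
 * With SFmGetFuncInfoParam removed (the struct just below), function
 * resolution no longer drags catalog/RPC handles along; per the new prototype
 * declared later in this hunk, a caller passes only the function node plus an
 * error buffer. A hypothetical caller under that assumption -- demoResolveFunc
 * and the buffer size are illustrative, while fmGetFuncInfo's signature is the
 * one introduced by this patch:
 */
#include "functionMgt.h" /* provides SFunctionNode and fmGetFuncInfo */

static int32_t demoResolveFunc(SFunctionNode *pFunc) {
  char msg[256] = {0}; /* fmGetFuncInfo fills this with a readable reason on failure */
  int32_t code = fmGetFuncInfo(pFunc, msg, sizeof(msg));
  /* on error, msg can be surfaced directly as the parser diagnostic */
  return code;
}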
-typedef struct SFmGetFuncInfoParam { - struct SCatalog* pCtg; - void* pRpc; - const SEpSet* pMgmtEps; - char* pErrBuf; - int32_t errBufLen; -} SFmGetFuncInfoParam; int32_t fmFuncMgtInit(); void fmFuncMgtDestroy(); -int32_t fmGetFuncInfo(SFmGetFuncInfoParam* pParam, SFunctionNode* pFunc); +int32_t fmGetFuncInfo(SFunctionNode* pFunc, char* pMsg, int32_t msgLen); bool fmIsBuiltinFunc(const char* pFunc); @@ -162,6 +156,9 @@ bool fmIsDynamicScanOptimizedFunc(int32_t funcId); bool fmIsMultiResFunc(int32_t funcId); bool fmIsRepeatScanFunc(int32_t funcId); bool fmIsUserDefinedFunc(int32_t funcId); +bool fmIsDistExecFunc(int32_t funcId); + +int32_t fmGetDistMethod(const SFunctionNode* pFunc, SFunctionNode** pPartialFunc, SFunctionNode** pMergeFunc); typedef enum EFuncDataRequired { FUNC_DATA_REQUIRED_DATA_LOAD = 1, diff --git a/include/libs/index/index.h b/include/libs/index/index.h index 05db99db0f199169ce71e4a76d56899361aa403b..180c7e7216153f0cdfd5b4240de89bc586fd9b88 100644 --- a/include/libs/index/index.h +++ b/include/libs/index/index.h @@ -192,11 +192,17 @@ void indexTermDestroy(SIndexTerm* p); void indexInit(); /* index filter */ +typedef struct SIndexMetaArg { + void* metaHandle; + void* metaEx; + uint64_t suid; +} SIndexMetaArg; + typedef enum { SFLT_NOT_INDEX, SFLT_COARSE_INDEX, SFLT_ACCURATE_INDEX } SIdxFltStatus; SIdxFltStatus idxGetFltStatus(SNode* pFilterNode); -int32_t doFilterTag(const SNode* pFilterNode, SArray* result); +int32_t doFilterTag(const SNode* pFilterNode, SIndexMetaArg* metaArg, SArray* result); /* * destory index env * diff --git a/include/libs/monitor/monitor.h b/include/libs/monitor/monitor.h index 9d8cf61b0646c764cee7056152f7873caa61b14f..39e8042b931ecbee48fbe389ab1160c613636f28 100644 --- a/include/libs/monitor/monitor.h +++ b/include/libs/monitor/monitor.h @@ -171,6 +171,7 @@ void tFreeSMonVmInfo(SMonVmInfo *pInfo); typedef struct { SMonSysInfo sys; SMonLogs log; + SQnodeLoad load; } SMonQmInfo; int32_t tSerializeSMonQmInfo(void *buf, int32_t bufLen, SMonQmInfo *pInfo); @@ -210,6 +211,10 @@ typedef struct { int32_t tSerializeSMonMloadInfo(void *buf, int32_t bufLen, SMonMloadInfo *pInfo); int32_t tDeserializeSMonMloadInfo(void *buf, int32_t bufLen, SMonMloadInfo *pInfo); +int32_t tSerializeSQnodeLoad(void *buf, int32_t bufLen, SQnodeLoad *pInfo); +int32_t tDeserializeSQnodeLoad(void *buf, int32_t bufLen, SQnodeLoad *pInfo); + + typedef struct { const char *server; uint16_t port; diff --git a/include/libs/nodes/cmdnodes.h b/include/libs/nodes/cmdnodes.h index 7bd3a40c7199f204bd14e5af3231e59d5b7383be..82924bef3f206911b803ace70ea15435dc29e882 100644 --- a/include/libs/nodes/cmdnodes.h +++ b/include/libs/nodes/cmdnodes.h @@ -80,8 +80,7 @@ typedef struct SAlterDatabaseStmt { typedef struct STableOptions { ENodeType type; char comment[TSDB_TB_COMMENT_LEN]; - int32_t delay; - float filesFactor; + double filesFactor; SNodeList* pRollupFuncs; int32_t ttl; SNodeList* pSma; @@ -239,20 +238,13 @@ typedef struct SDropComponentNodeStmt { int32_t dnodeId; } SDropComponentNodeStmt; -typedef struct STopicOptions { - ENodeType type; - bool withTable; - bool withSchema; - bool withTag; -} STopicOptions; - typedef struct SCreateTopicStmt { - ENodeType type; - char topicName[TSDB_TABLE_NAME_LEN]; - char subscribeDbName[TSDB_DB_NAME_LEN]; - bool ignoreExists; - SNode* pQuery; - STopicOptions* pOptions; + ENodeType type; + char topicName[TSDB_TABLE_NAME_LEN]; + char subDbName[TSDB_DB_NAME_LEN]; + char subSTbName[TSDB_TABLE_NAME_LEN]; + bool ignoreExists; + SNode* pQuery; } 
SCreateTopicStmt; typedef struct SDropTopicStmt { diff --git a/include/libs/nodes/nodes.h b/include/libs/nodes/nodes.h index 38602667252e429eb9840c75d2c23b98139df184..15e5e14e41e2eb3b7bc0036932102d42cefecf79 100644 --- a/include/libs/nodes/nodes.h +++ b/include/libs/nodes/nodes.h @@ -95,7 +95,6 @@ typedef enum ENodeType { QUERY_NODE_INDEX_OPTIONS, QUERY_NODE_EXPLAIN_OPTIONS, QUERY_NODE_STREAM_OPTIONS, - QUERY_NODE_TOPIC_OPTIONS, QUERY_NODE_LEFT_VALUE, // Statement nodes are used in parser and planner module. @@ -190,6 +189,7 @@ typedef enum ENodeType { QUERY_NODE_LOGIC_PLAN_PROJECT, QUERY_NODE_LOGIC_PLAN_VNODE_MODIF, QUERY_NODE_LOGIC_PLAN_EXCHANGE, + QUERY_NODE_LOGIC_PLAN_MERGE, QUERY_NODE_LOGIC_PLAN_WINDOW, QUERY_NODE_LOGIC_PLAN_FILL, QUERY_NODE_LOGIC_PLAN_SORT, @@ -207,10 +207,12 @@ typedef enum ENodeType { QUERY_NODE_PHYSICAL_PLAN_JOIN, QUERY_NODE_PHYSICAL_PLAN_AGG, QUERY_NODE_PHYSICAL_PLAN_EXCHANGE, + QUERY_NODE_PHYSICAL_PLAN_MERGE, QUERY_NODE_PHYSICAL_PLAN_SORT, QUERY_NODE_PHYSICAL_PLAN_INTERVAL, QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERVAL, QUERY_NODE_PHYSICAL_PLAN_STREAM_FINAL_INTERVAL, + QUERY_NODE_PHYSICAL_PLAN_STREAM_SEMI_INTERVAL, QUERY_NODE_PHYSICAL_PLAN_FILL, QUERY_NODE_PHYSICAL_PLAN_SESSION_WINDOW, QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION_WINDOW, diff --git a/include/libs/nodes/plannodes.h b/include/libs/nodes/plannodes.h index b2244c572c68b4facba490e518be25e1ddf290a4..01b8d5c51e9139d3079bdb5975dba63a7e39591c 100644 --- a/include/libs/nodes/plannodes.h +++ b/include/libs/nodes/plannodes.h @@ -56,6 +56,10 @@ typedef struct SScanLogicNode { int8_t intervalUnit; int8_t slidingUnit; SNode* pTagCond; + int8_t triggerType; + int64_t watermark; + int16_t tsColId; + double filesFactor; } SScanLogicNode; typedef struct SJoinLogicNode { @@ -91,25 +95,39 @@ typedef struct SVnodeModifLogicNode { typedef struct SExchangeLogicNode { SLogicNode node; int32_t srcGroupId; - uint8_t precision; } SExchangeLogicNode; +typedef struct SMergeLogicNode { + SLogicNode node; + SNodeList* pMergeKeys; + int32_t numOfChannels; + int32_t srcGroupId; +} SMergeLogicNode; + typedef enum EWindowType { WINDOW_TYPE_INTERVAL = 1, WINDOW_TYPE_SESSION, WINDOW_TYPE_STATE } EWindowType; +typedef enum EStreamIntervalAlgorithm { + STREAM_INTERVAL_ALGO_FINAL = 1, + STREAM_INTERVAL_ALGO_SEMI, + STREAM_INTERVAL_ALGO_SINGLE +} EStreamIntervalAlgorithm; + typedef struct SWindowLogicNode { - SLogicNode node; - EWindowType winType; - SNodeList* pFuncs; - int64_t interval; - int64_t offset; - int64_t sliding; - int8_t intervalUnit; - int8_t slidingUnit; - int64_t sessionGap; - SNode* pTspk; - SNode* pStateExpr; - int8_t triggerType; - int64_t watermark; + SLogicNode node; + EWindowType winType; + SNodeList* pFuncs; + int64_t interval; + int64_t offset; + int64_t sliding; + int8_t intervalUnit; + int8_t slidingUnit; + int64_t sessionGap; + SNode* pTspk; + SNode* pStateExpr; + int8_t triggerType; + int64_t watermark; + double filesFactor; + EStreamIntervalAlgorithm stmInterAlgo; } SWindowLogicNode; typedef struct SFillLogicNode { @@ -217,6 +235,10 @@ typedef struct STableScanPhysiNode { int64_t sliding; int8_t intervalUnit; int8_t slidingUnit; + int8_t triggerType; + int64_t watermark; + int16_t tsColId; + double filesFactor; } STableScanPhysiNode; typedef STableScanPhysiNode STableSeqScanPhysiNode; @@ -260,6 +282,13 @@ typedef struct SExchangePhysiNode { SNodeList* pSrcEndPoints; // element is SDownstreamSource, scheduler fill by calling qSetSuplanExecutionNode } SExchangePhysiNode; +typedef struct SMergePhysiNode { + SPhysiNode 
node; + SNodeList* pMergeKeys; + int32_t numOfChannels; + int32_t srcGroupId; +} SMergePhysiNode; + typedef struct SWinodwPhysiNode { SPhysiNode node; SNodeList* pExprs; // these are expression list of parameter expression of function @@ -267,6 +296,7 @@ typedef struct SWinodwPhysiNode { SNode* pTspk; // timestamp primary key int8_t triggerType; int64_t watermark; + double filesFactor; } SWinodwPhysiNode; typedef struct SIntervalPhysiNode { @@ -279,6 +309,8 @@ typedef struct SIntervalPhysiNode { } SIntervalPhysiNode; typedef SIntervalPhysiNode SStreamIntervalPhysiNode; +typedef SIntervalPhysiNode SStreamFinalIntervalPhysiNode; +typedef SIntervalPhysiNode SStreamSemiIntervalPhysiNode; typedef struct SFillPhysiNode { SPhysiNode node; diff --git a/include/libs/nodes/querynodes.h b/include/libs/nodes/querynodes.h index 16014893ca539ce1954e41e7daa16eb60bc376c6..e4af78892baaf3757ab58be41fec776e2cb7186f 100644 --- a/include/libs/nodes/querynodes.h +++ b/include/libs/nodes/querynodes.h @@ -322,21 +322,22 @@ typedef enum EQueryExecMode { } EQueryExecMode; typedef struct SQuery { - ENodeType type; - EQueryExecMode execMode; - bool haveResultSet; - SNode* pRoot; - int32_t numOfResCols; - SSchema* pResSchema; - int8_t precision; - SCmdMsgInfo* pCmdMsg; - int32_t msgType; - SArray* pDbList; - SArray* pTableList; - bool showRewrite; - int32_t placeholderNum; - SArray* pPlaceholderValues; - SNode* pPrepareRoot; + ENodeType type; + EQueryExecMode execMode; + bool haveResultSet; + SNode* pRoot; + int32_t numOfResCols; + SSchema* pResSchema; + int8_t precision; + SCmdMsgInfo* pCmdMsg; + int32_t msgType; + SArray* pTableList; + SArray* pDbList; + bool showRewrite; + int32_t placeholderNum; + SArray* pPlaceholderValues; + SNode* pPrepareRoot; + struct SParseMetaCache* pMetaCache; } SQuery; void nodesWalkSelectStmt(SSelectStmt* pSelect, ESqlClause clause, FNodeWalker walker, void* pContext); diff --git a/include/libs/parser/parser.h b/include/libs/parser/parser.h index 788512e0e8f424b5654f36c6cd7a0af13ff6da3d..6abd1ffa6d57834b2d36b72071001019276f5e99 100644 --- a/include/libs/parser/parser.h +++ b/include/libs/parser/parser.h @@ -23,6 +23,9 @@ extern "C" { #include "query.h" #include "querynodes.h" +struct SCatalogReq; +struct SMetaData; + typedef struct SStmtCallback { TAOS_STMT* pStmt; int32_t (*getTbNameFn)(TAOS_STMT*, char**); @@ -45,11 +48,18 @@ typedef struct SParseContext { SStmtCallback* pStmtCb; const char* pUser; bool isSuperUser; + bool async; + int8_t schemalessType; } SParseContext; int32_t qParseSql(SParseContext* pCxt, SQuery** pQuery); bool qIsInsertSql(const char* pStr, size_t length); +// for async mode +int32_t qParseSqlSyntax(SParseContext* pCxt, SQuery** pQuery, struct SCatalogReq* pCatalogReq); +int32_t qAnalyseSqlSemantic(SParseContext* pCxt, const struct SCatalogReq* pCatalogReq, + const struct SMetaData* pMetaData, SQuery* pQuery); + void qDestroyQuery(SQuery* pQueryNode); int32_t qExtractResultSchema(const SNode* pRoot, int32_t* numOfCols, SSchema** pSchema); @@ -68,8 +78,8 @@ int32_t qStmtParseQuerySql(SParseContext* pCxt, SQuery* pQuery); int32_t qBindStmtColsValue(void* pBlock, TAOS_MULTI_BIND* bind, char* msgBuf, int32_t msgBufLen); int32_t qBindStmtSingleColValue(void* pBlock, TAOS_MULTI_BIND* bind, char* msgBuf, int32_t msgBufLen, int32_t colIdx, int32_t rowNum); -int32_t qBuildStmtColFields(void* pDataBlock, int32_t* fieldNum, TAOS_FIELD** fields); -int32_t qBuildStmtTagFields(void* pBlock, void* boundTags, int32_t* fieldNum, TAOS_FIELD** fields); +int32_t 
qBuildStmtColFields(void* pDataBlock, int32_t* fieldNum, TAOS_FIELD_E** fields); +int32_t qBuildStmtTagFields(void* pBlock, void* boundTags, int32_t* fieldNum, TAOS_FIELD_E** fields); int32_t qBindStmtTagsValue(void* pBlock, void* boundTags, int64_t suid, char* tName, TAOS_MULTI_BIND* bind, char* msgBuf, int32_t msgBufLen); void destroyBoundColumnInfo(void* pBoundInfo); diff --git a/include/libs/planner/planner.h b/include/libs/planner/planner.h index c4f71e57a8174c62cf331e4afec35604786282a0..af30ec4c6bf7d657dfdec1af49f871eed38b53d7 100644 --- a/include/libs/planner/planner.h +++ b/include/libs/planner/planner.h @@ -36,6 +36,7 @@ typedef struct SPlanContext { int64_t watermark; char* pMsg; int32_t msgLen; + double filesFactor; } SPlanContext; // Create the physical plan for the query, according to the AST. diff --git a/include/libs/qcom/query.h b/include/libs/qcom/query.h index a30f3be7a17398b91db04678d14c3648dcab38ed..45a7e9a29f3457a68e9998659237a9e0d70d39ab 100644 --- a/include/libs/qcom/query.h +++ b/include/libs/qcom/query.h @@ -43,6 +43,12 @@ typedef enum { TASK_TYPE_TEMP, } ETaskType; +typedef enum { + TARGET_TYPE_MNODE = 1, + TARGET_TYPE_VNODE, + TARGET_TYPE_OTHER, +} ETargetType; + typedef struct STableComInfo { uint8_t numOfTags; // the number of tags in schema uint8_t precision; // the number of precision @@ -50,6 +56,11 @@ typedef struct STableComInfo { int32_t rowSize; // row size of the schema } STableComInfo; +typedef struct SQueryExecRes { + int32_t msgType; + void* res; +} SQueryExecRes; + typedef struct SIndexMeta { #ifdef WINDOWS size_t avoidCompilationErrors; @@ -126,11 +137,18 @@ typedef struct SDataBuf { void* handle; } SDataBuf; +typedef struct STargetInfo { + ETargetType type; + char dbFName[TSDB_DB_FNAME_LEN]; // used to update db's vgroup epset + int32_t vgId; +} STargetInfo; + typedef int32_t (*__async_send_cb_fn_t)(void* param, const SDataBuf* pMsg, int32_t code); typedef int32_t (*__async_exec_fn_t)(void* param); typedef struct SMsgSendInfo { __async_send_cb_fn_t fp; // async callback function + STargetInfo target; // for update epset void* param; uint64_t requestId; uint64_t requestObjRefId; @@ -179,6 +197,7 @@ int32_t queryCreateTableMetaFromMsg(STableMetaRsp* msg, bool isSuperTable, STabl char* jobTaskStatusStr(int32_t status); SSchema createSchema(int8_t type, int32_t bytes, col_id_t colId, const char* name); +void destroyQueryExecRes(SQueryExecRes* pRes); extern int32_t (*queryBuildMsg[TDMT_MAX])(void *input, char **msg, int32_t msgSize, int32_t *msgLen, void*(*mallocFp)(int32_t)); extern int32_t (*queryProcessMsgRsp[TDMT_MAX])(void* output, char* msg, int32_t msgSize); @@ -191,7 +210,8 @@ extern int32_t (*queryProcessMsgRsp[TDMT_MAX])(void* output, char* msg, int32_t #define NEED_CLIENT_RM_TBLMETA_ERROR(_code) \ ((_code) == TSDB_CODE_PAR_TABLE_NOT_EXIST || (_code) == TSDB_CODE_VND_TB_NOT_EXIST || \ (_code) == TSDB_CODE_PAR_INVALID_COLUMNS_NUM || (_code) == TSDB_CODE_PAR_INVALID_COLUMN || \ - (_code) == TSDB_CODE_PAR_TAGS_NOT_MATCHED || (_code == TSDB_CODE_PAR_VALUE_TOO_LONG)) + (_code) == TSDB_CODE_PAR_TAGS_NOT_MATCHED || (_code == TSDB_CODE_PAR_VALUE_TOO_LONG) || \ + (_code == TSDB_CODE_PAR_INVALID_DROP_COL)) #define NEED_CLIENT_REFRESH_VG_ERROR(_code) \ ((_code) == TSDB_CODE_VND_HASH_MISMATCH || (_code) == TSDB_CODE_VND_INVALID_VGROUP_ID) #define NEED_CLIENT_REFRESH_TBLMETA_ERROR(_code) ((_code) == TSDB_CODE_TDB_TABLE_RECREATED) diff --git a/include/libs/qworker/qworker.h b/include/libs/qworker/qworker.h index 
0846841cef1b509edf2ccc189bf9e81453169aa1..91cf975a56660cd13a9fac992cb59c79bd2362b4 100644 --- a/include/libs/qworker/qworker.h +++ b/include/libs/qworker/qworker.h @@ -22,7 +22,7 @@ extern "C" { #include "tmsgcb.h" #include "trpc.h" - +#include "executor.h" enum { NODE_TYPE_VNODE = 1, @@ -40,44 +40,42 @@ typedef struct SQWorkerCfg { } SQWorkerCfg; typedef struct { - uint64_t numOfStartTask; - uint64_t numOfStopTask; - uint64_t numOfRecvedFetch; - uint64_t numOfSentHb; - uint64_t numOfSentFetch; - uint64_t numOfTaskInQueue; + uint64_t cacheDataSize; + + uint64_t queryProcessed; + uint64_t cqueryProcessed; + uint64_t fetchProcessed; + uint64_t dropProcessed; + uint64_t hbProcessed; + + uint64_t numOfQueryInQueue; uint64_t numOfFetchInQueue; + uint64_t timeInQueryQueue; + uint64_t timeInFetchQueue; + uint64_t numOfErrors; } SQWorkerStat; int32_t qWorkerInit(int8_t nodeType, int32_t nodeId, SQWorkerCfg *cfg, void **qWorkerMgmt, const SMsgCb *pMsgCb); -int32_t qWorkerProcessQueryMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg); - -int32_t qWorkerProcessCQueryMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg); - -int32_t qWorkerProcessDataSinkMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg); - -int32_t qWorkerProcessReadyMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg); +int32_t qWorkerProcessQueryMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, int64_t ts); -int32_t qWorkerProcessStatusMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg); +int32_t qWorkerProcessCQueryMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, int64_t ts); -int32_t qWorkerProcessFetchMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg); +int32_t qWorkerProcessFetchMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, int64_t ts); -int32_t qWorkerProcessFetchRsp(void *node, void *qWorkerMgmt, SRpcMsg *pMsg); +int32_t qWorkerProcessFetchRsp(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, int64_t ts); -int32_t qWorkerProcessCancelMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg); +int32_t qWorkerProcessCancelMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, int64_t ts); -int32_t qWorkerProcessDropMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg); +int32_t qWorkerProcessDropMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, int64_t ts); -int32_t qWorkerProcessHbMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg); - -int32_t qWorkerProcessShowMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg); - -int32_t qWorkerProcessShowFetchMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg); +int32_t qWorkerProcessHbMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, int64_t ts); void qWorkerDestroy(void **qWorkerMgmt); +int32_t qWorkerGetStat(SReadHandle *handle, void *qWorkerMgmt, SQWorkerStat *pStat); + #ifdef __cplusplus } #endif diff --git a/include/libs/scheduler/scheduler.h b/include/libs/scheduler/scheduler.h index 0d32cce20b6e489249fa79080e6144754c17218b..331b78769029fb97764f81ce6bb646f28854918a 100644 --- a/include/libs/scheduler/scheduler.h +++ b/include/libs/scheduler/scheduler.h @@ -56,7 +56,7 @@ typedef struct SQueryProfileSummary { typedef struct SQueryResult { int32_t code; uint64_t numOfRows; - void *res; + SQueryExecRes res; } SQueryResult; typedef struct STaskInfo { diff --git a/include/libs/stream/tstream.h b/include/libs/stream/tstream.h index 8aaf9a79dc5af256cfe089d8fc5f7b12856d2e71..f7ad7b4ed8dcecb65bec074480e36226f583727b 100644 --- a/include/libs/stream/tstream.h +++ b/include/libs/stream/tstream.h @@ -61,11 +61,8 @@ enum { }; typedef struct { - int8_t type; - - int32_t sourceVg; - int64_t sourceVer; - + int8_t type; + int64_t ver; 
int32_t* dataRef; SSubmitReq* data; } SStreamDataSubmit; @@ -83,6 +80,37 @@ typedef struct { int8_t type; } SStreamCheckpoint; +typedef struct { + STaosQueue* queue; + STaosQall* qall; + void* qItem; + int8_t failed; +} SStreamQ; + +static FORCE_INLINE void* streamQCurItem(SStreamQ* queue) { + // + return queue->qItem; +} + +static FORCE_INLINE void* streamQNextItem(SStreamQ* queue) { + int8_t failed = atomic_load_8(&queue->failed); + if (failed) { + ASSERT(queue->qItem != NULL); + return streamQCurItem(queue); + } else { + taosGetQitem(queue->qall, &queue->qItem); + if (queue->qItem == NULL) { + taosReadAllQitems(queue->queue, queue->qall); + taosGetQitem(queue->qall, &queue->qItem); + } + return streamQCurItem(queue); + } +} + +static FORCE_INLINE void streamQSetFail(SStreamQ* queue) { atomic_store_8(&queue->failed, 1); } + +static FORCE_INLINE void streamQSetSuccess(SStreamQ* queue) { atomic_store_8(&queue->failed, 0); } + static FORCE_INLINE SStreamDataSubmit* streamDataSubmitNew(SSubmitReq* pReq) { SStreamDataSubmit* pDataSubmit = (SStreamDataSubmit*)taosAllocateQitem(sizeof(SStreamDataSubmit), DEF_QITEM); if (pDataSubmit == NULL) return NULL; @@ -111,6 +139,8 @@ static FORCE_INLINE void streamDataSubmitRefDec(SStreamDataSubmit* pDataSubmit) } } +SStreamDataSubmit* streamSubmitRefClone(SStreamDataSubmit* pSubmit); + int32_t streamDataBlockEncode(void** buf, const SStreamDataBlock* pOutput); void* streamDataBlockDecode(const void* buf, SStreamDataBlock* pInput); @@ -209,8 +239,6 @@ struct SStreamTask { int32_t nodeId; SEpSet epSet; - // source preprocess - // exec STaskExec exec; @@ -318,8 +346,6 @@ int32_t streamDequeueOutput(SStreamTask* pTask, void** output); int32_t streamTaskRun(SStreamTask* pTask); -int32_t streamTaskHandleInput(SStreamTask* pTask, void* data); - int32_t streamTaskProcessRunReq(SStreamTask* pTask, SMsgCb* pMsgCb); int32_t streamProcessDispatchReq(SStreamTask* pTask, SMsgCb* pMsgCb, SStreamDispatchReq* pReq, SRpcMsg* pMsg); int32_t streamProcessDispatchRsp(SStreamTask* pTask, SMsgCb* pMsgCb, SStreamDispatchRsp* pRsp); diff --git a/include/libs/sync/sync.h b/include/libs/sync/sync.h index 2e04afdbdc8d06029808da29398392f481832d75..a587ad6ef22fb80538147a61980ae4cdadd8ec03 100644 --- a/include/libs/sync/sync.h +++ b/include/libs/sync/sync.h @@ -66,12 +66,6 @@ typedef struct SSyncCfg { SNodeInfo nodeInfo[TSDB_MAX_REPLICA]; } SSyncCfg; -typedef struct SSnapshot { - void* data; - SyncIndex lastApplyIndex; - SyncTerm lastApplyTerm; -} SSnapshot; - typedef struct SFsmCbMeta { SyncIndex index; bool isWeak; @@ -93,6 +87,12 @@ typedef struct SReConfigCbMeta { uint64_t flag; } SReConfigCbMeta; +typedef struct SSnapshot { + void *data; + SyncIndex lastApplyIndex; + SyncTerm lastApplyTerm; +} SSnapshot; + typedef struct SSyncFSM { void* data; @@ -101,23 +101,17 @@ typedef struct SSyncFSM { void (*FpRollBackCb)(struct SSyncFSM* pFsm, const SRpcMsg* pMsg, SFsmCbMeta cbMeta); void (*FpRestoreFinishCb)(struct SSyncFSM* pFsm); - int32_t (*FpGetSnapshot)(struct SSyncFSM* pFsm, SSnapshot* pSnapshot); - - // if (*ppIter == NULL) - // *ppIter = new iter; - // else - // *ppIter.next(); - // - // if success, return 0. 
else return error code - int32_t (*FpSnapshotRead)(struct SSyncFSM* pFsm, const SSnapshot* pSnapshot, void** ppIter, char** ppBuf, - int32_t* len); + void (*FpReConfigCb)(struct SSyncFSM* pFsm, SSyncCfg newCfg, SReConfigCbMeta cbMeta); - // apply data into fsm - int32_t (*FpSnapshotApply)(struct SSyncFSM* pFsm, const SSnapshot* pSnapshot, char* pBuf, int32_t len); + int32_t (*FpGetSnapshot)(struct SSyncFSM* pFsm, SSnapshot* pSnapshot); - void (*FpReConfigCb)(struct SSyncFSM* pFsm, SSyncCfg newCfg, SReConfigCbMeta cbMeta); + int32_t (*FpSnapshotStartRead)(struct SSyncFSM* pFsm, void** ppReader); + int32_t (*FpSnapshotStopRead)(struct SSyncFSM* pFsm, void* pReader); + int32_t (*FpSnapshotDoRead)(struct SSyncFSM* pFsm, void* pReader, void** ppBuf, int32_t* len); - // int32_t (*FpRestoreSnapshot)(struct SSyncFSM* pFsm, const SSnapshot* snapshot); + int32_t (*FpSnapshotStartWrite)(struct SSyncFSM* pFsm, void** ppWriter); + int32_t (*FpSnapshotStopWrite)(struct SSyncFSM* pFsm, void* pWriter, bool isApply); + int32_t (*FpSnapshotDoWrite)(struct SSyncFSM* pFsm, void* pWriter, void* pBuf, int32_t len); } SSyncFSM; diff --git a/include/libs/transport/trpc.h b/include/libs/transport/trpc.h index 70977bba871dd109d8e3d7a9b747df2e5435fa58..839194da94e5a184ab11b446077e334f085d68b5 100644 --- a/include/libs/transport/trpc.h +++ b/include/libs/transport/trpc.h @@ -124,6 +124,7 @@ void rpcSendRedirectRsp(void *pConn, const SEpSet *pEpSet); void rpcSendRequestWithCtx(void *thandle, const SEpSet *pEpSet, SRpcMsg *pMsg, int64_t *rid, SRpcCtx *ctx); int32_t rpcGetConnInfo(void *thandle, SRpcConnInfo *pInfo); void rpcSendRecv(void *shandle, SEpSet *pEpSet, SRpcMsg *pReq, SRpcMsg *pRsp); +void rpcSetDefaultAddr(void *thandle, const char *ip, const char *fqdn); #ifdef __cplusplus } diff --git a/include/libs/wal/wal.h b/include/libs/wal/wal.h index e541c214deba8e9b9ad3cc4e95cc2d2224f3c5a3..95af8ac30666b67b0a933477ff8ca0764d2d0a43 100644 --- a/include/libs/wal/wal.h +++ b/include/libs/wal/wal.h @@ -184,6 +184,7 @@ int32_t walRollback(SWal *, int64_t ver); // notify that previous logs can be pruned safely int32_t walBeginSnapshot(SWal *, int64_t ver); int32_t walEndSnapshot(SWal *); +void walRestoreFromSnapshot(SWal *, int64_t ver); // int32_t walDataCorrupted(SWal*); // read diff --git a/include/os/osDir.h b/include/os/osDir.h index a4c686e2807ee3d1fb9a8a0e1e05066d1b616c0b..9019d4f80240b2335824cb5626488bf4d0957f06 100644 --- a/include/os/osDir.h +++ b/include/os/osDir.h @@ -33,8 +33,19 @@ extern "C" { #ifdef WINDOWS #define TD_TMP_DIR_PATH "C:\\Windows\\Temp\\" +#define TD_CFG_DIR_PATH "C:\\TDengine\\cfg\\" +#define TD_DATA_DIR_PATH "C:\\TDengine\\data\\" +#define TD_LOG_DIR_PATH "C:\\TDengine\\log\\" +#elif defined(_TD_DARWIN_64) +#define TD_TMP_DIR_PATH "/tmp/taosd/" +#define TD_CFG_DIR_PATH "/usr/local/etc/taos/" +#define TD_DATA_DIR_PATH "/usr/local/var/lib/taos/" +#define TD_LOG_DIR_PATH "/usr/local/var/log/taos/" #else #define TD_TMP_DIR_PATH "/tmp/" +#define TD_CFG_DIR_PATH "/etc/taos/" +#define TD_DATA_DIR_PATH "/var/lib/taos/" +#define TD_LOG_DIR_PATH "/var/log/taos/" #endif typedef struct TdDir *TdDirPtr; diff --git a/include/util/taoserror.h b/include/util/taoserror.h index a924719cf9d1355ce745267b39481b9dbd349faf..7335d174a4cf67917f58d4219a5f576a4c3cff86 100644 --- a/include/util/taoserror.h +++ b/include/util/taoserror.h @@ -69,6 +69,8 @@ int32_t* taosGetErrno(); #define TSDB_CODE_DUP_KEY TAOS_DEF_ERROR_CODE(0, 0x0027) #define TSDB_CODE_NEED_RETRY TAOS_DEF_ERROR_CODE(0, 0x0028) #define 
TSDB_CODE_OUT_OF_RPC_MEMORY_QUEUE TAOS_DEF_ERROR_CODE(0, 0x0029) +#define TSDB_CODE_INVALID_TIMESTAMP TAOS_DEF_ERROR_CODE(0, 0x0030) +#define TSDB_CODE_MSG_DECODE_ERROR TAOS_DEF_ERROR_CODE(0, 0x0031) #define TSDB_CODE_REF_NO_MEMORY TAOS_DEF_ERROR_CODE(0, 0x0040) #define TSDB_CODE_REF_FULL TAOS_DEF_ERROR_CODE(0, 0x0041) @@ -83,6 +85,7 @@ int32_t* taosGetErrno(); #define TSDB_CODE_RPC_NETWORK_UNAVAIL TAOS_DEF_ERROR_CODE(0, 0x0102) #define TSDB_CODE_RPC_FQDN_ERROR TAOS_DEF_ERROR_CODE(0, 0x0103) #define TSDB_CODE_RPC_PORT_EADDRINUSE TAOS_DEF_ERROR_CODE(0, 0x0104) +#define TSDB_CODE_RPC_INDIRECT_NETWORK_UNAVAIL TAOS_DEF_ERROR_CODE(0, 0x0105) //client #define TSDB_CODE_TSC_INVALID_OPERATION TAOS_DEF_ERROR_CODE(0, 0x0200) @@ -181,7 +184,7 @@ int32_t* taosGetErrno(); #define TSDB_CODE_MND_BNODE_ALREADY_EXIST TAOS_DEF_ERROR_CODE(0, 0x0356) #define TSDB_CODE_MND_BNODE_NOT_EXIST TAOS_DEF_ERROR_CODE(0, 0x0357) #define TSDB_CODE_MND_TOO_FEW_MNODES TAOS_DEF_ERROR_CODE(0, 0x0358) -#define TSDB_CODE_MND_MNODE_DEPLOYED TAOS_DEF_ERROR_CODE(0, 0x0359) +#define TSDB_CODE_MND_TOO_MANY_MNODES TAOS_DEF_ERROR_CODE(0, 0x0359) #define TSDB_CODE_MND_CANT_DROP_MASTER TAOS_DEF_ERROR_CODE(0, 0x035A) // mnode-acct @@ -268,6 +271,7 @@ int32_t* taosGetErrno(); #define TSDB_CODE_MND_OFFSET_NOT_EXIST TAOS_DEF_ERROR_CODE(0, 0x03E9) #define TSDB_CODE_MND_CONSUMER_NOT_READY TAOS_DEF_ERROR_CODE(0, 0x03EA) #define TSDB_CODE_MND_TOPIC_SUBSCRIBED TAOS_DEF_ERROR_CODE(0, 0x03EB) +#define TSDB_CODE_MND_CGROUP_USED TAOS_DEF_ERROR_CODE(0, 0x03EC) // mnode-stream #define TSDB_CODE_MND_STREAM_ALREADY_EXIST TAOS_DEF_ERROR_CODE(0, 0x03F0) @@ -642,6 +646,7 @@ int32_t* taosGetErrno(); #define TSDB_CODE_PAR_INVALID_DROP_COL TAOS_DEF_ERROR_CODE(0, 0x2651) #define TSDB_CODE_PAR_INVALID_COL_JSON TAOS_DEF_ERROR_CODE(0, 0x2652) #define TSDB_CODE_PAR_VALUE_TOO_LONG TAOS_DEF_ERROR_CODE(0, 0x2653) +#define TSDB_CODE_PAR_INTERNAL_ERROR TAOS_DEF_ERROR_CODE(0, 0x2654) //planner #define TSDB_CODE_PLAN_INTERNAL_ERROR TAOS_DEF_ERROR_CODE(0, 0x2700) @@ -653,7 +658,7 @@ int32_t* taosGetErrno(); #define TSDB_CODE_FUNC_FUNTION_PARA_NUM TAOS_DEF_ERROR_CODE(0, 0x2801) #define TSDB_CODE_FUNC_FUNTION_PARA_TYPE TAOS_DEF_ERROR_CODE(0, 0x2802) #define TSDB_CODE_FUNC_FUNTION_PARA_VALUE TAOS_DEF_ERROR_CODE(0, 0x2803) -#define TSDB_CODE_FUNC_INVALID_FUNTION TAOS_DEF_ERROR_CODE(0, 0x2804) +#define TSDB_CODE_FUNC_NOT_BUILTIN_FUNTION TAOS_DEF_ERROR_CODE(0, 0x2804) //udf #define TSDB_CODE_UDF_STOPPING TAOS_DEF_ERROR_CODE(0, 0x2901) @@ -670,6 +675,7 @@ int32_t* taosGetErrno(); #define TSDB_CODE_SML_INVALID_PROTOCOL_TYPE TAOS_DEF_ERROR_CODE(0, 0x3000) #define TSDB_CODE_SML_INVALID_PRECISION_TYPE TAOS_DEF_ERROR_CODE(0, 0x3001) #define TSDB_CODE_SML_INVALID_DATA TAOS_DEF_ERROR_CODE(0, 0x3002) +#define TSDB_CODE_SML_INVALID_DB_CONF TAOS_DEF_ERROR_CODE(0, 0x3003) #ifdef __cplusplus } diff --git a/include/util/tdef.h b/include/util/tdef.h index cbbf3b8ff53433f9bee89a46e3ac2a75b7869347..0ae22d195395f6225dddf33c52a99046ad41354d 100644 --- a/include/util/tdef.h +++ b/include/util/tdef.h @@ -209,7 +209,7 @@ typedef enum ELogicConditionType { #define TSDB_INDEX_FNAME_LEN (TSDB_DB_FNAME_LEN + TSDB_INDEX_NAME_LEN + TSDB_NAME_DELIMITER_LEN) #define TSDB_TYPE_STR_MAX_LEN 32 #define TSDB_TABLE_FNAME_LEN (TSDB_DB_FNAME_LEN + TSDB_TABLE_NAME_LEN + TSDB_NAME_DELIMITER_LEN) -#define TSDB_TOPIC_FNAME_LEN TSDB_TABLE_FNAME_LEN +#define TSDB_TOPIC_FNAME_LEN (TSDB_ACCT_ID_LEN + TSDB_TABLE_NAME_LEN + TSDB_NAME_DELIMITER_LEN) #define TSDB_STREAM_FNAME_LEN TSDB_TABLE_FNAME_LEN #define 
TSDB_SUBSCRIBE_KEY_LEN (TSDB_CGROUP_LEN + TSDB_TOPIC_FNAME_LEN + 2) #define TSDB_PARTITION_KEY_LEN (TSDB_SUBSCRIBE_KEY_LEN + 20) @@ -247,13 +247,13 @@ typedef enum ELogicConditionType { #define TSDB_EP_LEN (TSDB_FQDN_LEN + 6) #define TSDB_IPv4ADDR_LEN 16 #define TSDB_FILENAME_LEN 128 -#define TSDB_SHOW_SQL_LEN 512 +#define TSDB_SHOW_SQL_LEN 1024 #define TSDB_SLOW_QUERY_SQL_LEN 512 #define TSDB_SHOW_SUBQUERY_LEN 1000 #define TSDB_TRANS_STAGE_LEN 12 #define TSDB_TRANS_TYPE_LEN 16 -#define TSDB_TRANS_ERROR_LEN 64 +#define TSDB_TRANS_ERROR_LEN 512 #define TSDB_STEP_NAME_LEN 32 #define TSDB_STEP_DESC_LEN 128 @@ -342,11 +342,8 @@ typedef enum ELogicConditionType { #define TSDB_DEFAULT_DB_SCHEMALESS TSDB_DB_SCHEMALESS_OFF #define TSDB_MIN_ROLLUP_FILE_FACTOR 0 -#define TSDB_MAX_ROLLUP_FILE_FACTOR 1 +#define TSDB_MAX_ROLLUP_FILE_FACTOR 10 #define TSDB_DEFAULT_ROLLUP_FILE_FACTOR 0.1 -#define TSDB_MIN_ROLLUP_DELAY 1 -#define TSDB_MAX_ROLLUP_DELAY 10 -#define TSDB_DEFAULT_ROLLUP_DELAY 2 #define TSDB_MIN_TABLE_TTL 0 #define TSDB_DEFAULT_TABLE_TTL 0 @@ -368,7 +365,11 @@ typedef enum ELogicConditionType { #define PRIMARYKEY_TIMESTAMP_COL_ID 1 #define COL_REACH_END(colId, maxColId) ((colId) > (maxColId)) +#ifdef WINDOWS +#define TSDB_MAX_RPC_THREADS 4 // Windows pipes only support 4 connections. +#else #define TSDB_MAX_RPC_THREADS 5 +#endif #define TSDB_QUERY_TYPE_NON_TYPE 0x00u // none type #define TSDB_QUERY_TYPE_FREE_RESOURCE 0x01u // free qhandle at vnode diff --git a/include/util/tencode.h b/include/util/tencode.h index cbacd59fa7873c4cb05b8fdaefb321ae3f854e5b..a13afd44480eef8397befb42c2fe2a12c322b01e 100644 --- a/include/util/tencode.h +++ b/include/util/tencode.h @@ -378,14 +378,16 @@ static FORCE_INLINE int32_t tDecodeDouble(SDecoder* pCoder, double* val) { } static FORCE_INLINE int32_t tDecodeBinary(SDecoder* pCoder, uint8_t** val, uint32_t* len) { - if (tDecodeU32v(pCoder, len) < 0) return -1; + uint32_t length = 0; + if (tDecodeU32v(pCoder, &length) < 0) return -1; + if (len) *len = length; - if (TD_CODER_CHECK_CAPACITY_FAILED(pCoder, *len)) return -1; + if (TD_CODER_CHECK_CAPACITY_FAILED(pCoder, length)) return -1; if (val) { *val = (uint8_t*)TD_CODER_CURRENT(pCoder); } - TD_CODER_MOVE_POS(pCoder, *len); + TD_CODER_MOVE_POS(pCoder, length); return 0; } @@ -410,14 +412,16 @@ static int32_t tDecodeCStrTo(SDecoder* pCoder, char* val) { } static FORCE_INLINE int32_t tDecodeBinaryAlloc(SDecoder* pCoder, void** val, uint64_t* len) { - if (tDecodeU64v(pCoder, len) < 0) return -1; + uint64_t length = 0; + if (tDecodeU64v(pCoder, &length) < 0) return -1; + if (len) *len = length; - if (TD_CODER_CHECK_CAPACITY_FAILED(pCoder, *len)) return -1; - *val = taosMemoryMalloc(*len); + if (TD_CODER_CHECK_CAPACITY_FAILED(pCoder, length)) return -1; + *val = taosMemoryMalloc(length); if (*val == NULL) return -1; - memcpy(*val, TD_CODER_CURRENT(pCoder), *len); + memcpy(*val, TD_CODER_CURRENT(pCoder), length); - TD_CODER_MOVE_POS(pCoder, *len); + TD_CODER_MOVE_POS(pCoder, length); return 0; } @@ -530,6 +534,26 @@ static FORCE_INLINE int32_t tPutI64(uint8_t* p, int64_t v) { return sizeof(int64_t); } +static FORCE_INLINE int32_t tPutFloat(uint8_t* p, float f) { + union { + uint32_t ui; + float f; + } v; + v.f = f; + + return tPutU32(p, v.ui); +} + +static FORCE_INLINE int32_t tPutDouble(uint8_t* p, double d) { + union { + uint64_t ui; + double d; + } v; + v.d = d; + + return tPutU64(p, v.ui); +} + static FORCE_INLINE int32_t tPutU16v(uint8_t* p, uint16_t v) { tPutV(p, v); } static FORCE_INLINE int32_t tPutI16v(uint8_t* 
p, int16_t v) { return tPutU16v(p, ZIGZAGE(int16_t, v)); } @@ -619,6 +643,34 @@ static FORCE_INLINE int32_t tGetI64v(uint8_t* p, int64_t* v) { return n; } +static FORCE_INLINE int32_t tGetFloat(uint8_t* p, float* f) { + int32_t n = 0; + + union { + uint32_t ui; + float f; + } v; + + n = tGetU32(p, &v.ui); + + *f = v.f; + return n; +} + +static FORCE_INLINE int32_t tGetDouble(uint8_t* p, double* d) { + int32_t n = 0; + + union { + uint64_t ui; + double d; + } v; + + n = tGetU64(p, &v.ui); + + *d = v.d; + return n; +} + // ===================== static FORCE_INLINE int32_t tPutBinary(uint8_t* p, uint8_t* pData, uint32_t nData) { int n = 0; @@ -642,6 +694,11 @@ static FORCE_INLINE int32_t tGetBinary(uint8_t* p, uint8_t** ppData, uint32_t* n return n; } +static FORCE_INLINE int32_t tPutCStr(uint8_t* p, char* pData) { + return tPutBinary(p, (uint8_t*)pData, strlen(pData) + 1); +} +static FORCE_INLINE int32_t tGetCStr(uint8_t* p, char** ppData) { return tGetBinary(p, (uint8_t**)ppData, NULL); } + #ifdef __cplusplus } #endif diff --git a/include/util/tlist.h b/include/util/tlist.h index 43833d7ecd84f09643546f3f3fa838edbd1dabf1..1954bda145a48f249875bda8ea3389b4fbed22be 100644 --- a/include/util/tlist.h +++ b/include/util/tlist.h @@ -229,7 +229,7 @@ int32_t tdListAppend(SList *list, void *data); SListNode *tdListPopHead(SList *list); SListNode *tdListPopTail(SList *list); SListNode *tdListGetHead(SList *list); -SListNode *tsListGetTail(SList *list); +SListNode *tdListGetTail(SList *list); SListNode *tdListPopNode(SList *list, SListNode *node); void tdListMove(SList *src, SList *dst); void tdListDiscard(SList *list); diff --git a/include/util/tlog.h b/include/util/tlog.h index 47ac01aacfafc71d5f2ebd48f16c0d22b1c2d0eb..988d9c6890832d17a7e9acd2b496e3ef6ba63d90 100644 --- a/include/util/tlog.h +++ b/include/util/tlog.h @@ -62,6 +62,7 @@ extern int32_t fsDebugFlag; extern int32_t metaDebugFlag; extern int32_t fnDebugFlag; extern int32_t smaDebugFlag; +extern int32_t idxDebugFlag; int32_t taosInitLog(const char *logName, int32_t maxFiles); void taosCloseLog(); diff --git a/include/util/tqueue.h b/include/util/tqueue.h index dbc4d03177e4c489240c04aac37710ce995102d4..466c577c0079d07774722ff2efdd30bf207e0fc3 100644 --- a/include/util/tqueue.h +++ b/include/util/tqueue.h @@ -46,6 +46,7 @@ typedef struct { void *ahandle; int32_t workerId; int32_t threadNum; + int64_t timestamp; } SQueueInfo; typedef enum { @@ -80,7 +81,7 @@ int32_t taosAddIntoQset(STaosQset *qset, STaosQueue *queue, void *ahandle); void taosRemoveFromQset(STaosQset *qset, STaosQueue *queue); int32_t taosGetQueueNumber(STaosQset *qset); -int32_t taosReadQitemFromQset(STaosQset *qset, void **ppItem, void **ahandle, FItem *itemFp); +int32_t taosReadQitemFromQset(STaosQset *qset, void **ppItem, int64_t *ts, void **ahandle, FItem *itemFp); int32_t taosReadAllQitemsFromQset(STaosQset *qset, STaosQall *qall, void **ahandle, FItems *itemsFp); void taosResetQsetThread(STaosQset *qset, void *pItem); diff --git a/include/util/ttimer.h b/include/util/ttimer.h index 10222596319f445c980e5a03b9ded91a3ca9ce4e..4111a8ca28375cbcf45f60512da06802eeb22669 100644 --- a/include/util/ttimer.h +++ b/include/util/ttimer.h @@ -31,16 +31,16 @@ extern int32_t taosTmrThreads; void *taosTmrInit(int32_t maxTmr, int32_t resoultion, int32_t longest, const char *label); +void taosTmrCleanUp(void *handle); + tmr_h taosTmrStart(TAOS_TMR_CALLBACK fp, int32_t mseconds, void *param, void *handle); bool taosTmrStop(tmr_h tmrId); -bool taosTmrStopA(tmr_h *timerId); +bool 
taosTmrStopA(tmr_h *tmrId); bool taosTmrReset(TAOS_TMR_CALLBACK fp, int32_t mseconds, void *param, void *handle, tmr_h *pTmrId); -void taosTmrCleanUp(void *handle); - #ifdef __cplusplus } #endif diff --git a/packaging/docker/Dockerfile b/packaging/docker/Dockerfile index 26349e257676d99d0ea81e03509c8b09c20a2248..35bea0e65ccc5070fe9d4e82adadc7132ae7cc81 100644 --- a/packaging/docker/Dockerfile +++ b/packaging/docker/Dockerfile @@ -1,32 +1,25 @@ -FROM ubuntu:18.04 - -WORKDIR /root - -ARG pkgFile -ARG dirName -ARG cpuType -RUN echo ${pkgFile} && echo ${dirName} - -COPY ${pkgFile} /root/ -RUN tar -zxf ${pkgFile} -WORKDIR /root/ -RUN cd /root/${dirName}/ && /bin/bash install.sh -e no && cd /root -RUN rm /root/${pkgFile} -RUN rm -rf /root/${dirName} - -ENV DEBIAN_FRONTEND=noninteractive -RUN apt-get clean && apt-get update && apt-get install -y locales tzdata netcat && locale-gen en_US.UTF-8 -ENV LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/usr/lib" \ - LC_CTYPE=en_US.UTF-8 \ - LANG=en_US.UTF-8 \ - LC_ALL=en_US.UTF-8 - -COPY ./bin/* /usr/bin/ - -ENV TINI_VERSION v0.19.0 -RUN bash -c 'echo -e "Downloading tini-${cpuType} ..."' -ADD https://github.com/krallin/tini/releases/download/${TINI_VERSION}/tini-${cpuType} /tini -RUN chmod +x /tini -ENTRYPOINT ["/tini", "--", "/usr/bin/entrypoint.sh"] -CMD ["taosd"] -VOLUME [ "/var/lib/taos", "/var/log/taos", "/corefile" ] +FROM ubuntu:18.04 + +WORKDIR /root + +ARG pkgFile +ARG dirName +ARG cpuType +RUN echo ${pkgFile} && echo ${dirName} + +COPY ${pkgFile} /root/ +ENV TINI_VERSION v0.19.0 +ADD https://github.com/krallin/tini/releases/download/${TINI_VERSION}/tini-${cpuType} /tini +ENV DEBIAN_FRONTEND=noninteractive +WORKDIR /root/ +RUN tar -zxf ${pkgFile} && cd /root/${dirName}/ && /bin/bash install.sh -e no && cd /root && rm /root/${pkgFile} && rm -rf /root/${dirName} && apt-get update && apt-get install -y locales tzdata netcat && locale-gen en_US.UTF-8 && apt-get clean && rm -rf /var/lib/apt/lists/ && chmod +x /tini + +ENV LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/usr/lib" \ + LC_CTYPE=en_US.UTF-8 \ + LANG=en_US.UTF-8 \ + LC_ALL=en_US.UTF-8 +COPY ./bin/* /usr/bin/ + +ENTRYPOINT ["/tini", "--", "/usr/bin/entrypoint.sh"] +CMD ["taosd"] +VOLUME [ "/var/lib/taos", "/var/log/taos", "/corefile" ] diff --git a/packaging/docker/bin/entrypoint.sh b/packaging/docker/bin/entrypoint.sh index 5fb441004d8b454de1039eb3f4b23eb51f32be64..f4be349c0de0ea0df382fc6fee033120c5c48007 100644 --- a/packaging/docker/bin/entrypoint.sh +++ b/packaging/docker/bin/entrypoint.sh @@ -11,39 +11,22 @@ DISABLE_ADAPTER=${TAOS_DISABLE_ADAPTER:-0} unset TAOS_DISABLE_ADAPTER # to get mnodeEpSet from data dir -DATA_DIR=${TAOS_DATA_DIR:-/var/lib/taos} +DATA_DIR=$(taosd -C|grep -E 'dataDir.*(\S+)' -o |head -n1|sed 's/dataDir *//') +DATA_DIR=${DATA_DIR:-/var/lib/taos} -# append env to custom taos.cfg -CFG_DIR=/tmp/taos -CFG_FILE=$CFG_DIR/taos.cfg - -mkdir -p $CFG_DIR >/dev/null 2>&1 - -[ -f /etc/taos/taos.cfg ] && cat /etc/taos/taos.cfg | grep -E -v "^#|^\s*$" >$CFG_FILE -env-to-cfg >>$CFG_FILE - -FQDN=$(cat $CFG_FILE | grep -E -v "^#|^$" | grep fqdn | tail -n1 | sed -E 's/.*fqdn\s+//') +FQDN=$(taosd -C|grep -E 'fqdn.*(\S+)' -o |head -n1|sed 's/fqdn *//') # ensure the fqdn is resolved as localhost grep "$FQDN" /etc/hosts >/dev/null || echo "127.0.0.1 $FQDN" >>/etc/hosts - +FIRSET_EP=$(taosd -C|grep -E 'firstEp.*(\S+)' -o |head -n1|sed 's/firstEp *//') # parse first ep host and port -FIRST_EP_HOST=${TAOS_FIRST_EP%:*} -FIRST_EP_PORT=${TAOS_FIRST_EP#*:} +FIRST_EP_HOST=${FIRSET_EP%:*} +FIRST_EP_PORT=${FIRSET_EP#*:} # 
in case of custom server port -SERVER_PORT=$(cat $CFG_FILE | grep -E -v "^#|^$" | grep serverPort | tail -n1 | sed -E 's/.*serverPort\s+//') +SERVER_PORT=$(taosd -C|grep -E 'serverPort.*(\S+)' -o |head -n1|sed 's/serverPort *//') SERVER_PORT=${SERVER_PORT:-6030} -# for other binaries like interpreters -if echo $1 | grep -E "taosd$" - >/dev/null; then - true # will run taosd -else - cp -f $CFG_FILE /etc/taos/taos.cfg || true - $@ - exit $? -fi - set +e ulimit -c unlimited # set core files pattern, maybe failed @@ -62,22 +45,23 @@ fi # if has mnode ep set or the host is first ep or not for cluster, just start. if [ -f "$DATA_DIR/dnode/mnodeEpSet.json" ] || [ "$TAOS_FQDN" = "$FIRST_EP_HOST" ]; then - $@ -c $CFG_DIR + $@ # others will first wait the first ep ready. else if [ "$TAOS_FIRST_EP" = "" ]; then echo "run TDengine with single node." - $@ -c $CFG_DIR + $@ exit $? fi while true; do - es=0 - taos -h $FIRST_EP_HOST -P $FIRST_EP_PORT -n startup >/dev/null || es=$? - if [ "$es" -eq 0 ]; then + es=$(taos -h $FIRST_EP_HOST -P $FIRST_EP_PORT --check) + echo ${es} + if [ "${es%%:*}" -eq 2 ]; then + echo "execute create dnode" taos -h $FIRST_EP_HOST -P $FIRST_EP_PORT -s "create dnode \"$FQDN:$SERVER_PORT\";" break fi sleep 1s done - $@ -c $CFG_DIR + $@ fi diff --git a/packaging/docker/bin/taos-check b/packaging/docker/bin/taos-check new file mode 100644 index 0000000000000000000000000000000000000000..5dc06b6018b93b627610b446ca6363773fd0fd72 --- /dev/null +++ b/packaging/docker/bin/taos-check @@ -0,0 +1,8 @@ +#!/bin/sh +es=$(taos --check) +code=${es%%:*} +if [ "$code" -ne "0" ] && [ "$code" -ne "4" ]; then + exit 0 +fi +echo $es +exit 1 diff --git a/source/client/inc/clientInt.h b/source/client/inc/clientInt.h index d9f33510088cf228215edf0f77368334edd4b956..3ff61eca25fa54692569ff2db9a53813ce6b7a36 100644 --- a/source/client/inc/clientInt.h +++ b/source/client/inc/clientInt.h @@ -119,6 +119,8 @@ typedef struct SHeartBeatInfo { struct SAppInstInfo { int64_t numOfConns; SCorEpSet mgmtEp; + TdThreadMutex qnodeMutex; + SArray* pQnodeList; SInstanceSummary summary; SList* pConnList; // STscObj linked list uint64_t clusterId; @@ -149,6 +151,7 @@ typedef struct STscObj { int32_t numOfReqs; // number of sqlObj bound to this connection SAppInstInfo* pAppInfo; SHashObj* pRequests; + int8_t schemalessType; } STscObj; typedef struct SResultColumn { @@ -160,6 +163,7 @@ typedef struct SResultColumn { } SResultColumn; typedef struct SReqResultInfo { + SQueryExecRes execRes; const char* pRspMsg; const char* pData; TAOS_FIELD* fields; // todo, column names are not needed. 
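Note on the SAppInstInfo changes above: the new qnodeMutex/pQnodeList pair is a per-app cache of qnode load information, and every reader is expected to take the mutex and work on a private copy. A minimal sketch of the read side, assuming only the fields this hunk introduces plus the taosArrayDup/taosThreadMutex helpers already used elsewhere in this patch (cloneQnodeList itself is illustrative, not part of the patch):

/* Sketch: access pattern for the new qnode cache. A NULL result means the
 * cache is cold and the caller should fall back to catalogGetQnodeList(). */
static SArray* cloneQnodeList(SAppInstInfo* pInfo) {
  SArray* pList = NULL;
  taosThreadMutexLock(&pInfo->qnodeMutex);
  if (pInfo->pQnodeList != NULL) {
    pList = taosArrayDup(pInfo->pQnodeList);  // hand the caller a private copy
  }
  taosThreadMutexUnlock(&pInfo->qnodeMutex);
  return pList;
}

The same lock-copy-unlock pattern appears in getQnodeList()/updateQnodeList() in clientImpl.c later in this patch.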
@@ -189,6 +193,7 @@ typedef struct SRequestSendRecvBody { typedef struct { int8_t resType; char topic[TSDB_TOPIC_FNAME_LEN]; + char db[TSDB_DB_FNAME_LEN]; int32_t vgId; SSchemaWrapper schema; int32_t resIter; @@ -217,7 +222,8 @@ typedef struct SRequestObj { void* doFetchRows(SRequestObj* pRequest, bool setupOneRowPtr, bool convertUcs4); void doSetOneRowPtr(SReqResultInfo* pResultInfo); void setResPrecision(SReqResultInfo* pResInfo, int32_t precision); -int32_t setQueryResultFromRsp(SReqResultInfo* pResultInfo, const SRetrieveTableRsp* pRsp, bool convertUcs4); +int32_t setQueryResultFromRsp(SReqResultInfo* pResultInfo, const SRetrieveTableRsp* pRsp, bool convertUcs4, + bool freeAfterUse); void setResSchemaInfo(SReqResultInfo* pResInfo, const SSchema* pSchema, int32_t numOfCols); void doFreeReqResultInfo(SReqResultInfo* pResInfo); @@ -239,7 +245,7 @@ static FORCE_INLINE SReqResultInfo* tmqGetNextResInfo(TAOS_RES* res, bool conver taosMemoryFreeClear(msg->resInfo.length); taosMemoryFreeClear(msg->resInfo.convertBuf); } - setQueryResultFromRsp(&msg->resInfo, pRetrieve, convertUcs4); + setQueryResultFromRsp(&msg->resInfo, pRetrieve, convertUcs4, false); return &msg->resInfo; } return NULL; @@ -290,7 +296,7 @@ SRequestObj* launchQuery(STscObj* pTscObj, const char* sql, int sqlLen); int32_t parseSql(SRequestObj* pRequest, bool topicQuery, SQuery** pQuery, SStmtCallback* pStmtCb); -int32_t getPlan(SRequestObj* pRequest, SQuery* pQuery, SQueryPlan** pPlan, SArray* pNodeList); +int32_t getPlan(SRequestObj* pRequest, SQuery* pQuery, SQueryPlan** pPlan, SArray** pNodeList); int32_t buildRequest(STscObj* pTscObj, const char* sql, int sqlLen, SRequestObj** pRequest); @@ -315,8 +321,9 @@ void hbMgrInitMqHbRspHandle(); SRequestObj* launchQueryImpl(SRequestObj* pRequest, SQuery* pQuery, int32_t code, bool keepQuery, void** res); int32_t getQueryPlan(SRequestObj* pRequest, SQuery* pQuery, SArray** pNodeList); -int32_t scheduleQuery(SRequestObj* pRequest, SQueryPlan* pDag, SArray* pNodeList, void** res); +int32_t scheduleQuery(SRequestObj* pRequest, SQueryPlan* pDag, SArray* pNodeList); int32_t refreshMeta(STscObj* pTscObj, SRequestObj* pRequest); +int32_t updateQnodeList(SAppInstInfo* pInfo, SArray* pNodeList); #ifdef __cplusplus } diff --git a/source/client/inc/clientStmt.h b/source/client/inc/clientStmt.h index f0c9dcd67dd8e3b05775003221ddf86681da37ab..936fb92fc4019842485e7051abf161aee8a7d858 100644 --- a/source/client/inc/clientStmt.h +++ b/source/client/inc/clientStmt.h @@ -116,8 +116,11 @@ int stmtAffectedRowsOnce(TAOS_STMT *stmt); int stmtPrepare(TAOS_STMT *stmt, const char *sql, unsigned long length); int stmtSetTbName(TAOS_STMT *stmt, const char *tbName); int stmtSetTbTags(TAOS_STMT *stmt, TAOS_MULTI_BIND *tags); +int stmtGetTagFields(TAOS_STMT* stmt, int* nums, TAOS_FIELD_E** fields); +int stmtGetColFields(TAOS_STMT* stmt, int* nums, TAOS_FIELD_E** fields); int stmtIsInsert(TAOS_STMT *stmt, int *insert); int stmtGetParamNum(TAOS_STMT *stmt, int *nums); +int stmtGetParam(TAOS_STMT *stmt, int idx, int *type, int *bytes); int stmtAddBatch(TAOS_STMT *stmt); TAOS_RES *stmtUseResult(TAOS_STMT *stmt); int stmtBindBatch(TAOS_STMT *stmt, TAOS_MULTI_BIND *bind, int32_t colIdx); diff --git a/source/client/src/clientEnv.c b/source/client/src/clientEnv.c index 669b2bc97eb3e6fab04701aebbf80402432b44c1..19847d9aa2fd04bd8b3f7ff13c0d8462d3c2b9cf 100644 --- a/source/client/src/clientEnv.c +++ b/source/client/src/clientEnv.c @@ -161,6 +161,7 @@ void *createTscObj(const char *user, const char *auth, const char *db, 
int32_t c taosThreadMutexInit(&pObj->mutex, NULL); pObj->id = taosAddRef(clientConnRefPool, pObj); + pObj->schemalessType = 0; tscDebug("connObj created, 0x%" PRIx64, pObj->id); return pObj; @@ -234,6 +235,8 @@ static void doDestroyRequest(void *p) { taosArrayDestroy(pRequest->tableList); taosArrayDestroy(pRequest->dbList); + destroyQueryExecRes(&pRequest->body.resInfo.execRes); + deregisterRequest(pRequest); taosMemoryFreeClear(pRequest); } diff --git a/source/client/src/clientHb.c b/source/client/src/clientHb.c index a9c5cd06f668ba625dee6d13c44261ef2badf8bb..09c3d269c703d6e2dc78cbef49a7790c98f34245 100644 --- a/source/client/src/clientHb.c +++ b/source/client/src/clientHb.c @@ -120,7 +120,7 @@ static int32_t hbProcessStbInfoRsp(void *value, int32_t valueLen, struct SCatalo return TSDB_CODE_TSC_INVALID_VALUE; } - catalogUpdateSTableMeta(pCatalog, rsp); + catalogUpdateTableMeta(pCatalog, rsp); } } @@ -160,6 +160,10 @@ static int32_t hbQueryHbRspHandle(SAppHbMgr *pAppHbMgr, SClientHbRsp *pRsp) { taos_close(pTscObj); } + if (pRsp->query->pQnodeList) { + updateQnodeList(pTscObj->pAppInfo, pRsp->query->pQnodeList); + } + releaseTscObj(pRsp->connKey.tscRid); } } diff --git a/source/client/src/clientImpl.c b/source/client/src/clientImpl.c index 8819e3763ce2f1b9cb459f63327ead3d8976b496..0f0cf064d6facd3586a0dfc2bee1a4a9a7d743b3 100644 --- a/source/client/src/clientImpl.c +++ b/source/client/src/clientImpl.c @@ -118,6 +118,7 @@ TAOS* taos_connect_internal(const char* ip, const char* user, const char* pass, if (pInst == NULL) { p = taosMemoryCalloc(1, sizeof(struct SAppInstInfo)); p->mgmtEp = epSet; + taosThreadMutexInit(&p->qnodeMutex, NULL); p->pTransporter = openTransporter(user, secretEncrypt, tsNumOfCores); p->pAppHbMgr = appHbMgrInit(p, key); taosHashPut(appInfo.pInstMap, key, strlen(key), &p, POINTER_BYTES); @@ -175,6 +176,7 @@ int32_t parseSql(SRequestObj* pRequest, bool topicQuery, SQuery** pQuery, SStmtC .pTransporter = pTscObj->pAppInfo->pTransporter, .pStmtCb = pStmtCb, .pUser = pTscObj->user, + .schemalessType = pTscObj->schemalessType, .isSuperUser = (0 == strcmp(pTscObj->user, TSDB_DEFAULT_USER))}; cxt.mgmtEpSet = getEpSet_s(&pTscObj->pAppInfo->mgmtEp); @@ -202,7 +204,7 @@ int32_t execLocalCmd(SRequestObj* pRequest, SQuery* pQuery) { SRetrieveTableRsp* pRsp = NULL; int32_t code = qExecCommand(pQuery->pRoot, &pRsp); if (TSDB_CODE_SUCCESS == code && NULL != pRsp) { - code = setQueryResultFromRsp(&pRequest->body.resInfo, pRsp, false); + code = setQueryResultFromRsp(&pRequest->body.resInfo, pRsp, false, false); } return code; } @@ -228,7 +230,61 @@ int32_t execDdlQuery(SRequestObj* pRequest, SQuery* pQuery) { return TSDB_CODE_SUCCESS; } -int32_t getPlan(SRequestObj* pRequest, SQuery* pQuery, SQueryPlan** pPlan, SArray* pNodeList) { +int compareQueryNodeLoad(const void* elem1, const void* elem2) { + SQueryNodeLoad* node1 = (SQueryNodeLoad*)elem1; + SQueryNodeLoad* node2 = (SQueryNodeLoad*)elem2; + + if (node1->load < node2->load) { + return -1; + } + + return node1->load > node2->load; +} + +int32_t updateQnodeList(SAppInstInfo* pInfo, SArray* pNodeList) { + taosThreadMutexLock(&pInfo->qnodeMutex); + if (pInfo->pQnodeList) { + taosArrayDestroy(pInfo->pQnodeList); + pInfo->pQnodeList = NULL; + } + + if (pNodeList) { + pInfo->pQnodeList = taosArrayDup(pNodeList); + taosArraySort(pInfo->pQnodeList, compareQueryNodeLoad); + } + taosThreadMutexUnlock(&pInfo->qnodeMutex); + + return TSDB_CODE_SUCCESS; +} + +int32_t getQnodeList(SRequestObj* pRequest, SArray** pNodeList) { + SAppInstInfo* pInfo = 
pRequest->pTscObj->pAppInfo; + int32_t code = 0; + + taosThreadMutexLock(&pInfo->qnodeMutex); + if (pInfo->pQnodeList) { + *pNodeList = taosArrayDup(pInfo->pQnodeList); + } + taosThreadMutexUnlock(&pInfo->qnodeMutex); + + if (NULL == *pNodeList) { + SEpSet mgmtEpSet = getEpSet_s(&pRequest->pTscObj->pAppInfo->mgmtEp); + SCatalog* pCatalog = NULL; + code = catalogGetHandle(pRequest->pTscObj->pAppInfo->clusterId, &pCatalog); + if (TSDB_CODE_SUCCESS == code) { + *pNodeList = taosArrayInit(5, sizeof(SQueryNodeLoad)); + code = catalogGetQnodeList(pCatalog, pRequest->pTscObj->pAppInfo->pTransporter, &mgmtEpSet, *pNodeList); + } + + if (TSDB_CODE_SUCCESS == code && *pNodeList) { + code = updateQnodeList(pInfo, *pNodeList); + } + } + + return code; +} + +int32_t getPlan(SRequestObj* pRequest, SQuery* pQuery, SQueryPlan** pPlan, SArray** pNodeList) { pRequest->type = pQuery->msgType; SPlanContext cxt = {.queryId = pRequest->requestId, .acctId = pRequest->pTscObj->acctId, @@ -237,14 +293,10 @@ int32_t getPlan(SRequestObj* pRequest, SQuery* pQuery, SQueryPlan** pPlan, SArra .showRewrite = pQuery->showRewrite, .pMsg = pRequest->msgBuf, .msgLen = ERROR_MSG_BUF_DEFAULT_SIZE}; - SEpSet mgmtEpSet = getEpSet_s(&pRequest->pTscObj->pAppInfo->mgmtEp); - SCatalog* pCatalog = NULL; - int32_t code = catalogGetHandle(pRequest->pTscObj->pAppInfo->clusterId, &pCatalog); - if (TSDB_CODE_SUCCESS == code) { - code = catalogGetQnodeList(pCatalog, pRequest->pTscObj->pAppInfo->pTransporter, &mgmtEpSet, pNodeList); - } + + int32_t code = getQnodeList(pRequest, pNodeList); if (TSDB_CODE_SUCCESS == code) { - code = qCreateQueryPlan(&cxt, pPlan, pNodeList); + code = qCreateQueryPlan(&cxt, pPlan, *pNodeList); } return code; } @@ -289,28 +341,29 @@ void setResPrecision(SReqResultInfo* pResInfo, int32_t precision) { pResInfo->precision = precision; } -int32_t scheduleAsyncQuery(SRequestObj* pRequest, SQueryPlan* pDag, SArray* pNodeList, void** pRes) { +int32_t scheduleAsyncQuery(SRequestObj* pRequest, SQueryPlan* pDag, SArray* pNodeList) { void* pTransporter = pRequest->pTscObj->pAppInfo->pTransporter; - + tsem_init(&schdRspSem, 0, 0); SQueryResult res = {.code = 0, .numOfRows = 0}; int32_t code = schedulerAsyncExecJob(pTransporter, pNodeList, pDag, &pRequest->body.queryJob, pRequest->sqlstr, pRequest->metric.start, schdExecCallback, &res); + + pRequest->body.resInfo.execRes = res.res; + while (true) { if (code != TSDB_CODE_SUCCESS) { if (pRequest->body.queryJob != 0) { schedulerFreeJob(pRequest->body.queryJob); } - *pRes = res.res; - pRequest->code = code; terrno = code; return pRequest->code; } else { tsem_wait(&schdRspSem); - + if (res.code) { code = res.code; } else { @@ -327,27 +380,25 @@ int32_t scheduleAsyncQuery(SRequestObj* pRequest, SQueryPlan* pDag, SArray* pNod } } - *pRes = res.res; - pRequest->code = res.code; terrno = res.code; return pRequest->code; } - -int32_t scheduleQuery(SRequestObj* pRequest, SQueryPlan* pDag, SArray* pNodeList, void** pRes) { +int32_t scheduleQuery(SRequestObj* pRequest, SQueryPlan* pDag, SArray* pNodeList) { void* pTransporter = pRequest->pTscObj->pAppInfo->pTransporter; SQueryResult res = {.code = 0, .numOfRows = 0}; int32_t code = schedulerExecJob(pTransporter, pNodeList, pDag, &pRequest->body.queryJob, pRequest->sqlstr, pRequest->metric.start, &res); + + pRequest->body.resInfo.execRes = res.res; + if (code != TSDB_CODE_SUCCESS) { if (pRequest->body.queryJob != 0) { schedulerFreeJob(pRequest->body.queryJob); } - *pRes = res.res; - pRequest->code = code; terrno = code; return 
pRequest->code; @@ -361,92 +412,118 @@ int32_t scheduleQuery(SRequestObj* pRequest, SQueryPlan* pDag, SArray* pNodeList } } - *pRes = res.res; - pRequest->code = res.code; terrno = res.code; return pRequest->code; } int32_t getQueryPlan(SRequestObj* pRequest, SQuery* pQuery, SArray** pNodeList) { - *pNodeList = taosArrayInit(4, sizeof(struct SQueryNodeAddr)); - return getPlan(pRequest, pQuery, &pRequest->body.pDag, *pNodeList); + return getPlan(pRequest, pQuery, &pRequest->body.pDag, pNodeList); } -int32_t validateSversion(SRequestObj* pRequest, void* res) { - SArray* pArray = NULL; +int32_t handleSubmitExecRes(SRequestObj* pRequest, void* res, SCatalog* pCatalog, SEpSet *epset) { int32_t code = 0; - - if (TDMT_VND_SUBMIT == pRequest->type) { - SSubmitRsp* pRsp = (SSubmitRsp*)res; - if (pRsp->nBlocks <= 0) { - return TSDB_CODE_SUCCESS; + SArray* pArray = NULL; + SSubmitRsp* pRsp = (SSubmitRsp*)res; + if (pRsp->nBlocks <= 0) { + return TSDB_CODE_SUCCESS; + } + + pArray = taosArrayInit(pRsp->nBlocks, sizeof(STbSVersion)); + if (NULL == pArray) { + terrno = TSDB_CODE_OUT_OF_MEMORY; + return TSDB_CODE_OUT_OF_MEMORY; + } + + for (int32_t i = 0; i < pRsp->nBlocks; ++i) { + SSubmitBlkRsp* blk = pRsp->pBlocks + i; + if (NULL == blk->tblFName || 0 == blk->tblFName[0]) { + continue; } + + STbSVersion tbSver = {.tbFName = blk->tblFName, .sver = blk->sver}; + taosArrayPush(pArray, &tbSver); + } - pArray = taosArrayInit(pRsp->nBlocks, sizeof(STbSVersion)); - if (NULL == pArray) { - terrno = TSDB_CODE_OUT_OF_MEMORY; - return TSDB_CODE_OUT_OF_MEMORY; - } + code = catalogChkTbMetaVersion(pCatalog, pRequest->pTscObj->pAppInfo->pTransporter, epset, pArray); - for (int32_t i = 0; i < pRsp->nBlocks; ++i) { - SSubmitBlkRsp* blk = pRsp->pBlocks + i; - if (NULL == blk->tblFName || 0 == blk->tblFName[0]) { - continue; - } - - STbSVersion tbSver = {.tbFName = blk->tblFName, .sver = blk->sver}; - taosArrayPush(pArray, &tbSver); - } - } else if (TDMT_VND_QUERY == pRequest->type) { - SArray* pTbArray = (SArray*)res; - int32_t tbNum = taosArrayGetSize(pTbArray); - if (tbNum <= 0) { - return TSDB_CODE_SUCCESS; - } +_return: - pArray = taosArrayInit(tbNum, sizeof(STbSVersion)); - if (NULL == pArray) { - terrno = TSDB_CODE_OUT_OF_MEMORY; - return TSDB_CODE_OUT_OF_MEMORY; - } + taosArrayDestroy(pArray); + return code; +} - for (int32_t i = 0; i < tbNum; ++i) { - STbVerInfo* tbInfo = taosArrayGet(pTbArray, i); - STbSVersion tbSver = {.tbFName = tbInfo->tbFName, .sver = tbInfo->sversion, .tver = tbInfo->tversion}; - taosArrayPush(pArray, &tbSver); - } +int32_t handleQueryExecRes(SRequestObj* pRequest, void* res, SCatalog* pCatalog, SEpSet *epset) { + int32_t code = 0; + SArray* pArray = NULL; + SArray* pTbArray = (SArray*)res; + int32_t tbNum = taosArrayGetSize(pTbArray); + if (tbNum <= 0) { + return TSDB_CODE_SUCCESS; + } + + pArray = taosArrayInit(tbNum, sizeof(STbSVersion)); + if (NULL == pArray) { + terrno = TSDB_CODE_OUT_OF_MEMORY; + return TSDB_CODE_OUT_OF_MEMORY; + } + + for (int32_t i = 0; i < tbNum; ++i) { + STbVerInfo* tbInfo = taosArrayGet(pTbArray, i); + STbSVersion tbSver = {.tbFName = tbInfo->tbFName, .sver = tbInfo->sversion, .tver = tbInfo->tversion}; + taosArrayPush(pArray, &tbSver); } - SCatalog* pCatalog = NULL; - CHECK_CODE_GOTO(catalogGetHandle(pRequest->pTscObj->pAppInfo->clusterId, &pCatalog), _return); - - SEpSet epset = getEpSet_s(&pRequest->pTscObj->pAppInfo->mgmtEp); - - code = catalogChkTbMetaVersion(pCatalog, pRequest->pTscObj->pAppInfo->pTransporter, &epset, pArray); + code = 
catalogChkTbMetaVersion(pCatalog, pRequest->pTscObj->pAppInfo->pTransporter, epset, pArray); _return: taosArrayDestroy(pArray); + return code; +} - return code; +int32_t handleAlterTbExecRes(void* res, SCatalog* pCatalog) { + return catalogUpdateTableMeta(pCatalog, (STableMetaRsp*)res); } -void freeRequestRes(SRequestObj* pRequest, void* res) { - if (NULL == pRequest || NULL == res) { - return; +int32_t handleExecRes(SRequestObj* pRequest) { + if (NULL == pRequest->body.resInfo.execRes.res) { + return TSDB_CODE_SUCCESS; } - - if (TDMT_VND_SUBMIT == pRequest->type) { - tFreeSSubmitRsp((SSubmitRsp*)res); - } else if (TDMT_VND_QUERY == pRequest->type) { - taosArrayDestroy((SArray*)res); + + int32_t code = 0; + SCatalog* pCatalog = NULL; + code = catalogGetHandle(pRequest->pTscObj->pAppInfo->clusterId, &pCatalog); + if (code) { + return code; } + + SEpSet epset = getEpSet_s(&pRequest->pTscObj->pAppInfo->mgmtEp); + SQueryExecRes* pRes = &pRequest->body.resInfo.execRes; + + switch (pRes->msgType) { + case TDMT_VND_ALTER_TABLE: + case TDMT_MND_ALTER_STB: { + code = handleAlterTbExecRes(pRes->res, pCatalog); + break; + } + case TDMT_VND_SUBMIT: { + code = handleSubmitExecRes(pRequest, pRes->res, pCatalog, &epset); + break; + } + case TDMT_VND_QUERY: { + code = handleQueryExecRes(pRequest, pRes->res, pCatalog, &epset); + break; + } + default: + tscError("invalid exec result for request type %d", pRequest->type); + return TSDB_CODE_APP_ERROR; + } + + return code; } SRequestObj* launchQueryImpl(SRequestObj* pRequest, SQuery* pQuery, int32_t code, bool keepQuery, void** res) { - void* pRes = NULL; - if (TSDB_CODE_SUCCESS == code) { switch (pQuery->execMode) { case QUERY_EXEC_MODE_LOCAL: @@ -456,13 +533,10 @@ SRequestObj* launchQueryImpl(SRequestObj* pRequest, SQuery* pQuery, int32_t code code = execDdlQuery(pRequest, pQuery); break; case QUERY_EXEC_MODE_SCHEDULE: { - SArray* pNodeList = taosArrayInit(4, sizeof(struct SQueryNodeAddr)); - code = getPlan(pRequest, pQuery, &pRequest->body.pDag, pNodeList); + SArray* pNodeList = NULL; + code = getPlan(pRequest, pQuery, &pRequest->body.pDag, &pNodeList); if (TSDB_CODE_SUCCESS == code) { - code = scheduleQuery(pRequest, pRequest->body.pDag, pNodeList, &pRes); - if (NULL != pRes) { - code = validateSversion(pRequest, pRes); - } + code = scheduleQuery(pRequest, pRequest->body.pDag, pNodeList); } taosArrayDestroy(pNodeList); break; @@ -479,15 +553,15 @@ SRequestObj* launchQueryImpl(SRequestObj* pRequest, SQuery* pQuery, int32_t code qDestroyQuery(pQuery); } + handleExecRes(pRequest); + if (NULL != pRequest && TSDB_CODE_SUCCESS != code) { pRequest->code = terrno; } if (res) { - *res = pRes; - } else { - freeRequestRes(pRequest, pRes); - pRes = NULL; + *res = pRequest->body.resInfo.execRes.res; + pRequest->body.resInfo.execRes.res = NULL; } return pRequest; @@ -552,12 +626,12 @@ int32_t refreshMeta(STscObj* pTscObj, SRequestObj* pRequest) { int32_t removeMeta(STscObj* pTscObj, SArray* tbList) { SCatalog* pCatalog = NULL; - int32_t tbNum = taosArrayGetSize(tbList); - int32_t code = catalogGetHandle(pTscObj->pAppInfo->clusterId, &pCatalog); + int32_t tbNum = taosArrayGetSize(tbList); + int32_t code = catalogGetHandle(pTscObj->pAppInfo->clusterId, &pCatalog); if (code != TSDB_CODE_SUCCESS) { return code; } - + for (int32_t i = 0; i < tbNum; ++i) { SName* pTbName = taosArrayGet(tbList, i); catalogRemoveTableMeta(pCatalog, pTbName); @@ -566,7 +640,6 @@ int32_t removeMeta(STscObj* pTscObj, SArray* tbList) { return TSDB_CODE_SUCCESS; } - SRequestObj* execQuery(STscObj* 
pTscObj, const char* sql, int sqlLen) { SRequestObj* pRequest = NULL; int32_t retryNum = 0; @@ -589,7 +662,7 @@ SRequestObj* execQuery(STscObj* pTscObj, const char* sql, int sqlLen) { if (NEED_CLIENT_RM_TBLMETA_REQ(pRequest->type)) { removeMeta(pTscObj, pRequest->tableList); } - + return pRequest; } @@ -730,28 +803,55 @@ static void destroySendMsgInfo(SMsgSendInfo* pMsgBody) { taosMemoryFreeClear(pMsgBody); } -bool persistConnForSpecificMsg(void* parenct, tmsg_t msgType) { - return msgType == TDMT_VND_QUERY_RSP || msgType == TDMT_VND_FETCH_RSP || msgType == TDMT_VND_RES_READY_RSP || - msgType == TDMT_VND_QUERY_HEARTBEAT_RSP; +void updateTargetEpSet(SMsgSendInfo* pSendInfo, STscObj* pTscObj, SRpcMsg* pMsg, SEpSet* pEpSet) { + if (NULL == pEpSet) { + return; + } + + switch (pSendInfo->target.type) { + case TARGET_TYPE_MNODE: + if (NULL == pTscObj) { + tscError("mnode epset changed but not able to update it, reqObjRefId:%" PRIx64, pSendInfo->requestObjRefId); + return; + } + + updateEpSet_s(&pTscObj->pAppInfo->mgmtEp, pEpSet); + break; + case TARGET_TYPE_VNODE: { + if (NULL == pTscObj) { + tscError("vnode epset changed but not able to update it, reqObjRefId:%" PRIx64, pSendInfo->requestObjRefId); + return; + } + + SCatalog* pCatalog = NULL; + int32_t code = catalogGetHandle(pTscObj->pAppInfo->clusterId, &pCatalog); + if (code != TSDB_CODE_SUCCESS) { + tscError("fail to get catalog handle, clusterId:%" PRIx64 ", error %s", pTscObj->pAppInfo->clusterId, + tstrerror(code)); + return; + } + + catalogUpdateVgEpSet(pCatalog, pSendInfo->target.dbFName, pSendInfo->target.vgId, pEpSet); + break; + } + default: + tscDebug("epset changed, not updated, msgType %s", TMSG_INFO(pMsg->msgType)); + break; + } } void processMsgFromServer(void* parent, SRpcMsg* pMsg, SEpSet* pEpSet) { SMsgSendInfo* pSendInfo = (SMsgSendInfo*)pMsg->info.ahandle; assert(pMsg->info.ahandle != NULL); + SRequestObj* pRequest = NULL; + STscObj* pTscObj = NULL; if (pSendInfo->requestObjRefId != 0) { SRequestObj* pRequest = (SRequestObj*)taosAcquireRef(clientReqRefPool, pSendInfo->requestObjRefId); assert(pRequest->self == pSendInfo->requestObjRefId); pRequest->metric.rsp = taosGetTimestampUs(); - - //STscObj* pTscObj = pRequest->pTscObj; - //if (pEpSet) { - // if (!isEpsetEqual(&pTscObj->pAppInfo->mgmtEp.epSet, pEpSet)) { - // updateEpSet_s(&pTscObj->pAppInfo->mgmtEp, pEpSet); - // } - //} - + pTscObj = pRequest->pTscObj; /* * There is not response callback function for submit response. * The actual inserted number of points is the first number. 
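updateTargetEpSet() above only takes effect for messages whose SMsgSendInfo carries a target; a hedged sketch of how a vnode-bound request could populate that field, using the STargetInfo/ETargetType definitions added to qcom/query.h earlier in this patch (the vgId and db name below are placeholders, not values taken from the patch):

/* Sketch: tag an outgoing request so that an epset change carried by the
 * reply can be routed to the right catalog entry by updateTargetEpSet(). */
SMsgSendInfo* pSendInfo = taosMemoryCalloc(1, sizeof(SMsgSendInfo));
if (pSendInfo != NULL) {
  pSendInfo->target.type = TARGET_TYPE_VNODE;  // vnode-bound message
  pSendInfo->target.vgId = 2;                  // placeholder vgroup id
  snprintf(pSendInfo->target.dbFName, TSDB_DB_FNAME_LEN, "%s", "1.test_db");  // placeholder db
  // fp/param/requestId are filled in as before; when a reply arrives,
  // processMsgFromServer() now forwards any changed epset to updateTargetEpSet().
}

Requests that do not need epset tracking can leave target zeroed; a zero type matches none of the ETargetType values, so updateTargetEpSet() falls through to the default (debug-log only) branch.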
@@ -768,6 +868,8 @@ void processMsgFromServer(void* parent, SRpcMsg* pMsg, SEpSet* pEpSet) { taosReleaseRef(clientReqRefPool, pSendInfo->requestObjRefId); } + updateTargetEpSet(pSendInfo, pTscObj, pMsg, pEpSet); + SDataBuf buf = {.len = pMsg->contLen, .pData = NULL, .handle = pMsg->info.handle}; if (pMsg->contLen > 0) { @@ -821,7 +923,7 @@ void doSetOneRowPtr(SReqResultInfo* pResultInfo) { int32_t bytes = pResultInfo->fields[i].bytes; if (IS_VAR_DATA_TYPE(type)) { - if (pCol->offset[pResultInfo->current] != -1) { + if (!IS_VAR_NULL_TYPE(type, bytes) && pCol->offset[pResultInfo->current] != -1) { char* pStart = pResultInfo->pCol[i].offset[pResultInfo->current] + pResultInfo->pCol[i].pData; pResultInfo->length[i] = varDataLen(pStart); @@ -869,7 +971,8 @@ void* doAsyncFetchRows(SRequestObj* pRequest, bool setupOneRowPtr, bool convertU return NULL; } - pRequest->code = setQueryResultFromRsp(&pRequest->body.resInfo, (SRetrieveTableRsp*)pResInfo->pData, convertUcs4); + pRequest->code = + setQueryResultFromRsp(&pRequest->body.resInfo, (SRetrieveTableRsp*)pResInfo->pData, convertUcs4, true); if (pRequest->code != TSDB_CODE_SUCCESS) { pResultInfo->numOfRows = 0; return NULL; @@ -891,9 +994,8 @@ void* doAsyncFetchRows(SRequestObj* pRequest, bool setupOneRowPtr, bool convertU return pResultInfo->row; } - void* doFetchRows(SRequestObj* pRequest, bool setupOneRowPtr, bool convertUcs4) { - //return doAsyncFetchRows(pRequest, setupOneRowPtr, convertUcs4); + // return doAsyncFetchRows(pRequest, setupOneRowPtr, convertUcs4); assert(pRequest != NULL); SReqResultInfo* pResultInfo = &pRequest->body.resInfo; @@ -911,7 +1013,8 @@ void* doFetchRows(SRequestObj* pRequest, bool setupOneRowPtr, bool convertUcs4) return NULL; } - pRequest->code = setQueryResultFromRsp(&pRequest->body.resInfo, (SRetrieveTableRsp*)pResInfo->pData, convertUcs4); + pRequest->code = + setQueryResultFromRsp(&pRequest->body.resInfo, (SRetrieveTableRsp*)pResInfo->pData, convertUcs4, true); if (pRequest->code != TSDB_CODE_SUCCESS) { pResultInfo->numOfRows = 0; return NULL; @@ -955,27 +1058,20 @@ static char* parseTagDatatoJson(void* p) { goto end; } - int16_t nCols = kvRowNCols(p); + SArray* pTagVals = NULL; + if (tTagToValArray((const STag*)p, &pTagVals) != 0) { + goto end; + } + + int16_t nCols = taosArrayGetSize(pTagVals); char tagJsonKey[256] = {0}; for (int j = 0; j < nCols; ++j) { - SColIdx* pColIdx = kvRowColIdxAt(p, j); - char* val = (char*)(kvRowColVal(p, pColIdx)); - if (j == 0) { - if (*val == TSDB_DATA_TYPE_NULL) { - string = taosMemoryCalloc(1, 8); - sprintf(string, "%s", TSDB_DATA_NULL_STR_L); - goto end; - } - continue; - } - + STagVal* pTagVal = (STagVal*)taosArrayGet(pTagVals, j); // json key encode by binary memset(tagJsonKey, 0, sizeof(tagJsonKey)); - memcpy(tagJsonKey, varDataVal(val), varDataLen(val)); + memcpy(tagJsonKey, pTagVal->pKey, strlen(pTagVal->pKey)); // json value - val += varDataTLen(val); - char* realData = POINTER_SHIFT(val, CHAR_BYTES); - char type = *val; + char type = pTagVal->type; if (type == TSDB_DATA_TYPE_NULL) { cJSON* value = cJSON_CreateNull(); if (value == NULL) { @@ -984,11 +1080,12 @@ static char* parseTagDatatoJson(void* p) { cJSON_AddItemToObject(json, tagJsonKey, value); } else if (type == TSDB_DATA_TYPE_NCHAR) { cJSON* value = NULL; - if (varDataLen(realData) > 0) { - char* tagJsonValue = taosMemoryCalloc(varDataLen(realData), 1); - int32_t length = taosUcs4ToMbs((TdUcs4*)varDataVal(realData), varDataLen(realData), tagJsonValue); + if (pTagVal->nData > 0) { + char* tagJsonValue = 
taosMemoryCalloc(pTagVal->nData, 1); + int32_t length = taosUcs4ToMbs((TdUcs4*)pTagVal->pData, pTagVal->nData, tagJsonValue); if (length < 0) { - tscError("charset:%s to %s. val:%s convert json value failed.", DEFAULT_UNICODE_ENCODEC, tsCharset, val); + tscError("charset:%s to %s. val:%s convert json value failed.", DEFAULT_UNICODE_ENCODEC, tsCharset, + pTagVal->pData); taosMemoryFree(tagJsonValue); goto end; } @@ -997,7 +1094,7 @@ static char* parseTagDatatoJson(void* p) { if (value == NULL) { goto end; } - } else if (varDataLen(realData) == 0) { + } else if (pTagVal->nData == 0) { value = cJSON_CreateString(""); } else { ASSERT(0); @@ -1005,22 +1102,14 @@ static char* parseTagDatatoJson(void* p) { cJSON_AddItemToObject(json, tagJsonKey, value); } else if (type == TSDB_DATA_TYPE_DOUBLE) { - double jsonVd = *(double*)(realData); + double jsonVd = *(double*)(&pTagVal->i64); cJSON* value = cJSON_CreateNumber(jsonVd); if (value == NULL) { goto end; } cJSON_AddItemToObject(json, tagJsonKey, value); - // }else if(type == TSDB_DATA_TYPE_BIGINT){ - // int64_t jsonVd = *(int64_t*)(realData); - // cJSON* value = cJSON_CreateNumber((double)jsonVd); - // if (value == NULL) - // { - // goto end; - // } - // cJSON_AddItemToObject(json, tagJsonKey, value); } else if (type == TSDB_DATA_TYPE_BOOL) { - char jsonVd = *(char*)(realData); + char jsonVd = *(char*)(&pTagVal->i64); cJSON* value = cJSON_CreateBool(jsonVd); if (value == NULL) { goto end; @@ -1085,7 +1174,7 @@ static int32_t doConvertUCS4(SReqResultInfo* pResultInfo, int32_t numOfRows, int if (jsonInnerType == TSDB_DATA_TYPE_NULL) { sprintf(varDataVal(dst), "%s", TSDB_DATA_NULL_STR_L); varDataSetLen(dst, strlen(varDataVal(dst))); - } else if (jsonInnerType == TSDB_DATA_TYPE_JSON) { + } else if (jsonInnerType == TD_TAG_JSON) { char* jsonString = parseTagDatatoJson(jsonInnerData); STR_TO_VARSTR(dst, jsonString); taosMemoryFree(jsonString); @@ -1104,10 +1193,6 @@ static int32_t doConvertUCS4(SReqResultInfo* pResultInfo, int32_t numOfRows, int double jsonVd = *(double*)(jsonInnerData); sprintf(varDataVal(dst), "%.9lf", jsonVd); varDataSetLen(dst, strlen(varDataVal(dst))); - } else if (jsonInnerType == TSDB_DATA_TYPE_BIGINT) { - int64_t jsonVd = *(int64_t*)(jsonInnerData); - sprintf(varDataVal(dst), "%" PRId64, jsonVd); - varDataSetLen(dst, strlen(varDataVal(dst))); } else if (jsonInnerType == TSDB_DATA_TYPE_BOOL) { sprintf(varDataVal(dst), "%s", (*((char*)jsonInnerData) == 1) ? 
"true" : "false"); varDataSetLen(dst, strlen(varDataVal(dst))); @@ -1218,9 +1303,12 @@ void resetConnectDB(STscObj* pTscObj) { taosThreadMutexUnlock(&pTscObj->mutex); } -int32_t setQueryResultFromRsp(SReqResultInfo* pResultInfo, const SRetrieveTableRsp* pRsp, bool convertUcs4) { +int32_t setQueryResultFromRsp(SReqResultInfo* pResultInfo, const SRetrieveTableRsp* pRsp, bool convertUcs4, + bool freeAfterUse) { assert(pResultInfo != NULL && pRsp != NULL); + if (freeAfterUse) taosMemoryFreeClear(pResultInfo->pRspMsg); + pResultInfo->pRspMsg = (const char*)pRsp; pResultInfo->pData = (void*)pRsp->data; pResultInfo->numOfRows = htonl(pRsp->numOfRows); diff --git a/source/client/src/clientMain.c b/source/client/src/clientMain.c index 53eb443b36b05393b22667a6f623892008f14ebb..e144885e9efc4b3eca7c806996b77ad416d70161 100644 --- a/source/client/src/clientMain.c +++ b/source/client/src/clientMain.c @@ -666,8 +666,39 @@ int taos_stmt_set_tbname(TAOS_STMT *stmt, const char *name) { return stmtSetTbName(stmt, name); } +int taos_stmt_set_tags(TAOS_STMT *stmt, TAOS_MULTI_BIND *tags) { + if (stmt == NULL || tags == NULL) { + tscError("NULL parameter for %s", __FUNCTION__); + terrno = TSDB_CODE_INVALID_PARA; + return terrno; + } + + return stmtSetTbTags(stmt, tags); +} + + int taos_stmt_set_sub_tbname(TAOS_STMT *stmt, const char *name) { return taos_stmt_set_tbname(stmt, name); } +int taos_stmt_get_tag_fields(TAOS_STMT *stmt, int* fieldNum, TAOS_FIELD_E** fields) { + if (stmt == NULL || NULL == fieldNum) { + tscError("NULL parameter for %s", __FUNCTION__); + terrno = TSDB_CODE_INVALID_PARA; + return terrno; + } + + return stmtGetTagFields(stmt, fieldNum, fields); +} + +int taos_stmt_get_col_fields(TAOS_STMT *stmt, int* fieldNum, TAOS_FIELD_E** fields) { + if (stmt == NULL || NULL == fieldNum) { + tscError("NULL parameter for %s", __FUNCTION__); + terrno = TSDB_CODE_INVALID_PARA; + return terrno; + } + + return stmtGetColFields(stmt, fieldNum, fields); +} + int taos_stmt_bind_param(TAOS_STMT *stmt, TAOS_MULTI_BIND *bind) { if (stmt == NULL || bind == NULL) { tscError("NULL parameter for %s", __FUNCTION__); @@ -772,6 +803,16 @@ int taos_stmt_num_params(TAOS_STMT *stmt, int *nums) { return stmtGetParamNum(stmt, nums); } +int taos_stmt_get_param(TAOS_STMT *stmt, int idx, int *type, int *bytes) { + if (stmt == NULL || type == NULL || NULL == bytes || idx < 0) { + tscError("invalid parameter for %s", __FUNCTION__); + terrno = TSDB_CODE_INVALID_PARA; + return terrno; + } + + return stmtGetParam(stmt, idx, type, bytes); +} + TAOS_RES *taos_stmt_use_result(TAOS_STMT *stmt) { if (stmt == NULL) { tscError("NULL parameter for %s", __FUNCTION__); diff --git a/source/client/src/clientMsgHandler.c b/source/client/src/clientMsgHandler.c index dfce01dd6356f19da8dce1b8de9c2eb9e9ca42e4..9de3ee1d0f1a8a529c7177329543e3379cdc6cbb 100644 --- a/source/client/src/clientMsgHandler.c +++ b/source/client/src/clientMsgHandler.c @@ -58,7 +58,12 @@ int32_t processConnectRsp(void* param, const SDataBuf* pMsg, int32_t code) { return code; } - if (connectRsp.dnodeNum > 1 && !isEpsetEqual(&pTscObj->pAppInfo->mgmtEp.epSet, &connectRsp.epSet)) { + if (connectRsp.dnodeNum == 1) { + SEpSet srcEpSet = getEpSet_s(&pTscObj->pAppInfo->mgmtEp); + SEpSet dstEpSet = connectRsp.epSet; + rpcSetDefaultAddr(pTscObj->pAppInfo->pTransporter, srcEpSet.eps[srcEpSet.inUse].fqdn, + dstEpSet.eps[dstEpSet.inUse].fqdn); + } else if (connectRsp.dnodeNum > 1 && !isEpsetEqual(&pTscObj->pAppInfo->mgmtEp.epSet, &connectRsp.epSet)) { 
updateEpSet_s(&pTscObj->pAppInfo->mgmtEp, &connectRsp.epSet); } @@ -126,9 +131,10 @@ int32_t processUseDbRsp(void* param, const SDataBuf* pMsg, int32_t code) { if (usedbRsp.vgVersion >= 0) { uint64_t clusterId = pRequest->pTscObj->pAppInfo->clusterId; - int32_t code1 = catalogGetHandle(clusterId, &pCatalog); + int32_t code1 = catalogGetHandle(clusterId, &pCatalog); if (code1 != TSDB_CODE_SUCCESS) { - tscWarn("0x%" PRIx64 "catalogGetHandle failed, clusterId:%" PRIx64 ", error:%s", pRequest->requestId, clusterId, tstrerror(code1)); + tscWarn("0x%" PRIx64 "catalogGetHandle failed, clusterId:%" PRIx64 ", error:%s", pRequest->requestId, clusterId, + tstrerror(code1)); } else { catalogRemoveDB(pCatalog, usedbRsp.db, usedbRsp.uid); } @@ -158,7 +164,7 @@ int32_t processUseDbRsp(void* param, const SDataBuf* pMsg, int32_t code) { if (output.dbVgroup) taosHashCleanup(output.dbVgroup->vgHash); taosMemoryFreeClear(output.dbVgroup); - tscError("0x%" PRIx64" failed to build use db output since %s", pRequest->requestId, terrstr()); + tscError("0x%" PRIx64 " failed to build use db output since %s", pRequest->requestId, terrstr()); } else if (output.dbVgroup) { struct SCatalog* pCatalog = NULL; @@ -217,10 +223,33 @@ int32_t processDropDbRsp(void* param, const SDataBuf* pMsg, int32_t code) { return code; } +int32_t processAlterStbRsp(void* param, const SDataBuf* pMsg, int32_t code) { + SRequestObj* pRequest = param; + if (code != TSDB_CODE_SUCCESS) { + setErrno(pRequest, code); + tsem_post(&pRequest->body.rspSem); + return code; + } + + SMAlterStbRsp alterRsp = {0}; + SDecoder coder = {0}; + tDecoderInit(&coder, pMsg->pData, pMsg->len); + tDecodeSMAlterStbRsp(&coder, &alterRsp); + tDecoderClear(&coder); + + pRequest->body.resInfo.execRes.msgType = TDMT_MND_ALTER_STB; + pRequest->body.resInfo.execRes.res = alterRsp.pMeta; + + tsem_post(&pRequest->body.rspSem); + return code; +} + + void initMsgHandleFp() { handleRequestRspFp[TMSG_INDEX(TDMT_MND_CONNECT)] = processConnectRsp; handleRequestRspFp[TMSG_INDEX(TDMT_MND_CREATE_DB)] = processCreateDbRsp; handleRequestRspFp[TMSG_INDEX(TDMT_MND_USE_DB)] = processUseDbRsp; handleRequestRspFp[TMSG_INDEX(TDMT_MND_CREATE_STB)] = processCreateTableRsp; handleRequestRspFp[TMSG_INDEX(TDMT_MND_DROP_DB)] = processDropDbRsp; + handleRequestRspFp[TMSG_INDEX(TDMT_MND_ALTER_STB)] = processAlterStbRsp; } diff --git a/source/client/src/clientSml.c b/source/client/src/clientSml.c index 7d623072d664a4b9f1d77251812032f8c4fa4de1..bcdba696925a0cef4838fa24a563852385044c22 100644 --- a/source/client/src/clientSml.c +++ b/source/client/src/clientSml.c @@ -1124,7 +1124,7 @@ static int32_t smlParseTelnetString(SSmlHandle *info, const char* sql, SSmlTable } static int32_t smlParseCols(const char* data, int32_t len, SArray *cols, char *childTableName, bool isTag, SHashObj *dumplicateKey, SSmlMsgBuf *msg){ - if(isTag && len == 0){ + if(len == 0){ return TSDB_CODE_SUCCESS; } @@ -2318,6 +2318,28 @@ cleanup: return code; } +static int32_t isSchemalessDb(SSmlHandle* info){ + SName name; + tNameSetDbName(&name, info->taos->acctId, info->taos->db, strlen(info->taos->db)); + char dbFname[TSDB_DB_FNAME_LEN] = {0}; + tNameGetFullDbName(&name, dbFname); + SDbCfgInfo pInfo = {0}; + SEpSet ep = getEpSet_s(&info->taos->pAppInfo->mgmtEp); + + int32_t code = catalogGetDBCfg(info->pCatalog, info->taos->pAppInfo->pTransporter, &ep, dbFname, &pInfo); + if (code != TSDB_CODE_SUCCESS) { + info->pRequest->code = code; + smlBuildInvalidDataMsg(&info->msgBuf, "catalogGetDBCfg error, code:", tstrerror(code)); + return 
code; + } + if (!pInfo.schemaless){ + info->pRequest->code = TSDB_CODE_SML_INVALID_DB_CONF; + smlBuildInvalidDataMsg(&info->msgBuf, "can not insert into schemaless db:", dbFname); + return TSDB_CODE_SML_INVALID_DB_CONF; + } + return TSDB_CODE_SUCCESS; +} + /** * taos_schemaless_insert() parse and insert data points into database according to * different protocol. @@ -2351,6 +2373,19 @@ TAOS_RES* taos_schemaless_insert(TAOS* taos, char* lines[], int numLines, int pr return (TAOS_RES*)request; } + info->taos->schemalessType = 1; + if(request->pDb == NULL){ + request->code = TSDB_CODE_PAR_DB_NOT_SPECIFIED; + smlBuildInvalidDataMsg(&info->msgBuf, "Database not specified", NULL); + goto end; + } + + if(isSchemalessDb(info) != TSDB_CODE_SUCCESS){ + request->code = TSDB_CODE_SML_INVALID_DB_CONF; + smlBuildInvalidDataMsg(&info->msgBuf, "Cannot write data to a non schemaless database", NULL); + goto end; + } + if (!lines) { request->code = TSDB_CODE_SML_INVALID_DATA; smlBuildInvalidDataMsg(&info->msgBuf, "lines is null", NULL); @@ -2372,6 +2407,7 @@ TAOS_RES* taos_schemaless_insert(TAOS* taos, char* lines[], int numLines, int pr info->pRequest->code = smlProcess(info, lines, numLines); end: + uDebug("result:%s", info->msgBuf.buf); smlDestroyInfo(info); return (TAOS_RES*)request; } diff --git a/source/client/src/clientStmt.c b/source/client/src/clientStmt.c index 01d785ef73107778c818437c18d98c778d1f8893..3adb3684da1164363a1ffda4c26130643efc5f78 100644 --- a/source/client/src/clientStmt.c +++ b/source/client/src/clientStmt.c @@ -17,7 +17,7 @@ int32_t stmtSwitchStatus(STscStmt* pStmt, STMT_STATUS newStatus) { } break; case STMT_SETTAGS: - if (STMT_STATUS_NE(SETTBNAME)) { + if (STMT_STATUS_NE(SETTBNAME) && STMT_STATUS_NE(FETCH_FIELDS)) { code = TSDB_CODE_TSC_STMT_API_ERROR; } break; @@ -540,6 +540,8 @@ int stmtSetTbName(TAOS_STMT* stmt, const char* tbName) { if (pStmt->bInfo.needParse) { strncpy(pStmt->bInfo.tbName, tbName, sizeof(pStmt->bInfo.tbName) - 1); pStmt->bInfo.tbName[sizeof(pStmt->bInfo.tbName) - 1] = 0; + + STMT_ERR_RET(stmtParseSql(pStmt)); } return TSDB_CODE_SUCCESS; @@ -550,10 +552,6 @@ int stmtSetTbTags(TAOS_STMT* stmt, TAOS_MULTI_BIND* tags) { STMT_ERR_RET(stmtSwitchStatus(pStmt, STMT_SETTAGS)); - if (pStmt->bInfo.needParse) { - STMT_ERR_RET(stmtParseSql(pStmt)); - } - if (pStmt->bInfo.inExecCache) { return TSDB_CODE_SUCCESS; } @@ -571,7 +569,7 @@ int stmtSetTbTags(TAOS_STMT* stmt, TAOS_MULTI_BIND* tags) { return TSDB_CODE_SUCCESS; } -int32_t stmtFetchTagFields(STscStmt* pStmt, int32_t* fieldNum, TAOS_FIELD** fields) { +int stmtFetchTagFields(STscStmt* pStmt, int32_t* fieldNum, TAOS_FIELD_E** fields) { if (STMT_TYPE_QUERY == pStmt->sql.type) { tscError("invalid operation to get query tag fileds"); STMT_ERR_RET(TSDB_CODE_TSC_STMT_API_ERROR); @@ -589,7 +587,7 @@ int32_t stmtFetchTagFields(STscStmt* pStmt, int32_t* fieldNum, TAOS_FIELD** fiel return TSDB_CODE_SUCCESS; } -int32_t stmtFetchColFields(STscStmt* pStmt, int32_t* fieldNum, TAOS_FIELD** fields) { +int stmtFetchColFields(STscStmt* pStmt, int32_t* fieldNum, TAOS_FIELD_E** fields) { if (STMT_TYPE_QUERY == pStmt->sql.type) { tscError("invalid operation to get query column fileds"); STMT_ERR_RET(TSDB_CODE_TSC_STMT_API_ERROR); @@ -852,6 +850,71 @@ int stmtIsInsert(TAOS_STMT* stmt, int* insert) { return TSDB_CODE_SUCCESS; } +int stmtGetTagFields(TAOS_STMT* stmt, int* nums, TAOS_FIELD_E** fields) { + STscStmt* pStmt = (STscStmt*)stmt; + + if (STMT_TYPE_QUERY == pStmt->sql.type) { + STMT_RET(TSDB_CODE_TSC_STMT_API_ERROR); + } + + 
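+  // Shared flow for the new field getters: switch to FETCH_FIELDS (now a legal predecessor of SETTAGS, per the stmtSwitchStatus change above), reuse or rebuild the request, parse the SQL if still pending, then copy the tag schema out.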
STMT_ERR_RET(stmtSwitchStatus(pStmt, STMT_FETCH_FIELDS)); + + if (pStmt->bInfo.needParse && pStmt->sql.runTimes && pStmt->sql.type > 0 && + STMT_TYPE_MULTI_INSERT != pStmt->sql.type) { + pStmt->bInfo.needParse = false; + } + + if (pStmt->exec.pRequest && STMT_TYPE_QUERY == pStmt->sql.type && pStmt->sql.runTimes) { + taos_free_result(pStmt->exec.pRequest); + pStmt->exec.pRequest = NULL; + } + + if (NULL == pStmt->exec.pRequest) { + STMT_ERR_RET(buildRequest(pStmt->taos, pStmt->sql.sqlStr, pStmt->sql.sqlLen, &pStmt->exec.pRequest)); + } + + if (pStmt->bInfo.needParse) { + STMT_ERR_RET(stmtParseSql(pStmt)); + } + + STMT_ERR_RET(stmtFetchTagFields(stmt, nums, fields)); + + return TSDB_CODE_SUCCESS; +} + +int stmtGetColFields(TAOS_STMT* stmt, int* nums, TAOS_FIELD_E** fields) { + STscStmt* pStmt = (STscStmt*)stmt; + + if (STMT_TYPE_QUERY == pStmt->sql.type) { + STMT_RET(TSDB_CODE_TSC_STMT_API_ERROR); + } + + STMT_ERR_RET(stmtSwitchStatus(pStmt, STMT_FETCH_FIELDS)); + + if (pStmt->bInfo.needParse && pStmt->sql.runTimes && pStmt->sql.type > 0 && + STMT_TYPE_MULTI_INSERT != pStmt->sql.type) { + pStmt->bInfo.needParse = false; + } + + if (pStmt->exec.pRequest && STMT_TYPE_QUERY == pStmt->sql.type && pStmt->sql.runTimes) { + taos_free_result(pStmt->exec.pRequest); + pStmt->exec.pRequest = NULL; + } + + if (NULL == pStmt->exec.pRequest) { + STMT_ERR_RET(buildRequest(pStmt->taos, pStmt->sql.sqlStr, pStmt->sql.sqlLen, &pStmt->exec.pRequest)); + } + + if (pStmt->bInfo.needParse) { + STMT_ERR_RET(stmtParseSql(pStmt)); + } + + STMT_ERR_RET(stmtFetchColFields(stmt, nums, fields)); + + return TSDB_CODE_SUCCESS; +} + + int stmtGetParamNum(TAOS_STMT* stmt, int* nums) { STscStmt* pStmt = (STscStmt*)stmt; @@ -884,6 +947,50 @@ int stmtGetParamNum(TAOS_STMT* stmt, int* nums) { return TSDB_CODE_SUCCESS; } +int stmtGetParam(TAOS_STMT *stmt, int idx, int *type, int *bytes) { + STscStmt* pStmt = (STscStmt*)stmt; + + if (STMT_TYPE_QUERY == pStmt->sql.type) { + STMT_RET(TSDB_CODE_TSC_STMT_API_ERROR); + } + + STMT_ERR_RET(stmtSwitchStatus(pStmt, STMT_FETCH_FIELDS)); + + if (pStmt->bInfo.needParse && pStmt->sql.runTimes && pStmt->sql.type > 0 && + STMT_TYPE_MULTI_INSERT != pStmt->sql.type) { + pStmt->bInfo.needParse = false; + } + + if (pStmt->exec.pRequest && STMT_TYPE_QUERY == pStmt->sql.type && pStmt->sql.runTimes) { + taos_free_result(pStmt->exec.pRequest); + pStmt->exec.pRequest = NULL; + } + + if (NULL == pStmt->exec.pRequest) { + STMT_ERR_RET(buildRequest(pStmt->taos, pStmt->sql.sqlStr, pStmt->sql.sqlLen, &pStmt->exec.pRequest)); + } + + if (pStmt->bInfo.needParse) { + STMT_ERR_RET(stmtParseSql(pStmt)); + } + + int32_t nums = 0; + TAOS_FIELD_E *pField = NULL; + STMT_ERR_RET(stmtFetchColFields(stmt, &nums, &pField)); + if (idx >= nums) { + tscError("idx %d is too big", idx); + taosMemoryFree(pField); + STMT_ERR_RET(TSDB_CODE_INVALID_PARA); + } + + *type = pField[idx].type; + *bytes = pField[idx].bytes; + + taosMemoryFree(pField); + + return TSDB_CODE_SUCCESS; +} + TAOS_RES* stmtUseResult(TAOS_STMT* stmt) { STscStmt* pStmt = (STscStmt*)stmt; diff --git a/source/client/src/tmq.c b/source/client/src/tmq.c index dfa56f80c457783eb58255f3a0d494936b475bad..c2170631c2c90ca1d7322a4210f7763c6a703c57 100644 --- a/source/client/src/tmq.c +++ b/source/client/src/tmq.c @@ -143,6 +143,7 @@ typedef struct { typedef struct { // subscribe info char* topicName; + char db[TSDB_DB_FNAME_LEN]; SArray* vgs; // SArray @@ -1039,6 +1040,7 @@ bool tmqUpdateEp(tmq_t* tmq, int32_t epoch, SMqAskEpRsp* pRsp) { topic.schema = pTopicEp->schema; 
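// Record each topic's source database next to its name; tmq_get_db_name() (added below) reads it back from the poll response object.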
taosHashClear(pHash); topic.topicName = strdup(pTopicEp->topic); + tstrncpy(topic.db, pTopicEp->db, TSDB_DB_FNAME_LEN); tscDebug("consumer %ld update topic: %s", tmq->consumerId, topic.topicName); int32_t topicNumCur = taosArrayGetSize(tmq->clientTopics); @@ -1243,7 +1245,7 @@ tmq_resp_err_t tmq_seek(tmq_t* tmq, const tmq_topic_vgroup_t* offset) { return TMQ_RESP_ERR__FAIL; } -SMqPollReq* tmqBuildConsumeReqImpl(tmq_t* tmq, int64_t waitTime, SMqClientTopic* pTopic, SMqClientVg* pVg) { +SMqPollReq* tmqBuildConsumeReqImpl(tmq_t* tmq, int64_t timeout, SMqClientTopic* pTopic, SMqClientVg* pVg) { int64_t reqOffset; if (pVg->currentOffset >= 0) { reqOffset = pVg->currentOffset; @@ -1269,7 +1271,7 @@ SMqPollReq* tmqBuildConsumeReqImpl(tmq_t* tmq, int64_t waitTime, SMqClientTopic* strcpy(pReq->subKey + tlen + 1, pTopic->topicName); pReq->withTbName = tmq->withTbName; - pReq->waitTime = waitTime; + pReq->timeout = timeout; pReq->consumerId = tmq->consumerId; pReq->epoch = tmq->epoch; pReq->currentOffset = reqOffset; @@ -1283,7 +1285,8 @@ SMqPollReq* tmqBuildConsumeReqImpl(tmq_t* tmq, int64_t waitTime, SMqClientTopic* SMqRspObj* tmqBuildRspFromWrapper(SMqPollRspWrapper* pWrapper) { SMqRspObj* pRspObj = taosMemoryCalloc(1, sizeof(SMqRspObj)); pRspObj->resType = RES_TYPE__TMQ; - strncpy(pRspObj->topic, pWrapper->topicHandle->topicName, TSDB_TOPIC_FNAME_LEN); + tstrncpy(pRspObj->topic, pWrapper->topicHandle->topicName, TSDB_TOPIC_FNAME_LEN); + tstrncpy(pRspObj->db, pWrapper->topicHandle->db, TSDB_DB_FNAME_LEN); pRspObj->vgId = pWrapper->vgHandle->vgId; pRspObj->resIter = -1; memcpy(&pRspObj->rsp, &pWrapper->msg, sizeof(SMqDataBlkRsp)); @@ -1297,7 +1300,7 @@ SMqRspObj* tmqBuildRspFromWrapper(SMqPollRspWrapper* pWrapper) { return pRspObj; } -int32_t tmqPollImpl(tmq_t* tmq, int64_t waitTime) { +int32_t tmqPollImpl(tmq_t* tmq, int64_t timeout) { /*printf("call poll\n");*/ for (int i = 0; i < taosArrayGetSize(tmq->clientTopics); i++) { SMqClientTopic* pTopic = taosArrayGet(tmq->clientTopics, i); @@ -1318,7 +1321,7 @@ int32_t tmqPollImpl(tmq_t* tmq, int64_t waitTime) { #endif } atomic_store_32(&pVg->vgSkipCnt, 0); - SMqPollReq* pReq = tmqBuildConsumeReqImpl(tmq, waitTime, pTopic, pVg); + SMqPollReq* pReq = tmqBuildConsumeReqImpl(tmq, timeout, pTopic, pVg); if (pReq == NULL) { atomic_store_32(&pVg->vgStatus, TMQ_VG_STATUS__IDLE); tsem_post(&tmq->rspSem); @@ -1388,7 +1391,7 @@ int32_t tmqHandleNoPollRsp(tmq_t* tmq, SMqRspWrapper* rspWrapper, bool* pReset) return 0; } -SMqRspObj* tmqHandleAllRsp(tmq_t* tmq, int64_t waitTime, bool pollIfReset) { +SMqRspObj* tmqHandleAllRsp(tmq_t* tmq, int64_t timeout, bool pollIfReset) { while (1) { SMqRspWrapper* rspWrapper = NULL; taosGetQitem(tmq->qall, (void**)&rspWrapper); @@ -1428,17 +1431,17 @@ SMqRspObj* tmqHandleAllRsp(tmq_t* tmq, int64_t waitTime, bool pollIfReset) { taosFreeQitem(rspWrapper); if (pollIfReset && reset) { tscDebug("consumer %ld reset and repoll", tmq->consumerId); - tmqPollImpl(tmq, waitTime); + tmqPollImpl(tmq, timeout); } } } } -TAOS_RES* tmq_consumer_poll(tmq_t* tmq, int64_t wait_time) { +TAOS_RES* tmq_consumer_poll(tmq_t* tmq, int64_t timeout) { SMqRspObj* rspObj; int64_t startTime = taosGetTimestampMs(); - rspObj = tmqHandleAllRsp(tmq, wait_time, false); + rspObj = tmqHandleAllRsp(tmq, timeout, false); if (rspObj) { return (TAOS_RES*)rspObj; } @@ -1450,16 +1453,16 @@ TAOS_RES* tmq_consumer_poll(tmq_t* tmq, int64_t wait_time) { while (1) { tmqHandleAllDelayedTask(tmq); - if (tmqPollImpl(tmq, wait_time) < 0) return NULL; + if (tmqPollImpl(tmq, 
timeout) < 0) return NULL; - rspObj = tmqHandleAllRsp(tmq, wait_time, false); + rspObj = tmqHandleAllRsp(tmq, timeout, false); if (rspObj) { return (TAOS_RES*)rspObj; } - if (wait_time != 0) { + if (timeout != 0) { int64_t endTime = taosGetTimestampMs(); int64_t leftTime = endTime - startTime; - if (leftTime > wait_time) { + if (leftTime > timeout) { tscDebug("consumer %ld (epoch %d) timeout, no rsp", tmq->consumerId, tmq->epoch); return NULL; } @@ -1474,10 +1477,7 @@ TAOS_RES* tmq_consumer_poll(tmq_t* tmq, int64_t wait_time) { tmq_resp_err_t tmq_consumer_close(tmq_t* tmq) { if (tmq->status == TMQ_CONSUMER_STATUS__READY) { tmq_resp_err_t rsp = tmq_commit_sync(tmq, NULL); - if (rsp == TMQ_RESP_ERR__SUCCESS) { - // TODO: free resources - return TMQ_RESP_ERR__SUCCESS; - } else { + if (rsp == TMQ_RESP_ERR__FAIL) { return TMQ_RESP_ERR__FAIL; } @@ -1485,10 +1485,7 @@ tmq_resp_err_t tmq_consumer_close(tmq_t* tmq) { rsp = tmq_subscribe(tmq, lst); tmq_list_destroy(lst); - if (rsp == TMQ_RESP_ERR__SUCCESS) { - // TODO: free resources - return TMQ_RESP_ERR__SUCCESS; - } else { + if (rsp == TMQ_RESP_ERR__FAIL) { return TMQ_RESP_ERR__FAIL; } } @@ -1512,6 +1509,15 @@ const char* tmq_get_topic_name(TAOS_RES* res) { } } +const char* tmq_get_db_name(TAOS_RES* res) { + if (TD_RES_TMQ(res)) { + SMqRspObj* pRspObj = (SMqRspObj*)res; + return strchr(pRspObj->db, '.') + 1; + } else { + return NULL; + } +} + int32_t tmq_get_vgroup_id(TAOS_RES* res) { if (TD_RES_TMQ(res)) { SMqRspObj* pRspObj = (SMqRspObj*)res; diff --git a/source/client/test/clientTests.cpp b/source/client/test/clientTests.cpp index b5b6ea65e0bab73e3d7801fc81fbcd29c013cc25..914e5aefc2e16595e3c8831f4255bdb26c4738a9 100644 --- a/source/client/test/clientTests.cpp +++ b/source/client/test/clientTests.cpp @@ -567,6 +567,7 @@ TEST(testCase, insert_test) { taos_free_result(pRes); taos_close(pConn); } +#endif TEST(testCase, projection_query_tables) { TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0); @@ -625,23 +626,23 @@ TEST(testCase, projection_query_tables) { printf("start to insert next table\n"); - for(int32_t i = 0; i < 1000000; i += 20) { - char sql[1024] = {0}; - sprintf(sql, - "insert into tu2 values(now+%da, %d)(now+%da, %d)(now+%da, %d)(now+%da, %d)" - "(now+%da, %d)(now+%da, %d)(now+%da, %d)(now+%da, %d)(now+%da, %d)(now+%da, %d)" - "(now+%da, %d)(now+%da, %d)(now+%da, %d)(now+%da, %d)" - "(now+%da, %d)(now+%da, %d)(now+%da, %d)(now+%da, %d)(now+%da, %d)(now+%da, %d)", - i, i, i + 1, i + 1, i + 2, i + 2, i + 3, i + 3, i + 4, i + 4, i + 5, i + 5, i + 6, i + 6, i + 7, i + 7, - i + 8, i + 8, i + 9, i + 9, i + 10, i + 10, i + 11, i + 11, i + 12, i + 12, i + 13, i + 13, i + 14, i + 14, - i + 15, i + 15, i + 16, i + 16, i + 17, i + 17, i + 18, i + 18, i + 19, i + 19); - TAOS_RES* p = taos_query(pConn, sql); - if (taos_errno(p) != 0) { - printf("failed to insert data, reason:%s\n", taos_errstr(p)); - } - - taos_free_result(p); - } +// for(int32_t i = 0; i < 1000000; i += 20) { +// char sql[1024] = {0}; +// sprintf(sql, +// "insert into tu2 values(now+%da, %d)(now+%da, %d)(now+%da, %d)(now+%da, %d)" +// "(now+%da, %d)(now+%da, %d)(now+%da, %d)(now+%da, %d)(now+%da, %d)(now+%da, %d)" +// "(now+%da, %d)(now+%da, %d)(now+%da, %d)(now+%da, %d)" +// "(now+%da, %d)(now+%da, %d)(now+%da, %d)(now+%da, %d)(now+%da, %d)(now+%da, %d)", +// i, i, i + 1, i + 1, i + 2, i + 2, i + 3, i + 3, i + 4, i + 4, i + 5, i + 5, i + 6, i + 6, i + 7, i + 7, +// i + 8, i + 8, i + 9, i + 9, i + 10, i + 10, i + 11, i + 11, i + 12, i + 12, i + 13, i + 13, i + 
14, i + 14, +// i + 15, i + 15, i + 16, i + 16, i + 17, i + 17, i + 18, i + 18, i + 19, i + 19); +// TAOS_RES* p = taos_query(pConn, sql); +// if (taos_errno(p) != 0) { +// printf("failed to insert data, reason:%s\n", taos_errstr(p)); +// } +// +// taos_free_result(p); +// } // pRes = taos_query(pConn, "select * from tu"); // if (taos_errno(pRes) != 0) { @@ -663,7 +664,7 @@ TEST(testCase, projection_query_tables) { // taos_free_result(pRes); taos_close(pConn); } - +#if 0 TEST(testCase, projection_query_stables) { TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0); ASSERT_NE(pConn, nullptr); @@ -692,8 +693,6 @@ TEST(testCase, projection_query_stables) { taos_close(pConn); } -#endif - TEST(testCase, agg_query_tables) { TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0); ASSERT_NE(pConn, nullptr); @@ -734,5 +733,6 @@ TEST(testCase, agg_query_tables) { taos_free_result(pRes); taos_close(pConn); } +#endif #pragma GCC diagnostic pop diff --git a/source/client/test/smlTest.cpp b/source/client/test/smlTest.cpp index 217699e36071e1e4c5e93e391e77a95c4f857af8..6dc8088cd1ab9470fefe35666fa186b5acd5f3f6 100644 --- a/source/client/test/smlTest.cpp +++ b/source/client/test/smlTest.cpp @@ -1258,4 +1258,48 @@ TEST(testCase, sml_TD15742_Test) { destroyRequest(request); smlDestroyInfo(info); -} \ No newline at end of file +} + +TEST(testCase, sml_params_Test) { + TAOS *taos = taos_connect("localhost", "root", "taosdata", NULL, 0); + ASSERT_NE(taos, nullptr); + + TAOS_RES* pRes = taos_query(taos, "create database if not exists param"); + taos_free_result(pRes); + + const char *sql[] = { + "test_ms,t0=t c0=f 1626006833641", + }; + TAOS_RES* res = taos_schemaless_insert(taos, (char**)sql, 1, TSDB_SML_LINE_PROTOCOL, TSDB_SML_TIMESTAMP_MILLI_SECONDS); + ASSERT_EQ(taos_errno(res), TSDB_CODE_PAR_DB_NOT_SPECIFIED); + taos_free_result(pRes); + + pRes = taos_query(taos, "use param"); + taos_free_result(pRes); + + res = taos_schemaless_insert(taos, (char**)sql, 1, TSDB_SML_LINE_PROTOCOL, TSDB_SML_TIMESTAMP_MILLI_SECONDS); + ASSERT_EQ(taos_errno(res), TSDB_CODE_SML_INVALID_DB_CONF); + taos_free_result(pRes); +} + +TEST(testCase, sml_oom_Test) { + TAOS *taos = taos_connect("localhost", "root", "taosdata", NULL, 0); + ASSERT_NE(taos, nullptr); + + TAOS_RES* pRes = taos_query(taos, "create database if not exists oom schemaless 1"); + taos_free_result(pRes); + + const char *sql[] = { + //"test_ms,t0=t c0=f 1626006833641", + "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"pgxbrbga\",t8=L\"ncharTagValue\" c0=f,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"gviggpmi\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", + "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"cexkarjn\",t8=L\"ncharTagValue\" c0=true,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"rzwwuoxu\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", + "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"xphrlkey\",t8=L\"ncharTagValue\" c0=f,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"llsawebj\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", + 
"ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"jwpkipff\",t8=L\"ncharTagValue\" c0=false,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"euzzhcvu\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"jumhnsvw\",t8=L\"ncharTagValue\" c0=true,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"fnetgdhj\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"vrmmpgqe\",t8=L\"ncharTagValue\" c0=T,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"lnpfjapr\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"gvbhmsfr\",t8=L\"ncharTagValue\" c0=t,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"kydxrxwc\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"pfyarryq\",t8=L\"ncharTagValue\" c0=T,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"uxptotap\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"prolhudh\",t8=L\"ncharTagValue\" c0=True,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"ttxaxnac\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"dfgvmjmz\",t8=L\"ncharTagValue\" c0=F,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"bloextkn\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"dvjxwzsi\",t8=L\"ncharTagValue\" c0=True,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"aigjomaf\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"refbidtf\",t8=L\"ncharTagValue\" c0=t,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"vuanlfpz\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"nbpajxkx\",t8=L\"ncharTagValue\" c0=F,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"ktzzauxh\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"prcwdjct\",t8=L\"ncharTagValue\" c0=F,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"vmbhvjtp\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", 
"ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"liuddtuz\",t8=L\"ncharTagValue\" c0=T,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"pddsktow\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"algldlvl\",t8=L\"ncharTagValue\" c0=False,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"mlmnjgdl\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"oiynpcog\",t8=L\"ncharTagValue\" c0=F,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"wmynbagb\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"asvyulrm\",t8=L\"ncharTagValue\" c0=f,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"ohaacrkp\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"ytyejhiq\",t8=L\"ncharTagValue\" c0=true,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"bbznuerb\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"lpebcibw\",t8=L\"ncharTagValue\" c0=False,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"xmqrbafv\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"lnmwpdne\",t8=L\"ncharTagValue\" c0=true,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"jpcsjqun\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"mmxqmavz\",t8=L\"ncharTagValue\" c0=true,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"hhsbgaow\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"uwogyuud\",t8=L\"ncharTagValue\" c0=t,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"ytxpaxnk\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"wouwdvtt\",t8=L\"ncharTagValue\" c0=True,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"iitwikkh\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"lgyzuyaq\",t8=L\"ncharTagValue\" c0=F,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"bdtiigxi\",c8=L\"ncharColValue\",c9=7u64 
1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"qpnsvdhw\",t8=L\"ncharTagValue\" c0=false,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"pjxihgvu\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"ksxkfetn\",t8=L\"ncharTagValue\" c0=F,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"ocukufqs\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"qzerxmpe\",t8=L\"ncharTagValue\" c0=False,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"qwcfdyxs\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"jldrpmmd\",t8=L\"ncharTagValue\" c0=True,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"lucxlfzc\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"rcewrvya\",t8=L\"ncharTagValue\" c0=true,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"dknvaphs\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"nxtxgzdr\",t8=L\"ncharTagValue\" c0=T,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"mbvuugwz\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"uikakffu\",t8=L\"ncharTagValue\" c0=true,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"mwmtqsma\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"bfcxrrpa\",t8=L\"ncharTagValue\" c0=False,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"ksajygdj\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"vmhhszyv\",t8=L\"ncharTagValue\" c0=false,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"urwjgvut\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"jrvytcxy\",t8=L\"ncharTagValue\" c0=False,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"evqkzygh\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"zitdznhg\",t8=L\"ncharTagValue\" 
c0=true,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"tpqekrxa\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"yrrbgjtk\",t8=L\"ncharTagValue\" c0=false,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"bnphiuyq\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"huknehjn\",t8=L\"ncharTagValue\" c0=True,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"iudbxfke\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"fjmolwbn\",t8=L\"ncharTagValue\" c0=False,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"gukzgcjs\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"bjvdtlgq\",t8=L\"ncharTagValue\" c0=false,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"phxnesxh\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"qgpgckvc\",t8=L\"ncharTagValue\" c0=False,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"yechqtfa\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"pbouxywy\",t8=L\"ncharTagValue\" c0=T,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"kxtuojyo\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"txaniwlj\",t8=L\"ncharTagValue\" c0=F,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"fixgufrj\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"okzvalwq\",t8=L\"ncharTagValue\" c0=f,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"iitawgbn\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"gayvmird\",t8=L\"ncharTagValue\" c0=t,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"dprkfjph\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"kmuccshq\",t8=L\"ncharTagValue\" c0=false,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"vkslsdsd\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", 
"ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"dukccdqk\",t8=L\"ncharTagValue\" c0=False,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"leztxmqf\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"kltixbwz\",t8=L\"ncharTagValue\" c0=false,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"xqhkweef\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"idxsimvz\",t8=L\"ncharTagValue\" c0=F,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"vbruvcpk\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"uxandqkd\",t8=L\"ncharTagValue\" c0=True,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"dsiosysh\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"kxuyanpp\",t8=L\"ncharTagValue\" c0=false,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"wkrktags\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"yvizzpiv\",t8=L\"ncharTagValue\" c0=False,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"ddnefben\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"novmfmbc\",t8=L\"ncharTagValue\" c0=True,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"fnusxsfu\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"ouerfjap\",t8=L\"ncharTagValue\" c0=f,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"sigognkf\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"slvzhede\",t8=L\"ncharTagValue\" c0=T,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"bknerect\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"tmhcdfjb\",t8=L\"ncharTagValue\" c0=F,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"hpnoanpp\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"okmhelnc\",t8=L\"ncharTagValue\" c0=f,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"xcernjin\",c8=L\"ncharColValue\",c9=7u64 
1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"jdmiismg\",t8=L\"ncharTagValue\" c0=f,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"tmnqozrf\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"zgwrftkx\",t8=L\"ncharTagValue\" c0=f,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"zyamlwwh\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"nuedqcro\",t8=L\"ncharTagValue\" c0=True,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"lpsvyqaa\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"mneitsul\",t8=L\"ncharTagValue\" c0=T,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"vpleinwb\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"njxuaedy\",t8=L\"ncharTagValue\" c0=False,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"sdgxpqmu\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"yjirrebp\",t8=L\"ncharTagValue\" c0=False,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"ikqndzfj\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"ghnfdxhr\",t8=L\"ncharTagValue\" c0=f,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"hrwczpvo\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"nattumpb\",t8=L\"ncharTagValue\" c0=false,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"zoyfzazn\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"rdwemofy\",t8=L\"ncharTagValue\" c0=true,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"phkgsjeg\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"pyhvvjrt\",t8=L\"ncharTagValue\" c0=T,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"zfslyton\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"bxwjzeri\",t8=L\"ncharTagValue\" 
c0=False,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"uovzzgjv\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"cfjmacvr\",t8=L\"ncharTagValue\" c0=True,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"jefqgzqx\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"njrksxmr\",t8=L\"ncharTagValue\" c0=false,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"mhvabvgn\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"kfekjltr\",t8=L\"ncharTagValue\" c0=T,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"lexfaaby\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"zbblsmwq\",t8=L\"ncharTagValue\" c0=false,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"oqcombkx\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"rcdmhzyw\",t8=L\"ncharTagValue\" c0=false,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"otksuean\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"itbdvowq\",t8=L\"ncharTagValue\" c0=False,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"tswtmhex\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"xoukkzid\",t8=L\"ncharTagValue\" c0=True,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"guangmpq\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"rayxzuky\",t8=L\"ncharTagValue\" c0=false,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"lspwucrv\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"pdprzzkf\",t8=L\"ncharTagValue\" c0=True,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"sddqrtza\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"kabndgkx\",t8=L\"ncharTagValue\" c0=true,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"aglnqqxs\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", 
"ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"fiwpzmdr\",t8=L\"ncharTagValue\" c0=True,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"hxctooen\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"pckjpwyh\",t8=L\"ncharTagValue\" c0=false,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"ivmvsbai\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"eljdclst\",t8=L\"ncharTagValue\" c0=F,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"rwgdctie\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"zlnthxoz\",t8=L\"ncharTagValue\" c0=F,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"ljtxelle\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"llfggdpy\",t8=L\"ncharTagValue\" c0=t,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"tvnridze\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"hxjpgube\",t8=L\"ncharTagValue\" c0=F,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"zmldmquq\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"bggqwcoj\",t8=L\"ncharTagValue\" c0=False,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"drksfofm\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"jcsixens\",t8=L\"ncharTagValue\" c0=False,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"cdwnwhaf\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"nngpumuq\",t8=L\"ncharTagValue\" c0=F,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"hylgooci\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"cozeyjys\",t8=L\"ncharTagValue\" c0=True,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"lcgpfcsa\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"qdtzhtyd\",t8=L\"ncharTagValue\" c0=False,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"txpubynb\",c8=L\"ncharColValue\",c9=7u64 
1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"gbslzbtu\",t8=L\"ncharTagValue\" c0=T,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"buihcpcl\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"ayqezaiq\",t8=L\"ncharTagValue\" c0=True,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"zgkgtilj\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"bcjopqif\",t8=L\"ncharTagValue\" c0=F,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"mfzxiaqt\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"xmnlqxoj\",t8=L\"ncharTagValue\" c0=T,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"reyiklyf\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"xssuomhk\",t8=L\"ncharTagValue\" c0=False,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"liazkjll\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"nigjlblo\",t8=L\"ncharTagValue\" c0=true,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"vmojyznk\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"dotkbvrz\",t8=L\"ncharTagValue\" c0=f,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"kuwdyydw\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"slsfqydw\",t8=L\"ncharTagValue\" c0=t,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"zyironhd\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"pktwfhzi\",t8=L\"ncharTagValue\" c0=T,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"xybavsvh\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"pyrxemvx\",t8=L\"ncharTagValue\" c0=True,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"tlfihwjs\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"neumakmg\",t8=L\"ncharTagValue\" c0=F,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"wxqingoa\",c8=L\"ncharColValue\",c9=7u64 
1626006833639000000", + }; + pRes = taos_query(taos, "use oom"); + taos_free_result(pRes); + + TAOS_RES* res = taos_schemaless_insert(taos, (char**)sql, 100, TSDB_SML_LINE_PROTOCOL, 0); + ASSERT_EQ(taos_errno(res), 0); + taos_free_result(pRes); +} diff --git a/source/common/src/systable.c b/source/common/src/systable.c index 1517684ccda1202852944307df61c1c8519fe175..948b50c01a6a0d583fdea5b0d412e9e8440092cb 100644 --- a/source/common/src/systable.c +++ b/source/common/src/systable.c @@ -36,6 +36,7 @@ static const SSysDbTableSchema mnodesSchema[] = { {.name = "id", .bytes = 4, .type = TSDB_DATA_TYPE_INT}, {.name = "endpoint", .bytes = TSDB_EP_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, {.name = "role", .bytes = 12 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, + {.name = "status", .bytes = 9 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP}, }; @@ -90,6 +91,8 @@ static const SSysDbTableSchema userDBSchema[] = { {.name = "precision", .bytes = 2 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, {.name = "single_stable", .bytes = 1, .type = TSDB_DATA_TYPE_TINYINT}, {.name = "status", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, + {.name = "schemaless", .bytes = 1, .type = TSDB_DATA_TYPE_TINYINT}, + // {.name = "update", .bytes = 1, .type = TSDB_DATA_TYPE_TINYINT}, // disable update }; @@ -125,11 +128,15 @@ static const SSysDbTableSchema userStbsSchema[] = { static const SSysDbTableSchema streamSchema[] = { {.name = "stream_name", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "user_name", .bytes = 23, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "dest_table", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR}, {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP}, - {.name = "sql", .bytes = 1024, .type = TSDB_DATA_TYPE_VARCHAR}, -}; + {.name = "sql", .bytes = TSDB_SHOW_SQL_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, + {.name = "status", .bytes = 20 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY}, + {.name = "source_db", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR}, + {.name = "target_db", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR}, + {.name = "target_table", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR}, + {.name = "watermark", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT}, + {.name = "trigger", .bytes = 4, .type = TSDB_DATA_TYPE_INT}, + }; static const SSysDbTableSchema userTblsSchema[] = { {.name = "table_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR}, @@ -211,7 +218,6 @@ static const SSysDbTableSchema transSchema[] = { {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP}, {.name = "stage", .bytes = TSDB_TRANS_STAGE_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, {.name = "db", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR}, - {.name = "type", .bytes = TSDB_TRANS_TYPE_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, {.name = "failed_times", .bytes = 4, .type = TSDB_DATA_TYPE_INT}, {.name = "last_exec_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP}, {.name = "last_error", .bytes = (TSDB_TRANS_ERROR_LEN - 1) + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, diff --git a/source/common/src/tdatablock.c b/source/common/src/tdatablock.c index 2d77905d866061e0a454af094a8928e5771d94e7..f77b823f3c7d8a9e3f62e98e0f967f9d66ad83d3 100644 
--- a/source/common/src/tdatablock.c +++ b/source/common/src/tdatablock.c @@ -116,22 +116,23 @@ int32_t colDataAppend(SColumnInfoData* pColumnInfoData, uint32_t currentRow, con int32_t type = pColumnInfoData->info.type; if (IS_VAR_DATA_TYPE(type)) { - int32_t dataLen = varDataTLen(pData); + int32_t dataLen = 0; if (type == TSDB_DATA_TYPE_JSON) { if (*pData == TSDB_DATA_TYPE_NULL) { - dataLen = 0; + dataLen = CHAR_BYTES; } else if (*pData == TSDB_DATA_TYPE_NCHAR) { - dataLen = varDataTLen(pData + CHAR_BYTES); + dataLen = varDataTLen(pData + CHAR_BYTES) + CHAR_BYTES; } else if (*pData == TSDB_DATA_TYPE_DOUBLE) { - dataLen = DOUBLE_BYTES; + dataLen = DOUBLE_BYTES + CHAR_BYTES; } else if (*pData == TSDB_DATA_TYPE_BOOL) { - dataLen = CHAR_BYTES; - } else if (*pData == TSDB_DATA_TYPE_JSON) { - dataLen = kvRowLen(pData + CHAR_BYTES); + dataLen = CHAR_BYTES + CHAR_BYTES; + } else if (*pData == TD_TAG_JSON) { // json string + dataLen = ((STag*)(pData))->len; } else { ASSERT(0); } - dataLen += CHAR_BYTES; + }else { + dataLen = varDataTLen(pData); } SVarColAttr* pAttr = &pColumnInfoData->varmeta; @@ -354,14 +355,19 @@ int32_t blockDataUpdateTsWindow(SSDataBlock* pDataBlock, int32_t tsColumnIndex) return -1; } - int32_t index = (tsColumnIndex == -1) ? 0 : tsColumnIndex; + int32_t index = (tsColumnIndex == -1) ? 0 : tsColumnIndex; + SColumnInfoData* pColInfoData = taosArrayGet(pDataBlock->pDataBlock, index); if (pColInfoData->info.type != TSDB_DATA_TYPE_TIMESTAMP) { return 0; } - pDataBlock->info.window.skey = *(TSKEY*)colDataGetData(pColInfoData, 0); - pDataBlock->info.window.ekey = *(TSKEY*)colDataGetData(pColInfoData, (pDataBlock->info.rows - 1)); + TSKEY skey = *(TSKEY*)colDataGetData(pColInfoData, 0); + TSKEY ekey = *(TSKEY*)colDataGetData(pColInfoData, (pDataBlock->info.rows - 1)); + + pDataBlock->info.window.skey = TMIN(skey, ekey); + pDataBlock->info.window.ekey = TMAX(skey, ekey); + return 0; } @@ -611,6 +617,7 @@ int32_t blockDataFromBuf1(SSDataBlock* pBlock, const char* buf, size_t capacity) for (int32_t i = 0; i < numOfCols; ++i) { SColumnInfoData* pCol = taosArrayGet(pBlock->pDataBlock, i); + pCol->hasNull = true; if (IS_VAR_DATA_TYPE(pCol->info.type)) { size_t metaSize = capacity * sizeof(int32_t); @@ -1153,7 +1160,9 @@ void colInfoDataCleanup(SColumnInfoData* pColumn, uint32_t numOfRows) { if (IS_VAR_DATA_TYPE(pColumn->info.type)) { pColumn->varmeta.length = 0; } else { - memset(pColumn->nullbitmap, 0, BitmapLen(numOfRows)); + if (pColumn->nullbitmap != NULL) { + memset(pColumn->nullbitmap, 0, BitmapLen(numOfRows)); + } } } @@ -1269,29 +1278,43 @@ static void doShiftBitmap(char* nullBitmap, size_t n, size_t total) { memmove(nullBitmap, nullBitmap + n / 8, newLen); } else { int32_t tail = n % 8; - int32_t i = 0; - + int32_t i = 0; uint8_t* p = (uint8_t*)nullBitmap; - while (i < len) { - uint8_t v = p[i]; - p[i] = 0; - p[i] = (v << tail); + if (n < 8) { + while (i < len) { + uint8_t v = p[i]; // source bitmap value + p[i] = (v << tail); - if (i < len - 1) { - uint8_t next = p[i + 1]; - p[i] |= (next >> (8 - tail)); + if (i < len - 1) { + uint8_t next = p[i + 1]; + p[i] |= (next >> (8 - tail)); + } + + i += 1; } + } else if (n > 8) { + int32_t gap = len - newLen; + while(i < newLen) { + uint8_t v = p[i + gap]; + p[i] = (v << tail); + + if (i < newLen - 1) { + uint8_t next = p[i + gap + 1]; + p[i] |= (next >> (8 - tail)); + } - i += 1; + i += 1; + } } } } + static void colDataTrimFirstNRows(SColumnInfoData* pColInfoData, size_t n, size_t total) { if 
(IS_VAR_DATA_TYPE(pColInfoData->info.type)) { - memmove(pColInfoData->varmeta.offset, &pColInfoData->varmeta.offset[n], (total - n)); - memset(&pColInfoData->varmeta.offset[total - n - 1], 0, n); + memmove(pColInfoData->varmeta.offset, &pColInfoData->varmeta.offset[n], (total - n) * sizeof(int32_t)); + memset(&pColInfoData->varmeta.offset[total - n], 0, n * sizeof(int32_t)); } else { int32_t bytes = pColInfoData->info.bytes; memmove(pColInfoData->pData, ((char*)pColInfoData->pData + n * bytes), (total - n) * bytes); @@ -1460,7 +1483,7 @@ static char* formatTimestamp(char* buf, int64_t val, int precision) { } void blockDebugShowData(const SArray* dataBlocks) { - char pBuf[128]; + char pBuf[128] = {0}; int32_t sz = taosArrayGetSize(dataBlocks); for (int32_t i = 0; i < sz; i++) { SSDataBlock* pDataBlock = taosArrayGet(dataBlocks, i); @@ -1508,14 +1531,11 @@ void blockDebugShowData(const SArray* dataBlocks) { * @param pReq * @param pDataBlocks * @param vgId - * @param uid set as parameter temporarily // TODO: remove this parameter, and the executor should set uid in - * SDataBlock->info.uid * @param suid // TODO: check with Liao whether suid response is reasonable * * TODO: colId should be set */ -int32_t buildSubmitReqFromDataBlock(SSubmitReq** pReq, const SArray* pDataBlocks, STSchema* pTSchema, int32_t vgId, - tb_uid_t uid, tb_uid_t suid) { +int32_t buildSubmitReqFromDataBlock(SSubmitReq** pReq, const SArray* pDataBlocks, STSchema* pTSchema, int32_t vgId, tb_uid_t suid) { int32_t sz = taosArrayGetSize(pDataBlocks); int32_t bufSize = sizeof(SSubmitReq); for (int32_t i = 0; i < sz; ++i) { @@ -1551,7 +1571,7 @@ int32_t buildSubmitReqFromDataBlock(SSubmitReq** pReq, const SArray* pDataBlocks SSubmitBlk* pSubmitBlk = POINTER_SHIFT(pDataBuf, msgLen); pSubmitBlk->suid = suid; - pSubmitBlk->uid = uid; + pSubmitBlk->uid = pDataBlock->info.groupId; pSubmitBlk->numOfRows = rows; ++numOfBlks; @@ -1562,6 +1582,7 @@ int32_t buildSubmitReqFromDataBlock(SSubmitReq** pReq, const SArray* pDataBlocks tdSRowResetBuf(&rb, POINTER_SHIFT(pDataBuf, msgLen)); // set row buf printf("|"); bool isStartKey = false; + int32_t offset = 0; for (int32_t k = 0; k < colNum; ++k) { // iterate by column SColumnInfoData* pColInfoData = taosArrayGet(pDataBlock->pDataBlock, k); void* var = POINTER_SHIFT(pColInfoData->pData, j * pColInfoData->info.bytes); @@ -1570,18 +1591,18 @@ int32_t buildSubmitReqFromDataBlock(SSubmitReq** pReq, const SArray* pDataBlocks if (!isStartKey) { isStartKey = true; tdAppendColValToRow(&rb, PRIMARYKEY_TIMESTAMP_COL_ID, TSDB_DATA_TYPE_TIMESTAMP, TD_VTYPE_NORM, var, true, - 0, 0); + offset, k); + } else { - tdAppendColValToRow(&rb, 2, TSDB_DATA_TYPE_TIMESTAMP, TD_VTYPE_NORM, var, true, 8, k); - break; + tdAppendColValToRow(&rb, 2, TSDB_DATA_TYPE_TIMESTAMP, TD_VTYPE_NORM, var, true, offset, k); } break; case TSDB_DATA_TYPE_NCHAR: { - tdAppendColValToRow(&rb, 2, TSDB_DATA_TYPE_NCHAR, TD_VTYPE_NORM, var, true, 8, k); + tdAppendColValToRow(&rb, 2, TSDB_DATA_TYPE_NCHAR, TD_VTYPE_NORM, var, true, offset, k); break; } case TSDB_DATA_TYPE_VARCHAR: { // TSDB_DATA_TYPE_BINARY - tdAppendColValToRow(&rb, 2, TSDB_DATA_TYPE_VARCHAR, TD_VTYPE_NORM, var, true, 8, k); + tdAppendColValToRow(&rb, 2, TSDB_DATA_TYPE_VARCHAR, TD_VTYPE_NORM, var, true, offset, k); break; } case TSDB_DATA_TYPE_VARBINARY: @@ -1593,13 +1614,14 @@ int32_t buildSubmitReqFromDataBlock(SSubmitReq** pReq, const SArray* pDataBlocks break; default: if (pColInfoData->info.type < TSDB_DATA_TYPE_MAX && pColInfoData->info.type > TSDB_DATA_TYPE_NULL) { - 
tdAppendColValToRow(&rb, 2, pColInfoData->info.type, TD_VTYPE_NORM, var, true, 8, k); + tdAppendColValToRow(&rb, 2, pColInfoData->info.type, TD_VTYPE_NORM, var, true, offset, k); } else { printf("the column type %" PRIi16 " is undefined\n", pColInfoData->info.type); TASSERT(0); } break; } + offset += TYPE_BYTES[pColInfoData->info.type]; } dataLen += TD_ROW_LEN(rb.pBuf); } @@ -1632,6 +1654,11 @@ int32_t buildSubmitReqFromDataBlock(SSubmitReq** pReq, const SArray* pDataBlocks SSubmitReq* tdBlockToSubmit(const SArray* pBlocks, const STSchema* pTSchema, bool createTb, int64_t suid, const char* stbFullName, int32_t vgId) { SSubmitReq* ret = NULL; + SArray* tagArray = taosArrayInit(1, sizeof(STagVal)); + if (!tagArray) { + terrno = TSDB_CODE_OUT_OF_MEMORY; + return NULL; + } // cal size int32_t cap = sizeof(SSubmitReq); @@ -1653,18 +1680,32 @@ SSubmitReq* tdBlockToSubmit(const SArray* pBlocks, const STSchema* pTSchema, boo createTbReq.type = TSDB_CHILD_TABLE; createTbReq.ctb.suid = suid; - SKVRowBuilder kvRowBuilder = {0}; - if (tdInitKVRowBuilder(&kvRowBuilder) < 0) { - ASSERT(0); + + + STagVal tagVal = {.cid = 1, + .type = TSDB_DATA_TYPE_UBIGINT, + .pData = (uint8_t*)&pDataBlock->info.groupId, + .nData = sizeof(uint64_t)}; + STag* pTag = NULL; + taosArrayClear(tagArray); + taosArrayPush(tagArray, &tagVal); + tTagNew(tagArray, 1, false, &pTag); + if (!pTag) { + tdDestroySVCreateTbReq(&createTbReq); + taosArrayDestroy(tagArray); + return NULL; + } - tdAddColToKVRow(&kvRowBuilder, 1, &pDataBlock->info.groupId, sizeof(uint64_t)); - createTbReq.ctb.pTag = tdGetKVRowFromBuilder(&kvRowBuilder); - tdDestroyKVRowBuilder(&kvRowBuilder); + createTbReq.ctb.pTag = (uint8_t*)pTag; int32_t code; tEncodeSize(tEncodeSVCreateTbReq, &createTbReq, schemaLen, code); - if (code < 0) return NULL; - taosMemoryFree(cname); + + tdDestroySVCreateTbReq(&createTbReq); + + if (code < 0) { + taosArrayDestroy(tagArray); + return NULL; + } } cap += sizeof(SSubmitBlk) + schemaLen + rows * maxLen; @@ -1707,22 +1749,42 @@ SSubmitReq* tdBlockToSubmit(const SArray* pBlocks, const STSchema* pTSchema, boo createTbReq.type = TSDB_CHILD_TABLE; createTbReq.ctb.suid = suid; - SKVRowBuilder kvRowBuilder = {0}; - if (tdInitKVRowBuilder(&kvRowBuilder) < 0) { - ASSERT(0); + STagVal tagVal = {.cid = 1, + .type = TSDB_DATA_TYPE_UBIGINT, + .pData = (uint8_t*)&pDataBlock->info.groupId, + .nData = sizeof(uint64_t)}; + taosArrayClear(tagArray); + taosArrayPush(tagArray, &tagVal); + STag* pTag = NULL; + tTagNew(tagArray, 1, false, &pTag); + if (!pTag) { + tdDestroySVCreateTbReq(&createTbReq); + taosArrayDestroy(tagArray); + taosMemoryFreeClear(ret); + return NULL; + } - tdAddColToKVRow(&kvRowBuilder, 1, &pDataBlock->info.groupId, sizeof(uint64_t)); - createTbReq.ctb.pTag = tdGetKVRowFromBuilder(&kvRowBuilder); - tdDestroyKVRowBuilder(&kvRowBuilder); + createTbReq.ctb.pTag = (uint8_t*)pTag; int32_t code; tEncodeSize(tEncodeSVCreateTbReq, &createTbReq, schemaLen, code); - if (code < 0) return NULL; + if (code < 0) { + tdDestroySVCreateTbReq(&createTbReq); + taosArrayDestroy(tagArray); + taosMemoryFreeClear(ret); + return NULL; + } SEncoder encoder = {0}; tEncoderInit(&encoder, blockData, schemaLen); - if (tEncodeSVCreateTbReq(&encoder, &createTbReq) < 0) return NULL; + code = tEncodeSVCreateTbReq(&encoder, &createTbReq); tEncoderClear(&encoder); + tdDestroySVCreateTbReq(&createTbReq); + + if (code < 0) { + taosArrayDestroy(tagArray); + taosMemoryFreeClear(ret); + return NULL; + } } blkHead->schemaLen = 
htonl(schemaLen); @@ -1757,5 +1819,102 @@ SSubmitReq* tdBlockToSubmit(const SArray* pBlocks, const STSchema* pTSchema, boo } ret->length = htonl(ret->length); + taosArrayDestroy(tagArray); return ret; } + +void blockCompressEncode(const SSDataBlock* pBlock, char* data, int32_t* dataLen, int32_t numOfCols, int8_t needCompress) { + int32_t* actualLen = (int32_t*)data; + data += sizeof(int32_t); + + uint64_t* groupId = (uint64_t*)data; + data += sizeof(uint64_t); + + int32_t* colSizes = (int32_t*)data; + data += numOfCols * sizeof(int32_t); + + *dataLen = (numOfCols * sizeof(int32_t) + sizeof(uint64_t) + sizeof(int32_t)); + + int32_t numOfRows = pBlock->info.rows; + for (int32_t col = 0; col < numOfCols; ++col) { + SColumnInfoData* pColRes = (SColumnInfoData*)taosArrayGet(pBlock->pDataBlock, col); + + // copy the null bitmap + if (IS_VAR_DATA_TYPE(pColRes->info.type)) { + size_t metaSize = numOfRows * sizeof(int32_t); + memcpy(data, pColRes->varmeta.offset, metaSize); + data += metaSize; + (*dataLen) += metaSize; + } else { + int32_t len = BitmapLen(numOfRows); + memcpy(data, pColRes->nullbitmap, len); + data += len; + (*dataLen) += len; + } + + if (needCompress) { + colSizes[col] = blockCompressColData(pColRes, numOfRows, data, needCompress); + data += colSizes[col]; + (*dataLen) += colSizes[col]; + } else { + colSizes[col] = colDataGetLength(pColRes, numOfRows); + (*dataLen) += colSizes[col]; + memmove(data, pColRes->pData, colSizes[col]); + data += colSizes[col]; + } + + colSizes[col] = htonl(colSizes[col]); + } + + *actualLen = *dataLen; + *groupId = pBlock->info.groupId; +} + +const char* blockCompressDecode(SSDataBlock* pBlock, int32_t numOfCols, int32_t numOfRows, const char* pData) { + blockDataEnsureCapacity(pBlock, numOfRows); + const char* pStart = pData; + + int32_t dataLen = *(int32_t*)pStart; + pStart += sizeof(int32_t); + + pBlock->info.groupId = *(uint64_t*)pStart; + pStart += sizeof(uint64_t); + + int32_t* colLen = (int32_t*)pStart; + pStart += sizeof(int32_t) * numOfCols; + + for (int32_t i = 0; i < numOfCols; ++i) { + colLen[i] = htonl(colLen[i]); + ASSERT(colLen[i] >= 0); + + SColumnInfoData* pColInfoData = taosArrayGet(pBlock->pDataBlock, i); + if (IS_VAR_DATA_TYPE(pColInfoData->info.type)) { + pColInfoData->varmeta.length = colLen[i]; + pColInfoData->varmeta.allocLen = colLen[i]; + + memcpy(pColInfoData->varmeta.offset, pStart, sizeof(int32_t) * numOfRows); + pStart += sizeof(int32_t) * numOfRows; + + if (colLen[i] > 0) { + taosMemoryFreeClear(pColInfoData->pData); + pColInfoData->pData = taosMemoryMalloc(colLen[i]); + } + } else { + memcpy(pColInfoData->nullbitmap, pStart, BitmapLen(numOfRows)); + pStart += BitmapLen(numOfRows); + } + + if (colLen[i] > 0) { + memcpy(pColInfoData->pData, pStart, colLen[i]); + } + + // TODO + // setting this flag to true temporarily so aggregate function on stable will + // examine NULL value for non-primary key column + pColInfoData->hasNull = true; + pStart += colLen[i]; + } + + ASSERT(pStart - pData == dataLen); + return pStart; +} \ No newline at end of file diff --git a/source/common/src/tdataformat.c b/source/common/src/tdataformat.c index e8d7e3ac0933532a4ad4f55509df575d2eaa177b..287dba6d3badaa1c4a1f077d8cea805981da09e7 100644 --- a/source/common/src/tdataformat.c +++ b/source/common/src/tdataformat.c @@ -19,31 +19,15 @@ #include "tdatablock.h" #include "tlog.h" -typedef struct SKVIdx { - int32_t cid; - int32_t offset; -} SKVIdx; +static int32_t tGetTagVal(uint8_t *p, STagVal *pTagVal, int8_t isJson); #pragma pack(push, 1) typedef 
struct { int16_t nCols; - SKVIdx idx[]; + uint8_t idx[]; } STSKVRow; #pragma pack(pop) -typedef struct STagIdx { - int16_t cid; - uint16_t offset; -} STagIdx; - -#pragma pack(push, 1) -struct STag { - uint16_t len; - uint16_t nTag; - STagIdx idx[]; -}; -#pragma pack(pop) - #define TSROW_IS_KV_ROW(r) ((r)->flags & TSROW_KV_ROW) #define BIT1_SIZE(n) (((n)-1) / 8 + 1) #define BIT2_SIZE(n) (((n)-1) / 4 + 1) @@ -54,171 +38,663 @@ struct STag { static FORCE_INLINE int tSKVIdxCmprFn(const void *p1, const void *p2); -// STSRow2 -int32_t tPutTSRow(uint8_t *p, STSRow2 *pRow) { +// SValue +static FORCE_INLINE int32_t tPutValue(uint8_t *p, SValue *pValue, int8_t type) { int32_t n = 0; - n += tPutI64(p ? p + n : p, pRow->ts); - n += tPutI8(p ? p + n : p, pRow->flags); - n += tPutI32v(p ? p + n : p, pRow->sver); + if (IS_VAR_DATA_TYPE(type)) { + n += tPutBinary(p ? p + n : p, pValue->pData, pValue->nData); + } else { + switch (type) { + case TSDB_DATA_TYPE_BOOL: + n += tPutI8(p ? p + n : p, pValue->i8 ? 1 : 0); + break; + case TSDB_DATA_TYPE_TINYINT: + n += tPutI8(p ? p + n : p, pValue->i8); + break; + case TSDB_DATA_TYPE_SMALLINT: + n += tPutI16(p ? p + n : p, pValue->i16); + break; + case TSDB_DATA_TYPE_INT: + n += tPutI32(p ? p + n : p, pValue->i32); + break; + case TSDB_DATA_TYPE_BIGINT: + n += tPutI64(p ? p + n : p, pValue->i64); + break; + case TSDB_DATA_TYPE_FLOAT: + n += tPutFloat(p ? p + n : p, pValue->f); + break; + case TSDB_DATA_TYPE_DOUBLE: + n += tPutDouble(p ? p + n : p, pValue->d); + break; + case TSDB_DATA_TYPE_TIMESTAMP: + n += tPutI64(p ? p + n : p, pValue->ts); + break; + case TSDB_DATA_TYPE_UTINYINT: + n += tPutU8(p ? p + n : p, pValue->u8); + break; + case TSDB_DATA_TYPE_USMALLINT: + n += tPutU16(p ? p + n : p, pValue->u16); + break; + case TSDB_DATA_TYPE_UINT: + n += tPutU32(p ? p + n : p, pValue->u32); + break; + case TSDB_DATA_TYPE_UBIGINT: + n += tPutU64(p ? p + n : p, pValue->u64); + break; + default: + ASSERT(0); + } + } - ASSERT(pRow->flags & 0xf); + return n; +} - switch (pRow->flags & 0xf) { - case TSROW_HAS_NONE: - case TSROW_HAS_NULL: - break; - default: - n += tPutBinary(p ? p + n : p, pRow->pData, pRow->nData); - break; +static FORCE_INLINE int32_t tGetValue(uint8_t *p, SValue *pValue, int8_t type) { + int32_t n = 0; + + if (IS_VAR_DATA_TYPE(type)) { + n += tGetBinary(p, &pValue->pData, pValue ? 
&pValue->nData : NULL); + } else { + switch (type) { + case TSDB_DATA_TYPE_BOOL: + n += tGetI8(p, &pValue->i8); + break; + case TSDB_DATA_TYPE_TINYINT: + n += tGetI8(p, &pValue->i8); + break; + case TSDB_DATA_TYPE_SMALLINT: + n += tGetI16(p, &pValue->i16); + break; + case TSDB_DATA_TYPE_INT: + n += tGetI32(p, &pValue->i32); + break; + case TSDB_DATA_TYPE_BIGINT: + n += tGetI64(p, &pValue->i64); + break; + case TSDB_DATA_TYPE_FLOAT: + n += tGetFloat(p, &pValue->f); + break; + case TSDB_DATA_TYPE_DOUBLE: + n += tGetDouble(p, &pValue->d); + break; + case TSDB_DATA_TYPE_TIMESTAMP: + n += tGetI64(p, &pValue->ts); + break; + case TSDB_DATA_TYPE_UTINYINT: + n += tGetU8(p, &pValue->u8); + break; + case TSDB_DATA_TYPE_USMALLINT: + n += tGetU16(p, &pValue->u16); + break; + case TSDB_DATA_TYPE_UINT: + n += tGetU32(p, &pValue->u32); + break; + case TSDB_DATA_TYPE_UBIGINT: + n += tGetU64(p, &pValue->u64); + break; + default: + ASSERT(0); + } } return n; } -int32_t tGetTSRow(uint8_t *p, STSRow2 *pRow) { - int32_t n = 0; - uint8_t flags; +// STSRow2 ======================================================================== +static void setBitMap(uint8_t *pb, uint8_t v, int32_t idx, uint8_t flags) { + if (pb) { + switch (flags & 0xf) { + case TSROW_HAS_NULL | TSROW_HAS_NONE: + case TSROW_HAS_VAL | TSROW_HAS_NONE: + if (v) { + SET_BIT1(pb, idx, (uint8_t)1); + } else { + SET_BIT1(pb, idx, (uint8_t)0); + } + break; + case TSROW_HAS_VAL | TSROW_HAS_NULL: + v = v - 1; + SET_BIT1(pb, idx, v); + break; + case TSROW_HAS_VAL | TSROW_HAS_NULL | TSROW_HAS_NONE: + SET_BIT2(pb, idx, v); + break; - n += tGetI64(p + n, pRow ? &pRow->ts : NULL); - n += tGetI8(p + n, pRow ? &pRow->flags : &flags); - n += tGetI32v(p + n, pRow ? &pRow->sver : NULL); + default: + ASSERT(0); + } + } +} +#define SET_IDX(p, i, n, f) \ + do { \ + if ((f)&TSROW_KV_SMALL) { \ + ((uint8_t *)(p))[i] = (n); \ + } else if ((f)&TSROW_KV_MID) { \ + ((uint16_t *)(p))[i] = (n); \ + } else { \ + ((uint32_t *)(p))[i] = (n); \ + } \ + } while (0) + +int32_t tTSRowNew(STSRowBuilder *pBuilder, SArray *pArray, STSchema *pTSchema, STSRow2 **ppRow) { + int32_t code = 0; + STColumn *pTColumn; + SColVal *pColVal; + int32_t nColVal = taosArrayGetSize(pArray); + int32_t iColVal; + + ASSERT(nColVal > 0); + + // try + uint8_t flags = 0; + uint32_t ntv = 0; + uint32_t nkv = 0; + int16_t nTag = 0; + uint32_t maxIdx = 0; + + iColVal = 0; + for (int32_t iColumn = 0; iColumn < pTSchema->numOfCols; iColumn++) { + pTColumn = &pTSchema->columns[iColumn]; + if (iColVal < nColVal) { + pColVal = (SColVal *)taosArrayGet(pArray, iColVal); + } else { + pColVal = NULL; + } - if (pRow) flags = pRow->flags; - switch (flags & 0xf) { - case TSROW_HAS_NONE: - case TSROW_HAS_NULL: - break; - default: - n += tGetBinary(p + n, pRow ? &pRow->pData : NULL, pRow ? 
&pRow->nData : NULL); - break; + if (iColumn == 0) { + ASSERT(pColVal->cid == pTColumn->colId); + ASSERT(pTColumn->type == TSDB_DATA_TYPE_TIMESTAMP); + ASSERT(pTColumn->colId == 0); + + iColVal++; + } else { + if (pColVal) { + if (pColVal->cid == pTColumn->colId) { + iColVal++; + + if (pColVal->isNone) { + flags |= TSROW_HAS_NONE; + } else if (pColVal->isNull) { + flags |= TSROW_HAS_NULL; + maxIdx = nkv; + nTag++; + nkv += tPutI16v(NULL, -pTColumn->colId); + } else { + flags |= TSROW_HAS_VAL; + maxIdx = nkv; + nTag++; + nkv += tPutI16v(NULL, pTColumn->colId); + nkv += tPutValue(NULL, &pColVal->value, pTColumn->type); + if (IS_VAR_DATA_TYPE(pTColumn->type)) { + ntv += tPutValue(NULL, &pColVal->value, pTColumn->type); + } + } + } else if (pColVal->cid > pTColumn->colId) { + flags |= TSROW_HAS_NONE; + } else { + ASSERT(0); + } + } else { + flags |= TSROW_HAS_NONE; + } + } } - return n; + ASSERT(flags); + + // decide + uint32_t nData = 0; + uint32_t nDataT = 0; + uint32_t nDataK = 0; + if (flags == TSROW_HAS_NONE || flags == TSROW_HAS_NULL) { + nData = 0; + } else { + switch (flags) { + case TSROW_HAS_VAL: + nDataT = pTSchema->flen + ntv; + break; + case TSROW_HAS_NULL | TSROW_HAS_NONE: + nDataT = BIT1_SIZE(pTSchema->numOfCols - 1); + break; + case TSROW_HAS_VAL | TSROW_HAS_NONE: + case TSROW_HAS_VAL | TSROW_HAS_NULL: + nDataT = BIT1_SIZE(pTSchema->numOfCols - 1) + pTSchema->flen + ntv; + break; + case TSROW_HAS_VAL | TSROW_HAS_NULL | TSROW_HAS_NONE: + nDataT = BIT2_SIZE(pTSchema->numOfCols - 1) + pTSchema->flen + ntv; + break; + default: + ASSERT(0); + } + + uint8_t tflags = 0; + if (maxIdx <= UINT8_MAX) { + nDataK = sizeof(STSKVRow) + sizeof(uint8_t) * nTag + nkv; + tflags |= TSROW_KV_SMALL; + } else if (maxIdx <= UINT16_MAX) { + nDataK = sizeof(STSKVRow) + sizeof(uint16_t) * nTag + nkv; + tflags |= TSROW_KV_MID; + } else { + nDataK = sizeof(STSKVRow) + sizeof(uint32_t) * nTag + nkv; + tflags |= TSROW_KV_BIG; + } + + if (nDataT < nDataK) { + nData = nDataT; + } else { + nData = nDataK; + flags |= tflags; + } + } + + // alloc + if (pBuilder) { + // create from a builder + if (nData == 0) { + pBuilder->tsRow.nData = 0; + pBuilder->tsRow.pData = NULL; + } else { + if (pBuilder->szBuf < nData) { + uint8_t *p = taosMemoryRealloc(pBuilder->pBuf, nData); + if (p == NULL) { + code = TSDB_CODE_OUT_OF_MEMORY; + goto _exit; + } + pBuilder->pBuf = p; + pBuilder->szBuf = nData; + } + + pBuilder->tsRow.nData = nData; + pBuilder->tsRow.pData = pBuilder->pBuf; + } + + *ppRow = &pBuilder->tsRow; + } else { + // create a new one + *ppRow = (STSRow2 *)taosMemoryMalloc(sizeof(STSRow2)); + if (*ppRow == NULL) { + code = TSDB_CODE_OUT_OF_MEMORY; + goto _exit; + } + if (nData == 0) { + (*ppRow)->nData = 0; + (*ppRow)->pData = NULL; + } else { + (*ppRow)->nData = nData; + (*ppRow)->pData = taosMemoryMalloc(nData); + if ((*ppRow)->pData == NULL) { + taosMemoryFree(*ppRow); + code = TSDB_CODE_OUT_OF_MEMORY; + goto _exit; + } + } + } + + // build + (*ppRow)->flags = flags; + (*ppRow)->sver = pTSchema->version; + + pColVal = (SColVal *)taosArrayGet(pArray, 0); + (*ppRow)->ts = pColVal->value.ts; + + if ((*ppRow)->pData) { + STSKVRow *pTSKVRow = NULL; + uint8_t *pidx = NULL; + uint8_t *pkv = NULL; + uint8_t *pb = NULL; + uint8_t *pf = NULL; + uint8_t *ptv = NULL; + nkv = 0; + ntv = 0; + iColVal = 1; + + if ((flags & 0xf0) == 0) { + switch (flags & 0xf) { + case TSROW_HAS_VAL: + pf = (*ppRow)->pData; + ptv = pf + pTSchema->flen; + break; + case TSROW_HAS_NULL | TSROW_HAS_NONE: + pb = (*ppRow)->pData; + break; + case 
TSROW_HAS_VAL | TSROW_HAS_NONE: + case TSROW_HAS_VAL | TSROW_HAS_NULL: + pb = (*ppRow)->pData; + pf = pb + BIT1_SIZE(pTSchema->numOfCols - 1); + ptv = pf + pTSchema->flen; + break; + case TSROW_HAS_VAL | TSROW_HAS_NULL | TSROW_HAS_NONE: + pb = (*ppRow)->pData; + pf = pb + BIT2_SIZE(pTSchema->numOfCols - 1); + ptv = pf + pTSchema->flen; + break; + default: + ASSERT(0); + } + } else { + pTSKVRow = (STSKVRow *)(*ppRow)->pData; + pTSKVRow->nCols = 0; + pidx = pTSKVRow->idx; + if (flags & TSROW_KV_SMALL) { + pkv = pidx + sizeof(uint8_t) * nTag; + } else if (flags & TSROW_KV_MID) { + pkv = pidx + sizeof(uint16_t) * nTag; + } else { + pkv = pidx + sizeof(uint32_t) * nTag; + } + } + + for (int32_t iColumn = 1; iColumn < pTSchema->numOfCols; iColumn++) { + pTColumn = &pTSchema->columns[iColumn]; + if (iColVal < nColVal) { + pColVal = (SColVal *)taosArrayGet(pArray, iColVal); + } else { + pColVal = NULL; + } + + if (pColVal) { + if (pColVal->cid == pTColumn->colId) { + iColVal++; + + if (pColVal->isNone) { + goto _set_none; + } else if (pColVal->isNull) { + goto _set_null; + } else { + goto _set_value; + } + } else if (pColVal->cid > pTColumn->colId) { + goto _set_none; + } else { + ASSERT(0); + } + } else { + goto _set_none; + } + + _set_none: + if ((flags & 0xf0) == 0) { + setBitMap(pb, 0, iColumn - 1, flags); + } + continue; + + _set_null: + if ((flags & 0xf0) == 0) { + setBitMap(pb, 1, iColumn - 1, flags); + } else { + SET_IDX(pidx, pTSKVRow->nCols, nkv, flags); + pTSKVRow->nCols++; + nkv += tPutI16v(pkv + nkv, -pTColumn->colId); + } + continue; + + _set_value: + if ((flags & 0xf0) == 0) { + setBitMap(pb, 2, iColumn - 1, flags); + + if (IS_VAR_DATA_TYPE(pTColumn->type)) { + *(VarDataOffsetT *)(pf + pTColumn->offset) = ntv; + ntv += tPutValue(ptv + ntv, &pColVal->value, pTColumn->type); + } else { + tPutValue(pf + pTColumn->offset, &pColVal->value, pTColumn->type); + } + } else { + SET_IDX(pidx, pTSKVRow->nCols, nkv, flags); + pTSKVRow->nCols++; + nkv += tPutI16v(pkv + nkv, pColVal->cid); + nkv += tPutValue(pkv + nkv, &pColVal->value, pTColumn->type); + } + continue; + } + } + +_exit: + return code; } -int32_t tTSRowDup(const STSRow2 *pRow, STSRow2 **ppRow) { - (*ppRow) = taosMemoryMalloc(sizeof(*pRow) + pRow->nData); +int32_t tTSRowClone(const STSRow2 *pRow, STSRow2 **ppRow) { + int32_t code = 0; + + (*ppRow) = (STSRow2 *)taosMemoryMalloc(sizeof(**ppRow)); if (*ppRow == NULL) { - terrno = TSDB_CODE_OUT_OF_MEMORY; - return -1; + code = TSDB_CODE_OUT_OF_MEMORY; + goto _exit; } + **ppRow = *pRow; + (*ppRow)->pData = NULL; - (*ppRow)->ts = pRow->ts; - (*ppRow)->flags = pRow->flags; - (*ppRow)->sver = pRow->sver; - (*ppRow)->nData = pRow->nData; if (pRow->nData) { - (*ppRow)->pData = (uint8_t *)(&(*ppRow)[1]); + (*ppRow)->pData = taosMemoryMalloc(pRow->nData); + if ((*ppRow)->pData == NULL) { + taosMemoryFree(*ppRow); + code = TSDB_CODE_OUT_OF_MEMORY; + goto _exit; + } memcpy((*ppRow)->pData, pRow->pData, pRow->nData); - } else { - (*ppRow)->pData = NULL; } - return 0; +_exit: + return code; } void tTSRowFree(STSRow2 *pRow) { - if (pRow) taosMemoryFree(pRow); + if (pRow) { + if (pRow->pData) taosMemoryFree(pRow->pData); + taosMemoryFree(pRow); + } } -int32_t tTSRowGet(const STSRow2 *pRow, STSchema *pTSchema, int32_t iCol, SColVal *pColVal) { - uint32_t n; - uint8_t *p; - uint8_t v; - int32_t bidx = iCol - 1; +void tTSRowGet(STSRow2 *pRow, STSchema *pTSchema, int32_t iCol, SColVal *pColVal) { + uint8_t isTuple = ((pRow->flags & 0xf0) == 0) ? 
1 : 0; STColumn *pTColumn = &pTSchema->columns[iCol]; - STSKVRow *pTSKVRow; - SKVIdx *pKVIdx; + uint8_t flags = pRow->flags & (uint8_t)0xf; + SValue value; - ASSERT(iCol != 0); - ASSERT(pTColumn->colId != 0); + ASSERT(iCol < pTSchema->numOfCols); + ASSERT(flags); + ASSERT(pRow->sver == pTSchema->version); - ASSERT(pRow->flags & 0xf != 0); - switch (pRow->flags & 0xf) { - case TSROW_HAS_NONE: - *pColVal = ColValNONE; - return 0; - case TSROW_HAS_NULL: - *pColVal = ColValNULL; - return 0; + if (iCol == 0) { + value.ts = pRow->ts; + goto _return_value; } - if (TSROW_IS_KV_ROW(pRow)) { - ASSERT((pRow->flags & 0xf) != TSROW_HAS_VAL); + if (flags == TSROW_HAS_NONE) { + goto _return_none; + } else if (flags == TSROW_HAS_NULL) { + goto _return_null; + } - pTSKVRow = (STSKVRow *)pRow->pData; - pKVIdx = - bsearch(&((SKVIdx){.cid = pTColumn->colId}), pTSKVRow->idx, pTSKVRow->nCols, sizeof(SKVIdx), tSKVIdxCmprFn); - if (pKVIdx == NULL) { - *pColVal = ColValNONE; - } else if (pKVIdx->offset < 0) { - *pColVal = ColValNULL; - } else { - p = pRow->pData + sizeof(STSKVRow) + sizeof(SKVIdx) * pTSKVRow->nCols + pKVIdx->offset; - pColVal->type = COL_VAL_DATA; - tGetBinary(p, &pColVal->pData, &pColVal->nData); - } - } else { - // get bitmap - p = pRow->pData; - switch (pRow->flags & 0xf) { + ASSERT(pRow->nData && pRow->pData); + + if (isTuple) { + uint8_t *pb = pRow->pData; + uint8_t *pf = NULL; + uint8_t *pv = NULL; + uint8_t *p; + uint8_t b; + + // bit + switch (flags) { + case TSROW_HAS_VAL: + pf = pb; + break; case TSROW_HAS_NULL | TSROW_HAS_NONE: - v = GET_BIT1(p, bidx); - if (v == 0) { - *pColVal = ColValNONE; + b = GET_BIT1(pb, iCol - 1); + if (b == 0) { + goto _return_none; } else { - *pColVal = ColValNULL; + goto _return_null; } - return 0; case TSROW_HAS_VAL | TSROW_HAS_NONE: - v = GET_BIT1(p, bidx); - if (v == 1) { - p = p + BIT1_SIZE(pTSchema->numOfCols - 1); - break; + b = GET_BIT1(pb, iCol - 1); + if (b == 0) { + goto _return_none; } else { - *pColVal = ColValNONE; - return 0; + pf = pb + BIT1_SIZE(pTSchema->numOfCols - 1); + break; } case TSROW_HAS_VAL | TSROW_HAS_NULL: - v = GET_BIT1(p, bidx); - if (v == 1) { - p = p + BIT1_SIZE(pTSchema->numOfCols - 1); - break; + b = GET_BIT1(pb, iCol - 1); + if (b == 0) { + goto _return_null; } else { - *pColVal = ColValNULL; - return 0; + pf = pb + BIT1_SIZE(pTSchema->numOfCols - 1); + break; } case TSROW_HAS_VAL | TSROW_HAS_NULL | TSROW_HAS_NONE: - v = GET_BIT2(p, bidx); - if (v == 0) { - *pColVal = ColValNONE; - return 0; - } else if (v == 1) { - *pColVal = ColValNULL; - return 0; - } else if (v == 2) { - p = p + BIT2_SIZE(pTSchema->numOfCols - 1); - break; + b = GET_BIT2(pb, iCol - 1); + if (b == 0) { + goto _return_none; + } else if (b == 1) { + goto _return_null; } else { - ASSERT(0); + pf = pb + BIT2_SIZE(pTSchema->numOfCols - 1); + break; } default: - break; + ASSERT(0); } - // get real value - p = p + pTColumn->offset; - pColVal->type = COL_VAL_DATA; + ASSERT(pf); + + p = pf + pTColumn->offset; if (IS_VAR_DATA_TYPE(pTColumn->type)) { - tGetBinary(p + pTSchema->flen + *(int32_t *)p, &pColVal->pData, &pColVal->nData); + pv = pf + pTSchema->flen; + p = pv + *(VarDataOffsetT *)p; + } + tGetValue(p, &value, pTColumn->type); + goto _return_value; + } else { + STSKVRow *pRowK = (STSKVRow *)pRow->pData; + int16_t lidx = 0; + int16_t ridx = pRowK->nCols - 1; + uint8_t *p; + int16_t midx; + uint32_t n; + int16_t cid; + + ASSERT(pRowK->nCols > 0); + + if (pRow->flags & TSROW_KV_SMALL) { + p = pRow->pData + sizeof(STSKVRow) + sizeof(uint8_t) * pRowK->nCols; + } 
else if (pRow->flags & TSROW_KV_MID) { + p = pRow->pData + sizeof(STSKVRow) + sizeof(uint16_t) * pRowK->nCols; + } else if (pRow->flags & TSROW_KV_BIG) { + p = pRow->pData + sizeof(STSKVRow) + sizeof(uint32_t) * pRowK->nCols; } else { - pColVal->pData = p; - pColVal->nData = pTColumn->bytes; + ASSERT(0); + } + while (lidx <= ridx) { + midx = (lidx + ridx) / 2; + + if (pRow->flags & TSROW_KV_SMALL) { + n = ((uint8_t *)pRowK->idx)[midx]; + } else if (pRow->flags & TSROW_KV_MID) { + n = ((uint16_t *)pRowK->idx)[midx]; + } else { + n = ((uint32_t *)pRowK->idx)[midx]; + } + + n += tGetI16v(p + n, &cid); + + if (TABS(cid) == pTColumn->colId) { + if (cid < 0) { + goto _return_null; + } else { + n += tGetValue(p + n, &value, pTColumn->type); + goto _return_value; + } + + return; + } else if (TABS(cid) > pTColumn->colId) { + ridx = midx - 1; + } else { + lidx = midx + 1; + } } + + // not found, return NONE + goto _return_none; } - return 0; +_return_none: + *pColVal = COL_VAL_NONE(pTColumn->colId); + return; + +_return_null: + *pColVal = COL_VAL_NULL(pTColumn->colId); + return; + +_return_value: + *pColVal = COL_VAL_VALUE(pTColumn->colId, value); + return; +} + +int32_t tTSRowToArray(STSRow2 *pRow, STSchema *pTSchema, SArray **ppArray) { + int32_t code = 0; + SColVal cv; + + (*ppArray) = taosArrayInit(pTSchema->numOfCols, sizeof(SColVal)); + if (*ppArray == NULL) { + code = TSDB_CODE_OUT_OF_MEMORY; + goto _exit; + } + + for (int32_t iColumn = 0; iColumn < pTSchema->numOfCols; iColumn++) { + tTSRowGet(pRow, pTSchema, iColumn, &cv); + taosArrayPush(*ppArray, &cv); + } + +_exit: + return code; +} + +int32_t tPutTSRow(uint8_t *p, STSRow2 *pRow) { + int32_t n = 0; + + n += tPutI64(p ? p + n : p, pRow->ts); + n += tPutI8(p ? p + n : p, pRow->flags); + n += tPutI32v(p ? p + n : p, pRow->sver); + + ASSERT(pRow->flags & 0xf); + + switch (pRow->flags & 0xf) { + case TSROW_HAS_NONE: + case TSROW_HAS_NULL: + ASSERT(pRow->nData == 0); + ASSERT(pRow->pData == NULL); + break; + default: + ASSERT(pRow->nData && pRow->pData); + n += tPutBinary(p ? 
p + n : p, pRow->pData, pRow->nData); + break; + } + + return n; +} + +int32_t tGetTSRow(uint8_t *p, STSRow2 *pRow) { + int32_t n = 0; + + n += tGetI64(p + n, &pRow->ts); + n += tGetI8(p + n, &pRow->flags); + n += tGetI32v(p + n, &pRow->sver); + + ASSERT(pRow->flags); + switch (pRow->flags & 0xf) { + case TSROW_HAS_NONE: + case TSROW_HAS_NULL: + pRow->nData = 0; + pRow->pData = NULL; + break; + default: + n += tGetBinary(p + n, &pRow->pData, &pRow->nData); + break; + } + + return n; } // STSchema @@ -262,391 +738,354 @@ void tTSchemaDestroy(STSchema *pTSchema) { } // STSRowBuilder -int32_t tTSRowBuilderInit(STSRowBuilder *pBuilder, int32_t sver, int32_t nCols, SSchema *pSchema) { - if (tTSchemaCreate(sver, pSchema, nCols, &pBuilder->pTSchema) < 0) return -1; - - pBuilder->szBitMap1 = BIT1_SIZE(nCols - 1); - pBuilder->szBitMap2 = BIT2_SIZE(nCols - 1); - pBuilder->szKVBuf = - sizeof(STSKVRow) + sizeof(SKVIdx) * (nCols - 1) + pBuilder->pTSchema->flen + pBuilder->pTSchema->vlen; - pBuilder->szTPBuf = pBuilder->szBitMap2 + pBuilder->pTSchema->flen + pBuilder->pTSchema->vlen; - pBuilder->pKVBuf = taosMemoryMalloc(pBuilder->szKVBuf); - if (pBuilder->pKVBuf == NULL) { - terrno = TSDB_CODE_OUT_OF_MEMORY; - tTSchemaDestroy(pBuilder->pTSchema); - return -1; - } - pBuilder->pTPBuf = taosMemoryMalloc(pBuilder->szTPBuf); - if (pBuilder->pTPBuf == NULL) { - terrno = TSDB_CODE_OUT_OF_MEMORY; - taosMemoryFree(pBuilder->pKVBuf); - tTSchemaDestroy(pBuilder->pTSchema); + +// STag +static int tTagValCmprFn(const void *p1, const void *p2) { + if (((STagVal *)p1)->cid < ((STagVal *)p2)->cid) { return -1; + } else if (((STagVal *)p1)->cid > ((STagVal *)p2)->cid) { + return 1; } return 0; } - -void tTSRowBuilderClear(STSRowBuilder *pBuilder) { - if (pBuilder->pTPBuf) { - taosMemoryFree(pBuilder->pTPBuf); - pBuilder->pTPBuf = NULL; - } - if (pBuilder->pKVBuf) { - taosMemoryFree(pBuilder->pKVBuf); - pBuilder->pKVBuf = NULL; - } - tTSchemaDestroy(pBuilder->pTSchema); - pBuilder->pTSchema = NULL; +static int tTagValJsonCmprFn(const void *p1, const void *p2) { + return strcmp(((STagVal *)p1)[0].pKey, ((STagVal *)p2)[0].pKey); } -void tTSRowBuilderReset(STSRowBuilder *pBuilder) { - for (int32_t iCol = pBuilder->pTSchema->numOfCols - 1; iCol >= 0; iCol--) { - STColumn *pTColumn = &pBuilder->pTSchema->columns[iCol]; - COL_CLR_SET(pTColumn->flags); +static void debugPrintTagVal(int8_t type, const void *val, int32_t vlen, const char *tag, int32_t ln) { + switch (type) { + case TSDB_DATA_TYPE_JSON: + case TSDB_DATA_TYPE_VARCHAR: + case TSDB_DATA_TYPE_NCHAR: { + char tmpVal[32] = {0}; + strncpy(tmpVal, val, vlen > 31 ? 
31 : vlen); + printf("%s:%d type:%d vlen:%d, val:\"%s\"\n", tag, ln, (int32_t)type, vlen, tmpVal); + } break; + case TSDB_DATA_TYPE_FLOAT: + printf("%s:%d type:%d vlen:%d, val:%f\n", tag, ln, (int32_t)type, vlen, *(float *)val); + break; + case TSDB_DATA_TYPE_DOUBLE: + printf("%s:%d type:%d vlen:%d, val:%lf\n", tag, ln, (int32_t)type, vlen, *(double *)val); + break; + case TSDB_DATA_TYPE_BOOL: + printf("%s:%d type:%d vlen:%d, val:%" PRIu8 "\n", tag, ln, (int32_t)type, vlen, *(uint8_t *)val); + break; + case TSDB_DATA_TYPE_TINYINT: + printf("%s:%d type:%d vlen:%d, val:%" PRIi8 "\n", tag, ln, (int32_t)type, vlen, *(int8_t *)val); + break; + case TSDB_DATA_TYPE_SMALLINT: + printf("%s:%d type:%d vlen:%d, val:%" PRIi16 "\n", tag, ln, (int32_t)type, vlen, *(int16_t *)val); + break; + case TSDB_DATA_TYPE_INT: + printf("%s:%d type:%d vlen:%d, val:%" PRIi32 "\n", tag, ln, (int32_t)type, vlen, *(int32_t *)val); + break; + case TSDB_DATA_TYPE_BIGINT: + printf("%s:%d type:%d vlen:%d, val:%" PRIi64 "\n", tag, ln, (int32_t)type, vlen, *(int64_t *)val); + break; + case TSDB_DATA_TYPE_TIMESTAMP: + printf("%s:%d type:%d vlen:%d, val:%" PRIi64 "\n", tag, ln, (int32_t)type, vlen, *(int64_t *)val); + break; + case TSDB_DATA_TYPE_UTINYINT: + printf("%s:%d type:%d vlen:%d, val:%" PRIu8 "\n", tag, ln, (int32_t)type, vlen, *(uint8_t *)val); + break; + case TSDB_DATA_TYPE_USMALLINT: + printf("%s:%d type:%d vlen:%d, val:%" PRIu16 "\n", tag, ln, (int32_t)type, vlen, *(uint16_t *)val); + break; + case TSDB_DATA_TYPE_UINT: + printf("%s:%d type:%d vlen:%d, val:%" PRIu32 "\n", tag, ln, (int32_t)type, vlen, *(uint32_t *)val); + break; + case TSDB_DATA_TYPE_UBIGINT: + printf("%s:%d type:%d vlen:%d, val:%" PRIu64 "\n", tag, ln, (int32_t)type, vlen, *(uint64_t *)val); + break; + case TSDB_DATA_TYPE_NULL: + printf("%s:%d type:%d vlen:%d, val:%" PRIi8 "\n", tag, ln, (int32_t)type, vlen, *(int8_t *)val); + break; + default: + ASSERT(0); + break; } - - pBuilder->iCol = 0; - ((STSKVRow *)pBuilder->pKVBuf)->nCols = 0; - pBuilder->vlenKV = 0; - pBuilder->vlenTP = 0; - pBuilder->row.flags = 0; } -int32_t tTSRowBuilderPut(STSRowBuilder *pBuilder, int32_t cid, uint8_t *pData, uint32_t nData) { - STColumn *pTColumn = &pBuilder->pTSchema->columns[pBuilder->iCol]; - uint8_t *p; - int32_t iCol; - STSKVRow *pTSKVRow = (STSKVRow *)pBuilder->pKVBuf; - - // use interp search - if (pTColumn->colId < cid) { // right search - for (iCol = pBuilder->iCol + 1; iCol < pBuilder->pTSchema->numOfCols; iCol++) { - pTColumn = &pBuilder->pTSchema->columns[iCol]; - if (pTColumn->colId >= cid) break; - } - } else if (pTColumn->colId > cid) { // left search - for (iCol = pBuilder->iCol - 1; iCol >= 0; iCol--) { - pTColumn = &pBuilder->pTSchema->columns[iCol]; - if (pTColumn->colId <= cid) break; - } - } - - if (pTColumn->colId != cid || COL_IS_SET(pTColumn->flags)) { - return -1; - } +void debugPrintSTag(STag *pTag, const char *tag, int32_t ln) { + int8_t isJson = pTag->flags & TD_TAG_JSON; + int8_t isLarge = pTag->flags & TD_TAG_LARGE; + uint8_t *p = NULL; + int16_t offset = 0; - pBuilder->iCol = iCol; - - // set value - if (cid == 0) { - ASSERT(pData && nData == sizeof(TSKEY) && iCol == 0); - pBuilder->row.ts = *(TSKEY *)pData; - pTColumn->flags |= COL_SET_VAL; + if (isLarge) { + p = (uint8_t *)&((int16_t *)pTag->idx)[pTag->nTag]; } else { - if (pData) { - // set VAL - - pBuilder->row.flags |= TSROW_HAS_VAL; - pTColumn->flags |= COL_SET_VAL; - - /* KV */ - if (1) { // avoid KV at some threshold (todo) - pTSKVRow->idx[pTSKVRow->nCols].cid = cid; - 
pTSKVRow->idx[pTSKVRow->nCols].offset = pBuilder->vlenKV; - - p = pBuilder->pKVBuf + sizeof(STSKVRow) + sizeof(SKVIdx) * (pBuilder->pTSchema->numOfCols - 1) + - pBuilder->vlenKV; - if (IS_VAR_DATA_TYPE(pTColumn->type)) { - ASSERT(nData <= pTColumn->bytes); - pBuilder->vlenKV += tPutBinary(p, pData, nData); - } else { - ASSERT(nData == pTColumn->bytes); - memcpy(p, pData, nData); - pBuilder->vlenKV += nData; - } - } - - /* TUPLE */ - p = pBuilder->pTPBuf + pBuilder->szBitMap2 + pTColumn->offset; - if (IS_VAR_DATA_TYPE(pTColumn->type)) { - ASSERT(nData <= pTColumn->bytes); - *(int32_t *)p = pBuilder->vlenTP; - - p = pBuilder->pTPBuf + pBuilder->szBitMap2 + pBuilder->pTSchema->flen + pBuilder->vlenTP; - pBuilder->vlenTP += tPutBinary(p, pData, nData); - } else { - ASSERT(nData == pTColumn->bytes); - memcpy(p, pData, nData); - } + p = (uint8_t *)&pTag->idx[pTag->nTag]; + } + printf("%s:%d >>> STAG === %s:%s, len: %d, nTag: %d, sver:%d\n", tag, ln, isJson ? "json" : "normal", + isLarge ? "large" : "small", (int32_t)pTag->len, (int32_t)pTag->nTag, pTag->ver); + for (uint16_t n = 0; n < pTag->nTag; ++n) { + if (isLarge) { + offset = ((int16_t *)pTag->idx)[n]; } else { - // set NULL - - pBuilder->row.flags |= TSROW_HAS_NULL; - pTColumn->flags |= COL_SET_NULL; - - pTSKVRow->idx[pTSKVRow->nCols].cid = cid; - pTSKVRow->idx[pTSKVRow->nCols].offset = -1; + offset = pTag->idx[n]; } - - pTSKVRow->nCols++; - } - - return 0; -} - -static FORCE_INLINE int tSKVIdxCmprFn(const void *p1, const void *p2) { - SKVIdx *pKVIdx1 = (SKVIdx *)p1; - SKVIdx *pKVIdx2 = (SKVIdx *)p2; - if (pKVIdx1->cid > pKVIdx2->cid) { - return 1; - } else if (pKVIdx1->cid < pKVIdx2->cid) { - return -1; - } - return 0; -} -static void setBitMap(uint8_t *p, STSchema *pTSchema, uint8_t flags) { - int32_t bidx; - STColumn *pTColumn; - - for (int32_t iCol = 1; iCol < pTSchema->numOfCols; iCol++) { - pTColumn = &pTSchema->columns[iCol]; - bidx = iCol - 1; - - switch (flags) { - case TSROW_HAS_NULL | TSROW_HAS_NONE: - if (pTColumn->flags & COL_SET_NULL) { - SET_BIT1(p, bidx, (uint8_t)1); - } else { - SET_BIT1(p, bidx, (uint8_t)0); - } - break; - case TSROW_HAS_VAL | TSROW_HAS_NULL | TSROW_HAS_NONE: - if (pTColumn->flags & COL_SET_NULL) { - SET_BIT2(p, bidx, (uint8_t)1); - } else if (pTColumn->flags & COL_SET_VAL) { - SET_BIT2(p, bidx, (uint8_t)2); - } else { - SET_BIT2(p, bidx, (uint8_t)0); - } - break; - default: - if (pTColumn->flags & COL_SET_VAL) { - SET_BIT1(p, bidx, (uint8_t)1); - } else { - SET_BIT1(p, bidx, (uint8_t)0); - } - - break; + STagVal tagVal = {0}; + if (isJson) { + tagVal.pKey = (char *)POINTER_SHIFT(p, offset); + } else { + tagVal.cid = *(int16_t *)POINTER_SHIFT(p, offset); + } + printf("%s:%d loop[%d-%d] offset=%d\n", __func__, __LINE__, (int32_t)pTag->nTag, (int32_t)n, (int32_t)offset); + tGetTagVal(p + offset, &tagVal, isJson); + if (IS_VAR_DATA_TYPE(tagVal.type)) { + debugPrintTagVal(tagVal.type, tagVal.pData, tagVal.nData, __func__, __LINE__); + } else { + debugPrintTagVal(tagVal.type, &tagVal.i64, tDataTypes[tagVal.type].bytes, __func__, __LINE__); } } + printf("\n"); } -int32_t tTSRowBuilderGetRow(STSRowBuilder *pBuilder, const STSRow2 **ppRow) { - int32_t nDataTP, nDataKV; - uint32_t flags; - STSKVRow *pTSKVRow = (STSKVRow *)pBuilder->pKVBuf; - int32_t nCols = pBuilder->pTSchema->numOfCols; - - // error not set ts - if (!COL_IS_SET(pBuilder->pTSchema->columns->flags)) { - return -1; - } - - ASSERT(pTSKVRow->nCols < nCols); - if (pTSKVRow->nCols < nCols - 1) { - pBuilder->row.flags |= TSROW_HAS_NONE; - } - 
ASSERT(pBuilder->row.flags & 0xf != 0); - *(ppRow) = &pBuilder->row; - switch (pBuilder->row.flags & 0xf) { - case TSROW_HAS_NONE: - case TSROW_HAS_NULL: - pBuilder->row.nData = 0; - pBuilder->row.pData = NULL; - return 0; - case TSROW_HAS_NULL | TSROW_HAS_NONE: - nDataTP = pBuilder->szBitMap1; - break; - case TSROW_HAS_VAL: - nDataTP = pBuilder->pTSchema->flen + pBuilder->vlenTP; - break; - case TSROW_HAS_VAL | TSROW_HAS_NONE: - case TSROW_HAS_VAL | TSROW_HAS_NULL: - nDataTP = pBuilder->szBitMap1 + pBuilder->pTSchema->flen + pBuilder->vlenTP; - break; - case TSROW_HAS_VAL | TSROW_HAS_NULL | TSROW_HAS_NONE: - nDataTP = pBuilder->szBitMap2 + pBuilder->pTSchema->flen + pBuilder->vlenTP; +void debugCheckTags(STag *pTag) { + switch (pTag->flags) { + case 0x0: + case 0x20: + case 0x40: + case 0x60: break; default: ASSERT(0); } - nDataKV = sizeof(STSKVRow) + sizeof(SKVIdx) * pTSKVRow->nCols + pBuilder->vlenKV; - pBuilder->row.sver = pBuilder->pTSchema->version; - if (nDataKV < nDataTP) { - // generate KV row - - ASSERT(pBuilder->row.flags & 0xf != TSROW_HAS_VAL); + ASSERT(pTag->nTag <= 128 && pTag->nTag >= 0); + ASSERT(pTag->ver <= 512 && pTag->ver >= 0); // temp condition for pTag->ver +} - pBuilder->row.flags |= TSROW_KV_ROW; - pBuilder->row.nData = nDataKV; - pBuilder->row.pData = pBuilder->pKVBuf; +static int32_t tPutTagVal(uint8_t *p, STagVal *pTagVal, int8_t isJson) { + int32_t n = 0; - qsort(pTSKVRow->idx, pTSKVRow->nCols, sizeof(SKVIdx), tSKVIdxCmprFn); - if (pTSKVRow->nCols < nCols - 1) { - memmove(&pTSKVRow->idx[pTSKVRow->nCols], &pTSKVRow->idx[nCols - 1], pBuilder->vlenKV); - } + // key + if (isJson) { + n += tPutCStr(p ? p + n : p, pTagVal->pKey); } else { - // generate TUPLE row + n += tPutI16v(p ? p + n : p, pTagVal->cid); + } - pBuilder->row.nData = nDataTP; + // type + n += tPutI8(p ? p + n : p, pTagVal->type); - uint8_t *p; - uint8_t flags = pBuilder->row.flags & 0xf; + // value + if (IS_VAR_DATA_TYPE(pTagVal->type)) { + n += tPutBinary(p ? p + n : p, pTagVal->pData, pTagVal->nData); + } else { + p = p ? 
p + n : p; + n += tDataTypes[pTagVal->type].bytes; + if (p) memcpy(p, &(pTagVal->i64), tDataTypes[pTagVal->type].bytes); + } - if (flags == TSROW_HAS_VAL) { - pBuilder->row.pData = pBuilder->pTPBuf + pBuilder->szBitMap2; - } else { - if (flags == TSROW_HAS_VAL | TSROW_HAS_NULL | TSROW_HAS_NONE) { - pBuilder->row.pData = pBuilder->pTPBuf; - } else { - pBuilder->row.pData = pBuilder->pTPBuf + pBuilder->szBitMap2 - pBuilder->szBitMap1; - } + return n; +} +static int32_t tGetTagVal(uint8_t *p, STagVal *pTagVal, int8_t isJson) { + int32_t n = 0; - setBitMap(pBuilder->row.pData, pBuilder->pTSchema, flags); - } + // key + if (isJson) { + n += tGetCStr(p + n, &pTagVal->pKey); + } else { + n += tGetI16v(p + n, &pTagVal->cid); } - return 0; -} + // type + n += tGetI8(p + n, &pTagVal->type); -static FORCE_INLINE int tTagIdxCmprFn(const void *p1, const void *p2) { - STagIdx *pTagIdx1 = (STagIdx *)p1; - STagIdx *pTagIdx2 = (STagIdx *)p2; - if (pTagIdx1->cid < pTagIdx1->cid) { - return -1; - } else if (pTagIdx1->cid > pTagIdx1->cid) { - return 1; + // value + if (IS_VAR_DATA_TYPE(pTagVal->type)) { + n += tGetBinary(p + n, &pTagVal->pData, &pTagVal->nData); + } else { + memcpy(&(pTagVal->i64), p + n, tDataTypes[pTagVal->type].bytes); + n += tDataTypes[pTagVal->type].bytes; } - return 0; + + return n; } -int32_t tTagNew(STagVal *pTagVals, int16_t nTag, STag **ppTag) { - STagVal *pTagVal; - uint8_t *p; - int32_t n; - uint16_t tsize = sizeof(STag) + sizeof(STagIdx) * nTag; +int32_t tTagNew(SArray *pArray, int32_t version, int8_t isJson, STag **ppTag) { + int32_t code = 0; + uint8_t *p = NULL; + int16_t n = 0; + int16_t nTag = taosArrayGetSize(pArray); + int32_t szTag = 0; + int8_t isLarge = 0; + + // sort + if (isJson) { + qsort(pArray->pData, nTag, sizeof(STagVal), tTagValJsonCmprFn); + } else { + qsort(pArray->pData, nTag, sizeof(STagVal), tTagValCmprFn); + } + // get size for (int16_t iTag = 0; iTag < nTag; iTag++) { - pTagVal = &pTagVals[iTag]; - - if (IS_VAR_DATA_TYPE(pTagVal->type)) { - tsize += tPutBinary(NULL, pTagVal->pData, pTagVal->nData); - } else { - ASSERT(pTagVal->nData == TYPE_BYTES[pTagVal->type]); - tsize += pTagVal->nData; - } + szTag += tPutTagVal(NULL, (STagVal *)taosArrayGet(pArray, iTag), isJson); } - - (*ppTag) = (STag *)taosMemoryMalloc(tsize); - if (*ppTag == NULL) { - terrno = TSDB_CODE_OUT_OF_MEMORY; - return -1; + if (szTag <= INT8_MAX) { + szTag = szTag + sizeof(STag) + sizeof(int8_t) * nTag; + } else { + szTag = szTag + sizeof(STag) + sizeof(int16_t) * nTag; + isLarge = 1; } - p = (uint8_t *)&((*ppTag)->idx[nTag]); - n = 0; + ASSERT(szTag <= INT16_MAX); - (*ppTag)->len = tsize; + // build tag + (*ppTag) = (STag *)taosMemoryCalloc(szTag, 1); + if ((*ppTag) == NULL) { + code = TSDB_CODE_OUT_OF_MEMORY; + goto _err; + } + (*ppTag)->flags = 0; + if (isJson) { + (*ppTag)->flags |= TD_TAG_JSON; + } + if (isLarge) { + (*ppTag)->flags |= TD_TAG_LARGE; + } + (*ppTag)->len = szTag; (*ppTag)->nTag = nTag; - for (int16_t iTag = 0; iTag < nTag; iTag++) { - pTagVal = &pTagVals[iTag]; - - (*ppTag)->idx[iTag].cid = pTagVal->cid; - (*ppTag)->idx[iTag].offset = n; + (*ppTag)->ver = version; - if (IS_VAR_DATA_TYPE(pTagVal->type)) { - n += tPutBinary(p + n, pTagVal->pData, pTagVal->nData); + if (isLarge) { + p = (uint8_t *)&((int16_t *)(*ppTag)->idx)[nTag]; + } else { + p = (uint8_t *)&(*ppTag)->idx[nTag]; + } + n = 0; + for (int16_t iTag = 0; iTag < nTag; iTag++) { + if (isLarge) { + ((int16_t *)(*ppTag)->idx)[iTag] = n; } else { - memcpy(p + n, pTagVal->pData, pTagVal->nData); - n += 
pTagVal->nData; + (*ppTag)->idx[iTag] = n; } + n += tPutTagVal(p + n, (STagVal *)taosArrayGet(pArray, iTag), isJson); } +#ifdef TD_DEBUG_PRINT_TAG + debugPrintSTag(*ppTag, __func__, __LINE__); +#endif - qsort((*ppTag)->idx, (*ppTag)->nTag, sizeof(STagIdx), tTagIdxCmprFn); - return 0; + debugCheckTags(*ppTag); // TODO: remove this line after debug + return code; + +_err: + return code; } void tTagFree(STag *pTag) { if (pTag) taosMemoryFree(pTag); } -int32_t tTagSet(STag *pTag, SSchema *pSchema, int32_t nCols, int iCol, uint8_t *pData, uint32_t nData, STag **ppTag) { - STagVal *pTagVals; - int16_t nTags = 0; - SSchema *pColumn; - uint8_t *p; - uint32_t n; +char *tTagValToData(const STagVal *value, bool isJson) { + if (!value) return NULL; + char *data = NULL; + int8_t typeBytes = 0; + if (isJson) { + typeBytes = CHAR_BYTES; + } + if (IS_VAR_DATA_TYPE(value->type)) { + data = taosMemoryCalloc(1, typeBytes + VARSTR_HEADER_SIZE + value->nData); + if (data == NULL) return NULL; + if (isJson) *data = value->type; + varDataLen(data + typeBytes) = value->nData; + memcpy(varDataVal(data + typeBytes), value->pData, value->nData); + } else { + data = ((char *)&(value->i64)) - typeBytes; // json with type + } - pTagVals = (STagVal *)taosMemoryMalloc(sizeof(*pTagVals) * nCols); - if (pTagVals == NULL) { - terrno = TSDB_CODE_OUT_OF_MEMORY; - return -1; + return data; +} + +bool tTagGet(const STag *pTag, STagVal *pTagVal) { + int16_t lidx = 0; + int16_t ridx = pTag->nTag - 1; + int16_t midx; + uint8_t *p; + int8_t isJson = pTag->flags & TD_TAG_JSON; + int8_t isLarge = pTag->flags & TD_TAG_LARGE; + int16_t offset; + STagVal tv; + int c; + + if (isLarge) { + p = (uint8_t *)&((int16_t *)pTag->idx)[pTag->nTag]; + } else { + p = (uint8_t *)&pTag->idx[pTag->nTag]; } - for (int32_t i = 0; i < nCols; i++) { - pColumn = &pSchema[i]; + pTagVal->type = TSDB_DATA_TYPE_NULL; + pTagVal->pData = NULL; + pTagVal->nData = 0; + while (lidx <= ridx) { + midx = (lidx + ridx) / 2; + if (isLarge) { + offset = ((int16_t *)pTag->idx)[midx]; + } else { + offset = pTag->idx[midx]; + } - if (i == iCol) { - p = pData; - n = nData; + tGetTagVal(p + offset, &tv, isJson); + if (isJson) { + c = tTagValJsonCmprFn(pTagVal, &tv); } else { - tTagGet(pTag, pColumn->colId, pColumn->type, &p, &n); + c = tTagValCmprFn(pTagVal, &tv); } - if (p == NULL) continue; + if (c < 0) { + ridx = midx - 1; + } else if (c > 0) { + lidx = midx + 1; + } else { + memcpy(pTagVal, &tv, sizeof(tv)); + return true; + } + } + return false; +} - ASSERT(IS_VAR_DATA_TYPE(pColumn->type) || n == pColumn->bytes); +int32_t tEncodeTag(SEncoder *pEncoder, const STag *pTag) { + return tEncodeBinary(pEncoder, (const uint8_t *)pTag, pTag->len); +} - pTagVals[nTags].cid = pColumn->colId; - pTagVals[nTags].type = pColumn->type; - pTagVals[nTags].nData = n; - pTagVals[nTags].pData = p; +int32_t tDecodeTag(SDecoder *pDecoder, STag **ppTag) { return tDecodeBinary(pDecoder, (uint8_t **)ppTag, NULL); } - nTags++; - } +int32_t tTagToValArray(const STag *pTag, SArray **ppArray) { + int32_t code = 0; + uint8_t *p = NULL; + STagVal tv = {0}; + int8_t isLarge = pTag->flags & TD_TAG_LARGE; + int16_t offset = 0; - // create new tag - if (tTagNew(pTagVals, nTags, ppTag) < 0) { - taosMemoryFree(pTagVals); - return -1; + if (isLarge) { + p = (uint8_t *)&((int16_t *)pTag->idx)[pTag->nTag]; + } else { + p = (uint8_t *)&pTag->idx[pTag->nTag]; } - taosMemoryFree(pTagVals); - return 0; -} + (*ppArray) = taosArrayInit(pTag->nTag + 1, sizeof(STagVal)); + if (*ppArray == NULL) { + code = 
TSDB_CODE_OUT_OF_MEMORY; + goto _err; + } -void tTagGet(STag *pTag, int16_t cid, int8_t type, uint8_t **ppData, uint32_t *nData) { - STagIdx *pTagIdx = bsearch(&((STagIdx){.cid = cid}), pTag->idx, pTag->nTag, sizeof(STagIdx), tTagIdxCmprFn); - if (pTagIdx == NULL) { - *ppData = NULL; - *nData = 0; - } else { - uint8_t *p = (uint8_t *)&pTag->idx[pTag->nTag] + pTagIdx->offset; - if (IS_VAR_DATA_TYPE(type)) { - tGetBinary(p, ppData, nData); + for (int16_t iTag = 0; iTag < pTag->nTag; iTag++) { + if (isLarge) { + offset = ((int16_t *)pTag->idx)[iTag]; } else { - *ppData = p; - *nData = TYPE_BYTES[type]; + offset = pTag->idx[iTag]; } + tGetTagVal(p + offset, &tv, pTag->flags & TD_TAG_JSON); + taosArrayPush(*ppArray, &tv); } -} -int32_t tEncodeTag(SEncoder *pEncoder, const STag *pTag) { - return tEncodeBinary(pEncoder, (const uint8_t *)pTag, pTag->len); -} + return code; -int32_t tDecodeTag(SDecoder *pDecoder, STag **ppTag) { return tDecodeBinary(pDecoder, (uint8_t **)ppTag, NULL); } +_err: + return code; +} #if 1 // =================================================================================================================== static void dataColSetNEleNull(SDataCol *pCol, int nEle); @@ -974,162 +1413,4 @@ void tdResetDataCols(SDataCols *pCols) { } } -SKVRow tdKVRowDup(SKVRow row) { - SKVRow trow = taosMemoryMalloc(kvRowLen(row)); - if (trow == NULL) return NULL; - - kvRowCpy(trow, row); - return trow; -} - -static int compareColIdx(const void *a, const void *b) { - const SColIdx *x = (const SColIdx *)a; - const SColIdx *y = (const SColIdx *)b; - if (x->colId > y->colId) { - return 1; - } - if (x->colId < y->colId) { - return -1; - } - return 0; -} - -void tdSortKVRowByColIdx(SKVRow row) { qsort(kvRowColIdx(row), kvRowNCols(row), sizeof(SColIdx), compareColIdx); } - -int tdSetKVRowDataOfCol(SKVRow *orow, int16_t colId, int8_t type, void *value) { - SColIdx *pColIdx = NULL; - SKVRow row = *orow; - SKVRow nrow = NULL; - void *ptr = taosbsearch(&colId, kvRowColIdx(row), kvRowNCols(row), sizeof(SColIdx), comparTagId, TD_GE); - - if (ptr == NULL || ((SColIdx *)ptr)->colId > colId) { // need to add a column value to the row - int diff = IS_VAR_DATA_TYPE(type) ? 
varDataTLen(value) : TYPE_BYTES[type]; - int nRowLen = kvRowLen(row) + sizeof(SColIdx) + diff; - int oRowCols = kvRowNCols(row); - - ASSERT(diff > 0); - nrow = taosMemoryMalloc(nRowLen); - if (nrow == NULL) return -1; - - kvRowSetLen(nrow, nRowLen); - kvRowSetNCols(nrow, oRowCols + 1); - - memcpy(kvRowColIdx(nrow), kvRowColIdx(row), sizeof(SColIdx) * oRowCols); - memcpy(kvRowValues(nrow), kvRowValues(row), kvRowValLen(row)); - - pColIdx = kvRowColIdxAt(nrow, oRowCols); - pColIdx->colId = colId; - pColIdx->offset = kvRowValLen(row); - - memcpy(kvRowColVal(nrow, pColIdx), value, diff); // copy new value - - tdSortKVRowByColIdx(nrow); - - *orow = nrow; - taosMemoryFree(row); - } else { - ASSERT(((SColIdx *)ptr)->colId == colId); - if (IS_VAR_DATA_TYPE(type)) { - void *pOldVal = kvRowColVal(row, (SColIdx *)ptr); - - if (varDataTLen(value) == varDataTLen(pOldVal)) { // just update the column value in place - memcpy(pOldVal, value, varDataTLen(value)); - } else { // need to reallocate the memory - int16_t nlen = kvRowLen(row) + (varDataTLen(value) - varDataTLen(pOldVal)); - ASSERT(nlen > 0); - nrow = taosMemoryMalloc(nlen); - if (nrow == NULL) return -1; - - kvRowSetLen(nrow, nlen); - kvRowSetNCols(nrow, kvRowNCols(row)); - - int zsize = sizeof(SColIdx) * kvRowNCols(row) + ((SColIdx *)ptr)->offset; - memcpy(kvRowColIdx(nrow), kvRowColIdx(row), zsize); - memcpy(kvRowColVal(nrow, ((SColIdx *)ptr)), value, varDataTLen(value)); - // Copy left value part - int lsize = kvRowLen(row) - TD_KV_ROW_HEAD_SIZE - zsize - varDataTLen(pOldVal); - if (lsize > 0) { - memcpy(POINTER_SHIFT(nrow, TD_KV_ROW_HEAD_SIZE + zsize + varDataTLen(value)), - POINTER_SHIFT(row, TD_KV_ROW_HEAD_SIZE + zsize + varDataTLen(pOldVal)), lsize); - } - - for (int i = 0; i < kvRowNCols(nrow); i++) { - pColIdx = kvRowColIdxAt(nrow, i); - - if (pColIdx->offset > ((SColIdx *)ptr)->offset) { - pColIdx->offset = pColIdx->offset - varDataTLen(pOldVal) + varDataTLen(value); - } - } - - *orow = nrow; - taosMemoryFree(row); - } - } else { - memcpy(kvRowColVal(row, (SColIdx *)ptr), value, TYPE_BYTES[type]); - } - } - - return 0; -} - -int tdEncodeKVRow(void **buf, SKVRow row) { - // May change the encode purpose - if (buf != NULL) { - kvRowCpy(*buf, row); - *buf = POINTER_SHIFT(*buf, kvRowLen(row)); - } - - return kvRowLen(row); -} - -void *tdDecodeKVRow(void *buf, SKVRow *row) { - *row = tdKVRowDup(buf); - if (*row == NULL) return NULL; - return POINTER_SHIFT(buf, kvRowLen(*row)); -} - -int tdInitKVRowBuilder(SKVRowBuilder *pBuilder) { - pBuilder->tCols = 128; - pBuilder->nCols = 0; - pBuilder->pColIdx = (SColIdx *)taosMemoryMalloc(sizeof(SColIdx) * pBuilder->tCols); - if (pBuilder->pColIdx == NULL) return -1; - pBuilder->alloc = 1024; - pBuilder->size = 0; - pBuilder->buf = taosMemoryMalloc(pBuilder->alloc); - if (pBuilder->buf == NULL) { - taosMemoryFree(pBuilder->pColIdx); - return -1; - } - return 0; -} - -void tdDestroyKVRowBuilder(SKVRowBuilder *pBuilder) { - taosMemoryFreeClear(pBuilder->pColIdx); - taosMemoryFreeClear(pBuilder->buf); -} - -void tdResetKVRowBuilder(SKVRowBuilder *pBuilder) { - pBuilder->nCols = 0; - pBuilder->size = 0; -} - -SKVRow tdGetKVRowFromBuilder(SKVRowBuilder *pBuilder) { - int tlen = sizeof(SColIdx) * pBuilder->nCols + pBuilder->size; - // if (tlen == 0) return NULL; // nCols == 0 means no tags - - tlen += TD_KV_ROW_HEAD_SIZE; - - SKVRow row = taosMemoryMalloc(tlen); - if (row == NULL) return NULL; - - kvRowSetNCols(row, pBuilder->nCols); - kvRowSetLen(row, tlen); - - if (pBuilder->nCols > 0) { - 
memcpy(kvRowColIdx(row), pBuilder->pColIdx, sizeof(SColIdx) * pBuilder->nCols); - memcpy(kvRowValues(row), pBuilder->buf, pBuilder->size); - } - - return row; -} #endif \ No newline at end of file diff --git a/source/common/src/tglobal.c b/source/common/src/tglobal.c index 1b61a0bc606aa9fd479cf996668756d2b88f4702..e77c462e5ae0fe81521f34cbd1475669747e0ee6 100644 --- a/source/common/src/tglobal.c +++ b/source/common/src/tglobal.c @@ -79,9 +79,10 @@ uint16_t tsTelemPort = 80; // schemaless char tsSmlTagName[TSDB_COL_NAME_LEN] = "_tag_null"; -char tsSmlChildTableName[TSDB_TABLE_NAME_LEN] = ""; //user defined child table name can be specified in tag value. - //If set to empty system will generate table name using MD5 hash. -bool tsSmlDataFormat = true; // true means that the name and order of cols in each line are the same(only for influx protocol) +char tsSmlChildTableName[TSDB_TABLE_NAME_LEN] = ""; // user-defined child table name can be specified in tag value. + // If set to empty, the system generates the table name using an MD5 hash. +bool tsSmlDataFormat = + true; // true means that the name and order of cols in each line are the same (only for influx protocol) // query int32_t tsQueryPolicy = 1; @@ -108,8 +109,11 @@ int32_t tsCompressColData = -1; */ int32_t tsCompatibleModel = 1; +// count/hyperloglog functions always return a value in case of all-NULL data or an empty data set. +int32_t tsCountAlwaysReturnValue = 1; + // 10 ms for sliding time; the value will be changed if the time precision changes -int32_t tsMinSlidingTime = 10; +int32_t tsMinSlidingTime = 10; // the maximum number of distinct query results int32_t tsMaxNumOfDistinctResults = 1000 * 10000; @@ -129,7 +133,6 @@ int32_t tsRetryStreamCompDelay = 10 * 1000; // The delayed computing ratio. 10% of the whole computing time window by default. float tsStreamComputDelayRatio = 0.1f; -int32_t tsProjectExecInterval = 10000; // every 10sec, the projection will be executed once int64_t tsMaxRetentWindow = 24 * 3600L; // maximum time window tolerance // the maximum allowed query buffer size during query processing for each data node. 
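As a usage reference, a minimal sketch of the STag round trip that the tdataformat.c hunks above migrate to, using only the helpers introduced in this patch (tTagNew, tTagGet, tTagFree); the wrapper function below is illustrative only, not part of the tree, and error handling is abbreviated:

static int32_t tagRoundTripExample(void) {
  // Build an STag holding a single VARCHAR tag value with cid 1.
  SArray *tagArray = taosArrayInit(1, sizeof(STagVal));
  if (tagArray == NULL) return TSDB_CODE_OUT_OF_MEMORY;

  char    city[] = "beijing";
  STagVal tagVal = {.cid = 1,
                    .type = TSDB_DATA_TYPE_VARCHAR,
                    .pData = (uint8_t *)city,
                    .nData = (uint32_t)strlen(city)};
  taosArrayPush(tagArray, &tagVal);

  // tTagNew sorts the values by cid, picks the small or large idx[] layout,
  // and serializes each value via tPutTagVal.
  STag   *pTag = NULL;
  int32_t code = tTagNew(tagArray, 1 /* version */, false /* isJson */, &pTag);
  taosArrayDestroy(tagArray);
  if (code != 0) return code;

  // tTagGet binary-searches idx[] for the cid set in the query value; on a
  // hit, query.pData/query.nData point into pTag's own buffer.
  STagVal query = {.cid = 1};
  bool    found = tTagGet(pTag, &query);

  tTagFree(pTag);
  return found ? 0 : -1;
}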
@@ -292,6 +295,7 @@ int32_t taosAddClientLogCfg(SConfig *pCfg) { if (cfgAddInt32(pCfg, "jniDebugFlag", jniDebugFlag, 0, 255, 1) != 0) return -1; if (cfgAddInt32(pCfg, "simDebugFlag", 143, 0, 255, 1) != 0) return -1; if (cfgAddInt32(pCfg, "debugFlag", 0, 0, 255, 1) != 0) return -1; + if (cfgAddInt32(pCfg, "idxDebugFlag", idxDebugFlag, 0, 255, 1) != 0) return -1; return 0; } @@ -307,6 +311,7 @@ static int32_t taosAddServerLogCfg(SConfig *pCfg) { if (cfgAddInt32(pCfg, "fsDebugFlag", fsDebugFlag, 0, 255, 0) != 0) return -1; if (cfgAddInt32(pCfg, "fnDebugFlag", fnDebugFlag, 0, 255, 0) != 0) return -1; if (cfgAddInt32(pCfg, "smaDebugFlag", smaDebugFlag, 0, 255, 0) != 0) return -1; + if (cfgAddInt32(pCfg, "idxDebugFlag", idxDebugFlag, 0, 255, 0) != 0) return -1; return 0; } @@ -371,6 +376,7 @@ static int32_t taosAddServerCfg(SConfig *pCfg) { if (cfgAddInt32(pCfg, "minSlidingTime", tsMinSlidingTime, 10, 1000000, 0) != 0) return -1; if (cfgAddInt32(pCfg, "minIntervalTime", tsMinIntervalTime, 1, 1000000, 0) != 0) return -1; if (cfgAddInt32(pCfg, "maxNumOfDistinctRes", tsMaxNumOfDistinctResults, 10 * 10000, 10000 * 10000, 0) != 0) return -1; + if (cfgAddInt32(pCfg, "countAlwaysReturnValue", tsCountAlwaysReturnValue, 0, 1, 0) != 0) return -1; if (cfgAddInt32(pCfg, "maxStreamCompDelay", tsMaxStreamComputDelay, 10, 1000000000, 0) != 0) return -1; if (cfgAddInt32(pCfg, "maxFirstStreamCompDelay", tsStreamCompStartDelay, 1000, 1000000000, 0) != 0) return -1; if (cfgAddInt32(pCfg, "retryStreamCompDelay", tsRetryStreamCompDelay, 10, 1000000000, 0) != 0) return -1; @@ -479,6 +485,7 @@ static void taosSetClientLogCfg(SConfig *pCfg) { rpcDebugFlag = cfgGetItem(pCfg, "rpcDebugFlag")->i32; tmrDebugFlag = cfgGetItem(pCfg, "tmrDebugFlag")->i32; jniDebugFlag = cfgGetItem(pCfg, "jniDebugFlag")->i32; + idxDebugFlag = cfgGetItem(pCfg, "idxDebugFlag")->i32; } static void taosSetServerLogCfg(SConfig *pCfg) { @@ -493,6 +500,7 @@ static void taosSetServerLogCfg(SConfig *pCfg) { fsDebugFlag = cfgGetItem(pCfg, "fsDebugFlag")->i32; fnDebugFlag = cfgGetItem(pCfg, "fnDebugFlag")->i32; smaDebugFlag = cfgGetItem(pCfg, "smaDebugFlag")->i32; + idxDebugFlag = cfgGetItem(pCfg, "idxDebugFlag")->i32; } static int32_t taosSetClientCfg(SConfig *pCfg) { @@ -562,6 +570,7 @@ static int32_t taosSetServerCfg(SConfig *pCfg) { tsMinSlidingTime = cfgGetItem(pCfg, "minSlidingTime")->i32; tsMinIntervalTime = cfgGetItem(pCfg, "minIntervalTime")->i32; tsMaxNumOfDistinctResults = cfgGetItem(pCfg, "maxNumOfDistinctRes")->i32; + tsCountAlwaysReturnValue = cfgGetItem(pCfg, "countAlwaysReturnValue")->i32; tsMaxStreamComputDelay = cfgGetItem(pCfg, "maxStreamCompDelay")->i32; tsStreamCompStartDelay = cfgGetItem(pCfg, "maxFirstStreamCompDelay")->i32; tsRetryStreamCompDelay = cfgGetItem(pCfg, "retryStreamCompDelay")->i32; diff --git a/source/common/src/tmsg.c b/source/common/src/tmsg.c index 22ac4124340752415ef320effacc781568b20897..9c6c532bcd071bf8799e28076a9ea147d5b81443 100644 --- a/source/common/src/tmsg.c +++ b/source/common/src/tmsg.c @@ -28,7 +28,7 @@ #undef TD_MSG_SEG_CODE_ #include "tmsgdef.h" -int32_t tInitSubmitMsgIter(SSubmitReq *pMsg, SSubmitMsgIter *pIter) { +int32_t tInitSubmitMsgIter(const SSubmitReq *pMsg, SSubmitMsgIter *pIter) { if (pMsg == NULL) { terrno = TSDB_CODE_TDB_SUBMIT_MSG_MSSED_UP; return -1; @@ -147,12 +147,24 @@ int32_t tEncodeSQueryNodeAddr(SEncoder *pEncoder, SQueryNodeAddr *pAddr) { return 0; } +int32_t tEncodeSQueryNodeLoad(SEncoder *pEncoder, SQueryNodeLoad *pLoad) { + if (tEncodeSQueryNodeAddr(pEncoder, &pLoad->addr) < 0) 
return -1; + if (tEncodeU64(pEncoder, pLoad->load) < 0) return -1; + return 0; +} + int32_t tDecodeSQueryNodeAddr(SDecoder *pDecoder, SQueryNodeAddr *pAddr) { if (tDecodeI32(pDecoder, &pAddr->nodeId) < 0) return -1; if (tDecodeSEpSet(pDecoder, &pAddr->epSet) < 0) return -1; return 0; } +int32_t tDecodeSQueryNodeLoad(SDecoder *pDecoder, SQueryNodeLoad *pLoad) { + if (tDecodeSQueryNodeAddr(pDecoder, &pLoad->addr) < 0) return -1; + if (tDecodeU64(pDecoder, &pLoad->load) < 0) return -1; + return 0; +} + int32_t taosEncodeSEpSet(void **buf, const SEpSet *pEp) { int32_t tlen = 0; tlen += taosEncodeFixedI8(buf, pEp->inUse); @@ -304,6 +316,12 @@ static int32_t tSerializeSClientHbRsp(SEncoder *pEncoder, const SClientHbRsp *pR if (tEncodeI32(pEncoder, pRsp->query->onlineDnodes) < 0) return -1; if (tEncodeI8(pEncoder, pRsp->query->killConnection) < 0) return -1; if (tEncodeSEpSet(pEncoder, &pRsp->query->epSet) < 0) return -1; + int32_t num = taosArrayGetSize(pRsp->query->pQnodeList); + if (tEncodeI32(pEncoder, num) < 0) return -1; + for (int32_t i = 0; i < num; ++i) { + SQueryNodeLoad *pLoad = taosArrayGet(pRsp->query->pQnodeList, i); + if (tEncodeSQueryNodeLoad(pEncoder, pLoad) < 0) return -1; + } } else { if (tEncodeI32(pEncoder, queryNum) < 0) return -1; } @@ -333,6 +351,17 @@ static int32_t tDeserializeSClientHbRsp(SDecoder *pDecoder, SClientHbRsp *pRsp) if (tDecodeI32(pDecoder, &pRsp->query->onlineDnodes) < 0) return -1; if (tDecodeI8(pDecoder, &pRsp->query->killConnection) < 0) return -1; if (tDecodeSEpSet(pDecoder, &pRsp->query->epSet) < 0) return -1; + int32_t pQnodeNum = 0; + if (tDecodeI32(pDecoder, &pQnodeNum) < 0) return -1; + if (pQnodeNum > 0) { + pRsp->query->pQnodeList = taosArrayInit(pQnodeNum, sizeof(SQueryNodeLoad)); + if (NULL == pRsp->query->pQnodeList) return -1; + for (int32_t i = 0; i < pQnodeNum; ++i) { + SQueryNodeLoad load = {0}; + if (tDecodeSQueryNodeLoad(pDecoder, &load) < 0) return -1; + taosArrayPush(pRsp->query->pQnodeList, &load); + } + } } int32_t kvNum = 0; @@ -665,22 +692,25 @@ void tFreeSMAltertbReq(SMAlterStbReq *pReq) { taosArrayDestroy(pReq->pFields); pReq->pFields = NULL; } -int32_t tSerializeSMEpSet(void *buf, int32_t bufLen, SMEpSet *pReq) { + + +int32_t tSerializeSEpSet(void *buf, int32_t bufLen, const SEpSet *pEpset) { SEncoder encoder = {0}; tEncoderInit(&encoder, buf, bufLen); if (tStartEncode(&encoder) < 0) return -1; - if (tEncodeSEpSet(&encoder, &pReq->epSet) < 0) return -1; + if (tEncodeSEpSet(&encoder, pEpset) < 0) return -1; tEndEncode(&encoder); int32_t tlen = encoder.pos; tEncoderClear(&encoder); return tlen; } - -int32_t tDeserializeSMEpSet(void *buf, int32_t bufLen, SMEpSet *pReq) { + +int32_t tDeserializeSEpSet(void *buf, int32_t bufLen, SEpSet *pEpset) { SDecoder decoder = {0}; tDecoderInit(&decoder, buf, bufLen); if (tStartDecode(&decoder) < 0) return -1; - if (tDecodeSEpSet(&decoder, &pReq->epSet) < 0) return -1; + if (tDecodeSEpSet(&decoder, pEpset) < 0) return -1; tEndDecode(&decoder); tDecoderClear(&decoder); @@ -896,6 +926,18 @@ int32_t tSerializeSStatusReq(void *buf, int32_t bufLen, SStatusReq *pReq) { // mnode loads if (tEncodeI32(&encoder, pReq->mload.syncState) < 0) return -1; + if (tEncodeI32(&encoder, pReq->qload.dnodeId) < 0) return -1; + if (tEncodeI64(&encoder, pReq->qload.numOfProcessedQuery) < 0) return -1; + if (tEncodeI64(&encoder, pReq->qload.numOfProcessedCQuery) < 0) return -1; + if (tEncodeI64(&encoder, pReq->qload.numOfProcessedFetch) < 0) return -1; + if (tEncodeI64(&encoder, pReq->qload.numOfProcessedDrop) < 0) return -1; + if (tEncodeI64(&encoder, 
pReq->qload.numOfProcessedHb) < 0) return -1; + if (tEncodeI64(&encoder, pReq->qload.cacheDataSize) < 0) return -1; + if (tEncodeI64(&encoder, pReq->qload.numOfQueryInQueue) < 0) return -1; + if (tEncodeI64(&encoder, pReq->qload.numOfFetchInQueue) < 0) return -1; + if (tEncodeI64(&encoder, pReq->qload.timeInQueryQueue) < 0) return -1; + if (tEncodeI64(&encoder, pReq->qload.timeInFetchQueue) < 0) return -1; + tEndEncode(&encoder); int32_t tlen = encoder.pos; @@ -953,6 +995,18 @@ int32_t tDeserializeSStatusReq(void *buf, int32_t bufLen, SStatusReq *pReq) { if (tDecodeI32(&decoder, &pReq->mload.syncState) < 0) return -1; + if (tDecodeI32(&decoder, &pReq->qload.dnodeId) < 0) return -1; + if (tDecodeI64(&decoder, &pReq->qload.numOfProcessedQuery) < 0) return -1; + if (tDecodeI64(&decoder, &pReq->qload.numOfProcessedCQuery) < 0) return -1; + if (tDecodeI64(&decoder, &pReq->qload.numOfProcessedFetch) < 0) return -1; + if (tDecodeI64(&decoder, &pReq->qload.numOfProcessedDrop) < 0) return -1; + if (tDecodeI64(&decoder, &pReq->qload.numOfProcessedHb) < 0) return -1; + if (tDecodeI64(&decoder, &pReq->qload.cacheDataSize) < 0) return -1; + if (tDecodeI64(&decoder, &pReq->qload.numOfQueryInQueue) < 0) return -1; + if (tDecodeI64(&decoder, &pReq->qload.numOfFetchInQueue) < 0) return -1; + if (tDecodeI64(&decoder, &pReq->qload.timeInQueryQueue) < 0) return -1; + if (tDecodeI64(&decoder, &pReq->qload.timeInFetchQueue) < 0) return -1; + tEndDecode(&decoder); tDecoderClear(&decoder); return 0; @@ -1919,11 +1973,11 @@ int32_t tSerializeSQnodeListRsp(void *buf, int32_t bufLen, SQnodeListRsp *pRsp) tEncoderInit(&encoder, buf, bufLen); if (tStartEncode(&encoder) < 0) return -1; - int32_t num = taosArrayGetSize(pRsp->addrsList); + int32_t num = taosArrayGetSize(pRsp->qnodeList); if (tEncodeI32(&encoder, num) < 0) return -1; for (int32_t i = 0; i < num; ++i) { - SQueryNodeAddr *addr = taosArrayGet(pRsp->addrsList, i); - if (tEncodeSQueryNodeAddr(&encoder, addr) < 0) return -1; + SQueryNodeLoad *pLoad = taosArrayGet(pRsp->qnodeList, i); + if (tEncodeSQueryNodeLoad(&encoder, pLoad) < 0) return -1; } tEndEncode(&encoder); @@ -1939,15 +1993,15 @@ int32_t tDeserializeSQnodeListRsp(void *buf, int32_t bufLen, SQnodeListRsp *pRsp if (tStartDecode(&decoder) < 0) return -1; int32_t num = 0; if (tDecodeI32(&decoder, &num) < 0) return -1; - if (NULL == pRsp->addrsList) { - pRsp->addrsList = taosArrayInit(num, sizeof(SQueryNodeAddr)); - if (NULL == pRsp->addrsList) return -1; + if (NULL == pRsp->qnodeList) { + pRsp->qnodeList = taosArrayInit(num, sizeof(SQueryNodeLoad)); + if (NULL == pRsp->qnodeList) return -1; } for (int32_t i = 0; i < num; ++i) { - SQueryNodeAddr addr = {0}; - if (tDecodeSQueryNodeAddr(&decoder, &addr) < 0) return -1; - taosArrayPush(pRsp->addrsList, &addr); + SQueryNodeLoad load = {0}; + if (tDecodeSQueryNodeLoad(&decoder, &load) < 0) return -1; + taosArrayPush(pRsp->qnodeList, &load); } tEndDecode(&decoder); @@ -1955,7 +2009,7 @@ int32_t tDeserializeSQnodeListRsp(void *buf, int32_t bufLen, SQnodeListRsp *pRsp return 0; } -void tFreeSQnodeListRsp(SQnodeListRsp *pRsp) { taosArrayDestroy(pRsp->addrsList); } +void tFreeSQnodeListRsp(SQnodeListRsp *pRsp) { taosArrayDestroy(pRsp->qnodeList); } int32_t tSerializeSCompactDbReq(void *buf, int32_t bufLen, SCompactDbReq *pReq) { SEncoder encoder = {0}; @@ -2224,6 +2278,7 @@ int32_t tSerializeSDbCfgRsp(void *buf, int32_t bufLen, const SDbCfgRsp *pRsp) { if (tEncodeI8(&encoder, pRetension->freqUnit) < 0) return -1; if (tEncodeI8(&encoder, pRetension->keepUnit) < 0) 
return -1; } + if (tEncodeI8(&encoder, pRsp->schemaless) < 0) return -1; tEndEncode(&encoder); int32_t tlen = encoder.pos; @@ -2272,6 +2327,7 @@ int32_t tDeserializeSDbCfgRsp(void *buf, int32_t bufLen, SDbCfgRsp *pRsp) { return -1; } } + if (tDecodeI8(&decoder, &pRsp->schemaless) < 0) return -1; tEndDecode(&decoder); tDecoderClear(&decoder); @@ -2666,25 +2722,23 @@ int32_t tDeserializeSMDropCgroupReq(void *buf, int32_t bufLen, SMDropCgroupReq * } int32_t tSerializeSCMCreateTopicReq(void *buf, int32_t bufLen, const SCMCreateTopicReq *pReq) { - int32_t sqlLen = 0; - int32_t astLen = 0; - if (pReq->sql != NULL) sqlLen = (int32_t)strlen(pReq->sql); - if (pReq->ast != NULL) astLen = (int32_t)strlen(pReq->ast); - SEncoder encoder = {0}; tEncoderInit(&encoder, buf, bufLen); if (tStartEncode(&encoder) < 0) return -1; if (tEncodeCStr(&encoder, pReq->name) < 0) return -1; if (tEncodeI8(&encoder, pReq->igExists) < 0) return -1; - if (tEncodeI8(&encoder, pReq->withTbName) < 0) return -1; - if (tEncodeI8(&encoder, pReq->withSchema) < 0) return -1; - if (tEncodeI8(&encoder, pReq->withTag) < 0) return -1; - if (tEncodeCStr(&encoder, pReq->subscribeDbName) < 0) return -1; - if (tEncodeI32(&encoder, sqlLen) < 0) return -1; - if (tEncodeI32(&encoder, astLen) < 0) return -1; - if (sqlLen > 0 && tEncodeCStr(&encoder, pReq->sql) < 0) return -1; - if (astLen > 0 && tEncodeCStr(&encoder, pReq->ast) < 0) return -1; + if (tEncodeI8(&encoder, pReq->subType) < 0) return -1; + if (tEncodeCStr(&encoder, pReq->subDbName) < 0) return -1; + if (TOPIC_SUB_TYPE__DB == pReq->subType) { + } else if (TOPIC_SUB_TYPE__TABLE == pReq->subType) { + if (tEncodeCStr(&encoder, pReq->subStbName) < 0) return -1; + } else { + if (tEncodeI32(&encoder, strlen(pReq->ast)) < 0) return -1; + if (tEncodeCStr(&encoder, pReq->ast) < 0) return -1; + } + if (tEncodeI32(&encoder, strlen(pReq->sql)) < 0) return -1; + if (tEncodeCStr(&encoder, pReq->sql) < 0) return -1; tEndEncode(&encoder); @@ -2703,26 +2757,26 @@ int32_t tDeserializeSCMCreateTopicReq(void *buf, int32_t bufLen, SCMCreateTopicR if (tStartDecode(&decoder) < 0) return -1; if (tDecodeCStrTo(&decoder, pReq->name) < 0) return -1; if (tDecodeI8(&decoder, &pReq->igExists) < 0) return -1; - if (tDecodeI8(&decoder, &pReq->withTbName) < 0) return -1; - if (tDecodeI8(&decoder, &pReq->withSchema) < 0) return -1; - if (tDecodeI8(&decoder, &pReq->withTag) < 0) return -1; - if (tDecodeCStrTo(&decoder, pReq->subscribeDbName) < 0) return -1; + if (tDecodeI8(&decoder, &pReq->subType) < 0) return -1; + if (tDecodeCStrTo(&decoder, pReq->subDbName) < 0) return -1; + if (TOPIC_SUB_TYPE__DB == pReq->subType) { + } else if (TOPIC_SUB_TYPE__TABLE == pReq->subType) { + if (tDecodeCStrTo(&decoder, pReq->subStbName) < 0) return -1; + } else { + if (tDecodeI32(&decoder, &astLen) < 0) return -1; + if (astLen > 0) { + pReq->ast = taosMemoryCalloc(1, astLen + 1); + if (pReq->ast == NULL) return -1; + if (tDecodeCStrTo(&decoder, pReq->ast) < 0) return -1; + } + } if (tDecodeI32(&decoder, &sqlLen) < 0) return -1; - if (tDecodeI32(&decoder, &astLen) < 0) return -1; - if (sqlLen > 0) { pReq->sql = taosMemoryCalloc(1, sqlLen + 1); if (pReq->sql == NULL) return -1; if (tDecodeCStrTo(&decoder, pReq->sql) < 0) return -1; } - if (astLen > 0) { - pReq->ast = taosMemoryCalloc(1, astLen + 1); - if (pReq->ast == NULL) return -1; - if (tDecodeCStrTo(&decoder, pReq->ast) < 0) return -1; - } else { - } - tEndDecode(&decoder); tDecoderClear(&decoder); @@ -2731,7 +2785,9 @@ int32_t tDeserializeSCMCreateTopicReq(void *buf, int32_t 
bufLen, SCMCreateTopicR void tFreeSCMCreateTopicReq(SCMCreateTopicReq *pReq) { taosMemoryFreeClear(pReq->sql); - taosMemoryFreeClear(pReq->ast); + if (TOPIC_SUB_TYPE__COLUMN == pReq->subType) { + taosMemoryFreeClear(pReq->ast); + } } int32_t tSerializeSCMCreateTopicRsp(void *buf, int32_t bufLen, const SCMCreateTopicRsp *pRsp) { @@ -2880,7 +2936,6 @@ int32_t tSerializeSCreateVnodeReq(void *buf, int32_t bufLen, SCreateVnodeReq *pR if (tStartEncode(&encoder) < 0) return -1; if (tEncodeI32(&encoder, pReq->vgId) < 0) return -1; - if (tEncodeI32(&encoder, pReq->dnodeId) < 0) return -1; if (tEncodeCStr(&encoder, pReq->db) < 0) return -1; if (tEncodeI64(&encoder, pReq->dbUid) < 0) return -1; if (tEncodeI32(&encoder, pReq->vgVersion) < 0) return -1; @@ -2903,6 +2958,7 @@ int32_t tSerializeSCreateVnodeReq(void *buf, int32_t bufLen, SCreateVnodeReq *pR if (tEncodeI8(&encoder, pReq->compression) < 0) return -1; if (tEncodeI8(&encoder, pReq->strict) < 0) return -1; if (tEncodeI8(&encoder, pReq->cacheLastRow) < 0) return -1; + if (tEncodeI8(&encoder, pReq->standby) < 0) return -1; if (tEncodeI8(&encoder, pReq->replica) < 0) return -1; if (tEncodeI8(&encoder, pReq->selfIndex) < 0) return -1; for (int32_t i = 0; i < TSDB_MAX_REPLICA; ++i) { @@ -2919,6 +2975,11 @@ int32_t tSerializeSCreateVnodeReq(void *buf, int32_t bufLen, SCreateVnodeReq *pR } if (tEncodeI8(&encoder, pReq->isTsma) < 0) return -1; + if (pReq->isTsma) { + uint32_t tsmaLen = (uint32_t)(htonl(((SMsgHead *)pReq->pTsma)->contLen)); + if (tEncodeBinary(&encoder, (const uint8_t *)pReq->pTsma, tsmaLen) < 0) return -1; + } + tEndEncode(&encoder); int32_t tlen = encoder.pos; @@ -2932,7 +2993,6 @@ int32_t tDeserializeSCreateVnodeReq(void *buf, int32_t bufLen, SCreateVnodeReq * if (tStartDecode(&decoder) < 0) return -1; if (tDecodeI32(&decoder, &pReq->vgId) < 0) return -1; - if (tDecodeI32(&decoder, &pReq->dnodeId) < 0) return -1; if (tDecodeCStrTo(&decoder, pReq->db) < 0) return -1; if (tDecodeI64(&decoder, &pReq->dbUid) < 0) return -1; if (tDecodeI32(&decoder, &pReq->vgVersion) < 0) return -1; @@ -2955,6 +3015,7 @@ int32_t tDeserializeSCreateVnodeReq(void *buf, int32_t bufLen, SCreateVnodeReq * if (tDecodeI8(&decoder, &pReq->compression) < 0) return -1; if (tDecodeI8(&decoder, &pReq->strict) < 0) return -1; if (tDecodeI8(&decoder, &pReq->cacheLastRow) < 0) return -1; + if (tDecodeI8(&decoder, &pReq->standby) < 0) return -1; if (tDecodeI8(&decoder, &pReq->replica) < 0) return -1; if (tDecodeI8(&decoder, &pReq->selfIndex) < 0) return -1; for (int32_t i = 0; i < TSDB_MAX_REPLICA; ++i) { @@ -2982,6 +3043,9 @@ int32_t tDeserializeSCreateVnodeReq(void *buf, int32_t bufLen, SCreateVnodeReq * } if (tDecodeI8(&decoder, &pReq->isTsma) < 0) return -1; + if (pReq->isTsma) { + if (tDecodeBinaryAlloc(&decoder, &pReq->pTsma, NULL) < 0) return -1; + } tEndDecode(&decoder); tDecoderClear(&decoder); @@ -2991,6 +3055,9 @@ int32_t tDeserializeSCreateVnodeReq(void *buf, int32_t bufLen, SCreateVnodeReq * int32_t tFreeSCreateVnodeReq(SCreateVnodeReq *pReq) { taosArrayDestroy(pReq->pRetensions); pReq->pRetensions = NULL; + if (pReq->isTsma) { + taosMemoryFreeClear(pReq->pTsma); + } return 0; } @@ -3069,8 +3136,8 @@ int32_t tSerializeSAlterVnodeReq(void *buf, int32_t bufLen, SAlterVnodeReq *pReq if (tEncodeI8(&encoder, pReq->walLevel) < 0) return -1; if (tEncodeI8(&encoder, pReq->strict) < 0) return -1; if (tEncodeI8(&encoder, pReq->cacheLastRow) < 0) return -1; - if (tEncodeI8(&encoder, pReq->replica) < 0) return -1; if (tEncodeI8(&encoder, pReq->selfIndex) < 0) return 
-1; + if (tEncodeI8(&encoder, pReq->replica) < 0) return -1; for (int32_t i = 0; i < TSDB_MAX_REPLICA; ++i) { SReplica *pReplica = &pReq->replicas[i]; if (tEncodeSReplica(&encoder, pReplica) < 0) return -1; @@ -3100,8 +3167,8 @@ int32_t tDeserializeSAlterVnodeReq(void *buf, int32_t bufLen, SAlterVnodeReq *pR if (tDecodeI8(&decoder, &pReq->walLevel) < 0) return -1; if (tDecodeI8(&decoder, &pReq->strict) < 0) return -1; if (tDecodeI8(&decoder, &pReq->cacheLastRow) < 0) return -1; - if (tDecodeI8(&decoder, &pReq->replica) < 0) return -1; if (tDecodeI8(&decoder, &pReq->selfIndex) < 0) return -1; + if (tDecodeI8(&decoder, &pReq->replica) < 0) return -1; for (int32_t i = 0; i < TSDB_MAX_REPLICA; ++i) { SReplica *pReplica = &pReq->replicas[i]; if (tDecodeSReplica(&decoder, pReplica) < 0) return -1; @@ -3507,31 +3574,6 @@ int32_t tDeserializeSSchedulerHbRsp(void *buf, int32_t bufLen, SSchedulerHbRsp * void tFreeSSchedulerHbRsp(SSchedulerHbRsp *pRsp) { taosArrayDestroy(pRsp->taskStatus); } -int32_t tSerializeSQueryTableRsp(void *buf, int32_t bufLen, SQueryTableRsp *pRsp) { - SEncoder encoder = {0}; - tEncoderInit(&encoder, buf, bufLen); - - if (tStartEncode(&encoder) < 0) return -1; - if (tEncodeI32(&encoder, pRsp->code) < 0) return -1; - tEndEncode(&encoder); - - int32_t tlen = encoder.pos; - tEncoderClear(&encoder); - return tlen; -} - -int32_t tDeserializeSQueryTableRsp(void *buf, int32_t bufLen, SQueryTableRsp *pRsp) { - SDecoder decoder = {0}; - tDecoderInit(&decoder, buf, bufLen); - - if (tStartDecode(&decoder) < 0) return -1; - if (tDecodeI32(&decoder, &pRsp->code) < 0) return -1; - tEndDecode(&decoder); - - tDecoderClear(&decoder); - return 0; -} - int32_t tSerializeSVCreateTbBatchRsp(void *buf, int32_t bufLen, SVCreateTbBatchRsp *pRsp) { // SEncoder encoder = {0}; // tEncoderInit(&encoder, buf, bufLen); @@ -3614,6 +3656,7 @@ int32_t tEncodeTSma(SEncoder *pCoder, const STSma *pSma) { if (tEncodeI8(pCoder, pSma->intervalUnit) < 0) return -1; if (tEncodeI8(pCoder, pSma->slidingUnit) < 0) return -1; if (tEncodeI8(pCoder, pSma->timezoneInt) < 0) return -1; + if (tEncodeI32(pCoder, pSma->dstVgId) < 0) return -1; if (tEncodeCStr(pCoder, pSma->indexName) < 0) return -1; if (tEncodeI32(pCoder, pSma->exprLen) < 0) return -1; if (tEncodeI32(pCoder, pSma->tagsFilterLen) < 0) return -1; @@ -3636,6 +3679,7 @@ int32_t tDecodeTSma(SDecoder *pCoder, STSma *pSma) { if (tDecodeI8(pCoder, &pSma->version) < 0) return -1; if (tDecodeI8(pCoder, &pSma->intervalUnit) < 0) return -1; if (tDecodeI8(pCoder, &pSma->slidingUnit) < 0) return -1; if (tDecodeI8(pCoder, &pSma->timezoneInt) < 0) return -1; + if (tDecodeI32(pCoder, &pSma->dstVgId) < 0) return -1; if (tDecodeCStrTo(pCoder, pSma->indexName) < 0) return -1; if (tDecodeI32(pCoder, &pSma->exprLen) < 0) return -1; @@ -3861,16 +3905,15 @@ int tEncodeSVCreateTbReq(SEncoder *pCoder, const SVCreateTbReq *pReq) { if (tStartEncode(pCoder) < 0) return -1; if (tEncodeI32v(pCoder, pReq->flags) < 0) return -1; + if (tEncodeCStr(pCoder, pReq->name) < 0) return -1; if (tEncodeI64(pCoder, pReq->uid) < 0) return -1; if (tEncodeI64(pCoder, pReq->ctime) < 0) return -1; - - if (tEncodeCStr(pCoder, pReq->name) < 0) return -1; if (tEncodeI32(pCoder, pReq->ttl) < 0) return -1; if (tEncodeI8(pCoder, pReq->type) < 0) return -1; if (pReq->type == TSDB_CHILD_TABLE) { if (tEncodeI64(pCoder, pReq->ctb.suid) < 0) return -1; - if (tEncodeBinary(pCoder, pReq->ctb.pTag, kvRowLen(pReq->ctb.pTag)) < 0) return -1; + if (tEncodeTag(pCoder, (const STag *)pReq->ctb.pTag) < 0) return -1; } else if
(pReq->type == TSDB_NORMAL_TABLE) { if (tEncodeSSchemaWrapper(pCoder, &pReq->ntb.schemaRow) < 0) return -1; } else { @@ -3882,21 +3925,18 @@ int tEncodeSVCreateTbReq(SEncoder *pCoder, const SVCreateTbReq *pReq) { } int tDecodeSVCreateTbReq(SDecoder *pCoder, SVCreateTbReq *pReq) { - uint32_t len; - if (tStartDecode(pCoder) < 0) return -1; if (tDecodeI32v(pCoder, &pReq->flags) < 0) return -1; + if (tDecodeCStr(pCoder, &pReq->name) < 0) return -1; if (tDecodeI64(pCoder, &pReq->uid) < 0) return -1; if (tDecodeI64(pCoder, &pReq->ctime) < 0) return -1; - - if (tDecodeCStr(pCoder, &pReq->name) < 0) return -1; if (tDecodeI32(pCoder, &pReq->ttl) < 0) return -1; if (tDecodeI8(pCoder, &pReq->type) < 0) return -1; if (pReq->type == TSDB_CHILD_TABLE) { if (tDecodeI64(pCoder, &pReq->ctb.suid) < 0) return -1; - if (tDecodeBinary(pCoder, &pReq->ctb.pTag, &len) < 0) return -1; + if (tDecodeTag(pCoder, (STag **)&pReq->ctb.pTag) < 0) return -1; } else if (pReq->type == TSDB_NORMAL_TABLE) { if (tDecodeSSchemaWrapper(pCoder, &pReq->ntb.schemaRow) < 0) return -1; } else { @@ -4310,13 +4350,96 @@ int32_t tDecodeSVAlterTbReq(SDecoder *pDecoder, SVAlterTbReq *pReq) { int32_t tEncodeSVAlterTbRsp(SEncoder *pEncoder, const SVAlterTbRsp *pRsp) { if (tStartEncode(pEncoder) < 0) return -1; if (tEncodeI32(pEncoder, pRsp->code) < 0) return -1; + if (tEncodeI32(pEncoder, pRsp->pMeta ? 1 : 0) < 0) return -1; + if (pRsp->pMeta) { + if (tEncodeSTableMetaRsp(pEncoder, pRsp->pMeta) < 0) return -1; + } tEndEncode(pEncoder); return 0; } int32_t tDecodeSVAlterTbRsp(SDecoder *pDecoder, SVAlterTbRsp *pRsp) { + int32_t meta = 0; if (tStartDecode(pDecoder) < 0) return -1; if (tDecodeI32(pDecoder, &pRsp->code) < 0) return -1; + if (tDecodeI32(pDecoder, &meta) < 0) return -1; + if (meta) { + pRsp->pMeta = taosMemoryCalloc(1, sizeof(STableMetaRsp)); + if (NULL == pRsp->pMeta) return -1; + if (tDecodeSTableMetaRsp(pDecoder, pRsp->pMeta) < 0) return -1; + } tEndDecode(pDecoder); return 0; } + +int32_t tDeserializeSVAlterTbRsp(void *buf, int32_t bufLen, SVAlterTbRsp *pRsp) { + int32_t meta = 0; + SDecoder decoder = {0}; + tDecoderInit(&decoder, buf, bufLen); + + if (tStartDecode(&decoder) < 0) return -1; + if (tDecodeI32(&decoder, &pRsp->code) < 0) return -1; + if (tDecodeI32(&decoder, &meta) < 0) return -1; + if (meta) { + pRsp->pMeta = taosMemoryCalloc(1, sizeof(STableMetaRsp)); + if (NULL == pRsp->pMeta) return -1; + if (tDecodeSTableMetaRsp(&decoder, pRsp->pMeta) < 0) return -1; + } + tEndDecode(&decoder); + tDecoderClear(&decoder); + return 0; +} + +int32_t tEncodeSMAlterStbRsp(SEncoder *pEncoder, const SMAlterStbRsp *pRsp) { + if (tStartEncode(pEncoder) < 0) return -1; + if (tEncodeI32(pEncoder, pRsp->pMeta->pSchemas ? 
1 : 0) < 0) return -1; + if (pRsp->pMeta->pSchemas) { + if (tEncodeSTableMetaRsp(pEncoder, pRsp->pMeta) < 0) return -1; + } + tEndEncode(pEncoder); + return 0; +} + +int32_t tDecodeSMAlterStbRsp(SDecoder *pDecoder, SMAlterStbRsp *pRsp) { + int32_t meta = 0; + if (tStartDecode(pDecoder) < 0) return -1; + if (tDecodeI32(pDecoder, &meta) < 0) return -1; + if (meta) { + pRsp->pMeta = taosMemoryCalloc(1, sizeof(STableMetaRsp)); + if (NULL == pRsp->pMeta) return -1; + if (tDecodeSTableMetaRsp(pDecoder, pRsp->pMeta) < 0) return -1; + } + tEndDecode(pDecoder); + return 0; +} + +int32_t tDeserializeSMAlterStbRsp(void *buf, int32_t bufLen, SMAlterStbRsp *pRsp) { + int32_t meta = 0; + SDecoder decoder = {0}; + tDecoderInit(&decoder, buf, bufLen); + + if (tStartDecode(&decoder) < 0) return -1; + if (tDecodeI32(&decoder, &meta) < 0) return -1; + if (meta) { + pRsp->pMeta = taosMemoryCalloc(1, sizeof(STableMetaRsp)); + if (NULL == pRsp->pMeta) return -1; + if (tDecodeSTableMetaRsp(&decoder, pRsp->pMeta) < 0) return -1; + } + tEndDecode(&decoder); + tDecoderClear(&decoder); + return 0; +} + +void tFreeSMAlterStbRsp(SMAlterStbRsp* pRsp) { + if (NULL == pRsp) { + return; + } + + if (pRsp->pMeta) { + taosMemoryFree(pRsp->pMeta->pSchemas); + taosMemoryFree(pRsp->pMeta); + } +} + + + diff --git a/source/common/src/tmsgcb.c b/source/common/src/tmsgcb.c index 126a4c023a09505c8b93174c622e14654aa71b0f..b8eec655b125eadf3a8e4f199168ef4bf96109a0 100644 --- a/source/common/src/tmsgcb.c +++ b/source/common/src/tmsgcb.c @@ -22,41 +22,21 @@ static SMsgCb defaultMsgCb; void tmsgSetDefault(const SMsgCb* msgcb) { defaultMsgCb = *msgcb; } int32_t tmsgPutToQueue(const SMsgCb* msgcb, EQueueType qtype, SRpcMsg* pMsg) { - PutToQueueFp fp = msgcb->queueFps[qtype]; - return (*fp)(msgcb->mgmt, pMsg); + return (*msgcb->putToQueueFp)(msgcb->mgmt, qtype, pMsg); } int32_t tmsgGetQueueSize(const SMsgCb* msgcb, int32_t vgId, EQueueType qtype) { - GetQueueSizeFp fp = msgcb->qsizeFp; - return (*fp)(msgcb->mgmt, vgId, qtype); + return (*msgcb->qsizeFp)(msgcb->mgmt, vgId, qtype); } -int32_t tmsgSendReq(const SEpSet* epSet, SRpcMsg* pMsg) { - SendReqFp fp = defaultMsgCb.sendReqFp; - return (*fp)(epSet, pMsg); -} +int32_t tmsgSendReq(const SEpSet* epSet, SRpcMsg* pMsg) { return (*defaultMsgCb.sendReqFp)(epSet, pMsg); } -void tmsgSendRsp(SRpcMsg* pMsg) { - SendRspFp fp = defaultMsgCb.sendRspFp; - return (*fp)(pMsg); -} +void tmsgSendRsp(SRpcMsg* pMsg) { return (*defaultMsgCb.sendRspFp)(pMsg); } -void tmsgSendRedirectRsp(SRpcMsg* pMsg, const SEpSet* pNewEpSet) { - SendRedirectRspFp fp = defaultMsgCb.sendRedirectRspFp; - (*fp)(pMsg, pNewEpSet); -} +void tmsgSendRedirectRsp(SRpcMsg* pMsg, const SEpSet* pNewEpSet) { (*defaultMsgCb.sendRedirectRspFp)(pMsg, pNewEpSet); } -void tmsgRegisterBrokenLinkArg(SRpcMsg* pMsg) { - RegisterBrokenLinkArgFp fp = defaultMsgCb.registerBrokenLinkArgFp; - (*fp)(pMsg); -} +void tmsgRegisterBrokenLinkArg(SRpcMsg* pMsg) { (*defaultMsgCb.registerBrokenLinkArgFp)(pMsg); } -void tmsgReleaseHandle(SRpcHandleInfo* pHandle, int8_t type) { - ReleaseHandleFp fp = defaultMsgCb.releaseHandleFp; - (*fp)(pHandle, type); -} +void tmsgReleaseHandle(SRpcHandleInfo* pHandle, int8_t type) { (*defaultMsgCb.releaseHandleFp)(pHandle, type); } -void tmsgReportStartup(const char* name, const char* desc) { - ReportStartup fp = defaultMsgCb.reportStartupFp; - (*fp)(name, desc); -} \ No newline at end of file +void tmsgReportStartup(const char* name, const char* desc) { (*defaultMsgCb.reportStartupFp)(name, desc); } \ No newline at end of file 
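/*
 * Editor's illustration (not part of the patch): the tmsgcb.c rework above collapses the
 * per-queue function-pointer array msgcb->queueFps[qtype] into a single putToQueueFp that
 * receives the queue type, so each node module (mmInt.c, qmInt.c and vmInt.c below) registers
 * exactly one entry point and dispatches internally. A minimal sketch of the shape those entry
 * points now share; SXxxMgmt and xxPutRpcMsgToQueue are hypothetical stand-ins for the real
 * mm/qm/vm variants, and the error path mirrors the patch as written (the copied queue item
 * is not freed when the queue type is unsupported).
 */
typedef struct {
  SSingleWorker writeWorker;  // hypothetical: one worker per queue this module serves
  SSingleWorker queryWorker;
} SXxxMgmt;

int32_t xxPutRpcMsgToQueue(SXxxMgmt *pMgmt, EQueueType qtype, SRpcMsg *pRpc) {
  // the target queue owns a private copy of the rpc message
  SRpcMsg *pMsg = taosAllocateQitem(sizeof(SRpcMsg), RPC_QITEM);
  if (pMsg == NULL) return -1;
  memcpy(pMsg, pRpc, sizeof(SRpcMsg));
  switch (qtype) {
    case WRITE_QUEUE:
      taosWriteQitem(pMgmt->writeWorker.queue, pMsg);
      return 0;
    case QUERY_QUEUE:
      taosWriteQitem(pMgmt->queryWorker.queue, pMsg);
      return 0;
    default:  // queue types this module does not serve
      terrno = TSDB_CODE_INVALID_PARA;
      return -1;
  }
}
/*
 * Callers keep going through tmsgPutToQueue(&msgCb, qtype, &rpcMsg); after this patch it
 * forwards to (*msgcb->putToQueueFp)(msgcb->mgmt, qtype, pMsg) instead of indexing a
 * per-queue function-pointer array.
 */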
diff --git a/source/common/src/trow.c b/source/common/src/trow.c index 4d0846f6c2957a6d2a1b74dabf60ee76af57287c..c8a28d7f28f747b65fae3802bc392ac6163e5e1e 100644 --- a/source/common/src/trow.c +++ b/source/common/src/trow.c @@ -605,6 +605,10 @@ static int32_t tdAppendKvRowToDataCol(STSRow *pRow, STSchema *pSchema, SDataCols * @param pCols */ int32_t tdAppendSTSRowToDataCol(STSRow *pRow, STSchema *pSchema, SDataCols *pCols, bool isMerge) { +#ifdef TD_DEBUG_PRINT_TSDB_LOAD_DCOLS + printf("%s:%d ts: %" PRIi64 " sver:%d maxCols:%" PRIi16 " nCols:%" PRIi16 ", nRows:%d\n", __func__, __LINE__, + TD_ROW_KEY(pRow), TD_ROW_SVER(pRow), pCols->maxCols, pCols->numOfCols, pCols->numOfRows); +#endif if (TD_IS_TP_ROW(pRow)) { return tdAppendTpRowToDataCol(pRow, pSchema, pCols, isMerge); } else if (TD_IS_KV_ROW(pRow)) { @@ -1191,9 +1195,9 @@ bool tdGetTpRowDataOfCol(STSRowIter *pIter, col_type_t colType, int32_t offset, } static FORCE_INLINE int32_t compareKvRowColId(const void *key1, const void *key2) { - if (*(int16_t *)key1 > ((SColIdx *)key2)->colId) { + if (*(col_id_t *)key1 > ((SKvRowIdx *)key2)->colId) { return 1; - } else if (*(int16_t *)key1 < ((SColIdx *)key2)->colId) { + } else if (*(col_id_t *)key1 < ((SKvRowIdx *)key2)->colId) { return -1; } else { return 0; diff --git a/source/common/src/ttime.c b/source/common/src/ttime.c index 38ad948981f7bd7a260b9578bce6d9252c4290d9..10ba58af298c59306badc2e299e588e3ec46874f 100644 --- a/source/common/src/ttime.c +++ b/source/common/src/ttime.c @@ -521,21 +521,21 @@ int32_t convertStringToTimestamp(int16_t type, char *inputData, int64_t timePrec if (type == TSDB_DATA_TYPE_BINARY || type == TSDB_DATA_TYPE_VARBINARY) { newColData = taosMemoryCalloc(1, charLen + 1); memcpy(newColData, varDataVal(inputData), charLen); - bool ret = taosParseTime(newColData, timeVal, charLen, (int32_t)timePrec, tsDaylight); + int32_t ret = taosParseTime(newColData, timeVal, charLen, (int32_t)timePrec, tsDaylight); if (ret != TSDB_CODE_SUCCESS) { taosMemoryFree(newColData); - return ret; + return TSDB_CODE_INVALID_TIMESTAMP; } taosMemoryFree(newColData); } else if (type == TSDB_DATA_TYPE_NCHAR) { - newColData = taosMemoryCalloc(1, charLen / TSDB_NCHAR_SIZE + 1); + newColData = taosMemoryCalloc(1, charLen + TSDB_NCHAR_SIZE); int len = taosUcs4ToMbs((TdUcs4 *)varDataVal(inputData), charLen, newColData); if (len < 0){ taosMemoryFree(newColData); return TSDB_CODE_FAILED; } newColData[len] = 0; - bool ret = taosParseTime(newColData, timeVal, len + 1, (int32_t)timePrec, tsDaylight); + int32_t ret = taosParseTime(newColData, timeVal, len + 1, (int32_t)timePrec, tsDaylight); if (ret != TSDB_CODE_SUCCESS) { taosMemoryFree(newColData); return ret; @@ -783,7 +783,7 @@ int64_t taosTimeTruncate(int64_t t, const SInterval* pInterval, int32_t precisio // 2020-07-03 17:48:42 // and the parameter can also be a variable. 
const char* fmtts(int64_t ts) { - static char buf[96]; + static char buf[96] = {0}; size_t pos = 0; struct tm tm; diff --git a/source/dnode/mgmt/exe/dmMain.c b/source/dnode/mgmt/exe/dmMain.c index 1a04e83f8127c0c71570c2cef4f300f05da11e67..2b0f6a01a0b87cdee8d071d1e53bad398ea90f97 100644 --- a/source/dnode/mgmt/exe/dmMain.c +++ b/source/dnode/mgmt/exe/dmMain.c @@ -216,7 +216,7 @@ int main(int argc, char const *argv[]) { return -1; } - dInfo("start to run dnode"); + dInfo("start to open dnode"); dmSetSignalHandle(); int32_t code = dmRun(); dInfo("shutting down the service"); diff --git a/source/dnode/mgmt/mgmt_dnode/inc/dmInt.h b/source/dnode/mgmt/mgmt_dnode/inc/dmInt.h index ae8879326d6da92b6bd5ab3ea89584b347817fd4..ee811c0071cbd07c03edb7aaf117c3c4461adebb 100644 --- a/source/dnode/mgmt/mgmt_dnode/inc/dmInt.h +++ b/source/dnode/mgmt/mgmt_dnode/inc/dmInt.h @@ -35,6 +35,7 @@ typedef struct SDnodeMgmt { SendMonitorReportFp sendMonitorReportFp; GetVnodeLoadsFp getVnodeLoadsFp; GetMnodeLoadsFp getMnodeLoadsFp; + GetQnodeLoadsFp getQnodeLoadsFp; } SDnodeMgmt; // dmHandle.c @@ -58,4 +59,4 @@ void dmStopWorker(SDnodeMgmt *pMgmt); } #endif -#endif /*_TD_DND_QNODE_INT_H_*/ \ No newline at end of file +#endif /*_TD_DND_DNODE_INT_H_*/ diff --git a/source/dnode/mgmt/mgmt_dnode/src/dmHandle.c b/source/dnode/mgmt/mgmt_dnode/src/dmHandle.c index 2533f268e5cd5355c1dba75fb384e977c386d1fa..fbd46db183d3024e40bb472decf80bf4c3936443 100644 --- a/source/dnode/mgmt/mgmt_dnode/src/dmHandle.c +++ b/source/dnode/mgmt/mgmt_dnode/src/dmHandle.c @@ -79,6 +79,8 @@ void dmSendStatusReq(SDnodeMgmt *pMgmt) { (*pMgmt->getMnodeLoadsFp)(&minfo); req.mload = minfo.load; + (*pMgmt->getQnodeLoadsFp)(&req.qload); + int32_t contLen = tSerializeSStatusReq(NULL, 0, &req); void *pHead = rpcMallocCont(contLen); tSerializeSStatusReq(pHead, contLen, &req); diff --git a/source/dnode/mgmt/mgmt_dnode/src/dmInt.c b/source/dnode/mgmt/mgmt_dnode/src/dmInt.c index 59c926545e6f565a124a4846532e4f74efeecd5e..d2db1a4a62fd157b2df235133c85bb6e38ac680d 100644 --- a/source/dnode/mgmt/mgmt_dnode/src/dmInt.c +++ b/source/dnode/mgmt/mgmt_dnode/src/dmInt.c @@ -48,6 +48,7 @@ static int32_t dmOpenMgmt(SMgmtInputOpt *pInput, SMgmtOutputOpt *pOutput) { pMgmt->sendMonitorReportFp = pInput->sendMonitorReportFp; pMgmt->getVnodeLoadsFp = pInput->getVnodeLoadsFp; pMgmt->getMnodeLoadsFp = pInput->getMnodeLoadsFp; + pMgmt->getQnodeLoadsFp = pInput->getQnodeLoadsFp; if (dmStartWorker(pMgmt) != 0) { return -1; diff --git a/source/dnode/mgmt/mgmt_mnode/inc/mmInt.h b/source/dnode/mgmt/mgmt_mnode/inc/mmInt.h index bd034fe7d6c21dcf31e0ca4e9e83d7a23fa28fb8..9a0cfdfc9302160a21eccbe98445aad259250335 100644 --- a/source/dnode/mgmt/mgmt_mnode/inc/mmInt.h +++ b/source/dnode/mgmt/mgmt_mnode/inc/mmInt.h @@ -65,10 +65,7 @@ int32_t mmPutNodeMsgToSyncQueue(SMnodeMgmt *pMgmt, SRpcMsg *pMsg); int32_t mmPutNodeMsgToReadQueue(SMnodeMgmt *pMgmt, SRpcMsg *pMsg); int32_t mmPutNodeMsgToQueryQueue(SMnodeMgmt *pMgmt, SRpcMsg *pMsg); int32_t mmPutNodeMsgToMonitorQueue(SMnodeMgmt *pMgmt, SRpcMsg *pMsg); -int32_t mmPutRpcMsgToQueryQueue(SMnodeMgmt *pMgmt, SRpcMsg *pMsg); -int32_t mmPutRpcMsgToReadQueue(SMnodeMgmt *pMgmt, SRpcMsg *pMsg); -int32_t mmPutRpcMsgToWriteQueue(SMnodeMgmt *pMgmt, SRpcMsg *pMsg); -int32_t mmPutRpcMsgToSyncQueue(SMnodeMgmt *pMgmt, SRpcMsg *pMsg); +int32_t mmPutRpcMsgToQueue(SMnodeMgmt *pMgmt, EQueueType qtype, SRpcMsg *pRpc); #ifdef __cplusplus } diff --git a/source/dnode/mgmt/mgmt_mnode/src/mmHandle.c b/source/dnode/mgmt/mgmt_mnode/src/mmHandle.c index
f6350ba27954349a89849f66a9d15be7ffb6266d..8d0d503d8f6a09a7a73233bad9816ce7023a4d53 100644 --- a/source/dnode/mgmt/mgmt_mnode/src/mmHandle.c +++ b/source/dnode/mgmt/mgmt_mnode/src/mmHandle.c @@ -134,7 +134,7 @@ SArray *mmGetMsgHandles() { // Requests handled by DNODE if (dmSetMgmtHandle(pArray, TDMT_DND_CREATE_MNODE_RSP, mmPutNodeMsgToWriteQueue, 0) == NULL) goto _OVER; - if (dmSetMgmtHandle(pArray, TDMT_DND_ALTER_MNODE_RSP, mmPutNodeMsgToWriteQueue, 0) == NULL) goto _OVER; + if (dmSetMgmtHandle(pArray, TDMT_MND_ALTER_MNODE_RSP, mmPutNodeMsgToWriteQueue, 0) == NULL) goto _OVER; if (dmSetMgmtHandle(pArray, TDMT_DND_DROP_MNODE_RSP, mmPutNodeMsgToWriteQueue, 0) == NULL) goto _OVER; if (dmSetMgmtHandle(pArray, TDMT_DND_CREATE_QNODE_RSP, mmPutNodeMsgToWriteQueue, 0) == NULL) goto _OVER; if (dmSetMgmtHandle(pArray, TDMT_DND_DROP_QNODE_RSP, mmPutNodeMsgToWriteQueue, 0) == NULL) goto _OVER; @@ -190,13 +190,15 @@ SArray *mmGetMsgHandles() { if (dmSetMgmtHandle(pArray, TDMT_MND_KILL_TRANS, mmPutNodeMsgToWriteQueue, 0) == NULL) goto _OVER; if (dmSetMgmtHandle(pArray, TDMT_MND_GRANT, mmPutNodeMsgToWriteQueue, 0) == NULL) goto _OVER; if (dmSetMgmtHandle(pArray, TDMT_MND_AUTH, mmPutNodeMsgToReadQueue, 0) == NULL) goto _OVER; - if (dmSetMgmtHandle(pArray, TDMT_DND_ALTER_MNODE, mmPutNodeMsgToWriteQueue, 0) == NULL) goto _OVER; + if (dmSetMgmtHandle(pArray, TDMT_MND_ALTER_MNODE, mmPutNodeMsgToWriteQueue, 0) == NULL) goto _OVER; if (dmSetMgmtHandle(pArray, TDMT_MND_CREATE_TOPIC, mmPutNodeMsgToWriteQueue, 0) == NULL) goto _OVER; if (dmSetMgmtHandle(pArray, TDMT_MND_ALTER_TOPIC, mmPutNodeMsgToWriteQueue, 0) == NULL) goto _OVER; if (dmSetMgmtHandle(pArray, TDMT_MND_DROP_TOPIC, mmPutNodeMsgToWriteQueue, 0) == NULL) goto _OVER; if (dmSetMgmtHandle(pArray, TDMT_MND_SUBSCRIBE, mmPutNodeMsgToWriteQueue, 0) == NULL) goto _OVER; if (dmSetMgmtHandle(pArray, TDMT_MND_MQ_COMMIT_OFFSET, mmPutNodeMsgToWriteQueue, 0) == NULL) goto _OVER; if (dmSetMgmtHandle(pArray, TDMT_MND_MQ_ASK_EP, mmPutNodeMsgToReadQueue, 0) == NULL) goto _OVER; + if (dmSetMgmtHandle(pArray, TDMT_MND_MQ_DROP_CGROUP, mmPutNodeMsgToWriteQueue, 0) == NULL) goto _OVER; + if (dmSetMgmtHandle(pArray, TDMT_MND_MQ_DROP_CGROUP_RSP, mmPutNodeMsgToWriteQueue, 0) == NULL) goto _OVER; if (dmSetMgmtHandle(pArray, TDMT_VND_MQ_VG_CHANGE_RSP, mmPutNodeMsgToWriteQueue, 0) == NULL) goto _OVER; if (dmSetMgmtHandle(pArray, TDMT_VND_MQ_VG_DELETE_RSP, mmPutNodeMsgToWriteQueue, 0) == NULL) goto _OVER; if (dmSetMgmtHandle(pArray, TDMT_MND_CREATE_STREAM, mmPutNodeMsgToWriteQueue, 0) == NULL) goto _OVER; @@ -217,9 +219,9 @@ SArray *mmGetMsgHandles() { if (dmSetMgmtHandle(pArray, TDMT_VND_DROP_TASK, mmPutNodeMsgToQueryQueue, 1) == NULL) goto _OVER; if (dmSetMgmtHandle(pArray, TDMT_VND_QUERY_HEARTBEAT, mmPutNodeMsgToQueryQueue, 1) == NULL) goto _OVER; - if (dmSetMgmtHandle(pArray, TDMT_VND_ALTER_VNODE_RSP, mmPutNodeMsgToWriteQueue, 0) == NULL) goto _OVER; - if (dmSetMgmtHandle(pArray, TDMT_VND_SYNC_VNODE_RSP, mmPutNodeMsgToWriteQueue, 0) == NULL) goto _OVER; - if (dmSetMgmtHandle(pArray, TDMT_VND_COMPACT_VNODE_RSP, mmPutNodeMsgToWriteQueue, 0) == NULL) goto _OVER; + if (dmSetMgmtHandle(pArray, TDMT_VND_ALTER_CONFIG_RSP, mmPutNodeMsgToWriteQueue, 0) == NULL) goto _OVER; + if (dmSetMgmtHandle(pArray, TDMT_VND_ALTER_REPLICA_RSP, mmPutNodeMsgToWriteQueue, 0) == NULL) goto _OVER; + if (dmSetMgmtHandle(pArray, TDMT_VND_COMPACT_RSP, mmPutNodeMsgToWriteQueue, 0) == NULL) goto _OVER; if (dmSetMgmtHandle(pArray, TDMT_VND_SYNC_TIMEOUT, mmPutNodeMsgToSyncQueue, 1) == NULL) goto _OVER; if 
(dmSetMgmtHandle(pArray, TDMT_VND_SYNC_PING, mmPutNodeMsgToSyncQueue, 1) == NULL) goto _OVER; diff --git a/source/dnode/mgmt/mgmt_mnode/src/mmInt.c b/source/dnode/mgmt/mgmt_mnode/src/mmInt.c index 1b973f3045d5dd4e2f6e5fcc4e25413068af6af5..0f3c06cb3a909d2c936895026425883f225376f1 100644 --- a/source/dnode/mgmt/mgmt_mnode/src/mmInt.c +++ b/source/dnode/mgmt/mgmt_mnode/src/mmInt.c @@ -105,10 +105,7 @@ static int32_t mmOpen(SMgmtInputOpt *pInput, SMgmtOutputOpt *pOutput) { pMgmt->path = pInput->path; pMgmt->name = pInput->name; pMgmt->msgCb = pInput->msgCb; - pMgmt->msgCb.queueFps[QUERY_QUEUE] = (PutToQueueFp)mmPutRpcMsgToQueryQueue; - pMgmt->msgCb.queueFps[READ_QUEUE] = (PutToQueueFp)mmPutRpcMsgToReadQueue; - pMgmt->msgCb.queueFps[WRITE_QUEUE] = (PutToQueueFp)mmPutRpcMsgToWriteQueue; - pMgmt->msgCb.queueFps[SYNC_QUEUE] = (PutToQueueFp)mmPutRpcMsgToSyncQueue; + pMgmt->msgCb.putToQueueFp = (PutToQueueFp)mmPutRpcMsgToQueue; pMgmt->msgCb.mgmt = pMgmt; taosThreadRwlockInit(&pMgmt->lock, NULL); diff --git a/source/dnode/mgmt/mgmt_mnode/src/mmWorker.c b/source/dnode/mgmt/mgmt_mnode/src/mmWorker.c index 85120102bc629c30f7520268a8054657fe1201ec..460f2242f2752f10959c637c2b814a3a20f2e9cd 100644 --- a/source/dnode/mgmt/mgmt_mnode/src/mmWorker.c +++ b/source/dnode/mgmt/mgmt_mnode/src/mmWorker.c @@ -40,7 +40,7 @@ static void mmProcessQueue(SQueueInfo *pInfo, SRpcMsg *pMsg) { break; default: pMsg->info.node = pMgmt->pMnode; - code = mndProcessMsg(pMsg); + code = mndProcessRpcMsg(pMsg); } if (IsReq(pMsg) && pMsg->info.handle != NULL && code != TSDB_CODE_ACTION_IN_PROGRESS) { @@ -96,40 +96,38 @@ int32_t mmPutNodeMsgToMonitorQueue(SMnodeMgmt *pMgmt, SRpcMsg *pMsg) { return mmPutNodeMsgToWorker(&pMgmt->monitorWorker, pMsg); } -static inline int32_t mmPutRpcMsgToWorker(SSingleWorker *pWorker, SRpcMsg *pRpc) { +int32_t mmPutRpcMsgToQueue(SMnodeMgmt *pMgmt, EQueueType qtype, SRpcMsg *pRpc) { SRpcMsg *pMsg = taosAllocateQitem(sizeof(SRpcMsg), RPC_QITEM); if (pMsg == NULL) return -1; - - dTrace("msg:%p, create and put into worker:%s, type:%s", pMsg, pWorker->name, TMSG_INFO(pRpc->msgType)); memcpy(pMsg, pRpc, sizeof(SRpcMsg)); - taosWriteQitem(pWorker->queue, pMsg); - return 0; -} -int32_t mmPutRpcMsgToQueryQueue(SMnodeMgmt *pMgmt, SRpcMsg *pMsg) { - return mmPutRpcMsgToWorker(&pMgmt->queryWorker, pMsg); -} - -int32_t mmPutRpcMsgToWriteQueue(SMnodeMgmt *pMgmt, SRpcMsg *pMsg) { - return mmPutRpcMsgToWorker(&pMgmt->writeWorker, pMsg); -} - -int32_t mmPutRpcMsgToReadQueue(SMnodeMgmt *pMgmt, SRpcMsg *pMsg) { - return mmPutRpcMsgToWorker(&pMgmt->readWorker, pMsg); -} - -int32_t mmPutRpcMsgToSyncQueue(SMnodeMgmt *pMgmt, SRpcMsg *pMsg) { - int32_t code = -1; - if (mmAcquire(pMgmt) == 0) { - code = mmPutRpcMsgToWorker(&pMgmt->syncWorker, pMsg); - mmRelease(pMgmt); - } - - if (code != 0) { - rpcFreeCont(pMsg->pCont); - pMsg->pCont = NULL; + switch (qtype) { + case WRITE_QUEUE: + dTrace("msg:%p, is created and will put into mnode-write queue", pMsg); + taosWriteQitem(pMgmt->writeWorker.queue, pMsg); + return 0; + case QUERY_QUEUE: + dTrace("msg:%p, is created and will put into mnode-query queue", pMsg); + taosWriteQitem(pMgmt->queryWorker.queue, pMsg); + return 0; + + case READ_QUEUE: + dTrace("msg:%p, is created and will put into mnode-read queue", pMsg); + taosWriteQitem(pMgmt->readWorker.queue, pMsg); + return 0; + case SYNC_QUEUE: + if (mmAcquire(pMgmt) == 0) { + dTrace("msg:%p, is created and will put into mnode-sync queue", pMsg); + taosWriteQitem(pMgmt->syncWorker.queue, pMsg); + mmRelease(pMgmt); + return 0; + } else
{ + return -1; + } + default: + terrno = TSDB_CODE_INVALID_PARA; + return -1; } - return code; } int32_t mmStartWorker(SMnodeMgmt *pMgmt) { diff --git a/source/dnode/mgmt/mgmt_qnode/inc/qmInt.h b/source/dnode/mgmt/mgmt_qnode/inc/qmInt.h index 9738fb0c454a460a80fa0516b6e2e0ff1e8b05ff..54e9da24a4e1f581cbb7aa009e29efb9ceac221e 100644 --- a/source/dnode/mgmt/mgmt_qnode/inc/qmInt.h +++ b/source/dnode/mgmt/mgmt_qnode/inc/qmInt.h @@ -42,8 +42,7 @@ int32_t qmProcessDropReq(const SMgmtInputOpt *pInput, SRpcMsg *pMsg); int32_t qmProcessGetMonitorInfoReq(SQnodeMgmt *pMgmt, SRpcMsg *pMsg); // qmWorker.c -int32_t qmPutRpcMsgToQueryQueue(SQnodeMgmt *pMgmt, SRpcMsg *pMsg); -int32_t qmPutRpcMsgToFetchQueue(SQnodeMgmt *pMgmt, SRpcMsg *pMsg); +int32_t qmPutRpcMsgToQueue(SQnodeMgmt *pMgmt, EQueueType qtype, SRpcMsg *pMsg); int32_t qmGetQueueSize(SQnodeMgmt *pMgmt, int32_t vgId, EQueueType qtype); int32_t qmStartWorker(SQnodeMgmt *pMgmt); diff --git a/source/dnode/mgmt/mgmt_qnode/src/qmHandle.c b/source/dnode/mgmt/mgmt_qnode/src/qmHandle.c index c4b1ab63e46d62720131953bbddc928fc351d31c..864f5b485afdea2c798cbc35a12466ecfa1b69b8 100644 --- a/source/dnode/mgmt/mgmt_qnode/src/qmHandle.c +++ b/source/dnode/mgmt/mgmt_qnode/src/qmHandle.c @@ -16,7 +16,19 @@ #define _DEFAULT_SOURCE #include "qmInt.h" -void qmGetMonitorInfo(SQnodeMgmt *pMgmt, SMonQmInfo *qmInfo) {} +void qmGetMonitorInfo(SQnodeMgmt *pMgmt, SMonQmInfo *qmInfo) { + SQnodeLoad qload = {0}; + qndGetLoad(pMgmt->pQnode, &qload); + + qload.dnodeId = pMgmt->pData->dnodeId; + +} + +void qmGetQnodeLoads(SQnodeMgmt *pMgmt, SQnodeLoad *pInfo) { + qndGetLoad(pMgmt->pQnode, pInfo); + + pInfo->dnodeId = pMgmt->pData->dnodeId; +} int32_t qmProcessGetMonitorInfoReq(SQnodeMgmt *pMgmt, SRpcMsg *pMsg) { SMonQmInfo qmInfo = {0}; @@ -101,8 +113,6 @@ SArray *qmGetMsgHandles() { if (dmSetMgmtHandle(pArray, TDMT_VND_FETCH_RSP, qmPutNodeMsgToFetchQueue, 1) == NULL) goto _OVER; if (dmSetMgmtHandle(pArray, TDMT_VND_QUERY_HEARTBEAT, qmPutNodeMsgToFetchQueue, 1) == NULL) goto _OVER; - if (dmSetMgmtHandle(pArray, TDMT_VND_RES_READY, qmPutNodeMsgToFetchQueue, 1) == NULL) goto _OVER; - if (dmSetMgmtHandle(pArray, TDMT_VND_TASKS_STATUS, qmPutNodeMsgToFetchQueue, 1) == NULL) goto _OVER; if (dmSetMgmtHandle(pArray, TDMT_VND_CANCEL_TASK, qmPutNodeMsgToFetchQueue, 1) == NULL) goto _OVER; if (dmSetMgmtHandle(pArray, TDMT_VND_DROP_TASK, qmPutNodeMsgToFetchQueue, 1) == NULL) goto _OVER; diff --git a/source/dnode/mgmt/mgmt_qnode/src/qmInt.c b/source/dnode/mgmt/mgmt_qnode/src/qmInt.c index 06c18ab2889c05c910ad933338dc608a1d15d676..3b425a0b4923b3384bd620fc86e421cdc211ba4a 100644 --- a/source/dnode/mgmt/mgmt_qnode/src/qmInt.c +++ b/source/dnode/mgmt/mgmt_qnode/src/qmInt.c @@ -43,8 +43,7 @@ static int32_t qmOpen(SMgmtInputOpt *pInput, SMgmtOutputOpt *pOutput) { pMgmt->path = pInput->path; pMgmt->name = pInput->name; pMgmt->msgCb = pInput->msgCb; - pMgmt->msgCb.queueFps[QUERY_QUEUE] = (PutToQueueFp)qmPutRpcMsgToQueryQueue; - pMgmt->msgCb.queueFps[FETCH_QUEUE] = (PutToQueueFp)qmPutRpcMsgToFetchQueue; + pMgmt->msgCb.putToQueueFp = (PutToQueueFp)qmPutRpcMsgToQueue; pMgmt->msgCb.qsizeFp = (GetQueueSizeFp)qmGetQueueSize; pMgmt->msgCb.mgmt = pMgmt; diff --git a/source/dnode/mgmt/mgmt_qnode/src/qmWorker.c b/source/dnode/mgmt/mgmt_qnode/src/qmWorker.c index 35c94b7fbe786434cfb59191c8899949099d0325..e36efa83db24b0d6b706518ab3c789ece80c21d5 100644 --- a/source/dnode/mgmt/mgmt_qnode/src/qmWorker.c +++ b/source/dnode/mgmt/mgmt_qnode/src/qmWorker.c @@ -36,7 +36,7 @@ static void qmProcessQueue(SQueueInfo 
*pInfo, SRpcMsg *pMsg) { code = qmProcessGetMonitorInfoReq(pMgmt, pMsg); break; default: - code = qndProcessQueryMsg(pMgmt->pQnode, pMsg); + code = qndProcessQueryMsg(pMgmt->pQnode, pInfo->timestamp, pMsg); break; } @@ -68,22 +68,24 @@ int32_t qmPutNodeMsgToMonitorQueue(SQnodeMgmt *pMgmt, SRpcMsg *pMsg) { return qmPutNodeMsgToWorker(&pMgmt->monitorWorker, pMsg); } -static int32_t qmPutRpcMsgToWorker(SQnodeMgmt *pMgmt, SSingleWorker *pWorker, SRpcMsg *pRpc) { +int32_t qmPutRpcMsgToQueue(SQnodeMgmt *pMgmt, EQueueType qtype, SRpcMsg *pRpc) { SRpcMsg *pMsg = taosAllocateQitem(sizeof(SRpcMsg), RPC_QITEM); if (pMsg == NULL) return -1; - - dTrace("msg:%p, create and put into worker:%s, type:%s", pMsg, pWorker->name, TMSG_INFO(pRpc->msgType)); memcpy(pMsg, pRpc, sizeof(SRpcMsg)); - taosWriteQitem(pWorker->queue, pMsg); - return 0; -} - -int32_t qmPutRpcMsgToQueryQueue(SQnodeMgmt *pMgmt, SRpcMsg *pRpc) { - return qmPutRpcMsgToWorker(pMgmt, &pMgmt->queryWorker, pRpc); -} -int32_t qmPutRpcMsgToFetchQueue(SQnodeMgmt *pMgmt, SRpcMsg *pRpc) { - return qmPutRpcMsgToWorker(pMgmt, &pMgmt->fetchWorker, pRpc); + switch (qtype) { + case QUERY_QUEUE: + dTrace("msg:%p, is created and will put into qnode-query queue", pMsg); + taosWriteQitem(pMgmt->queryWorker.queue, pMsg); + return 0; + case READ_QUEUE: + dTrace("msg:%p, is created and will put into qnode-fetch queue", pMsg); + taosWriteQitem(pMgmt->fetchWorker.queue, pMsg); + return 0; + default: + terrno = TSDB_CODE_INVALID_PARA; + return -1; + } } int32_t qmGetQueueSize(SQnodeMgmt *pMgmt, int32_t vgId, EQueueType qtype) { diff --git a/source/dnode/mgmt/mgmt_vnode/inc/vmInt.h b/source/dnode/mgmt/mgmt_vnode/inc/vmInt.h index 5ec33fe810a777e654a9b64160169003f983ab77..adc18fdced4777fcfdc731993ce758e301c34ac9 100644 --- a/source/dnode/mgmt/mgmt_vnode/inc/vmInt.h +++ b/source/dnode/mgmt/mgmt_vnode/inc/vmInt.h @@ -44,26 +44,26 @@ typedef struct SVnodeMgmt { } SVnodeMgmt; typedef struct { - int32_t vgId; - int32_t vgVersion; - int8_t dropped; - char path[PATH_MAX + 20]; + int32_t vgId; + int32_t vgVersion; + int8_t dropped; + char path[PATH_MAX + 20]; } SWrapperCfg; typedef struct { - int32_t vgId; - int32_t refCount; - int32_t vgVersion; - int8_t dropped; - int8_t accessState; - char *path; - SVnode *pImpl; - STaosQueue *pWriteQ; - STaosQueue *pSyncQ; - STaosQueue *pApplyQ; - STaosQueue *pQueryQ; - STaosQueue *pFetchQ; - STaosQueue *pMergeQ; + int32_t vgId; + int32_t refCount; + int32_t vgVersion; + int8_t dropped; + int8_t accessState; + char *path; + SVnode *pImpl; + STaosQueue *pWriteQ; + STaosQueue *pSyncQ; + STaosQueue *pApplyQ; + STaosQueue *pQueryQ; + STaosQueue *pFetchQ; + STaosQueue *pMergeQ; } SVnodeObj; typedef struct { @@ -100,21 +100,16 @@ void vmStopWorker(SVnodeMgmt *pMgmt); int32_t vmAllocQueue(SVnodeMgmt *pMgmt, SVnodeObj *pVnode); void vmFreeQueue(SVnodeMgmt *pMgmt, SVnodeObj *pVnode); -int32_t vmPutRpcMsgToWriteQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg); -int32_t vmPutRpcMsgToSyncQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg); -int32_t vmPutRpcMsgToApplyQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg); -int32_t vmPutRpcMsgToQueryQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg); -int32_t vmPutRpcMsgToFetchQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg); -int32_t vmPutRpcMsgToMergeQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg); int32_t vmGetQueueSize(SVnodeMgmt *pMgmt, int32_t vgId, EQueueType qtype); - -int32_t vmPutNodeMsgToWriteQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg); -int32_t vmPutNodeMsgToSyncQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg); -int32_t vmPutNodeMsgToQueryQueue(SVnodeMgmt *pMgmt, 
SRpcMsg *pMsg); -int32_t vmPutNodeMsgToFetchQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg); -int32_t vmPutNodeMsgToMergeQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg); -int32_t vmPutNodeMsgToMgmtQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg); -int32_t vmPutNodeMsgToMonitorQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg); +int32_t vmPutRpcMsgToQueue(SVnodeMgmt *pMgmt, EQueueType qtype, SRpcMsg *pRpc); + +int32_t vmPutMsgToWriteQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg); +int32_t vmPutMsgToSyncQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg); +int32_t vmPutMsgToQueryQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg); +int32_t vmPutMsgToFetchQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg); +int32_t vmPutMsgToMergeQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg); +int32_t vmPutMsgToMgmtQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg); +int32_t vmPutMsgToMonitorQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg); #ifdef __cplusplus } diff --git a/source/dnode/mgmt/mgmt_vnode/src/vmFile.c b/source/dnode/mgmt/mgmt_vnode/src/vmFile.c index cf5a7ad88544bad3e9fbe21e5605b621148183fe..613f3fb994da4fca90842b971ca65b3e6db7fbc7 100644 --- a/source/dnode/mgmt/mgmt_vnode/src/vmFile.c +++ b/source/dnode/mgmt/mgmt_vnode/src/vmFile.c @@ -16,6 +16,8 @@ #define _DEFAULT_SOURCE #include "vmInt.h" +#define MAX_CONTENT_LEN 1024 * 1024 + SVnodeObj **vmGetVnodeListFromHash(SVnodeMgmt *pMgmt, int32_t *numOfVnodes) { taosThreadRwlockRdlock(&pMgmt->lock); @@ -29,7 +31,7 @@ SVnodeObj **vmGetVnodeListFromHash(SVnodeMgmt *pMgmt, int32_t *numOfVnodes) { SVnodeObj *pVnode = *ppVnode; if (pVnode && num < size) { int32_t refCount = atomic_add_fetch_32(&pVnode->refCount, 1); - dTrace("vgId:%d, acquire vnode, refCount:%d", pVnode->vgId, refCount); + // dTrace("vgId:%d, acquire vnode, refCount:%d", pVnode->vgId, refCount); pVnodes[num] = (*ppVnode); num++; pIter = taosHashIterate(pMgmt->hash, pIter); @@ -47,7 +49,7 @@ SVnodeObj **vmGetVnodeListFromHash(SVnodeMgmt *pMgmt, int32_t *numOfVnodes) { int32_t vmGetVnodeListFromFile(SVnodeMgmt *pMgmt, SWrapperCfg **ppCfgs, int32_t *numOfVnodes) { int32_t code = TSDB_CODE_INVALID_JSON_FORMAT; int32_t len = 0; - int32_t maxLen = 1024 * 1024; + int32_t maxLen = MAX_CONTENT_LEN; char *content = taosMemoryCalloc(1, maxLen + 1); cJSON *root = NULL; FILE *fp = NULL; @@ -128,7 +130,7 @@ int32_t vmGetVnodeListFromFile(SVnodeMgmt *pMgmt, SWrapperCfg **ppCfgs, int32_t *numOfVnodes = vnodesNum; code = 0; - dDebug("succcessed to read file %s", file); + dDebug("succeeded to read file %s, numOfVnodes:%d", file, vnodesNum); _OVER: if (content != NULL) taosMemoryFree(content); @@ -156,7 +158,7 @@ int32_t vmWriteVnodeListToFile(SVnodeMgmt *pMgmt) { SVnodeObj **pVnodes = vmGetVnodeListFromHash(pMgmt, &numOfVnodes); int32_t len = 0; - int32_t maxLen = 1024 * 1024; + int32_t maxLen = MAX_CONTENT_LEN; char *content = taosMemoryCalloc(1, maxLen + 1); if (content == NULL) { terrno = TSDB_CODE_OUT_OF_MEMORY; @@ -195,6 +197,6 @@ int32_t vmWriteVnodeListToFile(SVnodeMgmt *pMgmt) { taosMemoryFree(pVnodes); } - dDebug("successed to write %s", realfile); + dDebug("succeeded to write %s, numOfVnodes:%d", realfile, numOfVnodes); return taosRenameFile(file, realfile); } \ No newline at end of file diff --git a/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c b/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c index ac56a9ba3d7aca44839890d04e62944cf1a8c23e..f08ac6b9d00f06d76a388fa78027b165c3060cda 100644 --- a/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c +++ b/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c @@ -140,6 +140,7 @@ static void vmGenerateVnodeCfg(SCreateVnodeReq *pCreate, SVnodeCfg *pCfg) { pCfg->szCache =
pCreate->pages; pCfg->szBuf = (uint64_t)pCreate->buffer * 1024 * 1024; pCfg->isWeak = true; + pCfg->isTsma = pCreate->isTsma; pCfg->tsdbCfg.compression = pCreate->compression; pCfg->tsdbCfg.precision = pCreate->precision; pCfg->tsdbCfg.days = pCreate->daysPerFile; @@ -149,20 +150,26 @@ static void vmGenerateVnodeCfg(SCreateVnodeReq *pCreate, SVnodeCfg *pCfg) { pCfg->tsdbCfg.minRows = pCreate->minRows; pCfg->tsdbCfg.maxRows = pCreate->maxRows; for (size_t i = 0; i < taosArrayGetSize(pCreate->pRetensions); ++i) { - memcpy(&pCfg->tsdbCfg.retentions[i], taosArrayGet(pCreate->pRetensions, i), sizeof(SRetention)); + SRetention *pRetention = &pCfg->tsdbCfg.retentions[i]; + memcpy(pRetention, taosArrayGet(pCreate->pRetensions, i), sizeof(SRetention)); + if (i == 0) { + if ((pRetention->freq > 0 && pRetention->keep > 0)) pCfg->isRsma = 1; + } } + pCfg->walCfg.vgId = pCreate->vgId; pCfg->hashBegin = pCreate->hashBegin; pCfg->hashEnd = pCreate->hashEnd; pCfg->hashMethod = pCreate->hashMethod; + pCfg->standby = pCreate->standby; pCfg->syncCfg.myIndex = pCreate->selfIndex; pCfg->syncCfg.replicaNum = pCreate->replica; memset(&pCfg->syncCfg.nodeInfo, 0, sizeof(pCfg->syncCfg.nodeInfo)); for (int i = 0; i < pCreate->replica; ++i) { - pCfg->syncCfg.nodeInfo[i].nodePort = pCreate->replicas[i].port; - snprintf(pCfg->syncCfg.nodeInfo[i].nodeFqdn, sizeof(pCfg->syncCfg.nodeInfo[i].nodeFqdn), "%s", - pCreate->replicas[i].fqdn); + SNodeInfo *pNode = &pCfg->syncCfg.nodeInfo[i]; + pNode->nodePort = pCreate->replicas[i].port; + tstrncpy(pNode->nodeFqdn, pCreate->replicas[i].fqdn, sizeof(pNode->nodeFqdn)); } } @@ -173,8 +180,28 @@ static void vmGenerateWrapperCfg(SVnodeMgmt *pMgmt, SCreateVnodeReq *pCreate, SW snprintf(pCfg->path, sizeof(pCfg->path), "%s%svnode%d", pMgmt->path, TD_DIRSEP, pCreate->vgId); } +static int32_t vmTsmaAdjustDays(SVnodeCfg *pCfg, SCreateVnodeReq *pReq) { + if (pReq->isTsma) { + SMsgHead *smaMsg = pReq->pTsma; + uint32_t contLen = (uint32_t)(htonl(smaMsg->contLen) - sizeof(SMsgHead)); + return smaGetTSmaDays(pCfg, POINTER_SHIFT(smaMsg, sizeof(SMsgHead)), contLen, &pCfg->tsdbCfg.days); + } + return 0; +} + +static int32_t vmTsmaProcessCreate(SVnode *pVnode, SCreateVnodeReq *pReq) { + if (pReq->isTsma) { + SMsgHead *smaMsg = pReq->pTsma; + uint32_t contLen = (uint32_t)(htonl(smaMsg->contLen) - sizeof(SMsgHead)); + return vnodeProcessCreateTSma(pVnode, POINTER_SHIFT(smaMsg, sizeof(SMsgHead)), contLen); + } + return 0; +} + int32_t vmProcessCreateVnodeReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) { SCreateVnodeReq createReq = {0}; + SVnodeCfg vnodeCfg = {0}; + SWrapperCfg wrapperCfg = {0}; int32_t code = -1; char path[TSDB_FILENAME_LEN] = {0}; @@ -183,12 +210,16 @@ int32_t vmProcessCreateVnodeReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) { return -1; } - dDebug("vgId:%d, create vnode req is received, tsma:%d", createReq.vgId, createReq.isTsma); - - SVnodeCfg vnodeCfg = {0}; + dDebug("vgId:%d, start to create vnode, tsma:%d standby:%d", createReq.vgId, createReq.isTsma, + createReq.standby); vmGenerateVnodeCfg(&createReq, &vnodeCfg); - SWrapperCfg wrapperCfg = {0}; + if (vmTsmaAdjustDays(&vnodeCfg, &createReq) < 0) { + dError("vgId:%d, failed to adjust tsma days since %s", createReq.vgId, terrstr()); + code = terrno; + goto _OVER; + } + vmGenerateWrapperCfg(pMgmt, &createReq, &wrapperCfg); SVnodeObj *pVnode = vmAcquireVnode(pMgmt, createReq.vgId); @@ -197,19 +228,21 @@ int32_t vmProcessCreateVnodeReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) { tFreeSCreateVnodeReq(&createReq); vmReleaseVnode(pMgmt, pVnode); terrno =
TSDB_CODE_NODE_ALREADY_DEPLOYED; - return -1; + code = terrno; + goto _OVER; } snprintf(path, TSDB_FILENAME_LEN, "vnode%svnode%d", TD_DIRSEP, vnodeCfg.vgId); if (vnodeCreate(path, &vnodeCfg, pMgmt->pTfs) < 0) { tFreeSCreateVnodeReq(&createReq); dError("vgId:%d, failed to create vnode since %s", createReq.vgId, terrstr()); - return -1; + code = terrno; + goto _OVER; } SVnode *pImpl = vnodeOpen(path, pMgmt->pTfs, pMgmt->msgCb); if (pImpl == NULL) { - dError("vgId:%d, failed to create vnode since %s", createReq.vgId, terrstr()); + dError("vgId:%d, failed to open vnode since %s", createReq.vgId, terrstr()); code = terrno; goto _OVER; } @@ -217,6 +250,14 @@ int32_t vmProcessCreateVnodeReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) { code = vmOpenVnode(pMgmt, &wrapperCfg, pImpl); if (code != 0) { dError("vgId:%d, failed to open vnode since %s", createReq.vgId, terrstr()); + code = terrno; + goto _OVER; + } + + code = vmTsmaProcessCreate(pImpl, &createReq); + if (code != 0) { + dError("vgId:%d, failed to create tsma since %s", createReq.vgId, terrstr()); + code = terrno; goto _OVER; } @@ -227,12 +268,17 @@ int32_t vmProcessCreateVnodeReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) { } code = vmWriteVnodeListToFile(pMgmt); - if (code != 0) goto _OVER; + if (code != 0) { + code = terrno; + goto _OVER; + } _OVER: if (code != 0) { vnodeClose(pImpl); vnodeDestroy(path, pMgmt->pTfs); + } else { + dInfo("vgId:%d, vnode is created", createReq.vgId); } tFreeSCreateVnodeReq(&createReq); @@ -248,7 +294,7 @@ int32_t vmProcessDropVnodeReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) { } int32_t vgId = dropReq.vgId; - dDebug("vgId:%d, drop vnode req is received", vgId); + dDebug("vgId:%d, start to drop vnode", vgId); SVnodeObj *pVnode = vmAcquireVnode(pMgmt, vgId); if (pVnode == NULL) { @@ -275,60 +321,59 @@ SArray *vmGetMsgHandles() { SArray *pArray = taosArrayInit(32, sizeof(SMgmtHandle)); if (pArray == NULL) goto _OVER; - if (dmSetMgmtHandle(pArray, TDMT_MON_VM_INFO, vmPutNodeMsgToMonitorQueue, 0) == NULL) goto _OVER; - if (dmSetMgmtHandle(pArray, TDMT_MON_VM_LOAD, vmPutNodeMsgToMonitorQueue, 0) == NULL) goto _OVER; + if (dmSetMgmtHandle(pArray, TDMT_MON_VM_INFO, vmPutMsgToMonitorQueue, 0) == NULL) goto _OVER; + if (dmSetMgmtHandle(pArray, TDMT_MON_VM_LOAD, vmPutMsgToMonitorQueue, 0) == NULL) goto _OVER; // Requests handled by VNODE - if (dmSetMgmtHandle(pArray, TDMT_VND_SUBMIT, vmPutNodeMsgToWriteQueue, 0) == NULL) goto _OVER; - if (dmSetMgmtHandle(pArray, TDMT_VND_QUERY, vmPutNodeMsgToQueryQueue, 0) == NULL) goto _OVER; - if (dmSetMgmtHandle(pArray, TDMT_VND_QUERY_CONTINUE, vmPutNodeMsgToQueryQueue, 0) == NULL) goto _OVER; - if (dmSetMgmtHandle(pArray, TDMT_VND_FETCH, vmPutNodeMsgToFetchQueue, 0) == NULL) goto _OVER; - if (dmSetMgmtHandle(pArray, TDMT_VND_ALTER_TABLE, vmPutNodeMsgToWriteQueue, 0) == NULL) goto _OVER; - if (dmSetMgmtHandle(pArray, TDMT_VND_UPDATE_TAG_VAL, vmPutNodeMsgToWriteQueue, 0) == NULL) goto _OVER; - if (dmSetMgmtHandle(pArray, TDMT_VND_TABLE_META, vmPutNodeMsgToFetchQueue, 0) == NULL) goto _OVER; - if (dmSetMgmtHandle(pArray, TDMT_VND_TABLES_META, vmPutNodeMsgToFetchQueue, 0) == NULL) goto _OVER; - if (dmSetMgmtHandle(pArray, TDMT_VND_MQ_CONSUME, vmPutNodeMsgToQueryQueue, 0) == NULL) goto _OVER; - if (dmSetMgmtHandle(pArray, TDMT_VND_MQ_QUERY, vmPutNodeMsgToQueryQueue, 0) == NULL) goto _OVER; - if (dmSetMgmtHandle(pArray, TDMT_VND_MQ_CONNECT, vmPutNodeMsgToWriteQueue, 0) == NULL) goto _OVER; - if (dmSetMgmtHandle(pArray, TDMT_VND_MQ_DISCONNECT, vmPutNodeMsgToWriteQueue, 0) == NULL) goto _OVER; - // if 
-  if (dmSetMgmtHandle(pArray, TDMT_VND_MQ_SET_CUR, vmPutNodeMsgToWriteQueue, 0)== NULL) goto _OVER;
-  if (dmSetMgmtHandle(pArray, TDMT_VND_RES_READY, vmPutNodeMsgToFetchQueue, 0) == NULL) goto _OVER;
-  if (dmSetMgmtHandle(pArray, TDMT_VND_TASKS_STATUS, vmPutNodeMsgToFetchQueue, 0) == NULL) goto _OVER;
-  if (dmSetMgmtHandle(pArray, TDMT_VND_CANCEL_TASK, vmPutNodeMsgToFetchQueue, 0) == NULL) goto _OVER;
-  if (dmSetMgmtHandle(pArray, TDMT_VND_DROP_TASK, vmPutNodeMsgToFetchQueue, 0) == NULL) goto _OVER;
-  if (dmSetMgmtHandle(pArray, TDMT_VND_CREATE_STB, vmPutNodeMsgToWriteQueue, 0) == NULL) goto _OVER;
-  if (dmSetMgmtHandle(pArray, TDMT_VND_ALTER_STB, vmPutNodeMsgToWriteQueue, 0) == NULL) goto _OVER;
-  if (dmSetMgmtHandle(pArray, TDMT_VND_DROP_STB, vmPutNodeMsgToWriteQueue, 0) == NULL) goto _OVER;
-  if (dmSetMgmtHandle(pArray, TDMT_VND_CREATE_TABLE, vmPutNodeMsgToWriteQueue, 0) == NULL) goto _OVER;
-  if (dmSetMgmtHandle(pArray, TDMT_VND_DROP_TABLE, vmPutNodeMsgToWriteQueue, 0) == NULL) goto _OVER;
-  if (dmSetMgmtHandle(pArray, TDMT_VND_CREATE_SMA, vmPutNodeMsgToWriteQueue, 0) == NULL) goto _OVER;
-  if (dmSetMgmtHandle(pArray, TDMT_VND_CANCEL_SMA, vmPutNodeMsgToWriteQueue, 0) == NULL) goto _OVER;
-  if (dmSetMgmtHandle(pArray, TDMT_VND_DROP_SMA, vmPutNodeMsgToWriteQueue, 0) == NULL) goto _OVER;
-  if (dmSetMgmtHandle(pArray, TDMT_VND_SUBMIT_RSMA, vmPutNodeMsgToWriteQueue, 0) == NULL) goto _OVER;
-  if (dmSetMgmtHandle(pArray, TDMT_VND_MQ_VG_CHANGE, vmPutNodeMsgToWriteQueue, 0) == NULL) goto _OVER;
-  if (dmSetMgmtHandle(pArray, TDMT_VND_MQ_VG_DELETE, vmPutNodeMsgToWriteQueue, 0) == NULL) goto _OVER;
-  if (dmSetMgmtHandle(pArray, TDMT_VND_CONSUME, vmPutNodeMsgToFetchQueue, 0) == NULL) goto _OVER;
-  if (dmSetMgmtHandle(pArray, TDMT_VND_TASK_DEPLOY, vmPutNodeMsgToWriteQueue, 0) == NULL) goto _OVER;
-  if (dmSetMgmtHandle(pArray, TDMT_VND_QUERY_HEARTBEAT, vmPutNodeMsgToFetchQueue, 0) == NULL) goto _OVER;
-  if (dmSetMgmtHandle(pArray, TDMT_VND_STREAM_TRIGGER, vmPutNodeMsgToFetchQueue, 0) == NULL) goto _OVER;
-  if (dmSetMgmtHandle(pArray, TDMT_VND_TASK_RUN, vmPutNodeMsgToFetchQueue, 0) == NULL) goto _OVER;
-  if (dmSetMgmtHandle(pArray, TDMT_VND_TASK_DISPATCH, vmPutNodeMsgToFetchQueue, 0) == NULL) goto _OVER;
-  if (dmSetMgmtHandle(pArray, TDMT_VND_TASK_RECOVER, vmPutNodeMsgToFetchQueue, 0) == NULL) goto _OVER;
-
-  if (dmSetMgmtHandle(pArray, TDMT_VND_ALTER_VNODE, vmPutNodeMsgToWriteQueue, 0) == NULL) goto _OVER;
-  if (dmSetMgmtHandle(pArray, TDMT_VND_COMPACT_VNODE, vmPutNodeMsgToWriteQueue, 0) == NULL) goto _OVER;
-  if (dmSetMgmtHandle(pArray, TDMT_DND_CREATE_VNODE, vmPutNodeMsgToMgmtQueue, 0) == NULL) goto _OVER;
-  if (dmSetMgmtHandle(pArray, TDMT_DND_DROP_VNODE, vmPutNodeMsgToMgmtQueue, 0) == NULL) goto _OVER;
-
-  if (dmSetMgmtHandle(pArray, TDMT_VND_SYNC_TIMEOUT, vmPutNodeMsgToSyncQueue, 0) == NULL) goto _OVER;
-  if (dmSetMgmtHandle(pArray, TDMT_VND_SYNC_PING, vmPutNodeMsgToSyncQueue, 0) == NULL) goto _OVER;
-  if (dmSetMgmtHandle(pArray, TDMT_VND_SYNC_PING_REPLY, vmPutNodeMsgToSyncQueue, 0) == NULL) goto _OVER;
-  if (dmSetMgmtHandle(pArray, TDMT_VND_SYNC_CLIENT_REQUEST, vmPutNodeMsgToSyncQueue, 0) == NULL) goto _OVER;
-  if (dmSetMgmtHandle(pArray, TDMT_VND_SYNC_CLIENT_REQUEST_REPLY, vmPutNodeMsgToSyncQueue, 0) == NULL) goto _OVER;
-  if (dmSetMgmtHandle(pArray, TDMT_VND_SYNC_REQUEST_VOTE, vmPutNodeMsgToSyncQueue, 0) == NULL) goto _OVER;
-  if (dmSetMgmtHandle(pArray, TDMT_VND_SYNC_REQUEST_VOTE_REPLY, vmPutNodeMsgToSyncQueue, 0) == NULL) goto _OVER;
-  if (dmSetMgmtHandle(pArray, TDMT_VND_SYNC_APPEND_ENTRIES, vmPutNodeMsgToSyncQueue, 0) == NULL) goto _OVER;
-  if (dmSetMgmtHandle(pArray, TDMT_VND_SYNC_APPEND_ENTRIES_REPLY, vmPutNodeMsgToSyncQueue, 0) == NULL) goto _OVER;
+  if (dmSetMgmtHandle(pArray, TDMT_VND_SUBMIT, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
+  if (dmSetMgmtHandle(pArray, TDMT_VND_QUERY, vmPutMsgToQueryQueue, 0) == NULL) goto _OVER;
+  if (dmSetMgmtHandle(pArray, TDMT_VND_QUERY_CONTINUE, vmPutMsgToQueryQueue, 0) == NULL) goto _OVER;
+  if (dmSetMgmtHandle(pArray, TDMT_VND_FETCH, vmPutMsgToFetchQueue, 0) == NULL) goto _OVER;
+  if (dmSetMgmtHandle(pArray, TDMT_VND_ALTER_TABLE, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
+  if (dmSetMgmtHandle(pArray, TDMT_VND_UPDATE_TAG_VAL, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
+  if (dmSetMgmtHandle(pArray, TDMT_VND_TABLE_META, vmPutMsgToFetchQueue, 0) == NULL) goto _OVER;
+  if (dmSetMgmtHandle(pArray, TDMT_VND_TABLES_META, vmPutMsgToFetchQueue, 0) == NULL) goto _OVER;
+  if (dmSetMgmtHandle(pArray, TDMT_VND_MQ_CONSUME, vmPutMsgToQueryQueue, 0) == NULL) goto _OVER;
+  if (dmSetMgmtHandle(pArray, TDMT_VND_MQ_QUERY, vmPutMsgToQueryQueue, 0) == NULL) goto _OVER;
+  if (dmSetMgmtHandle(pArray, TDMT_VND_MQ_CONNECT, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
+  if (dmSetMgmtHandle(pArray, TDMT_VND_MQ_DISCONNECT, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
+  // if (dmSetMgmtHandle(pArray, TDMT_VND_MQ_SET_CUR, vmPutMsgToWriteQueue, 0)== NULL) goto _OVER;
+  if (dmSetMgmtHandle(pArray, TDMT_VND_CANCEL_TASK, vmPutMsgToFetchQueue, 0) == NULL) goto _OVER;
+  if (dmSetMgmtHandle(pArray, TDMT_VND_DROP_TASK, vmPutMsgToFetchQueue, 0) == NULL) goto _OVER;
+  if (dmSetMgmtHandle(pArray, TDMT_VND_CREATE_STB, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
+  if (dmSetMgmtHandle(pArray, TDMT_VND_ALTER_STB, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
+  if (dmSetMgmtHandle(pArray, TDMT_VND_DROP_STB, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
+  if (dmSetMgmtHandle(pArray, TDMT_VND_CREATE_TABLE, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
+  if (dmSetMgmtHandle(pArray, TDMT_VND_DROP_TABLE, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
+  if (dmSetMgmtHandle(pArray, TDMT_VND_CREATE_SMA, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
+  if (dmSetMgmtHandle(pArray, TDMT_VND_CANCEL_SMA, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
+  if (dmSetMgmtHandle(pArray, TDMT_VND_DROP_SMA, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
+  if (dmSetMgmtHandle(pArray, TDMT_VND_SUBMIT_RSMA, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
+  if (dmSetMgmtHandle(pArray, TDMT_VND_MQ_VG_CHANGE, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
+  if (dmSetMgmtHandle(pArray, TDMT_VND_MQ_VG_DELETE, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
+  if (dmSetMgmtHandle(pArray, TDMT_VND_CONSUME, vmPutMsgToFetchQueue, 0) == NULL) goto _OVER;
+  if (dmSetMgmtHandle(pArray, TDMT_VND_TASK_DEPLOY, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
+  if (dmSetMgmtHandle(pArray, TDMT_VND_QUERY_HEARTBEAT, vmPutMsgToFetchQueue, 0) == NULL) goto _OVER;
+  if (dmSetMgmtHandle(pArray, TDMT_VND_STREAM_TRIGGER, vmPutMsgToFetchQueue, 0) == NULL) goto _OVER;
+  if (dmSetMgmtHandle(pArray, TDMT_VND_TASK_RUN, vmPutMsgToFetchQueue, 0) == NULL) goto _OVER;
+  if (dmSetMgmtHandle(pArray, TDMT_VND_TASK_DISPATCH, vmPutMsgToFetchQueue, 0) == NULL) goto _OVER;
+  if (dmSetMgmtHandle(pArray, TDMT_VND_TASK_RECOVER, vmPutMsgToFetchQueue, 0) == NULL) goto _OVER;
+
+  if (dmSetMgmtHandle(pArray, TDMT_VND_ALTER_REPLICA, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
+  if (dmSetMgmtHandle(pArray, TDMT_VND_ALTER_CONFIG, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
+  if (dmSetMgmtHandle(pArray, TDMT_VND_COMPACT, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
+  if (dmSetMgmtHandle(pArray, TDMT_DND_CREATE_VNODE, vmPutMsgToMgmtQueue, 0) == NULL) goto _OVER;
+  if (dmSetMgmtHandle(pArray, TDMT_DND_DROP_VNODE, vmPutMsgToMgmtQueue, 0) == NULL) goto _OVER;
+
+  if (dmSetMgmtHandle(pArray, TDMT_VND_SYNC_TIMEOUT, vmPutMsgToSyncQueue, 0) == NULL) goto _OVER;
+  if (dmSetMgmtHandle(pArray, TDMT_VND_SYNC_PING, vmPutMsgToSyncQueue, 0) == NULL) goto _OVER;
+  if (dmSetMgmtHandle(pArray, TDMT_VND_SYNC_PING_REPLY, vmPutMsgToSyncQueue, 0) == NULL) goto _OVER;
+  if (dmSetMgmtHandle(pArray, TDMT_VND_SYNC_CLIENT_REQUEST, vmPutMsgToSyncQueue, 0) == NULL) goto _OVER;
+  if (dmSetMgmtHandle(pArray, TDMT_VND_SYNC_CLIENT_REQUEST_REPLY, vmPutMsgToSyncQueue, 0) == NULL) goto _OVER;
+  if (dmSetMgmtHandle(pArray, TDMT_VND_SYNC_REQUEST_VOTE, vmPutMsgToSyncQueue, 0) == NULL) goto _OVER;
+  if (dmSetMgmtHandle(pArray, TDMT_VND_SYNC_REQUEST_VOTE_REPLY, vmPutMsgToSyncQueue, 0) == NULL) goto _OVER;
+  if (dmSetMgmtHandle(pArray, TDMT_VND_SYNC_APPEND_ENTRIES, vmPutMsgToSyncQueue, 0) == NULL) goto _OVER;
+  if (dmSetMgmtHandle(pArray, TDMT_VND_SYNC_APPEND_ENTRIES_REPLY, vmPutMsgToSyncQueue, 0) == NULL) goto _OVER;
 
   code = 0;
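The hunk above swaps the per-node `vmPutNodeMsgTo*` handlers for the shared `vmPutMsgTo*` family while keeping the same registration pattern: a table that maps each message type to the queue-put function that should receive it. A minimal sketch of that pattern, using hypothetical simplified types rather than the actual TDengine definitions:

```c
#include <stdio.h>

// Hypothetical stand-ins for the real SRpcMsg and handler types.
typedef struct { int msgType; } Msg;
typedef int (*PutToQueueFp)(Msg *pMsg);

static int putToWriteQueue(Msg *pMsg) { printf("write queue gets type %d\n", pMsg->msgType); return 0; }
static int putToFetchQueue(Msg *pMsg) { printf("fetch queue gets type %d\n", pMsg->msgType); return 0; }

enum { MSG_SUBMIT = 0, MSG_FETCH = 1, MSG_MAX = 2 };

// The routing table: one slot per message type, in the spirit of dmSetMgmtHandle.
static PutToQueueFp handles[MSG_MAX];

int main(void) {
  handles[MSG_SUBMIT] = putToWriteQueue;  // cf. TDMT_VND_SUBMIT -> vmPutMsgToWriteQueue
  handles[MSG_FETCH]  = putToFetchQueue;  // cf. TDMT_VND_FETCH  -> vmPutMsgToFetchQueue

  Msg m = {.msgType = MSG_SUBMIT};
  if (handles[m.msgType] != NULL) handles[m.msgType](&m);  // dispatch purely by type
  return 0;
}
```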
diff --git a/source/dnode/mgmt/mgmt_vnode/src/vmInt.c b/source/dnode/mgmt/mgmt_vnode/src/vmInt.c
index 0c8d492ef449624e7462b736fcdd9c2ffb9c2ac2..23927255bb8c920bfaf9de12e91bbffe4bd1de39 100644
--- a/source/dnode/mgmt/mgmt_vnode/src/vmInt.c
+++ b/source/dnode/mgmt/mgmt_vnode/src/vmInt.c
@@ -18,21 +18,17 @@
 SVnodeObj *vmAcquireVnode(SVnodeMgmt *pMgmt, int32_t vgId) {
   SVnodeObj *pVnode = NULL;
-  int32_t    refCount = 0;
 
   taosThreadRwlockRdlock(&pMgmt->lock);
   taosHashGetDup(pMgmt->hash, &vgId, sizeof(int32_t), (void *)&pVnode);
   if (pVnode == NULL) {
     terrno = TSDB_CODE_VND_INVALID_VGROUP_ID;
   } else {
-    refCount = atomic_add_fetch_32(&pVnode->refCount, 1);
+    int32_t refCount = atomic_add_fetch_32(&pVnode->refCount, 1);
+    // dTrace("vgId:%d, acquire vnode, ref:%d", pVnode->vgId, refCount);
   }
   taosThreadRwlockUnlock(&pMgmt->lock);
 
-  if (pVnode != NULL) {
-    dTrace("vgId:%d, acquire vnode, refCount:%d", pVnode->vgId, refCount);
-  }
-
   return pVnode;
 }
 
@@ -41,8 +37,8 @@ void vmReleaseVnode(SVnodeMgmt *pMgmt, SVnodeObj *pVnode) {
   taosThreadRwlockRdlock(&pMgmt->lock);
   int32_t refCount = atomic_sub_fetch_32(&pVnode->refCount, 1);
+  // dTrace("vgId:%d, release vnode, ref:%d", pVnode->vgId, refCount);
   taosThreadRwlockUnlock(&pMgmt->lock);
-  dTrace("vgId:%d, release vnode, refCount:%d", pVnode->vgId, refCount);
 }
 
 int32_t vmOpenVnode(SVnodeMgmt *pMgmt, SWrapperCfg *pCfg, SVnode *pImpl) {
@@ -138,7 +134,7 @@ static void *vmOpenVnodeInThread(void *param) {
     }
   }
 
-  dDebug("thread:%d, total vnodes:%d, opened:%d failed:%d", pThread->threadIndex, pThread->vnodeNum, pThread->opened,
+  dDebug("thread:%d, numOfVnodes:%d, opened:%d failed:%d", pThread->threadIndex, pThread->vnodeNum, pThread->opened,
          pThread->failed);
   return NULL;
 }
@@ -160,7 +156,7 @@ static int32_t vmOpenVnodes(SVnodeMgmt *pMgmt) {
 
   pMgmt->state.totalVnodes = numOfVnodes;
 
-  int32_t threadNum = 1;  // tsNumOfCores;
+  int32_t threadNum = 1;
   int32_t vnodesPerThread = numOfVnodes / threadNum + 1;
   SVnodeThread *threads = taosMemoryCalloc(threadNum, sizeof(SVnodeThread));
@@ -253,12 +249,7 @@ static int32_t vmInit(SMgmtInputOpt *pInput, SMgmtOutputOpt *pOutput) {
   pMgmt->path = pInput->path;
   pMgmt->name = pInput->name;
   pMgmt->msgCb = pInput->msgCb;
-  pMgmt->msgCb.queueFps[WRITE_QUEUE] = (PutToQueueFp)vmPutRpcMsgToWriteQueue;
-  pMgmt->msgCb.queueFps[SYNC_QUEUE] = (PutToQueueFp)vmPutRpcMsgToSyncQueue;
-  pMgmt->msgCb.queueFps[APPLY_QUEUE] = (PutToQueueFp)vmPutRpcMsgToApplyQueue;
-  pMgmt->msgCb.queueFps[QUERY_QUEUE] = (PutToQueueFp)vmPutRpcMsgToQueryQueue;
-  pMgmt->msgCb.queueFps[FETCH_QUEUE] = (PutToQueueFp)vmPutRpcMsgToFetchQueue;
-  pMgmt->msgCb.queueFps[MERGE_QUEUE] = (PutToQueueFp)vmPutRpcMsgToMergeQueue;
+  pMgmt->msgCb.putToQueueFp = (PutToQueueFp)vmPutRpcMsgToQueue;
   pMgmt->msgCb.qsizeFp = (GetQueueSizeFp)vmGetQueueSize;
   pMgmt->msgCb.mgmt = pMgmt;
   taosThreadRwlockInit(&pMgmt->lock, NULL);
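The vmInt.c hunks keep the acquire/release pair bumping a per-vnode reference count under a read lock (the lock guards the hash lookup; the counter itself is atomic). A rough sketch of the same discipline with pthreads and GCC atomics, assuming simplified types in place of the taos wrappers:

```c
#include <pthread.h>
#include <stdio.h>

typedef struct {
  int vgId;
  int refCount;
} VnodeObj;  // hypothetical stand-in for SVnodeObj

static pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;

// Bump the refcount under the read lock, as vmAcquireVnode does.
VnodeObj *acquire(VnodeObj *pVnode) {
  pthread_rwlock_rdlock(&lock);
  int ref = __atomic_add_fetch(&pVnode->refCount, 1, __ATOMIC_SEQ_CST);
  pthread_rwlock_unlock(&lock);
  printf("vgId:%d acquired, ref:%d\n", pVnode->vgId, ref);
  return pVnode;
}

void release(VnodeObj *pVnode) {
  pthread_rwlock_rdlock(&lock);
  int ref = __atomic_sub_fetch(&pVnode->refCount, 1, __ATOMIC_SEQ_CST);
  pthread_rwlock_unlock(&lock);
  printf("vgId:%d released, ref:%d\n", pVnode->vgId, ref);
}

int main(void) {
  VnodeObj v = {.vgId = 2, .refCount = 0};
  acquire(&v);
  release(&v);
  return 0;
}
```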
diff --git a/source/dnode/mgmt/mgmt_vnode/src/vmWorker.c b/source/dnode/mgmt/mgmt_vnode/src/vmWorker.c
index 6183794bdd9c87da091a64c5333ad42f70dd824e..bedf4733797fd72e73b0b25aec8a5b2add627d74 100644
--- a/source/dnode/mgmt/mgmt_vnode/src/vmWorker.c
+++ b/source/dnode/mgmt/mgmt_vnode/src/vmWorker.c
@@ -29,11 +29,11 @@ static inline void vmSendRsp(SRpcMsg *pMsg, int32_t code) {
   tmsgSendRsp(&rsp);
 }
 
-static void vmProcessQueue(SQueueInfo *pInfo, SRpcMsg *pMsg) {
+static void vmProcessMgmtQueue(SQueueInfo *pInfo, SRpcMsg *pMsg) {
   SVnodeMgmt *pMgmt = pInfo->ahandle;
   int32_t     code = -1;
 
-  dTrace("msg:%p, get from vnode queue, type:%s", pMsg, TMSG_INFO(pMsg->msgType));
+  dTrace("msg:%p, get from vnode-mgmt queue", pMsg);
   switch (pMsg->msgType) {
     case TDMT_MON_VM_INFO:
       code = vmProcessGetMonitorInfoReq(pMgmt, pMsg);
@@ -49,11 +49,14 @@ static void vmProcessQueue(SQueueInfo *pInfo, SRpcMsg *pMsg) {
       break;
     default:
       terrno = TSDB_CODE_MSG_NOT_PROCESSED;
-      dError("msg:%p, not processed in vnode queue", pMsg);
+      dError("msg:%p, not processed in vnode-mgmt queue", pMsg);
   }
 
   if (IsReq(pMsg)) {
-    if (code != 0 && terrno != 0) code = terrno;
+    if (code != 0 && terrno != 0) {
+      dError("msg:%p failed to process since %s", pMsg, terrstr());
+      code = terrno;
+    }
     vmSendRsp(pMsg, code);
   }
 
@@ -65,13 +68,15 @@ static void vmProcessQueue(SQueueInfo *pInfo, SRpcMsg *pMsg) {
 
 static void vmProcessQueryQueue(SQueueInfo *pInfo, SRpcMsg *pMsg) {
   SVnodeObj *pVnode = pInfo->ahandle;
 
-  dTrace("msg:%p, get from vnode-query queue", pMsg);
+  dTrace("vgId:%d, msg:%p get from vnode-query queue", pVnode->vgId, pMsg);
   int32_t code = vnodeProcessQueryMsg(pVnode->pImpl, pMsg);
   if (code != 0) {
     if (terrno != 0) code = terrno;
+    dError("vgId:%d, msg:%p failed to query since %s", pVnode->vgId, pMsg, terrstr());
     vmSendRsp(pMsg, code);
   }
-  dTrace("msg:%p, is freed, code:0x%x", pMsg, code);
+
+  dTrace("vgId:%d, msg:%p is freed, code:0x%x", pVnode->vgId, pMsg, code);
   rpcFreeCont(pMsg->pCont);
   taosFreeQitem(pMsg);
 }
@@ -79,63 +84,66 @@ static void vmProcessQueryQueue(SQueueInfo *pInfo, SRpcMsg *pMsg) {
 static void vmProcessFetchQueue(SQueueInfo *pInfo, SRpcMsg *pMsg) {
   SVnodeObj *pVnode = pInfo->ahandle;
 
-  dTrace("msg:%p, get from vnode-fetch queue", pMsg);
+  dTrace("vgId:%d, msg:%p get from vnode-fetch queue", pVnode->vgId, pMsg);
   int32_t code = vnodeProcessFetchMsg(pVnode->pImpl, pMsg, pInfo);
   if (code != 0) {
     if (terrno != 0) code = terrno;
+    dError("vgId:%d, msg:%p failed to fetch since %s", pVnode->vgId, pMsg, terrstr());
     vmSendRsp(pMsg, code);
   }
-  dTrace("msg:%p, is freed, code:0x%x", pMsg, code);
+
+  dTrace("vgId:%d, msg:%p is freed, code:0x%x", pVnode->vgId, pMsg, code);
   rpcFreeCont(pMsg->pCont);
   taosFreeQitem(pMsg);
 }
 
 static void vmProcessWriteQueue(SQueueInfo *pInfo, STaosQall *qall, int32_t numOfMsgs) {
+  int32_t    code = 0;
+  SRpcMsg   *pMsg = NULL;
   SVnodeObj *pVnode = pInfo->ahandle;
-  SArray    *pArray = taosArrayInit(numOfMsgs, sizeof(SRpcMsg *));
-  if (pArray == NULL) {
-    dError("failed to process %d msgs in write-queue since %s", numOfMsgs, terrstr());
-    return;
-  }
+  int64_t    sync = vnodeGetSyncHandle(pVnode->pImpl);
+  SArray    *pArray = taosArrayInit(numOfMsgs, sizeof(SRpcMsg **));
 
-  for (int32_t i = 0; i < numOfMsgs; ++i) {
-    SRpcMsg *pMsg = NULL;
+  for (int32_t m = 0; m < numOfMsgs; m++) {
     if (taosGetQitem(qall, (void **)&pMsg) == 0) continue;
+    dTrace("vgId:%d, msg:%p get from vnode-write queue", pVnode->vgId, pMsg);
 
-    dTrace("msg:%p, get from vnode-write queue", pMsg);
     if (taosArrayPush(pArray, &pMsg) == NULL) {
-      dTrace("msg:%p, failed to process since %s", pMsg, terrstr());
+      dError("vgId:%d, failed to push msg:%p to vnode-write array", pVnode->vgId, pMsg);
       vmSendRsp(pMsg, TSDB_CODE_OUT_OF_MEMORY);
     }
   }
 
-  for (int i = 0; i < taosArrayGetSize(pArray); i++) {
-    SRpcMsg *pMsg = *(SRpcMsg **)taosArrayGet(pArray, i);
-    SRpcMsg  rsp = {.info = pMsg->info};
+  for (int32_t m = 0; m < taosArrayGetSize(pArray); m++) {
+    pMsg = *(SRpcMsg **)taosArrayGet(pArray, m);
+    code = vnodePreprocessReq(pVnode->pImpl, pMsg);
 
-    vnodePreprocessReq(pVnode->pImpl, pMsg);
+    if (code == TSDB_CODE_ACTION_IN_PROGRESS) continue;
+    if (code != 0) {
+      dError("vgId:%d, msg:%p failed to write since %s", pVnode->vgId, pMsg, tstrerror(code));
+      vmSendRsp(pMsg, code);
+      continue;
+    }
 
-    int32_t ret = syncPropose(vnodeGetSyncHandle(pVnode->pImpl), pMsg, false);
-    if (ret == TAOS_SYNC_PROPOSE_NOT_LEADER) {
-      dTrace("msg:%p, is redirect since not leader, vgId:%d ", pMsg, pVnode->vgId);
-      rsp.code = TSDB_CODE_RPC_REDIRECT;
-      SEpSet newEpSet;
-      syncGetEpSet(vnodeGetSyncHandle(pVnode->pImpl), &newEpSet);
+    code = syncPropose(sync, pMsg, false);
+    if (code == TAOS_SYNC_PROPOSE_SUCCESS) {
+      continue;
+    } else if (code == TAOS_SYNC_PROPOSE_NOT_LEADER) {
+      dTrace("vgId:%d, msg:%p is redirect since not leader", pVnode->vgId, pMsg);
+      SEpSet newEpSet = {0};
+      syncGetEpSet(sync, &newEpSet);
       newEpSet.inUse = (newEpSet.inUse + 1) % newEpSet.numOfEps;
+      SRpcMsg rsp = {.code = TSDB_CODE_RPC_REDIRECT, .info = pMsg->info};
       tmsgSendRedirectRsp(&rsp, &newEpSet);
-    } else if (ret == TAOS_SYNC_PROPOSE_OTHER_ERROR) {
-      rsp.code = TSDB_CODE_SYN_INTERNAL_ERROR;
-      tmsgSendRsp(&rsp);
-    } else if (ret == TAOS_SYNC_PROPOSE_SUCCESS) {
-      // send response in applyQ
     } else {
-      assert(0);
+      dError("vgId:%d, msg:%p failed to write since %s", pVnode->vgId, pMsg, tstrerror(code));
+      vmSendRsp(pMsg, code);
    }
  }
 
   for (int32_t i = 0; i < numOfMsgs; i++) {
-    SRpcMsg *pMsg = *(SRpcMsg **)taosArrayGet(pArray, i);
-    dTrace("msg:%p, is freed", pMsg);
+    pMsg = *(SRpcMsg **)taosArrayGet(pArray, i);
+    dTrace("vgId:%d, msg:%p is freed", pVnode->vgId, pMsg);
     rpcFreeCont(pMsg->pCont);
     taosFreeQitem(pMsg);
   }
@@ -145,10 +153,11 @@
 static void vmProcessApplyQueue(SQueueInfo *pInfo, STaosQall *qall, int32_t numOfMsgs) {
   SVnodeObj *pVnode = pInfo->ahandle;
+  SRpcMsg   *pMsg = NULL;
 
   for (int32_t i = 0; i < numOfMsgs; ++i) {
-    SRpcMsg *pMsg = NULL;
-    taosGetQitem(qall, (void **)&pMsg);
+    if (taosGetQitem(qall, (void **)&pMsg) == 0) continue;
+    dTrace("vgId:%d, msg:%p get from vnode-apply queue", pVnode->vgId, pMsg);
 
     // init response rpc msg
     SRpcMsg rsp = {0};
@@ -163,7 +172,7 @@ static void vmProcessApplyQueue(SQueueInfo *pInfo, STaosQall *qall, int32_t numO
     // apply data into tsdb
     if (vnodeProcessWriteReq(pVnode->pImpl, &originalRpcMsg, pSyncApplyMsg->fsmMeta.index, &rsp) < 0) {
       rsp.code = terrno;
-      dTrace("msg:%p, process write error since %s", pMsg, terrstr());
+      dError("vgId:%d, msg:%p failed to apply since %s", pVnode->vgId, pMsg, terrstr());
     }
 
     syncApplyMsgDestroy(pSyncApplyMsg);
@@ -175,6 +184,7 @@ static void vmProcessApplyQueue(SQueueInfo *pInfo, STaosQall *qall, int32_t numO
       tmsgSendRsp(&rsp);
     }
 
+    dTrace("vgId:%d, msg:%p is freed, code:0x%x", pVnode->vgId, pMsg, rsp.code);
     rpcFreeCont(pMsg->pCont);
     taosFreeQitem(pMsg);
   }
@@ -182,23 +192,22 @@
 static void vmProcessSyncQueue(SQueueInfo *pInfo, STaosQall *qall, int32_t numOfMsgs) {
   SVnodeObj *pVnode = pInfo->ahandle;
+  SRpcMsg   *pMsg = NULL;
 
   for (int32_t i = 0; i < numOfMsgs; ++i) {
-    SRpcMsg *pMsg = NULL;
-    taosGetQitem(qall, (void **)&pMsg);
+    if (taosGetQitem(qall, (void **)&pMsg) == 0) continue;
+    dTrace("vgId:%d, msg:%p get from vnode-sync queue", pVnode->vgId, pMsg);
 
     int32_t code = vnodeProcessSyncReq(pVnode->pImpl, pMsg, NULL);
     if (code != 0) {
+      dError("vgId:%d, msg:%p failed to sync since %s", pVnode->vgId, pMsg, terrstr());
       if (pMsg->info.handle != NULL) {
-        SRpcMsg rsp = {
-            .code = (terrno < 0) ? terrno : code,
-            .info = pMsg->info,
-        };
-        dTrace("msg:%p, failed to process sync queue since %s", pMsg, terrstr());
-        tmsgSendRsp(&rsp);
+        if (terrno != 0) code = terrno;
+        vmSendRsp(pMsg, code);
       }
     }
 
+    dTrace("vgId:%d, msg:%p is freed, code:0x%x", pVnode->vgId, pMsg, code);
     rpcFreeCont(pMsg->pCont);
     taosFreeQitem(pMsg);
   }
@@ -206,24 +215,26 @@ static void vmProcessSyncQueue(SQueueInfo *pInfo, STaosQall *qall, int32_t numOf
 static void vmProcessMergeQueue(SQueueInfo *pInfo, STaosQall *qall, int32_t numOfMsgs) {
   SVnodeObj *pVnode = pInfo->ahandle;
+  SRpcMsg   *pMsg = NULL;
 
   for (int32_t i = 0; i < numOfMsgs; ++i) {
-    SRpcMsg *pMsg = NULL;
-    taosGetQitem(qall, (void **)&pMsg);
+    if (taosGetQitem(qall, (void **)&pMsg) == 0) continue;
+    dTrace("vgId:%d, msg:%p get from vnode-merge queue", pVnode->vgId, pMsg);
 
-    dTrace("msg:%p, get from vnode-merge queue", pMsg);
     int32_t code = vnodeProcessFetchMsg(pVnode->pImpl, pMsg, pInfo);
     if (code != 0) {
+      dError("vgId:%d, msg:%p failed to merge since %s", pVnode->vgId, pMsg, terrstr());
      if (terrno != 0) code = terrno;
      vmSendRsp(pMsg, code);
    }
 
+    dTrace("msg:%p, is freed, code:0x%x", pMsg, code);
     rpcFreeCont(pMsg->pCont);
     taosFreeQitem(pMsg);
   }
 }
 
-static int32_t vmPutNodeMsgToQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg, EQueueType qtype) {
+static int32_t vmPutMsgToQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg, EQueueType qtype) {
   SMsgHead *pHead = pMsg->pCont;
   int32_t   code = 0;
@@ -232,31 +243,36 @@ static int32_t vmPutNodeMsgToQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg, EQueueType
 
   SVnodeObj *pVnode = vmAcquireVnode(pMgmt, pHead->vgId);
   if (pVnode == NULL) {
-    dError("vgId:%d, failed to put msg:%p into vnode-queue since %s", pHead->vgId, pMsg, terrstr());
+    dError("vgId:%d, failed to put msg:%p into vnode queue since %s, type:%s", pHead->vgId, pMsg, terrstr(),
+           TMSG_INFO(pMsg->msgType));
     return terrno != 0 ? terrno : -1;
   }
 
   switch (qtype) {
     case QUERY_QUEUE:
-      dTrace("msg:%p, put into vnode-query worker, type:%s", pMsg, TMSG_INFO(pMsg->msgType));
+      dTrace("vgId:%d, msg:%p put into vnode-query queue", pVnode->vgId, pMsg);
       taosWriteQitem(pVnode->pQueryQ, pMsg);
       break;
     case FETCH_QUEUE:
-      dTrace("msg:%p, put into vnode-fetch worker, type:%s", pMsg, TMSG_INFO(pMsg->msgType));
+      dTrace("vgId:%d, msg:%p put into vnode-fetch queue", pVnode->vgId, pMsg);
       taosWriteQitem(pVnode->pFetchQ, pMsg);
       break;
     case WRITE_QUEUE:
-      dTrace("msg:%p, put into vnode-write worker, type:%s", pMsg, TMSG_INFO(pMsg->msgType));
+      dTrace("vgId:%d, msg:%p put into vnode-write queue", pVnode->vgId, pMsg);
       taosWriteQitem(pVnode->pWriteQ, pMsg);
       break;
     case SYNC_QUEUE:
-      dTrace("msg:%p, put into vnode-sync worker, type:%s", pMsg, TMSG_INFO(pMsg->msgType));
+      dTrace("vgId:%d, msg:%p put into vnode-sync queue", pVnode->vgId, pMsg);
       taosWriteQitem(pVnode->pSyncQ, pMsg);
       break;
     case MERGE_QUEUE:
-      dTrace("msg:%p, put into vnode-merge worker, type:%s", pMsg, TMSG_INFO(pMsg->msgType));
+      dTrace("vgId:%d, msg:%p put into vnode-merge queue", pVnode->vgId, pMsg);
       taosWriteQitem(pVnode->pMergeQ, pMsg);
       break;
+    case APPLY_QUEUE:
+      dTrace("vgId:%d, msg:%p put into vnode-apply queue", pVnode->vgId, pMsg);
+      taosWriteQitem(pVnode->pApplyQ, pMsg);
+      break;
     default:
       code = -1;
       terrno = TSDB_CODE_INVALID_PARA;
@@ -267,110 +283,39 @@ static int32_t vmPutNodeMsgToQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg, EQueueType
   return code;
 }
 
-int32_t vmPutNodeMsgToSyncQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) {
-  return vmPutNodeMsgToQueue(pMgmt, pMsg, SYNC_QUEUE);
-}
+int32_t vmPutMsgToSyncQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) { return vmPutMsgToQueue(pMgmt, pMsg, SYNC_QUEUE); }
 
-int32_t vmPutNodeMsgToWriteQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) {
-  return vmPutNodeMsgToQueue(pMgmt, pMsg, WRITE_QUEUE);
-}
+int32_t vmPutMsgToWriteQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) { return vmPutMsgToQueue(pMgmt, pMsg, WRITE_QUEUE); }
 
-int32_t vmPutNodeMsgToQueryQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) {
-  return vmPutNodeMsgToQueue(pMgmt, pMsg, QUERY_QUEUE);
-}
+int32_t vmPutMsgToQueryQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) { return vmPutMsgToQueue(pMgmt, pMsg, QUERY_QUEUE); }
 
-int32_t vmPutNodeMsgToFetchQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) {
-  return vmPutNodeMsgToQueue(pMgmt, pMsg, FETCH_QUEUE);
-}
+int32_t vmPutMsgToFetchQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) { return vmPutMsgToQueue(pMgmt, pMsg, FETCH_QUEUE); }
 
-int32_t vmPutNodeMsgToMergeQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) {
-  return vmPutNodeMsgToQueue(pMgmt, pMsg, MERGE_QUEUE);
-}
+int32_t vmPutMsgToMergeQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) { return vmPutMsgToQueue(pMgmt, pMsg, MERGE_QUEUE); }
 
-int32_t vmPutNodeMsgToMgmtQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) {
-  SSingleWorker *pWorker = &pMgmt->mgmtWorker;
-  dTrace("msg:%p, put into vnode-mgmt worker, type:%s", pMsg, TMSG_INFO(pMsg->msgType));
-  taosWriteQitem(pWorker->queue, pMsg);
+int32_t vmPutMsgToMgmtQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) {
+  dTrace("msg:%p, put into vnode-mgmt queue", pMsg);
+  taosWriteQitem(pMgmt->mgmtWorker.queue, pMsg);
   return 0;
 }
 
-int32_t vmPutNodeMsgToMonitorQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) {
-  SSingleWorker *pWorker = &pMgmt->monitorWorker;
-  dTrace("msg:%p, put into vnode-monitor worker, type:%s", pMsg, TMSG_INFO(pMsg->msgType));
-  taosWriteQitem(pWorker->queue, pMsg);
+int32_t vmPutMsgToMonitorQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) {
+  dTrace("msg:%p, put into vnode-monitor queue", pMsg);
+  taosWriteQitem(pMgmt->monitorWorker.queue, pMsg);
   return 0;
 }
 
-static int32_t vmPutRpcMsgToQueue(SVnodeMgmt *pMgmt, SRpcMsg *pRpc, EQueueType qtype) {
-  SMsgHead  *pHead = pRpc->pCont;
-  SVnodeObj *pVnode = vmAcquireVnode(pMgmt, pHead->vgId);
-  if (pVnode == NULL) return -1;
-
+int32_t vmPutRpcMsgToQueue(SVnodeMgmt *pMgmt, EQueueType qtype, SRpcMsg *pRpc) {
   SRpcMsg *pMsg = taosAllocateQitem(sizeof(SRpcMsg), RPC_QITEM);
-  int32_t  code = 0;
-
-  if (pMsg == NULL) {
-    rpcFreeCont(pRpc->pCont);
-    pRpc->pCont = NULL;
-    code = -1;
-  } else {
-    memcpy(pMsg, pRpc, sizeof(SRpcMsg));
-    switch (qtype) {
-      case WRITE_QUEUE:
-        dTrace("msg:%p, create and put into vnode-write worker, type:%s", pMsg, TMSG_INFO(pRpc->msgType));
-        taosWriteQitem(pVnode->pWriteQ, pMsg);
-        break;
-      case QUERY_QUEUE:
-        dTrace("msg:%p, create and put into vnode-query queue, type:%s", pMsg, TMSG_INFO(pRpc->msgType));
-        taosWriteQitem(pVnode->pQueryQ, pMsg);
-        break;
-      case FETCH_QUEUE:
-        dTrace("msg:%p, create and put into vnode-fetch queue, type:%s", pMsg, TMSG_INFO(pRpc->msgType));
-        taosWriteQitem(pVnode->pFetchQ, pMsg);
-        break;
-      case APPLY_QUEUE:
-        dTrace("msg:%p, create and put into vnode-apply queue, type:%s", pMsg, TMSG_INFO(pRpc->msgType));
-        taosWriteQitem(pVnode->pApplyQ, pMsg);
-        break;
-      case MERGE_QUEUE:
-        dTrace("msg:%p, create and put into vnode-merge queue, type:%s", pMsg, TMSG_INFO(pRpc->msgType));
-        taosWriteQitem(pVnode->pMergeQ, pMsg);
-        break;
-      case SYNC_QUEUE:
-        dTrace("msg:%p, create and put into vnode-sync queue, type:%s", pMsg, TMSG_INFO(pRpc->msgType));
-        taosWriteQitem(pVnode->pSyncQ, pMsg);
-        break;
-      default:
-        code = -1;
-        terrno = TSDB_CODE_INVALID_PARA;
-        break;
-    }
-  }
-
-  vmReleaseVnode(pMgmt, pVnode);
-  return code;
-}
-
-int32_t vmPutRpcMsgToWriteQueue(SVnodeMgmt *pMgmt, SRpcMsg *pRpc) {
-  return vmPutRpcMsgToQueue(pMgmt, pRpc, WRITE_QUEUE);
-}
-
-int32_t vmPutRpcMsgToSyncQueue(SVnodeMgmt *pMgmt, SRpcMsg *pRpc) { return vmPutRpcMsgToQueue(pMgmt, pRpc, SYNC_QUEUE); }
-
-int32_t vmPutRpcMsgToApplyQueue(SVnodeMgmt *pMgmt, SRpcMsg *pRpc) {
-  return vmPutRpcMsgToQueue(pMgmt, pRpc, APPLY_QUEUE);
-}
+  if (pMsg == NULL) return -1;
 
-int32_t vmPutRpcMsgToQueryQueue(SVnodeMgmt *pMgmt, SRpcMsg *pRpc) {
-  return vmPutRpcMsgToQueue(pMgmt, pRpc, QUERY_QUEUE);
-}
-
-int32_t vmPutRpcMsgToFetchQueue(SVnodeMgmt *pMgmt, SRpcMsg *pRpc) {
-  return vmPutRpcMsgToQueue(pMgmt, pRpc, FETCH_QUEUE);
-}
+  SMsgHead *pHead = pRpc->pCont;
+  dTrace("vgId:%d, msg:%p is created, type:%s", pHead->vgId, pMsg, TMSG_INFO(pMsg->msgType));
 
-int32_t vmPutRpcMsgToMergeQueue(SVnodeMgmt *pMgmt, SRpcMsg *pRpc) {
-  return vmPutRpcMsgToQueue(pMgmt, pRpc, MERGE_QUEUE);
+  pHead->contLen = htonl(pHead->contLen);
+  pHead->vgId = htonl(pHead->vgId);
+  memcpy(pMsg, pRpc, sizeof(SRpcMsg));
+  return vmPutMsgToQueue(pMgmt, pMsg, qtype);
 }
 
 int32_t vmGetQueueSize(SVnodeMgmt *pMgmt, int32_t vgId, EQueueType qtype) {
@@ -466,29 +411,23 @@ int32_t vmStartWorker(SVnodeMgmt *pMgmt) {
   pMPool->max = tsNumOfVnodeMergeThreads;
   if (tWWorkerInit(pMPool) != 0) return -1;
 
-  SSingleWorkerCfg cfg = {
+  SSingleWorkerCfg mgmtCfg = {
       .min = 1,
      .max = 1,
      .name = "vnode-mgmt",
-      .fp = (FItem)vmProcessQueue,
+      .fp = (FItem)vmProcessMgmtQueue,
      .param = pMgmt,
  };
-  if (tSingleWorkerInit(&pMgmt->mgmtWorker, &cfg) != 0) {
-    dError("failed to start vnode-mgmt worker since %s", terrstr());
-    return -1;
-  }
+  if (tSingleWorkerInit(&pMgmt->mgmtWorker, &mgmtCfg) != 0) return -1;
 
-  SSingleWorkerCfg mCfg = {
+  SSingleWorkerCfg monitorCfg = {
      .min = 1,
      .max = 1,
      .name = "vnode-monitor",
-      .fp = (FItem)vmProcessQueue,
+      .fp = (FItem)vmProcessMgmtQueue,
      .param = pMgmt,
  };
-  if (tSingleWorkerInit(&pMgmt->monitorWorker, &mCfg) != 0) {
-    dError("failed to start vnode-monitor worker since %s", terrstr());
-    return -1;
-  }
+  if (tSingleWorkerInit(&pMgmt->monitorWorker, &monitorCfg) != 0) return -1;
 
   dDebug("vnode workers are initialized");
   return 0;
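The reworked vmProcessWriteQueue above drains a batch, preprocesses each message, hands it to the replication layer, and redirects the client when this vnode is not the leader; responses for successful proposals arrive later via the apply queue. A condensed sketch of that control flow, with hypothetical result codes and stub helpers standing in for the real syncPropose/vnodePreprocessReq API:

```c
#include <stdio.h>

// Hypothetical result codes mirroring the TAOS_SYNC_PROPOSE_* family.
enum { PROPOSE_SUCCESS = 0, PROPOSE_NOT_LEADER = 1, PROPOSE_ERROR = 2 };

typedef struct { int id; } Msg;

static int preprocess(Msg *m) { (void)m; return 0; }       // cf. vnodePreprocessReq
static int propose(Msg *m, int leader) {                   // cf. syncPropose
  (void)m;
  return leader ? PROPOSE_SUCCESS : PROPOSE_NOT_LEADER;
}
static void sendRedirect(Msg *m) { printf("msg %d redirected\n", m->id); }
static void sendError(Msg *m)    { printf("msg %d failed\n", m->id); }

// Process one drained batch from the write queue.
void processWriteBatch(Msg *batch, int n, int leader) {
  for (int i = 0; i < n; i++) {
    if (preprocess(&batch[i]) != 0) { sendError(&batch[i]); continue; }
    switch (propose(&batch[i], leader)) {
      case PROPOSE_SUCCESS:    break;                      // answered from the apply queue
      case PROPOSE_NOT_LEADER: sendRedirect(&batch[i]); break;
      default:                 sendError(&batch[i]); break;
    }
  }
}

int main(void) {
  Msg batch[2] = {{1}, {2}};
  processWriteBatch(batch, 2, /*leader=*/0);  // a follower redirects both messages
  return 0;
}
```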
"vnode-monitor", - .fp = (FItem)vmProcessQueue, + .fp = (FItem)vmProcessMgmtQueue, .param = pMgmt, }; - if (tSingleWorkerInit(&pMgmt->monitorWorker, &mCfg) != 0) { - dError("failed to start vnode-monitor worker since %s", terrstr()); - return -1; - } + if (tSingleWorkerInit(&pMgmt->monitorWorker, &monitorCfg) != 0) return -1; dDebug("vnode workers are initialized"); return 0; diff --git a/source/dnode/mgmt/node_mgmt/inc/dmMgmt.h b/source/dnode/mgmt/node_mgmt/inc/dmMgmt.h index 27f1140f2379f2db9a5856ff72ad0fbc0f42d9f2..adde0557965fb7651c66a8b4791d4a671db91201 100644 --- a/source/dnode/mgmt/node_mgmt/inc/dmMgmt.h +++ b/source/dnode/mgmt/node_mgmt/inc/dmMgmt.h @@ -168,6 +168,7 @@ int32_t dmProcessNodeMsg(SMgmtWrapper *pWrapper, SRpcMsg *pMsg); void dmSendMonitorReport(); void dmGetVnodeLoads(SMonVloadInfo *pInfo); void dmGetMnodeLoads(SMonMloadInfo *pInfo); +void dmGetQnodeLoads(SQnodeLoad *pInfo); #ifdef __cplusplus } diff --git a/source/dnode/mgmt/node_mgmt/inc/dmNodes.h b/source/dnode/mgmt/node_mgmt/inc/dmNodes.h index 3ac71de530d4dd9dad6ccd6b29b7789f56a85b1e..8c2d57808fc5d8e29c4bef5079f504c8a9e39802 100644 --- a/source/dnode/mgmt/node_mgmt/inc/dmNodes.h +++ b/source/dnode/mgmt/node_mgmt/inc/dmNodes.h @@ -37,6 +37,7 @@ void bmGetMonitorInfo(void *pMgmt, SMonBmInfo *pInfo); void vmGetVnodeLoads(void *pMgmt, SMonVloadInfo *pInfo); void mmGetMnodeLoads(void *pMgmt, SMonMloadInfo *pInfo); +void qmGetQnodeLoads(void *pMgmt, SQnodeLoad *pInfo); #ifdef __cplusplus } diff --git a/source/dnode/mgmt/node_mgmt/src/dmEnv.c b/source/dnode/mgmt/node_mgmt/src/dmEnv.c index 07d0c43360a5de639f5af2b64208d13c79192687..528beb280bfd05aa4030a3351aaf278f31b96e17 100644 --- a/source/dnode/mgmt/node_mgmt/src/dmEnv.c +++ b/source/dnode/mgmt/node_mgmt/src/dmEnv.c @@ -50,26 +50,26 @@ static int32_t dmInitMonitor() { } int32_t dmInit(int8_t rtype) { - dInfo("start to init env"); + dInfo("start to init dnode env"); if (dmCheckRepeatInit(dmInstance()) != 0) return -1; if (dmInitSystem() != 0) return -1; if (dmInitMonitor() != 0) return -1; if (dmInitDnode(dmInstance(), rtype) != 0) return -1; - dInfo("env is initialized"); + dInfo("dnode env is initialized"); return 0; } static int32_t dmCheckRepeatCleanup(SDnode *pDnode) { if (atomic_val_compare_exchange_8(&pDnode->once, DND_ENV_READY, DND_ENV_CLEANUP) != DND_ENV_READY) { - dError("env is already cleaned up"); + dError("dnode env is already cleaned up"); return -1; } return 0; } void dmCleanup() { - dDebug("start to cleanup env"); + dDebug("start to cleanup dnode env"); SDnode *pDnode = dmInstance(); if (dmCheckRepeatCleanup(pDnode) != 0) return; dmCleanupDnode(pDnode); @@ -79,7 +79,7 @@ void dmCleanup() { udfcClose(); udfStopUdfd(); taosStopCacheRefreshWorker(); - dInfo("env is cleaned up"); + dInfo("dnode env is cleaned up"); taosCloseLog(); taosCleanupCfg(); @@ -178,6 +178,7 @@ SMgmtInputOpt dmBuildMgmtInputOpt(SMgmtWrapper *pWrapper) { .sendMonitorReportFp = dmSendMonitorReport, .getVnodeLoadsFp = dmGetVnodeLoads, .getMnodeLoadsFp = dmGetMnodeLoads, + .getQnodeLoadsFp = dmGetQnodeLoads, }; opt.msgCb = dmGetMsgcb(pWrapper->pDnode); diff --git a/source/dnode/mgmt/node_mgmt/src/dmMgmt.c b/source/dnode/mgmt/node_mgmt/src/dmMgmt.c index 787f5e50190fb099d5998d4f604decd8b17feb68..ee27f27f06fe0ce502fdfd729a2b573ddb221c37 100644 --- a/source/dnode/mgmt/node_mgmt/src/dmMgmt.c +++ b/source/dnode/mgmt/node_mgmt/src/dmMgmt.c @@ -229,7 +229,7 @@ SMgmtWrapper *dmAcquireWrapper(SDnode *pDnode, EDndNodeType ntype) { taosThreadRwlockRdlock(&pWrapper->lock); if (pWrapper->deployed) { 
int32_t refCount = atomic_add_fetch_32(&pWrapper->refCount, 1); - dTrace("node:%s, is acquired, ref:%d", pWrapper->name, refCount); + // dTrace("node:%s, is acquired, ref:%d", pWrapper->name, refCount); } else { terrno = TSDB_CODE_NODE_NOT_DEPLOYED; pRetWrapper = NULL; @@ -245,7 +245,7 @@ int32_t dmMarkWrapper(SMgmtWrapper *pWrapper) { taosThreadRwlockRdlock(&pWrapper->lock); if (pWrapper->deployed || (InParentProc(pWrapper) && pWrapper->required)) { int32_t refCount = atomic_add_fetch_32(&pWrapper->refCount, 1); - dTrace("node:%s, is marked, ref:%d", pWrapper->name, refCount); + // dTrace("node:%s, is marked, ref:%d", pWrapper->name, refCount); } else { terrno = TSDB_CODE_NODE_NOT_DEPLOYED; code = -1; @@ -261,7 +261,7 @@ void dmReleaseWrapper(SMgmtWrapper *pWrapper) { taosThreadRwlockRdlock(&pWrapper->lock); int32_t refCount = atomic_sub_fetch_32(&pWrapper->refCount, 1); taosThreadRwlockUnlock(&pWrapper->lock); - dTrace("node:%s, is released, ref:%d", pWrapper->name, refCount); + // dTrace("node:%s, is released, ref:%d", pWrapper->name, refCount); } static void dmGetServerStartupStatus(SDnode *pDnode, SServerStatusRsp *pStatus) { diff --git a/source/dnode/mgmt/node_mgmt/src/dmMonitor.c b/source/dnode/mgmt/node_mgmt/src/dmMonitor.c index 0b74d865fd5680311c483003a58da1785813a275..ecad390ef94a635fdeed8256004fce9978fde822 100644 --- a/source/dnode/mgmt/node_mgmt/src/dmMonitor.c +++ b/source/dnode/mgmt/node_mgmt/src/dmMonitor.c @@ -170,3 +170,17 @@ void dmGetMnodeLoads(SMonMloadInfo *pInfo) { dmReleaseWrapper(pWrapper); } } + +void dmGetQnodeLoads(SQnodeLoad *pInfo) { + SDnode *pDnode = dmInstance(); + SMgmtWrapper *pWrapper = &pDnode->wrappers[QNODE]; + if (dmMarkWrapper(pWrapper) == 0) { + if (tsMultiProcess) { + dmSendLocalRecv(pDnode, TDMT_MON_QM_LOAD, tDeserializeSQnodeLoad, pInfo); + } else if (pWrapper->pMgmt != NULL) { + qmGetQnodeLoads(pWrapper->pMgmt, pInfo); + } + dmReleaseWrapper(pWrapper); + } +} + diff --git a/source/dnode/mgmt/node_mgmt/src/dmTransport.c b/source/dnode/mgmt/node_mgmt/src/dmTransport.c index 6fbfae8b416efc68a0be9b101f1308aeba723752..8b939d15ce35e4a8ccb9f032025250476a3b8a82 100644 --- a/source/dnode/mgmt/node_mgmt/src/dmTransport.c +++ b/source/dnode/mgmt/node_mgmt/src/dmTransport.c @@ -62,8 +62,10 @@ static void dmProcessRpcMsg(SDnode *pDnode, SRpcMsg *pRpc, SEpSet *pEpSet) { dmProcessNetTestReq(pDnode, pRpc); return; } else if (pRpc->msgType == TDMT_MND_SYSTABLE_RETRIEVE_RSP || pRpc->msgType == TDMT_VND_FETCH_RSP) { - qWorkerProcessFetchRsp(NULL, NULL, pRpc); + qWorkerProcessFetchRsp(NULL, NULL, pRpc, 0); return; + } else if (pRpc->msgType == TDMT_MND_STATUS_RSP && pEpSet != NULL) { + dmSetMnodeEpSet(&pDnode->data, pEpSet); } else { } @@ -115,6 +117,7 @@ static void dmProcessRpcMsg(SDnode *pDnode, SRpcMsg *pRpc, SEpSet *pEpSet) { if (pMsg == NULL) { goto _OVER; } + dTrace("msg:%p, is created, type:%s", pMsg, TMSG_INFO(pRpc->msgType)); if (dmBuildNodeMsg(pMsg, pRpc) != 0) { goto _OVER; @@ -128,7 +131,7 @@ static void dmProcessRpcMsg(SDnode *pDnode, SRpcMsg *pRpc, SEpSet *pEpSet) { _OVER: if (code != 0) { - dError("msg:%p, failed to process since %s", pMsg, terrstr()); + dTrace("msg:%p, failed to process since %s, type:%s", pMsg, terrstr(), TMSG_INFO(pRpc->msgType)); if (terrno != 0) code = terrno; if (IsReq(pRpc)) { @@ -204,29 +207,28 @@ static inline void dmSendRsp(SRpcMsg *pMsg) { } static void dmBuildMnodeRedirectRsp(SDnode *pDnode, SRpcMsg *pMsg) { - SMEpSet msg = {0}; - dmGetMnodeEpSetForRedirect(&pDnode->data, pMsg, &msg.epSet); + SEpSet epSet = {0}; + 
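The dmMgmt.h/dmNodes.h/dmEnv.c/dmMonitor.c hunks together thread a new qnode-load callback from the dnode layer down into each node module via the options struct. A minimal sketch of that function-pointer plumbing, with a hypothetical load struct in place of SQnodeLoad:

```c
#include <stdio.h>

// Hypothetical stand-in for SQnodeLoad.
typedef struct { long queries; long fetches; } QnodeLoad;
typedef void (*GetQnodeLoadsFp)(QnodeLoad *pInfo);

// The options struct carries callbacks from the dnode into node modules,
// mirroring how SMgmtInputOpt gains a getQnodeLoadsFp field in this patch.
typedef struct { GetQnodeLoadsFp getQnodeLoadsFp; } MgmtInputOpt;

static void getQnodeLoads(QnodeLoad *pInfo) {  // cf. dmGetQnodeLoads
  pInfo->queries = 5;
  pInfo->fetches = 3;
}

int main(void) {
  MgmtInputOpt opt = {.getQnodeLoadsFp = getQnodeLoads};
  QnodeLoad load = {0};
  opt.getQnodeLoadsFp(&load);  // the monitor path pulls loads through the callback
  printf("queries:%ld fetches:%ld\n", load.queries, load.fetches);
  return 0;
}
```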
diff --git a/source/dnode/mgmt/node_mgmt/src/dmTransport.c b/source/dnode/mgmt/node_mgmt/src/dmTransport.c
index 6fbfae8b416efc68a0be9b101f1308aeba723752..8b939d15ce35e4a8ccb9f032025250476a3b8a82 100644
--- a/source/dnode/mgmt/node_mgmt/src/dmTransport.c
+++ b/source/dnode/mgmt/node_mgmt/src/dmTransport.c
@@ -62,8 +62,10 @@ static void dmProcessRpcMsg(SDnode *pDnode, SRpcMsg *pRpc, SEpSet *pEpSet) {
     dmProcessNetTestReq(pDnode, pRpc);
     return;
   } else if (pRpc->msgType == TDMT_MND_SYSTABLE_RETRIEVE_RSP || pRpc->msgType == TDMT_VND_FETCH_RSP) {
-    qWorkerProcessFetchRsp(NULL, NULL, pRpc);
+    qWorkerProcessFetchRsp(NULL, NULL, pRpc, 0);
     return;
+  } else if (pRpc->msgType == TDMT_MND_STATUS_RSP && pEpSet != NULL) {
+    dmSetMnodeEpSet(&pDnode->data, pEpSet);
   } else {
   }
 
@@ -115,6 +117,7 @@ static void dmProcessRpcMsg(SDnode *pDnode, SRpcMsg *pRpc, SEpSet *pEpSet) {
   if (pMsg == NULL) {
     goto _OVER;
   }
+  dTrace("msg:%p, is created, type:%s", pMsg, TMSG_INFO(pRpc->msgType));
 
   if (dmBuildNodeMsg(pMsg, pRpc) != 0) {
     goto _OVER;
@@ -128,7 +131,7 @@ static void dmProcessRpcMsg(SDnode *pDnode, SRpcMsg *pRpc, SEpSet *pEpSet) {
 
 _OVER:
   if (code != 0) {
-    dError("msg:%p, failed to process since %s", pMsg, terrstr());
+    dTrace("msg:%p, failed to process since %s, type:%s", pMsg, terrstr(), TMSG_INFO(pRpc->msgType));
     if (terrno != 0) code = terrno;
 
     if (IsReq(pRpc)) {
@@ -204,29 +207,28 @@ static inline void dmSendRsp(SRpcMsg *pMsg) {
 }
 
 static void dmBuildMnodeRedirectRsp(SDnode *pDnode, SRpcMsg *pMsg) {
-  SMEpSet msg = {0};
-  dmGetMnodeEpSetForRedirect(&pDnode->data, pMsg, &msg.epSet);
+  SEpSet epSet = {0};
+  dmGetMnodeEpSetForRedirect(&pDnode->data, pMsg, &epSet);
 
-  int32_t contLen = tSerializeSMEpSet(NULL, 0, &msg);
+  int32_t contLen = tSerializeSEpSet(NULL, 0, &epSet);
   pMsg->pCont = rpcMallocCont(contLen);
   if (pMsg->pCont == NULL) {
     pMsg->code = TSDB_CODE_OUT_OF_MEMORY;
   } else {
-    tSerializeSMEpSet(pMsg->pCont, contLen, &msg);
+    tSerializeSEpSet(pMsg->pCont, contLen, &epSet);
     pMsg->contLen = contLen;
   }
 }
 
 static inline void dmSendRedirectRsp(SRpcMsg *pMsg, const SEpSet *pNewEpSet) {
   SRpcMsg rsp = {.code = TSDB_CODE_RPC_REDIRECT, .info = pMsg->info};
-  SMEpSet msg = {.epSet = *pNewEpSet};
-  int32_t contLen = tSerializeSMEpSet(NULL, 0, &msg);
+  int32_t contLen = tSerializeSEpSet(NULL, 0, pNewEpSet);
   rsp.pCont = rpcMallocCont(contLen);
   if (rsp.pCont == NULL) {
     terrno = TSDB_CODE_OUT_OF_MEMORY;
   } else {
-    tSerializeSMEpSet(rsp.pCont, contLen, &msg);
+    tSerializeSEpSet(rsp.pCont, contLen, pNewEpSet);
     rsp.contLen = contLen;
   }
   dmSendRsp(&rsp);
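The tSerializeSEpSet calls above use a common two-pass idiom: call the serializer with a NULL buffer to learn the encoded size, allocate, then serialize for real. A minimal sketch of that idiom under assumed simplified types (the real tSerializeSEpSet encodes field by field rather than memcpy'ing the struct):

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct { int inUse; int numOfEps; } EpSet;  // hypothetical stand-in for SEpSet

// Two-pass serializer: with buf == NULL it only reports the encoded size;
// with a buffer it writes the bytes and returns the length used.
int serializeEpSet(void *buf, int bufLen, const EpSet *pEpSet) {
  int need = (int)sizeof(EpSet);
  if (buf == NULL) return need;  // sizing pass
  if (bufLen < need) return -1;  // caller's buffer too small
  memcpy(buf, pEpSet, sizeof(EpSet));
  return need;
}

int main(void) {
  EpSet ep = {.inUse = 0, .numOfEps = 3};
  int contLen = serializeEpSet(NULL, 0, &ep);  // pass 1: size
  void *pCont = malloc(contLen);
  if (pCont == NULL) return 1;                 // cf. the TSDB_CODE_OUT_OF_MEMORY branch
  serializeEpSet(pCont, contLen, &ep);         // pass 2: fill
  printf("encoded %d bytes\n", contLen);
  free(pCont);
  return 0;
}
```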
diff --git a/source/dnode/mgmt/node_util/inc/dmUtil.h b/source/dnode/mgmt/node_util/inc/dmUtil.h
index 0d921c2e8b8d810891d1718648f1aead826f9116..c142a6cfd892413f1a69e2e7ce1d41524b1dbb27 100644
--- a/source/dnode/mgmt/node_util/inc/dmUtil.h
+++ b/source/dnode/mgmt/node_util/inc/dmUtil.h
@@ -34,6 +34,7 @@
 
 #include "dnode.h"
 #include "mnode.h"
+#include "qnode.h"
 #include "monitor.h"
 #include "sync.h"
 #include "wal.h"
@@ -92,6 +93,7 @@ typedef int32_t (*ProcessDropNodeFp)(EDndNodeType ntype, SRpcMsg *pMsg);
 typedef void (*SendMonitorReportFp)();
 typedef void (*GetVnodeLoadsFp)(SMonVloadInfo *pInfo);
 typedef void (*GetMnodeLoadsFp)(SMonMloadInfo *pInfo);
+typedef void (*GetQnodeLoadsFp)(SQnodeLoad *pInfo);
 
 typedef struct {
   int32_t dnodeId;
@@ -118,6 +120,7 @@ typedef struct {
   SendMonitorReportFp sendMonitorReportFp;
   GetVnodeLoadsFp     getVnodeLoadsFp;
   GetMnodeLoadsFp     getMnodeLoadsFp;
+  GetQnodeLoadsFp     getQnodeLoadsFp;
 } SMgmtInputOpt;
 
 typedef struct {
@@ -180,4 +183,4 @@ void dmSetMnodeEpSet(SDnodeData *pData, SEpSet *pEpSet);
 }
 #endif
 
-#endif /*_TD_DM_INT_H_*/
\ No newline at end of file
+#endif /*_TD_DM_INT_H_*/
diff --git a/source/dnode/mgmt/node_util/src/dmEps.c b/source/dnode/mgmt/node_util/src/dmEps.c
index e0af20e41bfef194d90d30316c16042522e7f87d..937c1ab7faa29191ea65f8770d0ffb2c531b3c35 100644
--- a/source/dnode/mgmt/node_util/src/dmEps.c
+++ b/source/dnode/mgmt/node_util/src/dmEps.c
@@ -148,7 +148,6 @@ int32_t dmReadEps(SDnodeData *pData) {
 
   code = 0;
   dDebug("succcessed to read file %s", file);
-  dmPrintEps(pData);
 
 _OVER:
   if (content != NULL) taosMemoryFree(content);
@@ -162,6 +161,7 @@ _OVER:
     taosArrayPush(pData->dnodeEps, &dnodeEp);
   }
 
+  dDebug("reset dnode list on startup");
   dmResetEps(pData, pData->dnodeEps);
 
   if (dmIsEpChanged(pData, pData->dnodeId, tsLocalEp)) {
@@ -236,11 +236,13 @@ void dmUpdateEps(SDnodeData *pData, SArray *eps) {
   int32_t numOfEpsOld = (int32_t)taosArrayGetSize(pData->dnodeEps);
 
   if (numOfEps != numOfEpsOld) {
+    dDebug("new dnode list get from mnode");
     dmResetEps(pData, eps);
     dmWriteEps(pData);
   } else {
     int32_t size = numOfEps * sizeof(SDnodeEp);
     if (memcmp(pData->dnodeEps->pData, eps->pData, size) != 0) {
+      dDebug("new dnode list get from mnode");
       dmResetEps(pData, eps);
       dmWriteEps(pData);
     }
@@ -282,7 +284,7 @@ static void dmResetEps(SDnodeData *pData, SArray *dnodeEps) {
 
 static void dmPrintEps(SDnodeData *pData) {
   int32_t numOfEps = (int32_t)taosArrayGetSize(pData->dnodeEps);
-  dDebug("print dnode ep list, num:%d", numOfEps);
+  dDebug("print dnode list, num:%d", numOfEps);
   for (int32_t i = 0; i < numOfEps; i++) {
     SDnodeEp *pEp = taosArrayGet(pData->dnodeEps, i);
     dDebug("dnode:%d, fqdn:%s port:%u is_mnode:%d", pEp->id, pEp->ep.fqdn, pEp->ep.port, pEp->isMnode);
@@ -326,6 +328,7 @@ void dmGetMnodeEpSetForRedirect(SDnodeData *pData, SRpcMsg *pMsg, SEpSet *pEpSet
 }
 
 void dmSetMnodeEpSet(SDnodeData *pData, SEpSet *pEpSet) {
+  if (memcmp(pEpSet, &pData->mnodeEps, sizeof(SEpSet)) == 0) return;
   taosThreadRwlockWrlock(&pData->lock);
   pData->mnodeEps = *pEpSet;
   taosThreadRwlockUnlock(&pData->lock);
diff --git a/source/dnode/mgmt/node_util/src/dmFile.c b/source/dnode/mgmt/node_util/src/dmFile.c
index 7ac6fc129d2bb591706d6ed722878359c4993515..78e706f90814950287aed067103690f9c215e8e3 100644
--- a/source/dnode/mgmt/node_util/src/dmFile.c
+++ b/source/dnode/mgmt/node_util/src/dmFile.c
@@ -135,7 +135,7 @@ TdFilePtr dmCheckRunning(const char *dataDir) {
     return NULL;
   }
 
-  dDebug("file:%s is locked", filepath);
+  dDebug("lock file:%s to prevent repeated starts", filepath);
   return pFile;
 }
 
diff --git a/source/dnode/mgmt/test/mnode/dmnode.cpp b/source/dnode/mgmt/test/mnode/dmnode.cpp
index 8c945b50ac48b4b1e290875c58a98f168971bc37..857f58befce1898be621a37ea7cb33feb692d58a 100644
--- a/source/dnode/mgmt/test/mnode/dmnode.cpp
+++ b/source/dnode/mgmt/test/mnode/dmnode.cpp
@@ -94,7 +94,7 @@ TEST_F(DndTestMnode, 02_Alter_Mnode) {
     void* pReq = rpcMallocCont(contLen);
     tSerializeSDCreateMnodeReq(pReq, contLen, &alterReq);
 
-    SRpcMsg* pRsp = test.SendReq(TDMT_DND_ALTER_MNODE, pReq, contLen);
+    SRpcMsg* pRsp = test.SendReq(TDMT_MND_ALTER_MNODE, pReq, contLen);
     ASSERT_NE(pRsp, nullptr);
     ASSERT_EQ(pRsp->code, TSDB_CODE_INVALID_OPTION);
   }
@@ -111,7 +111,7 @@ TEST_F(DndTestMnode, 02_Alter_Mnode) {
     void* pReq = rpcMallocCont(contLen);
     tSerializeSDCreateMnodeReq(pReq, contLen, &alterReq);
 
-    SRpcMsg* pRsp = test.SendReq(TDMT_DND_ALTER_MNODE, pReq, contLen);
+    SRpcMsg* pRsp = test.SendReq(TDMT_MND_ALTER_MNODE, pReq, contLen);
     ASSERT_NE(pRsp, nullptr);
     ASSERT_EQ(pRsp->code, TSDB_CODE_INVALID_OPTION);
   }
@@ -128,7 +128,7 @@ TEST_F(DndTestMnode, 02_Alter_Mnode) {
     void* pReq = rpcMallocCont(contLen);
     tSerializeSDCreateMnodeReq(pReq, contLen, &alterReq);
 
-    SRpcMsg* pRsp = test.SendReq(TDMT_DND_ALTER_MNODE, pReq, contLen);
+    SRpcMsg* pRsp = test.SendReq(TDMT_MND_ALTER_MNODE, pReq, contLen);
     ASSERT_NE(pRsp, nullptr);
     ASSERT_EQ(pRsp->code, 0);
   }
@@ -186,7 +186,7 @@ TEST_F(DndTestMnode, 03_Drop_Mnode) {
     void* pReq = rpcMallocCont(contLen);
     tSerializeSDCreateMnodeReq(pReq, contLen, &alterReq);
 
-    SRpcMsg* pRsp = test.SendReq(TDMT_DND_ALTER_MNODE, pReq, contLen);
+    SRpcMsg* pRsp = test.SendReq(TDMT_MND_ALTER_MNODE, pReq, contLen);
     ASSERT_NE(pRsp, nullptr);
     ASSERT_EQ(pRsp->code, TSDB_CODE_NODE_NOT_DEPLOYED);
   }
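dmSetMnodeEpSet above gains a memcmp guard so the write lock is only taken when the incoming epset actually differs from the cached one. A minimal sketch of that guard pattern with pthreads (simplified EpSet type assumed; note the patch compares raw struct bytes, so this mirrors that exactly):

```c
#include <pthread.h>
#include <stdio.h>
#include <string.h>

typedef struct { int inUse; int numOfEps; } EpSet;  // hypothetical SEpSet stand-in

static EpSet mnodeEps;
static pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;

// Only take the write lock when the epset actually changed.
void setMnodeEpSet(const EpSet *pEpSet) {
  if (memcmp(pEpSet, &mnodeEps, sizeof(EpSet)) == 0) return;  // no change, no lock
  pthread_rwlock_wrlock(&lock);
  mnodeEps = *pEpSet;
  pthread_rwlock_unlock(&lock);
  printf("mnode epset updated, inUse:%d eps:%d\n", pEpSet->inUse, pEpSet->numOfEps);
}

int main(void) {
  EpSet ep = {.inUse = 1, .numOfEps = 3};
  setMnodeEpSet(&ep);  // updates the cached copy
  setMnodeEpSet(&ep);  // identical bytes, returns early
  return 0;
}
```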
diff --git a/source/dnode/mnode/impl/inc/mndDef.h b/source/dnode/mnode/impl/inc/mndDef.h
index 6318f2e3f2b790b5aff25e774c762a8bb5f4c4da..83a36f4b0d5509884b2e99e7bd0eb4663a564959 100644
--- a/source/dnode/mnode/impl/inc/mndDef.h
+++ b/source/dnode/mnode/impl/inc/mndDef.h
@@ -54,75 +54,32 @@ typedef enum {
 } EAuthOp;
 
 typedef enum {
-  TRN_STAGE_PREPARE = 0,
-  TRN_STAGE_REDO_LOG = 1,
-  TRN_STAGE_REDO_ACTION = 2,
-  TRN_STAGE_ROLLBACK = 3,
-  TRN_STAGE_UNDO_ACTION = 4,
-  TRN_STAGE_UNDO_LOG = 5,
-  TRN_STAGE_COMMIT = 6,
-  TRN_STAGE_COMMIT_LOG = 7,
-  TRN_STAGE_FINISHED = 8
-} ETrnStage;
+  TRN_CONFLICT_NOTHING = 0,
+  TRN_CONFLICT_GLOBAL = 1,
+  TRN_CONFLICT_DB = 2,
+  TRN_CONFLICT_DB_INSIDE = 3,
+} ETrnConflct;
 
 typedef enum {
-  TRN_TYPE_BASIC_SCOPE = 1000,
-  TRN_TYPE_CREATE_ACCT = 1001,
-  TRN_TYPE_CREATE_CLUSTER = 1002,
-  TRN_TYPE_CREATE_USER = 1003,
-  TRN_TYPE_ALTER_USER = 1004,
-  TRN_TYPE_DROP_USER = 1005,
-  TRN_TYPE_CREATE_FUNC = 1006,
-  TRN_TYPE_DROP_FUNC = 1007,
-
-  TRN_TYPE_CREATE_SNODE = 1010,
-  TRN_TYPE_DROP_SNODE = 1011,
-  TRN_TYPE_CREATE_QNODE = 1012,
-  TRN_TYPE_DROP_QNODE = 10013,
-  TRN_TYPE_CREATE_BNODE = 1014,
-  TRN_TYPE_DROP_BNODE = 1015,
-  TRN_TYPE_CREATE_MNODE = 1016,
-  TRN_TYPE_DROP_MNODE = 1017,
-
-  TRN_TYPE_CREATE_TOPIC = 1020,
-  TRN_TYPE_DROP_TOPIC = 1021,
-  TRN_TYPE_SUBSCRIBE = 1022,
-  TRN_TYPE_REBALANCE = 1023,
-  TRN_TYPE_COMMIT_OFFSET = 1024,
-  TRN_TYPE_CREATE_STREAM = 1025,
-  TRN_TYPE_DROP_STREAM = 1026,
-  TRN_TYPE_ALTER_STREAM = 1027,
-  TRN_TYPE_CONSUMER_LOST = 1028,
-  TRN_TYPE_CONSUMER_RECOVER = 1029,
-  TRN_TYPE_BASIC_SCOPE_END,
-
-  TRN_TYPE_GLOBAL_SCOPE = 2000,
-  TRN_TYPE_CREATE_DNODE = 2001,
-  TRN_TYPE_DROP_DNODE = 2002,
-  TRN_TYPE_GLOBAL_SCOPE_END,
-
-  TRN_TYPE_DB_SCOPE = 3000,
-  TRN_TYPE_CREATE_DB = 3001,
-  TRN_TYPE_ALTER_DB = 3002,
-  TRN_TYPE_DROP_DB = 3003,
-  TRN_TYPE_SPLIT_VGROUP = 3004,
-  TRN_TYPE_MERGE_VGROUP = 3015,
-  TRN_TYPE_DB_SCOPE_END,
-
-  TRN_TYPE_STB_SCOPE = 4000,
-  TRN_TYPE_CREATE_STB = 4001,
-  TRN_TYPE_ALTER_STB = 4002,
-  TRN_TYPE_DROP_STB = 4003,
-  TRN_TYPE_CREATE_SMA = 4004,
-  TRN_TYPE_DROP_SMA = 4005,
-  TRN_TYPE_STB_SCOPE_END,
-} ETrnType;
+  TRN_STAGE_PREPARE = 0,
+  TRN_STAGE_REDO_ACTION = 1,
+  TRN_STAGE_ROLLBACK = 2,
+  TRN_STAGE_UNDO_ACTION = 3,
+  TRN_STAGE_COMMIT = 4,
+  TRN_STAGE_COMMIT_ACTION = 5,
+  TRN_STAGE_FINISHED = 6
+} ETrnStage;
 
 typedef enum {
   TRN_POLICY_ROLLBACK = 0,
   TRN_POLICY_RETRY = 1,
 } ETrnPolicy;
 
+typedef enum {
+  TRN_EXEC_PRARLLEL = 0,
+  TRN_EXEC_SERIAL = 1,
+} ETrnExec;
+
 typedef enum {
   DND_REASON_ONLINE = 0,
   DND_REASON_STATUS_MSG_TIMEOUT,
@@ -150,22 +107,24 @@ typedef struct {
   int32_t        id;
   ETrnStage      stage;
   ETrnPolicy     policy;
-  ETrnType       type;
+  ETrnConflct    conflict;
+  ETrnExec       exec;
   int32_t        code;
   int32_t        failedTimes;
   SRpcHandleInfo rpcInfo;
   void*          rpcRsp;
   int32_t        rpcRspLen;
-  SArray*        redoLogs;
-  SArray*        undoLogs;
-  SArray*        commitLogs;
+  int32_t        redoActionPos;
   SArray*        redoActions;
   SArray*        undoActions;
+  SArray*        commitActions;
   int64_t        createdTime;
   int64_t        lastExecTime;
-  int64_t        dbUid;
+  int32_t        lastErrorAction;
+  int32_t        lastErrorNo;
+  tmsg_t         lastErrorMsgType;
+  SEpSet         lastErrorEpset;
   char           dbname[TSDB_DB_FNAME_LEN];
-  char           lastError[TSDB_TRANS_ERROR_LEN];
   int32_t        startFunc;
   int32_t        stopFunc;
   int32_t        paramLen;
@@ -209,6 +168,7 @@ typedef struct {
   int64_t    createdTime;
   int64_t    updateTime;
   SDnodeObj* pDnode;
+  SQnodeLoad load;
 } SQnodeObj;
 
 typedef struct {
@@ -295,6 +255,7 @@ typedef struct {
   int8_t  hashMethod;  // default is 1
   int32_t numOfRetensions;
   SArray* pRetensions;
+  int8_t  schemaless;
 } SDbCfg;
 
 typedef struct {
@@ -333,6 +294,7 @@ typedef struct {
   int8_t    isTsma;
   int8_t    replica;
   SVnodeGid vnodeGid[TSDB_MAX_REPLICA];
+  void*     pTsma;
 } SVgObj;
 
 typedef struct {
@@ -449,19 +411,15 @@ typedef struct {
   int64_t        uid;
   int64_t        dbUid;
   int32_t        version;
-  int8_t         subType;  // db or table
-  int8_t         withTbName;
-  int8_t         withSchema;
-  int8_t         withTag;
+  int8_t         subType;  // column, db or stable
   SRWLatch       lock;
-  int32_t        consumerCnt;
   int32_t        sqlLen;
   int32_t        astLen;
   char*          sql;
   char*          ast;
   char*          physicalPlan;
   SSchemaWrapper schema;
-  int32_t        refConsumerCnt;
+  int64_t        stbUid;
 } SMqTopicObj;
 
 typedef struct {
@@ -520,9 +478,7 @@ typedef struct {
   int64_t   dbUid;
   int32_t   vgNum;
   int8_t    subType;
-  int8_t    withTbName;
-  int8_t    withSchema;
-  int8_t    withTag;
+  int64_t   stbUid;
   SHashObj* consumerHash;   // consumerId -> SMqConsumerEp
   SArray*   unassignedVgs;  // SArray
 } SMqSubscribeObj;
@@ -574,7 +530,7 @@ typedef struct {
 } SMqRebOutputObj;
 
 typedef struct {
-  char name[TSDB_TOPIC_FNAME_LEN];
+  char name[TSDB_STREAM_FNAME_LEN];
   char sourceDb[TSDB_DB_FNAME_LEN];
   char targetDb[TSDB_DB_FNAME_LEN];
   char targetSTbName[TSDB_TABLE_FNAME_LEN];
diff --git a/source/dnode/mnode/impl/inc/mndInt.h b/source/dnode/mnode/impl/inc/mndInt.h
index 189ea82bfc8f53f2ebdf84c04214bf80e6f21882..6661347e4206b28d6977b622bc4cd8777b34abb7 100644
--- a/source/dnode/mnode/impl/inc/mndInt.h
+++ b/source/dnode/mnode/impl/inc/mndInt.h
@@ -75,12 +75,12 @@ typedef struct {
 } STelemMgmt;
 
 typedef struct {
-  SWal   *pWal;
-  sem_t   syncSem;
-  int64_t sync;
-  bool    standby;
-  bool    restored;
-  int32_t errCode;
+  SWal   *pWal;
+  sem_t   syncSem;
+  int64_t sync;
+  bool    standby;
+  int32_t errCode;
+  int32_t transId;
 } SSyncMgmt;
 
 typedef struct {
@@ -89,34 +89,45 @@ typedef struct {
 } SGrantInfo;
 
 typedef struct SMnode {
-  int32_t       selfDnodeId;
-  int64_t       clusterId;
-  TdThread      thread;
-  bool          deploy;
-  bool          stopped;
-  int8_t        replica;
-  int8_t        selfIndex;
-  SReplica      replicas[TSDB_MAX_REPLICA];
-  char         *path;
-  int64_t       checkTime;
-  SSdb         *pSdb;
-  SMgmtWrapper *pWrapper;
-  SArray       *pSteps;
-  SQHandle     *pQuery;
-  SShowMgmt     showMgmt;
-  SProfileMgmt  profileMgmt;
-  STelemMgmt    telemMgmt;
-  SSyncMgmt     syncMgmt;
-  SHashObj     *infosMeta;
-  SHashObj     *perfsMeta;
-  SGrantInfo    grant;
-  MndMsgFp      msgFp[TDMT_MAX];
-  SMsgCb        msgCb;
+  int32_t        selfDnodeId;
+  int64_t        clusterId;
+  TdThread       thread;
+  TdThreadRwlock lock;
+  int32_t        rpcRef;
+  int32_t        syncRef;
+  bool           stopped;
+  bool           restored;
+  bool           deploy;
+  int8_t         replica;
+  int8_t         selfIndex;
+  SReplica       replicas[TSDB_MAX_REPLICA];
+  char          *path;
+  int64_t        checkTime;
+  SSdb          *pSdb;
+  SArray        *pSteps;
+  SQHandle      *pQuery;
+  SHashObj      *infosMeta;
+  SHashObj      *perfsMeta;
+  SShowMgmt      showMgmt;
+  SProfileMgmt   profileMgmt;
+  STelemMgmt     telemMgmt;
+  SSyncMgmt      syncMgmt;
+  SGrantInfo     grant;
+  MndMsgFp       msgFp[TDMT_MAX];
+  SMsgCb         msgCb;
 } SMnode;
 
 void    mndSetMsgHandle(SMnode *pMnode, tmsg_t msgType, MndMsgFp fp);
 int64_t mndGenerateUid(char *name, int32_t len);
 
+int32_t mndAcquireRpcRef(SMnode *pMnode);
+void    mndReleaseRpcRef(SMnode *pMnode);
+void    mndSetRestore(SMnode *pMnode, bool restored);
+void    mndSetStop(SMnode *pMnode);
+bool    mndGetStop(SMnode *pMnode);
+int32_t mndAcquireSyncRef(SMnode *pMnode);
+void    mndReleaseSyncRef(SMnode *pMnode);
+
 #ifdef __cplusplus
 }
 #endif
diff --git a/source/dnode/mnode/impl/inc/mndOffset.h b/source/dnode/mnode/impl/inc/mndOffset.h
index 900181858bd724873ea948d450e830cc83643463..f7569b964875bbffe90c8fc5525fda8f68b688b8 100644
--- a/source/dnode/mnode/impl/inc/mndOffset.h
+++ b/source/dnode/mnode/impl/inc/mndOffset.h
@@ -39,6 +39,7 @@ static FORCE_INLINE int32_t mndMakePartitionKey(char *key, const char *cgroup, c
 
 int32_t mndDropOffsetByDB(SMnode *pMnode, STrans *pTrans, SDbObj *pDb);
 int32_t mndDropOffsetByTopic(SMnode *pMnode, STrans *pTrans, const char *topic);
+int32_t mndDropOffsetBySubKey(SMnode *pMnode, STrans *pTrans, const char *subKey);
 
 bool mndOffsetFromTopic(SMqOffsetObj *pOffset, const char *topic);
 
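The mndDef.h hunk collapses the old log-based transaction stages into a smaller action-based state machine and adds a conflict scope plus an execution mode. As a rough illustration only (the real executor lives in mndTrans.c and also handles retries), a guessed-at happy-path progression over the new stage values:

```c
#include <stdio.h>

// Stage values copied from the reworked ETrnStage in the hunk above.
typedef enum {
  TRN_STAGE_PREPARE = 0,
  TRN_STAGE_REDO_ACTION = 1,
  TRN_STAGE_ROLLBACK = 2,
  TRN_STAGE_UNDO_ACTION = 3,
  TRN_STAGE_COMMIT = 4,
  TRN_STAGE_COMMIT_ACTION = 5,
  TRN_STAGE_FINISHED = 6
} ETrnStage;

// Hypothetical stage-advance helper; the real code also branches into
// ROLLBACK/UNDO_ACTION when a redo action fails under TRN_POLICY_ROLLBACK.
ETrnStage nextStage(ETrnStage s, int redoOk) {
  switch (s) {
    case TRN_STAGE_PREPARE:       return TRN_STAGE_REDO_ACTION;
    case TRN_STAGE_REDO_ACTION:   return redoOk ? TRN_STAGE_COMMIT : TRN_STAGE_ROLLBACK;
    case TRN_STAGE_COMMIT:        return TRN_STAGE_COMMIT_ACTION;
    case TRN_STAGE_COMMIT_ACTION: return TRN_STAGE_FINISHED;
    case TRN_STAGE_ROLLBACK:      return TRN_STAGE_UNDO_ACTION;
    default:                      return TRN_STAGE_FINISHED;
  }
}

int main(void) {
  ETrnStage s = TRN_STAGE_PREPARE;
  while (s != TRN_STAGE_FINISHED) {
    printf("stage %d\n", s);
    s = nextStage(s, /*redoOk=*/1);
  }
  return 0;
}
```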
diff --git a/source/dnode/mnode/impl/inc/mndQnode.h b/source/dnode/mnode/impl/inc/mndQnode.h
index 5d177b3f6db6e2f8c81be3c4461bdea0870ba322..3e38565a4fe67b93d8ba8b9d30160ce54b13dee5 100644
--- a/source/dnode/mnode/impl/inc/mndQnode.h
+++ b/source/dnode/mnode/impl/inc/mndQnode.h
@@ -22,9 +22,15 @@
 extern "C" {
 #endif
 
+#define QNODE_LOAD_VALUE(pQnode) (pQnode ? (pQnode->load.numOfQueryInQueue + pQnode->load.numOfFetchInQueue) : 0)
+
 int32_t mndInitQnode(SMnode *pMnode);
 void    mndCleanupQnode(SMnode *pMnode);
 
+SQnodeObj *mndAcquireQnode(SMnode *pMnode, int32_t qnodeId);
+void       mndReleaseQnode(SMnode *pMnode, SQnodeObj *pObj);
+int32_t    mndCreateQnodeList(SMnode *pMnode, SArray** pList, int32_t limit);
+
 #ifdef __cplusplus
 }
 #endif
diff --git a/source/dnode/mnode/impl/inc/mndScheduler.h b/source/dnode/mnode/impl/inc/mndScheduler.h
index 9f4e377dd17dffc94ab04366e2c1ba61e170b92f..05aea3f68c4023ceb68e16ad875f59c666f63171 100644
--- a/source/dnode/mnode/impl/inc/mndScheduler.h
+++ b/source/dnode/mnode/impl/inc/mndScheduler.h
@@ -30,7 +30,7 @@ int32_t mndSchedInitSubEp(SMnode* pMnode, const SMqTopicObj* pTopic, SMqSubscrib
 int32_t mndScheduleStream(SMnode* pMnode, STrans* pTrans, SStreamObj* pStream);
 
 int32_t mndConvertRSmaTask(const char* ast, int64_t uid, int8_t triggerType, int64_t watermark, char** pStr,
-                           int32_t* pLen);
+                           int32_t* pLen, double filesFactor);
 
 #ifdef __cplusplus
 }
diff --git a/source/dnode/mnode/impl/inc/mndSubscribe.h b/source/dnode/mnode/impl/inc/mndSubscribe.h
index 50cede62ce424ae855f46ba0f359b5088058e4d1..d91c2bd4c3f69063420f3a775f6183e3eaa3824d 100644
--- a/source/dnode/mnode/impl/inc/mndSubscribe.h
+++ b/source/dnode/mnode/impl/inc/mndSubscribe.h
@@ -33,6 +33,7 @@ int32_t mndMakeSubscribeKey(char *key, const char *cgroup, const char *topicName
 
 int32_t mndDropSubByDB(SMnode *pMnode, STrans *pTrans, SDbObj *pDb);
 int32_t mndDropSubByTopic(SMnode *pMnode, STrans *pTrans, const char *topic);
+int32_t mndSetDropSubCommitLogs(SMnode *pMnode, STrans *pTrans, SMqSubscribeObj *pSub);
 
 #ifdef __cplusplus
 }
diff --git a/source/dnode/mnode/impl/inc/mndSync.h b/source/dnode/mnode/impl/inc/mndSync.h
index 356f215267fcfd76f5a851202c6290b9433796ee..cb9d70d5ee48f542dbe58100328b7f2284ea2926 100644
--- a/source/dnode/mnode/impl/inc/mndSync.h
+++ b/source/dnode/mnode/impl/inc/mndSync.h
@@ -25,7 +25,7 @@ extern "C" {
 int32_t mndInitSync(SMnode *pMnode);
 void    mndCleanupSync(SMnode *pMnode);
 bool    mndIsMaster(SMnode *pMnode);
-int32_t mndSyncPropose(SMnode *pMnode, SSdbRaw *pRaw);
+int32_t mndSyncPropose(SMnode *pMnode, SSdbRaw *pRaw, int32_t transId);
 void    mndSyncStart(SMnode *pMnode);
 void    mndSyncStop(SMnode *pMnode);
 
diff --git a/source/dnode/mnode/impl/inc/mndTopic.h b/source/dnode/mnode/impl/inc/mndTopic.h
index c5c4800e0295fa48ee4bf9669200f7ce7a31eff8..4aa18ea591a7058d8ecbbdcb901b0ebdcd82181b 100644
--- a/source/dnode/mnode/impl/inc/mndTopic.h
+++ b/source/dnode/mnode/impl/inc/mndTopic.h
@@ -37,6 +37,8 @@ const char *mndTopicGetShowName(const char topic[TSDB_TOPIC_FNAME_LEN]);
 
 int32_t mndSetTopicCommitLogs(SMnode *pMnode, STrans *pTrans, SMqTopicObj *pTopic);
 
+bool mndCheckColAndTagModifiable(SMnode *pMnode, int64_t suid, const SArray *colIds);
+
 #ifdef __cplusplus
 }
 #endif
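The QNODE_LOAD_VALUE macro introduced above reduces a qnode's load to the sum of its queued query and fetch work, treating a NULL qnode as idle. One plausible consumer of such a metric, sketched with simplified stand-in types (the selection policy here is illustrative, not the actual mnode scheduler):

```c
#include <stdio.h>

typedef struct { long numOfQueryInQueue; long numOfFetchInQueue; } QnodeLoad;
typedef struct { int id; QnodeLoad load; } QnodeObj;

// Same shape as the QNODE_LOAD_VALUE macro above: a NULL qnode counts as 0 load.
#define QNODE_LOAD_VALUE(p) ((p) ? ((p)->load.numOfQueryInQueue + (p)->load.numOfFetchInQueue) : 0)

// Pick the qnode with the smallest amount of queued work.
const QnodeObj *leastLoaded(const QnodeObj **list, int n) {
  const QnodeObj *best = NULL;
  for (int i = 0; i < n; i++) {
    if (best == NULL || QNODE_LOAD_VALUE(list[i]) < QNODE_LOAD_VALUE(best)) best = list[i];
  }
  return best;
}

int main(void) {
  QnodeObj a = {.id = 1, .load = {4, 2}};
  QnodeObj b = {.id = 2, .load = {1, 1}};
  const QnodeObj *list[] = {&a, &b};
  printf("least loaded qnode: %d\n", leastLoaded(list, 2)->id);
  return 0;
}
```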
diff --git a/source/dnode/mnode/impl/inc/mndTrans.h b/source/dnode/mnode/impl/inc/mndTrans.h
index 84e7a17192b7ba41028989d8bc58e88229731e10..6d1f3710830563e24fe124a3a95582b316ef4e00 100644
--- a/source/dnode/mnode/impl/inc/mndTrans.h
+++ b/source/dnode/mnode/impl/inc/mndTrans.h
@@ -22,24 +22,29 @@
 extern "C" {
 #endif
 
+typedef enum {
+  TRANS_START_FUNC_TEST = 1,
+  TRANS_STOP_FUNC_TEST = 2,
+  TRANS_START_FUNC_MQ_REB = 3,
+  TRANS_STOP_FUNC_MQ_REB = 4,
+} ETrnFunc;
+
 typedef struct {
-  SEpSet  epSet;
-  tmsg_t  msgType;
-  int8_t  msgSent;
-  int8_t  msgReceived;
-  int32_t errCode;
-  int32_t acceptableCode;
-  int32_t contLen;
-  void   *pCont;
+  int32_t  id;
+  int32_t  errCode;
+  int32_t  acceptableCode;
+  int8_t   stage;
+  int8_t   actionType;  // 0-msg, 1-raw
+  int8_t   rawWritten;
+  int8_t   msgSent;
+  int8_t   msgReceived;
+  tmsg_t   msgType;
+  SEpSet   epSet;
+  int32_t  contLen;
+  void    *pCont;
+  SSdbRaw *pRaw;
 } STransAction;
 
-typedef enum {
-  TEST_TRANS_START_FUNC = 1,
-  TEST_TRANS_STOP_FUNC = 2,
-  MQ_REB_TRANS_START_FUNC = 3,
-  MQ_REB_TRANS_STOP_FUNC = 4,
-} ETrnFuncType;
-
 typedef void (*TransCbFp)(SMnode *pMnode, void *param, int32_t paramLen);
 
 int32_t mndInitTrans(SMnode *pMnode);
@@ -47,7 +52,7 @@ void    mndCleanupTrans(SMnode *pMnode);
 STrans *mndAcquireTrans(SMnode *pMnode, int32_t transId);
 void    mndReleaseTrans(SMnode *pMnode, STrans *pTrans);
 
-STrans *mndTransCreate(SMnode *pMnode, ETrnPolicy policy, ETrnType type, const SRpcMsg *pReq);
+STrans *mndTransCreate(SMnode *pMnode, ETrnPolicy policy, ETrnConflct conflict, const SRpcMsg *pReq);
 void    mndTransDrop(STrans *pTrans);
 int32_t mndTransAppendRedolog(STrans *pTrans, SSdbRaw *pRaw);
 int32_t mndTransAppendUndolog(STrans *pTrans, SSdbRaw *pRaw);
@@ -55,8 +60,9 @@ int32_t mndTransAppendCommitlog(STrans *pTrans, SSdbRaw *pRaw);
 int32_t mndTransAppendRedoAction(STrans *pTrans, STransAction *pAction);
 int32_t mndTransAppendUndoAction(STrans *pTrans, STransAction *pAction);
 void    mndTransSetRpcRsp(STrans *pTrans, void *pCont, int32_t contLen);
-void    mndTransSetCb(STrans *pTrans, ETrnFuncType startFunc, ETrnFuncType stopFunc, void *param, int32_t paramLen);
-void    mndTransSetDbInfo(STrans *pTrans, SDbObj *pDb);
+void    mndTransSetCb(STrans *pTrans, ETrnFunc startFunc, ETrnFunc stopFunc, void *param, int32_t paramLen);
+void    mndTransSetDbName(STrans *pTrans, const char *dbname);
+void    mndTransSetSerial(STrans *pTrans);
 
 int32_t mndTransPrepare(SMnode *pMnode, STrans *pTrans);
 void    mndTransProcessRsp(SRpcMsg *pRsp);
diff --git a/source/dnode/mnode/impl/inc/mndVgroup.h b/source/dnode/mnode/impl/inc/mndVgroup.h
index c9099b6b050481b78030befbe93de59139df1b27..3f4f3f2053bd4fd633488eaf4a4fac71d642df51 100644
--- a/source/dnode/mnode/impl/inc/mndVgroup.h
+++ b/source/dnode/mnode/impl/inc/mndVgroup.h
@@ -36,7 +36,7 @@ SArray *mndBuildDnodesArray(SMnode *pMnode);
 int32_t mndAddVnodeToVgroup(SMnode *pMnode, SVgObj *pVgroup, SArray *pArray);
 int32_t mndRemoveVnodeFromVgroup(SMnode *pMnode, SVgObj *pVgroup, SArray *pArray, SVnodeGid *del1, SVnodeGid *del2);
 
-void *mndBuildCreateVnodeReq(SMnode *pMnode, SDnodeObj *pDnode, SDbObj *pDb, SVgObj *pVgroup, int32_t *pContLen);
+void *mndBuildCreateVnodeReq(SMnode *pMnode, SDnodeObj *pDnode, SDbObj *pDb, SVgObj *pVgroup, int32_t *pContLen, bool standby);
 void *mndBuildDropVnodeReq(SMnode *pMnode, SDnodeObj *pDnode, SDbObj *pDb, SVgObj *pVgroup, int32_t *pContLen);
 void *mndBuildAlterVnodeReq(SMnode *pMnode, SDbObj *pDb, SVgObj *pVgroup, int32_t *pContLen);
 
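The enlarged STransAction above now carries its own id, stage, and optional raw record alongside the message payload. A trimmed-down sketch of the append pattern used throughout the mnode code (create an action, hand ownership of the built request to the transaction, free it yourself on failure), with simplified stand-in structs:

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

// Trimmed stand-ins for STransAction/STrans; the real structs carry epsets,
// raws, and more bookkeeping (see the mndTrans.h hunk above).
typedef struct { int msgType; int contLen; void *pCont; } TransAction;
typedef struct { int nActions; TransAction actions[8]; } Trans;

// cf. mndTransAppendRedoAction: the trans takes ownership of pCont on success.
int transAppendRedoAction(Trans *pTrans, const TransAction *pAction) {
  if (pTrans->nActions >= 8) return -1;
  pTrans->actions[pTrans->nActions++] = *pAction;
  return 0;
}

int main(void) {
  Trans trans = {0};
  char *pReq = malloc(16);  // stand-in for a built vnode request
  if (pReq == NULL) return 1;
  memset(pReq, 0, 16);

  TransAction action = {.msgType = 1, .contLen = 16, .pCont = pReq};
  if (transAppendRedoAction(&trans, &action) != 0) {
    free(pReq);             // mirrors the taosMemoryFree on the failure path
    return 1;
  }
  printf("trans has %d redo action(s)\n", trans.nActions);
  free(pReq);
  return 0;
}
```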
diff --git a/source/dnode/mnode/impl/src/mndAcct.c b/source/dnode/mnode/impl/src/mndAcct.c
index a4fde4b70670952dbf14554aa0fce15f77cb49f5..0ce4a8c76e72ce2f2513819139b00a01c67f5231 100644
--- a/source/dnode/mnode/impl/src/mndAcct.c
+++ b/source/dnode/mnode/impl/src/mndAcct.c
@@ -78,11 +78,9 @@ static int32_t mndCreateDefaultAcct(SMnode *pMnode) {
   if (pRaw == NULL) return -1;
   sdbSetRawStatus(pRaw, SDB_STATUS_READY);
 
-  mDebug("acct:%s, will be created while deploy sdb, raw:%p", acctObj.acct, pRaw);
-#if 0
-  return sdbWrite(pMnode->pSdb, pRaw);
-#else
-  STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_TYPE_CREATE_ACCT, NULL);
+  mDebug("acct:%s, will be created when deploying, raw:%p", acctObj.acct, pRaw);
+
+  STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_NOTHING, NULL);
   if (pTrans == NULL) {
     mError("acct:%s, failed to create since %s", acctObj.acct, terrstr());
     return -1;
@@ -94,7 +92,6 @@ static int32_t mndCreateDefaultAcct(SMnode *pMnode) {
     mndTransDrop(pTrans);
     return -1;
   }
-  sdbSetRawStatus(pRaw, SDB_STATUS_READY);
 
   if (mndTransPrepare(pMnode, pTrans) != 0) {
     mError("trans:%d, failed to prepare since %s", pTrans->id, terrstr());
@@ -104,7 +101,6 @@ static int32_t mndCreateDefaultAcct(SMnode *pMnode) {
 
   mndTransDrop(pTrans);
   return 0;
-#endif
 }
 
 static SSdbRaw *mndAcctActionEncode(SAcctObj *pAcct) {
diff --git a/source/dnode/mnode/impl/src/mndBnode.c b/source/dnode/mnode/impl/src/mndBnode.c
index 3316a09462ff1d5ff7c940e623941c7abe72a76c..801f335a8056757c2cbe2d7f1ca6d65a4501003f 100644
--- a/source/dnode/mnode/impl/src/mndBnode.c
+++ b/source/dnode/mnode/impl/src/mndBnode.c
@@ -246,7 +246,7 @@ static int32_t mndCreateBnode(SMnode *pMnode, SRpcMsg *pReq, SDnodeObj *pDnode,
   bnodeObj.createdTime = taosGetTimestampMs();
   bnodeObj.updateTime = bnodeObj.createdTime;
 
-  STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_TYPE_CREATE_BNODE, pReq);
+  STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_CONFLICT_NOTHING, pReq);
   if (pTrans == NULL) goto _OVER;
 
   mDebug("trans:%d, used to create bnode:%d", pTrans->id, pCreate->dnodeId);
@@ -363,7 +363,7 @@ static int32_t mndSetDropBnodeRedoActions(STrans *pTrans, SDnodeObj *pDnode, SBn
 static int32_t mndDropBnode(SMnode *pMnode, SRpcMsg *pReq, SBnodeObj *pObj) {
   int32_t code = -1;
 
-  STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_TYPE_DROP_BNODE, pReq);
+  STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_NOTHING, pReq);
   if (pTrans == NULL) goto _OVER;
 
   mDebug("trans:%d, used to drop bnode:%d", pTrans->id, pObj->id);
diff --git a/source/dnode/mnode/impl/src/mndCluster.c b/source/dnode/mnode/impl/src/mndCluster.c
index a421be5c062a709bdd1e74f583a95142da2aac82..bb3377d16ac815489ce0cfbec22307ebb02156d0 100644
--- a/source/dnode/mnode/impl/src/mndCluster.c
+++ b/source/dnode/mnode/impl/src/mndCluster.c
@@ -172,17 +172,15 @@ static int32_t mndCreateDefaultCluster(SMnode *pMnode) {
   clusterObj.id = mndGenerateUid(clusterObj.name, TSDB_CLUSTER_ID_LEN);
   clusterObj.id = (clusterObj.id >= 0 ? clusterObj.id : -clusterObj.id);
   pMnode->clusterId = clusterObj.id;
-  mDebug("cluster:%" PRId64 ", name is %s", clusterObj.id, clusterObj.name);
+  mInfo("cluster:%" PRId64 ", name is %s", clusterObj.id, clusterObj.name);
 
   SSdbRaw *pRaw = mndClusterActionEncode(&clusterObj);
   if (pRaw == NULL) return -1;
   sdbSetRawStatus(pRaw, SDB_STATUS_READY);
 
-  mDebug("cluster:%" PRId64 ", will be created while deploy sdb, raw:%p", clusterObj.id, pRaw);
-#if 0
-  return sdbWrite(pMnode->pSdb, pRaw);
-#else
-  STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_TYPE_CREATE_CLUSTER, NULL);
+  mDebug("cluster:%" PRId64 ", will be created when deploying, raw:%p", clusterObj.id, pRaw);
+
+  STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_NOTHING, NULL);
   if (pTrans == NULL) {
     mError("cluster:%" PRId64 ", failed to create since %s", clusterObj.id, terrstr());
     return -1;
@@ -204,7 +202,6 @@ static int32_t mndCreateDefaultCluster(SMnode *pMnode) {
 
   mndTransDrop(pTrans);
   return 0;
-#endif
 }
 
 static int32_t mndRetrieveClusters(SRpcMsg *pMsg, SShowObj *pShow, SSDataBlock *pBlock, int32_t rows) {
diff --git a/source/dnode/mnode/impl/src/mndConsumer.c b/source/dnode/mnode/impl/src/mndConsumer.c
index 7cebeb35f5bb9e3f2b363c438a1ce70ad3296717..1f8bf0699322ffdaad5c479b3c8fec3451645527 100644
--- a/source/dnode/mnode/impl/src/mndConsumer.c
+++ b/source/dnode/mnode/impl/src/mndConsumer.c
@@ -97,7 +97,7 @@ static int32_t mndProcessConsumerLostMsg(SRpcMsg *pMsg) {
 
   mndReleaseConsumer(pMnode, pConsumer);
 
-  STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_TYPE_CONSUMER_LOST, pMsg);
+  STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_NOTHING, pMsg);
   if (pTrans == NULL) goto FAIL;
   if (mndSetConsumerCommitLogs(pMnode, pTrans, pConsumerNew) != 0) goto FAIL;
   if (mndTransPrepare(pMnode, pTrans) != 0) goto FAIL;
@@ -121,7 +121,7 @@ static int32_t mndProcessConsumerRecoverMsg(SRpcMsg *pMsg) {
 
   mndReleaseConsumer(pMnode, pConsumer);
 
-  STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_TYPE_CONSUMER_RECOVER, pMsg);
+  STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_NOTHING, pMsg);
   if (pTrans == NULL) goto FAIL;
   if (mndSetConsumerCommitLogs(pMnode, pTrans, pConsumerNew) != 0) goto FAIL;
   if (mndTransPrepare(pMnode, pTrans) != 0) goto FAIL;
@@ -306,6 +306,7 @@ static int32_t mndProcessAskEpReq(SRpcMsg *pMsg) {
       SMqTopicObj *pTopic = mndAcquireTopic(pMnode, topic);
       ASSERT(pTopic);
       taosRLockLatch(&pTopic->lock);
+      tstrncpy(topicEp.db, pTopic->db, TSDB_DB_FNAME_LEN);
       topicEp.schema.nCols = pTopic->schema.nCols;
       if (topicEp.schema.nCols) {
         topicEp.schema.pSchema = taosMemoryCalloc(topicEp.schema.nCols, sizeof(SSchema));
@@ -403,7 +404,7 @@ static int32_t mndProcessSubscribeReq(SRpcMsg *pMsg) {
   int32_t newTopicNum = taosArrayGetSize(newSub);
 
   // check topic existance
-  STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_TYPE_SUBSCRIBE, pMsg);
+  STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_NOTHING, pMsg);
   if (pTrans == NULL) goto SUBSCRIBE_OVER;
 
   for (int32_t i = 0; i < newTopicNum; i++) {
@@ -414,6 +415,7 @@ static int32_t mndProcessSubscribeReq(SRpcMsg *pMsg) {
       goto SUBSCRIBE_OVER;
     }
 
+#if 0
     // ref topic to prevent drop
     // TODO make topic complete
     SMqTopicObj topicObj = {0};
@@ -422,6 +424,7 @@ static int32_t mndProcessSubscribeReq(SRpcMsg *pMsg) {
     mInfo("subscribe topic %s by consumer %ld cgroup %s, refcnt %d", pTopic->name, consumerId, cgroup,
           topicObj.refConsumerCnt);
     if (mndSetTopicCommitLogs(pMnode, pTrans, &topicObj) != 0) goto SUBSCRIBE_OVER;
+#endif
 
     mndReleaseTopic(pMnode, pTopic);
   }
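The consumer handlers above all follow the same create/commit/prepare/drop lifecycle: build a transaction, attach commit logs, hand it to the executor, then release the local handle. A skeleton of that sequence with all types and return codes simplified to stubs (not the actual mnode API):

```c
#include <stdio.h>

typedef struct { int id; int prepared; } Trans;  // hypothetical STrans stand-in

static Trans *transCreate(void)            { static Trans t = {.id = 7, .prepared = 0}; return &t; }
static int    transSetCommitLogs(Trans *t) { (void)t; return 0; }  // cf. mndSetConsumerCommitLogs
static int    transPrepare(Trans *t)       { t->prepared = 1; return 0; }
static void   transDrop(Trans *t)          { printf("trans:%d dropped\n", t->id); }

int handleConsumerMsg(void) {
  Trans *pTrans = transCreate();            // cf. TRN_POLICY_RETRY, TRN_CONFLICT_NOTHING
  if (pTrans == NULL) goto FAIL;
  if (transSetCommitLogs(pTrans) != 0) goto FAIL;
  if (transPrepare(pTrans) != 0) goto FAIL;
  transDrop(pTrans);                        // drop releases only the local handle
  return 0;
FAIL:
  if (pTrans != NULL) transDrop(pTrans);
  return -1;
}

int main(void) { return handleConsumerMsg(); }
```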
TSDB_DB_SCHEMALESS_OFF; + } static int32_t mndSetCreateDbRedoLogs(SMnode *pMnode, STrans *pTrans, SDbObj *pDb, SVgObj *pVgroups) { @@ -467,7 +472,7 @@ static int32_t mndSetCreateDbRedoActions(SMnode *pMnode, STrans *pTrans, SDbObj for (int32_t vn = 0; vn < pVgroup->replica; ++vn) { SVnodeGid *pVgid = pVgroup->vnodeGid + vn; - if (mndAddCreateVnodeAction(pMnode, pTrans, pDb, pVgroup, pVgid) != 0) { + if (mndAddCreateVnodeAction(pMnode, pTrans, pDb, pVgroup, pVgid, false) != 0) { return -1; } } @@ -521,6 +526,7 @@ static int32_t mndCreateDb(SMnode *pMnode, SRpcMsg *pReq, SCreateDbReq *pCreate, .strict = pCreate->strict, .cacheLastRow = pCreate->cacheLastRow, .hashMethod = 1, + .schemaless = pCreate->schemaless, }; dbObj.cfg.numOfRetensions = pCreate->numOfRetensions; @@ -545,12 +551,12 @@ static int32_t mndCreateDb(SMnode *pMnode, SRpcMsg *pReq, SCreateDbReq *pCreate, } int32_t code = -1; - STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_TYPE_CREATE_DB, pReq); + STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_CONFLICT_DB, pReq); if (pTrans == NULL) goto _OVER; mDebug("trans:%d, used to create db:%s", pTrans->id, pCreate->db); - mndTransSetDbInfo(pTrans, &dbObj); + mndTransSetDbName(pTrans, dbObj.name); if (mndSetCreateDbRedoLogs(pMnode, pTrans, &dbObj, pVgroups) != 0) goto _OVER; if (mndSetCreateDbUndoLogs(pMnode, pTrans, &dbObj, pVgroups) != 0) goto _OVER; if (mndSetCreateDbCommitLogs(pMnode, pTrans, &dbObj, pVgroups) != 0) goto _OVER; @@ -688,29 +694,37 @@ static int32_t mndSetDbCfgFromAlterDbReq(SDbObj *pDb, SAlterDbReq *pAlter) { static int32_t mndSetAlterDbRedoLogs(SMnode *pMnode, STrans *pTrans, SDbObj *pOld, SDbObj *pNew) { SSdbRaw *pRedoRaw = mndDbActionEncode(pOld); if (pRedoRaw == NULL) return -1; - if (mndTransAppendRedolog(pTrans, pRedoRaw) != 0) return -1; - if (sdbSetRawStatus(pRedoRaw, SDB_STATUS_READY) != 0) return -1; + if (mndTransAppendRedolog(pTrans, pRedoRaw) != 0) { + sdbFreeRaw(pRedoRaw); + return -1; + } + sdbSetRawStatus(pRedoRaw, SDB_STATUS_READY); return 0; } static int32_t mndSetAlterDbCommitLogs(SMnode *pMnode, STrans *pTrans, SDbObj *pOld, SDbObj *pNew) { SSdbRaw *pCommitRaw = mndDbActionEncode(pNew); if (pCommitRaw == NULL) return -1; - if (mndTransAppendCommitlog(pTrans, pCommitRaw) != 0) return -1; - if (sdbSetRawStatus(pCommitRaw, SDB_STATUS_READY) != 0) return -1; + if (mndTransAppendCommitlog(pTrans, pCommitRaw) != 0) { + sdbFreeRaw(pCommitRaw); + return -1; + } + sdbSetRawStatus(pCommitRaw, SDB_STATUS_READY); return 0; } static int32_t mndBuildAlterVgroupAction(SMnode *pMnode, STrans *pTrans, SDbObj *pDb, SVgObj *pVgroup, SArray *pArray) { if (pVgroup->replica <= 0 || pVgroup->replica == pDb->cfg.replications) { - if (mndAddAlterVnodeAction(pMnode, pTrans, pDb, pVgroup) != 0) { + if (mndAddAlterVnodeAction(pMnode, pTrans, pDb, pVgroup, TDMT_VND_ALTER_CONFIG) != 0) { return -1; } } else { SVgObj newVgroup = {0}; memcpy(&newVgroup, pVgroup, sizeof(SVgObj)); + mndTransSetSerial(pTrans); + if (newVgroup.replica < pDb->cfg.replications) { mInfo("db:%s, vgId:%d, will add 2 vnodes, vn:0 dnode:%d", pVgroup->dbName, pVgroup->vgId, pVgroup->vnodeGid[0].dnodeId); @@ -720,9 +734,9 @@ static int32_t mndBuildAlterVgroupAction(SMnode *pMnode, STrans *pTrans, SDbObj return -1; } newVgroup.replica = pDb->cfg.replications; - if (mndAddAlterVnodeAction(pMnode, pTrans, pDb, &newVgroup) != 0) return -1; - if (mndAddCreateVnodeAction(pMnode, pTrans, pDb, &newVgroup, &newVgroup.vnodeGid[1]) != 0) return -1; - if (mndAddCreateVnodeAction(pMnode, 
pTrans, pDb, &newVgroup, &newVgroup.vnodeGid[2]) != 0) return -1; + if (mndAddCreateVnodeAction(pMnode, pTrans, pDb, &newVgroup, &newVgroup.vnodeGid[1], true) != 0) return -1; + if (mndAddCreateVnodeAction(pMnode, pTrans, pDb, &newVgroup, &newVgroup.vnodeGid[2], true) != 0) return -1; + if (mndAddAlterVnodeAction(pMnode, pTrans, pDb, &newVgroup, TDMT_VND_ALTER_REPLICA) != 0) return -1; } else { mInfo("db:%s, vgId:%d, will remove 2 vnodes", pVgroup->dbName, pVgroup->vgId); @@ -733,15 +747,18 @@ static int32_t mndBuildAlterVgroupAction(SMnode *pMnode, STrans *pTrans, SDbObj return -1; } newVgroup.replica = pDb->cfg.replications; - if (mndAddAlterVnodeAction(pMnode, pTrans, pDb, &newVgroup) != 0) return -1; + if (mndAddAlterVnodeAction(pMnode, pTrans, pDb, &newVgroup, TDMT_VND_ALTER_REPLICA) != 0) return -1; if (mndAddDropVnodeAction(pMnode, pTrans, pDb, &newVgroup, &del1, true) != 0) return -1; if (mndAddDropVnodeAction(pMnode, pTrans, pDb, &newVgroup, &del2, true) != 0) return -1; } SSdbRaw *pVgRaw = mndVgroupActionEncode(&newVgroup); if (pVgRaw == NULL) return -1; - if (mndTransAppendCommitlog(pTrans, pVgRaw) != 0) return -1; - if (sdbSetRawStatus(pVgRaw, SDB_STATUS_READY) != 0) return -1; + if (mndTransAppendCommitlog(pTrans, pVgRaw) != 0) { + sdbFreeRaw(pVgRaw); + return -1; + } + sdbSetRawStatus(pVgRaw, SDB_STATUS_READY); } return 0; @@ -774,18 +791,16 @@ static int32_t mndSetAlterDbRedoActions(SMnode *pMnode, STrans *pTrans, SDbObj * } static int32_t mndAlterDb(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pOld, SDbObj *pNew) { - int32_t code = -1; - STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_TYPE_ALTER_DB, pReq); - if (pTrans == NULL) goto _OVER; - + STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_DB, pReq); + if (pTrans == NULL) return -1; mDebug("trans:%d, used to alter db:%s", pTrans->id, pOld->name); - mndTransSetDbInfo(pTrans, pOld); + int32_t code = -1; + mndTransSetDbName(pTrans, pOld->name); if (mndSetAlterDbRedoLogs(pMnode, pTrans, pOld, pNew) != 0) goto _OVER; if (mndSetAlterDbCommitLogs(pMnode, pTrans, pOld, pNew) != 0) goto _OVER; if (mndSetAlterDbRedoActions(pMnode, pTrans, pOld, pNew) != 0) goto _OVER; if (mndTransPrepare(pMnode, pTrans) != 0) goto _OVER; - code = 0; _OVER: @@ -890,6 +905,7 @@ static int32_t mndProcessGetDbCfgReq(SRpcMsg *pReq) { cfgRsp.cacheLastRow = pDb->cfg.cacheLastRow; cfgRsp.numOfRetensions = pDb->cfg.numOfRetensions; cfgRsp.pRetensions = pDb->cfg.pRetensions; + cfgRsp.schemaless = pDb->cfg.schemaless; int32_t contLen = tSerializeSDbCfgRsp(NULL, 0, &cfgRsp); void *pRsp = rpcMallocCont(contLen); @@ -1036,17 +1052,17 @@ static int32_t mndBuildDropDbRsp(SDbObj *pDb, int32_t *pRspLen, void **ppRsp, bo static int32_t mndDropDb(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pDb) { int32_t code = -1; - STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_TYPE_DROP_DB, pReq); + STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_DB, pReq); if (pTrans == NULL) goto _OVER; mDebug("trans:%d, used to drop db:%s", pTrans->id, pDb->name); - mndTransSetDbInfo(pTrans, pDb); + mndTransSetDbName(pTrans, pDb->name); if (mndSetDropDbRedoLogs(pMnode, pTrans, pDb) != 0) goto _OVER; if (mndSetDropDbCommitLogs(pMnode, pTrans, pDb) != 0) goto _OVER; - /*if (mndDropOffsetByDB(pMnode, pTrans, pDb) != 0) goto _OVER;*/ - /*if (mndDropSubByDB(pMnode, pTrans, pDb) != 0) goto _OVER;*/ - /*if (mndDropTopicByDB(pMnode, pTrans, pDb) != 0) goto _OVER;*/ + if (mndDropOffsetByDB(pMnode, pTrans, pDb) != 0) goto _OVER; + if 
(mndDropSubByDB(pMnode, pTrans, pDb) != 0) goto _OVER; + if (mndDropTopicByDB(pMnode, pTrans, pDb) != 0) goto _OVER; if (mndSetDropDbRedoActions(pMnode, pTrans, pDb) != 0) goto _OVER; SUserObj *pUser = mndAcquireUser(pMnode, pDb->createUser); @@ -1314,7 +1330,7 @@ int32_t mndValidateDbInfo(SMnode *pMnode, SDbVgVersion *pDbs, int32_t numOfDbs, SDbObj *pDb = mndAcquireDb(pMnode, pDbVgVersion->dbFName); if (pDb == NULL) { - mDebug("db:%s, no exist", pDbVgVersion->dbFName); + mTrace("db:%s, not exist", pDbVgVersion->dbFName); memcpy(usedbRsp.db, pDbVgVersion->dbFName, TSDB_DB_FNAME_LEN); usedbRsp.uid = pDbVgVersion->dbId; usedbRsp.vgVersion = -1; @@ -1533,8 +1549,11 @@ static void dumpDbInfoData(SSDataBlock *pBlock, SDbObj *pDb, SShowObj *pShow, in pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); colDataAppend(pColInfo, rows, (const char *)&pDb->cfg.numOfStables, false); - pColInfo = taosArrayGet(pBlock->pDataBlock, cols); + pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); colDataAppend(pColInfo, rows, (const char *)statusB, false); + + pColInfo = taosArrayGet(pBlock->pDataBlock, cols); + colDataAppend(pColInfo, rows, (const char *)&pDb->cfg.schemaless, false); } } diff --git a/source/dnode/mnode/impl/src/mndDef.c b/source/dnode/mnode/impl/src/mndDef.c index 35ba25acd54abf39351e51cfea42152f41b57b9e..b6659e163223914682252b8986b95dcea133d732 100644 --- a/source/dnode/mnode/impl/src/mndDef.c +++ b/source/dnode/mnode/impl/src/mndDef.c @@ -395,10 +395,8 @@ SMqSubscribeObj *tCloneSubscribeObj(const SMqSubscribeObj *pSub) { taosInitRWLatch(&pSubNew->lock); pSubNew->dbUid = pSub->dbUid; + pSubNew->stbUid = pSub->stbUid; pSubNew->subType = pSub->subType; - pSubNew->withTbName = pSub->withTbName; - pSubNew->withSchema = pSub->withSchema; - pSubNew->withTag = pSub->withTag; pSubNew->vgNum = pSub->vgNum; pSubNew->consumerHash = taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK); @@ -431,9 +429,7 @@ int32_t tEncodeSubscribeObj(void **buf, const SMqSubscribeObj *pSub) { tlen += taosEncodeFixedI64(buf, pSub->dbUid); tlen += taosEncodeFixedI32(buf, pSub->vgNum); tlen += taosEncodeFixedI8(buf, pSub->subType); - tlen += taosEncodeFixedI8(buf, pSub->withTbName); - tlen += taosEncodeFixedI8(buf, pSub->withSchema); - tlen += taosEncodeFixedI8(buf, pSub->withTag); + tlen += taosEncodeFixedI64(buf, pSub->stbUid); void *pIter = NULL; int32_t sz = taosHashGetSize(pSub->consumerHash); @@ -458,9 +454,7 @@ void *tDecodeSubscribeObj(const void *buf, SMqSubscribeObj *pSub) { buf = taosDecodeFixedI64(buf, &pSub->dbUid); buf = taosDecodeFixedI32(buf, &pSub->vgNum); buf = taosDecodeFixedI8(buf, &pSub->subType); - buf = taosDecodeFixedI8(buf, &pSub->withTbName); - buf = taosDecodeFixedI8(buf, &pSub->withSchema); - buf = taosDecodeFixedI8(buf, &pSub->withTag); + buf = taosDecodeFixedI64(buf, &pSub->stbUid); int32_t sz; buf = taosDecodeFixedI32(buf, &sz); diff --git a/source/dnode/mnode/impl/src/mndDnode.c b/source/dnode/mnode/impl/src/mndDnode.c index 22f858c60bdbfd56652570195b89cbf3f207651a..aeff018aa82da7216e21bb46270a6bbb8c3ead7a 100644 --- a/source/dnode/mnode/impl/src/mndDnode.c +++ b/source/dnode/mnode/impl/src/mndDnode.c @@ -17,6 +17,7 @@ #include "mndDnode.h" #include "mndAuth.h" #include "mndMnode.h" +#include "mndQnode.h" #include "mndShow.h" #include "mndTrans.h" #include "mndUser.h" @@ -98,12 +99,9 @@ static int32_t mndCreateDefaultDnode(SMnode *pMnode) { if (pRaw == NULL) return -1; if (sdbSetRawStatus(pRaw, SDB_STATUS_READY) != 0) return -1; - mDebug("dnode:%d, will be
created while deploy sdb, raw:%p", dnodeObj.id, pRaw); + mDebug("dnode:%d, will be created when deploying, raw:%p", dnodeObj.id, pRaw); -#if 0 - return sdbWrite(pMnode->pSdb, pRaw); -#else - STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_TYPE_CREATE_DNODE, NULL); + STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_GLOBAL, NULL); if (pTrans == NULL) { mError("dnode:%s, failed to create since %s", dnodeObj.ep, terrstr()); return -1; @@ -125,7 +123,6 @@ static int32_t mndCreateDefaultDnode(SMnode *pMnode) { mndTransDrop(pTrans); return 0; -#endif } static SSdbRaw *mndDnodeActionEncode(SDnodeObj *pDnode) { @@ -259,7 +256,7 @@ int32_t mndGetDnodeSize(SMnode *pMnode) { bool mndIsDnodeOnline(SMnode *pMnode, SDnodeObj *pDnode, int64_t curMs) { int64_t interval = TABS(pDnode->lastAccessTime - curMs); - if (interval > 30000 * tsStatusInterval) { + if (interval > 5000 * tsStatusInterval) { if (pDnode->rebootTime > 0) { pDnode->offlineReason = DND_REASON_STATUS_MSG_TIMEOUT; } @@ -388,9 +385,16 @@ static int32_t mndProcessStatusReq(SRpcMsg *pReq) { mndReleaseMnode(pMnode, pObj); } + SQnodeObj *pQnode = mndAcquireQnode(pMnode, statusReq.qload.dnodeId); + if (pQnode != NULL) { + pQnode->load = statusReq.qload; + mndReleaseQnode(pMnode, pQnode); + } + + int64_t dnodeVer = sdbGetTableVer(pMnode->pSdb, SDB_DNODE) + sdbGetTableVer(pMnode->pSdb, SDB_MNODE); int64_t curMs = taosGetTimestampMs(); bool online = mndIsDnodeOnline(pMnode, pDnode, curMs); - bool dnodeChanged = (statusReq.dnodeVer != sdbGetTableVer(pMnode->pSdb, SDB_DNODE)); + bool dnodeChanged = (statusReq.dnodeVer != dnodeVer); bool reboot = (pDnode->rebootTime != statusReq.rebootTime); bool needCheck = !online || dnodeChanged || reboot; @@ -433,7 +437,8 @@ static int32_t mndProcessStatusReq(SRpcMsg *pReq) { if (!online) { mInfo("dnode:%d, from offline to online", pDnode->id); } else { - mDebug("dnode:%d, send dnode eps", pDnode->id); + mDebug("dnode:%d, send dnode epset, online:%d ver:%" PRId64 ":%" PRId64 " reboot:%d", pDnode->id, online, + statusReq.dnodeVer, dnodeVer, reboot); } pDnode->rebootTime = statusReq.rebootTime; @@ -441,7 +446,7 @@ static int32_t mndProcessStatusReq(SRpcMsg *pReq) { pDnode->numOfSupportVnodes = statusReq.numOfSupportVnodes; SStatusRsp statusRsp = {0}; - statusRsp.dnodeVer = sdbGetTableVer(pMnode->pSdb, SDB_DNODE) + sdbGetTableVer(pMnode->pSdb, SDB_MNODE); + statusRsp.dnodeVer = dnodeVer; statusRsp.dnodeCfg.dnodeId = pDnode->id; statusRsp.dnodeCfg.clusterId = pMnode->clusterId; statusRsp.pDnodeEps = taosArrayInit(mndGetDnodeSize(pMnode), sizeof(SDnodeEp)); @@ -479,7 +484,7 @@ static int32_t mndCreateDnode(SMnode *pMnode, SRpcMsg *pReq, SCreateDnodeReq *pC memcpy(dnodeObj.fqdn, pCreate->fqdn, TSDB_FQDN_LEN); snprintf(dnodeObj.ep, TSDB_EP_LEN, "%s:%u", dnodeObj.fqdn, dnodeObj.port); - STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_TYPE_CREATE_DNODE, pReq); + STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_CONFLICT_GLOBAL, pReq); if (pTrans == NULL) { mError("dnode:%s, failed to create since %s", dnodeObj.ep, terrstr()); return -1; @@ -555,7 +560,7 @@ CREATE_DNODE_OVER: } static int32_t mndDropDnode(SMnode *pMnode, SRpcMsg *pReq, SDnodeObj *pDnode) { - STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_TYPE_DROP_DNODE, pReq); + STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_CONFLICT_GLOBAL, pReq); if (pTrans == NULL) { mError("dnode:%d, failed to drop since %s", pDnode->id, terrstr()); return -1; @@ -608,7 +613,7 @@
static int32_t mndProcessDropDnodeReq(SRpcMsg *pReq) { pMObj = mndAcquireMnode(pMnode, dropReq.dnodeId); if (pMObj != NULL) { - terrno = TSDB_CODE_MND_MNODE_DEPLOYED; + terrno = TSDB_CODE_MND_MNODE_NOT_EXIST; goto DROP_DNODE_OVER; } diff --git a/source/dnode/mnode/impl/src/mndFunc.c b/source/dnode/mnode/impl/src/mndFunc.c index 9107dab693d4c9eb6adc6599d03126d5a59a5a69..bf4baebd8584bd8324f3e4e53836bbd8a2002fad 100644 --- a/source/dnode/mnode/impl/src/mndFunc.c +++ b/source/dnode/mnode/impl/src/mndFunc.c @@ -215,7 +215,7 @@ static int32_t mndCreateFunc(SMnode *pMnode, SRpcMsg *pReq, SCreateFuncReq *pCre } memcpy(func.pCode, pCreate->pCode, func.codeSize); - pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_TYPE_CREATE_FUNC, pReq); + pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_CONFLICT_NOTHING, pReq); if (pTrans == NULL) goto _OVER; mDebug("trans:%d, used to create func:%s", pTrans->id, pCreate->name); @@ -245,7 +245,7 @@ _OVER: static int32_t mndDropFunc(SMnode *pMnode, SRpcMsg *pReq, SFuncObj *pFunc) { int32_t code = -1; - STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_TYPE_DROP_FUNC, pReq); + STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_CONFLICT_NOTHING, pReq); if (pTrans == NULL) goto _OVER; mDebug("trans:%d, used to drop user:%s", pTrans->id, pFunc->name); diff --git a/source/dnode/mnode/impl/src/mnode.c b/source/dnode/mnode/impl/src/mndMain.c similarity index 80% rename from source/dnode/mnode/impl/src/mnode.c rename to source/dnode/mnode/impl/src/mndMain.c index 4e4f69e01d746eb8c249a567b1bf166fc31aa6ba..3a3fd7ebdb5ac8f56a64ea5b0169dfeda8cd3b97 100644 --- a/source/dnode/mnode/impl/src/mnode.c +++ b/source/dnode/mnode/impl/src/mndMain.c @@ -85,7 +85,7 @@ static void *mndThreadFp(void *param) { while (1) { lastTime++; taosMsleep(100); - if (pMnode->stopped) break; + if (mndGetStop(pMnode)) break; if (lastTime % (tsTransPullupInterval * 10) == 0) { mndPullupTrans(pMnode); @@ -118,7 +118,6 @@ static int32_t mndInitTimer(SMnode *pMnode) { } static void mndCleanupTimer(SMnode *pMnode) { - pMnode->stopped = true; if (taosCheckPthreadValid(pMnode->thread)) { taosThreadJoin(pMnode->thread, NULL); taosThreadClear(&pMnode->thread); @@ -335,15 +334,19 @@ void mndClose(SMnode *pMnode) { int32_t mndStart(SMnode *pMnode) { mndSyncStart(pMnode); if (pMnode->deploy) { - if (sdbDeploy(pMnode->pSdb) != 0) return -1; - pMnode->syncMgmt.restored = true; + if (sdbDeploy(pMnode->pSdb) != 0) { + mError("failed to deploy sdb while start mnode"); + return -1; + } + mndSetRestore(pMnode, true); } return mndInitTimer(pMnode); } void mndStop(SMnode *pMnode) { + mndSetStop(pMnode); mndSyncStop(pMnode); - return mndCleanupTimer(pMnode); + mndCleanupTimer(pMnode); } int32_t mndProcessSyncMsg(SRpcMsg *pMsg) { @@ -362,7 +365,12 @@ int32_t mndProcessSyncMsg(SRpcMsg *pMsg) { return TAOS_SYNC_PROPOSE_OTHER_ERROR; } - char logBuf[512]; + if (mndAcquireSyncRef(pMnode) != 0) { + mError("failed to process sync msg:%p type:%s since %s", pMsg, TMSG_INFO(pMsg->msgType), terrstr()); + return TAOS_SYNC_PROPOSE_OTHER_ERROR; + } + + char logBuf[512] = {0}; char *syncNodeStr = sync2SimpleStr(pMgmt->sync); snprintf(logBuf, sizeof(logBuf), "==vnodeProcessSyncReq== msgType:%d, syncNode: %s", pMsg->msgType, syncNodeStr); syncRpcMsgLog2(logBuf, pMsg); @@ -405,47 +413,67 @@ int32_t mndProcessSyncMsg(SRpcMsg *pMsg) { code = TAOS_SYNC_PROPOSE_OTHER_ERROR; } + mndReleaseSyncRef(pMnode); return code; } -int32_t mndProcessMsg(SRpcMsg *pMsg) { - SMnode *pMnode = pMsg->info.node; - void 
*ahandle = pMsg->info.ahandle; - mTrace("msg:%p, will be processed, type:%s app:%p", pMsg, TMSG_INFO(pMsg->msgType), ahandle); +static int32_t mndCheckMnodeState(SRpcMsg *pMsg) { + if (mndAcquireRpcRef(pMsg->info.node) == 0) return 0; - if (IsReq(pMsg)) { - if (!mndIsMaster(pMnode)) { - terrno = TSDB_CODE_APP_NOT_READY; - mDebug("msg:%p, failed to process since %s, app:%p", pMsg, terrstr(), ahandle); - return -1; - } + if (IsReq(pMsg) && pMsg->msgType != TDMT_MND_MQ_TIMER && pMsg->msgType != TDMT_MND_TELEM_TIMER && + pMsg->msgType != TDMT_MND_TRANS_TIMER) { + mError("msg:%p, failed to check mnode state since %s, app:%p type:%s", pMsg, terrstr(), pMsg->info.ahandle, + TMSG_INFO(pMsg->msgType)); - if (pMsg->contLen == 0 || pMsg->pCont == NULL) { - terrno = TSDB_CODE_INVALID_MSG_LEN; - mError("msg:%p, failed to process since %s, app:%p", pMsg, terrstr(), ahandle); - return -1; + SEpSet epSet = {0}; + mndGetMnodeEpSet(pMsg->info.node, &epSet); + + int32_t contLen = tSerializeSEpSet(NULL, 0, &epSet); + pMsg->info.rsp = rpcMallocCont(contLen); + if (pMsg->info.rsp != NULL) { + tSerializeSEpSet(pMsg->info.rsp, contLen, &epSet); + pMsg->info.rspLen = contLen; + terrno = TSDB_CODE_RPC_REDIRECT; + } else { + terrno = TSDB_CODE_OUT_OF_MEMORY; + } } + return -1; +} + +static int32_t mndCheckMsgContent(SRpcMsg *pMsg) { + if (!IsReq(pMsg)) return 0; + if (pMsg->contLen != 0 && pMsg->pCont != NULL) return 0; + + mError("msg:%p, failed to check msg content, app:%p type:%s", pMsg, pMsg->info.ahandle, TMSG_INFO(pMsg->msgType)); + terrno = TSDB_CODE_INVALID_MSG_LEN; + return -1; +} + +int32_t mndProcessRpcMsg(SRpcMsg *pMsg) { + SMnode *pMnode = pMsg->info.node; MndMsgFp fp = pMnode->msgFp[TMSG_INDEX(pMsg->msgType)]; if (fp == NULL) { + mError("msg:%p, failed to get msg handle, app:%p type:%s", pMsg, pMsg->info.ahandle, TMSG_INFO(pMsg->msgType)); terrno = TSDB_CODE_MSG_NOT_PROCESSED; - mError("msg:%p, failed to process since no msg handle, app:%p", pMsg, ahandle); return -1; } + if (mndCheckMsgContent(pMsg) != 0) return -1; + if (mndCheckMnodeState(pMsg) != 0) return -1; + + mTrace("msg:%p, start to process in mnode, app:%p type:%s", pMsg, pMsg->info.ahandle, TMSG_INFO(pMsg->msgType)); int32_t code = (*fp)(pMsg); + mndReleaseRpcRef(pMnode); + if (code == TSDB_CODE_ACTION_IN_PROGRESS) { - terrno = code; - mTrace("msg:%p, in progress, app:%p", pMsg, ahandle); - } else if (code != 0) { - if (terrno != TSDB_CODE_OPS_NOT_SUPPORT) { - mError("msg:%p, failed to process since %s, app:%p", pMsg, terrstr(), ahandle); - } else { - mTrace("msg:%p, failed to process since %s, app:%p", pMsg, terrstr(), ahandle); - } + mTrace("msg:%p, won't respond immediately since in progress", pMsg); + } else if (code == 0) { + mTrace("msg:%p, successfully processed and will respond", pMsg); } else { - mTrace("msg:%p, is processed, app:%p", pMsg, ahandle); + mError("msg:%p, failed to process since %s, app:%p type:%s", pMsg, terrstr(), pMsg->info.ahandle, + TMSG_INFO(pMsg->msgType)); } return code; @@ -474,7 +502,7 @@ int64_t mndGenerateUid(char *name, int32_t len) { int32_t mndGetMonitorInfo(SMnode *pMnode, SMonClusterInfo *pClusterInfo, SMonVgroupInfo *pVgroupInfo, SMonGrantInfo *pGrantInfo) { - if (!mndIsMaster(pMnode)) return -1; + if (mndAcquireRpcRef(pMnode) != 0) return -1; SSdb *pSdb = pMnode->pSdb; int64_t ms = taosGetTimestampMs(); @@ -483,6 +511,7 @@ int32_t mndGetMonitorInfo(SMnode *pMnode, SMonClusterInfo *pClusterInfo, SMonVgr pClusterInfo->mnodes = taosArrayInit(sdbGetSize(pSdb, SDB_MNODE), sizeof(SMonMnodeDesc));
pVgroupInfo->vgroups = taosArrayInit(sdbGetSize(pSdb, SDB_VGROUP), sizeof(SMonVgroupDesc)); if (pClusterInfo->dnodes == NULL || pClusterInfo->mnodes == NULL || pVgroupInfo->vgroups == NULL) { + mndReleaseRpcRef(pMnode); return -1; } @@ -577,6 +606,7 @@ int32_t mndGetMonitorInfo(SMnode *pMnode, SMonClusterInfo *pClusterInfo, SMonVgr pGrantInfo->timeseries_total = INT32_MAX; } + mndReleaseRpcRef(pMnode); return 0; } @@ -584,3 +614,76 @@ int32_t mndGetLoad(SMnode *pMnode, SMnodeLoad *pLoad) { pLoad->syncState = syncGetMyRole(pMnode->syncMgmt.sync); return 0; } + +int32_t mndAcquireRpcRef(SMnode *pMnode) { + int32_t code = 0; + taosThreadRwlockRdlock(&pMnode->lock); + if (pMnode->stopped) { + terrno = TSDB_CODE_APP_NOT_READY; + code = -1; + } else if (!mndIsMaster(pMnode)) { + code = -1; + } else { + int32_t ref = atomic_add_fetch_32(&pMnode->rpcRef, 1); + mTrace("mnode rpc is acquired, ref:%d", ref); + } + taosThreadRwlockUnlock(&pMnode->lock); + return code; +} + +void mndReleaseRpcRef(SMnode *pMnode) { + taosThreadRwlockRdlock(&pMnode->lock); + int32_t ref = atomic_sub_fetch_32(&pMnode->rpcRef, 1); + mTrace("mnode rpc is released, ref:%d", ref); + taosThreadRwlockUnlock(&pMnode->lock); +} + +void mndSetRestore(SMnode *pMnode, bool restored) { + if (restored) { + taosThreadRwlockWrlock(&pMnode->lock); + pMnode->restored = true; + taosThreadRwlockUnlock(&pMnode->lock); + mTrace("mnode set restored:%d", restored); + } else { + taosThreadRwlockWrlock(&pMnode->lock); + pMnode->restored = false; + taosThreadRwlockUnlock(&pMnode->lock); + mTrace("mnode set restored:%d", restored); + while (1) { + if (pMnode->rpcRef <= 0) break; + taosMsleep(3); + } + } +} + +bool mndGetRestored(SMnode *pMnode) { return pMnode->restored; } + +void mndSetStop(SMnode *pMnode) { + taosThreadRwlockWrlock(&pMnode->lock); + pMnode->stopped = true; + taosThreadRwlockUnlock(&pMnode->lock); + mTrace("mnode set stopped"); +} + +bool mndGetStop(SMnode *pMnode) { return pMnode->stopped; } + +int32_t mndAcquireSyncRef(SMnode *pMnode) { + int32_t code = 0; + taosThreadRwlockRdlock(&pMnode->lock); + if (pMnode->stopped) { + terrno = TSDB_CODE_APP_NOT_READY; + code = -1; + } else { + int32_t ref = atomic_add_fetch_32(&pMnode->syncRef, 1); + mTrace("mnode sync is acquired, ref:%d", ref); + } + taosThreadRwlockUnlock(&pMnode->lock); + return code; +} + +void mndReleaseSyncRef(SMnode *pMnode) { + taosThreadRwlockRdlock(&pMnode->lock); + int32_t ref = atomic_sub_fetch_32(&pMnode->syncRef, 1); + mTrace("mnode sync is released, ref:%d", ref); + taosThreadRwlockUnlock(&pMnode->lock); +} diff --git a/source/dnode/mnode/impl/src/mndMnode.c b/source/dnode/mnode/impl/src/mndMnode.c index 82e6256295782f03b4cebf83f267c8e59a4fe1c0..013a991e98d1b57c88d0cbf262877afa05d587f6 100644 --- a/source/dnode/mnode/impl/src/mndMnode.c +++ b/source/dnode/mnode/impl/src/mndMnode.c @@ -18,6 +18,7 @@ #include "mndAuth.h" #include "mndDnode.h" #include "mndShow.h" +#include "mndSync.h" #include "mndTrans.h" #include "mndUser.h" @@ -52,10 +53,10 @@ int32_t mndInitMnode(SMnode *pMnode) { }; mndSetMsgHandle(pMnode, TDMT_MND_CREATE_MNODE, mndProcessCreateMnodeReq); - mndSetMsgHandle(pMnode, TDMT_DND_ALTER_MNODE, mndProcessAlterMnodeReq); + mndSetMsgHandle(pMnode, TDMT_MND_ALTER_MNODE, mndProcessAlterMnodeReq); mndSetMsgHandle(pMnode, TDMT_MND_DROP_MNODE, mndProcessDropMnodeReq); mndSetMsgHandle(pMnode, TDMT_DND_CREATE_MNODE_RSP, mndProcessCreateMnodeRsp); - mndSetMsgHandle(pMnode, TDMT_DND_ALTER_MNODE_RSP, mndProcessAlterMnodeRsp); + mndSetMsgHandle(pMnode, 
TDMT_MND_ALTER_MNODE_RSP, mndProcessAlterMnodeRsp); mndSetMsgHandle(pMnode, TDMT_DND_DROP_MNODE_RSP, mndProcessDropMnodeRsp); mndAddShowRetrieveHandle(pMnode, TSDB_MGMT_TABLE_MNODE, mndRetrieveMnodes); @@ -89,12 +90,9 @@ static int32_t mndCreateDefaultMnode(SMnode *pMnode) { if (pRaw == NULL) return -1; sdbSetRawStatus(pRaw, SDB_STATUS_READY); - mDebug("mnode:%d, will be created while deploy sdb, raw:%p", mnodeObj.id, pRaw); + mDebug("mnode:%d, will be created when deploying, raw:%p", mnodeObj.id, pRaw); -#if 0 - return sdbWrite(pMnode->pSdb, pRaw); -#else - STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_TYPE_CREATE_DNODE, NULL); + STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_GLOBAL, NULL); if (pTrans == NULL) { mError("mnode:%d, failed to create since %s", mnodeObj.id, terrstr()); return -1; @@ -116,7 +114,6 @@ static int32_t mndCreateDefaultMnode(SMnode *pMnode) { mndTransDrop(pTrans); return 0; -#endif } static SSdbRaw *mndMnodeActionEncode(SMnodeObj *pObj) { @@ -222,23 +219,24 @@ bool mndIsMnode(SMnode *pMnode, int32_t dnodeId) { } void mndGetMnodeEpSet(SMnode *pMnode, SEpSet *pEpSet) { - SSdb *pSdb = pMnode->pSdb; - pEpSet->numOfEps = 0; + SSdb *pSdb = pMnode->pSdb; + int32_t totalMnodes = sdbGetSize(pSdb, SDB_MNODE); + void *pIter = NULL; - void *pIter = NULL; while (1) { SMnodeObj *pObj = NULL; pIter = sdbFetch(pSdb, SDB_MNODE, pIter, (void **)&pObj); if (pIter == NULL) break; - if (pObj->pDnode == NULL) { - mError("mnode:%d, no corresponding dnode exists", pObj->id); - } else { - if (pObj->id == pMnode->selfDnodeId || pObj->state == TAOS_SYNC_STATE_LEADER) { + + if (pObj->id == pMnode->selfDnodeId) { + if (mndIsMaster(pMnode)) { pEpSet->inUse = pEpSet->numOfEps; + } else { + pEpSet->inUse = (pEpSet->numOfEps + 1) % totalMnodes; } - addEpIntoEpSet(pEpSet, pObj->pDnode->fqdn, pObj->pDnode->port); - sdbRelease(pSdb, pObj); } + addEpIntoEpSet(pEpSet, pObj->pDnode->fqdn, pObj->pDnode->port); + sdbRelease(pSdb, pObj); } } @@ -313,16 +311,16 @@ static int32_t mndSetCreateMnodeRedoActions(SMnode *pMnode, STrans *pTrans, SDno memcpy(createEpset.eps[0].fqdn, pDnode->fqdn, TSDB_FQDN_LEN); { - int32_t contLen = tSerializeSDCreateMnodeReq(NULL, 0, &alterReq); + int32_t contLen = tSerializeSDCreateMnodeReq(NULL, 0, &createReq); void *pReq = taosMemoryMalloc(contLen); - tSerializeSDCreateMnodeReq(pReq, contLen, &alterReq); + tSerializeSDCreateMnodeReq(pReq, contLen, &createReq); STransAction action = { - .epSet = alterEpset, + .epSet = createEpset, .pCont = pReq, .contLen = contLen, - .msgType = TDMT_DND_ALTER_MNODE, - .acceptableCode = 0, + .msgType = TDMT_DND_CREATE_MNODE, + .acceptableCode = TSDB_CODE_NODE_ALREADY_DEPLOYED, }; if (mndTransAppendRedoAction(pTrans, &action) != 0) { @@ -332,16 +330,16 @@ static int32_t mndSetCreateMnodeRedoActions(SMnode *pMnode, STrans *pTrans, SDno } { - int32_t contLen = tSerializeSDCreateMnodeReq(NULL, 0, &createReq); + int32_t contLen = tSerializeSDCreateMnodeReq(NULL, 0, &alterReq); void *pReq = taosMemoryMalloc(contLen); - tSerializeSDCreateMnodeReq(pReq, contLen, &createReq); + tSerializeSDCreateMnodeReq(pReq, contLen, &alterReq); STransAction action = { - .epSet = createEpset, + .epSet = alterEpset, .pCont = pReq, .contLen = contLen, - .msgType = TDMT_DND_CREATE_MNODE, - .acceptableCode = TSDB_CODE_NODE_ALREADY_DEPLOYED, + .msgType = TDMT_MND_ALTER_MNODE, + .acceptableCode = 0, }; if (mndTransAppendRedoAction(pTrans, &action) != 0) { @@ -361,10 +359,11 @@ static int32_t mndCreateMnode(SMnode *pMnode, SRpcMsg 
*pReq, SDnodeObj *pDnode, mnodeObj.createdTime = taosGetTimestampMs(); mnodeObj.updateTime = mnodeObj.createdTime; - STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_TYPE_CREATE_MNODE, pReq); + STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_GLOBAL, pReq); if (pTrans == NULL) goto _OVER; mDebug("trans:%d, used to create mnode:%d", pTrans->id, pCreate->dnodeId); + mndTransSetSerial(pTrans); if (mndSetCreateMnodeRedoLogs(pMnode, pTrans, &mnodeObj) != 0) goto _OVER; if (mndSetCreateMnodeCommitLogs(pMnode, pTrans, &mnodeObj) != 0) goto _OVER; if (mndSetCreateMnodeRedoActions(pMnode, pTrans, pDnode, &mnodeObj) != 0) goto _OVER; @@ -401,12 +400,22 @@ static int32_t mndProcessCreateMnodeReq(SRpcMsg *pReq) { goto _OVER; } + if (sdbGetSize(pMnode->pSdb, SDB_MNODE) >= 3) { + terrno = TSDB_CODE_MND_TOO_MANY_MNODES; + goto _OVER; + } + pDnode = mndAcquireDnode(pMnode, createReq.dnodeId); if (pDnode == NULL) { terrno = TSDB_CODE_MND_DNODE_NOT_EXIST; goto _OVER; } + if (!mndIsDnodeOnline(pMnode, pDnode, taosGetTimestampMs())) { + terrno = TSDB_CODE_NODE_OFFLINE; + goto _OVER; + } + pUser = mndAcquireUser(pMnode, pReq->conn.user); if (pUser == NULL) { terrno = TSDB_CODE_MND_NO_USER_FROM_CONN; @@ -497,7 +506,7 @@ static int32_t mndSetDropMnodeRedoActions(SMnode *pMnode, STrans *pTrans, SDnode .epSet = alterEpset, .pCont = pReq, .contLen = contLen, - .msgType = TDMT_DND_ALTER_MNODE, + .msgType = TDMT_MND_ALTER_MNODE, .acceptableCode = 0, }; @@ -532,11 +541,11 @@ static int32_t mndSetDropMnodeRedoActions(SMnode *pMnode, STrans *pTrans, SDnode static int32_t mndDropMnode(SMnode *pMnode, SRpcMsg *pReq, SMnodeObj *pObj) { int32_t code = -1; - STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_TYPE_DROP_MNODE, pReq); + STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_GLOBAL, pReq); if (pTrans == NULL) goto _OVER; mDebug("trans:%d, used to drop mnode:%d", pTrans->id, pObj->id); - + mndTransSetSerial(pTrans); if (mndSetDropMnodeRedoLogs(pMnode, pTrans, pObj) != 0) goto _OVER; if (mndSetDropMnodeCommitLogs(pMnode, pTrans, pObj) != 0) goto _OVER; if (mndSetDropMnodeRedoActions(pMnode, pTrans, pObj->pDnode, pObj) != 0) goto _OVER; @@ -628,10 +637,12 @@ static int32_t mndRetrieveMnodes(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pB int32_t numOfRows = 0; int32_t cols = 0; SMnodeObj *pObj = NULL; + ESdbStatus objStatus; char *pWrite; + int64_t curMs = taosGetTimestampMs(); while (numOfRows < rows) { - pShow->pIter = sdbFetch(pSdb, SDB_MNODE, pShow->pIter, (void **)&pObj); + pShow->pIter = sdbFetchAll(pSdb, SDB_MNODE, pShow->pIter, (void **)&pObj, &objStatus); if (pShow->pIter == NULL) break; cols = 0; @@ -644,18 +655,26 @@ static int32_t mndRetrieveMnodes(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pB pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); colDataAppend(pColInfo, numOfRows, b1, false); - const char *roles = NULL; + const char *roles = "offline"; if (pObj->id == pMnode->selfDnodeId) { roles = syncStr(TAOS_SYNC_STATE_LEADER); - } else { + } + if (pObj->pDnode && mndIsDnodeOnline(pMnode, pObj->pDnode, curMs)) { roles = syncStr(pObj->state); } - char *b2 = taosMemoryCalloc(1, 12 + VARSTR_HEADER_SIZE); + char b2[12 + VARSTR_HEADER_SIZE] = {0}; STR_WITH_MAXSIZE_TO_VARSTR(b2, roles, pShow->pMeta->pSchemas[cols].bytes); - pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); colDataAppend(pColInfo, numOfRows, (const char *)b2, false); + const char *status = "ready"; + if (objStatus == SDB_STATUS_CREATING) status = "creating"; + if (objStatus 
== SDB_STATUS_DROPPING) status = "dropping"; + char b3[9 + VARSTR_HEADER_SIZE] = {0}; + STR_WITH_MAXSIZE_TO_VARSTR(b3, status, pShow->pMeta->pSchemas[cols].bytes); + pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); + colDataAppend(pColInfo, numOfRows, (const char *)b3, false); + pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); colDataAppend(pColInfo, numOfRows, (const char *)&pObj->createdTime, false); @@ -701,14 +720,17 @@ static int32_t mndProcessAlterMnodeReq(SRpcMsg *pReq) { } } + mTrace("trans:-1, sync reconfig will be proposed"); + SSyncMgmt *pMgmt = &pMnode->syncMgmt; pMgmt->standby = 0; int32_t code = syncReconfig(pMgmt->sync, &cfg); if (code != 0) { - mError("failed to alter mnode sync since %s", terrstr()); + mError("trans:-1, failed to propose sync reconfig since %s", terrstr()); return code; } else { pMgmt->errCode = 0; + pMgmt->transId = -1; tsem_wait(&pMgmt->syncSem); mInfo("alter mnode sync result:%s", tstrerror(pMgmt->errCode)); terrno = pMgmt->errCode; diff --git a/source/dnode/mnode/impl/src/mndOffset.c b/source/dnode/mnode/impl/src/mndOffset.c index dca07f6a6d2910630a939d119b6d21e287112866..00c8bb30d03d87545750b87d9eddab9efb8e821e 100644 --- a/source/dnode/mnode/impl/src/mndOffset.c +++ b/source/dnode/mnode/impl/src/mndOffset.c @@ -21,6 +21,7 @@ #include "mndMnode.h" #include "mndShow.h" #include "mndStb.h" +#include "mndTopic.h" #include "mndTrans.h" #include "mndUser.h" #include "mndVgroup.h" @@ -58,6 +59,12 @@ bool mndOffsetFromTopic(SMqOffsetObj *pOffset, const char *topic) { return false; } +bool mndOffsetFromSubKey(SMqOffsetObj *pOffset, const char *subKey) { + int32_t i = 0; + while (pOffset->key[i] != ':') i++; + if (strcmp(&pOffset->key[i + 1], subKey) == 0) return true; + return false; +} SSdbRaw *mndOffsetActionEncode(SMqOffsetObj *pOffset) { terrno = TSDB_CODE_OUT_OF_MEMORY; void *buf = NULL; @@ -172,7 +179,7 @@ static int32_t mndProcessCommitOffsetReq(SRpcMsg *pMsg) { tDecodeSMqCMCommitOffsetReq(&decoder, &commitOffsetReq); - STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_TYPE_COMMIT_OFFSET, pMsg); + STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_CONFLICT_NOTHING, pMsg); for (int32_t i = 0; i < commitOffsetReq.num; i++) { SMqOffset *pOffset = &commitOffsetReq.offsets[i]; @@ -182,7 +189,15 @@ static int32_t mndProcessCommitOffsetReq(SRpcMsg *pMsg) { bool create = false; SMqOffsetObj *pOffsetObj = mndAcquireOffset(pMnode, key); if (pOffsetObj == NULL) { + SMqTopicObj *pTopic = mndAcquireTopic(pMnode, pOffset->topicName); + if (pTopic == NULL) { + terrno = TSDB_CODE_MND_TOPIC_NOT_EXIST; + mError("submit offset to topic %s failed since %s", pOffset->topicName, terrstr()); + continue; + } pOffsetObj = taosMemoryMalloc(sizeof(SMqOffsetObj)); + pOffsetObj->dbUid = pTopic->dbUid; + mndReleaseTopic(pMnode, pTopic); memcpy(pOffsetObj->key, key, TSDB_PARTITION_KEY_LEN); create = true; } @@ -303,7 +318,35 @@ int32_t mndDropOffsetByTopic(SMnode *pMnode, STrans *pTrans, const char *topic) continue; } - if (mndSetDropOffsetRedoLogs(pMnode, pTrans, pOffset) < 0) { + if (mndSetDropOffsetCommitLogs(pMnode, pTrans, pOffset) < 0) { + sdbRelease(pSdb, pOffset); + goto END; + } + + sdbRelease(pSdb, pOffset); + } + + code = 0; +END: + return code; +} + +int32_t mndDropOffsetBySubKey(SMnode *pMnode, STrans *pTrans, const char *subKey) { + int32_t code = -1; + SSdb *pSdb = pMnode->pSdb; + + void *pIter = NULL; + SMqOffsetObj *pOffset = NULL; + while (1) { + pIter = sdbFetch(pSdb, SDB_OFFSET, pIter, (void **)&pOffset); + if (pIter == NULL) 
break; + + if (!mndOffsetFromSubKey(pOffset, subKey)) { + sdbRelease(pSdb, pOffset); + continue; + } + + if (mndSetDropOffsetCommitLogs(pMnode, pTrans, pOffset) < 0) { sdbRelease(pSdb, pOffset); goto END; } diff --git a/source/dnode/mnode/impl/src/mndProfile.c b/source/dnode/mnode/impl/src/mndProfile.c index c9c52af0fe3ef377317530c26648c811d1112c95..bacdf2f3665fc144eec8320e68fc38fab330e34c 100644 --- a/source/dnode/mnode/impl/src/mndProfile.c +++ b/source/dnode/mnode/impl/src/mndProfile.c @@ -18,6 +18,7 @@ #include "mndDb.h" #include "mndDnode.h" #include "mndMnode.h" +#include "mndQnode.h" #include "mndShow.h" #include "mndStb.h" #include "mndUser.h" @@ -382,6 +383,9 @@ static int32_t mndProcessQueryHeartBeat(SMnode *pMnode, SRpcMsg *pMsg, SClientHb rspBasic->totalDnodes = mndGetDnodeSize(pMnode); rspBasic->onlineDnodes = 1; // TODO mndGetMnodeEpSet(pMnode, &rspBasic->epSet); + + mndCreateQnodeList(pMnode, &rspBasic->pQnodeList, -1); + mndReleaseConn(pMnode, pConn); hbRsp.query = rspBasic; diff --git a/source/dnode/mnode/impl/src/mndQnode.c b/source/dnode/mnode/impl/src/mndQnode.c index 3dc6200229b8a519fcf193393535500e98f4df20..27881865af11913b4a04c4fc84df115e98823fd1 100644 --- a/source/dnode/mnode/impl/src/mndQnode.c +++ b/source/dnode/mnode/impl/src/mndQnode.c @@ -60,7 +60,7 @@ int32_t mndInitQnode(SMnode *pMnode) { void mndCleanupQnode(SMnode *pMnode) {} -static SQnodeObj *mndAcquireQnode(SMnode *pMnode, int32_t qnodeId) { +SQnodeObj *mndAcquireQnode(SMnode *pMnode, int32_t qnodeId) { SQnodeObj *pObj = sdbAcquire(pMnode->pSdb, SDB_QNODE, &qnodeId); if (pObj == NULL && terrno == TSDB_CODE_SDB_OBJ_NOT_THERE) { terrno = TSDB_CODE_MND_QNODE_NOT_EXIST; @@ -68,7 +68,7 @@ static SQnodeObj *mndAcquireQnode(SMnode *pMnode, int32_t qnodeId) { return pObj; } -static void mndReleaseQnode(SMnode *pMnode, SQnodeObj *pObj) { +void mndReleaseQnode(SMnode *pMnode, SQnodeObj *pObj) { SSdb *pSdb = pMnode->pSdb; sdbRelease(pSdb, pObj); } @@ -248,7 +248,7 @@ static int32_t mndCreateQnode(SMnode *pMnode, SRpcMsg *pReq, SDnodeObj *pDnode, qnodeObj.createdTime = taosGetTimestampMs(); qnodeObj.updateTime = qnodeObj.createdTime; - STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_TYPE_CREATE_QNODE, pReq); + STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_CONFLICT_NOTHING, pReq); if (pTrans == NULL) goto _OVER; mDebug("trans:%d, used to create qnode:%d", pTrans->id, pCreate->dnodeId); @@ -365,7 +365,7 @@ static int32_t mndSetDropQnodeRedoActions(STrans *pTrans, SDnodeObj *pDnode, SQn static int32_t mndDropQnode(SMnode *pMnode, SRpcMsg *pReq, SQnodeObj *pObj) { int32_t code = -1; - STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_TYPE_DROP_QNODE, pReq); + STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_NOTHING, pReq); if (pTrans == NULL) goto _OVER; mDebug("trans:%d, used to drop qnode:%d", pTrans->id, pObj->id); @@ -429,49 +429,62 @@ _OVER: return code; } -static int32_t mndProcessQnodeListReq(SRpcMsg *pReq) { - int32_t code = -1; - int32_t numOfRows = 0; - SMnode *pMnode = pReq->info.node; - SSdb *pSdb = pMnode->pSdb; +int32_t mndCreateQnodeList(SMnode *pMnode, SArray** pList, int32_t limit) { + SSdb *pSdb = pMnode->pSdb; + void *pIter = NULL; SQnodeObj *pObj = NULL; - SQnodeListReq qlistReq = {0}; - SQnodeListRsp qlistRsp = {0}; - - if (tDeserializeSQnodeListReq(pReq->pCont, pReq->contLen, &qlistReq) != 0) { - mError("failed to parse qnode list req"); - terrno = TSDB_CODE_INVALID_MSG; - goto _OVER; - } + int32_t numOfRows = 0; - 
qlistRsp.addrsList = taosArrayInit(5, sizeof(SQueryNodeAddr)); - if (NULL == qlistRsp.addrsList) { + SArray* qnodeList = taosArrayInit(5, sizeof(SQueryNodeLoad)); + if (NULL == qnodeList) { mError("failed to alloc epSet while process qnode list req"); terrno = TSDB_CODE_OUT_OF_MEMORY; - goto _OVER; + return terrno; } - - void *pIter = NULL; + while (1) { pIter = sdbFetch(pSdb, SDB_QNODE, pIter, (void **)&pObj); if (pIter == NULL) break; - SQueryNodeAddr nodeAddr = {0}; - nodeAddr.nodeId = QNODE_HANDLE; - nodeAddr.epSet.numOfEps = 1; - tstrncpy(nodeAddr.epSet.eps[0].fqdn, pObj->pDnode->fqdn, TSDB_FQDN_LEN); - nodeAddr.epSet.eps[0].port = pObj->pDnode->port; + SQueryNodeLoad nodeLoad = {0}; + nodeLoad.addr.nodeId = QNODE_HANDLE; + nodeLoad.addr.epSet.numOfEps = 1; + tstrncpy(nodeLoad.addr.epSet.eps[0].fqdn, pObj->pDnode->fqdn, TSDB_FQDN_LEN); + nodeLoad.addr.epSet.eps[0].port = pObj->pDnode->port; + nodeLoad.load = QNODE_LOAD_VALUE(pObj); - (void)taosArrayPush(qlistRsp.addrsList, &nodeAddr); + (void)taosArrayPush(qnodeList, &nodeLoad); numOfRows++; sdbRelease(pSdb, pObj); - if (qlistReq.rowNum > 0 && numOfRows >= qlistReq.rowNum) { + if (limit > 0 && numOfRows >= limit) { break; } } + *pList = qnodeList; + + return TSDB_CODE_SUCCESS; +} + + +static int32_t mndProcessQnodeListReq(SRpcMsg *pReq) { + int32_t code = -1; + SMnode *pMnode = pReq->info.node; + SQnodeListReq qlistReq = {0}; + SQnodeListRsp qlistRsp = {0}; + + if (tDeserializeSQnodeListReq(pReq->pCont, pReq->contLen, &qlistReq) != 0) { + mError("failed to parse qnode list req"); + terrno = TSDB_CODE_INVALID_MSG; + goto _OVER; + } + + if (mndCreateQnodeList(pMnode, &qlistRsp.qnodeList, qlistReq.rowNum) != 0) { + goto _OVER; + } + int32_t rspLen = tSerializeSQnodeListRsp(NULL, 0, &qlistRsp); void *pRsp = rpcMallocCont(rspLen); if (pRsp == NULL) { diff --git a/source/dnode/mnode/impl/src/mndQuery.c b/source/dnode/mnode/impl/src/mndQuery.c index 78b70c9a74133b859b4175b195d4a939c37ebccc..97594f2b913334ac17e2bd5e6c8fc95e19a03e9e 100644 --- a/source/dnode/mnode/impl/src/mndQuery.c +++ b/source/dnode/mnode/impl/src/mndQuery.c @@ -26,19 +26,19 @@ int32_t mndProcessQueryMsg(SRpcMsg *pMsg) { mTrace("msg:%p, in query queue is processing", pMsg); switch (pMsg->msgType) { case TDMT_VND_QUERY: - code = qWorkerProcessQueryMsg(&handle, pMnode->pQuery, pMsg); + code = qWorkerProcessQueryMsg(&handle, pMnode->pQuery, pMsg, 0); break; case TDMT_VND_QUERY_CONTINUE: - code = qWorkerProcessCQueryMsg(&handle, pMnode->pQuery, pMsg); + code = qWorkerProcessCQueryMsg(&handle, pMnode->pQuery, pMsg, 0); break; case TDMT_VND_FETCH: - code = qWorkerProcessFetchMsg(pMnode, pMnode->pQuery, pMsg); + code = qWorkerProcessFetchMsg(pMnode, pMnode->pQuery, pMsg, 0); break; case TDMT_VND_DROP_TASK: - code = qWorkerProcessDropMsg(pMnode, pMnode->pQuery, pMsg); + code = qWorkerProcessDropMsg(pMnode, pMnode->pQuery, pMsg, 0); break; case TDMT_VND_QUERY_HEARTBEAT: - code = qWorkerProcessHbMsg(pMnode, pMnode->pQuery, pMsg); + code = qWorkerProcessHbMsg(pMnode, pMnode->pQuery, pMsg, 0); break; default: terrno = TSDB_CODE_VND_APP_ERROR; diff --git a/source/dnode/mnode/impl/src/mndScheduler.c b/source/dnode/mnode/impl/src/mndScheduler.c index 58b51e4c548106a393b65ab3142064cd0c249481..b390a7fe4a37bcb057fcc19837a58eb08d277799 100644 --- a/source/dnode/mnode/impl/src/mndScheduler.c +++ b/source/dnode/mnode/impl/src/mndScheduler.c @@ -36,7 +36,7 @@ extern bool tsStreamSchedV; int32_t mndConvertRSmaTask(const char* ast, int64_t uid, int8_t triggerType, int64_t watermark, char** pStr, - 
int32_t* pLen) { + int32_t* pLen, double filesFactor) { SNode* pAst = NULL; SQueryPlan* pPlan = NULL; terrno = TSDB_CODE_SUCCESS; @@ -58,6 +58,7 @@ int32_t mndConvertRSmaTask(const char* ast, int64_t uid, int8_t triggerType, int .rSmaQuery = true, .triggerType = triggerType, .watermark = watermark, + .filesFactor = filesFactor, }; if (qCreateQueryPlan(&cxt, &pPlan, NULL) < 0) { terrno = TSDB_CODE_QRY_INVALID_INPUT; @@ -286,7 +287,7 @@ int32_t mndScheduleStream(SMnode* pMnode, STrans* pTrans, SStreamObj* pStream) { pStream->tasks = taosArrayInit(totLevel, sizeof(void*)); bool hasExtraSink = false; - if (totLevel == 2) { + if (totLevel == 2 || strcmp(pStream->sourceDb, pStream->targetDb) != 0) { SArray* taskOneLevel = taosArrayInit(0, sizeof(void*)); taosArrayPush(pStream->tasks, &taskOneLevel); // add extra sink @@ -407,7 +408,7 @@ int32_t mndScheduleStream(SMnode* pMnode, STrans* pTrans, SStreamObj* pStream) { /*pTask->dispatchMsgType = TDMT_VND_TASK_WRITE_EXEC;*/ pTask->dispatchMsgType = TDMT_VND_TASK_DISPATCH; - SDbObj* pDb = mndAcquireDb(pMnode, pStream->sourceDb); + SDbObj* pDb = mndAcquireDb(pMnode, pStream->targetDb); ASSERT(pDb); if (mndExtractDbInfo(pMnode, pDb, &pTask->shuffleDispatcher.dbInfo, NULL) < 0) { sdbRelease(pSdb, pDb); @@ -506,7 +507,7 @@ int32_t mndSchedInitSubEp(SMnode* pMnode, const SMqTopicObj* pTopic, SMqSubscrib SQueryPlan* pPlan = NULL; SSubplan* plan = NULL; - if (pTopic->subType == TOPIC_SUB_TYPE__TABLE) { + if (pTopic->subType == TOPIC_SUB_TYPE__COLUMN) { pPlan = qStringToQueryPlan(pTopic->physicalPlan); if (pPlan == NULL) { terrno = TSDB_CODE_QRY_INVALID_INPUT; @@ -552,7 +553,7 @@ int32_t mndSchedInitSubEp(SMnode* pMnode, const SMqTopicObj* pTopic, SMqSubscrib mDebug("init subscription %s, assign vg: %d", pSub->key, pVgEp->vgId); - if (pTopic->subType == TOPIC_SUB_TYPE__TABLE) { + if (pTopic->subType == TOPIC_SUB_TYPE__COLUMN) { int32_t msgLen; plan->execNode.epSet = pVgEp->epSet; diff --git a/source/dnode/mnode/impl/src/mndSma.c b/source/dnode/mnode/impl/src/mndSma.c index cb7d3e81f631c34d278f15c7427c141dbf30ead9..6cb70d1f27895cd64f08fdb383f4072739e03f52 100644 --- a/source/dnode/mnode/impl/src/mndSma.c +++ b/source/dnode/mnode/impl/src/mndSma.c @@ -409,7 +409,8 @@ static int32_t mndSetCreateSmaRedoActions(SMnode *pMnode, STrans *pTrans, SDbObj return 0; } -static int32_t mndSetCreateSmaVgroupRedoActions(SMnode *pMnode, STrans *pTrans, SDbObj *pDb, SVgObj *pVgroup) { +static int32_t mndSetCreateSmaVgroupRedoActions(SMnode *pMnode, STrans *pTrans, SDbObj *pDb, SVgObj *pVgroup, + SSmaObj *pSma) { SVnodeGid *pVgid = pVgroup->vnodeGid + 0; SDnodeObj *pDnode = mndAcquireDnode(pMnode, pVgid->dnodeId); if (pDnode == NULL) return -1; @@ -419,9 +420,14 @@ static int32_t mndSetCreateSmaVgroupRedoActions(SMnode *pMnode, STrans *pTrans, mndReleaseDnode(pMnode, pDnode); // todo add sma info here + int32_t smaContLen = 0; + void *pSmaReq = mndBuildVCreateSmaReq(pMnode, pVgroup, pSma, &smaContLen); + if (pSmaReq == NULL) return -1; + pVgroup->pTsma = pSmaReq; int32_t contLen = 0; - void *pReq = mndBuildCreateVnodeReq(pMnode, pDnode, pDb, pVgroup, &contLen); + void *pReq = mndBuildCreateVnodeReq(pMnode, pDnode, pDb, pVgroup, &contLen, false); + taosMemoryFreeClear(pSmaReq); if (pReq == NULL) return -1; action.pCont = pReq; @@ -502,18 +508,19 @@ static int32_t mndCreateSma(SMnode *pMnode, SRpcMsg *pReq, SMCreateSmaReq *pCrea streamObj.fixedSinkVgId = smaObj.dstVgId; int32_t code = -1; - STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_TYPE_CREATE_SMA, pReq); + 
STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_DB, pReq); if (pTrans == NULL) goto _OVER; mDebug("trans:%d, used to create sma:%s", pTrans->id, pCreate->name); - mndTransSetDbInfo(pTrans, pDb); + mndTransSetDbName(pTrans, pDb->name); + mndTransSetSerial(pTrans); if (mndSetCreateSmaRedoLogs(pMnode, pTrans, &smaObj) != 0) goto _OVER; if (mndSetCreateSmaVgroupRedoLogs(pMnode, pTrans, &streamObj.fixedSinkVg) != 0) goto _OVER; if (mndSetCreateSmaCommitLogs(pMnode, pTrans, &smaObj) != 0) goto _OVER; if (mndSetCreateSmaVgroupCommitLogs(pMnode, pTrans, &streamObj.fixedSinkVg) != 0) goto _OVER; if (mndSetCreateSmaRedoActions(pMnode, pTrans, pDb, &smaObj) != 0) goto _OVER; - if (mndSetCreateSmaVgroupRedoActions(pMnode, pTrans, pDb, &streamObj.fixedSinkVg) != 0) goto _OVER; + if (mndSetCreateSmaVgroupRedoActions(pMnode, pTrans, pDb, &streamObj.fixedSinkVg, &smaObj) != 0) goto _OVER; if (mndAddStreamToTrans(pMnode, &streamObj, pCreate->ast, STREAM_TRIGGER_AT_ONCE, 0, pTrans) != 0) goto _OVER; if (mndTransPrepare(pMnode, pTrans) != 0) goto _OVER; @@ -746,11 +753,11 @@ static int32_t mndDropSma(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pDb, SSmaObj *p pVgroup = mndAcquireVgroup(pMnode, pSma->dstVgId); if (pVgroup == NULL) goto _OVER; - pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_TYPE_DROP_SMA, pReq); + pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_CONFLICT_DB, pReq); if (pTrans == NULL) goto _OVER; mDebug("trans:%d, used to drop sma:%s", pTrans->id, pSma->name); - mndTransSetDbInfo(pTrans, pDb); + mndTransSetDbName(pTrans, pDb->name); if (mndSetDropSmaRedoLogs(pMnode, pTrans, pSma) != 0) goto _OVER; if (mndSetDropSmaVgroupRedoLogs(pMnode, pTrans, pVgroup) != 0) goto _OVER; diff --git a/source/dnode/mnode/impl/src/mndSnode.c b/source/dnode/mnode/impl/src/mndSnode.c index 87b61f59ecb088692941a9f57ebf89db2cefa054..c6acb4fef4a09ef78c561178f11428cb3004b4f3 100644 --- a/source/dnode/mnode/impl/src/mndSnode.c +++ b/source/dnode/mnode/impl/src/mndSnode.c @@ -253,7 +253,7 @@ static int32_t mndCreateSnode(SMnode *pMnode, SRpcMsg *pReq, SDnodeObj *pDnode, snodeObj.createdTime = taosGetTimestampMs(); snodeObj.updateTime = snodeObj.createdTime; - STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_TYPE_CREATE_SNODE, pReq); + STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_CONFLICT_NOTHING, pReq); if (pTrans == NULL) goto _OVER; mDebug("trans:%d, used to create snode:%d", pTrans->id, pCreate->dnodeId); @@ -372,7 +372,7 @@ static int32_t mndSetDropSnodeRedoActions(STrans *pTrans, SDnodeObj *pDnode, SSn static int32_t mndDropSnode(SMnode *pMnode, SRpcMsg *pReq, SSnodeObj *pObj) { int32_t code = -1; - STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_TYPE_DROP_SNODE, pReq); + STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_NOTHING, pReq); if (pTrans == NULL) goto _OVER; mDebug("trans:%d, used to drop snode:%d", pTrans->id, pObj->id); diff --git a/source/dnode/mnode/impl/src/mndStb.c b/source/dnode/mnode/impl/src/mndStb.c index b33c09a0f9d0a4740a3b0b9ce9fb06dd5ea878ae..556837f397e0d9c09546d5fa9400654b44f39401 100644 --- a/source/dnode/mnode/impl/src/mndStb.c +++ b/source/dnode/mnode/impl/src/mndStb.c @@ -397,13 +397,13 @@ static void *mndBuildVCreateStbReq(SMnode *pMnode, SVgObj *pVgroup, SStbObj *pSt req.pRSmaParam.xFilesFactor = pStb->xFilesFactor; req.pRSmaParam.delay = pStb->delay; if (pStb->ast1Len > 0) { - if (mndConvertRSmaTask(pStb->pAst1, pStb->uid, 0, 0, &req.pRSmaParam.qmsg1, &req.pRSmaParam.qmsg1Len) 
!= + if (mndConvertRSmaTask(pStb->pAst1, pStb->uid, 0, 0, &req.pRSmaParam.qmsg1, &req.pRSmaParam.qmsg1Len, req.pRSmaParam.xFilesFactor) != TSDB_CODE_SUCCESS) { return NULL; } } if (pStb->ast2Len > 0) { - if (mndConvertRSmaTask(pStb->pAst2, pStb->uid, 0, 0, &req.pRSmaParam.qmsg2, &req.pRSmaParam.qmsg2Len) != + if (mndConvertRSmaTask(pStb->pAst2, pStb->uid, 0, 0, &req.pRSmaParam.qmsg2, &req.pRSmaParam.qmsg2Len, req.pRSmaParam.xFilesFactor) != TSDB_CODE_SUCCESS) { return NULL; } @@ -735,7 +735,7 @@ static int32_t mndCreateStb(SMnode *pMnode, SRpcMsg *pReq, SMCreateStbReq *pCrea int32_t code = -1; - STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_TYPE_CREATE_STB, pReq); + STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_CONFLICT_DB_INSIDE, pReq); if (pTrans == NULL) goto _OVER; mDebug("trans:%d, used to create stb:%s", pTrans->id, pCreate->name); @@ -754,7 +754,7 @@ _OVER: } int32_t mndAddStbToTrans(SMnode *pMnode, STrans *pTrans, SDbObj *pDb, SStbObj *pStb) { - mndTransSetDbInfo(pTrans, pDb); + mndTransSetDbName(pTrans, pDb->name); if (mndSetCreateStbRedoLogs(pMnode, pTrans, pDb, pStb) != 0) return -1; if (mndSetCreateStbUndoLogs(pMnode, pTrans, pDb, pStb) != 0) return -1; if (mndSetCreateStbCommitLogs(pMnode, pTrans, pDb, pStb) != 0) return -1; @@ -1207,13 +1207,125 @@ static int32_t mndSetAlterStbRedoActions(SMnode *pMnode, STrans *pTrans, SDbObj return 0; } + +static int32_t mndBuildStbSchemaImp(SDbObj *pDb, SStbObj *pStb, const char *tbName, STableMetaRsp *pRsp) { + taosRLockLatch(&pStb->lock); + + int32_t totalCols = pStb->numOfColumns + pStb->numOfTags; + pRsp->pSchemas = taosMemoryCalloc(totalCols, sizeof(SSchema)); + if (pRsp->pSchemas == NULL) { + taosRUnLockLatch(&pStb->lock); + terrno = TSDB_CODE_OUT_OF_MEMORY; + return -1; + } + + strcpy(pRsp->dbFName, pStb->db); + strcpy(pRsp->tbName, tbName); + strcpy(pRsp->stbName, tbName); + pRsp->dbId = pDb->uid; + pRsp->numOfTags = pStb->numOfTags; + pRsp->numOfColumns = pStb->numOfColumns; + pRsp->precision = pDb->cfg.precision; + pRsp->tableType = TSDB_SUPER_TABLE; + pRsp->sversion = pStb->colVer; + pRsp->tversion = pStb->tagVer; + pRsp->suid = pStb->uid; + pRsp->tuid = pStb->uid; + + for (int32_t i = 0; i < pStb->numOfColumns; ++i) { + SSchema *pSchema = &pRsp->pSchemas[i]; + SSchema *pSrcSchema = &pStb->pColumns[i]; + memcpy(pSchema->name, pSrcSchema->name, TSDB_COL_NAME_LEN); + pSchema->type = pSrcSchema->type; + pSchema->colId = pSrcSchema->colId; + pSchema->bytes = pSrcSchema->bytes; + } + + for (int32_t i = 0; i < pStb->numOfTags; ++i) { + SSchema *pSchema = &pRsp->pSchemas[i + pStb->numOfColumns]; + SSchema *pSrcSchema = &pStb->pTags[i]; + memcpy(pSchema->name, pSrcSchema->name, TSDB_COL_NAME_LEN); + pSchema->type = pSrcSchema->type; + pSchema->colId = pSrcSchema->colId; + pSchema->bytes = pSrcSchema->bytes; + } + + taosRUnLockLatch(&pStb->lock); + return 0; +} + +static int32_t mndBuildStbSchema(SMnode *pMnode, const char *dbFName, const char *tbName, STableMetaRsp *pRsp) { + char tbFName[TSDB_TABLE_FNAME_LEN] = {0}; + snprintf(tbFName, sizeof(tbFName), "%s.%s", dbFName, tbName); + + SDbObj *pDb = mndAcquireDb(pMnode, dbFName); + if (pDb == NULL) { + terrno = TSDB_CODE_MND_DB_NOT_SELECTED; + return -1; + } + + SStbObj *pStb = mndAcquireStb(pMnode, tbFName); + if (pStb == NULL) { + mndReleaseDb(pMnode, pDb); + terrno = TSDB_CODE_MND_INVALID_STB; + return -1; + } + + int32_t code = mndBuildStbSchemaImp(pDb, pStb, tbName, pRsp); + mndReleaseDb(pMnode, pDb); + mndReleaseStb(pMnode, pStb); + return code; 
+} + + +static int32_t mndBuildSMAlterStbRsp(SDbObj *pDb, const SMAlterStbReq *pAlter, SStbObj *pObj, void **pCont, int32_t *pLen) { + int ret; + SEncoder ec = {0}; + uint32_t contLen = 0; + SMAlterStbRsp alterRsp = {0}; + SName name = {0}; + tNameFromString(&name, pAlter->name, T_NAME_ACCT | T_NAME_DB | T_NAME_TABLE); + + alterRsp.pMeta = taosMemoryCalloc(1, sizeof(STableMetaRsp)); + if (NULL == alterRsp.pMeta) { + terrno = TSDB_CODE_OUT_OF_MEMORY; + return -1; + } + + ret = mndBuildStbSchemaImp(pDb, pObj, name.tname, alterRsp.pMeta); + if (ret) { + tFreeSMAlterStbRsp(&alterRsp); + return ret; + } + + tEncodeSize(tEncodeSMAlterStbRsp, &alterRsp, contLen, ret); + if (ret) { + tFreeSMAlterStbRsp(&alterRsp); + return ret; + } + + void* cont = taosMemoryMalloc(contLen); + tEncoderInit(&ec, cont, contLen); + tEncodeSMAlterStbRsp(&ec, &alterRsp); + tEncoderClear(&ec); + + tFreeSMAlterStbRsp(&alterRsp); + + *pCont = cont; + *pLen = contLen; + + return 0; +} + static int32_t mndAlterStb(SMnode *pMnode, SRpcMsg *pReq, const SMAlterStbReq *pAlter, SDbObj *pDb, SStbObj *pOld) { + bool needRsp = true; SStbObj stbObj = {0}; taosRLockLatch(&pOld->lock); memcpy(&stbObj, pOld, sizeof(SStbObj)); stbObj.pColumns = NULL; stbObj.pTags = NULL; stbObj.updateTime = taosGetTimestampMs(); + stbObj.lock = 0; taosRUnLockLatch(&pOld->lock); int32_t code = -1; @@ -1247,9 +1359,11 @@ static int32_t mndAlterStb(SMnode *pMnode, SRpcMsg *pReq, const SMAlterStbReq *p code = mndAlterStbColumnBytes(pOld, &stbObj, pField0); break; case TSDB_ALTER_TABLE_UPDATE_OPTIONS: + needRsp = false; code = mndUpdateStbCommentAndTTL(pOld, &stbObj, pAlter->comment, pAlter->commentLen, pAlter->ttl); break; default: + needRsp = false; terrno = TSDB_CODE_OPS_NOT_SUPPORT; break; } @@ -1257,12 +1371,19 @@ static int32_t mndAlterStb(SMnode *pMnode, SRpcMsg *pReq, const SMAlterStbReq *p if (code != 0) goto _OVER; code = -1; - pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_TYPE_ALTER_STB, pReq); + pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_DB_INSIDE, pReq); if (pTrans == NULL) goto _OVER; mDebug("trans:%d, used to alter stb:%s", pTrans->id, pAlter->name); - mndTransSetDbInfo(pTrans, pDb); + mndTransSetDbName(pTrans, pDb->name); + if (needRsp) { + void* pCont = NULL; + int32_t contLen = 0; + if (mndBuildSMAlterStbRsp(pDb, pAlter, &stbObj, &pCont, &contLen)) goto _OVER; + mndTransSetRpcRsp(pTrans, pCont, contLen); + } + if (mndSetAlterStbRedoLogs(pMnode, pTrans, pDb, &stbObj) != 0) goto _OVER; if (mndSetAlterStbCommitLogs(pMnode, pTrans, pDb, &stbObj) != 0) goto _OVER; if (mndSetAlterStbRedoActions(pMnode, pTrans, pDb, &stbObj) != 0) goto _OVER; @@ -1403,11 +1524,11 @@ static int32_t mndSetDropStbRedoActions(SMnode *pMnode, STrans *pTrans, SDbObj * static int32_t mndDropStb(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pDb, SStbObj *pStb) { int32_t code = -1; - STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_TYPE_DROP_STB, pReq); + STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_CONFLICT_DB_INSIDE, pReq); if (pTrans == NULL) goto _OVER; mDebug("trans:%d, used to drop stb:%s", pTrans->id, pStb->name); - mndTransSetDbInfo(pTrans, pDb); + mndTransSetDbName(pTrans, pDb->name); if (mndSetDropStbRedoLogs(pMnode, pTrans, pStb) != 0) goto _OVER; if (mndSetDropStbCommitLogs(pMnode, pTrans, pStb) != 0) goto _OVER; @@ -1483,75 +1604,6 @@ static int32_t mndProcessVDropStbRsp(SRpcMsg *pRsp) { return 0; } -static int32_t mndBuildStbSchemaImp(SDbObj *pDb, SStbObj *pStb, const char *tbName, STableMetaRsp *pRsp) 
{ - taosRLockLatch(&pStb->lock); - - int32_t totalCols = pStb->numOfColumns + pStb->numOfTags; - pRsp->pSchemas = taosMemoryCalloc(totalCols, sizeof(SSchema)); - if (pRsp->pSchemas == NULL) { - taosRUnLockLatch(&pStb->lock); - terrno = TSDB_CODE_OUT_OF_MEMORY; - return -1; - } - - strcpy(pRsp->dbFName, pStb->db); - strcpy(pRsp->tbName, tbName); - strcpy(pRsp->stbName, tbName); - pRsp->dbId = pDb->uid; - pRsp->numOfTags = pStb->numOfTags; - pRsp->numOfColumns = pStb->numOfColumns; - pRsp->precision = pDb->cfg.precision; - pRsp->tableType = TSDB_SUPER_TABLE; - pRsp->sversion = pStb->colVer; - pRsp->tversion = pStb->tagVer; - pRsp->suid = pStb->uid; - pRsp->tuid = pStb->uid; - - for (int32_t i = 0; i < pStb->numOfColumns; ++i) { - SSchema *pSchema = &pRsp->pSchemas[i]; - SSchema *pSrcSchema = &pStb->pColumns[i]; - memcpy(pSchema->name, pSrcSchema->name, TSDB_COL_NAME_LEN); - pSchema->type = pSrcSchema->type; - pSchema->colId = pSrcSchema->colId; - pSchema->bytes = pSrcSchema->bytes; - } - - for (int32_t i = 0; i < pStb->numOfTags; ++i) { - SSchema *pSchema = &pRsp->pSchemas[i + pStb->numOfColumns]; - SSchema *pSrcSchema = &pStb->pTags[i]; - memcpy(pSchema->name, pSrcSchema->name, TSDB_COL_NAME_LEN); - pSchema->type = pSrcSchema->type; - pSchema->colId = pSrcSchema->colId; - pSchema->bytes = pSrcSchema->bytes; - } - - taosRUnLockLatch(&pStb->lock); - return 0; -} - -static int32_t mndBuildStbSchema(SMnode *pMnode, const char *dbFName, const char *tbName, STableMetaRsp *pRsp) { - char tbFName[TSDB_TABLE_FNAME_LEN] = {0}; - snprintf(tbFName, sizeof(tbFName), "%s.%s", dbFName, tbName); - - SDbObj *pDb = mndAcquireDb(pMnode, dbFName); - if (pDb == NULL) { - terrno = TSDB_CODE_MND_DB_NOT_SELECTED; - return -1; - } - - SStbObj *pStb = mndAcquireStb(pMnode, tbFName); - if (pStb == NULL) { - mndReleaseDb(pMnode, pDb); - terrno = TSDB_CODE_MND_INVALID_STB; - return -1; - } - - int32_t code = mndBuildStbSchemaImp(pDb, pStb, tbName, pRsp); - mndReleaseDb(pMnode, pDb); - mndReleaseStb(pMnode, pStb); - return code; -} - static int32_t mndProcessTableMetaReq(SRpcMsg *pReq) { SMnode *pMnode = pReq->info.node; int32_t code = -1; @@ -1597,7 +1649,7 @@ static int32_t mndProcessTableMetaReq(SRpcMsg *pReq) { pReq->info.rspLen = rspLen; code = 0; - mDebug("stb:%s.%s, meta is retrieved", infoReq.dbFName, infoReq.tbName); + mTrace("%s.%s, meta is retrieved", infoReq.dbFName, infoReq.tbName); _OVER: if (code != 0) { diff --git a/source/dnode/mnode/impl/src/mndStream.c b/source/dnode/mnode/impl/src/mndStream.c index cbef1facdcd5c1a680c90b3f11936316e12a2a4f..5ee5b06a578f7c31ab18f66f2de1cdef2aa85a04 100644 --- a/source/dnode/mnode/impl/src/mndStream.c +++ b/source/dnode/mnode/impl/src/mndStream.c @@ -393,7 +393,16 @@ static int32_t mndCreateStream(SMnode *pMnode, SRpcMsg *pReq, SCMCreateStreamReq streamObj.trigger = pCreate->triggerType; streamObj.waterMark = pCreate->watermark; - STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_TYPE_CREATE_STREAM, pReq); + if (streamObj.targetSTbName[0]) { + pDb = mndAcquireDbByStb(pMnode, streamObj.targetSTbName); + if (pDb == NULL) { + terrno = TSDB_CODE_MND_DB_NOT_SELECTED; + return -1; + } + tstrncpy(streamObj.targetDb, pDb->name, TSDB_DB_FNAME_LEN); + } + + STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_CONFLICT_NOTHING, pReq); if (pTrans == NULL) { mError("stream:%s, failed to create since %s", pCreate->name, terrstr()); return -1; diff --git a/source/dnode/mnode/impl/src/mndSubscribe.c b/source/dnode/mnode/impl/src/mndSubscribe.c index 
0ece5d29e525a649e8a02b0890eb7ae951008f7b..c7f8415b65db611500d3df1907405a1d07b4b3c2 100644
--- a/source/dnode/mnode/impl/src/mndSubscribe.c
+++ b/source/dnode/mnode/impl/src/mndSubscribe.c
@@ -42,6 +42,7 @@ static int32_t mndSubActionDelete(SSdb *pSdb, SMqSubscribeObj *);
 static int32_t mndSubActionUpdate(SSdb *pSdb, SMqSubscribeObj *pOldSub, SMqSubscribeObj *pNewSub);
 static int32_t mndProcessRebalanceReq(SRpcMsg *pMsg);
+static int32_t mndProcessDropCgroupReq(SRpcMsg *pMsg);
 static int32_t mndProcessSubscribeInternalRsp(SRpcMsg *pMsg);
 static int32_t mndRetrieveSubscribe(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBlock, int32_t rows);
@@ -75,6 +76,8 @@ int32_t mndInitSubscribe(SMnode *pMnode) {
   mndSetMsgHandle(pMnode, TDMT_VND_MQ_VG_CHANGE_RSP, mndProcessSubscribeInternalRsp);
   mndSetMsgHandle(pMnode, TDMT_VND_MQ_VG_DELETE_RSP, mndProcessSubscribeInternalRsp);
   mndSetMsgHandle(pMnode, TDMT_MND_MQ_DO_REBALANCE, mndProcessRebalanceReq);
+  mndSetMsgHandle(pMnode, TDMT_MND_MQ_DROP_CGROUP, mndProcessDropCgroupReq);
+  mndSetMsgHandle(pMnode, TDMT_MND_MQ_DROP_CGROUP_RSP, mndProcessSubscribeInternalRsp);
   mndAddShowRetrieveHandle(pMnode, TSDB_MGMT_TABLE_SUBSCRIPTIONS, mndRetrieveSubscribe);
   mndAddShowFreeIterHandle(pMnode, TSDB_MGMT_TABLE_TOPICS, mndCancelGetNextSubscribe);
@@ -89,10 +92,8 @@ static SMqSubscribeObj *mndCreateSub(SMnode *pMnode, const SMqTopicObj *pTopic,
     return NULL;
   }
   pSub->dbUid = pTopic->dbUid;
+  pSub->stbUid = pTopic->stbUid;
   pSub->subType = pTopic->subType;
-  pSub->withTbName = pTopic->withTbName;
-  pSub->withSchema = pTopic->withSchema;
-  pSub->withTag = pTopic->withTag;
   ASSERT(pSub->unassignedVgs->size == 0);
   ASSERT(taosHashGetSize(pSub->consumerHash) == 0);
@@ -117,9 +118,7 @@ static int32_t mndBuildSubChangeReq(void **pBuf, int32_t *pLen, const SMqSubscri
   req.vgId = pRebVg->pVgEp->vgId;
   req.qmsg = pRebVg->pVgEp->qmsg;
   req.subType = pSub->subType;
-  req.withTbName = pSub->withTbName;
-  req.withSchema = pSub->withSchema;
-  req.withTag = pSub->withTag;
+  req.suid = pSub->stbUid;
   strncpy(req.subKey, pSub->key, TSDB_SUBSCRIBE_KEY_LEN);
   int32_t tlen = sizeof(SMsgHead) + tEncodeSMqRebVgReq(NULL, &req);
@@ -154,6 +153,7 @@ static int32_t mndPersistSubChangeVgReq(SMnode *pMnode, STrans *pTrans, const SM
   int32_t vgId = pRebVg->pVgEp->vgId;
   SVgObj *pVgObj = mndAcquireVgroup(pMnode, vgId);
   if (pVgObj == NULL) {
+    ASSERT(0);
     taosMemoryFree(buf);
     return -1;
   }
@@ -389,8 +389,8 @@ static int32_t mndDoRebalance(SMnode *pMnode, const SMqRebInputObj *pInput, SMqR
   mInfo("rebalance calculation completed, rebalanced vg:");
   for (int32_t i = 0; i < taosArrayGetSize(pOutput->rebVgs); i++) {
     SMqRebOutputVg *pOutputRebVg = taosArrayGet(pOutput->rebVgs, i);
-    mInfo("vg: %d moved from consumer %ld to consumer %ld", pOutputRebVg->pVgEp->vgId, pOutputRebVg->oldConsumerId,
-          pOutputRebVg->newConsumerId);
+    mInfo("vgId:%d, moved from consumer %" PRId64 " to consumer %" PRId64, pOutputRebVg->pVgEp->vgId,
+          pOutputRebVg->oldConsumerId, pOutputRebVg->newConsumerId);
   }
   // 9.
clear @@ -400,10 +401,9 @@ static int32_t mndDoRebalance(SMnode *pMnode, const SMqRebInputObj *pInput, SMqR } static int32_t mndPersistRebResult(SMnode *pMnode, SRpcMsg *pMsg, const SMqRebOutputObj *pOutput) { - STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_TYPE_REBALANCE, pMsg); - if (pTrans == NULL) { - return -1; - } + STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_CONFLICT_NOTHING, pMsg); + if (pTrans == NULL) return -1; + // make txn: // 1. redo action: action to all vg const SArray *rebVgs = pOutput->rebVgs; @@ -448,6 +448,7 @@ static int32_t mndPersistRebResult(SMnode *pMnode, SRpcMsg *pMsg, const SMqRebOu taosArrayPush(pConsumerNew->rebNewTopics, &topic); mndReleaseConsumer(pMnode, pConsumerOld); if (mndSetConsumerCommitLogs(pMnode, pTrans, pConsumerNew) != 0) { + ASSERT(0); goto REB_FAIL; } } @@ -466,9 +467,11 @@ static int32_t mndPersistRebResult(SMnode *pMnode, SRpcMsg *pMsg, const SMqRebOu taosArrayPush(pConsumerNew->rebRemovedTopics, &topic); mndReleaseConsumer(pMnode, pConsumerOld); if (mndSetConsumerCommitLogs(pMnode, pTrans, pConsumerNew) != 0) { + ASSERT(0); goto REB_FAIL; } } +#if 0 if (consumerNum) { char topic[TSDB_TOPIC_FNAME_LEN]; char cgroup[TSDB_CGROUP_LEN]; @@ -483,17 +486,24 @@ static int32_t mndPersistRebResult(SMnode *pMnode, SRpcMsg *pMsg, const SMqRebOu pTopic->refConsumerCnt = topicObj.refConsumerCnt; mInfo("subscribe topic %s unref %d consumer cgroup %s, refcnt %d", pTopic->name, consumerNum, cgroup, topicObj.refConsumerCnt); - if (mndSetTopicCommitLogs(pMnode, pTrans, &topicObj) != 0) goto REB_FAIL; + if (mndSetTopicCommitLogs(pMnode, pTrans, &topicObj) != 0) { + ASSERT(0); + goto REB_FAIL; + } } } +#endif // 4. TODO commit log: modification log // 5. set cb - mndTransSetCb(pTrans, MQ_REB_TRANS_START_FUNC, MQ_REB_TRANS_STOP_FUNC, NULL, 0); + mndTransSetCb(pTrans, TRANS_START_FUNC_MQ_REB, TRANS_STOP_FUNC_MQ_REB, NULL, 0); // 6. 
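/*
 * mndTransSetCb() above records enum ids (TRANS_START_FUNC_MQ_REB /
 * TRANS_STOP_FUNC_MQ_REB) instead of raw function pointers, so a
 * transaction can be serialized into the sdb and still find its callbacks
 * after a restart: replay resolves the id through mndTransGetCbFp(), whose
 * mapping is updated later in this patch. A sketch of that resolve-and-call
 * step (transInvokeStartCb is a hypothetical helper name):
 */
static void transInvokeStartCb(SMnode *pMnode, STrans *pTrans) {
  TransCbFp fp = mndTransGetCbFp(pTrans->startFunc);  // enum id -> function pointer
  if (fp != NULL) {
    (*fp)(pMnode, pTrans->param, pTrans->paramLen);  // signature as in mndTransTestStartFunc
  }
}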
execution - if (mndTransPrepare(pMnode, pTrans) != 0) goto REB_FAIL; + if (mndTransPrepare(pMnode, pTrans) != 0) { + ASSERT(0); + goto REB_FAIL; + } mndTransDrop(pTrans); return 0; @@ -581,6 +591,63 @@ static int32_t mndProcessRebalanceReq(SRpcMsg *pMsg) { return 0; } +static int32_t mndProcessDropCgroupReq(SRpcMsg *pReq) { + SMnode *pMnode = pReq->info.node; + SSdb *pSdb = pMnode->pSdb; + SMDropCgroupReq dropReq = {0}; + + if (tDeserializeSMDropCgroupReq(pReq->pCont, pReq->contLen, &dropReq) != 0) { + terrno = TSDB_CODE_INVALID_MSG; + return -1; + } + + SMqSubscribeObj *pSub = mndAcquireSubscribe(pMnode, dropReq.cgroup, dropReq.topic); + if (pSub == NULL) { + if (dropReq.igNotExists) { + mDebug("cgroup:%s on topic:%s, not exist, ignore not exist is set", dropReq.cgroup, dropReq.topic); + return 0; + } else { + terrno = TSDB_CODE_MND_SUBSCRIBE_NOT_EXIST; + mError("topic:%s, cgroup:%s, failed to drop since %s", dropReq.topic, dropReq.cgroup, terrstr()); + return -1; + } + } + + if (taosHashGetSize(pSub->consumerHash) != 0) { + terrno = TSDB_CODE_MND_CGROUP_USED; + mError("cgroup:%s on topic:%s, failed to drop since %s", dropReq.cgroup, dropReq.topic, terrstr()); + mndReleaseSubscribe(pMnode, pSub); + return -1; + } + + STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_CONFLICT_NOTHING, pReq); + if (pTrans == NULL) { + mError("cgroup: %s on topic:%s, failed to drop since %s", dropReq.cgroup, dropReq.topic, terrstr()); + mndReleaseSubscribe(pMnode, pSub); + return -1; + } + + mDebug("trans:%d, used to drop cgroup:%s on topic %s", pTrans->id, dropReq.cgroup, dropReq.topic); + + if (mndDropOffsetBySubKey(pMnode, pTrans, pSub->key) < 0) { + ASSERT(0); + mndReleaseSubscribe(pMnode, pSub); + return -1; + } + + if (mndSetDropSubCommitLogs(pMnode, pTrans, pSub) < 0) { + mError("cgroup %s on topic:%s, failed to drop since %s", dropReq.cgroup, dropReq.topic, terrstr()); + mndReleaseSubscribe(pMnode, pSub); + return -1; + } + + mndTransPrepare(pMnode, pTrans); + + mndReleaseSubscribe(pMnode, pSub); + + return TSDB_CODE_ACTION_IN_PROGRESS; +} + void mndCleanupSubscribe(SMnode *pMnode) {} static SSdbRaw *mndSubActionEncode(SMqSubscribeObj *pSub) { @@ -735,7 +802,7 @@ static int32_t mndSetDropSubRedoLogs(SMnode *pMnode, STrans *pTrans, SMqSubscrib return 0; } -static int32_t mndSetDropSubCommitLogs(SMnode *pMnode, STrans *pTrans, SMqSubscribeObj *pSub) { +int32_t mndSetDropSubCommitLogs(SMnode *pMnode, STrans *pTrans, SMqSubscribeObj *pSub) { SSdbRaw *pCommitRaw = mndSubActionEncode(pSub); if (pCommitRaw == NULL) return -1; if (mndTransAppendCommitlog(pTrans, pCommitRaw) != 0) return -1; @@ -886,7 +953,7 @@ static int32_t mndRetrieveSubscribe(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock } // do not show for cleared subscription -#if 0 +#if 1 int32_t sz = taosArrayGetSize(pSub->unassignedVgs); for (int32_t i = 0; i < sz; i++) { SMqVgEp *pVgEp = taosArrayGetP(pSub->unassignedVgs, i); diff --git a/source/dnode/mnode/impl/src/mndSync.c b/source/dnode/mnode/impl/src/mndSync.c index f34ab28cce4fa51a7f9596ffada04971d7e3c5d6..245f0938b906300af29bf3f6caf71c834877eaa1 100644 --- a/source/dnode/mnode/impl/src/mndSync.c +++ b/source/dnode/mnode/impl/src/mndSync.c @@ -17,27 +17,41 @@ #include "mndSync.h" #include "mndTrans.h" -int32_t mndSyncEqMsg(const SMsgCb *msgcb, SRpcMsg *pMsg) { +int32_t mndSyncEqMsg(const SMsgCb *msgcb, SRpcMsg *pMsg) { SMsgHead *pHead = pMsg->pCont; pHead->contLen = htonl(pHead->contLen); pHead->vgId = htonl(pHead->vgId); - return tmsgPutToQueue(msgcb, SYNC_QUEUE, pMsg); + return 
tmsgPutToQueue(msgcb, SYNC_QUEUE, pMsg); } int32_t mndSyncSendMsg(const SEpSet *pEpSet, SRpcMsg *pMsg) { return tmsgSendReq(pEpSet, pMsg); } void mndSyncCommitMsg(struct SSyncFSM *pFsm, const SRpcMsg *pMsg, SFsmCbMeta cbMeta) { - SMnode *pMnode = pFsm->data; - SSdbRaw *pRaw = pMsg->pCont; - - mTrace("raw:%p, apply to sdb, ver:%" PRId64 " term:%" PRId64 " role:%s", pRaw, cbMeta.index, cbMeta.term, - syncStr(cbMeta.state)); - sdbWriteWithoutFree(pMnode->pSdb, pRaw); - sdbSetApplyIndex(pMnode->pSdb, cbMeta.index); - sdbSetApplyTerm(pMnode->pSdb, cbMeta.term); - if (cbMeta.state == TAOS_SYNC_STATE_LEADER) { - tsem_post(&pMnode->syncMgmt.syncSem); + SMnode *pMnode = pFsm->data; + SSyncMgmt *pMgmt = &pMnode->syncMgmt; + SSdbRaw *pRaw = pMsg->pCont; + + int32_t transId = sdbGetIdFromRaw(pMnode->pSdb, pRaw); + pMgmt->errCode = cbMeta.code; + mTrace("trans:%d, is proposed, savedTransId:%d code:0x%x, ver:%" PRId64 " term:%" PRId64 " role:%s raw:%p", transId, + pMgmt->transId, cbMeta.code, cbMeta.index, cbMeta.term, syncStr(cbMeta.state), pRaw); + + if (pMgmt->errCode == 0) { + sdbWriteWithoutFree(pMnode->pSdb, pRaw); + sdbSetApplyIndex(pMnode->pSdb, cbMeta.index); + sdbSetApplyTerm(pMnode->pSdb, cbMeta.term); + } + + if (pMgmt->transId == transId) { + if (pMgmt->errCode != 0) { + mError("trans:%d, failed to propose since %s", transId, tstrerror(pMgmt->errCode)); + } + tsem_post(&pMgmt->syncSem); + } else { + if (cbMeta.index - sdbGetApplyIndex(pMnode->pSdb) > 100) { + sdbWriteFile(pMnode->pSdb); + } } } @@ -51,54 +65,79 @@ int32_t mndSyncGetSnapshot(struct SSyncFSM *pFsm, SSnapshot *pSnapshot) { void mndRestoreFinish(struct SSyncFSM *pFsm) { SMnode *pMnode = pFsm->data; if (!pMnode->deploy) { - mInfo("mnode sync restore finished"); + mInfo("mnode sync restore finished, and will handle outstanding transactions"); mndTransPullup(pMnode); - pMnode->syncMgmt.restored = true; + mndSetRestore(pMnode, true); + } else { + mInfo("mnode sync restore finished, and will set ready after first deploy"); } } -int32_t mndSnapshotRead(struct SSyncFSM* pFsm, const SSnapshot* pSnapshot, void** ppIter, char** ppBuf, int32_t* len) { - /* - SMnode *pMnode = pFsm->data; - SSdbIter *pIter; - if (iter == NULL) { - pIter = sdbIterInit(pMnode->sdb) - } else { - pIter = iter; +void mndReConfig(struct SSyncFSM *pFsm, SSyncCfg newCfg, SReConfigCbMeta cbMeta) { + SMnode *pMnode = pFsm->data; + SSyncMgmt *pMgmt = &pMnode->syncMgmt; + + pMgmt->errCode = cbMeta.code; + mInfo("trans:-1, sync reconfig is proposed, savedTransId:%d code:0x%x, curTerm:%" PRId64 " term:%" PRId64, + pMgmt->transId, cbMeta.code, cbMeta.index, cbMeta.term); + + if (pMgmt->transId == -1) { + if (pMgmt->errCode != 0) { + mError("trans:-1, failed to propose sync reconfig since %s", tstrerror(pMgmt->errCode)); + } + tsem_post(&pMgmt->syncSem); } - */ +} - return 0; +int32_t mndSnapshotStartRead(struct SSyncFSM *pFsm, void **ppReader) { + mInfo("start to read snapshot from sdb"); + SMnode *pMnode = pFsm->data; + return sdbStartRead(pMnode->pSdb, (SSdbIter **)ppReader); } -int32_t mndSnapshotApply(struct SSyncFSM* pFsm, const SSnapshot* pSnapshot, char* pBuf, int32_t len) { +int32_t mndSnapshotStopRead(struct SSyncFSM *pFsm, void *pReader) { + mInfo("stop to read snapshot from sdb"); SMnode *pMnode = pFsm->data; - sdbWrite(pMnode->pSdb, (SSdbRaw*)pBuf); - return 0; + return sdbStopRead(pMnode->pSdb, pReader); } -void mndReConfig(struct SSyncFSM *pFsm, SSyncCfg newCfg, SReConfigCbMeta cbMeta) { - mInfo("mndReConfig cbMeta.code:%d, cbMeta.currentTerm:%" PRId64 ", 
cbMeta.term:%" PRId64 ", cbMeta.index:%" PRId64, - cbMeta.code, cbMeta.currentTerm, cbMeta.term, cbMeta.index); +int32_t mndSnapshotDoRead(struct SSyncFSM *pFsm, void *pReader, void **ppBuf, int32_t *len) { SMnode *pMnode = pFsm->data; - pMnode->syncMgmt.errCode = cbMeta.code; - tsem_post(&pMnode->syncMgmt.syncSem); + return sdbDoRead(pMnode->pSdb, pReader, ppBuf, len); +} + +int32_t mndSnapshotStartWrite(struct SSyncFSM *pFsm, void **ppWriter) { + mInfo("start to apply snapshot to sdb"); + SMnode *pMnode = pFsm->data; + return sdbStartWrite(pMnode->pSdb, (SSdbIter **)ppWriter); +} + +int32_t mndSnapshotStopWrite(struct SSyncFSM *pFsm, void *pWriter, bool isApply) { + mInfo("stop to apply snapshot to sdb, apply:%d", isApply); + SMnode *pMnode = pFsm->data; + return sdbStopWrite(pMnode->pSdb, pWriter, isApply); +} + +int32_t mndSnapshotDoWrite(struct SSyncFSM *pFsm, void *pWriter, void *pBuf, int32_t len) { + SMnode *pMnode = pFsm->data; + return sdbDoWrite(pMnode->pSdb, pWriter, pBuf, len); } SSyncFSM *mndSyncMakeFsm(SMnode *pMnode) { SSyncFSM *pFsm = taosMemoryCalloc(1, sizeof(SSyncFSM)); pFsm->data = pMnode; - pFsm->FpCommitCb = mndSyncCommitMsg; pFsm->FpPreCommitCb = NULL; pFsm->FpRollBackCb = NULL; - - pFsm->FpGetSnapshot = mndSyncGetSnapshot; pFsm->FpRestoreFinishCb = mndRestoreFinish; - pFsm->FpSnapshotRead = mndSnapshotRead; - pFsm->FpSnapshotApply = mndSnapshotApply; pFsm->FpReConfigCb = mndReConfig; - + pFsm->FpGetSnapshot = mndSyncGetSnapshot; + pFsm->FpSnapshotStartRead = mndSnapshotStartRead; + pFsm->FpSnapshotStopRead = mndSnapshotStopRead; + pFsm->FpSnapshotDoRead = mndSnapshotDoRead; + pFsm->FpSnapshotStartWrite = mndSnapshotStartWrite; + pFsm->FpSnapshotStopWrite = mndSnapshotStopWrite; + pFsm->FpSnapshotDoWrite = mndSnapshotDoWrite; return pFsm; } @@ -132,8 +171,7 @@ int32_t mndInitSync(SMnode *pMnode) { SSyncCfg *pCfg = &syncInfo.syncCfg; pCfg->replicaNum = pMnode->replica; pCfg->myIndex = pMnode->selfIndex; - mInfo("start to open mnode sync, replica:%d myindex:%d standby:%d", pCfg->replicaNum, pCfg->myIndex, - pMgmt->standby); + mInfo("start to open mnode sync, replica:%d myindex:%d standby:%d", pCfg->replicaNum, pCfg->myIndex, pMgmt->standby); for (int32_t i = 0; i < pMnode->replica; ++i) { SNodeInfo *pNode = &pCfg->nodeInfo[i]; tstrncpy(pNode->nodeFqdn, pMnode->replicas[i].fqdn, sizeof(pNode->nodeFqdn)); @@ -165,15 +203,17 @@ void mndCleanupSync(SMnode *pMnode) { memset(pMgmt, 0, sizeof(SSyncMgmt)); } -int32_t mndSyncPropose(SMnode *pMnode, SSdbRaw *pRaw) { +int32_t mndSyncPropose(SMnode *pMnode, SSdbRaw *pRaw, int32_t transId) { SSyncMgmt *pMgmt = &pMnode->syncMgmt; - pMgmt->errCode = 0; - - SRpcMsg rsp = {.code = TDMT_MND_APPLY_MSG, .contLen = sdbGetRawTotalSize(pRaw)}; + SRpcMsg rsp = {.code = TDMT_MND_APPLY_MSG, .contLen = sdbGetRawTotalSize(pRaw)}; rsp.pCont = rpcMallocCont(rsp.contLen); if (rsp.pCont == NULL) return -1; memcpy(rsp.pCont, pRaw, rsp.contLen); + pMgmt->errCode = 0; + pMgmt->transId = transId; + mTrace("trans:%d, will be proposed", pMgmt->transId); + const bool isWeak = false; int32_t code = syncPropose(pMgmt->sync, &rsp, isWeak); if (code == 0) { @@ -187,7 +227,11 @@ int32_t mndSyncPropose(SMnode *pMnode, SSdbRaw *pRaw) { } rpcFreeCont(rsp.pCont); - if (code != 0) return code; + if (code != 0) { + mError("trans:%d, failed to propose, code:0x%x", pMgmt->transId, code); + return code; + } + return pMgmt->errCode; } @@ -195,23 +239,29 @@ void mndSyncStart(SMnode *pMnode) { SSyncMgmt *pMgmt = &pMnode->syncMgmt; syncSetMsgCb(pMgmt->sync, &pMnode->msgCb); 
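/*
 * The snapshot callbacks wired into the FSM above split sdb replication
 * into start/do/stop pairs, giving the sync layer a pull-style iterator.
 * A hedged sketch of how a sender might drive the read side; the loop
 * shape and the NULL-buffer end-of-snapshot convention are assumptions,
 * only the sdbStartRead/sdbDoRead/sdbStopRead calls come from this patch:
 */
static int32_t snapshotSendSketch(SMnode *pMnode) {
  SSdbIter *pReader = NULL;
  if (sdbStartRead(pMnode->pSdb, &pReader) != 0) return -1;

  while (1) {
    void   *pBuf = NULL;
    int32_t len = 0;
    if (sdbDoRead(pMnode->pSdb, pReader, &pBuf, &len) != 0) break;
    if (pBuf == NULL || len == 0) break;  // assumed end-of-snapshot signal
    /* hand (pBuf, len) to the transport here, e.g. a sync snapshot sender */
    taosMemoryFree(pBuf);
  }

  return sdbStopRead(pMnode->pSdb, pReader);
}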
- syncStart(pMgmt->sync); - -#if 0 if (pMgmt->standby) { syncStartStandBy(pMgmt->sync); } else { syncStart(pMgmt->sync); } -#endif - - mDebug("sync:%" PRId64 " is started", pMgmt->sync); + mDebug("mnode sync started, id:%" PRId64 " standby:%d", pMgmt->sync, pMgmt->standby); } void mndSyncStop(SMnode *pMnode) {} bool mndIsMaster(SMnode *pMnode) { SSyncMgmt *pMgmt = &pMnode->syncMgmt; + ESyncState state = syncGetMyRole(pMgmt->sync); - return (state == TAOS_SYNC_STATE_LEADER) && (pMnode->syncMgmt.restored); + if (state != TAOS_SYNC_STATE_LEADER) { + terrno = TSDB_CODE_SYN_NOT_LEADER; + return false; + } + + if (!pMnode->restored) { + terrno = TSDB_CODE_APP_NOT_READY; + return false; + } + + return true; } diff --git a/source/dnode/mnode/impl/src/mndTopic.c b/source/dnode/mnode/impl/src/mndTopic.c index 2048c798475062055520fe25e0249f411615b81f..e0d565c9afb0fafe46c5ff3dd96a83ee9c76372d 100644 --- a/source/dnode/mnode/impl/src/mndTopic.c +++ b/source/dnode/mnode/impl/src/mndTopic.c @@ -15,6 +15,7 @@ #include "mndTopic.h" #include "mndAuth.h" +#include "mndConsumer.h" #include "mndDb.h" #include "mndDnode.h" #include "mndMnode.h" @@ -69,6 +70,56 @@ const char *mndTopicGetShowName(const char topic[TSDB_TOPIC_FNAME_LEN]) { return strchr(topic, '.') + 1; } +bool mndCheckColAndTagModifiable(SMnode *pMnode, int64_t suid, const SArray *colAndTagIds) { + SSdb *pSdb = pMnode->pSdb; + void *pIter = NULL; + bool found = false; + while (1) { + SMqTopicObj *pTopic = NULL; + pIter = sdbFetch(pSdb, SDB_TOPIC, pIter, (void **)&pTopic); + if (pIter == NULL) break; + if (pTopic->subType != TOPIC_SUB_TYPE__COLUMN) { + sdbRelease(pSdb, pTopic); + continue; + } + + SNode *pAst = NULL; + if (nodesStringToNode(pTopic->ast, &pAst) != 0) { + ASSERT(0); + return false; + } + + SHashObj *pColHash = NULL; + SNodeList *pNodeList; + nodesCollectColumns((SSelectStmt *)pAst, SQL_CLAUSE_FROM, NULL, COLLECT_COL_TYPE_ALL, &pNodeList); + SNode *pNode = NULL; + FOREACH(pNode, pNodeList) { + SColumnNode *pCol = (SColumnNode *)pNode; + if (pCol->tableId != suid) goto NEXT; + if (pColHash == NULL) { + pColHash = taosHashInit(0, taosGetDefaultHashFunction(TSDB_DATA_TYPE_SMALLINT), false, HASH_NO_LOCK); + } + if (pCol->colId > 0) { + taosHashPut(pColHash, &pCol->colId, sizeof(int16_t), NULL, 0); + } + } + + for (int32_t i = 0; i < taosArrayGetSize(colAndTagIds); i++) { + int16_t *pColId = taosArrayGet(colAndTagIds, i); + if (taosHashGet(pColHash, pColId, sizeof(int16_t)) != NULL) { + found = true; + goto NEXT; + } + } + + NEXT: + sdbRelease(pSdb, pTopic); + nodesDestroyNode(pAst); + if (found) return false; + } + return true; +} + SSdbRaw *mndTopicActionEncode(SMqTopicObj *pTopic) { terrno = TSDB_CODE_OUT_OF_MEMORY; @@ -95,11 +146,8 @@ SSdbRaw *mndTopicActionEncode(SMqTopicObj *pTopic) { SDB_SET_INT64(pRaw, dataPos, pTopic->dbUid, TOPIC_ENCODE_OVER); SDB_SET_INT32(pRaw, dataPos, pTopic->version, TOPIC_ENCODE_OVER); SDB_SET_INT8(pRaw, dataPos, pTopic->subType, TOPIC_ENCODE_OVER); - SDB_SET_INT8(pRaw, dataPos, pTopic->withTbName, TOPIC_ENCODE_OVER); - SDB_SET_INT8(pRaw, dataPos, pTopic->withSchema, TOPIC_ENCODE_OVER); - SDB_SET_INT8(pRaw, dataPos, pTopic->withTag, TOPIC_ENCODE_OVER); - SDB_SET_INT32(pRaw, dataPos, pTopic->consumerCnt, TOPIC_ENCODE_OVER); + SDB_SET_INT64(pRaw, dataPos, pTopic->stbUid, TOPIC_ENCODE_OVER); SDB_SET_INT32(pRaw, dataPos, pTopic->sqlLen, TOPIC_ENCODE_OVER); SDB_SET_BINARY(pRaw, dataPos, pTopic->sql, pTopic->sqlLen, TOPIC_ENCODE_OVER); SDB_SET_INT32(pRaw, dataPos, pTopic->astLen, TOPIC_ENCODE_OVER); @@ -121,8 
+169,6 @@ SSdbRaw *mndTopicActionEncode(SMqTopicObj *pTopic) { SDB_SET_BINARY(pRaw, dataPos, swBuf, schemaLen, TOPIC_ENCODE_OVER); } - SDB_SET_INT32(pRaw, dataPos, pTopic->refConsumerCnt, TOPIC_ENCODE_OVER); - SDB_SET_RESERVE(pRaw, dataPos, MND_TOPIC_RESERVE_SIZE, TOPIC_ENCODE_OVER); SDB_SET_DATALEN(pRaw, dataPos, TOPIC_ENCODE_OVER); @@ -167,12 +213,8 @@ SSdbRow *mndTopicActionDecode(SSdbRaw *pRaw) { SDB_GET_INT64(pRaw, dataPos, &pTopic->dbUid, TOPIC_DECODE_OVER); SDB_GET_INT32(pRaw, dataPos, &pTopic->version, TOPIC_DECODE_OVER); SDB_GET_INT8(pRaw, dataPos, &pTopic->subType, TOPIC_DECODE_OVER); - SDB_GET_INT8(pRaw, dataPos, &pTopic->withTbName, TOPIC_DECODE_OVER); - SDB_GET_INT8(pRaw, dataPos, &pTopic->withSchema, TOPIC_DECODE_OVER); - SDB_GET_INT8(pRaw, dataPos, &pTopic->withTag, TOPIC_DECODE_OVER); - - SDB_GET_INT32(pRaw, dataPos, &pTopic->consumerCnt, TOPIC_DECODE_OVER); + SDB_GET_INT64(pRaw, dataPos, &pTopic->stbUid, TOPIC_DECODE_OVER); SDB_GET_INT32(pRaw, dataPos, &pTopic->sqlLen, TOPIC_DECODE_OVER); pTopic->sql = taosMemoryCalloc(pTopic->sqlLen, sizeof(char)); if (pTopic->sql == NULL) { @@ -221,8 +263,6 @@ SSdbRow *mndTopicActionDecode(SSdbRaw *pRaw) { pTopic->schema.pSchema = NULL; } - SDB_GET_INT32(pRaw, dataPos, &pTopic->refConsumerCnt, TOPIC_DECODE_OVER); - SDB_GET_RESERVE(pRaw, dataPos, MND_TOPIC_RESERVE_SIZE, TOPIC_DECODE_OVER); terrno = TSDB_CODE_SUCCESS; @@ -253,8 +293,6 @@ static int32_t mndTopicActionUpdate(SSdb *pSdb, SMqTopicObj *pOldTopic, SMqTopic atomic_exchange_64(&pOldTopic->updateTime, pNewTopic->updateTime); atomic_exchange_32(&pOldTopic->version, pNewTopic->version); - atomic_store_32(&pOldTopic->refConsumerCnt, pNewTopic->refConsumerCnt); - /*taosWLockLatch(&pOldTopic->lock);*/ // TODO handle update @@ -277,18 +315,6 @@ void mndReleaseTopic(SMnode *pMnode, SMqTopicObj *pTopic) { sdbRelease(pSdb, pTopic); } -#if 0 -static SDbObj *mndAcquireDbByTopic(SMnode *pMnode, char *topicName) { - SName name = {0}; - tNameFromString(&name, topicName, T_NAME_ACCT | T_NAME_DB | T_NAME_TABLE); - - char db[TSDB_TOPIC_FNAME_LEN] = {0}; - tNameGetFullDbName(&name, db); - - return mndAcquireDb(pMnode, db); -} -#endif - static SDDropTopicReq *mndBuildDropTopicMsg(SMnode *pMnode, SVgObj *pVgroup, SMqTopicObj *pTopic) { int32_t contLen = sizeof(SDDropTopicReq); @@ -307,11 +333,19 @@ static SDDropTopicReq *mndBuildDropTopicMsg(SMnode *pMnode, SVgObj *pVgroup, SMq } static int32_t mndCheckCreateTopicReq(SCMCreateTopicReq *pCreate) { - if (pCreate->name[0] == 0 || pCreate->sql == NULL || pCreate->sql[0] == 0 || pCreate->subscribeDbName[0] == 0) { - terrno = TSDB_CODE_MND_INVALID_TOPIC; - return -1; + terrno = TSDB_CODE_MND_INVALID_TOPIC; + + if (pCreate->sql == NULL) return -1; + + if (pCreate->subType == TOPIC_SUB_TYPE__COLUMN) { + if (pCreate->ast == NULL || pCreate->ast[0] == 0) return -1; + } else if (pCreate->subType == TOPIC_SUB_TYPE__TABLE) { + if (pCreate->subStbName[0] == 0) return -1; + } else if (pCreate->subType == TOPIC_SUB_TYPE__DB) { + if (pCreate->subDbName[0] == 0) return -1; } + terrno = TSDB_CODE_SUCCESS; return 0; } @@ -327,14 +361,11 @@ static int32_t mndCreateTopic(SMnode *pMnode, SRpcMsg *pReq, SCMCreateTopicReq * topicObj.version = 1; topicObj.sql = strdup(pCreate->sql); topicObj.sqlLen = strlen(pCreate->sql) + 1; - topicObj.refConsumerCnt = 0; + topicObj.subType = pCreate->subType; - if (pCreate->ast && pCreate->ast[0]) { + if (pCreate->subType == TOPIC_SUB_TYPE__COLUMN) { topicObj.ast = strdup(pCreate->ast); topicObj.astLen = strlen(pCreate->ast) + 1; - 
topicObj.subType = TOPIC_SUB_TYPE__TABLE; - topicObj.withTbName = pCreate->withTbName; - topicObj.withSchema = pCreate->withSchema; SNode *pAst = NULL; if (nodesStringToNode(pCreate->ast, &pAst) != 0) { @@ -367,16 +398,18 @@ static int32_t mndCreateTopic(SMnode *pMnode, SRpcMsg *pReq, SCMCreateTopicReq * taosMemoryFree(topicObj.sql); return -1; } - } else { - topicObj.ast = NULL; - topicObj.astLen = 0; - topicObj.physicalPlan = NULL; - topicObj.subType = TOPIC_SUB_TYPE__DB; - topicObj.withTbName = 1; - topicObj.withSchema = 1; - } - - STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_TYPE_CREATE_TOPIC, pReq); + } else if (pCreate->subType == TOPIC_SUB_TYPE__TABLE) { + SStbObj *pStb = mndAcquireStb(pMnode, pCreate->subStbName); + topicObj.stbUid = pStb->uid; + } + /*} else if (pCreate->subType == TOPIC_SUB_TYPE__DB) {*/ + /*topicObj.ast = NULL;*/ + /*topicObj.astLen = 0;*/ + /*topicObj.physicalPlan = NULL;*/ + /*topicObj.withTbName = 1;*/ + /*topicObj.withSchema = 1;*/ + + STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_CONFLICT_NOTHING, pReq); if (pTrans == NULL) { mError("topic:%s, failed to create since %s", pCreate->name, terrstr()); taosMemoryFreeClear(topicObj.ast); @@ -441,7 +474,7 @@ static int32_t mndProcessCreateTopicReq(SRpcMsg *pReq) { goto CREATE_TOPIC_OVER; } - pDb = mndAcquireDb(pMnode, createTopicReq.subscribeDbName); + pDb = mndAcquireDb(pMnode, createTopicReq.subDbName); if (pDb == NULL) { terrno = TSDB_CODE_MND_DB_NOT_SELECTED; goto CREATE_TOPIC_OVER; @@ -492,8 +525,8 @@ static int32_t mndDropTopic(SMnode *pMnode, STrans *pTrans, SRpcMsg *pReq, SMqTo } static int32_t mndProcessDropTopicReq(SRpcMsg *pReq) { - SMnode *pMnode = pReq->info.node; - /*SSdb *pSdb = pMnode->pSdb;*/ + SMnode *pMnode = pReq->info.node; + SSdb *pSdb = pMnode->pSdb; SMDropTopicReq dropReq = {0}; if (tDeserializeSMDropTopicReq(pReq->pCont, pReq->contLen, &dropReq) != 0) { @@ -513,14 +546,38 @@ static int32_t mndProcessDropTopicReq(SRpcMsg *pReq) { } } + void *pIter = NULL; + SMqConsumerObj *pConsumer; + while (1) { + pIter = sdbFetch(pSdb, SDB_CONSUMER, pIter, (void **)&pConsumer); + if (pIter == NULL) break; + + if (pConsumer->status == MQ_CONSUMER_STATUS__LOST_REBD) continue; + int32_t sz = taosArrayGetSize(pConsumer->assignedTopics); + for (int32_t i = 0; i < sz; i++) { + char *name = taosArrayGetP(pConsumer->assignedTopics, i); + if (strcmp(name, pTopic->name) == 0) { + mndReleaseConsumer(pMnode, pConsumer); + mndReleaseTopic(pMnode, pTopic); + terrno = TSDB_CODE_MND_TOPIC_SUBSCRIBED; + mError("topic:%s, failed to drop since subscribed by consumer %ld from cgroup %s", dropReq.name, + pConsumer->consumerId, pConsumer->cgroup); + return -1; + } + } + sdbRelease(pSdb, pConsumer); + } + +#if 0 if (pTopic->refConsumerCnt != 0) { mndReleaseTopic(pMnode, pTopic); terrno = TSDB_CODE_MND_TOPIC_SUBSCRIBED; mError("topic:%s, failed to drop since %s", dropReq.name, terrstr()); return -1; } +#endif - STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_TYPE_DROP_TOPIC, pReq); + STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_CONFLICT_NOTHING, pReq); if (pTrans == NULL) { mError("topic:%s, failed to drop since %s", pTopic->name, terrstr()); return -1; diff --git a/source/dnode/mnode/impl/src/mndTrans.c b/source/dnode/mnode/impl/src/mndTrans.c index a7480f459aa5c03c5c5550b4d22767e0e5badc5a..bbee59090d1600693478d38fec9ff47082bcc032 100644 --- a/source/dnode/mnode/impl/src/mndTrans.c +++ b/source/dnode/mnode/impl/src/mndTrans.c @@ -37,19 +37,18 @@ static 
int32_t mndTransAppendAction(SArray *pArray, STransAction *pAction); static void mndTransDropLogs(SArray *pArray); static void mndTransDropActions(SArray *pArray); static void mndTransDropData(STrans *pTrans); -static int32_t mndTransExecuteLogs(SMnode *pMnode, SArray *pArray); static int32_t mndTransExecuteActions(SMnode *pMnode, STrans *pTrans, SArray *pArray); static int32_t mndTransExecuteRedoLogs(SMnode *pMnode, STrans *pTrans); static int32_t mndTransExecuteUndoLogs(SMnode *pMnode, STrans *pTrans); static int32_t mndTransExecuteRedoActions(SMnode *pMnode, STrans *pTrans); static int32_t mndTransExecuteUndoActions(SMnode *pMnode, STrans *pTrans); -static int32_t mndTransExecuteCommitLogs(SMnode *pMnode, STrans *pTrans); +static int32_t mndTransExecuteCommitActions(SMnode *pMnode, STrans *pTrans); static bool mndTransPerformPrepareStage(SMnode *pMnode, STrans *pTrans); static bool mndTransPerformRedoLogStage(SMnode *pMnode, STrans *pTrans); static bool mndTransPerformRedoActionStage(SMnode *pMnode, STrans *pTrans); static bool mndTransPerformUndoLogStage(SMnode *pMnode, STrans *pTrans); static bool mndTransPerformUndoActionStage(SMnode *pMnode, STrans *pTrans); -static bool mndTransPerformCommitLogStage(SMnode *pMnode, STrans *pTrans); +static bool mndTransPerformCommitActionStage(SMnode *pMnode, STrans *pTrans); static bool mndTransPerformCommitStage(SMnode *pMnode, STrans *pTrans); static bool mndTransPerformRollbackStage(SMnode *pMnode, STrans *pTrans); static bool mndTransPerfromFinishedStage(SMnode *pMnode, STrans *pTrans); @@ -83,40 +82,30 @@ int32_t mndInitTrans(SMnode *pMnode) { void mndCleanupTrans(SMnode *pMnode) {} -static SSdbRaw *mndTransActionEncode(STrans *pTrans) { - terrno = TSDB_CODE_OUT_OF_MEMORY; +static int32_t mndTransGetActionsSize(SArray *pArray) { + int32_t actionNum = taosArrayGetSize(pArray); + int32_t rawDataLen = 0; - int32_t rawDataLen = sizeof(STrans) + TRANS_RESERVE_SIZE; - int32_t redoLogNum = taosArrayGetSize(pTrans->redoLogs); - int32_t undoLogNum = taosArrayGetSize(pTrans->undoLogs); - int32_t commitLogNum = taosArrayGetSize(pTrans->commitLogs); - int32_t redoActionNum = taosArrayGetSize(pTrans->redoActions); - int32_t undoActionNum = taosArrayGetSize(pTrans->undoActions); - - for (int32_t i = 0; i < redoLogNum; ++i) { - SSdbRaw *pTmp = taosArrayGetP(pTrans->redoLogs, i); - rawDataLen += (sdbGetRawTotalSize(pTmp) + sizeof(int32_t)); - } - - for (int32_t i = 0; i < undoLogNum; ++i) { - SSdbRaw *pTmp = taosArrayGetP(pTrans->undoLogs, i); - rawDataLen += (sdbGetRawTotalSize(pTmp) + sizeof(int32_t)); + for (int32_t i = 0; i < actionNum; ++i) { + STransAction *pAction = taosArrayGet(pArray, i); + if (pAction->actionType) { + rawDataLen += (sdbGetRawTotalSize(pAction->pRaw) + sizeof(int32_t)); + } else { + rawDataLen += (sizeof(STransAction) + pAction->contLen); + } + rawDataLen += sizeof(pAction->actionType); } - for (int32_t i = 0; i < commitLogNum; ++i) { - SSdbRaw *pTmp = taosArrayGetP(pTrans->commitLogs, i); - rawDataLen += (sdbGetRawTotalSize(pTmp) + sizeof(int32_t)); - } + return rawDataLen; +} - for (int32_t i = 0; i < redoActionNum; ++i) { - STransAction *pAction = taosArrayGet(pTrans->redoActions, i); - rawDataLen += (sizeof(STransAction) + pAction->contLen); - } +static SSdbRaw *mndTransActionEncode(STrans *pTrans) { + terrno = TSDB_CODE_OUT_OF_MEMORY; - for (int32_t i = 0; i < undoActionNum; ++i) { - STransAction *pAction = taosArrayGet(pTrans->undoActions, i); - rawDataLen += (sizeof(STransAction) + pAction->contLen); - } + int32_t rawDataLen 
= sizeof(STrans) + TRANS_RESERVE_SIZE; + rawDataLen += mndTransGetActionsSize(pTrans->redoActions); + rawDataLen += mndTransGetActionsSize(pTrans->undoActions); + rawDataLen += mndTransGetActionsSize(pTrans->commitActions); SSdbRaw *pRaw = sdbAllocRaw(SDB_TRANS, TRANS_VER_NUMBER, rawDataLen); if (pRaw == NULL) { @@ -126,66 +115,85 @@ static SSdbRaw *mndTransActionEncode(STrans *pTrans) { int32_t dataPos = 0; SDB_SET_INT32(pRaw, dataPos, pTrans->id, _OVER) - - ETrnStage stage = pTrans->stage; - if (stage == TRN_STAGE_REDO_LOG || stage == TRN_STAGE_REDO_ACTION) { - stage = TRN_STAGE_PREPARE; - } else if (stage == TRN_STAGE_UNDO_ACTION || stage == TRN_STAGE_UNDO_LOG) { - stage = TRN_STAGE_ROLLBACK; - } else if (stage == TRN_STAGE_COMMIT_LOG || stage == TRN_STAGE_FINISHED) { - stage = TRN_STAGE_COMMIT; - } else { - } - - SDB_SET_INT16(pRaw, dataPos, stage, _OVER) + SDB_SET_INT16(pRaw, dataPos, pTrans->stage, _OVER) SDB_SET_INT16(pRaw, dataPos, pTrans->policy, _OVER) - SDB_SET_INT16(pRaw, dataPos, pTrans->type, _OVER) + SDB_SET_INT16(pRaw, dataPos, pTrans->conflict, _OVER) + SDB_SET_INT16(pRaw, dataPos, pTrans->exec, _OVER) SDB_SET_INT64(pRaw, dataPos, pTrans->createdTime, _OVER) - SDB_SET_INT64(pRaw, dataPos, pTrans->dbUid, _OVER) SDB_SET_BINARY(pRaw, dataPos, pTrans->dbname, TSDB_DB_FNAME_LEN, _OVER) - SDB_SET_INT32(pRaw, dataPos, redoLogNum, _OVER) - SDB_SET_INT32(pRaw, dataPos, undoLogNum, _OVER) - SDB_SET_INT32(pRaw, dataPos, commitLogNum, _OVER) + SDB_SET_INT32(pRaw, dataPos, pTrans->redoActionPos, _OVER) + + int32_t redoActionNum = taosArrayGetSize(pTrans->redoActions); + int32_t undoActionNum = taosArrayGetSize(pTrans->undoActions); + int32_t commitActionNum = taosArrayGetSize(pTrans->commitActions); SDB_SET_INT32(pRaw, dataPos, redoActionNum, _OVER) SDB_SET_INT32(pRaw, dataPos, undoActionNum, _OVER) - - for (int32_t i = 0; i < redoLogNum; ++i) { - SSdbRaw *pTmp = taosArrayGetP(pTrans->redoLogs, i); - int32_t len = sdbGetRawTotalSize(pTmp); - SDB_SET_INT32(pRaw, dataPos, len, _OVER) - SDB_SET_BINARY(pRaw, dataPos, (void *)pTmp, len, _OVER) - } - - for (int32_t i = 0; i < undoLogNum; ++i) { - SSdbRaw *pTmp = taosArrayGetP(pTrans->undoLogs, i); - int32_t len = sdbGetRawTotalSize(pTmp); - SDB_SET_INT32(pRaw, dataPos, len, _OVER) - SDB_SET_BINARY(pRaw, dataPos, (void *)pTmp, len, _OVER) - } - - for (int32_t i = 0; i < commitLogNum; ++i) { - SSdbRaw *pTmp = taosArrayGetP(pTrans->commitLogs, i); - int32_t len = sdbGetRawTotalSize(pTmp); - SDB_SET_INT32(pRaw, dataPos, len, _OVER) - SDB_SET_BINARY(pRaw, dataPos, (void *)pTmp, len, _OVER) - } + SDB_SET_INT32(pRaw, dataPos, commitActionNum, _OVER) for (int32_t i = 0; i < redoActionNum; ++i) { STransAction *pAction = taosArrayGet(pTrans->redoActions, i); - SDB_SET_BINARY(pRaw, dataPos, (void *)&pAction->epSet, sizeof(SEpSet), _OVER) - SDB_SET_INT16(pRaw, dataPos, pAction->msgType, _OVER) + SDB_SET_INT32(pRaw, dataPos, pAction->id, _OVER) + SDB_SET_INT32(pRaw, dataPos, pAction->errCode, _OVER) SDB_SET_INT32(pRaw, dataPos, pAction->acceptableCode, _OVER) - SDB_SET_INT32(pRaw, dataPos, pAction->contLen, _OVER) - SDB_SET_BINARY(pRaw, dataPos, pAction->pCont, pAction->contLen, _OVER) + SDB_SET_INT8(pRaw, dataPos, pAction->actionType, _OVER) + SDB_SET_INT8(pRaw, dataPos, pAction->stage, _OVER) + if (pAction->actionType) { + int32_t len = sdbGetRawTotalSize(pAction->pRaw); + SDB_SET_INT8(pRaw, dataPos, pAction->rawWritten, _OVER) + SDB_SET_INT32(pRaw, dataPos, len, _OVER) + SDB_SET_BINARY(pRaw, dataPos, (void *)pAction->pRaw, len, _OVER) + } else { + 
SDB_SET_BINARY(pRaw, dataPos, (void *)&pAction->epSet, sizeof(SEpSet), _OVER) + SDB_SET_INT16(pRaw, dataPos, pAction->msgType, _OVER) + SDB_SET_INT8(pRaw, dataPos, pAction->msgSent, _OVER) + SDB_SET_INT8(pRaw, dataPos, pAction->msgReceived, _OVER) + SDB_SET_INT32(pRaw, dataPos, pAction->contLen, _OVER) + SDB_SET_BINARY(pRaw, dataPos, pAction->pCont, pAction->contLen, _OVER) + } } for (int32_t i = 0; i < undoActionNum; ++i) { STransAction *pAction = taosArrayGet(pTrans->undoActions, i); - SDB_SET_BINARY(pRaw, dataPos, (void *)&pAction->epSet, sizeof(SEpSet), _OVER) - SDB_SET_INT16(pRaw, dataPos, pAction->msgType, _OVER) + SDB_SET_INT32(pRaw, dataPos, pAction->id, _OVER) + SDB_SET_INT32(pRaw, dataPos, pAction->errCode, _OVER) SDB_SET_INT32(pRaw, dataPos, pAction->acceptableCode, _OVER) - SDB_SET_INT32(pRaw, dataPos, pAction->contLen, _OVER) - SDB_SET_BINARY(pRaw, dataPos, (void *)pAction->pCont, pAction->contLen, _OVER) + SDB_SET_INT8(pRaw, dataPos, pAction->actionType, _OVER) + SDB_SET_INT8(pRaw, dataPos, pAction->stage, _OVER) + if (pAction->actionType) { + int32_t len = sdbGetRawTotalSize(pAction->pRaw); + SDB_SET_INT8(pRaw, dataPos, pAction->rawWritten, _OVER) + SDB_SET_INT32(pRaw, dataPos, len, _OVER) + SDB_SET_BINARY(pRaw, dataPos, (void *)pAction->pRaw, len, _OVER) + } else { + SDB_SET_BINARY(pRaw, dataPos, (void *)&pAction->epSet, sizeof(SEpSet), _OVER) + SDB_SET_INT16(pRaw, dataPos, pAction->msgType, _OVER) + SDB_SET_INT8(pRaw, dataPos, pAction->msgSent, _OVER) + SDB_SET_INT8(pRaw, dataPos, pAction->msgReceived, _OVER) + SDB_SET_INT32(pRaw, dataPos, pAction->contLen, _OVER) + SDB_SET_BINARY(pRaw, dataPos, pAction->pCont, pAction->contLen, _OVER) + } + } + + for (int32_t i = 0; i < commitActionNum; ++i) { + STransAction *pAction = taosArrayGet(pTrans->commitActions, i); + SDB_SET_INT32(pRaw, dataPos, pAction->id, _OVER) + SDB_SET_INT32(pRaw, dataPos, pAction->errCode, _OVER) + SDB_SET_INT32(pRaw, dataPos, pAction->acceptableCode, _OVER) + SDB_SET_INT8(pRaw, dataPos, pAction->actionType, _OVER) + SDB_SET_INT8(pRaw, dataPos, pAction->stage, _OVER) + if (pAction->actionType) { + int32_t len = sdbGetRawTotalSize(pAction->pRaw); + SDB_SET_INT8(pRaw, dataPos, pAction->rawWritten, _OVER) + SDB_SET_INT32(pRaw, dataPos, len, _OVER) + SDB_SET_BINARY(pRaw, dataPos, (void *)pAction->pRaw, len, _OVER) + } else { + SDB_SET_BINARY(pRaw, dataPos, (void *)&pAction->epSet, sizeof(SEpSet), _OVER) + SDB_SET_INT16(pRaw, dataPos, pAction->msgType, _OVER) + SDB_SET_INT8(pRaw, dataPos, pAction->msgSent, _OVER) + SDB_SET_INT8(pRaw, dataPos, pAction->msgReceived, _OVER) + SDB_SET_INT32(pRaw, dataPos, pAction->contLen, _OVER) + SDB_SET_BINARY(pRaw, dataPos, pAction->pCont, pAction->contLen, _OVER) + } } SDB_SET_INT32(pRaw, dataPos, pTrans->startFunc, _OVER) @@ -219,11 +227,9 @@ static SSdbRow *mndTransActionDecode(SSdbRaw *pRaw) { char *pData = NULL; int32_t dataLen = 0; int8_t sver = 0; - int32_t redoLogNum = 0; - int32_t undoLogNum = 0; - int32_t commitLogNum = 0; int32_t redoActionNum = 0; int32_t undoActionNum = 0; + int32_t commitActionNum = 0; int32_t dataPos = 0; STransAction action = {0}; @@ -244,86 +250,116 @@ static SSdbRow *mndTransActionDecode(SSdbRaw *pRaw) { int16_t stage = 0; int16_t policy = 0; - int16_t type = 0; + int16_t conflict = 0; + int16_t exec = 0; SDB_GET_INT16(pRaw, dataPos, &stage, _OVER) SDB_GET_INT16(pRaw, dataPos, &policy, _OVER) - SDB_GET_INT16(pRaw, dataPos, &type, _OVER) + SDB_GET_INT16(pRaw, dataPos, &conflict, _OVER) + SDB_GET_INT16(pRaw, dataPos, &exec, _OVER) pTrans->stage 
= stage; pTrans->policy = policy; - pTrans->type = type; + pTrans->conflict = conflict; + pTrans->exec = exec; SDB_GET_INT64(pRaw, dataPos, &pTrans->createdTime, _OVER) - SDB_GET_INT64(pRaw, dataPos, &pTrans->dbUid, _OVER) SDB_GET_BINARY(pRaw, dataPos, pTrans->dbname, TSDB_DB_FNAME_LEN, _OVER) - SDB_GET_INT32(pRaw, dataPos, &redoLogNum, _OVER) - SDB_GET_INT32(pRaw, dataPos, &undoLogNum, _OVER) - SDB_GET_INT32(pRaw, dataPos, &commitLogNum, _OVER) + SDB_GET_INT32(pRaw, dataPos, &pTrans->redoActionPos, _OVER) SDB_GET_INT32(pRaw, dataPos, &redoActionNum, _OVER) SDB_GET_INT32(pRaw, dataPos, &undoActionNum, _OVER) + SDB_GET_INT32(pRaw, dataPos, &commitActionNum, _OVER) - pTrans->redoLogs = taosArrayInit(redoLogNum, sizeof(void *)); - pTrans->undoLogs = taosArrayInit(undoLogNum, sizeof(void *)); - pTrans->commitLogs = taosArrayInit(commitLogNum, sizeof(void *)); pTrans->redoActions = taosArrayInit(redoActionNum, sizeof(STransAction)); pTrans->undoActions = taosArrayInit(undoActionNum, sizeof(STransAction)); + pTrans->commitActions = taosArrayInit(commitActionNum, sizeof(STransAction)); - if (pTrans->redoLogs == NULL) goto _OVER; - if (pTrans->undoLogs == NULL) goto _OVER; - if (pTrans->commitLogs == NULL) goto _OVER; if (pTrans->redoActions == NULL) goto _OVER; if (pTrans->undoActions == NULL) goto _OVER; - - for (int32_t i = 0; i < redoLogNum; ++i) { - SDB_GET_INT32(pRaw, dataPos, &dataLen, _OVER) - pData = taosMemoryMalloc(dataLen); - if (pData == NULL) goto _OVER; - mTrace("raw:%p, is created", pData); - SDB_GET_BINARY(pRaw, dataPos, pData, dataLen, _OVER); - if (taosArrayPush(pTrans->redoLogs, &pData) == NULL) goto _OVER; - pData = NULL; - } - - for (int32_t i = 0; i < undoLogNum; ++i) { - SDB_GET_INT32(pRaw, dataPos, &dataLen, _OVER) - pData = taosMemoryMalloc(dataLen); - if (pData == NULL) goto _OVER; - mTrace("raw:%p, is created", pData); - SDB_GET_BINARY(pRaw, dataPos, pData, dataLen, _OVER); - if (taosArrayPush(pTrans->undoLogs, &pData) == NULL) goto _OVER; - pData = NULL; - } - - for (int32_t i = 0; i < commitLogNum; ++i) { - SDB_GET_INT32(pRaw, dataPos, &dataLen, _OVER) - pData = taosMemoryMalloc(dataLen); - if (pData == NULL) goto _OVER; - mTrace("raw:%p, is created", pData); - SDB_GET_BINARY(pRaw, dataPos, pData, dataLen, _OVER); - if (taosArrayPush(pTrans->commitLogs, &pData) == NULL) goto _OVER; - pData = NULL; - } + if (pTrans->commitActions == NULL) goto _OVER; for (int32_t i = 0; i < redoActionNum; ++i) { - SDB_GET_BINARY(pRaw, dataPos, (void *)&action.epSet, sizeof(SEpSet), _OVER); - SDB_GET_INT16(pRaw, dataPos, &action.msgType, _OVER) + SDB_GET_INT32(pRaw, dataPos, &action.id, _OVER) + SDB_GET_INT32(pRaw, dataPos, &action.errCode, _OVER) SDB_GET_INT32(pRaw, dataPos, &action.acceptableCode, _OVER) - SDB_GET_INT32(pRaw, dataPos, &action.contLen, _OVER) - action.pCont = taosMemoryMalloc(action.contLen); - if (action.pCont == NULL) goto _OVER; - SDB_GET_BINARY(pRaw, dataPos, action.pCont, action.contLen, _OVER); - if (taosArrayPush(pTrans->redoActions, &action) == NULL) goto _OVER; - action.pCont = NULL; + SDB_GET_INT8(pRaw, dataPos, &action.actionType, _OVER) + SDB_GET_INT8(pRaw, dataPos, &action.stage, _OVER) + if (action.actionType) { + SDB_GET_INT8(pRaw, dataPos, &action.rawWritten, _OVER) + SDB_GET_INT32(pRaw, dataPos, &dataLen, _OVER) + action.pRaw = taosMemoryMalloc(dataLen); + if (action.pRaw == NULL) goto _OVER; + mTrace("raw:%p, is created", pData); + SDB_GET_BINARY(pRaw, dataPos, (void *)action.pRaw, dataLen, _OVER); + if (taosArrayPush(pTrans->redoActions, &action) == 
NULL) goto _OVER; + action.pRaw = NULL; + } else { + SDB_GET_BINARY(pRaw, dataPos, (void *)&action.epSet, sizeof(SEpSet), _OVER); + SDB_GET_INT16(pRaw, dataPos, &action.msgType, _OVER) + SDB_GET_INT8(pRaw, dataPos, &action.msgSent, _OVER) + SDB_GET_INT8(pRaw, dataPos, &action.msgReceived, _OVER) + SDB_GET_INT32(pRaw, dataPos, &action.contLen, _OVER) + action.pCont = taosMemoryMalloc(action.contLen); + if (action.pCont == NULL) goto _OVER; + SDB_GET_BINARY(pRaw, dataPos, action.pCont, action.contLen, _OVER); + if (taosArrayPush(pTrans->redoActions, &action) == NULL) goto _OVER; + action.pCont = NULL; + } } for (int32_t i = 0; i < undoActionNum; ++i) { - SDB_GET_BINARY(pRaw, dataPos, (void *)&action.epSet, sizeof(SEpSet), _OVER); - SDB_GET_INT16(pRaw, dataPos, &action.msgType, _OVER) + SDB_GET_INT32(pRaw, dataPos, &action.id, _OVER) + SDB_GET_INT32(pRaw, dataPos, &action.errCode, _OVER) SDB_GET_INT32(pRaw, dataPos, &action.acceptableCode, _OVER) - SDB_GET_INT32(pRaw, dataPos, &action.contLen, _OVER) - action.pCont = taosMemoryMalloc(action.contLen); - if (action.pCont == NULL) goto _OVER; - SDB_GET_BINARY(pRaw, dataPos, action.pCont, action.contLen, _OVER); - if (taosArrayPush(pTrans->undoActions, &action) == NULL) goto _OVER; - action.pCont = NULL; + SDB_GET_INT8(pRaw, dataPos, &action.actionType, _OVER) + SDB_GET_INT8(pRaw, dataPos, &action.stage, _OVER) + if (action.actionType) { + SDB_GET_INT8(pRaw, dataPos, &action.rawWritten, _OVER) + SDB_GET_INT32(pRaw, dataPos, &dataLen, _OVER) + action.pRaw = taosMemoryMalloc(dataLen); + if (action.pRaw == NULL) goto _OVER; + mTrace("raw:%p, is created", pData); + SDB_GET_BINARY(pRaw, dataPos, (void *)action.pRaw, dataLen, _OVER); + if (taosArrayPush(pTrans->undoActions, &action) == NULL) goto _OVER; + action.pRaw = NULL; + } else { + SDB_GET_BINARY(pRaw, dataPos, (void *)&action.epSet, sizeof(SEpSet), _OVER); + SDB_GET_INT16(pRaw, dataPos, &action.msgType, _OVER) + SDB_GET_INT8(pRaw, dataPos, &action.msgSent, _OVER) + SDB_GET_INT8(pRaw, dataPos, &action.msgReceived, _OVER) + SDB_GET_INT32(pRaw, dataPos, &action.contLen, _OVER) + action.pCont = taosMemoryMalloc(action.contLen); + if (action.pCont == NULL) goto _OVER; + SDB_GET_BINARY(pRaw, dataPos, action.pCont, action.contLen, _OVER); + if (taosArrayPush(pTrans->undoActions, &action) == NULL) goto _OVER; + action.pCont = NULL; + } + } + + for (int32_t i = 0; i < commitActionNum; ++i) { + SDB_GET_INT32(pRaw, dataPos, &action.id, _OVER) + SDB_GET_INT32(pRaw, dataPos, &action.errCode, _OVER) + SDB_GET_INT32(pRaw, dataPos, &action.acceptableCode, _OVER) + SDB_GET_INT8(pRaw, dataPos, &action.actionType, _OVER) + SDB_GET_INT8(pRaw, dataPos, &action.stage, _OVER) + if (action.actionType) { + SDB_GET_INT8(pRaw, dataPos, &action.rawWritten, _OVER) + SDB_GET_INT32(pRaw, dataPos, &dataLen, _OVER) + action.pRaw = taosMemoryMalloc(dataLen); + if (action.pRaw == NULL) goto _OVER; + mTrace("raw:%p, is created", action.pRaw); + SDB_GET_BINARY(pRaw, dataPos, (void *)action.pRaw, dataLen, _OVER); + if (taosArrayPush(pTrans->commitActions, &action) == NULL) goto _OVER; + action.pRaw = NULL; + } else { + SDB_GET_BINARY(pRaw, dataPos, (void *)&action.epSet, sizeof(SEpSet), _OVER); + SDB_GET_INT16(pRaw, dataPos, &action.msgType, _OVER) + SDB_GET_INT8(pRaw, dataPos, &action.msgSent, _OVER) + SDB_GET_INT8(pRaw, dataPos, &action.msgReceived, _OVER) + SDB_GET_INT32(pRaw, dataPos, &action.contLen, _OVER) + action.pCont = taosMemoryMalloc(action.contLen); + if (action.pCont == NULL) goto _OVER; + SDB_GET_BINARY(pRaw, dataPos, 
action.pCont, action.contLen, _OVER); + if (taosArrayPush(pTrans->commitActions, &action) == NULL) goto _OVER; + action.pCont = NULL; + } } SDB_GET_INT32(pRaw, dataPos, &pTrans->startFunc, _OVER) @@ -343,7 +379,6 @@ _OVER: mError("trans:%d, failed to parse from raw:%p since %s", pTrans->id, pRaw, terrstr()); mndTransDropData(pTrans); taosMemoryFreeClear(pRow); - taosMemoryFreeClear(pData); taosMemoryFreeClear(action.pCont); return NULL; } @@ -356,20 +391,16 @@ static const char *mndTransStr(ETrnStage stage) { switch (stage) { case TRN_STAGE_PREPARE: return "prepare"; - case TRN_STAGE_REDO_LOG: - return "redoLog"; case TRN_STAGE_REDO_ACTION: return "redoAction"; - case TRN_STAGE_COMMIT: - return "commit"; - case TRN_STAGE_COMMIT_LOG: - return "commitLog"; - case TRN_STAGE_UNDO_ACTION: - return "undoAction"; - case TRN_STAGE_UNDO_LOG: - return "undoLog"; case TRN_STAGE_ROLLBACK: return "rollback"; + case TRN_STAGE_UNDO_ACTION: + return "undoAction"; + case TRN_STAGE_COMMIT: + return "commit"; + case TRN_STAGE_COMMIT_ACTION: + return "commitAction"; case TRN_STAGE_FINISHED: return "finished"; default: @@ -377,81 +408,6 @@ static const char *mndTransStr(ETrnStage stage) { } } -static const char *mndTransType(ETrnType type) { - switch (type) { - case TRN_TYPE_CREATE_USER: - return "create-user"; - case TRN_TYPE_ALTER_USER: - return "alter-user"; - case TRN_TYPE_DROP_USER: - return "drop-user"; - case TRN_TYPE_CREATE_FUNC: - return "create-func"; - case TRN_TYPE_DROP_FUNC: - return "drop-func"; - case TRN_TYPE_CREATE_SNODE: - return "create-snode"; - case TRN_TYPE_DROP_SNODE: - return "drop-snode"; - case TRN_TYPE_CREATE_QNODE: - return "create-qnode"; - case TRN_TYPE_DROP_QNODE: - return "drop-qnode"; - case TRN_TYPE_CREATE_BNODE: - return "create-bnode"; - case TRN_TYPE_DROP_BNODE: - return "drop-bnode"; - case TRN_TYPE_CREATE_MNODE: - return "create-mnode"; - case TRN_TYPE_DROP_MNODE: - return "drop-mnode"; - case TRN_TYPE_CREATE_TOPIC: - return "create-topic"; - case TRN_TYPE_DROP_TOPIC: - return "drop-topic"; - case TRN_TYPE_SUBSCRIBE: - return "subscribe"; - case TRN_TYPE_REBALANCE: - return "rebalance"; - case TRN_TYPE_COMMIT_OFFSET: - return "commit-offset"; - case TRN_TYPE_CREATE_STREAM: - return "create-stream"; - case TRN_TYPE_DROP_STREAM: - return "drop-stream"; - case TRN_TYPE_CONSUMER_LOST: - return "consumer-lost"; - case TRN_TYPE_CONSUMER_RECOVER: - return "consumer-recover"; - case TRN_TYPE_CREATE_DNODE: - return "create-qnode"; - case TRN_TYPE_DROP_DNODE: - return "drop-qnode"; - case TRN_TYPE_CREATE_DB: - return "create-db"; - case TRN_TYPE_ALTER_DB: - return "alter-db"; - case TRN_TYPE_DROP_DB: - return "drop-db"; - case TRN_TYPE_SPLIT_VGROUP: - return "split-vgroup"; - case TRN_TYPE_MERGE_VGROUP: - return "merge-vgroup"; - case TRN_TYPE_CREATE_STB: - return "create-stb"; - case TRN_TYPE_ALTER_STB: - return "alter-stb"; - case TRN_TYPE_DROP_STB: - return "drop-stb"; - case TRN_TYPE_CREATE_SMA: - return "create-sma"; - case TRN_TYPE_DROP_SMA: - return "drop-sma"; - default: - return "invalid"; - } -} - static void mndTransTestStartFunc(SMnode *pMnode, void *param, int32_t paramLen) { mInfo("test trans start, param:%s, len:%d", (char *)param, paramLen); } @@ -460,15 +416,15 @@ static void mndTransTestStopFunc(SMnode *pMnode, void *param, int32_t paramLen) mInfo("test trans stop, param:%s, len:%d", (char *)param, paramLen); } -static TransCbFp mndTransGetCbFp(ETrnFuncType ftype) { +static TransCbFp mndTransGetCbFp(ETrnFunc ftype) { switch (ftype) { - case TEST_TRANS_START_FUNC: 
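/*
 * The three decode loops above (redo/undo/commit) read one shared
 * per-action layout:
 *
 *   int32 id | int32 errCode | int32 acceptableCode | int8 actionType | int8 stage |
 *     actionType != 0 (sdb raw):  int8 rawWritten | int32 len | pRaw bytes
 *     actionType == 0 (rpc msg):  SEpSet epSet | int16 msgType | int8 msgSent |
 *                                 int8 msgReceived | int32 contLen | pCont bytes
 *
 * so the fixed prefix could be factored into one helper instead of being
 * repeated three times. A sketch of that helper, using the SDB_GET macros
 * exactly as the loops do (transDecodeActionSketch is a hypothetical name):
 */
static int32_t transDecodeActionSketch(SSdbRaw *pRaw, int32_t *pPos, STransAction *pAction) {
  int32_t dataPos = *pPos;
  SDB_GET_INT32(pRaw, dataPos, &pAction->id, _OVER)
  SDB_GET_INT32(pRaw, dataPos, &pAction->errCode, _OVER)
  SDB_GET_INT32(pRaw, dataPos, &pAction->acceptableCode, _OVER)
  SDB_GET_INT8(pRaw, dataPos, &pAction->actionType, _OVER)
  SDB_GET_INT8(pRaw, dataPos, &pAction->stage, _OVER)
  /* the raw-vs-message branch would follow here, as in the loops above */
  *pPos = dataPos;
  return 0;
_OVER:
  return -1;
}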
+ case TRANS_START_FUNC_TEST: return mndTransTestStartFunc; - case TEST_TRANS_STOP_FUNC: + case TRANS_STOP_FUNC_TEST: return mndTransTestStopFunc; - case MQ_REB_TRANS_START_FUNC: + case TRANS_START_FUNC_MQ_REB: return mndRebCntInc; - case MQ_REB_TRANS_STOP_FUNC: + case TRANS_STOP_FUNC_MQ_REB: return mndRebCntDec; default: return NULL; @@ -489,11 +445,9 @@ static int32_t mndTransActionInsert(SSdb *pSdb, STrans *pTrans) { } static void mndTransDropData(STrans *pTrans) { - mndTransDropLogs(pTrans->redoLogs); - mndTransDropLogs(pTrans->undoLogs); - mndTransDropLogs(pTrans->commitLogs); mndTransDropActions(pTrans->redoActions); mndTransDropActions(pTrans->undoActions); + mndTransDropActions(pTrans->commitActions); if (pTrans->rpcRsp != NULL) { taosMemoryFree(pTrans->rpcRsp); pTrans->rpcRsp = NULL; @@ -507,7 +461,7 @@ static void mndTransDropData(STrans *pTrans) { } static int32_t mndTransActionDelete(SSdb *pSdb, STrans *pTrans, bool callFunc) { - mDebug("trans:%d, perform delete action, row:%p stage:%s callfunc:%d", pTrans->id, pTrans, mndTransStr(pTrans->stage), + mTrace("trans:%d, perform delete action, row:%p stage:%s callfunc:%d", pTrans->id, pTrans, mndTransStr(pTrans->stage), callFunc); if (pTrans->stopFunc > 0 && callFunc) { TransCbFp fp = mndTransGetCbFp(pTrans->stopFunc); @@ -520,20 +474,35 @@ static int32_t mndTransActionDelete(SSdb *pSdb, STrans *pTrans, bool callFunc) { return 0; } -static int32_t mndTransActionUpdate(SSdb *pSdb, STrans *pOld, STrans *pNew) { - if (pNew->stage == TRN_STAGE_COMMIT) { - pNew->stage = TRN_STAGE_COMMIT_LOG; - mTrace("trans:%d, stage from %s to %s", pNew->id, mndTransStr(TRN_STAGE_COMMIT), mndTransStr(TRN_STAGE_COMMIT_LOG)); - } - - if (pNew->stage == TRN_STAGE_ROLLBACK) { - pNew->stage = TRN_STAGE_FINISHED; - mTrace("trans:%d, stage from %s to %s", pNew->id, mndTransStr(TRN_STAGE_ROLLBACK), mndTransStr(TRN_STAGE_FINISHED)); +static void mndTransUpdateActions(SArray *pOldArray, SArray *pNewArray) { + for (int32_t i = 0; i < taosArrayGetSize(pOldArray); ++i) { + STransAction *pOldAction = taosArrayGet(pOldArray, i); + STransAction *pNewAction = taosArrayGet(pNewArray, i); + pOldAction->rawWritten = pNewAction->rawWritten; + pOldAction->msgSent = pNewAction->msgSent; + pOldAction->msgReceived = pNewAction->msgReceived; + pOldAction->errCode = pNewAction->errCode; } +} +static int32_t mndTransActionUpdate(SSdb *pSdb, STrans *pOld, STrans *pNew) { mTrace("trans:%d, perform update action, old row:%p stage:%s, new row:%p stage:%s", pOld->id, pOld, mndTransStr(pOld->stage), pNew, mndTransStr(pNew->stage)); + mndTransUpdateActions(pOld->redoActions, pNew->redoActions); + mndTransUpdateActions(pOld->undoActions, pNew->undoActions); + mndTransUpdateActions(pOld->commitActions, pNew->commitActions); pOld->stage = pNew->stage; + pOld->redoActionPos = pNew->redoActionPos; + + if (pOld->stage == TRN_STAGE_COMMIT) { + pOld->stage = TRN_STAGE_COMMIT_ACTION; + mTrace("trans:%d, stage from commit to commitAction", pNew->id); + } + + if (pOld->stage == TRN_STAGE_ROLLBACK) { + pOld->stage = TRN_STAGE_FINISHED; + mTrace("trans:%d, stage from rollback to finished", pNew->id); + } return 0; } @@ -550,7 +519,7 @@ void mndReleaseTrans(SMnode *pMnode, STrans *pTrans) { sdbRelease(pSdb, pTrans); } -STrans *mndTransCreate(SMnode *pMnode, ETrnPolicy policy, ETrnType type, const SRpcMsg *pReq) { +STrans *mndTransCreate(SMnode *pMnode, ETrnPolicy policy, ETrnConflct conflict, const SRpcMsg *pReq) { STrans *pTrans = taosMemoryCalloc(1, sizeof(STrans)); if (pTrans == NULL) { terrno = 
TSDB_CODE_OUT_OF_MEMORY; @@ -561,41 +530,33 @@ STrans *mndTransCreate(SMnode *pMnode, ETrnPolicy policy, ETrnType type, const S pTrans->id = sdbGetMaxId(pMnode->pSdb, SDB_TRANS); pTrans->stage = TRN_STAGE_PREPARE; pTrans->policy = policy; - pTrans->type = type; + pTrans->conflict = conflict; + pTrans->exec = TRN_EXEC_PRARLLEL; pTrans->createdTime = taosGetTimestampMs(); - if (pReq != NULL) pTrans->rpcInfo = pReq->info; - pTrans->redoLogs = taosArrayInit(TRANS_ARRAY_SIZE, sizeof(void *)); - pTrans->undoLogs = taosArrayInit(TRANS_ARRAY_SIZE, sizeof(void *)); - pTrans->commitLogs = taosArrayInit(TRANS_ARRAY_SIZE, sizeof(void *)); pTrans->redoActions = taosArrayInit(TRANS_ARRAY_SIZE, sizeof(STransAction)); pTrans->undoActions = taosArrayInit(TRANS_ARRAY_SIZE, sizeof(STransAction)); + pTrans->commitActions = taosArrayInit(TRANS_ARRAY_SIZE, sizeof(STransAction)); - if (pTrans->redoLogs == NULL || pTrans->undoLogs == NULL || pTrans->commitLogs == NULL || - pTrans->redoActions == NULL || pTrans->undoActions == NULL) { + if (pTrans->redoActions == NULL || pTrans->undoActions == NULL || pTrans->commitActions == NULL) { terrno = TSDB_CODE_OUT_OF_MEMORY; mError("failed to create transaction since %s", terrstr()); return NULL; } - mDebug("trans:%d, local object is created, data:%p", pTrans->id, pTrans); + if (pReq != NULL) pTrans->rpcInfo = pReq->info; + mTrace("trans:%d, local object is created, data:%p", pTrans->id, pTrans); return pTrans; } -static void mndTransDropLogs(SArray *pArray) { - int32_t size = taosArrayGetSize(pArray); - for (int32_t i = 0; i < size; ++i) { - SSdbRaw *pRaw = taosArrayGetP(pArray, i); - sdbFreeRaw(pRaw); - } - - taosArrayDestroy(pArray); -} - static void mndTransDropActions(SArray *pArray) { int32_t size = taosArrayGetSize(pArray); for (int32_t i = 0; i < size; ++i) { STransAction *pAction = taosArrayGet(pArray, i); - taosMemoryFreeClear(pAction->pCont); + if (pAction->actionType) { + taosMemoryFreeClear(pAction->pRaw); + } else { + taosMemoryFreeClear(pAction->pCont); + } } taosArrayDestroy(pArray); @@ -604,18 +565,15 @@ static void mndTransDropActions(SArray *pArray) { void mndTransDrop(STrans *pTrans) { if (pTrans != NULL) { mndTransDropData(pTrans); - mDebug("trans:%d, local object is freed, data:%p", pTrans->id, pTrans); + mTrace("trans:%d, local object is freed, data:%p", pTrans->id, pTrans); taosMemoryFreeClear(pTrans); } } -static int32_t mndTransAppendLog(SArray *pArray, SSdbRaw *pRaw) { - if (pArray == NULL || pRaw == NULL) { - terrno = TSDB_CODE_INVALID_PARA; - return -1; - } +static int32_t mndTransAppendAction(SArray *pArray, STransAction *pAction) { + pAction->id = taosArrayGetSize(pArray); - void *ptr = taosArrayPush(pArray, &pRaw); + void *ptr = taosArrayPush(pArray, pAction); if (ptr == NULL) { terrno = TSDB_CODE_OUT_OF_MEMORY; return -1; @@ -624,27 +582,28 @@ static int32_t mndTransAppendLog(SArray *pArray, SSdbRaw *pRaw) { return 0; } -int32_t mndTransAppendRedolog(STrans *pTrans, SSdbRaw *pRaw) { return mndTransAppendLog(pTrans->redoLogs, pRaw); } - -int32_t mndTransAppendUndolog(STrans *pTrans, SSdbRaw *pRaw) { return mndTransAppendLog(pTrans->undoLogs, pRaw); } - -int32_t mndTransAppendCommitlog(STrans *pTrans, SSdbRaw *pRaw) { return mndTransAppendLog(pTrans->commitLogs, pRaw); } +int32_t mndTransAppendRedolog(STrans *pTrans, SSdbRaw *pRaw) { + STransAction action = {.stage = TRN_STAGE_REDO_ACTION, .actionType = true, .pRaw = pRaw}; + return mndTransAppendAction(pTrans->redoActions, &action); +} -static int32_t mndTransAppendAction(SArray *pArray, 
STransAction *pAction) { - void *ptr = taosArrayPush(pArray, pAction); - if (ptr == NULL) { - terrno = TSDB_CODE_OUT_OF_MEMORY; - return -1; - } +int32_t mndTransAppendUndolog(STrans *pTrans, SSdbRaw *pRaw) { + STransAction action = {.stage = TRN_STAGE_UNDO_ACTION, .actionType = true, .pRaw = pRaw}; + return mndTransAppendAction(pTrans->undoActions, &action); +} - return 0; +int32_t mndTransAppendCommitlog(STrans *pTrans, SSdbRaw *pRaw) { + STransAction action = {.stage = TRN_STAGE_COMMIT_ACTION, .actionType = true, .pRaw = pRaw}; + return mndTransAppendAction(pTrans->commitActions, &action); } int32_t mndTransAppendRedoAction(STrans *pTrans, STransAction *pAction) { + pAction->stage = TRN_STAGE_REDO_ACTION; return mndTransAppendAction(pTrans->redoActions, pAction); } int32_t mndTransAppendUndoAction(STrans *pTrans, STransAction *pAction) { + pAction->stage = TRN_STAGE_UNDO_ACTION; return mndTransAppendAction(pTrans->undoActions, pAction); } @@ -653,17 +612,16 @@ void mndTransSetRpcRsp(STrans *pTrans, void *pCont, int32_t contLen) { pTrans->rpcRspLen = contLen; } -void mndTransSetCb(STrans *pTrans, ETrnFuncType startFunc, ETrnFuncType stopFunc, void *param, int32_t paramLen) { +void mndTransSetCb(STrans *pTrans, ETrnFunc startFunc, ETrnFunc stopFunc, void *param, int32_t paramLen) { pTrans->startFunc = startFunc; pTrans->stopFunc = stopFunc; pTrans->param = param; pTrans->paramLen = paramLen; } -void mndTransSetDbInfo(STrans *pTrans, SDbObj *pDb) { - pTrans->dbUid = pDb->uid; - memcpy(pTrans->dbname, pDb->name, TSDB_DB_FNAME_LEN); -} +void mndTransSetDbName(STrans *pTrans, const char *dbname) { memcpy(pTrans->dbname, dbname, TSDB_DB_FNAME_LEN); } + +void mndTransSetSerial(STrans *pTrans) { pTrans->exec = TRN_EXEC_SERIAL; } static int32_t mndTransSync(SMnode *pMnode, STrans *pTrans) { SSdbRaw *pRaw = mndTransActionEncode(pTrans); @@ -673,8 +631,8 @@ static int32_t mndTransSync(SMnode *pMnode, STrans *pTrans) { } sdbSetRawStatus(pRaw, SDB_STATUS_READY); - mDebug("trans:%d, sync to other nodes", pTrans->id); - int32_t code = mndSyncPropose(pMnode, pRaw); + mDebug("trans:%d, sync to other mnodes", pTrans->id); + int32_t code = mndSyncPropose(pMnode, pRaw, pTrans->id); if (code != 0) { mError("trans:%d, failed to sync since %s", pTrans->id, terrstr()); sdbFreeRaw(pRaw); @@ -686,83 +644,50 @@ static int32_t mndTransSync(SMnode *pMnode, STrans *pTrans) { return 0; } -static bool mndIsBasicTrans(STrans *pTrans) { - return pTrans->type > TRN_TYPE_BASIC_SCOPE && pTrans->type < TRN_TYPE_BASIC_SCOPE_END; -} - -static bool mndIsGlobalTrans(STrans *pTrans) { - return pTrans->type > TRN_TYPE_GLOBAL_SCOPE && pTrans->type < TRN_TYPE_GLOBAL_SCOPE_END; -} - -static bool mndIsDbTrans(STrans *pTrans) { - return pTrans->type > TRN_TYPE_DB_SCOPE && pTrans->type < TRN_TYPE_DB_SCOPE_END; -} - -static bool mndIsStbTrans(STrans *pTrans) { - return pTrans->type > TRN_TYPE_STB_SCOPE && pTrans->type < TRN_TYPE_STB_SCOPE_END; -} - -static bool mndCheckTransConflict(SMnode *pMnode, STrans *pNewTrans) { +static bool mndCheckTransConflict(SMnode *pMnode, STrans *pNew) { STrans *pTrans = NULL; void *pIter = NULL; bool conflict = false; - if (mndIsBasicTrans(pNewTrans)) return conflict; + if (pNew->conflict == TRN_CONFLICT_NOTHING) return conflict; while (1) { pIter = sdbFetch(pMnode->pSdb, SDB_TRANS, pIter, (void **)&pTrans); if (pIter == NULL) break; - if (mndIsGlobalTrans(pNewTrans)) { - if (mndIsDbTrans(pTrans) || mndIsStbTrans(pTrans)) { - mError("trans:%d, can't execute since trans:%d in progress db:%s", pNewTrans->id, 
pTrans->id, pTrans->dbname); - conflict = true; - } else { - } + if (pNew->conflict == TRN_CONFLICT_GLOBAL) conflict = true; + if (pNew->conflict == TRN_CONFLICT_DB) { + if (pTrans->conflict == TRN_CONFLICT_GLOBAL) conflict = true; + if (pTrans->conflict == TRN_CONFLICT_DB && strcmp(pNew->dbname, pTrans->dbname) == 0) conflict = true; + if (pTrans->conflict == TRN_CONFLICT_DB_INSIDE && strcmp(pNew->dbname, pTrans->dbname) == 0) conflict = true; } - - else if (mndIsDbTrans(pNewTrans)) { - if (mndIsGlobalTrans(pTrans)) { - mError("trans:%d, can't execute since trans:%d in progress", pNewTrans->id, pTrans->id); - conflict = true; - } else if (mndIsDbTrans(pTrans) || mndIsStbTrans(pTrans)) { - if (pNewTrans->dbUid == pTrans->dbUid) { - mError("trans:%d, can't execute since trans:%d in progress db:%s", pNewTrans->id, pTrans->id, pTrans->dbname); - conflict = true; - } - } else { - } - } - - else if (mndIsStbTrans(pNewTrans)) { - if (mndIsGlobalTrans(pTrans)) { - mError("trans:%d, can't execute since trans:%d in progress", pNewTrans->id, pTrans->id); - conflict = true; - } else if (mndIsDbTrans(pTrans)) { - if (pNewTrans->dbUid == pTrans->dbUid) { - mError("trans:%d, can't execute since trans:%d in progress db:%s", pNewTrans->id, pTrans->id, pTrans->dbname); - conflict = true; - } - } else { - } + if (pNew->conflict == TRN_CONFLICT_DB_INSIDE) { + if (pTrans->conflict == TRN_CONFLICT_GLOBAL) conflict = true; + if (pTrans->conflict == TRN_CONFLICT_DB && strcmp(pNew->dbname, pTrans->dbname) == 0) conflict = true; } - + if (conflict) mError("trans:%d, can't execute since conflict with trans:%d, db:%s", pNew->id, pTrans->id, pTrans->dbname); sdbRelease(pMnode->pSdb, pTrans); } - sdbCancelFetch(pMnode->pSdb, pIter); - sdbRelease(pMnode->pSdb, pTrans); return conflict; } int32_t mndTransPrepare(SMnode *pMnode, STrans *pTrans) { + if (pTrans->conflict == TRN_CONFLICT_DB || pTrans->conflict == TRN_CONFLICT_DB_INSIDE) { + if (strlen(pTrans->dbname) == 0) { + terrno = TSDB_CODE_MND_TRANS_CONFLICT; + mError("trans:%d, failed to prepare since dbname is not set for conflict check", pTrans->id); + return -1; + } + } + if (mndCheckTransConflict(pMnode, pTrans)) { terrno = TSDB_CODE_MND_TRANS_CONFLICT; mError("trans:%d, failed to prepare since %s", pTrans->id, terrstr()); return -1; } - if (taosArrayGetSize(pTrans->commitLogs) <= 0) { + if (taosArrayGetSize(pTrans->commitActions) <= 0) { terrno = TSDB_CODE_MND_TRANS_CLOG_IS_NULL; mError("trans:%d, failed to prepare since %s", pTrans->id, terrstr()); return -1; @@ -793,8 +718,6 @@ int32_t mndTransPrepare(SMnode *pMnode, STrans *pTrans) { } static int32_t mndTransCommit(SMnode *pMnode, STrans *pTrans) { - if (taosArrayGetSize(pTrans->commitLogs) == 0 && taosArrayGetSize(pTrans->redoActions) == 0) return 0; - mDebug("trans:%d, commit transaction", pTrans->id); if (mndTransSync(pMnode, pTrans) != 0) { mError("trans:%d, failed to commit since %s", pTrans->id, terrstr()); @@ -823,32 +746,35 @@ static void mndTransSendRpcRsp(SMnode *pMnode, STrans *pTrans) { } if (pTrans->policy == TRN_POLICY_ROLLBACK) { - if (pTrans->stage == TRN_STAGE_UNDO_LOG || pTrans->stage == TRN_STAGE_UNDO_ACTION || - pTrans->stage == TRN_STAGE_ROLLBACK) { + if (pTrans->stage == TRN_STAGE_UNDO_ACTION || pTrans->stage == TRN_STAGE_ROLLBACK) { if (code == 0) code = TSDB_CODE_MND_TRANS_UNKNOW_ERROR; sendRsp = true; } } else { - if (pTrans->stage == TRN_STAGE_REDO_ACTION && pTrans->failedTimes > 6) { + if (pTrans->stage == TRN_STAGE_REDO_ACTION && pTrans->failedTimes > 3) { if (code == 0) code = TSDB_CODE_MND_TRANS_UNKNOW_ERROR;
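/* Two of the conditions that force a reply back to the waiting client: a
   ROLLBACK-policy transaction that has entered undoAction/rollback, and a
   RETRY-policy transaction stuck in redoAction after more than 3 failed rounds;
   TSDB_CODE_MND_TRANS_UNKNOW_ERROR stands in when no concrete error code was
   captured. */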
sendRsp = true; } } if (sendRsp && pTrans->rpcInfo.handle != NULL) { - void *rpcCont = rpcMallocCont(pTrans->rpcRspLen); - if (rpcCont != NULL) { - memcpy(rpcCont, pTrans->rpcRsp, pTrans->rpcRspLen); + mDebug("trans:%d, send rsp, code:0x%x stage:%s app:%p", pTrans->id, code, mndTransStr(pTrans->stage), + pTrans->rpcInfo.ahandle); + if (code == TSDB_CODE_RPC_NETWORK_UNAVAIL) { + code = TSDB_CODE_RPC_INDIRECT_NETWORK_UNAVAIL; + } + SRpcMsg rspMsg = {.code = code, .info = pTrans->rpcInfo}; + + if (pTrans->rpcRspLen != 0) { + void *rpcCont = rpcMallocCont(pTrans->rpcRspLen); + if (rpcCont != NULL) { + memcpy(rpcCont, pTrans->rpcRsp, pTrans->rpcRspLen); + rspMsg.pCont = rpcCont; + rspMsg.contLen = pTrans->rpcRspLen; + } + taosMemoryFree(pTrans->rpcRsp); } - taosMemoryFree(pTrans->rpcRsp); - mDebug("trans:%d, send rsp, code:0x%x stage:%d app:%p", pTrans->id, code, pTrans->stage, pTrans->rpcInfo.ahandle); - SRpcMsg rspMsg = { - .code = code, - .pCont = rpcCont, - .contLen = pTrans->rpcRspLen, - .info = pTrans->rpcInfo, - }; tmsgSendRsp(&rspMsg); pTrans->rpcInfo.handle = NULL; pTrans->rpcRsp = NULL; @@ -893,148 +819,150 @@ void mndTransProcessRsp(SRpcMsg *pRsp) { if (pAction != NULL) { pAction->msgReceived = 1; pAction->errCode = pRsp->code; - if (pAction->errCode != 0) { - tstrncpy(pTrans->lastError, tstrerror(pAction->errCode), TSDB_TRANS_ERROR_LEN); - } } - mDebug("trans:%d, action:%d response is received, code:0x%x, accept:0x%04x", transId, action, pRsp->code, - pAction->acceptableCode); + mDebug("trans:%d, %s:%d response is received, code:0x%x, accept:0x%x", transId, mndTransStr(pAction->stage), action, + pRsp->code, pAction->acceptableCode); mndTransExecute(pMnode, pTrans); _OVER: mndReleaseTrans(pMnode, pTrans); } -static int32_t mndTransExecuteLogs(SMnode *pMnode, SArray *pArray) { - SSdb *pSdb = pMnode->pSdb; - int32_t arraySize = taosArrayGetSize(pArray); +static void mndTransResetActions(SMnode *pMnode, STrans *pTrans, SArray *pArray) { + int32_t numOfActions = taosArrayGetSize(pArray); - if (arraySize == 0) return 0; + for (int32_t action = 0; action < numOfActions; ++action) { + STransAction *pAction = taosArrayGet(pArray, action); + if (pAction->msgSent && pAction->msgReceived && + (pAction->errCode == 0 || pAction->errCode == pAction->acceptableCode)) + continue; + if (pAction->rawWritten && (pAction->errCode == 0 || pAction->errCode == pAction->acceptableCode)) continue; - int32_t code = 0; - for (int32_t i = 0; i < arraySize; ++i) { - SSdbRaw *pRaw = taosArrayGetP(pArray, i); - if (sdbWriteWithoutFree(pSdb, pRaw) != 0) { - code = ((terrno != 0) ? 
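/* The dedicated redo/undo/commit log executors removed here collapse into the
   generic action path: a raw sdb write now travels as an STransAction with
   actionType set and pRaw attached, and mndTransWriteSingleLog() below applies
   it, treating TSDB_CODE_SDB_OBJ_NOT_THERE as success so that re-applying a
   write whose object is already gone cannot wedge the transaction. */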
terrno : -1); - } + pAction->rawWritten = 0; + pAction->msgSent = 0; + pAction->msgReceived = 0; + pAction->errCode = 0; + mDebug("trans:%d, %s:%d execute status is reset", pTrans->id, mndTransStr(pAction->stage), action); } - - terrno = code; - return code; } -static int32_t mndTransExecuteRedoLogs(SMnode *pMnode, STrans *pTrans) { - int32_t code = mndTransExecuteLogs(pMnode, pTrans->redoLogs); - if (code != 0) { - mError("failed to execute redoLogs since %s", terrstr()); - } - return code; -} +static int32_t mndTransWriteSingleLog(SMnode *pMnode, STrans *pTrans, STransAction *pAction) { + if (pAction->rawWritten) return 0; -static int32_t mndTransExecuteUndoLogs(SMnode *pMnode, STrans *pTrans) { - int32_t code = mndTransExecuteLogs(pMnode, pTrans->undoLogs); - if (code != 0) { - mError("failed to execute undoLogs since %s, return success", terrstr()); + int32_t code = sdbWriteWithoutFree(pMnode->pSdb, pAction->pRaw); + if (code == 0 || terrno == TSDB_CODE_SDB_OBJ_NOT_THERE) { + pAction->rawWritten = true; + pAction->errCode = 0; + code = 0; + mDebug("trans:%d, %s:%d write to sdb", pTrans->id, mndTransStr(pAction->stage), pAction->id); + } else { + pAction->errCode = (terrno != 0) ? terrno : code; + mError("trans:%d, %s:%d failed to write sdb since %s", pTrans->id, mndTransStr(pAction->stage), pAction->id, + terrstr()); } - return 0; // return success in any case -} - -static int32_t mndTransExecuteCommitLogs(SMnode *pMnode, STrans *pTrans) { - int32_t code = mndTransExecuteLogs(pMnode, pTrans->commitLogs); - if (code != 0) { - mError("failed to execute commitLogs since %s", terrstr()); - } return code; } -static void mndTransResetActions(SMnode *pMnode, STrans *pTrans, SArray *pArray) { - int32_t numOfActions = taosArrayGetSize(pArray); +static int32_t mndTransSendSingleMsg(SMnode *pMnode, STrans *pTrans, STransAction *pAction) { + if (pAction->msgSent) return 0; + if (!pMnode->deploy && !mndIsMaster(pMnode)) return -1; - for (int32_t action = 0; action < numOfActions; ++action) { - STransAction *pAction = taosArrayGet(pArray, action); - if (pAction == NULL) continue; - if (pAction->msgSent && pAction->msgReceived && pAction->errCode == 0) continue; + int64_t signature = pTrans->id; + signature = (signature << 32); + signature += pAction->id; - pAction->msgSent = 0; + SRpcMsg rpcMsg = {.msgType = pAction->msgType, .contLen = pAction->contLen, .info.ahandle = (void *)signature}; + rpcMsg.pCont = rpcMallocCont(pAction->contLen); + if (rpcMsg.pCont == NULL) { + terrno = TSDB_CODE_OUT_OF_MEMORY; + return -1; + } + memcpy(rpcMsg.pCont, pAction->pCont, pAction->contLen); + + int32_t code = tmsgSendReq(&pAction->epSet, &rpcMsg); + if (code == 0) { + pAction->msgSent = 1; pAction->msgReceived = 0; pAction->errCode = 0; - mDebug("trans:%d, action:%d execute status is reset", pTrans->id, action); + mDebug("trans:%d, %s:%d is sent to %s:%u", pTrans->id, mndTransStr(pAction->stage), pAction->id, + pAction->epSet.eps[pAction->epSet.inUse].fqdn, pAction->epSet.eps[pAction->epSet.inUse].port); + } else { + pAction->msgSent = 0; + pAction->msgReceived = 0; + pAction->errCode = (terrno != 0) ? 
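/* The rpc ahandle packs (transId, actionId) into one 64-bit value, built above
   in this function; the receiving side can recover both halves with the inverse
   shift, e.g. (a sketch of the decode, not code from this patch):
     int64_t signature = (int64_t)pRsp->info.ahandle;
     int32_t transId   = (int32_t)(signature >> 32);
     int32_t actionId  = (int32_t)(signature & 0xFFFFFFFF);                      */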
terrno : code; + mError("trans:%d, %s:%d not send since %s", pTrans->id, mndTransStr(pAction->stage), pAction->id, terrstr()); } + + return code; } -static int32_t mndTransSendActionMsg(SMnode *pMnode, STrans *pTrans, SArray *pArray) { +static int32_t mndTransExecSingleAction(SMnode *pMnode, STrans *pTrans, STransAction *pAction) { + if (pAction->actionType) { + return mndTransWriteSingleLog(pMnode, pTrans, pAction); + } else { + return mndTransSendSingleMsg(pMnode, pTrans, pAction); + } +} + +static int32_t mndTransExecSingleActions(SMnode *pMnode, STrans *pTrans, SArray *pArray) { int32_t numOfActions = taosArrayGetSize(pArray); + int32_t code = 0; for (int32_t action = 0; action < numOfActions; ++action) { STransAction *pAction = taosArrayGet(pArray, action); - if (pAction == NULL) continue; - if (pAction->msgSent) continue; - - int64_t signature = pTrans->id; - signature = (signature << 32); - signature += action; - - SRpcMsg rpcMsg = {.msgType = pAction->msgType, .contLen = pAction->contLen, .info.ahandle = (void *)signature}; - rpcMsg.pCont = rpcMallocCont(pAction->contLen); - if (rpcMsg.pCont == NULL) { - terrno = TSDB_CODE_OUT_OF_MEMORY; - return -1; - } - memcpy(rpcMsg.pCont, pAction->pCont, pAction->contLen); - - if (tmsgSendReq(&pAction->epSet, &rpcMsg) == 0) { - mDebug("trans:%d, action:%d is sent to %s:%u", pTrans->id, action, pAction->epSet.eps[pAction->epSet.inUse].fqdn, - pAction->epSet.eps[pAction->epSet.inUse].port); - pAction->msgSent = 1; - pAction->msgReceived = 0; - pAction->errCode = 0; - } else { - pAction->msgSent = 0; - pAction->msgReceived = 0; - pAction->errCode = terrno; - mError("trans:%d, action:%d not send since %s", pTrans->id, action, terrstr()); - return -1; - } + code = mndTransExecSingleAction(pMnode, pTrans, pAction); + if (code != 0) break; } - return 0; + return code; } static int32_t mndTransExecuteActions(SMnode *pMnode, STrans *pTrans, SArray *pArray) { int32_t numOfActions = taosArrayGetSize(pArray); if (numOfActions == 0) return 0; - if (mndTransSendActionMsg(pMnode, pTrans, pArray) != 0) { + if (mndTransExecSingleActions(pMnode, pTrans, pArray) != 0) { return -1; } - int32_t numOfReceived = 0; - int32_t errCode = 0; + int32_t numOfExecuted = 0; + int32_t errCode = 0; + STransAction *pErrAction = NULL; for (int32_t action = 0; action < numOfActions; ++action) { STransAction *pAction = taosArrayGet(pArray, action); - if (pAction == NULL) continue; - if (pAction->msgSent && pAction->msgReceived) { - numOfReceived++; + if (pAction->msgReceived || pAction->rawWritten) { + numOfExecuted++; if (pAction->errCode != 0 && pAction->errCode != pAction->acceptableCode) { errCode = pAction->errCode; + pErrAction = pAction; } } } - if (numOfReceived == numOfActions) { + if (numOfExecuted == numOfActions) { if (errCode == 0) { + pTrans->lastErrorAction = 0; + pTrans->lastErrorNo = 0; + pTrans->lastErrorMsgType = 0; + memset(&pTrans->lastErrorEpset, 0, sizeof(pTrans->lastErrorEpset)); mDebug("trans:%d, all %d actions execute successfully", pTrans->id, numOfActions); return 0; } else { mError("trans:%d, all %d actions executed, code:0x%x", pTrans->id, numOfActions, errCode & 0XFFFF); + if (pErrAction != NULL) { + pTrans->lastErrorMsgType = pErrAction->msgType; + pTrans->lastErrorAction = pErrAction->id; + pTrans->lastErrorNo = pErrAction->errCode; + pTrans->lastErrorEpset = pErrAction->epSet; + } mndTransResetActions(pMnode, pTrans, pArray); terrno = errCode; return errCode; } } else { - mDebug("trans:%d, %d of %d actions executed", pTrans->id, numOfReceived, 
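/* Contract of the surrounding mndTransExecuteActions(): it returns 0 only when
   every action has either written its raw record or received a reply with an
   acceptable code; on the first unacceptable error it records the offender in
   pTrans->lastError* and resets all actions so the next round can retry them;
   until everything has reported back it keeps the transaction parked with
   TSDB_CODE_ACTION_IN_PROGRESS. */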
numOfActions); + mDebug("trans:%d, %d of %d actions executed", pTrans->id, numOfExecuted, numOfActions); return TSDB_CODE_ACTION_IN_PROGRESS; } } @@ -1055,35 +983,99 @@ static int32_t mndTransExecuteUndoActions(SMnode *pMnode, STrans *pTrans) { return code; } -static bool mndTransPerformPrepareStage(SMnode *pMnode, STrans *pTrans) { - bool continueExec = true; - pTrans->stage = TRN_STAGE_REDO_LOG; - mDebug("trans:%d, stage from prepare to redoLog", pTrans->id); - return continueExec; +static int32_t mndTransExecuteCommitActions(SMnode *pMnode, STrans *pTrans) { + int32_t code = mndTransExecuteActions(pMnode, pTrans, pTrans->commitActions); + if (code != 0 && code != TSDB_CODE_ACTION_IN_PROGRESS) { + mError("failed to execute commitActions since %s", terrstr()); + } + return code; } -static bool mndTransPerformRedoLogStage(SMnode *pMnode, STrans *pTrans) { - bool continueExec = true; - int32_t code = mndTransExecuteRedoLogs(pMnode, pTrans); +static int32_t mndTransExecuteRedoActionsSerial(SMnode *pMnode, STrans *pTrans) { + int32_t code = 0; + int32_t numOfActions = taosArrayGetSize(pTrans->redoActions); + if (numOfActions == 0) return code; + if (pTrans->redoActionPos >= numOfActions) return code; + + for (int32_t action = pTrans->redoActionPos; action < numOfActions; ++action) { + STransAction *pAction = taosArrayGet(pTrans->redoActions, pTrans->redoActionPos); + + code = mndTransExecSingleAction(pMnode, pTrans, pAction); + if (code == 0) { + if (pAction->msgSent) { + if (pAction->msgReceived) { + if (pAction->errCode != 0 && pAction->errCode != pAction->acceptableCode) { + code = pAction->errCode; + pAction->msgSent = 0; + pAction->msgReceived = 0; + mDebug("trans:%d, %s:%d execute status is reset", pTrans->id, mndTransStr(pAction->stage), action); + } + } else { + code = TSDB_CODE_ACTION_IN_PROGRESS; + } + } + if (pAction->rawWritten) { + if (pAction->errCode != 0 && pAction->errCode != pAction->acceptableCode) { + code = pAction->errCode; + } + } + } - if (code == 0) { - pTrans->code = 0; - pTrans->stage = TRN_STAGE_REDO_ACTION; - mDebug("trans:%d, stage from redoLog to redoAction", pTrans->id); - } else { - pTrans->code = terrno; - pTrans->stage = TRN_STAGE_UNDO_LOG; - mError("trans:%d, stage from redoLog to undoLog since %s", pTrans->id, terrstr()); + if (code == 0) { + pTrans->lastErrorAction = 0; + pTrans->lastErrorNo = 0; + pTrans->lastErrorMsgType = 0; + memset(&pTrans->lastErrorEpset, 0, sizeof(pTrans->lastErrorEpset)); + } else { + pTrans->lastErrorMsgType = pAction->msgType; + pTrans->lastErrorAction = action; + pTrans->lastErrorNo = pAction->errCode; + pTrans->lastErrorEpset = pAction->epSet; + } + + if (code == 0) { + pTrans->code = 0; + pTrans->redoActionPos++; + mDebug("trans:%d, %s:%d is executed and need sync to other mnodes", pTrans->id, mndTransStr(pAction->stage), + pAction->id); + code = mndTransSync(pMnode, pTrans); + if (code != 0) { + pTrans->code = terrno; + mError("trans:%d, %s:%d is executed and failed to sync to other mnodes since %s", pTrans->id, + mndTransStr(pAction->stage), pAction->id, terrstr()); + break; + } + } else if (code == TSDB_CODE_ACTION_IN_PROGRESS) { + mDebug("trans:%d, %s:%d is in progress and wait it finish", pTrans->id, mndTransStr(pAction->stage), pAction->id); + break; + } else { + terrno = code; + pTrans->code = code; + mError("trans:%d, %s:%d failed to execute since %s", pTrans->id, mndTransStr(pAction->stage), pAction->id, + terrstr()); + break; + } } + return code; +} + +static bool mndTransPerformPrepareStage(SMnode *pMnode, STrans 
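/* Serial execution (enabled with mndTransSetSerial above): redoActions run one
   at a time starting at redoActionPos, and each completed action bumps that
   position and persists it to the other mnodes through mndTransSync() before
   the next action starts, so execution can resume from the first unfinished
   action after a restart or leader change. */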
*pTrans) { + bool continueExec = true; + pTrans->stage = TRN_STAGE_REDO_ACTION; + mDebug("trans:%d, stage from prepare to redoAction", pTrans->id); return continueExec; } static bool mndTransPerformRedoActionStage(SMnode *pMnode, STrans *pTrans) { - if (!pMnode->deploy && !mndIsMaster(pMnode)) return false; - bool continueExec = true; - int32_t code = mndTransExecuteRedoActions(pMnode, pTrans); + int32_t code = 0; + + if (pTrans->exec == TRN_EXEC_SERIAL) { + code = mndTransExecuteRedoActionsSerial(pMnode, pTrans); + } else { + code = mndTransExecuteRedoActions(pMnode, pTrans); + } if (code == 0) { pTrans->code = 0; @@ -1115,8 +1107,8 @@ static bool mndTransPerformCommitStage(SMnode *pMnode, STrans *pTrans) { if (code == 0) { pTrans->code = 0; - pTrans->stage = TRN_STAGE_COMMIT_LOG; - mDebug("trans:%d, stage from commit to commitLog", pTrans->id); + pTrans->stage = TRN_STAGE_COMMIT_ACTION; + mDebug("trans:%d, stage from commit to commitAction", pTrans->id); continueExec = true; } else { pTrans->code = terrno; @@ -1135,35 +1127,19 @@ static bool mndTransPerformCommitStage(SMnode *pMnode, STrans *pTrans) { return continueExec; } -static bool mndTransPerformCommitLogStage(SMnode *pMnode, STrans *pTrans) { +static bool mndTransPerformCommitActionStage(SMnode *pMnode, STrans *pTrans) { bool continueExec = true; - int32_t code = mndTransExecuteCommitLogs(pMnode, pTrans); + int32_t code = mndTransExecuteCommitActions(pMnode, pTrans); if (code == 0) { pTrans->code = 0; pTrans->stage = TRN_STAGE_FINISHED; - mDebug("trans:%d, stage from commitLog to finished", pTrans->id); + mDebug("trans:%d, stage from commitAction to finished", pTrans->id); continueExec = true; } else { pTrans->code = terrno; pTrans->failedTimes++; - mError("trans:%d, stage keep on commitLog since %s, failedTimes:%d", pTrans->id, terrstr(), pTrans->failedTimes); - continueExec = false; - } - - return continueExec; -} - -static bool mndTransPerformUndoLogStage(SMnode *pMnode, STrans *pTrans) { - bool continueExec = true; - int32_t code = mndTransExecuteUndoLogs(pMnode, pTrans); - - if (code == 0) { - pTrans->stage = TRN_STAGE_ROLLBACK; - mDebug("trans:%d, stage from undoLog to rollback", pTrans->id); - continueExec = true; - } else { - mError("trans:%d, stage keep on undoLog since %s", pTrans->id, terrstr()); + mError("trans:%d, stage keep on commitAction since %s, failedTimes:%d", pTrans->id, terrstr(), pTrans->failedTimes); continueExec = false; } @@ -1171,14 +1147,12 @@ static bool mndTransPerformUndoLogStage(SMnode *pMnode, STrans *pTrans) { } static bool mndTransPerformUndoActionStage(SMnode *pMnode, STrans *pTrans) { - if (!pMnode->deploy && !mndIsMaster(pMnode)) return false; - bool continueExec = true; int32_t code = mndTransExecuteUndoActions(pMnode, pTrans); if (code == 0) { - pTrans->stage = TRN_STAGE_UNDO_LOG; - mDebug("trans:%d, stage from undoAction to undoLog", pTrans->id); + pTrans->stage = TRN_STAGE_ROLLBACK; + mDebug("trans:%d, stage from undoAction to rollback", pTrans->id); continueExec = true; } else if (code == TSDB_CODE_ACTION_IN_PROGRESS) { mDebug("trans:%d, stage keep on undoAction since %s", pTrans->id, tstrerror(code)); @@ -1223,8 +1197,7 @@ static bool mndTransPerfromFinishedStage(SMnode *pMnode, STrans *pTrans) { mError("trans:%d, failed to write sdb since %s", pTrans->id, terrstr()); } - mDebug("trans:%d, finished, code:0x%x, failedTimes:%d", pTrans->id, pTrans->code, pTrans->failedTimes); - + mDebug("trans:%d, execute finished, code:0x%x, failedTimes:%d", pTrans->id, pTrans->code, pTrans->failedTimes); 
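/* Stage flow driven by the switch in mndTransExecute() below, roughly:
     prepare -> redoAction -> commit -> commitAction -> finished
   and, for TRN_POLICY_ROLLBACK when a redo action fails:
     redoAction -> undoAction -> rollback -> finished
   (the dedicated redoLog/undoLog/commitLog stages are gone in this revision). */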
return continueExec; } @@ -1237,24 +1210,18 @@ static void mndTransExecute(SMnode *pMnode, STrans *pTrans) { case TRN_STAGE_PREPARE: continueExec = mndTransPerformPrepareStage(pMnode, pTrans); break; - case TRN_STAGE_REDO_LOG: - continueExec = mndTransPerformRedoLogStage(pMnode, pTrans); - break; case TRN_STAGE_REDO_ACTION: continueExec = mndTransPerformRedoActionStage(pMnode, pTrans); break; - case TRN_STAGE_UNDO_LOG: - continueExec = mndTransPerformUndoLogStage(pMnode, pTrans); + case TRN_STAGE_COMMIT: + continueExec = mndTransPerformCommitStage(pMnode, pTrans); + break; + case TRN_STAGE_COMMIT_ACTION: + continueExec = mndTransPerformCommitActionStage(pMnode, pTrans); break; case TRN_STAGE_UNDO_ACTION: continueExec = mndTransPerformUndoActionStage(pMnode, pTrans); break; - case TRN_STAGE_COMMIT_LOG: - continueExec = mndTransPerformCommitLogStage(pMnode, pTrans); - break; - case TRN_STAGE_COMMIT: - continueExec = mndTransPerformCommitStage(pMnode, pTrans); - break; case TRN_STAGE_ROLLBACK: continueExec = mndTransPerformRollbackStage(pMnode, pTrans); break; @@ -1286,22 +1253,11 @@ int32_t mndKillTrans(SMnode *pMnode, STrans *pTrans) { return -1; } - int32_t size = taosArrayGetSize(pArray); - - for (int32_t i = 0; i < size; ++i) { + for (int32_t i = 0; i < taosArrayGetSize(pArray); ++i) { STransAction *pAction = taosArrayGet(pArray, i); - if (pAction == NULL) continue; - - if (pAction->msgReceived == 0) { - mInfo("trans:%d, action:%d set processed for kill msg received", pTrans->id, i); - pAction->msgSent = 1; - pAction->msgReceived = 1; - pAction->errCode = 0; - } - if (pAction->errCode != 0) { - mInfo("trans:%d, action:%d set processed for kill msg received, errCode from %s to success", pTrans->id, i, - tstrerror(pAction->errCode)); + mInfo("trans:%d, %s:%d set processed for kill msg received, errCode from %s to success", pTrans->id, + mndTransStr(pAction->stage), i, tstrerror(pAction->errCode)); pAction->msgSent = 1; pAction->msgReceived = 1; pAction->errCode = 0; @@ -1337,9 +1293,7 @@ static int32_t mndProcessKillTransReq(SRpcMsg *pReq) { pTrans = mndAcquireTrans(pMnode, killReq.transId); if (pTrans == NULL) { - terrno = TSDB_CODE_MND_TRANS_NOT_EXIST; - mError("trans:%d, failed to kill since %s", killReq.transId, terrstr()); - return -1; + goto _OVER; } code = mndKillTrans(pMnode, pTrans); @@ -1347,9 +1301,9 @@ static int32_t mndProcessKillTransReq(SRpcMsg *pReq) { _OVER: if (code != 0) { mError("trans:%d, failed to kill since %s", killReq.transId, terrstr()); - return -1; } + mndReleaseUser(pMnode, pUser); mndReleaseTrans(pMnode, pTrans); return code; } @@ -1415,11 +1369,6 @@ static int32_t mndRetrieveTrans(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBl pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); colDataAppend(pColInfo, numOfRows, (const char *)dbname, false); - char type[TSDB_TRANS_TYPE_LEN + VARSTR_HEADER_SIZE] = {0}; - STR_WITH_MAXSIZE_TO_VARSTR(type, mndTransType(pTrans->type), pShow->pMeta->pSchemas[cols].bytes); - pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); - colDataAppend(pColInfo, numOfRows, (const char *)type, false); - pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); colDataAppend(pColInfo, numOfRows, (const char *)&pTrans->failedTimes, false); @@ -1427,7 +1376,20 @@ static int32_t mndRetrieveTrans(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBl colDataAppend(pColInfo, numOfRows, (const char *)&pTrans->lastExecTime, false); char lastError[TSDB_TRANS_ERROR_LEN + VARSTR_HEADER_SIZE] = {0}; - STR_WITH_MAXSIZE_TO_VARSTR(lastError, pTrans->lastError, 
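/* The show-trans last_error column is no longer a stored free-form string: it
   is rebuilt below from the structured fields captured at execution time --
   lastErrorAction, lastErrorNo, lastErrorMsgType and the epset the failing
   request was sent to. */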
pShow->pMeta->pSchemas[cols].bytes); + char detail[TSDB_TRANS_ERROR_LEN] = {0}; + if (pTrans->lastErrorNo != 0) { + int32_t len = snprintf(detail, sizeof(detail), "action:%d errno:0x%x(%s) ", pTrans->lastErrorAction, + pTrans->lastErrorNo & 0xFFFF, tstrerror(pTrans->lastErrorNo)); + SEpSet epset = pTrans->lastErrorEpset; + if (epset.numOfEps > 0) { + len += snprintf(detail + len, sizeof(detail) - len, "msgType:%s numOfEps:%d inUse:%d ", + TMSG_INFO(pTrans->lastErrorMsgType), epset.numOfEps, epset.inUse); + } + for (int32_t i = 0; i < pTrans->lastErrorEpset.numOfEps; ++i) { + len += snprintf(detail + len, sizeof(detail) - len, "ep:%d-%s:%u ", i, epset.eps[i].fqdn, epset.eps[i].port); + } + } + STR_WITH_MAXSIZE_TO_VARSTR(lastError, detail, pShow->pMeta->pSchemas[cols].bytes); pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); colDataAppend(pColInfo, numOfRows, (const char *)lastError, false); diff --git a/source/dnode/mnode/impl/src/mndUser.c b/source/dnode/mnode/impl/src/mndUser.c index cc6364c4571b7b56b096d282c4f8f29a7b624dca..345d756f4399a46b4d4abfa8db1ea74b2271b01e 100644 --- a/source/dnode/mnode/impl/src/mndUser.c +++ b/source/dnode/mnode/impl/src/mndUser.c @@ -77,12 +77,9 @@ static int32_t mndCreateDefaultUser(SMnode *pMnode, char *acct, char *user, char if (pRaw == NULL) return -1; sdbSetRawStatus(pRaw, SDB_STATUS_READY); - mDebug("user:%s, will be created while deploy sdb, raw:%p", userObj.user, pRaw); + mDebug("user:%s, will be created when deploying, raw:%p", userObj.user, pRaw); -#if 0 - return sdbWrite(pMnode->pSdb, pRaw); -#else - STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_TYPE_CREATE_USER, NULL); + STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_NOTHING, NULL); if (pTrans == NULL) { mError("user:%s, failed to create since %s", userObj.user, terrstr()); return -1; @@ -104,7 +101,6 @@ static int32_t mndCreateDefaultUser(SMnode *pMnode, char *acct, char *user, char mndTransDrop(pTrans); return 0; -#endif } static int32_t mndCreateDefaultUsers(SMnode *pMnode) { @@ -291,7 +287,7 @@ static int32_t mndCreateUser(SMnode *pMnode, char *acct, SCreateUserReq *pCreate userObj.updateTime = userObj.createdTime; userObj.superUser = pCreate->superUser; - STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_TYPE_CREATE_USER, pReq); + STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_CONFLICT_NOTHING, pReq); if (pTrans == NULL) { mError("user:%s, failed to create since %s", pCreate->user, terrstr()); return -1; @@ -371,7 +367,7 @@ _OVER: } static int32_t mndAlterUser(SMnode *pMnode, SUserObj *pOld, SUserObj *pNew, SRpcMsg *pReq) { - STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_TYPE_ALTER_USER, pReq); + STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_CONFLICT_NOTHING, pReq); if (pTrans == NULL) { mError("user:%s, failed to alter since %s", pOld->user, terrstr()); return -1; @@ -578,7 +574,7 @@ _OVER: } static int32_t mndDropUser(SMnode *pMnode, SRpcMsg *pReq, SUserObj *pUser) { - STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_TYPE_DROP_USER, pReq); + STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_CONFLICT_NOTHING, pReq); if (pTrans == NULL) { mError("user:%s, failed to drop since %s", pUser->user, terrstr()); return -1; diff --git a/source/dnode/mnode/impl/src/mndVgroup.c b/source/dnode/mnode/impl/src/mndVgroup.c index e05b38a7c0345293eb53caeab2eb680f6d113651..2577febf6611ffbdbeb8d4021df3292e99c7873b 100644 --- a/source/dnode/mnode/impl/src/mndVgroup.c 
+++ b/source/dnode/mnode/impl/src/mndVgroup.c @@ -51,9 +51,10 @@ int32_t mndInitVgroup(SMnode *pMnode) { }; mndSetMsgHandle(pMnode, TDMT_DND_CREATE_VNODE_RSP, mndProcessCreateVnodeRsp); - mndSetMsgHandle(pMnode, TDMT_VND_ALTER_VNODE_RSP, mndProcessAlterVnodeRsp); + mndSetMsgHandle(pMnode, TDMT_VND_ALTER_REPLICA_RSP, mndProcessAlterVnodeRsp); + mndSetMsgHandle(pMnode, TDMT_VND_ALTER_CONFIG_RSP, mndProcessAlterVnodeRsp); mndSetMsgHandle(pMnode, TDMT_DND_DROP_VNODE_RSP, mndProcessDropVnodeRsp); - mndSetMsgHandle(pMnode, TDMT_VND_COMPACT_VNODE_RSP, mndProcessCompactVnodeRsp); + mndSetMsgHandle(pMnode, TDMT_VND_COMPACT_RSP, mndProcessCompactVnodeRsp); mndAddShowRetrieveHandle(pMnode, TSDB_MGMT_TABLE_VGROUP, mndRetrieveVgroups); mndAddShowFreeIterHandle(pMnode, TSDB_MGMT_TABLE_VGROUP, mndCancelGetNextVgroup); @@ -188,10 +189,10 @@ void mndReleaseVgroup(SMnode *pMnode, SVgObj *pVgroup) { sdbRelease(pSdb, pVgroup); } -void *mndBuildCreateVnodeReq(SMnode *pMnode, SDnodeObj *pDnode, SDbObj *pDb, SVgObj *pVgroup, int32_t *pContLen) { +void *mndBuildCreateVnodeReq(SMnode *pMnode, SDnodeObj *pDnode, SDbObj *pDb, SVgObj *pVgroup, int32_t *pContLen, + bool standby) { SCreateVnodeReq createReq = {0}; createReq.vgId = pVgroup->vgId; - createReq.dnodeId = pDnode->id; memcpy(createReq.db, pDb->name, TSDB_DB_FNAME_LEN); createReq.dbUid = pDb->uid; createReq.vgVersion = pVgroup->version; @@ -218,6 +219,9 @@ void *mndBuildCreateVnodeReq(SMnode *pMnode, SDnodeObj *pDnode, SDbObj *pDb, SVg createReq.hashMethod = pDb->cfg.hashMethod; createReq.numOfRetensions = pDb->cfg.numOfRetensions; createReq.pRetensions = pDb->cfg.pRetensions; + createReq.standby = standby; + createReq.isTsma = pVgroup->isTsma; + createReq.pTsma = pVgroup->pTsma; for (int32_t v = 0; v < pVgroup->replica; ++v) { SReplica *pReplica = &createReq.replicas[v]; @@ -274,7 +278,6 @@ void *mndBuildAlterVnodeReq(SMnode *pMnode, SDbObj *pDb, SVgObj *pVgroup, int32_ alterReq.strict = pDb->cfg.strict; alterReq.cacheLastRow = pDb->cfg.cacheLastRow; alterReq.replica = pVgroup->replica; - alterReq.selfIndex = -1; for (int32_t v = 0; v < pVgroup->replica; ++v) { SReplica *pReplica = &alterReq.replicas[v]; @@ -290,13 +293,6 @@ void *mndBuildAlterVnodeReq(SMnode *pMnode, SDbObj *pDb, SVgObj *pVgroup, int32_ mndReleaseDnode(pMnode, pVgidDnode); } -#if 0 - if (alterReq.selfIndex == -1) { - terrno = TSDB_CODE_MND_APP_ERROR; - return NULL; - } -#endif - int32_t contLen = tSerializeSAlterVnodeReq(NULL, 0, &alterReq); if (contLen < 0) { terrno = TSDB_CODE_OUT_OF_MEMORY; @@ -501,7 +497,7 @@ int32_t mndAllocVgroup(SMnode *pMnode, SDbObj *pDb, SVgObj **ppVgroups) { *ppVgroups = pVgroups; code = 0; - mInfo("db:%s, %d vgroups is alloced, replica:%d", pDb->name, pDb->cfg.numOfVgroups, pDb->cfg.replications); + mInfo("db:%s, total %d vgroups is alloced, replica:%d", pDb->name, pDb->cfg.numOfVgroups, pDb->cfg.replications); _OVER: if (code != 0) taosMemoryFree(pVgroups); @@ -536,10 +532,10 @@ int32_t mndAddVnodeToVgroup(SMnode *pMnode, SVgObj *pVgroup, SArray *pArray) { SVnodeGid *pVgid = &pVgroup->vnodeGid[maxPos]; pVgid->dnodeId = pDnode->id; - pVgid->role = TAOS_SYNC_STATE_FOLLOWER; + pVgid->role = TAOS_SYNC_STATE_ERROR; pDnode->numOfVnodes++; - mInfo("db:%s, vgId:%d, vn:%d dnode:%d is added", pVgroup->dbName, pVgroup->vgId, maxPos, pVgid->dnodeId); + mInfo("db:%s, vgId:%d, vnode_index:%d dnode:%d is added", pVgroup->dbName, pVgroup->vgId, maxPos, pVgid->dnodeId); maxPos++; if (maxPos == 3) return 0; } @@ -549,14 +545,13 @@ int32_t mndAddVnodeToVgroup(SMnode *pMnode, 
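/* Both vgroup membership helpers appear to lean on the same heuristic: the
   candidate dnode array is sorted by current vnode count (taosArraySort with
   mndCompareDnodeVnodes), then mndAddVnodeToVgroup fills from the lightly
   loaded end while the removal loop below walks from the most loaded end,
   keeping vnodes evenly spread as a vgroup's replica count changes. */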
SVgObj *pVgroup, SArray *pArray) { } int32_t mndRemoveVnodeFromVgroup(SMnode *pMnode, SVgObj *pVgroup, SArray *pArray, SVnodeGid *del1, SVnodeGid *del2) { - int32_t removedNum = 0; - taosArraySort(pArray, (__compar_fn_t)mndCompareDnodeVnodes); for (int32_t i = 0; i < taosArrayGetSize(pArray); ++i) { SDnodeObj *pDnode = taosArrayGet(pArray, i); mDebug("dnode:%d, equivalent vnodes:%d", pDnode->id, pDnode->numOfVnodes); } + int32_t removedNum = 0; for (int32_t d = taosArrayGetSize(pArray) - 1; d >= 0; --d) { SDnodeObj *pDnode = taosArrayGet(pArray, d); @@ -662,6 +657,7 @@ static int32_t mndRetrieveVgroups(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *p int32_t numOfRows = 0; SVgObj *pVgroup = NULL; int32_t cols = 0; + int64_t curMs = taosGetTimestampMs(); SDbObj *pDb = NULL; if (strlen(pShow->db) > 0) { @@ -701,12 +697,15 @@ static int32_t mndRetrieveVgroups(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *p if (i < pVgroup->replica) { colDataAppend(pColInfo, numOfRows, (const char *)&pVgroup->vnodeGid[i].dnodeId, false); + bool online = false; + SDnodeObj *pDnode = mndAcquireDnode(pMnode, pVgroup->vnodeGid[i].dnodeId); + if (pDnode != NULL) { + online = mndIsDnodeOnline(pMnode, pDnode, curMs); + mndReleaseDnode(pMnode, pDnode); + } + char buf1[20] = {0}; - SDnodeObj *pDnodeObj = mndAcquireDnode(pMnode, pVgroup->vnodeGid[i].dnodeId); - ASSERT(pDnodeObj != NULL); - bool isOffLine = !mndIsDnodeOnline(pMnode, pDnodeObj, taosGetTimestampMs()); - const char *role = isOffLine ? "OFFLINE" : syncStr(pVgroup->vnodeGid[i].role); - + const char *role = online ? syncStr(pVgroup->vnodeGid[i].role) : "offline"; STR_WITH_MAXSIZE_TO_VARSTR(buf1, role, pShow->pMeta->pSchemas[cols].bytes); pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); diff --git a/source/dnode/mnode/impl/test/acct/CMakeLists.txt b/source/dnode/mnode/impl/test/acct/CMakeLists.txt index 40f8b0726e28446170a71bbbccde979376448fbb..d72292e34bd605ec91b16788fadd9f1ff1c68cc4 100644 --- a/source/dnode/mnode/impl/test/acct/CMakeLists.txt +++ b/source/dnode/mnode/impl/test/acct/CMakeLists.txt @@ -5,7 +5,9 @@ target_link_libraries( PUBLIC sut ) -add_test( - NAME acctTest - COMMAND acctTest -) +if(NOT TD_WINDOWS) + add_test( + NAME acctTest + COMMAND acctTest + ) +endif(NOT TD_WINDOWS) diff --git a/source/dnode/mnode/impl/test/db/CMakeLists.txt b/source/dnode/mnode/impl/test/db/CMakeLists.txt index 3f6a80835ffa7b2a0a6fcdcff21e1cfd39a02c5f..e28cdd4f61824c04f62513868a9010113140fd31 100644 --- a/source/dnode/mnode/impl/test/db/CMakeLists.txt +++ b/source/dnode/mnode/impl/test/db/CMakeLists.txt @@ -5,7 +5,9 @@ target_link_libraries( PUBLIC sut ) -add_test( - NAME dbTest - COMMAND dbTest -) +if(NOT TD_WINDOWS) + add_test( + NAME dbTest + COMMAND dbTest + ) +endif(NOT TD_WINDOWS) diff --git a/source/dnode/mnode/impl/test/func/CMakeLists.txt b/source/dnode/mnode/impl/test/func/CMakeLists.txt index ecb4f851be9d95a7c894d1e2ef2b3d9ce83067d3..2a8eb0a39d89275ae204e6405de2b774b4412619 100644 --- a/source/dnode/mnode/impl/test/func/CMakeLists.txt +++ b/source/dnode/mnode/impl/test/func/CMakeLists.txt @@ -5,7 +5,9 @@ target_link_libraries( PUBLIC sut ) -add_test( - NAME funcTest - COMMAND funcTest -) +if(NOT TD_WINDOWS) + add_test( + NAME funcTest + COMMAND funcTest + ) +endif(NOT TD_WINDOWS) diff --git a/source/dnode/mnode/impl/test/profile/CMakeLists.txt b/source/dnode/mnode/impl/test/profile/CMakeLists.txt index 8b811ebfed3a56ab139ecfc81f3556af2f9bb032..b6586192b2b4c6e428c2f00fddb11527a1747707 100644 --- a/source/dnode/mnode/impl/test/profile/CMakeLists.txt +++ 
b/source/dnode/mnode/impl/test/profile/CMakeLists.txt @@ -5,7 +5,9 @@ target_link_libraries( PUBLIC sut ) -add_test( - NAME profileTest - COMMAND profileTest -) +if(NOT TD_WINDOWS) + add_test( + NAME profileTest + COMMAND profileTest + ) +endif(NOT TD_WINDOWS) diff --git a/source/dnode/mnode/impl/test/sdb/sdbTest.cpp b/source/dnode/mnode/impl/test/sdb/sdbTest.cpp index df535c4456615b8b501236f2c7ad1684c2f4ac6f..43be55dd1de822d098475747a7b5b6452f379058 100644 --- a/source/dnode/mnode/impl/test/sdb/sdbTest.cpp +++ b/source/dnode/mnode/impl/test/sdb/sdbTest.cpp @@ -492,7 +492,7 @@ TEST_F(MndTestSdb, 01_Write_Str) { ASSERT_EQ(sdbGetSize(pSdb, SDB_USER), 2); ASSERT_EQ(sdbGetMaxId(pSdb, SDB_USER), -1); - ASSERT_EQ(sdbGetTableVer(pSdb, SDB_USER), 2 ); + ASSERT_EQ(sdbGetTableVer(pSdb, SDB_USER), 2); sdbSetApplyIndex(pSdb, -1); ASSERT_EQ(sdbGetApplyIndex(pSdb), -1); ASSERT_EQ(mnode.insertTimes, 2); @@ -895,7 +895,35 @@ TEST_F(MndTestSdb, 01_Read_Str) { ASSERT_EQ(code, TSDB_CODE_SDB_OBJ_CREATING); } + { + SSdbIter *pReader = NULL; + SSdbIter *pWritter = NULL; + void *pBuf = NULL; + int32_t len = 0; + int32_t code = 0; + + code = sdbStartRead(pSdb, &pReader); + ASSERT_EQ(code, 0); + code = sdbStartWrite(pSdb, &pWritter); + ASSERT_EQ(code, 0); + + while (sdbDoRead(pSdb, pReader, &pBuf, &len) == 0) { + if (pBuf != NULL && len != 0) { + sdbDoWrite(pSdb, pWritter, pBuf, len); + taosMemoryFree(pBuf); + } else { + break; + } + } + + sdbStopRead(pSdb, pReader); + sdbStopWrite(pSdb, pWritter, true); + } + + ASSERT_EQ(sdbGetSize(pSdb, SDB_CONSUMER), 1); + ASSERT_EQ(sdbGetTableVer(pSdb, SDB_CONSUMER), 4); + sdbCleanup(pSdb); - ASSERT_EQ(mnode.insertTimes, 5); - ASSERT_EQ(mnode.deleteTimes, 5); + ASSERT_EQ(mnode.insertTimes, 9); + ASSERT_EQ(mnode.deleteTimes, 9); } \ No newline at end of file diff --git a/source/dnode/mnode/impl/test/show/CMakeLists.txt b/source/dnode/mnode/impl/test/show/CMakeLists.txt index 69e93e7086147de77676ea02017a6ce5533acf42..9b4e21501ed478e527adfa69a5a2297e173876e1 100644 --- a/source/dnode/mnode/impl/test/show/CMakeLists.txt +++ b/source/dnode/mnode/impl/test/show/CMakeLists.txt @@ -5,7 +5,9 @@ target_link_libraries( PUBLIC sut ) -add_test( - NAME showTest - COMMAND showTest -) +if(NOT TD_WINDOWS) + add_test( + NAME showTest + COMMAND showTest + ) +endif(NOT TD_WINDOWS) diff --git a/source/dnode/mnode/impl/test/sma/CMakeLists.txt b/source/dnode/mnode/impl/test/sma/CMakeLists.txt index 3f9ec123a80e88371a98fa54c99342726831372d..fd596c5021674bb9d4ec185924129b0fd3bbade8 100644 --- a/source/dnode/mnode/impl/test/sma/CMakeLists.txt +++ b/source/dnode/mnode/impl/test/sma/CMakeLists.txt @@ -5,7 +5,9 @@ target_link_libraries( PUBLIC sut ) -add_test( - NAME smaTest - COMMAND smaTest -) +if(NOT TD_WINDOWS) + add_test( + NAME smaTest + COMMAND smaTest + ) +endif(NOT TD_WINDOWS) diff --git a/source/dnode/mnode/impl/test/stb/CMakeLists.txt b/source/dnode/mnode/impl/test/stb/CMakeLists.txt index d2fe3879979f4f52a215a3d44e25e912be3abb90..857c404c1c299767685fa1572a7f5a0b6463c939 100644 --- a/source/dnode/mnode/impl/test/stb/CMakeLists.txt +++ b/source/dnode/mnode/impl/test/stb/CMakeLists.txt @@ -5,7 +5,9 @@ target_link_libraries( PUBLIC sut ) -add_test( - NAME stbTest - COMMAND stbTest -) +if(NOT TD_WINDOWS) + add_test( + NAME stbTest + COMMAND stbTest + ) +endif(NOT TD_WINDOWS) \ No newline at end of file diff --git a/source/dnode/mnode/impl/test/trans/trans2.cpp b/source/dnode/mnode/impl/test/trans/trans2.cpp index b78f1c7021ef44313a2a6393ecc58294921f2a18..022c82c73d66ab39f9cf07aeb34642278018722d 
100644 --- a/source/dnode/mnode/impl/test/trans/trans2.cpp +++ b/source/dnode/mnode/impl/test/trans/trans2.cpp @@ -11,6 +11,8 @@ #include +#if 0 + #include "mndTrans.h" #include "mndUser.h" #include "tcache.h" @@ -103,7 +105,7 @@ class MndTestTrans2 : public ::testing::Test { void SetUp() override {} void TearDown() override {} - int32_t CreateUserLog(const char *acct, const char *user, ETrnType type, SDbObj *pDb) { + int32_t CreateUserLog(const char *acct, const char *user, ETrnConflct conflict, SDbObj *pDb) { SUserObj userObj = {0}; taosEncryptPass_c((uint8_t *)"taosdata", strlen("taosdata"), userObj.pass); tstrncpy(userObj.user, user, TSDB_USER_LEN); @@ -113,7 +115,7 @@ class MndTestTrans2 : public ::testing::Test { userObj.superUser = 1; SRpcMsg rpcMsg = {0}; - STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, type, &rpcMsg); + STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, conflict, &rpcMsg); SSdbRaw *pRedoRaw = mndUserActionEncode(&userObj); mndTransAppendRedolog(pTrans, pRedoRaw); sdbSetRawStatus(pRedoRaw, SDB_STATUS_READY); @@ -123,10 +125,10 @@ class MndTestTrans2 : public ::testing::Test { sdbSetRawStatus(pUndoRaw, SDB_STATUS_DROPPED); char *param = strdup("====> test log <====="); - mndTransSetCb(pTrans, TEST_TRANS_START_FUNC, TEST_TRANS_STOP_FUNC, param, strlen(param) + 1); + mndTransSetCb(pTrans, TRANS_START_FUNC_TEST, TRANS_STOP_FUNC_TEST, param, strlen(param) + 1); if (pDb != NULL) { - mndTransSetDbInfo(pTrans, pDb); + mndTransSetDbName(pTrans, pDb->name); } int32_t code = mndTransPrepare(pMnode, pTrans); @@ -135,7 +137,7 @@ class MndTestTrans2 : public ::testing::Test { return code; } - int32_t CreateUserAction(const char *acct, const char *user, bool hasUndoAction, ETrnPolicy policy, ETrnType type, + int32_t CreateUserAction(const char *acct, const char *user, bool hasUndoAction, ETrnPolicy policy, ETrnConflct conflict, SDbObj *pDb) { SUserObj userObj = {0}; taosEncryptPass_c((uint8_t *)"taosdata", strlen("taosdata"), userObj.pass); @@ -146,7 +148,7 @@ class MndTestTrans2 : public ::testing::Test { userObj.superUser = 1; SRpcMsg rpcMsg = {0}; - STrans *pTrans = mndTransCreate(pMnode, policy, type, &rpcMsg); + STrans *pTrans = mndTransCreate(pMnode, policy, conflict, &rpcMsg); SSdbRaw *pRedoRaw = mndUserActionEncode(&userObj); mndTransAppendRedolog(pTrans, pRedoRaw); sdbSetRawStatus(pRedoRaw, SDB_STATUS_READY); @@ -156,7 +158,7 @@ class MndTestTrans2 : public ::testing::Test { sdbSetRawStatus(pUndoRaw, SDB_STATUS_DROPPED); char *param = strdup("====> test action <====="); - mndTransSetCb(pTrans, TEST_TRANS_START_FUNC, TEST_TRANS_STOP_FUNC, param, strlen(param) + 1); + mndTransSetCb(pTrans, TRANS_START_FUNC_TEST, TRANS_STOP_FUNC_TEST, param, strlen(param) + 1); { STransAction action = {0}; @@ -199,7 +201,7 @@ class MndTestTrans2 : public ::testing::Test { } if (pDb != NULL) { - mndTransSetDbInfo(pTrans, pDb); + mndTransSetDbName(pTrans, pDb->name); } int32_t code = mndTransPrepare(pMnode, pTrans); @@ -218,7 +220,7 @@ class MndTestTrans2 : public ::testing::Test { userObj.superUser = 1; SRpcMsg rpcMsg = {0}; - STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_TYPE_CREATE_USER, &rpcMsg); + STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_CONFLICT_NOTHING, &rpcMsg); SSdbRaw *pRedoRaw = mndUserActionEncode(&userObj); mndTransAppendRedolog(pTrans, pRedoRaw); sdbSetRawStatus(pRedoRaw, SDB_STATUS_READY); @@ -228,7 +230,7 @@ class MndTestTrans2 : public ::testing::Test { sdbSetRawStatus(pUndoRaw, SDB_STATUS_DROPPED); char 
*param = strdup("====> test log <====="); - mndTransSetCb(pTrans, TEST_TRANS_START_FUNC, TEST_TRANS_STOP_FUNC, param, strlen(param) + 1); + mndTransSetCb(pTrans, TRANS_START_FUNC_TEST, TRANS_STOP_FUNC_TEST, param, strlen(param) + 1); int32_t code = mndTransPrepare(pMnode, pTrans); mndTransDrop(pTrans); @@ -528,3 +530,5 @@ TEST_F(MndTestTrans2, 04_Conflict) { mndReleaseUser(pMnode, pUser); } } + +#endif \ No newline at end of file diff --git a/source/dnode/mnode/impl/test/user/CMakeLists.txt b/source/dnode/mnode/impl/test/user/CMakeLists.txt index b39ea0e73f728cacc648f6eb0723328e028c05f4..ed4d96461742a77fd4a2ba3d0b9cd070c2f00c43 100644 --- a/source/dnode/mnode/impl/test/user/CMakeLists.txt +++ b/source/dnode/mnode/impl/test/user/CMakeLists.txt @@ -5,7 +5,9 @@ target_link_libraries( PUBLIC sut ) -add_test( - NAME userTest - COMMAND userTest -) +if(NOT TD_WINDOWS) + add_test( + NAME userTest + COMMAND userTest + ) +endif(NOT TD_WINDOWS) diff --git a/source/dnode/mnode/sdb/inc/sdb.h b/source/dnode/mnode/sdb/inc/sdb.h index 3d9148360a08ede04e527cd4318fa233689ddf98..4a00befa1e8ec1b4ef4ff20a51a066ed08cf1883 100644 --- a/source/dnode/mnode/sdb/inc/sdb.h +++ b/source/dnode/mnode/sdb/inc/sdb.h @@ -166,9 +166,9 @@ typedef struct SSdbRow { typedef struct SSdb { SMnode *pMnode; char *currDir; - char *syncDir; char *tmpDir; int64_t lastCommitVer; + int64_t lastCommitTerm; int64_t curVer; int64_t curTerm; int64_t tableVer[SDB_MAX]; @@ -182,11 +182,13 @@ typedef struct SSdb { SdbDeployFp deployFps[SDB_MAX]; SdbEncodeFp encodeFps[SDB_MAX]; SdbDecodeFp decodeFps[SDB_MAX]; + TdThreadMutex filelock; } SSdb; typedef struct SSdbIter { TdFilePtr file; - int64_t readlen; + int64_t total; + char *name; } SSdbIter; typedef struct { @@ -299,6 +301,7 @@ void sdbRelease(SSdb *pSdb, void *pObj); * @return void* The next iterator of the table. */ void *sdbFetch(SSdb *pSdb, ESdbType type, void *pIter, void **ppObj); +void *sdbFetchAll(SSdb *pSdb, ESdbType type, void *pIter, void **ppObj, ESdbStatus *status) ; /** * @brief Cancel a traversal @@ -380,11 +383,17 @@ SSdbRow *sdbAllocRow(int32_t objSize); void *sdbGetRowObj(SSdbRow *pRow); void sdbFreeRow(SSdb *pSdb, SSdbRow *pRow, bool callFunc); -SSdbIter *sdbIterInit(SSdb *pSdb); -SSdbIter *sdbIterRead(SSdb *pSdb, SSdbIter *iter, char **ppBuf, int32_t *len); +int32_t sdbStartRead(SSdb *pSdb, SSdbIter **ppIter); +int32_t sdbStopRead(SSdb *pSdb, SSdbIter *pIter); +int32_t sdbDoRead(SSdb *pSdb, SSdbIter *pIter, void **ppBuf, int32_t *len); + +int32_t sdbStartWrite(SSdb *pSdb, SSdbIter **ppIter); +int32_t sdbStopWrite(SSdb *pSdb, SSdbIter *pIter, bool isApply); +int32_t sdbDoWrite(SSdb *pSdb, SSdbIter *pIter, void *pBuf, int32_t len); const char *sdbTableName(ESdbType type); void sdbPrintOper(SSdb *pSdb, SSdbRow *pRow, const char *oper); +int32_t sdbGetIdFromRaw(SSdb *pSdb, SSdbRaw *pRaw); #ifdef __cplusplus } diff --git a/source/dnode/mnode/sdb/inc/sdbInt.h b/source/dnode/mnode/sdb/inc/sdbInt.h deleted file mode 100644 index c49d6e8fb287619d9503282dd2e164ed432ce823..0000000000000000000000000000000000000000 --- a/source/dnode/mnode/sdb/inc/sdbInt.h +++ /dev/null @@ -1,61 +0,0 @@ -/* - * Copyright (c) 2019 TAOS Data, Inc. - * - * This program is free software: you can use, redistribute, and/or modify - * it under the terms of the GNU Affero General Public License, version 3 - * or later ("AGPL"), as published by the Free Software Foundation. 
- * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . - */ - -#ifndef _TD_SDB_INT_H_ -#define _TD_SDB_INT_H_ - -#include "os.h" - -#include "sdb.h" - -#ifdef __cplusplus -extern "C" { -#endif - -// clang-format off -#define mFatal(...) { if (mDebugFlag & DEBUG_FATAL) { taosPrintLog("MND FATAL ", DEBUG_FATAL, 255, __VA_ARGS__); }} -#define mError(...) { if (mDebugFlag & DEBUG_ERROR) { taosPrintLog("MND ERROR ", DEBUG_ERROR, 255, __VA_ARGS__); }} -#define mWarn(...) { if (mDebugFlag & DEBUG_WARN) { taosPrintLog("MND WARN ", DEBUG_WARN, 255, __VA_ARGS__); }} -#define mInfo(...) { if (mDebugFlag & DEBUG_INFO) { taosPrintLog("MND ", DEBUG_INFO, 255, __VA_ARGS__); }} -#define mDebug(...) { if (mDebugFlag & DEBUG_DEBUG) { taosPrintLog("MND ", DEBUG_DEBUG, mDebugFlag, __VA_ARGS__); }} -#define mTrace(...) { if (mDebugFlag & DEBUG_TRACE) { taosPrintLog("MND ", DEBUG_TRACE, mDebugFlag, __VA_ARGS__); }} -// clang-format on - -typedef struct SSdbRaw { - int8_t type; - int8_t status; - int8_t sver; - int8_t reserved; - int32_t dataLen; - char pData[]; -} SSdbRaw; - -typedef struct SSdbRow { - ESdbType type; - ESdbStatus status; - int32_t refCount; - char pObj[]; -} SSdbRow; - -const char *sdbTableName(ESdbType type); -void sdbPrintOper(SSdb *pSdb, SSdbRow *pRow, const char *oper); - -void sdbFreeRow(SSdb *pSdb, SSdbRow *pRow, bool callFunc); - -#ifdef __cplusplus -} -#endif - -#endif /*_TD_SDB_INT_H_*/ diff --git a/source/dnode/mnode/sdb/src/sdb.c b/source/dnode/mnode/sdb/src/sdb.c index d289e30d7b4c68e85a5bc48048b52536f8e150e9..0526ea5c2d65cee2b57d6312b92b90830bad0b8b 100644 --- a/source/dnode/mnode/sdb/src/sdb.c +++ b/source/dnode/mnode/sdb/src/sdb.c @@ -55,7 +55,9 @@ SSdb *sdbInit(SSdbOpt *pOption) { pSdb->curVer = -1; pSdb->curTerm = -1; pSdb->lastCommitVer = -1; + pSdb->lastCommitTerm = -1; pSdb->pMnode = pOption->pMnode; + taosThreadMutexInit(&pSdb->filelock, NULL); mDebug("sdb init successfully"); return pSdb; } @@ -69,11 +71,8 @@ void sdbCleanup(SSdb *pSdb) { taosMemoryFreeClear(pSdb->currDir); } - if (pSdb->syncDir != NULL) { - taosMemoryFreeClear(pSdb->syncDir); - } - if (pSdb->tmpDir != NULL) { + taosRemoveDir(pSdb->tmpDir); taosMemoryFreeClear(pSdb->tmpDir); } @@ -104,6 +103,7 @@ void sdbCleanup(SSdb *pSdb) { mDebug("sdb table:%s is cleaned up", sdbTableName(i)); } + taosThreadMutexDestroy(&pSdb->filelock); taosMemoryFree(pSdb); mDebug("sdb is cleaned up"); } diff --git a/source/dnode/mnode/sdb/src/sdbFile.c b/source/dnode/mnode/sdb/src/sdbFile.c index 25cda199568592ba809e76c92e32107a30a163da..83135491a993e5f8106ed05409255951342c0ac7 100644 --- a/source/dnode/mnode/sdb/src/sdbFile.c +++ b/source/dnode/mnode/sdb/src/sdbFile.c @@ -22,13 +22,14 @@ #define SDB_RESERVE_SIZE 512 #define SDB_FILE_VER 1 -static int32_t sdbRunDeployFp(SSdb *pSdb) { +static int32_t sdbDeployData(SSdb *pSdb) { mDebug("start to deploy sdb"); for (int32_t i = SDB_MAX - 1; i >= 0; --i) { SdbDeployFp fp = pSdb->deployFps[i]; if (fp == NULL) continue; + mDebug("start to deploy sdb:%s", sdbTableName(i)); if ((*fp)(pSdb->pMnode) != 0) { mError("failed to deploy sdb:%s since %s", sdbTableName(i), terrstr()); return -1; @@ -39,6 +40,40 @@ static int32_t sdbRunDeployFp(SSdb *pSdb) { return 0; } +static void sdbResetData(SSdb *pSdb) { + mDebug("start to reset sdb"); + 
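/* The reset below drops every cached row, clears the per-table hash maps
   together with their version/maxId counters, and rewinds curVer/curTerm to -1,
   returning the in-memory sdb to a blank state so that sdbReadFile() can load a
   freshly received snapshot in its place. */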
+ for (ESdbType i = 0; i < SDB_MAX; ++i) { + SHashObj *hash = pSdb->hashObjs[i]; + if (hash == NULL) continue; + + SSdbRow **ppRow = taosHashIterate(hash, NULL); + while (ppRow != NULL) { + SSdbRow *pRow = *ppRow; + if (pRow == NULL) continue; + + sdbFreeRow(pSdb, pRow, true); + ppRow = taosHashIterate(hash, ppRow); + } + } + + for (ESdbType i = 0; i < SDB_MAX; ++i) { + SHashObj *hash = pSdb->hashObjs[i]; + if (hash == NULL) continue; + + taosHashClear(pSdb->hashObjs[i]); + pSdb->tableVer[i] = 0; + pSdb->maxId[i] = 0; + mDebug("sdb:%s is reset", sdbTableName(i)); + } + + pSdb->curVer = -1; + pSdb->curTerm = -1; + pSdb->lastCommitVer = -1; + pSdb->lastCommitTerm = -1; + mDebug("sdb reset successfully"); +} + static int32_t sdbReadFileHead(SSdb *pSdb, TdFilePtr pFile) { int64_t sver = 0; int32_t ret = taosReadFile(pFile, &sver, sizeof(int64_t)); @@ -169,35 +204,33 @@ static int32_t sdbWriteFileHead(SSdb *pSdb, TdFilePtr pFile) { return 0; } -int32_t sdbReadFile(SSdb *pSdb) { +static int32_t sdbReadFileImp(SSdb *pSdb) { int64_t offset = 0; int32_t code = 0; int32_t readLen = 0; int64_t ret = 0; + char file[PATH_MAX] = {0}; + + snprintf(file, sizeof(file), "%s%ssdb.data", pSdb->currDir, TD_DIRSEP); + mDebug("start to read sdb file:%s", file); SSdbRaw *pRaw = taosMemoryMalloc(WAL_MAX_SIZE + 100); if (pRaw == NULL) { terrno = TSDB_CODE_OUT_OF_MEMORY; - mError("failed read file since %s", terrstr()); + mError("failed read sdb file since %s", terrstr()); return -1; } - char file[PATH_MAX] = {0}; - snprintf(file, sizeof(file), "%s%ssdb.data", pSdb->currDir, TD_DIRSEP); - mDebug("start to read file:%s", file); - TdFilePtr pFile = taosOpenFile(file, TD_FILE_READ); if (pFile == NULL) { taosMemoryFree(pRaw); terrno = TAOS_SYSTEM_ERROR(errno); - mError("failed to read file:%s since %s", file, terrstr()); + mError("failed to read sdb file:%s since %s", file, terrstr()); return 0; } if (sdbReadFileHead(pSdb, pFile) != 0) { - mError("failed to read file:%s head since %s", file, terrstr()); - pSdb->curVer = -1; - pSdb->curTerm = -1; + mError("failed to read sdb file:%s head since %s", file, terrstr()); taosMemoryFree(pRaw); taosCloseFile(&pFile); return -1; @@ -213,13 +246,13 @@ int32_t sdbReadFile(SSdb *pSdb) { if (ret < 0) { code = TAOS_SYSTEM_ERROR(errno); - mError("failed to read file:%s since %s", file, tstrerror(code)); + mError("failed to read sdb file:%s since %s", file, tstrerror(code)); break; } if (ret != readLen) { code = TSDB_CODE_FILE_CORRUPTED; - mError("failed to read file:%s since %s", file, tstrerror(code)); + mError("failed to read sdb file:%s since %s", file, tstrerror(code)); break; } @@ -227,34 +260,36 @@ int32_t sdbReadFile(SSdb *pSdb) { ret = taosReadFile(pFile, pRaw->pData, readLen); if (ret < 0) { code = TAOS_SYSTEM_ERROR(errno); - mError("failed to read file:%s since %s", file, tstrerror(code)); + mError("failed to read sdb file:%s since %s", file, tstrerror(code)); break; } if (ret != readLen) { code = TSDB_CODE_FILE_CORRUPTED; - mError("failed to read file:%s since %s", file, tstrerror(code)); + mError("failed to read sdb file:%s since %s", file, tstrerror(code)); break; } int32_t totalLen = sizeof(SSdbRaw) + pRaw->dataLen + sizeof(int32_t); if ((!taosCheckChecksumWhole((const uint8_t *)pRaw, totalLen)) != 0) { code = TSDB_CODE_CHECKSUM_ERROR; - mError("failed to read file:%s since %s", file, tstrerror(code)); + mError("failed to read sdb file:%s since %s", file, tstrerror(code)); break; } code = sdbWriteWithoutFree(pSdb, pRaw); if (code != 0) { - mError("failed to read file:%s 
since %s", file, terrstr()); + mError("failed to read sdb file:%s since %s", file, terrstr()); goto _OVER; } } code = 0; pSdb->lastCommitVer = pSdb->curVer; + pSdb->lastCommitTerm = pSdb->curTerm; memcpy(pSdb->tableVer, tableVer, sizeof(tableVer)); - mDebug("read file:%s successfully, ver:%" PRId64, file, pSdb->lastCommitVer); + mDebug("read sdb file:%s successfully, ver:%" PRId64 " term:%" PRId64, file, pSdb->lastCommitVer, + pSdb->lastCommitTerm); _OVER: taosCloseFile(&pFile); @@ -264,6 +299,20 @@ _OVER: return code; } +int32_t sdbReadFile(SSdb *pSdb) { + taosThreadMutexLock(&pSdb->filelock); + + sdbResetData(pSdb); + int32_t code = sdbReadFileImp(pSdb); + if (code != 0) { + mError("failed to read sdb file since %s", terrstr()); + sdbResetData(pSdb); + } + + taosThreadMutexUnlock(&pSdb->filelock); + return code; +} + static int32_t sdbWriteFileImp(SSdb *pSdb) { int32_t code = 0; @@ -272,18 +321,19 @@ static int32_t sdbWriteFileImp(SSdb *pSdb) { char curfile[PATH_MAX] = {0}; snprintf(curfile, sizeof(curfile), "%s%ssdb.data", pSdb->currDir, TD_DIRSEP); - mDebug("start to write file:%s, current ver:%" PRId64 " term:%" PRId64 ", commit ver:%" PRId64, curfile, pSdb->curVer, - pSdb->curTerm, pSdb->lastCommitVer); + mDebug("start to write sdb file, current ver:%" PRId64 " term:%" PRId64 ", commit ver:%" PRId64 " term:%" PRId64 + " file:%s", + pSdb->curVer, pSdb->curTerm, pSdb->lastCommitVer, pSdb->lastCommitTerm, curfile); TdFilePtr pFile = taosOpenFile(tmpfile, TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_TRUNC); if (pFile == NULL) { terrno = TAOS_SYSTEM_ERROR(errno); - mError("failed to open file:%s for write since %s", tmpfile, terrstr()); + mError("failed to open sdb file:%s for write since %s", tmpfile, terrstr()); return -1; } if (sdbWriteFileHead(pSdb, pFile) != 0) { - mError("failed to write file:%s head since %s", tmpfile, terrstr()); + mError("failed to write sdb file:%s head since %s", tmpfile, terrstr()); taosCloseFile(&pFile); return -1; } @@ -292,7 +342,7 @@ static int32_t sdbWriteFileImp(SSdb *pSdb) { SdbEncodeFp encodeFp = pSdb->encodeFps[i]; if (encodeFp == NULL) continue; - mTrace("write %s to file, total %d rows", sdbTableName(i), sdbGetSize(pSdb, i)); + mTrace("write %s to sdb file, total %d rows", sdbTableName(i), sdbGetSize(pSdb, i)); SHashObj *hash = pSdb->hashObjs[i]; TdThreadRwlock *pLock = &pSdb->locks[i]; @@ -348,7 +398,7 @@ static int32_t sdbWriteFileImp(SSdb *pSdb) { code = taosFsyncFile(pFile); if (code != 0) { code = TAOS_SYSTEM_ERROR(errno); - mError("failed to sync file:%s since %s", tmpfile, tstrerror(code)); + mError("failed to sync sdb file:%s since %s", tmpfile, tstrerror(code)); } } @@ -358,15 +408,17 @@ static int32_t sdbWriteFileImp(SSdb *pSdb) { code = taosRenameFile(tmpfile, curfile); if (code != 0) { code = TAOS_SYSTEM_ERROR(errno); - mError("failed to write file:%s since %s", curfile, tstrerror(code)); + mError("failed to write sdb file:%s since %s", curfile, tstrerror(code)); } } if (code != 0) { - mError("failed to write file:%s since %s", curfile, tstrerror(code)); + mError("failed to write sdb file:%s since %s", curfile, tstrerror(code)); } else { pSdb->lastCommitVer = pSdb->curVer; - mDebug("write file:%s successfully, ver:%" PRId64 " term:%" PRId64, curfile, pSdb->lastCommitVer, pSdb->curTerm); + pSdb->lastCommitTerm = pSdb->curTerm; + mDebug("write sdb file successfully, ver:%" PRId64 " term:%" PRId64 " file:%s", pSdb->lastCommitVer, + pSdb->lastCommitTerm, curfile); } terrno = code; @@ -378,80 +430,187 @@ int32_t sdbWriteFile(SSdb *pSdb) { return 
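/* On-disk access is serialized by pSdb->filelock: sdbReadFile/sdbWriteFile take
   it around the whole file operation, while sdbStartRead below holds it only
   while copying sdb.data into a per-iterator temp file, so a slow snapshot
   transfer does not block writers. Sketch of the read side (assumed caller
   code, shaped after the loop in sdbTest.cpp above):
     SSdbIter *pReader = NULL;
     void     *pBuf = NULL;
     int32_t   len = 0;
     if (sdbStartRead(pSdb, &pReader) == 0) {
       while (sdbDoRead(pSdb, pReader, &pBuf, &len) == 0 && pBuf != NULL && len > 0) {
         // hand (pBuf, len) to the sync layer, then release the chunk
         taosMemoryFree(pBuf);
       }
       sdbStopRead(pSdb, pReader);
     }                                                                           */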
0; } - return sdbWriteFileImp(pSdb); + taosThreadMutexLock(&pSdb->filelock); + int32_t code = sdbWriteFileImp(pSdb); + if (code != 0) { + mError("failed to write sdb file since %s", terrstr()); + } + taosThreadMutexUnlock(&pSdb->filelock); + return code; } int32_t sdbDeploy(SSdb *pSdb) { - if (sdbRunDeployFp(pSdb) != 0) { + if (sdbDeployData(pSdb) != 0) { return -1; } - if (sdbWriteFileImp(pSdb) != 0) { + if (sdbWriteFile(pSdb) != 0) { return -1; } return 0; } -SSdbIter *sdbIterInit(SSdb *pSdb) { - char datafile[PATH_MAX] = {0}; - char tmpfile[PATH_MAX] = {0}; - snprintf(datafile, sizeof(datafile), "%s%ssdb.data", pSdb->currDir, TD_DIRSEP); - snprintf(tmpfile, sizeof(datafile), "%s%ssdb.data", pSdb->tmpDir, TD_DIRSEP); - - if (taosCopyFile(datafile, tmpfile) != 0) { - terrno = TAOS_SYSTEM_ERROR(errno); - mError("failed to copy file %s to %s since %s", datafile, tmpfile, terrstr()); - return NULL; - } - +static SSdbIter *sdbCreateIter(SSdb *pSdb) { SSdbIter *pIter = taosMemoryCalloc(1, sizeof(SSdbIter)); if (pIter == NULL) { terrno = TSDB_CODE_OUT_OF_MEMORY; return NULL; } - pIter->file = taosOpenFile(tmpfile, TD_FILE_READ); - if (pIter->file == NULL) { - terrno = TAOS_SYSTEM_ERROR(errno); - mError("failed to read snapshot file:%s since %s", tmpfile, terrstr()); + char name[PATH_MAX + 100] = {0}; + snprintf(name, sizeof(name), "%s%ssdb.data.%" PRIu64, pSdb->tmpDir, TD_DIRSEP, (uint64_t)pIter); + pIter->name = strdup(name); + if (pIter->name == NULL) { taosMemoryFree(pIter); + terrno = TSDB_CODE_OUT_OF_MEMORY; return NULL; } - mDebug("start to read snapshot file:%s, iter:%p", tmpfile, pIter); return pIter; } -SSdbIter *sdbIterRead(SSdb *pSdb, SSdbIter *pIter, char **ppBuf, int32_t *buflen) { - const int32_t maxlen = 100; +static void sdbCloseIter(SSdbIter *pIter) { + if (pIter == NULL) return; + + if (pIter->file != NULL) { + taosCloseFile(&pIter->file); + pIter->file = NULL; + } + + if (pIter->name != NULL) { + taosRemoveFile(pIter->name); + taosMemoryFree(pIter->name); + pIter->name = NULL; + } + + mInfo("sdbiter:%p, is closed, total:%" PRId64, pIter, pIter->total); + taosMemoryFree(pIter); +} + +int32_t sdbStartRead(SSdb *pSdb, SSdbIter **ppIter) { + SSdbIter *pIter = sdbCreateIter(pSdb); + if (pIter == NULL) return -1; + + char datafile[PATH_MAX] = {0}; + snprintf(datafile, sizeof(datafile), "%s%ssdb.data", pSdb->currDir, TD_DIRSEP); + + taosThreadMutexLock(&pSdb->filelock); + if (taosCopyFile(datafile, pIter->name) < 0) { + taosThreadMutexUnlock(&pSdb->filelock); + terrno = TAOS_SYSTEM_ERROR(errno); + mError("failed to copy sdb file %s to %s since %s", datafile, pIter->name, terrstr()); + sdbCloseIter(pIter); + return -1; + } + taosThreadMutexUnlock(&pSdb->filelock); - char *pBuf = taosMemoryCalloc(1, maxlen); + pIter->file = taosOpenFile(pIter->name, TD_FILE_READ); + if (pIter->file == NULL) { + terrno = TAOS_SYSTEM_ERROR(errno); + mError("failed to open sdb file:%s since %s", pIter->name, terrstr()); + sdbCloseIter(pIter); + return -1; + } + + *ppIter = pIter; + mInfo("sdbiter:%p, is created to read snapshot, file:%s", pIter, pIter->name); + return 0; +} + +int32_t sdbStopRead(SSdb *pSdb, SSdbIter *pIter) { + sdbCloseIter(pIter); + return 0; +} + +int32_t sdbDoRead(SSdb *pSdb, SSdbIter *pIter, void **ppBuf, int32_t *len) { + int32_t maxlen = 100; + void *pBuf = taosMemoryCalloc(1, maxlen); if (pBuf == NULL) { terrno = TSDB_CODE_OUT_OF_MEMORY; - return NULL; + return -1; } int32_t readlen = taosReadFile(pIter->file, pBuf, maxlen); - if (readlen == 0) { - mTrace("read snapshot to the 
end, readlen:%" PRId64, pIter->readlen); - taosMemoryFree(pBuf); - taosCloseFile(&pIter->file); - taosMemoryFree(pIter); - pIter = NULL; - } else if (readlen < 0) { + if (readlen < 0 || readlen > maxlen) { terrno = TAOS_SYSTEM_ERROR(errno); - mError("failed to read snapshot since %s, readlen:%" PRId64, terrstr(), pIter->readlen); + mError("sdbiter:%p, failed to read snapshot since %s, total:%" PRId64, pIter, terrstr(), pIter->total); + *ppBuf = NULL; + *len = 0; taosMemoryFree(pBuf); - taosCloseFile(&pIter->file); - taosMemoryFree(pIter); - pIter = NULL; - } else { - pIter->readlen += readlen; - mTrace("read snapshot, readlen:%" PRId64, pIter->readlen); + return -1; + } else if (readlen == 0) { + mInfo("sdbiter:%p, read snapshot to the end, total:%" PRId64, pIter, pIter->total); + *ppBuf = NULL; + *len = 0; + taosMemoryFree(pBuf); + return 0; + } else { // (readlen <= maxlen) + pIter->total += readlen; + mInfo("sdbiter:%p, read:%d bytes from snapshot, total:%" PRId64, pIter, readlen, pIter->total); *ppBuf = pBuf; - *buflen = readlen; + *len = readlen; + return 0; } +} - return pIter; +int32_t sdbStartWrite(SSdb *pSdb, SSdbIter **ppIter) { + SSdbIter *pIter = sdbCreateIter(pSdb); + if (pIter == NULL) return -1; + + pIter->file = taosOpenFile(pIter->name, TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_TRUNC); + if (pIter->file == NULL) { + terrno = TAOS_SYSTEM_ERROR(errno); + mError("failed to open %s since %s", pIter->name, terrstr()); + sdbCloseIter(pIter); + return -1; + } + + *ppIter = pIter; + mInfo("sdbiter:%p, is created to write snapshot, file:%s", pIter, pIter->name); + return 0; +} + +int32_t sdbStopWrite(SSdb *pSdb, SSdbIter *pIter, bool isApply) { + int32_t code = 0; + + if (!isApply) { + sdbCloseIter(pIter); + mInfo("sdbiter:%p, not apply to sdb", pIter); + return 0; + } + + taosFsyncFile(pIter->file); + taosCloseFile(&pIter->file); + pIter->file = NULL; + + char datafile[PATH_MAX] = {0}; + snprintf(datafile, sizeof(datafile), "%s%ssdb.data", pSdb->currDir, TD_DIRSEP); + if (taosRenameFile(pIter->name, datafile) != 0) { + terrno = TAOS_SYSTEM_ERROR(errno); + mError("sdbiter:%p, failed to rename file %s to %s since %s", pIter, pIter->name, datafile, terrstr()); + sdbCloseIter(pIter); + return -1; + } + + if (sdbReadFile(pSdb) != 0) { + mError("sdbiter:%p, failed to read from %s since %s", pIter, datafile, terrstr()); + sdbCloseIter(pIter); + return -1; + } + + mInfo("sdbiter:%p, successfully applied to sdb", pIter); + sdbCloseIter(pIter); + return 0; } + +int32_t sdbDoWrite(SSdb *pSdb, SSdbIter *pIter, void *pBuf, int32_t len) { + int32_t writelen = taosWriteFile(pIter->file, pBuf, len); + if (writelen != len) { + terrno = TAOS_SYSTEM_ERROR(errno); + mError("failed to write len:%d since %s, total:%" PRId64, len, terrstr(), pIter->total); + return -1; + } + + pIter->total += writelen; + mInfo("sdbiter:%p, write:%d bytes to snapshot, total:%" PRId64, pIter, writelen, pIter->total); + return 0; +} \ No newline at end of file diff --git a/source/dnode/mnode/sdb/src/sdbHash.c b/source/dnode/mnode/sdb/src/sdbHash.c index abf35b71a91ea368b6d1bbc8e0927be59642ce6d..162da2bd0aaa3e2400f14cefa0596b5022e7afbe 100644 --- a/source/dnode/mnode/sdb/src/sdbHash.c +++ b/source/dnode/mnode/sdb/src/sdbHash.c @@ -368,6 +368,34 @@ void *sdbFetch(SSdb *pSdb, ESdbType type, void *pIter, void **ppObj) { return ppRow; } +void *sdbFetchAll(SSdb *pSdb, ESdbType type, void *pIter, void **ppObj, ESdbStatus *status) { + *ppObj = NULL; + + SHashObj *hash = sdbGetHash(pSdb, type); + if (hash == NULL) return NULL; + + TdThreadRwlock *pLock = &pSdb->locks[type]; + 
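Taken together, the sdbStartRead/sdbDoRead/sdbStopRead and sdbStartWrite/sdbDoWrite/sdbStopWrite pairs introduced in sdbFile.c replace the old one-shot sdbIterInit/sdbIterRead API with a symmetric, chunked snapshot channel: the read side copies sdb.data into a private per-iterator temp file while holding pSdb->filelock, then streams the copy without the lock. A minimal sender-side sketch follows; the send() callback is a hypothetical stand-in for the sync transport, not a TDengine API.

```c
// Hedged sketch: stream an sdb snapshot chunk by chunk with the iterator API
// above. send() is an assumed transport callback; the rest is from this diff.
static int32_t sdbSnapshotSendSketch(SSdb *pSdb, int32_t (*send)(void *pBuf, int32_t len)) {
  SSdbIter *pIter = NULL;
  if (sdbStartRead(pSdb, &pIter) != 0) return -1;  // copies sdb.data under filelock

  while (1) {
    void   *pBuf = NULL;
    int32_t len = 0;
    if (sdbDoRead(pSdb, pIter, &pBuf, &len) != 0) {  // read error
      sdbStopRead(pSdb, pIter);
      return -1;
    }
    if (len == 0) break;  // end of snapshot: pBuf is NULL, nothing to free

    int32_t code = send(pBuf, len);
    taosMemoryFree(pBuf);  // sdbDoRead allocates, the caller frees
    if (code != 0) {
      sdbStopRead(pSdb, pIter);
      return -1;
    }
  }

  return sdbStopRead(pSdb, pIter);  // closes and removes the temp copy
}
```

Because each iterator reads its own copy (the temp file is named after the iterator pointer), concurrent snapshot readers only contend with sdbWriteFile during the brief taosCopyFile window inside sdbStartRead.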
taosThreadRwlockRdlock(pLock); + + SSdbRow **ppRow = taosHashIterate(hash, pIter); + while (ppRow != NULL) { + SSdbRow *pRow = *ppRow; + if (pRow == NULL) { + ppRow = taosHashIterate(hash, ppRow); + continue; + } + + atomic_add_fetch_32(&pRow->refCount, 1); + sdbPrintOper(pSdb, pRow, "fetch"); + *ppObj = pRow->pObj; + *status = pRow->status; + break; + } + taosThreadRwlockUnlock(pLock); + + return ppRow; +} + void sdbCancelFetch(SSdb *pSdb, void *pIter) { if (pIter == NULL) return; SSdbRow *pRow = *(SSdbRow **)pIter; diff --git a/source/dnode/mnode/sdb/src/sdbRaw.c b/source/dnode/mnode/sdb/src/sdbRaw.c index ba3b00c12dab08825d0060657f503f6daaa17936..90643a54a9de42d4f505fdcb4f1d25ef95b80ac7 100644 --- a/source/dnode/mnode/sdb/src/sdbRaw.c +++ b/source/dnode/mnode/sdb/src/sdbRaw.c @@ -16,6 +16,16 @@ #define _DEFAULT_SOURCE #include "sdb.h" +int32_t sdbGetIdFromRaw(SSdb *pSdb, SSdbRaw *pRaw) { + EKeyType keytype = pSdb->keyTypes[pRaw->type]; + if (keytype == SDB_KEY_INT32) { + int32_t id = *((int32_t *)(pRaw->pData)); + return id; + } else { + return -2; + } +} + SSdbRaw *sdbAllocRaw(ESdbType type, int8_t sver, int32_t dataLen) { SSdbRaw *pRaw = taosMemoryCalloc(1, dataLen + sizeof(SSdbRaw)); if (pRaw == NULL) { diff --git a/source/dnode/qnode/src/qnode.c b/source/dnode/qnode/src/qnode.c index 929643fcdf91ef7ba0d6a02b8a07de34f0209d54..438982ac6ae2ca13f2244acd978bdc58c723d6de 100644 --- a/source/dnode/qnode/src/qnode.c +++ b/source/dnode/qnode/src/qnode.c @@ -40,46 +40,58 @@ void qndClose(SQnode *pQnode) { taosMemoryFree(pQnode); } -int32_t qndGetLoad(SQnode *pQnode, SQnodeLoad *pLoad) { return 0; } +int32_t qndGetLoad(SQnode *pQnode, SQnodeLoad *pLoad) { + SReadHandle handle = {.pMsgCb = &pQnode->msgCb}; + SQWorkerStat stat = {0}; + + int32_t code = qWorkerGetStat(&handle, pQnode->pQuery, &stat); + if (code) { + return code; + } + + pLoad->numOfQueryInQueue = stat.numOfQueryInQueue; + pLoad->numOfFetchInQueue = stat.numOfFetchInQueue; + pLoad->timeInQueryQueue = stat.timeInQueryQueue; + pLoad->timeInFetchQueue = stat.timeInFetchQueue; + pLoad->cacheDataSize = stat.cacheDataSize; + pLoad->numOfProcessedQuery = stat.queryProcessed; + pLoad->numOfProcessedCQuery = stat.cqueryProcessed; + pLoad->numOfProcessedFetch = stat.fetchProcessed; + pLoad->numOfProcessedDrop = stat.dropProcessed; + pLoad->numOfProcessedHb = stat.hbProcessed; + + return 0; +} -int32_t qndProcessQueryMsg(SQnode *pQnode, SRpcMsg *pMsg) { +int32_t qndProcessQueryMsg(SQnode *pQnode, int64_t ts, SRpcMsg *pMsg) { int32_t code = -1; SReadHandle handle = {.pMsgCb = &pQnode->msgCb}; qTrace("message in qnode queue is processing"); switch (pMsg->msgType) { case TDMT_VND_QUERY: - code = qWorkerProcessQueryMsg(&handle, pQnode->pQuery, pMsg); + code = qWorkerProcessQueryMsg(&handle, pQnode->pQuery, pMsg, ts); break; case TDMT_VND_QUERY_CONTINUE: - code = qWorkerProcessCQueryMsg(&handle, pQnode->pQuery, pMsg); + code = qWorkerProcessCQueryMsg(&handle, pQnode->pQuery, pMsg, ts); break; case TDMT_VND_FETCH: - code = qWorkerProcessFetchMsg(pQnode, pQnode->pQuery, pMsg); + code = qWorkerProcessFetchMsg(pQnode, pQnode->pQuery, pMsg, ts); break; case TDMT_VND_FETCH_RSP: - code = qWorkerProcessFetchRsp(pQnode, pQnode->pQuery, pMsg); - break; - case TDMT_VND_RES_READY: - code = qWorkerProcessReadyMsg(pQnode, pQnode->pQuery, pMsg); - break; - case TDMT_VND_TASKS_STATUS: - code = qWorkerProcessStatusMsg(pQnode, pQnode->pQuery, pMsg); + code = qWorkerProcessFetchRsp(pQnode, pQnode->pQuery, pMsg, ts); break; case TDMT_VND_CANCEL_TASK: - code = 
qWorkerProcessCancelMsg(pQnode, pQnode->pQuery, pMsg); + code = qWorkerProcessCancelMsg(pQnode, pQnode->pQuery, pMsg, ts); break; case TDMT_VND_DROP_TASK: - code = qWorkerProcessDropMsg(pQnode, pQnode->pQuery, pMsg); + code = qWorkerProcessDropMsg(pQnode, pQnode->pQuery, pMsg, ts); break; - case TDMT_VND_TABLE_META: - // code = vnodeGetTableMeta(pQnode, pMsg); - // break; case TDMT_VND_CONSUME: // code = tqProcessConsumeReq(pQnode->pTq, pMsg); // break; case TDMT_VND_QUERY_HEARTBEAT: - code = qWorkerProcessHbMsg(pQnode, pQnode->pQuery, pMsg); + code = qWorkerProcessHbMsg(pQnode, pQnode->pQuery, pMsg, ts); break; default: qError("unknown msg type:%d in qnode queue", pMsg->msgType); diff --git a/source/dnode/vnode/CMakeLists.txt b/source/dnode/vnode/CMakeLists.txt index d988f97188b9330e1229368554b0f75a5713025b..f3aefdba029484aaa155848e364de32743efaa3c 100644 --- a/source/dnode/vnode/CMakeLists.txt +++ b/source/dnode/vnode/CMakeLists.txt @@ -32,12 +32,10 @@ target_sources( "src/sma/smaEnv.c" "src/sma/smaOpen.c" "src/sma/smaRollup.c" - "src/sma/smaTimeRange.c" + "src/sma/smaTimeRange2.c" # tsdb - # "src/tsdb/tsdbTDBImpl.c" "src/tsdb/tsdbCommit.c" - "src/tsdb/tsdbCommit2.c" "src/tsdb/tsdbFile.c" "src/tsdb/tsdbFS.c" "src/tsdb/tsdbOpen.c" @@ -45,16 +43,18 @@ target_sources( "src/tsdb/tsdbMemTable2.c" "src/tsdb/tsdbRead.c" "src/tsdb/tsdbReadImpl.c" - # "src/tsdb/tsdbSma.c" "src/tsdb/tsdbWrite.c" "src/tsdb/tsdbSnapshot.c" # tq "src/tq/tq.c" - "src/tq/tqCommit.c" + "src/tq/tqExec.c" + "src/tq/tqMeta.c" + "src/tq/tqRead.c" "src/tq/tqOffset.c" "src/tq/tqPush.c" - "src/tq/tqRead.c" + "src/tq/tqSink.c" + "src/tq/tqCommit.c" ) target_include_directories( vnode diff --git a/source/dnode/vnode/inc/vnode.h b/source/dnode/vnode/inc/vnode.h index 2b713ff980beb6c5ea7aff502b0128f422256d58..35b2b935467507e7b5662a1493fec2c38805abc2 100644 --- a/source/dnode/vnode/inc/vnode.h +++ b/source/dnode/vnode/inc/vnode.h @@ -68,6 +68,7 @@ void vnodeGetInfo(SVnode *pVnode, const char **dbname, int32_t *vgId); int32_t vnodeSnapshotReaderOpen(SVnode *pVnode, SVSnapshotReader **ppReader, int64_t sver, int64_t ever); int32_t vnodeSnapshotReaderClose(SVSnapshotReader *pReader); int32_t vnodeSnapshotRead(SVSnapshotReader *pReader, const void **ppData, uint32_t *nData); +int32_t vnodeProcessCreateTSma(SVnode *pVnode, void *pCont, uint32_t contLen); // meta typedef struct SMeta SMeta; // todo: remove @@ -78,7 +79,19 @@ void metaReaderInit(SMetaReader *pReader, SMeta *pMeta, int32_t flags); void metaReaderClear(SMetaReader *pReader); int32_t metaGetTableEntryByUid(SMetaReader *pReader, tb_uid_t uid); int32_t metaReadNext(SMetaReader *pReader); -const void *metaGetTableTagVal(SMetaEntry *pEntry, int16_t cid); +const void *metaGetTableTagVal(SMetaEntry *pEntry, int16_t type, STagVal *tagVal); + +typedef struct SMetaFltParam { + tb_uid_t suid; + int16_t cid; + int16_t type; + char *val; + bool reverse; + int (*filterFunc)(void *a, void *b, int16_t type); + +} SMetaFltParam; + +int32_t metaFilteTableIds(SMeta *pMeta, SMetaFltParam *param, SArray *results); #if 1 // refact APIs below (TODO) typedef SVCreateTbReq STbCfg; @@ -105,13 +118,16 @@ tsdbReaderT tsdbQueryCacheLast(SVnode *pVnode, SQueryTableDataCond *pCond, STab void *pMemRef); int32_t tsdbGetFileBlocksDistInfo(tsdbReaderT *pReader, STableBlockDistInfo *pTableBlockInfo); bool isTsdbCacheLastRow(tsdbReaderT *pReader); -int32_t tsdbGetAllTableList(SMeta* pMeta, uint64_t uid, SArray* list); +int32_t tsdbGetAllTableList(SMeta *pMeta, uint64_t uid, SArray *list); +int32_t 
tsdbGetCtbIdList(SMeta *pMeta, int64_t suid, SArray *list); +void *tsdbGetIdx(SMeta *pMeta); int64_t tsdbGetNumOfRowsInMemTable(tsdbReaderT *pHandle); -bool tsdbNextDataBlock(tsdbReaderT pTsdbReadHandle); -void tsdbRetrieveDataBlockInfo(tsdbReaderT *pTsdbReadHandle, SDataBlockInfo *pBlockInfo); + +bool tsdbNextDataBlock(tsdbReaderT pTsdbReadHandle); +void tsdbRetrieveDataBlockInfo(tsdbReaderT *pTsdbReadHandle, SDataBlockInfo *pBlockInfo); int32_t tsdbRetrieveDataBlockStatisInfo(tsdbReaderT *pTsdbReadHandle, SColumnDataAgg ***pBlockStatis, bool *allHave); SArray *tsdbRetrieveDataBlock(tsdbReaderT *pTsdbReadHandle, SArray *pColumnIdList); -void tsdbResetReadHandle(tsdbReaderT queryHandle, SQueryTableDataCond *pCond); +void tsdbResetReadHandle(tsdbReaderT queryHandle, SQueryTableDataCond *pCond, int32_t tWinIdx); void tsdbCleanupReadHandle(tsdbReaderT queryHandle); // tq @@ -131,6 +147,9 @@ bool tqNextDataBlockFilterOut(STqReadHandle *pHandle, SHashObj *filterOutUids int32_t tqRetrieveDataBlock(SArray **ppCols, STqReadHandle *pHandle, uint64_t *pGroupId, uint64_t *pUid, int32_t *pNumOfRows, int16_t *pNumOfCols); +// sma +int32_t smaGetTSmaDays(SVnodeCfg *pCfg, void *pCont, uint32_t contLen, int32_t *days); + // need to reposition // structs @@ -157,12 +176,15 @@ struct SVnodeCfg { uint64_t szBuf; bool isHeap; bool isWeak; + int8_t isTsma; + int8_t isRsma; + int8_t hashMethod; + int8_t standby; STsdbCfg tsdbCfg; SWalCfg walCfg; SSyncCfg syncCfg; uint32_t hashBegin; uint32_t hashEnd; - int8_t hashMethod; }; typedef struct { diff --git a/source/dnode/vnode/src/inc/meta.h b/source/dnode/vnode/src/inc/meta.h index 3340bbb91ce0f8ed29b2ef48fc325472676b56e1..b610676c19db7b9cdb9528b3d8044e883d811780 100644 --- a/source/dnode/vnode/src/inc/meta.h +++ b/source/dnode/vnode/src/inc/meta.h @@ -16,8 +16,8 @@ #ifndef _TD_VNODE_META_H_ #define _TD_VNODE_META_H_ -#include "vnodeInt.h" #include "index.h" +#include "vnodeInt.h" #ifdef __cplusplus extern "C" { @@ -28,12 +28,12 @@ typedef struct SMetaDB SMetaDB; // metaDebug ================== // clang-format off -#define metaFatal(...) do { if (metaDebugFlag & DEBUG_FATAL) { taosPrintLog("META FATAL ", DEBUG_FATAL, 255, __VA_ARGS__); }} while(0) -#define metaError(...) do { if (metaDebugFlag & DEBUG_ERROR) { taosPrintLog("META ERROR ", DEBUG_ERROR, 255, __VA_ARGS__); }} while(0) -#define metaWarn(...) do { if (metaDebugFlag & DEBUG_WARN) { taosPrintLog("META WARN ", DEBUG_WARN, 255, __VA_ARGS__); }} while(0) -#define metaInfo(...) do { if (metaDebugFlag & DEBUG_INFO) { taosPrintLog("META ", DEBUG_INFO, 255, __VA_ARGS__); }} while(0) -#define metaDebug(...) do { if (metaDebugFlag & DEBUG_DEBUG) { taosPrintLog("META ", DEBUG_DEBUG, metaDebugFlag, __VA_ARGS__); }} while(0) -#define metaTrace(...) do { if (metaDebugFlag & DEBUG_TRACE) { taosPrintLog("META ", DEBUG_TRACE, metaDebugFlag, __VA_ARGS__); }} while(0) +#define metaFatal(...) do { if (metaDebugFlag & DEBUG_FATAL) { taosPrintLog("MTA FATAL ", DEBUG_FATAL, 255, __VA_ARGS__); }} while(0) +#define metaError(...) do { if (metaDebugFlag & DEBUG_ERROR) { taosPrintLog("MTA ERROR ", DEBUG_ERROR, 255, __VA_ARGS__); }} while(0) +#define metaWarn(...) do { if (metaDebugFlag & DEBUG_WARN) { taosPrintLog("MTA WARN ", DEBUG_WARN, 255, __VA_ARGS__); }} while(0) +#define metaInfo(...) do { if (metaDebugFlag & DEBUG_INFO) { taosPrintLog("MTA ", DEBUG_INFO, 255, __VA_ARGS__); }} while(0) +#define metaDebug(...) 
do { if (metaDebugFlag & DEBUG_DEBUG) { taosPrintLog("MTA ", DEBUG_DEBUG, metaDebugFlag, __VA_ARGS__); }} while(0) +#define metaTrace(...) do { if (metaDebugFlag & DEBUG_TRACE) { taosPrintLog("MTA ", DEBUG_TRACE, metaDebugFlag, __VA_ARGS__); }} while(0) // clang-format on // metaOpen ================== @@ -45,8 +45,6 @@ int32_t metaULock(SMeta* pMeta); int metaEncodeEntry(SEncoder* pCoder, const SMetaEntry* pME); int metaDecodeEntry(SDecoder* pCoder, SMetaEntry* pME); -// metaTable ================== - // metaQuery ================== int metaGetTableEntryByVersion(SMetaReader* pReader, int64_t version, tb_uid_t uid); @@ -118,6 +116,10 @@ typedef struct { int64_t smaUid; } SSmaIdxKey; +// metaTable ================== +int metaCreateTagIdxKey(tb_uid_t suid, int32_t cid, const void* pTagData, int32_t nTagData, int8_t type, tb_uid_t uid, + STagIdxKey** ppTagIdxKey, int32_t* nTagIdxKey); + #ifndef META_REFACT // SMetaDB int metaOpenDB(SMeta* pMeta); diff --git a/source/dnode/vnode/src/inc/sma.h b/source/dnode/vnode/src/inc/sma.h index 0601df61e71317aed596d6f200cb8314156430f5..4ca62f1de9fbe9183d74e9df1dfeca8fbde2e0fb 100644 --- a/source/dnode/vnode/src/inc/sma.h +++ b/source/dnode/vnode/src/inc/sma.h @@ -219,12 +219,14 @@ static int32_t tdInitSmaEnv(SSma *pSma, int8_t smaType, const char *path, SDisk void *tdFreeRSmaInfo(SRSmaInfo *pInfo); int32_t tdProcessTSmaCreateImpl(SSma *pSma, int64_t version, const char *pMsg); -int32_t tdUpdateExpiredWindowImpl(SSma *pSma, SSubmitReq *pMsg, int64_t version); +int32_t tdUpdateExpiredWindowImpl(SSma *pSma, const SSubmitReq *pMsg, int64_t version); // TODO: This is the basic params, and should wrap the params to a queryHandle. int32_t tdGetTSmaDataImpl(SSma *pSma, char *pData, int64_t indexUid, TSKEY querySKey, int32_t nMaxResult); +int32_t tdGetTSmaDaysImpl(SVnodeCfg *pCfg, void *pCont, uint32_t contLen, int32_t *days); + #ifdef __cplusplus } #endif -#endif /*_TD_VNODE_SMA_H_*/ \ No newline at end of file +#endif /*_TD_VNODE_SMA_H_*/ diff --git a/source/dnode/vnode/src/inc/tq.h b/source/dnode/vnode/src/inc/tq.h index 06ff6329e0b3ddc69cc50ec1becc9541e3939ca5..7cd82b0ac37446fa413715150210dbf951485006 100644 --- a/source/dnode/vnode/src/inc/tq.h +++ b/source/dnode/vnode/src/inc/tq.h @@ -44,56 +44,93 @@ extern "C" { typedef struct STqOffsetCfg STqOffsetCfg; typedef struct STqOffsetStore STqOffsetStore; +// tqRead + struct STqReadHandle { int64_t ver; - SHashObj* tbIdHash; const SSubmitReq* pMsg; SSubmitBlk* pBlock; SSubmitMsgIter msgIter; SSubmitBlkIter blkIter; - SMeta* pVnodeMeta; - SArray* pColIdList; // SArray - int32_t sver; - int64_t cachedSchemaUid; - SSchemaWrapper* pSchemaWrapper; - STSchema* pSchema; + + SMeta* pVnodeMeta; + SHashObj* tbIdHash; + SArray* pColIdList; // SArray + + int32_t cachedSchemaVer; + int64_t cachedSchemaUid; + SSchemaWrapper* pSchemaWrapper; + STSchema* pSchema; }; +// tqPush + typedef struct { - int64_t consumerId; - int32_t epoch; - int32_t skipLogNum; - int64_t reqOffset; + // msg info + int64_t consumerId; + int64_t reqOffset; + int64_t processedVer; + int32_t epoch; + int32_t skipLogNum; + // rpc info + int64_t reqId; + SRpcHandleInfo rpcInfo; + tmr_h timerId; + int8_t tmrStopped; + // exec + int8_t inputStatus; + int8_t execStatus; + SStreamQ inputQ; SRWLatch lock; - SRpcMsg* handle; } STqPushHandle; +// tqExec + typedef struct { - char subKey[TSDB_SUBSCRIBE_KEY_LEN]; - int64_t consumerId; - int32_t epoch; - int8_t subType; - int8_t withTbName; - int8_t withSchema; - int8_t withTag; - char* qmsg; - SHashObj* pDropTbUid; - 
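The reworked STqReadHandle above caches the decoded schema keyed by (cachedSchemaUid, cachedSchemaVer), so consecutive submit blocks from the same table can skip the meta lookup. A hedged sketch of the refresh check those fields imply; the helper name and call sites are illustrative assumptions, since the actual logic lives in tqRead.c, which this hunk does not show.

```c
// Hedged sketch: refresh the cached schema only when the incoming block's
// (uid, sver) differs from what the handle already holds. Freeing the
// previously cached schema is omitted here for brevity.
static int32_t tqRefreshSchemaSketch(STqReadHandle *pHandle, int64_t uid, int32_t sver) {
  if (pHandle->cachedSchemaUid == uid && pHandle->cachedSchemaVer == sver) {
    return 0;  // cache hit: reuse pHandle->pSchemaWrapper / pHandle->pSchema
  }

  pHandle->pSchemaWrapper = metaGetTableSchema(pHandle->pVnodeMeta, uid, sver, true);
  pHandle->pSchema = metaGetTbTSchema(pHandle->pVnodeMeta, uid, sver);
  if (pHandle->pSchemaWrapper == NULL || pHandle->pSchema == NULL) return -1;

  pHandle->cachedSchemaUid = uid;
  pHandle->cachedSchemaVer = sver;
  return 0;
}
```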
STqPushHandle pushHandle; - // SRWLatch lock; - SWalReadHandle* pWalReader; - // task number should be the same with fetch thread + char* qmsg; + qTaskInfo_t task[5]; +} STqExecCol; + +typedef struct { + int64_t suid; +} STqExecTb; + +typedef struct { + SHashObj* pFilterOutTbUid; +} STqExecDb; + +typedef struct { + int8_t subType; + STqReadHandle* pExecReader[5]; - qTaskInfo_t task[5]; -} STqExec; + union { + STqExecCol execCol; + STqExecTb execTb; + STqExecDb execDb; + } exec; +} STqExecHandle; + +typedef struct { + // info + char subKey[TSDB_SUBSCRIBE_KEY_LEN]; + int64_t consumerId; + int32_t epoch; + + // reader + SWalReadHandle* pWalReader; -int32_t tEncodeSTqExec(SEncoder* pEncoder, const STqExec* pExec); -int32_t tDecodeSTqExec(SDecoder* pDecoder, STqExec* pExec); + // push + STqPushHandle pushHandle; + + // exec + STqExecHandle execHandle; +} STqHandle; struct STQ { char* path; - SHashObj* pushMgr; // consumerId -> STqExec* - SHashObj* execs; // subKey -> STqExec - SHashObj* pStreamTasks; + SHashObj* pushMgr; // consumerId -> STqHandle* + SHashObj* handles; // subKey -> STqHandle + SHashObj* pStreamTasks; // taksId -> SStreamTask SVnode* pVnode; SWal* pWal; TDB* pMetaStore; @@ -107,18 +144,29 @@ typedef struct { static STqMgmt tqMgmt = {0}; -// init once -int tqInit(); -void tqCleanUp(); +// tqRead +int64_t tqFetchLog(STQ* pTq, STqHandle* pHandle, int64_t* fetchOffset, SWalHead** pHeadWithCkSum); -// tqOffset -STqOffsetStore* STqOffsetOpen(STqOffsetCfg*); -void STqOffsetClose(STqOffsetStore*); +// tqExec +int32_t tqDataExec(STQ* pTq, STqExecHandle* pExec, SSubmitReq* pReq, SMqDataBlkRsp* pRsp, int32_t workerId); +int32_t tqSendPollRsp(STQ* pTq, const SRpcMsg* pMsg, const SMqPollReq* pReq, const SMqDataBlkRsp* pRsp); -int64_t tqOffsetFetch(STqOffsetStore* pStore, const char* subscribeKey); -int32_t tqOffsetCommit(STqOffsetStore* pStore, const char* subscribeKey, int64_t offset); -int32_t tqOffsetPersist(STqOffsetStore* pStore, const char* subscribeKey); -int32_t tqOffsetPersistAll(STqOffsetStore* pStore); +// tqMeta +int32_t tqMetaOpen(STQ* pTq); +int32_t tqMetaClose(STQ* pTq); +int32_t tqMetaSaveHandle(STQ* pTq, const char* key, const STqHandle* pHandle); +int32_t tqMetaDeleteHandle(STQ* pTq, const char* key); + +// tqSink +void tqTableSink(SStreamTask* pTask, void* vnode, int64_t ver, void* data); + +// tqOffset +STqOffsetStore* tqOffsetOpen(STqOffsetCfg*); +void tqOffsetClose(STqOffsetStore*); +int64_t tqOffsetFetch(STqOffsetStore* pStore, const char* subscribeKey); +int32_t tqOffsetCommit(STqOffsetStore* pStore, const char* subscribeKey, int64_t offset); +int32_t tqOffsetPersist(STqOffsetStore* pStore, const char* subscribeKey); +int32_t tqOffsetPersistAll(STqOffsetStore* pStore); #ifdef __cplusplus } diff --git a/source/dnode/vnode/src/inc/tsdb.h b/source/dnode/vnode/src/inc/tsdb.h index 6d3d23cc208092d779c0067348de8279db337f75..a62b4c4409ae0a4561c5150e4d6bd669698e666c 100644 --- a/source/dnode/vnode/src/inc/tsdb.h +++ b/source/dnode/vnode/src/inc/tsdb.h @@ -32,14 +32,27 @@ extern "C" { #define tsdbTrace(...) 
do { if (tsdbDebugFlag & DEBUG_TRACE) { taosPrintLog("TSDB ", DEBUG_TRACE, tsdbDebugFlag, __VA_ARGS__); }} while(0) // clang-format on +typedef struct TSDBROW TSDBROW; +typedef struct TSDBKEY TSDBKEY; +typedef struct SDelOp SDelOp; + +static int tsdbKeyCmprFn(const void *p1, const void *p2); + +// tsdbMemTable2.c ============================================================================================== +typedef struct SMemTable SMemTable; + +int32_t tsdbMemTableCreate2(STsdb *pTsdb, SMemTable **ppMemTable); +void tsdbMemTableDestroy2(SMemTable *pMemTable); + // tsdbMemTable ================ +typedef struct STsdbRow STsdbRow; typedef struct STbData STbData; typedef struct STsdbMemTable STsdbMemTable; typedef struct SMergeInfo SMergeInfo; typedef struct STable STable; int tsdbMemTableCreate(STsdb *pTsdb, STsdbMemTable **ppMemTable); -void tsdbMemTableDestroy(STsdb *pTsdb, STsdbMemTable *pMemTable); +void tsdbMemTableDestroy(STsdbMemTable *pMemTable); int tsdbLoadDataFromCache(STsdb *pTsdb, STable *pTable, SSkipListIterator *pIter, TSKEY maxKey, int maxRowsToRead, SDataCols *pCols, TKEY *filterKeys, int nFilterKeys, bool keepDup, SMergeInfo *pMergeInfo); @@ -79,13 +92,14 @@ struct STsdb { struct STable { uint64_t tid; uint64_t uid; - STSchema *pSchema; + STSchema *pSchema; // latest schema + STSchema *pCacheSchema; // cached cache }; #define TABLE_TID(t) (t)->tid #define TABLE_UID(t) (t)->uid -int tsdbPrepareCommit(STsdb *pTsdb); +int tsdbPrepareCommit(STsdb *pTsdb); typedef enum { TSDB_FILE_HEAD = 0, // .head TSDB_FILE_DATA, // .data @@ -181,13 +195,15 @@ int tsdbUnlockRepo(STsdb *pTsdb); static FORCE_INLINE STSchema *tsdbGetTableSchemaImpl(STsdb *pTsdb, STable *pTable, bool lock, bool copy, int32_t version) { - - if ((version != -1) && (schemaVersion(pTable->pSchema) != version)) { - taosMemoryFreeClear(pTable->pSchema); - pTable->pSchema = metaGetTbTSchema(REPO_META(pTsdb), pTable->uid, version); + if ((version < 0) || (schemaVersion(pTable->pSchema) == version)) { + return pTable->pSchema; } - return pTable->pSchema; + if (!pTable->pCacheSchema || (schemaVersion(pTable->pCacheSchema) != version)) { + taosMemoryFreeClear(pTable->pCacheSchema); + pTable->pCacheSchema = metaGetTbTSchema(REPO_META(pTsdb), pTable->uid, version); + } + return pTable->pCacheSchema; } // tsdbMemTable.h @@ -842,6 +858,42 @@ static FORCE_INLINE int tsdbUnLockFS(STsdbFS *pFs) { return 0; } +struct TSDBROW { + int64_t version; + STSRow2 tsRow; +}; + +struct TSDBKEY { + int64_t version; + TSKEY ts; +}; + +struct SDelOp { + int64_t version; + TSKEY sKey; // included + TSKEY eKey; // included + SDelOp *pNext; +}; + +static FORCE_INLINE int tsdbKeyCmprFn(const void *p1, const void *p2) { + TSDBKEY *pKey1 = (TSDBKEY *)p1; + TSDBKEY *pKey2 = (TSDBKEY *)p2; + + if (pKey1->ts < pKey2->ts) { + return -1; + } else if (pKey1->ts > pKey2->ts) { + return 1; + } + + if (pKey1->version < pKey2->version) { + return -1; + } else if (pKey1->version > pKey2->version) { + return 1; + } + + return 0; +} + #endif #ifdef __cplusplus diff --git a/source/dnode/vnode/src/inc/vnd.h b/source/dnode/vnode/src/inc/vnd.h index eb3382ac4cd46a602a214b09b5a8debeaf15087f..5f4f7e70daf089d22fa4e80978e787d10dc08c09 100644 --- a/source/dnode/vnode/src/inc/vnd.h +++ b/source/dnode/vnode/src/inc/vnd.h @@ -81,9 +81,10 @@ int32_t vnodeSyncCommit(SVnode* pVnode); int32_t vnodeAsyncCommit(SVnode* pVnode); // vnodeSync.c -int32_t vnodeSyncOpen(SVnode* pVnode, char* path); -void vnodeSyncStart(SVnode* pVnode); -void vnodeSyncClose(SVnode* pVnode); +int32_t 
vnodeSyncOpen(SVnode* pVnode, char* path); +void vnodeSyncStart(SVnode* pVnode); +void vnodeSyncClose(SVnode* pVnode); +int32_t vnodeSyncAlter(SVnode* pVnode, SRpcMsg* pMsg); #ifdef __cplusplus } diff --git a/source/dnode/vnode/src/inc/vnodeInt.h b/source/dnode/vnode/src/inc/vnodeInt.h index d38ff716abecd9b553823a08299709e204392353..d3b5f29aac9e1ed96d8fe3569065ecaa8a1a1706 100644 --- a/source/dnode/vnode/src/inc/vnodeInt.h +++ b/source/dnode/vnode/src/inc/vnodeInt.h @@ -87,10 +87,11 @@ int metaAlterSTable(SMeta* pMeta, int64_t version, SVCreateStbReq* p int metaDropSTable(SMeta* pMeta, int64_t verison, SVDropStbReq* pReq); int metaCreateTable(SMeta* pMeta, int64_t version, SVCreateTbReq* pReq); int metaDropTable(SMeta* pMeta, int64_t version, SVDropTbReq* pReq, SArray* tbUids); -int metaAlterTable(SMeta* pMeta, int64_t version, SVAlterTbReq* pReq); +int metaAlterTable(SMeta* pMeta, int64_t version, SVAlterTbReq* pReq, STableMetaRsp *pMetaRsp); SSchemaWrapper* metaGetTableSchema(SMeta* pMeta, tb_uid_t uid, int32_t sver, bool isinline); STSchema* metaGetTbTSchema(SMeta* pMeta, tb_uid_t uid, int32_t sver); int metaGetTableEntryByName(SMetaReader* pReader, const char* name); +tb_uid_t metaGetTableEntryUidByName(SMeta* pMeta, const char* name); int metaGetTbNum(SMeta* pMeta); SMCtbCursor* metaOpenCtbCursor(SMeta* pMeta, tb_uid_t uid); void metaCloseCtbCursor(SMCtbCursor* pCtbCur); @@ -102,6 +103,7 @@ SArray* metaGetSmaTbUids(SMeta* pMeta); int32_t metaSnapshotReaderOpen(SMeta* pMeta, SMetaSnapshotReader** ppReader, int64_t sver, int64_t ever); int32_t metaSnapshotReaderClose(SMetaSnapshotReader* pReader); int32_t metaSnapshotRead(SMetaSnapshotReader* pReader, void** ppData, uint32_t* nData); +void* metaGetIdx(SMeta* pMeta); int32_t metaCreateTSma(SMeta* pMeta, int64_t version, SSmaCfg* pCfg); int32_t metaDropTSma(SMeta* pMeta, int64_t indexUid); @@ -123,6 +125,8 @@ int32_t tsdbSnapshotReaderClose(STsdbSnapshotReader* pReader); int32_t tsdbSnapshotRead(STsdbSnapshotReader* pReader, void** ppData, uint32_t* nData); // tq +int tqInit(); +void tqCleanUp(); STQ* tqOpen(const char* path, SVnode* pVnode, SWal* pWal); void tqClose(STQ*); int tqPushMsg(STQ*, void* msg, int32_t msgLen, tmsg_t msgType, int64_t ver); @@ -143,11 +147,11 @@ int32_t tqProcessTaskRecoverRsp(STQ* pTq, SRpcMsg* pMsg); int32_t smaOpen(SVnode* pVnode); int32_t smaClose(SSma* pSma); -int32_t tdUpdateExpireWindow(SSma* pSma, SSubmitReq* pMsg, int64_t version); +int32_t tdUpdateExpireWindow(SSma* pSma, const SSubmitReq* pMsg, int64_t version); int32_t tdProcessTSmaCreate(SSma* pSma, int64_t version, const char* msg); int32_t tdProcessTSmaInsert(SSma* pSma, int64_t indexUid, const char* msg); -int32_t tdProcessRSmaCreate(SSma* pSma, SMeta* pMeta, SVCreateStbReq* pReq, SMsgCb* pMsgCb); +int32_t tdProcessRSmaCreate(SVnode* pVnode, SVCreateStbReq* pReq); int32_t tdProcessRSmaSubmit(SSma* pSma, void* pMsg, int32_t inputType); int32_t tdFetchTbUidList(SSma* pSma, STbUidStore** ppStore, tb_uid_t suid, tb_uid_t uid); int32_t tdUpdateTbUidList(SSma* pSma, STbUidStore* pUidStore); @@ -237,6 +241,8 @@ struct SVnode { #define VND_RSMA1(vnd) ((vnd)->pSma->pRSmaTsdb1) #define VND_RSMA2(vnd) ((vnd)->pSma->pRSmaTsdb2) #define VND_RETENTIONS(vnd) (&(vnd)->config.tsdbCfg.retentions) +#define VND_IS_RSMA(v) ((v)->config.isRsma == 1) +#define VND_IS_TSMA(v) ((v)->config.isTsma == 1) struct STbUidStore { tb_uid_t suid; @@ -269,11 +275,6 @@ struct SSma { #define SMA_RSMA_TSDB1(s) ((s)->pRSmaTsdb1) #define SMA_RSMA_TSDB2(s) ((s)->pRSmaTsdb2) -static 
FORCE_INLINE bool vnodeIsRollup(SVnode* pVnode) { - SRetention* pRetention = &(pVnode->config.tsdbCfg.retentions[0]); - return (pRetention->freq > 0 && pRetention->keep > 0); -} - // sma void smaHandleRes(void* pVnode, int64_t smaId, const SArray* data); diff --git a/source/dnode/vnode/src/meta/metaEntry.c b/source/dnode/vnode/src/meta/metaEntry.c index be2ddfc32f83fcf0d6b5500fb21cdec632c27aa8..db99257ea707d68858887d34cdc29077e099eec3 100644 --- a/source/dnode/vnode/src/meta/metaEntry.c +++ b/source/dnode/vnode/src/meta/metaEntry.c @@ -30,7 +30,8 @@ int metaEncodeEntry(SEncoder *pCoder, const SMetaEntry *pME) { if (tEncodeI64(pCoder, pME->ctbEntry.ctime) < 0) return -1; if (tEncodeI32(pCoder, pME->ctbEntry.ttlDays) < 0) return -1; if (tEncodeI64(pCoder, pME->ctbEntry.suid) < 0) return -1; - if (tEncodeBinary(pCoder, pME->ctbEntry.pTags, kvRowLen(pME->ctbEntry.pTags)) < 0) return -1; + debugCheckTags((STag*)pME->ctbEntry.pTags); // TODO: remove after debug + if (tEncodeTag(pCoder, (const STag *)pME->ctbEntry.pTags) < 0) return -1; } else if (pME->type == TSDB_NORMAL_TABLE) { if (tEncodeI64(pCoder, pME->ntbEntry.ctime) < 0) return -1; if (tEncodeI32(pCoder, pME->ntbEntry.ttlDays) < 0) return -1; @@ -47,7 +48,6 @@ int metaEncodeEntry(SEncoder *pCoder, const SMetaEntry *pME) { } int metaDecodeEntry(SDecoder *pCoder, SMetaEntry *pME) { - uint32_t len; if (tStartDecode(pCoder) < 0) return -1; if (tDecodeI64(pCoder, &pME->version) < 0) return -1; @@ -62,7 +62,8 @@ int metaDecodeEntry(SDecoder *pCoder, SMetaEntry *pME) { if (tDecodeI64(pCoder, &pME->ctbEntry.ctime) < 0) return -1; if (tDecodeI32(pCoder, &pME->ctbEntry.ttlDays) < 0) return -1; if (tDecodeI64(pCoder, &pME->ctbEntry.suid) < 0) return -1; - if (tDecodeBinary(pCoder, &pME->ctbEntry.pTags, &len) < 0) return -1; // (TODO) + if (tDecodeTag(pCoder, (STag **)&pME->ctbEntry.pTags) < 0) return -1; // (TODO) + debugCheckTags((STag*)pME->ctbEntry.pTags); // TODO: remove after debug } else if (pME->type == TSDB_NORMAL_TABLE) { if (tDecodeI64(pCoder, &pME->ntbEntry.ctime) < 0) return -1; if (tDecodeI32(pCoder, &pME->ntbEntry.ttlDays) < 0) return -1; diff --git a/source/dnode/vnode/src/meta/metaOpen.c b/source/dnode/vnode/src/meta/metaOpen.c index f23e7f88056d6a397a5979bda11dd4f080ba0212..86637d28504bf7169c4b3f2ab4ae145af1e10661 100644 --- a/source/dnode/vnode/src/meta/metaOpen.c +++ b/source/dnode/vnode/src/meta/metaOpen.c @@ -53,42 +53,42 @@ int metaOpen(SVnode *pVnode, SMeta **ppMeta) { // open env ret = tdbOpen(pMeta->path, pVnode->config.szPage, pVnode->config.szCache, &pMeta->pEnv); if (ret < 0) { - metaError("vgId:%d failed to open meta env since %s", TD_VID(pVnode), tstrerror(terrno)); + metaError("vgId:%d, failed to open meta env since %s", TD_VID(pVnode), tstrerror(terrno)); goto _err; } // open pTbDb ret = tdbTbOpen("table.db", sizeof(STbDbKey), -1, tbDbKeyCmpr, pMeta->pEnv, &pMeta->pTbDb); if (ret < 0) { - metaError("vgId:%d failed to open meta table db since %s", TD_VID(pVnode), tstrerror(terrno)); + metaError("vgId:%d, failed to open meta table db since %s", TD_VID(pVnode), tstrerror(terrno)); goto _err; } // open pSkmDb ret = tdbTbOpen("schema.db", sizeof(SSkmDbKey), -1, skmDbKeyCmpr, pMeta->pEnv, &pMeta->pSkmDb); if (ret < 0) { - metaError("vgId:%d failed to open meta schema db since %s", TD_VID(pVnode), tstrerror(terrno)); + metaError("vgId:%d, failed to open meta schema db since %s", TD_VID(pVnode), tstrerror(terrno)); goto _err; } // open pUidIdx ret = tdbTbOpen("uid.idx", sizeof(tb_uid_t), sizeof(int64_t), uidIdxKeyCmpr, 
pMeta->pEnv, &pMeta->pUidIdx); if (ret < 0) { - metaError("vgId:%d failed to open meta uid idx since %s", TD_VID(pVnode), tstrerror(terrno)); + metaError("vgId:%d, failed to open meta uid idx since %s", TD_VID(pVnode), tstrerror(terrno)); goto _err; } // open pNameIdx ret = tdbTbOpen("name.idx", -1, sizeof(tb_uid_t), NULL, pMeta->pEnv, &pMeta->pNameIdx); if (ret < 0) { - metaError("vgId:%d failed to open meta name index since %s", TD_VID(pVnode), tstrerror(terrno)); + metaError("vgId:%d, failed to open meta name index since %s", TD_VID(pVnode), tstrerror(terrno)); goto _err; } // open pCtbIdx ret = tdbTbOpen("ctb.idx", sizeof(SCtbIdxKey), 0, ctbIdxKeyCmpr, pMeta->pEnv, &pMeta->pCtbIdx); if (ret < 0) { - metaError("vgId:%d failed to open meta child table index since %s", TD_VID(pVnode), tstrerror(terrno)); + metaError("vgId:%d, failed to open meta child table index since %s", TD_VID(pVnode), tstrerror(terrno)); goto _err; } @@ -100,14 +100,14 @@ int metaOpen(SVnode *pVnode, SMeta **ppMeta) { taosMkDir(indexFullPath); ret = indexOpen(indexOptsCreate(), indexFullPath, (SIndex **)&pMeta->pTagIvtIdx); if (ret < 0) { - metaError("vgId:%d failed to open meta tag index since %s", TD_VID(pVnode), tstrerror(terrno)); + metaError("vgId:%d, failed to open meta tag index since %s", TD_VID(pVnode), tstrerror(terrno)); goto _err; } #else ret = tdbTbOpen("tag.idx", -1, 0, tagIdxKeyCmpr, pMeta->pEnv, &pMeta->pTagIdx); if (ret < 0) { - metaError("vgId:%d failed to open meta tag index since %s", TD_VID(pVnode), tstrerror(terrno)); + metaError("vgId:%d, failed to open meta tag index since %s", TD_VID(pVnode), tstrerror(terrno)); goto _err; } #endif @@ -115,24 +115,24 @@ int metaOpen(SVnode *pVnode, SMeta **ppMeta) { // open pTtlIdx ret = tdbTbOpen("ttl.idx", sizeof(STtlIdxKey), 0, ttlIdxKeyCmpr, pMeta->pEnv, &pMeta->pTtlIdx); if (ret < 0) { - metaError("vgId:%d failed to open meta ttl index since %s", TD_VID(pVnode), tstrerror(terrno)); + metaError("vgId:%d, failed to open meta ttl index since %s", TD_VID(pVnode), tstrerror(terrno)); goto _err; } // open pSmaIdx ret = tdbTbOpen("sma.idx", sizeof(SSmaIdxKey), 0, smaIdxKeyCmpr, pMeta->pEnv, &pMeta->pSmaIdx); if (ret < 0) { - metaError("vgId:%d failed to open meta sma index since %s", TD_VID(pVnode), tstrerror(terrno)); + metaError("vgId:%d, failed to open meta sma index since %s", TD_VID(pVnode), tstrerror(terrno)); goto _err; } // open index if (metaOpenIdx(pMeta) < 0) { - metaError("vgId:%d failed to open meta index since %s", TD_VID(pVnode), tstrerror(terrno)); + metaError("vgId:%d, failed to open meta index since %s", TD_VID(pVnode), tstrerror(terrno)); goto _err; } - metaDebug("vgId:%d meta is opened", TD_VID(pVnode)); + metaDebug("vgId:%d, meta is opened", TD_VID(pVnode)); *ppMeta = pMeta; return 0; diff --git a/source/dnode/vnode/src/meta/metaQuery.c b/source/dnode/vnode/src/meta/metaQuery.c index c19190e68a6bd54a106a9de1278d8870989864dc..21a55d646331dd2ecb0a8c69c22edf4e6bd0f8cf 100644 --- a/source/dnode/vnode/src/meta/metaQuery.c +++ b/source/dnode/vnode/src/meta/metaQuery.c @@ -31,7 +31,7 @@ void metaReaderClear(SMetaReader *pReader) { } int metaGetTableEntryByVersion(SMetaReader *pReader, int64_t version, tb_uid_t uid) { - SMeta *pMeta = pReader->pMeta; + SMeta * pMeta = pReader->pMeta; STbDbKey tbDbKey = {.version = version, .uid = uid}; // query table.db @@ -54,7 +54,7 @@ _err: } int metaGetTableEntryByUid(SMetaReader *pReader, tb_uid_t uid) { - SMeta *pMeta = pReader->pMeta; + SMeta * pMeta = pReader->pMeta; int64_t version; // query uid.idx @@ 
-68,7 +68,7 @@ int metaGetTableEntryByUid(SMetaReader *pReader, tb_uid_t uid) { } int metaGetTableEntryByName(SMetaReader *pReader, const char *name) { - SMeta *pMeta = pReader->pMeta; + SMeta * pMeta = pReader->pMeta; tb_uid_t uid; // query name.idx @@ -81,6 +81,19 @@ int metaGetTableEntryByName(SMetaReader *pReader, const char *name) { return metaGetTableEntryByUid(pReader, uid); } +tb_uid_t metaGetTableEntryUidByName(SMeta *pMeta, const char *name) { + void * pData = NULL; + int nData = 0; + tb_uid_t uid = 0; + + if (tdbTbGet(pMeta->pNameIdx, name, strlen(name) + 1, &pData, &nData) == 0) { + uid = *(tb_uid_t *)pData; + tdbFree(pData); + } + + return uid; +} + int metaReadNext(SMetaReader *pReader) { SMeta *pMeta = pReader->pMeta; @@ -121,7 +134,7 @@ void metaCloseTbCursor(SMTbCursor *pTbCur) { int metaTbCursorNext(SMTbCursor *pTbCur) { int ret; - void *pBuf; + void * pBuf; STbCfg tbCfg; for (;;) { @@ -142,52 +155,61 @@ int metaTbCursorNext(SMTbCursor *pTbCur) { } SSchemaWrapper *metaGetTableSchema(SMeta *pMeta, tb_uid_t uid, int32_t sver, bool isinline) { - void *pKey = NULL; - void *pVal = NULL; - int kLen = 0; - int vLen = 0; - int ret; - SSkmDbKey skmDbKey; - SSchemaWrapper *pSW = NULL; - SSchema *pSchema = NULL; - void *pBuf; - SDecoder coder = {0}; - - // fetch - skmDbKey.uid = uid; - skmDbKey.sver = sver; - pKey = &skmDbKey; - kLen = sizeof(skmDbKey); + void * pData = NULL; + int nData = 0; + int64_t version; + SSchemaWrapper schema = {0}; + SSchemaWrapper *pSchema = NULL; + SDecoder dc = {0}; + metaRLock(pMeta); - ret = tdbTbGet(pMeta->pSkmDb, pKey, kLen, &pVal, &vLen); - metaULock(pMeta); - if (ret < 0) { - return NULL; - } + if (sver < 0) { + if (tdbTbGet(pMeta->pUidIdx, &uid, sizeof(uid), &pData, &nData) < 0) { + goto _err; + } - // decode - pBuf = pVal; - pSW = taosMemoryMalloc(sizeof(SSchemaWrapper)); + version = *(int64_t *)pData; - tDecoderInit(&coder, pVal, vLen); - tDecodeSSchemaWrapper(&coder, pSW); - pSchema = taosMemoryMalloc(sizeof(SSchema) * pSW->nCols); - memcpy(pSchema, pSW->pSchema, sizeof(SSchema) * pSW->nCols); - tDecoderClear(&coder); + tdbTbGet(pMeta->pTbDb, &(STbDbKey){.uid = uid, .version = version}, sizeof(STbDbKey), &pData, &nData); - pSW->pSchema = pSchema; + SMetaEntry me = {0}; + tDecoderInit(&dc, pData, nData); + metaDecodeEntry(&dc, &me); + if (me.type == TSDB_SUPER_TABLE) { + pSchema = tCloneSSchemaWrapper(&me.stbEntry.schemaRow); + } else if (me.type == TSDB_NORMAL_TABLE) { + pSchema = tCloneSSchemaWrapper(&me.ntbEntry.schemaRow); + } else { + ASSERT(0); + } + tDecoderClear(&dc); + } else { + if (tdbTbGet(pMeta->pSkmDb, &(SSkmDbKey){.uid = uid, .sver = sver}, sizeof(SSkmDbKey), &pData, &nData) < 0) { + goto _err; + } - tdbFree(pVal); + tDecoderInit(&dc, pData, nData); + tDecodeSSchemaWrapper(&dc, &schema); + pSchema = tCloneSSchemaWrapper(&schema); + tDecoderClear(&dc); + } - return pSW; + metaULock(pMeta); + tdbFree(pData); + return pSchema; + +_err: + metaULock(pMeta); + tdbFree(pData); + return NULL; } struct SMCtbCursor { - SMeta *pMeta; - TBC *pCur; + SMeta * pMeta; + TBC * pCur; tb_uid_t suid; - void *pKey; - void *pVal; + void * pKey; + void * pVal; int kLen; int vLen; }; @@ -259,10 +281,10 @@ tb_uid_t metaCtbCursorNext(SMCtbCursor *pCtbCur) { STSchema *metaGetTbTSchema(SMeta *pMeta, tb_uid_t uid, int32_t sver) { tb_uid_t quid; SMetaReader mr = {0}; - STSchema *pTSchema = NULL; + STSchema * pTSchema = NULL; SSchemaWrapper *pSW = NULL; STSchemaBuilder sb = {0}; - SSchema *pSchema; + SSchema * pSchema; metaReaderInit(&mr, pMeta, 0); 
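With the return-value fix in metaGetTableEntryUidByName above (the original body computed uid but returned 0 unconditionally) and the rewritten metaGetTableSchema, a caller can resolve a table's current schema by name in two steps: sver < 0 now means "latest version", resolved through uid.idx and table.db instead of schema.db. A minimal sketch, with an illustrative helper name and minimal error handling:

```c
// Hedged sketch: name -> uid via name.idx, then latest schema via the
// sver < 0 path of metaGetTableSchema. A zero uid signals a name.idx miss.
SSchemaWrapper *metaGetLatestSchemaByNameSketch(SMeta *pMeta, const char *tbName) {
  tb_uid_t uid = metaGetTableEntryUidByName(pMeta, tbName);
  if (uid == 0) return NULL;  // not found in name.idx

  // sver < 0: look up the latest entry version in uid.idx, decode the
  // SMetaEntry from table.db, and clone its row schema
  return metaGetTableSchema(pMeta, uid, -1, false);
}
```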
metaGetTableEntryByUid(&mr, uid); @@ -278,7 +300,7 @@ STSchema *metaGetTbTSchema(SMeta *pMeta, tb_uid_t uid, int32_t sver) { pSW = metaGetTableSchema(pMeta, quid, sver, 0); if (!pSW) return NULL; - tdInitTSchemaBuilder(&sb, sver); + tdInitTSchemaBuilder(&sb, pSW->version); for (int i = 0; i < pSW->nCols; i++) { pSchema = pSW->pSchema + i; tdAddColToSchema(&sb, pSchema->type, pSchema->flags, pSchema->colId, pSchema->bytes); @@ -299,11 +321,11 @@ int metaGetTbNum(SMeta *pMeta) { } typedef struct { - SMeta *pMeta; - TBC *pCur; + SMeta * pMeta; + TBC * pCur; tb_uid_t uid; - void *pKey; - void *pVal; + void * pKey; + void * pVal; int kLen; int vLen; } SMSmaCursor; @@ -375,7 +397,7 @@ tb_uid_t metaSmaCursorNext(SMSmaCursor *pSmaCur) { STSmaWrapper *metaGetSmaInfoByTable(SMeta *pMeta, tb_uid_t uid, bool deepCopy) { STSmaWrapper *pSW = NULL; - SArray *pSmaIds = NULL; + SArray * pSmaIds = NULL; if (!(pSmaIds = metaGetSmaIdsByTable(pMeta, uid))) { return NULL; @@ -399,11 +421,11 @@ STSmaWrapper *metaGetSmaInfoByTable(SMeta *pMeta, tb_uid_t uid, bool deepCopy) { metaReaderInit(&mr, pMeta, 0); int64_t smaId; int smaIdx = 0; - STSma *pTSma = NULL; + STSma * pTSma = NULL; for (int i = 0; i < pSW->number; ++i) { smaId = *(tb_uid_t *)taosArrayGet(pSmaIds, i); if (metaGetTableEntryByUid(&mr, smaId) < 0) { - metaWarn("vgId:%d no entry for tbId: %" PRIi64 ", smaId: %" PRIi64, TD_VID(pMeta->pVnode), uid, smaId); + metaWarn("vgId:%d, no entry for tbId: %" PRIi64 ", smaId: %" PRIi64, TD_VID(pMeta->pVnode), uid, smaId); continue; } pTSma = pSW->tSma + smaIdx; @@ -442,16 +464,16 @@ STSmaWrapper *metaGetSmaInfoByTable(SMeta *pMeta, tb_uid_t uid, bool deepCopy) { _err: metaReaderClear(&mr); taosArrayDestroy(pSmaIds); - tdFreeTSmaWrapper(pSW, deepCopy); + tFreeTSmaWrapper(pSW, deepCopy); return NULL; } STSma *metaGetSmaInfoByIndex(SMeta *pMeta, int64_t indexUid) { - STSma *pTSma = NULL; + STSma * pTSma = NULL; SMetaReader mr = {0}; metaReaderInit(&mr, pMeta, 0); if (metaGetTableEntryByUid(&mr, indexUid) < 0) { - metaWarn("vgId:%d failed to get table entry for smaId: %" PRIi64, TD_VID(pMeta->pVnode), indexUid); + metaWarn("vgId:%d, failed to get table entry for smaId: %" PRIi64, TD_VID(pMeta->pVnode), indexUid); metaReaderClear(&mr); return NULL; } @@ -469,7 +491,7 @@ STSma *metaGetSmaInfoByIndex(SMeta *pMeta, int64_t indexUid) { } SArray *metaGetSmaIdsByTable(SMeta *pMeta, tb_uid_t uid) { - SArray *pUids = NULL; + SArray * pUids = NULL; SSmaIdxKey *pSmaIdxKey = NULL; SMSmaCursor *pCur = metaOpenSmaCursor(pMeta, uid); @@ -507,7 +529,7 @@ SArray *metaGetSmaIdsByTable(SMeta *pMeta, tb_uid_t uid) { } SArray *metaGetSmaTbUids(SMeta *pMeta) { - SArray *pUids = NULL; + SArray * pUids = NULL; SSmaIdxKey *pSmaIdxKey = NULL; tb_uid_t lastUid = 0; @@ -551,7 +573,109 @@ SArray *metaGetSmaTbUids(SMeta *pMeta) { #endif -const void *metaGetTableTagVal(SMetaEntry *pEntry, int16_t cid) { +const void *metaGetTableTagVal(SMetaEntry *pEntry, int16_t type, STagVal *val) { ASSERT(pEntry->type == TSDB_CHILD_TABLE); - return tdGetKVRowValOfCol((const SKVRow)pEntry->ctbEntry.pTags, cid); -} \ No newline at end of file + STag *tag = (STag *)pEntry->ctbEntry.pTags; + if (type == TSDB_DATA_TYPE_JSON) { + if (tag->nTag == 0) { + return NULL; + } + return tag; + } + bool find = tTagGet(tag, val); + + if (!find) { + return NULL; + } + return val; +} + +typedef struct { + SMeta * pMeta; + TBC * pCur; + tb_uid_t suid; + int16_t cid; + int16_t type; + void * pKey; + void * pVal; + int32_t kLen; + int32_t vLen; +} SIdxCursor; + +int32_t 
metaFilteTableIds(SMeta *pMeta, SMetaFltParam *param, SArray *pUids) { + SIdxCursor *pCursor = NULL; + + int32_t ret = 0, valid = 0; + pCursor = (SIdxCursor *)taosMemoryCalloc(1, sizeof(SIdxCursor)); + pCursor->pMeta = pMeta; + pCursor->suid = param->suid; + pCursor->cid = param->cid; + pCursor->type = param->type; + + metaRLock(pMeta); + ret = tdbTbcOpen(pMeta->pTagIdx, &pCursor->pCur, NULL); + if (ret < 0) { + goto END; + } + STagIdxKey *pKey = NULL; + int32_t nKey = 0; + + int32_t nTagData = 0; + void * tagData = NULL; + + if (IS_VAR_DATA_TYPE(param->type)) { + tagData = varDataVal(param->val); + nTagData = varDataLen(param->val); + } else { + tagData = param->val; + nTagData = tDataTypes[param->type].bytes; + } + ret = metaCreateTagIdxKey(pCursor->suid, pCursor->cid, tagData, nTagData, pCursor->type, + param->reverse ? INT64_MAX : INT64_MIN, &pKey, &nKey); + if (ret != 0) { + goto END; + } + int cmp = 0; + if (tdbTbcMoveTo(pCursor->pCur, pKey, nKey, &cmp) < 0) { + goto END; + } + + void * entryKey = NULL, *entryVal = NULL; + int32_t nEntryKey, nEntryVal; + while (1) { + valid = tdbTbcGet(pCursor->pCur, (const void **)&entryKey, &nEntryKey, (const void **)&entryVal, &nEntryVal); + if (valid < 0) { + break; + } + STagIdxKey *p = entryKey; + if (p != NULL) { + int32_t cmp = (*param->filterFunc)(p->data, pKey->data, pKey->type); + if (cmp == 0) { + // match + tb_uid_t tuid = 0; + if (IS_VAR_DATA_TYPE(pKey->type)) { + tuid = *(tb_uid_t *)(p->data + varDataTLen(p->data)); + } else { + tuid = *(tb_uid_t *)(p->data + tDataTypes[pCursor->type].bytes); + } + taosArrayPush(pUids, &tuid); + } else if (cmp == 1) { + // not match but should continue to iter + } else { + // not match and no more result + break; + } + } + valid = param->reverse ? tdbTbcMoveToPrev(pCursor->pCur) : tdbTbcMoveToNext(pCursor->pCur); + if (valid < 0) { + break; + } + } +END: + if (pCursor->pMeta) metaULock(pCursor->pMeta); + if (pCursor->pCur) tdbTbcClose(pCursor->pCur); + + taosMemoryFree(pCursor); + + return ret; +} diff --git a/source/dnode/vnode/src/meta/metaSma.c b/source/dnode/vnode/src/meta/metaSma.c index 75595d83a64941e0caf6f2f399345c09a226286e..fde9d30346da782129739592ab3c34bfdb964379 100644 --- a/source/dnode/vnode/src/meta/metaSma.c +++ b/source/dnode/vnode/src/meta/metaSma.c @@ -57,12 +57,12 @@ int32_t metaCreateTSma(SMeta *pMeta, int64_t version, SSmaCfg *pCfg) { if (metaHandleSmaEntry(pMeta, &me) < 0) goto _err; - metaDebug("vgId:%d tsma is created, name:%s uid: %" PRId64, TD_VID(pMeta->pVnode), pCfg->indexName, pCfg->indexUid); + metaDebug("vgId:%d, tsma is created, name:%s uid: %" PRId64, TD_VID(pMeta->pVnode), pCfg->indexName, pCfg->indexUid); return 0; _err: - metaError("vgId:%d failed to create tsma: %s uid: %" PRId64 " since %s", TD_VID(pMeta->pVnode), pCfg->indexName, + metaError("vgId:%d, failed to create tsma: %s uid: %" PRId64 " since %s", TD_VID(pMeta->pVnode), pCfg->indexName, pCfg->indexUid, tstrerror(terrno)); return -1; } diff --git a/source/dnode/vnode/src/meta/metaTable.c b/source/dnode/vnode/src/meta/metaTable.c index 7182f496c4d6410a705a82dba1c92ff6561a5faf..b45fccaf2901a15b95640ffcb7b7eb2728ed2063 100644 --- a/source/dnode/vnode/src/meta/metaTable.c +++ b/source/dnode/vnode/src/meta/metaTable.c @@ -25,15 +25,33 @@ static int metaUpdateCtbIdx(SMeta *pMeta, const SMetaEntry *pME); static int metaUpdateTagIdx(SMeta *pMeta, const SMetaEntry *pCtbEntry); static int metaDropTableByUid(SMeta *pMeta, tb_uid_t uid, int *type); +static int metaUpdateMetaRsp(tb_uid_t uid, char* tbName, 
SSchemaWrapper *pSchema, STableMetaRsp *pMetaRsp) { + pMetaRsp->pSchemas = taosMemoryMalloc(pSchema->nCols * sizeof(SSchema)); + if (NULL == pMetaRsp->pSchemas) { + terrno = TSDB_CODE_VND_OUT_OF_MEMORY; + return -1; + } + + strcpy(pMetaRsp->tbName, tbName); + pMetaRsp->numOfColumns = pSchema->nCols; + pMetaRsp->tableType = TSDB_NORMAL_TABLE; + pMetaRsp->sversion = pSchema->version; + pMetaRsp->tuid = uid; + + memcpy(pMetaRsp->pSchemas, pSchema->pSchema, pSchema->nCols * sizeof(SSchema)); + + return 0; +} + int metaCreateSTable(SMeta *pMeta, int64_t version, SVCreateStbReq *pReq) { SMetaEntry me = {0}; int kLen = 0; int vLen = 0; const void *pKey = NULL; const void *pVal = NULL; - void *pBuf = NULL; + void * pBuf = NULL; int32_t szBuf = 0; - void *p = NULL; + void * p = NULL; SMetaReader mr = {0}; // validate req @@ -61,12 +79,12 @@ int metaCreateSTable(SMeta *pMeta, int64_t version, SVCreateStbReq *pReq) { if (metaHandleEntry(pMeta, &me) < 0) goto _err; - metaDebug("vgId:%d super table is created, name:%s uid: %" PRId64, TD_VID(pMeta->pVnode), pReq->name, pReq->suid); + metaDebug("vgId:%d, super table is created, name:%s uid: %" PRId64, TD_VID(pMeta->pVnode), pReq->name, pReq->suid); return 0; _err: - metaError("vgId:%d failed to create super table: %s uid: %" PRId64 " since %s", TD_VID(pMeta->pVnode), pReq->name, + metaError("vgId:%d, failed to create super table: %s uid: %" PRId64 " since %s", TD_VID(pMeta->pVnode), pReq->name, pReq->suid, tstrerror(terrno)); return -1; } @@ -87,7 +105,7 @@ int metaDropSTable(SMeta *pMeta, int64_t verison, SVDropStbReq *pReq) { } // drop all child tables - TBC *pCtbIdxc = NULL; + TBC * pCtbIdxc = NULL; SArray *pArray = taosArrayInit(8, sizeof(tb_uid_t)); tdbTbcOpen(pMeta->pCtbIdx, &pCtbIdxc, &pMeta->txn); @@ -135,15 +153,15 @@ _drop_super_table: _exit: tdbFree(pKey); tdbFree(pData); - metaDebug("vgId:%d super table %s uid:%" PRId64 " is dropped", TD_VID(pMeta->pVnode), pReq->name, pReq->suid); + metaDebug("vgId:%d, super table %s uid:%" PRId64 " is dropped", TD_VID(pMeta->pVnode), pReq->name, pReq->suid); return 0; } int metaAlterSTable(SMeta *pMeta, int64_t version, SVCreateStbReq *pReq) { SMetaEntry oStbEntry = {0}; SMetaEntry nStbEntry = {0}; - TBC *pUidIdxc = NULL; - TBC *pTbDbc = NULL; + TBC * pUidIdxc = NULL; + TBC * pTbDbc = NULL; const void *pData; int nData; int64_t oversion; @@ -251,18 +269,18 @@ int metaCreateTable(SMeta *pMeta, int64_t version, SVCreateTbReq *pReq) { if (metaHandleEntry(pMeta, &me) < 0) goto _err; - metaDebug("vgId:%d table %s uid %" PRId64 " is created, type:%" PRId8, TD_VID(pMeta->pVnode), pReq->name, pReq->uid, + metaDebug("vgId:%d, table %s uid %" PRId64 " is created, type:%" PRId8, TD_VID(pMeta->pVnode), pReq->name, pReq->uid, pReq->type); return 0; _err: - metaError("vgId:%d failed to create table:%s type:%s since %s", TD_VID(pMeta->pVnode), pReq->name, + metaError("vgId:%d, failed to create table:%s type:%s since %s", TD_VID(pMeta->pVnode), pReq->name, pReq->type == TSDB_CHILD_TABLE ? 
"child table" : "normal table", tstrerror(terrno)); return -1; } int metaDropTable(SMeta *pMeta, int64_t version, SVDropTbReq *pReq, SArray *tbUids) { - void *pData = NULL; + void * pData = NULL; int nData = 0; int rc = 0; tb_uid_t uid; @@ -288,7 +306,7 @@ int metaDropTable(SMeta *pMeta, int64_t version, SVDropTbReq *pReq, SArray *tbUi } static int metaDropTableByUid(SMeta *pMeta, tb_uid_t uid, int *type) { - void *pData = NULL; + void * pData = NULL; int nData = 0; int rc = 0; int64_t version; @@ -323,15 +341,16 @@ static int metaDropTableByUid(SMeta *pMeta, tb_uid_t uid, int *type) { return 0; } -static int metaAlterTableColumn(SMeta *pMeta, int64_t version, SVAlterTbReq *pAlterTbReq) { - void *pVal = NULL; + +static int metaAlterTableColumn(SMeta *pMeta, int64_t version, SVAlterTbReq *pAlterTbReq, STableMetaRsp *pMetaRsp) { + void * pVal = NULL; int nVal = 0; - const void *pData = NULL; + const void * pData = NULL; int nData = 0; int ret = 0; tb_uid_t uid; int64_t oversion; - SSchema *pColumn = NULL; + SSchema * pColumn = NULL; SMetaEntry entry = {0}; SSchemaWrapper *pSchema; int c; @@ -463,6 +482,8 @@ static int metaAlterTableColumn(SMeta *pMeta, int64_t version, SVAlterTbReq *pAl metaULock(pMeta); + metaUpdateMetaRsp(uid, pAlterTbReq->tbName, pSchema, pMetaRsp); + if (pNewSchema) taosMemoryFree(pNewSchema); tDecoderClear(&dc); tdbTbcClose(pTbDbc); @@ -479,7 +500,7 @@ _err: static int metaUpdateTableTagVal(SMeta *pMeta, int64_t version, SVAlterTbReq *pAlterTbReq) { SMetaEntry ctbEntry = {0}; SMetaEntry stbEntry = {0}; - void *pVal = NULL; + void * pVal = NULL; int nVal = 0; int ret; int c; @@ -510,7 +531,7 @@ static int metaUpdateTableTagVal(SMeta *pMeta, int64_t version, SVAlterTbReq *pA oversion = *(int64_t *)pData; // search table.db - TBC *pTbDbc = NULL; + TBC * pTbDbc = NULL; SDecoder dc1 = {0}; SDecoder dc2 = {0}; @@ -534,7 +555,7 @@ static int metaUpdateTableTagVal(SMeta *pMeta, int64_t version, SVAlterTbReq *pA metaDecodeEntry(&dc2, &stbEntry); SSchemaWrapper *pTagSchema = &stbEntry.stbEntry.schemaTag; - SSchema *pColumn = NULL; + SSchema * pColumn = NULL; int32_t iCol = 0; for (;;) { pColumn = NULL; @@ -563,29 +584,39 @@ static int metaUpdateTableTagVal(SMeta *pMeta, int64_t version, SVAlterTbReq *pA } memcpy((void *)ctbEntry.ctbEntry.pTags, pAlterTbReq->pTagVal, pAlterTbReq->nTagVal); } else { - SKVRowBuilder kvrb = {0}; - const SKVRow pOldTag = (const SKVRow)ctbEntry.ctbEntry.pTags; - SKVRow pNewTag = NULL; - - tdInitKVRowBuilder(&kvrb); + const STag *pOldTag = (const STag *)ctbEntry.ctbEntry.pTags; + STag * pNewTag = NULL; + SArray * pTagArray = taosArrayInit(pTagSchema->nCols, sizeof(STagVal)); + if (!pTagArray) { + terrno = TSDB_CODE_OUT_OF_MEMORY; + goto _err; + } for (int32_t i = 0; i < pTagSchema->nCols; i++) { SSchema *pCol = &pTagSchema->pSchema[i]; if (iCol == i) { - tdAddColToKVRow(&kvrb, pCol->colId, pAlterTbReq->pTagVal, pAlterTbReq->nTagVal); + STagVal val = {0}; + val.type = pCol->type; + val.cid = pCol->colId; + if (IS_VAR_DATA_TYPE(pCol->type)) { + val.pData = pAlterTbReq->pTagVal; + val.nData = pAlterTbReq->nTagVal; + } else { + memcpy(&val.i64, pAlterTbReq->pTagVal, pAlterTbReq->nTagVal); + } + taosArrayPush(pTagArray, &val); } else { - void *p = tdGetKVRowValOfCol(pOldTag, pCol->colId); - if (p) { - if (IS_VAR_DATA_TYPE(pCol->type)) { - tdAddColToKVRow(&kvrb, pCol->colId, p, varDataTLen(p)); - } else { - tdAddColToKVRow(&kvrb, pCol->colId, p, pCol->bytes); - } + STagVal val = {.cid = pCol->colId}; + if (tTagGet(pOldTag, &val)) { + taosArrayPush(pTagArray, 
&val); } } } - - ctbEntry.ctbEntry.pTags = tdGetKVRowFromBuilder(&kvrb); - tdDestroyKVRowBuilder(&kvrb); + if ((terrno = tTagNew(pTagArray, pTagSchema->version, false, &pNewTag)) < 0) { + taosArrayDestroy(pTagArray); + goto _err; + } + ctbEntry.ctbEntry.pTags = (uint8_t *)pNewTag; + taosArrayDestroy(pTagArray); } // save to table.db @@ -619,13 +650,13 @@ static int metaUpdateTableOptions(SMeta *pMeta, int64_t version, SVAlterTbReq *p return 0; } -int metaAlterTable(SMeta *pMeta, int64_t version, SVAlterTbReq *pReq) { +int metaAlterTable(SMeta *pMeta, int64_t version, SVAlterTbReq *pReq, STableMetaRsp *pMetaRsp) { switch (pReq->action) { case TSDB_ALTER_TABLE_ADD_COLUMN: case TSDB_ALTER_TABLE_DROP_COLUMN: case TSDB_ALTER_TABLE_UPDATE_COLUMN_BYTES: case TSDB_ALTER_TABLE_UPDATE_COLUMN_NAME: - return metaAlterTableColumn(pMeta, version, pReq); + return metaAlterTableColumn(pMeta, version, pReq, pMetaRsp); case TSDB_ALTER_TABLE_UPDATE_TAG_VAL: return metaUpdateTableTagVal(pMeta, version, pReq); case TSDB_ALTER_TABLE_UPDATE_OPTIONS: @@ -639,8 +670,8 @@ int metaAlterTable(SMeta *pMeta, int64_t version, SVAlterTbReq *pReq) { static int metaSaveToTbDb(SMeta *pMeta, const SMetaEntry *pME) { STbDbKey tbDbKey; - void *pKey = NULL; - void *pVal = NULL; + void * pKey = NULL; + void * pVal = NULL; int kLen = 0; int vLen = 0; SEncoder coder = {0}; @@ -721,18 +752,13 @@ static int metaUpdateCtbIdx(SMeta *pMeta, const SMetaEntry *pME) { return tdbTbInsert(pMeta->pCtbIdx, &ctbIdxKey, sizeof(ctbIdxKey), NULL, 0, &pMeta->txn); } -static int metaCreateTagIdxKey(tb_uid_t suid, int32_t cid, const void *pTagData, int8_t type, tb_uid_t uid, - STagIdxKey **ppTagIdxKey, int32_t *nTagIdxKey) { - int32_t nTagData = 0; - - if (pTagData) { - if (IS_VAR_DATA_TYPE(type)) { - nTagData = varDataTLen(pTagData); - } else { - nTagData = tDataTypes[type].bytes; - } +int metaCreateTagIdxKey(tb_uid_t suid, int32_t cid, const void *pTagData, int32_t nTagData, int8_t type, tb_uid_t uid, + STagIdxKey **ppTagIdxKey, int32_t *nTagIdxKey) { + if (IS_VAR_DATA_TYPE(type)) { + *nTagIdxKey = sizeof(STagIdxKey) + nTagData + VARSTR_HEADER_SIZE + sizeof(tb_uid_t); + } else { + *nTagIdxKey = sizeof(STagIdxKey) + nTagData + sizeof(tb_uid_t); } - *nTagIdxKey = sizeof(STagIdxKey) + nTagData + sizeof(tb_uid_t); *ppTagIdxKey = (STagIdxKey *)taosMemoryMalloc(*nTagIdxKey); if (*ppTagIdxKey == NULL) { @@ -744,8 +770,16 @@ static int metaCreateTagIdxKey(tb_uid_t suid, int32_t cid, const void *pTagData, (*ppTagIdxKey)->cid = cid; (*ppTagIdxKey)->isNull = (pTagData == NULL) ? 
1 : 0; (*ppTagIdxKey)->type = type; - if (nTagData) memcpy((*ppTagIdxKey)->data, pTagData, nTagData); - *(tb_uid_t *)((*ppTagIdxKey)->data + nTagData) = uid; + + // refactor + if (IS_VAR_DATA_TYPE(type)) { + memcpy((*ppTagIdxKey)->data, (uint16_t *)&nTagData, VARSTR_HEADER_SIZE); + memcpy((*ppTagIdxKey)->data + VARSTR_HEADER_SIZE, pTagData, nTagData); + *(tb_uid_t *)((*ppTagIdxKey)->data + VARSTR_HEADER_SIZE + nTagData) = uid; + } else { + memcpy((*ppTagIdxKey)->data, pTagData, nTagData); + *(tb_uid_t *)((*ppTagIdxKey)->data + nTagData) = uid; + } return 0; } @@ -755,14 +789,15 @@ static void metaDestroyTagIdxKey(STagIdxKey *pTagIdxKey) { } static int metaUpdateTagIdx(SMeta *pMeta, const SMetaEntry *pCtbEntry) { - void *pData = NULL; + void * pData = NULL; int nData = 0; STbDbKey tbDbKey = {0}; SMetaEntry stbEntry = {0}; - STagIdxKey *pTagIdxKey = NULL; + STagIdxKey * pTagIdxKey = NULL; int32_t nTagIdxKey; const SSchema *pTagColumn; // = &stbEntry.stbEntry.schema.pSchema[0]; - const void *pTagData = NULL; // + const void * pTagData = NULL; // + int32_t nTagData = 0; SDecoder dc = {0}; // get super table @@ -775,7 +810,21 @@ static int metaUpdateTagIdx(SMeta *pMeta, const SMetaEntry *pCtbEntry) { metaDecodeEntry(&dc, &stbEntry); pTagColumn = &stbEntry.stbEntry.schemaTag.pSchema[0]; - pTagData = tdGetKVRowValOfCol((const SKVRow)pCtbEntry->ctbEntry.pTags, pTagColumn->colId); + + STagVal tagVal = {.cid = pTagColumn->colId}; + if (pTagColumn->type != TSDB_DATA_TYPE_JSON) { + tTagGet((const STag *)pCtbEntry->ctbEntry.pTags, &tagVal); + if (IS_VAR_DATA_TYPE(pTagColumn->type)) { + pTagData = tagVal.pData; + nTagData = (int32_t)tagVal.nData; + } else { + pTagData = &(tagVal.i64); + nTagData = tDataTypes[pTagColumn->type].bytes; + } + } else { + // pTagData = pCtbEntry->ctbEntry.pTags; + // nTagData = ((const STag *)pCtbEntry->ctbEntry.pTags)->len; + } // update tag index #ifdef USE_INVERTED_INDEX @@ -790,8 +839,8 @@ static int metaUpdateTagIdx(SMeta *pMeta, const SMetaEntry *pCtbEntry) { int ret = indexPut((SIndex *)pMeta->pTagIvtIdx, tmGroup, tuid); indexMultiTermDestroy(tmGroup); #else - if (metaCreateTagIdxKey(pCtbEntry->ctbEntry.suid, pTagColumn->colId, pTagData, pTagColumn->type, pCtbEntry->uid, - &pTagIdxKey, &nTagIdxKey) < 0) { + if (metaCreateTagIdxKey(pCtbEntry->ctbEntry.suid, pTagColumn->colId, pTagData, nTagData, pTagColumn->type, + pCtbEntry->uid, &pTagIdxKey, &nTagIdxKey) < 0) { return -1; } tdbTbInsert(pMeta->pTagIdx, pTagIdxKey, nTagIdxKey, NULL, 0, &pMeta->txn); @@ -804,7 +853,7 @@ static int metaUpdateTagIdx(SMeta *pMeta, const SMetaEntry *pCtbEntry) { static int metaSaveToSkmDb(SMeta *pMeta, const SMetaEntry *pME) { SEncoder coder = {0}; - void *pVal = NULL; + void * pVal = NULL; int vLen = 0; int rcode = 0; SSkmDbKey skmDbKey = {0}; @@ -880,3 +929,11 @@ _err: metaULock(pMeta); return -1; } +// refactor later +void *metaGetIdx(SMeta *pMeta) { +#ifdef USE_INVERTED_INDEX + return pMeta->pTagIvtIdx; +#else + return pMeta->pTagIdx; +#endif +} diff --git a/source/dnode/vnode/src/sma/sma.c b/source/dnode/vnode/src/sma/sma.c index 0e7ce385a1c2aa225d21201af2fcc7f0ffd72d79..04f65275d7d20ab41564a8e8c6e67c908b3bc649 100644 --- a/source/dnode/vnode/src/sma/sma.c +++ b/source/dnode/vnode/src/sma/sma.c @@ -15,13 +15,12 @@ #include "sma.h" - // TODO: Who is responsible for resource allocate and release? 
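The reworked metaCreateTagIdxKey above lays out variable-length tag values as [VARSTR header][payload][uid] and fixed-length values as [payload][uid], which is exactly how metaFilteTableIds recovers the child-table uid during a scan. A hedged sketch of that decoding; STagIdxKey's fields are taken from this diff, but treat the offsets as derived from the memcpy logic rather than a public contract:

```c
// Hedged sketch: recover the child-table uid embedded at the tail of a tag
// index key, mirroring the extraction logic in metaFilteTableIds.
static tb_uid_t tagIdxKeyGetUidSketch(const STagIdxKey *pTagIdxKey) {
  if (IS_VAR_DATA_TYPE(pTagIdxKey->type)) {
    // layout: [VARSTR header][payload][uid]; varDataTLen() spans header+payload
    return *(tb_uid_t *)(pTagIdxKey->data + varDataTLen(pTagIdxKey->data));
  }
  // layout: [fixed-size payload][uid]
  return *(tb_uid_t *)(pTagIdxKey->data + tDataTypes[pTagIdxKey->type].bytes);
}
```

Appending the uid keeps every key unique even when many child tables share a tag value, and it is what lets metaFilteTableIds seed its range scan with an INT64_MIN or INT64_MAX uid sentinel depending on scan direction.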
int32_t tdProcessTSmaInsert(SSma* pSma, int64_t indexUid, const char* msg) { int32_t code = TSDB_CODE_SUCCESS; if ((code = tdProcessTSmaInsertImpl(pSma, indexUid, msg)) < 0) { - smaWarn("vgId:%d insert tsma data failed since %s", SMA_VID(pSma), tstrerror(terrno)); + smaWarn("vgId:%d, insert tsma data failed since %s", SMA_VID(pSma), tstrerror(terrno)); } // TODO: destroy SSDataBlocks(msg) return code; @@ -31,16 +30,16 @@ int32_t tdProcessTSmaCreate(SSma* pSma, int64_t version, const char* msg) { int32_t code = TSDB_CODE_SUCCESS; if ((code = tdProcessTSmaCreateImpl(pSma, version, msg)) < 0) { - smaWarn("vgId:%d create tsma failed since %s", SMA_VID(pSma), tstrerror(terrno)); + smaWarn("vgId:%d, create tsma failed since %s", SMA_VID(pSma), tstrerror(terrno)); } // TODO: destroy SSDataBlocks(msg) return code; } -int32_t tdUpdateExpireWindow(SSma* pSma, SSubmitReq* pMsg, int64_t version) { +int32_t tdUpdateExpireWindow(SSma* pSma, const SSubmitReq* pMsg, int64_t version) { int32_t code = TSDB_CODE_SUCCESS; if ((code = tdUpdateExpiredWindowImpl(pSma, pMsg, version)) < 0) { - smaWarn("vgId:%d update expired sma window failed since %s", SMA_VID(pSma), tstrerror(terrno)); + smaWarn("vgId:%d, update expired sma window failed since %s", SMA_VID(pSma), tstrerror(terrno)); } return code; } @@ -48,7 +47,15 @@ int32_t tdUpdateExpireWindow(SSma* pSma, SSubmitReq* pMsg, int64_t version) { int32_t tdGetTSmaData(SSma* pSma, char* pData, int64_t indexUid, TSKEY querySKey, int32_t nMaxResult) { int32_t code = TSDB_CODE_SUCCESS; if ((code = tdGetTSmaDataImpl(pSma, pData, indexUid, querySKey, nMaxResult)) < 0) { - smaWarn("vgId:%d get tSma data failed since %s", SMA_VID(pSma), tstrerror(terrno)); + smaWarn("vgId:%d, get tSma data failed since %s", SMA_VID(pSma), tstrerror(terrno)); + } + return code; +} + +int32_t smaGetTSmaDays(SVnodeCfg* pCfg, void* pCont, uint32_t contLen, int32_t *days) { + int32_t code = TSDB_CODE_SUCCESS; + if ((code = tdGetTSmaDaysImpl(pCfg, pCont, contLen, days)) < 0) { + smaWarn("vgId:%d get tSma days failed since %s", pCfg->vgId, tstrerror(terrno)); } return code; } diff --git a/source/dnode/vnode/src/sma/smaEnv.c b/source/dnode/vnode/src/sma/smaEnv.c index 8285b74e509f53a8ed3a9d2e5745d2f56135087e..179f573e8d72c3ae6938edb31e61ef6b9ec8a675 100644 --- a/source/dnode/vnode/src/sma/smaEnv.c +++ b/source/dnode/vnode/src/sma/smaEnv.c @@ -222,7 +222,7 @@ int32_t tdRefSmaStat(SSma *pSma, SSmaStat *pStat) { if (!pStat) return 0; int ref = T_REF_INC(pStat); - smaDebug("vgId:%d ref sma stat:%p, val:%d", SMA_VID(pSma), pStat, ref); + smaDebug("vgId:%d, ref sma stat:%p, val:%d", SMA_VID(pSma), pStat, ref); return 0; } @@ -230,7 +230,7 @@ int32_t tdUnRefSmaStat(SSma *pSma, SSmaStat *pStat) { if (!pStat) return 0; int ref = T_REF_DEC(pStat); - smaDebug("vgId:%d unref sma stat:%p, val:%d", SMA_VID(pSma), pStat, ref); + smaDebug("vgId:%d, unref sma stat:%p, val:%d", SMA_VID(pSma), pStat, ref); return 0; } @@ -278,7 +278,7 @@ static int32_t tdInitSmaStat(SSmaStat **pSmaStat, int8_t smaType) { void *tdFreeSmaStatItem(SSmaStatItem *pSmaStatItem) { if (pSmaStatItem) { - tdDestroyTSma(pSmaStatItem->pTSma); + tDestroyTSma(pSmaStatItem->pTSma); taosMemoryFreeClear(pSmaStatItem->pTSma); taosHashCleanup(pSmaStatItem->expiredWindows); taosMemoryFreeClear(pSmaStatItem); @@ -321,7 +321,7 @@ int32_t tdDestroySmaState(SSmaStat *pSmaStat, int8_t smaType) { int32_t tdLockSma(SSma *pSma) { int code = taosThreadMutexLock(&pSma->mutex); if (code != 0) { - smaError("vgId:%d failed to lock td since %s", SMA_VID(pSma), 
strerror(errno)); + smaError("vgId:%d, failed to lock td since %s", SMA_VID(pSma), strerror(errno)); terrno = TAOS_SYSTEM_ERROR(code); return -1; } @@ -334,7 +334,7 @@ int32_t tdUnLockSma(SSma *pSma) { pSma->locked = false; int code = taosThreadMutexUnlock(&pSma->mutex); if (code != 0) { - smaError("vgId:%d failed to unlock td since %s", SMA_VID(pSma), strerror(errno)); + smaError("vgId:%d, failed to unlock td since %s", SMA_VID(pSma), strerror(errno)); terrno = TAOS_SYSTEM_ERROR(code); return -1; } @@ -376,7 +376,7 @@ int32_t tdCheckAndInitSmaEnv(SSma *pSma, int8_t smaType) { if (did.level < 0 || did.id < 0) { tdUnLockSma(pSma); - smaError("vgId:%d init sma env failed since invalid did(%d,%d)", SMA_VID(pSma), did.level, did.id); + smaError("vgId:%d, init sma env failed since invalid did(%d,%d)", SMA_VID(pSma), did.level, did.id); return TSDB_CODE_FAILED; } diff --git a/source/dnode/vnode/src/sma/smaOpen.c b/source/dnode/vnode/src/sma/smaOpen.c index 2a74fe78cbc66a3873857347df010190554e1e76..dde6578054ac43965b9c2300dd2d118baea1d25e 100644 --- a/source/dnode/vnode/src/sma/smaOpen.c +++ b/source/dnode/vnode/src/sma/smaOpen.c @@ -104,7 +104,7 @@ int32_t smaOpen(SVnode *pVnode) { taosThreadMutexInit(&pSma->mutex, NULL); pSma->locked = false; - if (vnodeIsRollup(pVnode)) { + if (VND_IS_RSMA(pVnode)) { STsdbKeepCfg keepCfg = {0}; for (int i = 0; i < TSDB_RETENTION_MAX; ++i) { if (i == TSDB_RETENTION_L0) { diff --git a/source/dnode/vnode/src/sma/smaRollup.c b/source/dnode/vnode/src/sma/smaRollup.c index 88af049d0bd298e58e51286e0980fd13a7872734..80c8d20572bc9ef6658d3bc46116874e9ff68a42 100644 --- a/source/dnode/vnode/src/sma/smaRollup.c +++ b/source/dnode/vnode/src/sma/smaRollup.c @@ -18,7 +18,7 @@ static FORCE_INLINE int32_t tdUidStorePut(STbUidStore *pStore, tb_uid_t suid, tb_uid_t *uid); static FORCE_INLINE int32_t tdUpdateTbUidListImpl(SSma *pSma, tb_uid_t *suid, SArray *tbUids); static FORCE_INLINE int32_t tdExecuteRSmaImpl(SSma *pSma, const void *pMsg, int32_t inputType, qTaskInfo_t *taskInfo, - STSchema *pTSchema, tb_uid_t suid, tb_uid_t uid, int8_t level); + STSchema *pTSchema, tb_uid_t suid, int8_t level); struct SRSmaInfo { void *taskInfo[TSDB_RETENTION_L2]; // qTaskInfo_t @@ -58,30 +58,30 @@ static FORCE_INLINE int32_t tdUpdateTbUidListImpl(SSma *pSma, tb_uid_t *suid, SA if (!suid || !tbUids) { terrno = TSDB_CODE_INVALID_PTR; - smaError("vgId:%d failed to get rsma info for uid:%" PRIi64 " since %s", SMA_VID(pSma), *suid, terrstr(terrno)); + smaError("vgId:%d, failed to get rsma info for uid:%" PRIi64 " since %s", SMA_VID(pSma), *suid, terrstr(terrno)); return TSDB_CODE_FAILED; } pRSmaInfo = taosHashGet(SMA_STAT_INFO_HASH(pStat), suid, sizeof(tb_uid_t)); if (!pRSmaInfo || !(pRSmaInfo = *(SRSmaInfo **)pRSmaInfo)) { - smaError("vgId:%d failed to get rsma info for uid:%" PRIi64, SMA_VID(pSma), *suid); + smaError("vgId:%d, failed to get rsma info for uid:%" PRIi64, SMA_VID(pSma), *suid); terrno = TSDB_CODE_TDB_INVALID_SMA_STAT; return TSDB_CODE_FAILED; } if (pRSmaInfo->taskInfo[0] && (qUpdateQualifiedTableId(pRSmaInfo->taskInfo[0], tbUids, true) != 0)) { - smaError("vgId:%d update tbUidList failed for uid:%" PRIi64 " since %s", SMA_VID(pSma), *suid, terrstr(terrno)); + smaError("vgId:%d, update tbUidList failed for uid:%" PRIi64 " since %s", SMA_VID(pSma), *suid, terrstr(terrno)); return TSDB_CODE_FAILED; } else { - smaDebug("vgId:%d update tbUidList succeed for qTaskInfo:%p with suid:%" PRIi64 ", uid:%" PRIi64, SMA_VID(pSma), + smaDebug("vgId:%d, update tbUidList succeed for qTaskInfo:%p 
with suid:%" PRIi64 ", uid:%" PRIi64, SMA_VID(pSma), pRSmaInfo->taskInfo[0], *suid, *(int64_t *)taosArrayGet(tbUids, 0)); } if (pRSmaInfo->taskInfo[1] && (qUpdateQualifiedTableId(pRSmaInfo->taskInfo[1], tbUids, true) != 0)) { - smaError("vgId:%d update tbUidList failed for uid:%" PRIi64 " since %s", SMA_VID(pSma), *suid, terrstr(terrno)); + smaError("vgId:%d, update tbUidList failed for uid:%" PRIi64 " since %s", SMA_VID(pSma), *suid, terrstr(terrno)); return TSDB_CODE_FAILED; } else { - smaDebug("vgId:%d update tbUidList succeed for qTaskInfo:%p with suid:%" PRIi64 ", uid:%" PRIi64, SMA_VID(pSma), + smaDebug("vgId:%d, update tbUidList succeed for qTaskInfo:%p with suid:%" PRIi64 ", uid:%" PRIi64, SMA_VID(pSma), pRSmaInfo->taskInfo[1], *suid, *(int64_t *)taosArrayGet(tbUids, 0)); } @@ -165,16 +165,19 @@ int32_t tdFetchTbUidList(SSma *pSma, STbUidStore **ppStore, tb_uid_t suid, tb_ui * @param pReq * @return int32_t */ -int32_t tdProcessRSmaCreate(SSma *pSma, SMeta *pMeta, SVCreateStbReq *pReq, SMsgCb *pMsgCb) { +int32_t tdProcessRSmaCreate(SVnode *pVnode, SVCreateStbReq *pReq) { + SSma *pSma = pVnode->pSma; + SMeta *pMeta = pVnode->pMeta; + SMsgCb *pMsgCb = &pVnode->msgCb; if (!pReq->rollup) { - smaTrace("vgId:%d return directly since no rollup for stable %s %" PRIi64, SMA_VID(pSma), pReq->name, pReq->suid); + smaTrace("vgId:%d, return directly since no rollup for stable %s %" PRIi64, SMA_VID(pSma), pReq->name, pReq->suid); return TSDB_CODE_SUCCESS; } SRSmaParam *param = &pReq->pRSmaParam; if ((param->qmsg1Len == 0) && (param->qmsg2Len == 0)) { - smaWarn("vgId:%d no qmsg1/qmsg2 for rollup stable %s %" PRIi64, SMA_VID(pSma), pReq->name, pReq->suid); + smaWarn("vgId:%d, no qmsg1/qmsg2 for rollup stable %s %" PRIi64, SMA_VID(pSma), pReq->name, pReq->suid); return TSDB_CODE_SUCCESS; } @@ -189,7 +192,7 @@ int32_t tdProcessRSmaCreate(SSma *pSma, SMeta *pMeta, SVCreateStbReq *pReq, SMsg pRSmaInfo = taosHashGet(SMA_STAT_INFO_HASH(pStat), &pReq->suid, sizeof(tb_uid_t)); if (pRSmaInfo) { - smaWarn("vgId:%d rsma info already exists for stb: %s, %" PRIi64, SMA_VID(pSma), pReq->name, pReq->suid); + smaWarn("vgId:%d, rsma info already exists for stb: %s, %" PRIi64, SMA_VID(pSma), pReq->name, pReq->suid); return TSDB_CODE_SUCCESS; } @@ -210,6 +213,7 @@ int32_t tdProcessRSmaCreate(SSma *pSma, SMeta *pMeta, SVCreateStbReq *pReq, SMsg .reader = pReadHandle, .meta = pMeta, .pMsgCb = pMsgCb, + .vnode = pVnode, }; if (param->qmsg1) { @@ -234,7 +238,7 @@ int32_t tdProcessRSmaCreate(SSma *pSma, SMeta *pMeta, SVCreateStbReq *pReq, SMsg TSDB_CODE_SUCCESS) { return TSDB_CODE_FAILED; } else { - smaDebug("vgId:%d register rsma info succeed for suid:%" PRIi64, SMA_VID(pSma), pReq->suid); + smaDebug("vgId:%d, register rsma info succeed for suid:%" PRIi64, SMA_VID(pSma), pReq->suid); } return TSDB_CODE_SUCCESS; @@ -364,17 +368,17 @@ static int32_t tdFetchSubmitReqSuids(SSubmitReq *pMsg, STbUidStore *pStore) { } static FORCE_INLINE int32_t tdExecuteRSmaImpl(SSma *pSma, const void *pMsg, int32_t inputType, qTaskInfo_t *taskInfo, - STSchema *pTSchema, tb_uid_t suid, tb_uid_t uid, int8_t level) { + STSchema *pTSchema, tb_uid_t suid, int8_t level) { SArray *pResult = NULL; if (!taskInfo) { - smaDebug("vgId:%d no qTaskInfo to execute rsma %" PRIi8 " task for suid:%" PRIu64, SMA_VID(pSma), level, suid); + smaDebug("vgId:%d, no qTaskInfo to execute rsma %" PRIi8 " task for suid:%" PRIu64, SMA_VID(pSma), level, suid); return TSDB_CODE_SUCCESS; } - smaDebug("vgId:%d execute rsma %" PRIi8 " task for qTaskInfo:%p suid:%" PRIu64, 
SMA_VID(pSma), level, taskInfo, suid); + smaDebug("vgId:%d, execute rsma %" PRIi8 " task for qTaskInfo:%p suid:%" PRIu64, SMA_VID(pSma), level, taskInfo, suid); - qSetStreamInput(taskInfo, pMsg, inputType); + qSetStreamInput(taskInfo, pMsg, inputType, true); while (1) { SSDataBlock *output = NULL; uint64_t ts; @@ -399,7 +403,7 @@ static FORCE_INLINE int32_t tdExecuteRSmaImpl(SSma *pSma, const void *pMsg, int3 blockDebugShowData(pResult); STsdb *sinkTsdb = (level == TSDB_RETENTION_L1 ? pSma->pRSmaTsdb1 : pSma->pRSmaTsdb2); SSubmitReq *pReq = NULL; - if (buildSubmitReqFromDataBlock(&pReq, pResult, pTSchema, SMA_VID(pSma), uid, suid) != 0) { + if (buildSubmitReqFromDataBlock(&pReq, pResult, pTSchema, SMA_VID(pSma), suid) != 0) { taosArrayDestroy(pResult); return TSDB_CODE_FAILED; } @@ -410,7 +414,7 @@ static FORCE_INLINE int32_t tdExecuteRSmaImpl(SSma *pSma, const void *pMsg, int3 } taosMemoryFreeClear(pReq); } else { - smaWarn("vgId:%d no rsma % " PRIi8 " data generated since %s", SMA_VID(pSma), level, tstrerror(terrno)); + smaDebug("vgId:%d, no rsma % " PRIi8 " data generated since %s", SMA_VID(pSma), level, tstrerror(terrno)); } taosArrayDestroy(pResult); @@ -418,38 +422,36 @@ static FORCE_INLINE int32_t tdExecuteRSmaImpl(SSma *pSma, const void *pMsg, int3 return TSDB_CODE_SUCCESS; } -static int32_t tdExecuteRSma(SSma *pSma, const void *pMsg, int32_t inputType, tb_uid_t suid, tb_uid_t uid) { +static int32_t tdExecuteRSma(SSma *pSma, const void *pMsg, int32_t inputType, tb_uid_t suid) { SSmaEnv *pEnv = SMA_RSMA_ENV(pSma); if (!pEnv) { // only applicable when rsma env exists return TSDB_CODE_SUCCESS; } - ASSERT(uid != 0); // TODO: remove later - SSmaStat *pStat = SMA_ENV_STAT(pEnv); SRSmaInfo *pRSmaInfo = NULL; pRSmaInfo = taosHashGet(SMA_STAT_INFO_HASH(pStat), &suid, sizeof(tb_uid_t)); if (!pRSmaInfo || !(pRSmaInfo = *(SRSmaInfo **)pRSmaInfo)) { - smaDebug("vgId:%d no rsma info for suid:%" PRIu64, SMA_VID(pSma), suid); + smaDebug("vgId:%d, no rsma info for suid:%" PRIu64, SMA_VID(pSma), suid); return TSDB_CODE_SUCCESS; } if (!pRSmaInfo->taskInfo[0]) { - smaDebug("vgId:%d no rsma qTaskInfo for suid:%" PRIu64, SMA_VID(pSma), suid); + smaDebug("vgId:%d, no rsma qTaskInfo for suid:%" PRIu64, SMA_VID(pSma), suid); return TSDB_CODE_SUCCESS; } if (inputType == STREAM_DATA_TYPE_SUBMIT_BLOCK) { // TODO: use the proper schema instead of 0, and cache STSchema in cache - STSchema *pTSchema = metaGetTbTSchema(SMA_META(pSma), suid, 1); + STSchema *pTSchema = metaGetTbTSchema(SMA_META(pSma), suid, -1); if (!pTSchema) { terrno = TSDB_CODE_TDB_IVD_TB_SCHEMA_VERSION; return TSDB_CODE_FAILED; } - tdExecuteRSmaImpl(pSma, pMsg, inputType, pRSmaInfo->taskInfo[0], pTSchema, suid, uid, TSDB_RETENTION_L1); - tdExecuteRSmaImpl(pSma, pMsg, inputType, pRSmaInfo->taskInfo[1], pTSchema, suid, uid, TSDB_RETENTION_L2); + tdExecuteRSmaImpl(pSma, pMsg, inputType, pRSmaInfo->taskInfo[0], pTSchema, suid, TSDB_RETENTION_L1); + tdExecuteRSmaImpl(pSma, pMsg, inputType, pRSmaInfo->taskInfo[1], pTSchema, suid, TSDB_RETENTION_L2); taosMemoryFree(pTSchema); } @@ -468,12 +470,12 @@ int32_t tdProcessRSmaSubmit(SSma *pSma, void *pMsg, int32_t inputType) { tdFetchSubmitReqSuids(pMsg, &uidStore); if (uidStore.suid != 0) { - tdExecuteRSma(pSma, pMsg, inputType, uidStore.suid, uidStore.uid); + tdExecuteRSma(pSma, pMsg, inputType, uidStore.suid); void *pIter = taosHashIterate(uidStore.uidHash, NULL); while (pIter) { tb_uid_t *pTbSuid = (tb_uid_t *)taosHashGetKey(pIter, NULL); - tdExecuteRSma(pSma, pMsg, inputType, *pTbSuid, 0); + 
tdExecuteRSma(pSma, pMsg, inputType, *pTbSuid); pIter = taosHashIterate(uidStore.uidHash, pIter); } diff --git a/source/dnode/vnode/src/sma/smaTimeRange.c b/source/dnode/vnode/src/sma/smaTimeRange.c index f771e73c8aa4210fd01b5c871877cbdaeb0fb2bc..b72be06455d8181dca8a27ea1c58cfa72ddef39f 100644 --- a/source/dnode/vnode/src/sma/smaTimeRange.c +++ b/source/dnode/vnode/src/sma/smaTimeRange.c @@ -326,13 +326,13 @@ int32_t tdProcessTSmaInsertImpl(SSma *pSma, int64_t indexUid, const char *msg) { if (!pDataBlocks) { terrno = TSDB_CODE_INVALID_PTR; - smaWarn("vgId:%d insert tSma data failed since pDataBlocks is NULL", SMA_VID(pSma)); + smaWarn("vgId:%d, insert tSma data failed since pDataBlocks is NULL", SMA_VID(pSma)); return terrno; } if (taosArrayGetSize(pDataBlocks) <= 0) { terrno = TSDB_CODE_INVALID_PARA; - smaWarn("vgId:%d insert tSma data failed since pDataBlocks is empty", SMA_VID(pSma)); + smaWarn("vgId:%d, insert tSma data failed since pDataBlocks is empty", SMA_VID(pSma)); return TSDB_CODE_FAILED; } @@ -487,11 +487,11 @@ int32_t tdProcessTSmaInsertImpl(SSma *pSma, int64_t indexUid, const char *msg) { smaCloseDBF(&tSmaH.dFile); } tdSetTSmaDataFile(&tSmaH, indexUid, fid); - smaDebug("@@@ vgId:%d write to DBF %s, days:%d, interval:%" PRIi64 ", storageLevel:%" PRIi32 + smaDebug("vgId:%d, write to DBF %s, days:%d, interval:%" PRIi64 ", storageLevel:%" PRIi32 " queryKey:%" PRIi64, SMA_VID(pSma), tSmaH.dFile.path, minutePerFile, tSmaH.interval, storageLevel, testSkey); if (smaOpenDBF(pEnv->dbEnv, &tSmaH.dFile) != 0) { - smaWarn("vgId:%d open DB file %s failed since %s", SMA_VID(pSma), + smaWarn("vgId:%d, open DB file %s failed since %s", SMA_VID(pSma), tSmaH.dFile.path ? tSmaH.dFile.path : "path is NULL", tstrerror(terrno)); tdDestroyTSmaWriteH(&tSmaH); tdUnRefSmaStat(pSma, pStat); @@ -501,7 +501,7 @@ int32_t tdProcessTSmaInsertImpl(SSma *pSma, int64_t indexUid, const char *msg) { } if (tdInsertTSmaBlocks(&tSmaH, &smaKey, SMA_KEY_LEN, dataBuf, tlen, &pEnv->txn) != 0) { - smaWarn("vgId:%d insert tsma data blocks fail for index %" PRIi64 ", skey %" PRIi64 ", groupId %" PRIi64 + smaWarn("vgId:%d, insert tsma data blocks fail for index %" PRIi64 ", skey %" PRIi64 ", groupId %" PRIi64 " since %s", SMA_VID(pSma), indexUid, skey, groupId, tstrerror(terrno)); tdSmaEndCommit(pEnv); @@ -510,14 +510,14 @@ int32_t tdProcessTSmaInsertImpl(SSma *pSma, int64_t indexUid, const char *msg) { return TSDB_CODE_FAILED; } - smaDebug("vgId:%d insert tsma data blocks success for index %" PRIi64 ", skey %" PRIi64 ", groupId %" PRIi64, + smaDebug("vgId:%d, insert tsma data blocks success for index %" PRIi64 ", skey %" PRIi64 ", groupId %" PRIi64, SMA_VID(pSma), indexUid, skey, groupId); // TODO:tsdbEndTSmaCommit(); // Step 3: reset the SSmaStat tdResetExpiredWindow(pSma, pStat, indexUid, skey); } else { - smaWarn("vgId:%d invalid data skey:%" PRIi64 ", tlen %" PRIi32 " during insert tSma data for %" PRIi64, + smaWarn("vgId:%d, invalid data skey:%" PRIi64 ", tlen %" PRIi32 " during insert tSma data for %" PRIi64, SMA_VID(pSma), skey, tlen, indexUid); } } @@ -532,7 +532,7 @@ int32_t tdProcessTSmaInsertImpl(SSma *pSma, int64_t indexUid, const char *msg) { int32_t tdDropTSmaData(SSma *pSma, int64_t indexUid) { int32_t code = TSDB_CODE_SUCCESS; if ((code = tdDropTSmaDataImpl(pSma, indexUid)) < 0) { - smaWarn("vgId:%d drop tSma data failed since %s", SMA_VID(pSma), tstrerror(terrno)); + smaWarn("vgId:%d, drop tSma data failed since %s", SMA_VID(pSma), tstrerror(terrno)); } return code; } @@ -553,11 +553,11 @@ static int32_t 
tdInsertTSmaBlocks(STSmaWriteH *pSmaH, void *smaKey, int32_t keyL // TODO: insert tsma data blocks into B+Tree(TTB) if (smaSaveSmaToDB(pDBFile, smaKey, keyLen, pData, dataLen, txn) != 0) { - smaWarn("vgId:%d insert tsma data blocks into %s: smaKey %" PRIx64 "-%" PRIx64 ", dataLen %" PRIu32 " fail", + smaWarn("vgId:%d, insert tsma data blocks into %s: smaKey %" PRIx64 "-%" PRIx64 ", dataLen %" PRIu32 " fail", SMA_VID(pSmaH->pSma), pDBFile->path, *(int64_t *)smaKey, *(int64_t *)POINTER_SHIFT(smaKey, 8), dataLen); return TSDB_CODE_FAILED; } - smaDebug("vgId:%d insert tsma data blocks into %s: smaKey %" PRIx64 "-%" PRIx64 ", dataLen %" PRIu32 " succeed", + smaDebug("vgId:%d, insert tsma data blocks into %s: smaKey %" PRIx64 "-%" PRIx64 ", dataLen %" PRIu32 " succeed", SMA_VID(pSmaH->pSma), pDBFile->path, *(int64_t *)smaKey, *(int64_t *)POINTER_SHIFT(smaKey, 8), dataLen); #ifdef _TEST_SMA_PRINT_DEBUG_LOG_ @@ -565,7 +565,7 @@ static int32_t tdInsertTSmaBlocks(STSmaWriteH *pSmaH, void *smaKey, int32_t keyL void *data = tdGetSmaDataByKey(pDBFile, smaKey, keyLen, &valueSize); ASSERT(data != NULL); for (uint32_t v = 0; v < valueSize; v += 8) { - smaWarn("vgId:%d insert sma data val[%d] %" PRIi64, REPO_ID(pSmaH->pTsdb), v, *(int64_t *)POINTER_SHIFT(data, v)); + smaWarn("vgId:%d, insert sma data val[%d] %" PRIi64, REPO_ID(pSmaH->pTsdb), v, *(int64_t *)POINTER_SHIFT(data, v)); } #endif return TSDB_CODE_SUCCESS; @@ -594,11 +594,11 @@ static int32_t tdResetExpiredWindow(SSma *pSma, SSmaStat *pStat, int64_t indexUi if (taosHashRemove(pItem->expiredWindows, &skey, sizeof(TSKEY)) != 0) { // error handling tdUnRefSmaStat(pSma, pStat); - smaWarn("vgId:%d remove skey %" PRIi64 " from expired window for sma index %" PRIi64 " fail", SMA_VID(pSma), skey, + smaWarn("vgId:%d, remove skey %" PRIi64 " from expired window for sma index %" PRIi64 " fail", SMA_VID(pSma), skey, indexUid); return TSDB_CODE_FAILED; } - smaDebug("vgId:%d remove skey %" PRIi64 " from expired window for sma index %" PRIi64 " succeed", SMA_VID(pSma), + smaDebug("vgId:%d, remove skey %" PRIi64 " from expired window for sma index %" PRIi64 " succeed", SMA_VID(pSma), skey, indexUid); // TODO: use a standalone interface to received state upate notification from stream computing module. 
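tdResetExpiredWindow above is one leg of the expired-window protocol: ingest marks a window's start key as pending recalculation, the stream-computing sink clears the mark once the fresh sma result is written, and queries probe the mark to decide between cached sma data and raw TS data. A sketch of the three touch points, assuming TDengine's internal sma.h header and using only the taosHash calls already visible in this patch; the demo* helpers are placeholders, not real TDengine functions.

#include <stdbool.h>
#include "sma.h" /* TDengine-internal umbrella header, as used in this patch */

/* Ingest side: remember the window pending recalculation (cf. tdSetExpiredWindow). */
static int32_t demoMarkExpired(SHashObj *expiredWindows, TSKEY winSKey, int64_t version) {
  return taosHashPut(expiredWindows, &winSKey, sizeof(TSKEY), &version, sizeof(version));
}

/* Sink side: the recalculated sma result landed, so the window is valid
 * again (cf. tdResetExpiredWindow above). */
static int32_t demoResetExpired(SHashObj *expiredWindows, TSKEY skey) {
  return taosHashRemove(expiredWindows, &skey, sizeof(TSKEY));
}

/* Query side: a hit means the cached sma is stale and raw TS data should be
 * read instead (cf. tdGetTSmaDataImpl). */
static bool demoIsExpired(SHashObj *expiredWindows, TSKEY querySKey) {
  return taosHashGet(expiredWindows, &querySKey, sizeof(TSKEY)) != NULL;
}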
/** @@ -612,7 +612,7 @@ static int32_t tdResetExpiredWindow(SSma *pSma, SSmaStat *pStat, int64_t indexUi } else { // error handling tdUnRefSmaStat(pSma, pStat); - smaWarn("vgId:%d expired window %" PRIi64 " not exists for sma index %" PRIi64, SMA_VID(pSma), skey, indexUid); + smaWarn("vgId:%d, expired window %" PRIi64 " not exists for sma index %" PRIi64, SMA_VID(pSma), skey, indexUid); return TSDB_CODE_FAILED; } @@ -632,19 +632,19 @@ static int32_t tdDropTSmaDataImpl(SSma *pSma, int64_t indexUid) { // clear local cache if (pEnv) { - smaDebug("vgId:%d drop tSma local cache for %" PRIi64, SMA_VID(pSma), indexUid); + smaDebug("vgId:%d, drop tSma local cache for %" PRIi64, SMA_VID(pSma), indexUid); SSmaStatItem *pItem = taosHashGet(SMA_ENV_STAT_ITEMS(pEnv), &indexUid, sizeof(indexUid)); if ((pItem) || ((pItem = *(SSmaStatItem **)pItem))) { if (tdSmaStatIsDropped(pItem)) { - smaDebug("vgId:%d tSma stat is already dropped for %" PRIi64, SMA_VID(pSma), indexUid); + smaDebug("vgId:%d, tSma stat is already dropped for %" PRIi64, SMA_VID(pSma), indexUid); return TSDB_CODE_TDB_INVALID_ACTION; // TODO: duplicate drop msg would be intercepted by mnode } tdWLockSmaEnv(pEnv); if (tdSmaStatIsDropped(pItem)) { tdUnLockSmaEnv(pEnv); - smaDebug("vgId:%d tSma stat is already dropped for %" PRIi64, SMA_VID(pSma), indexUid); + smaDebug("vgId:%d, tSma stat is already dropped for %" PRIi64, SMA_VID(pSma), indexUid); return TSDB_CODE_TDB_INVALID_ACTION; // TODO: duplicate drop msg would be intercepted by mnode } tdSmaStatSetDropped(pItem); @@ -654,19 +654,19 @@ static int32_t tdDropTSmaDataImpl(SSma *pSma, int64_t indexUid) { int32_t refVal = INT32_MAX; while (true) { if ((refVal = T_REF_VAL_GET(SMA_ENV_STAT(pEnv))) <= 0) { - smaDebug("vgId:%d drop index %" PRIi64 " since refVal=%d", SMA_VID(pSma), indexUid, refVal); + smaDebug("vgId:%d, drop index %" PRIi64 " since refVal=%d", SMA_VID(pSma), indexUid, refVal); break; } - smaDebug("vgId:%d wait 1s to drop index %" PRIi64 " since refVal=%d", SMA_VID(pSma), indexUid, refVal); + smaDebug("vgId:%d, wait 1s to drop index %" PRIi64 " since refVal=%d", SMA_VID(pSma), indexUid, refVal); taosSsleep(1); if (++nSleep > SMA_DROP_EXPIRED_TIME) { - smaDebug("vgId:%d drop index %" PRIi64 " after wait %d (refVal=%d)", SMA_VID(pSma), indexUid, nSleep, refVal); + smaDebug("vgId:%d, drop index %" PRIi64 " after wait %d (refVal=%d)", SMA_VID(pSma), indexUid, nSleep, refVal); break; }; } tdFreeSmaStatItem(pItem); - smaDebug("vgId:%d getTSmaDataImpl failed since no index %" PRIi64 " in local cache", SMA_VID(pSma), indexUid); + smaDebug("vgId:%d, getTSmaDataImpl failed since no index %" PRIi64 " in local cache", SMA_VID(pSma), indexUid); } } // clear sma data files @@ -690,7 +690,7 @@ int32_t tdGetTSmaDataImpl(SSma *pSma, char *pData, int64_t indexUid, TSKEY query if (!pEnv) { terrno = TSDB_CODE_INVALID_PTR; - smaWarn("vgId:%d getTSmaDataImpl failed since pTSmaEnv is NULL", SMA_VID(pSma)); + smaWarn("vgId:%d, getTSmaDataImpl failed since pTSmaEnv is NULL", SMA_VID(pSma)); return TSDB_CODE_FAILED; } @@ -703,7 +703,7 @@ int32_t tdGetTSmaDataImpl(SSma *pSma, char *pData, int64_t indexUid, TSKEY query // it's NULL. 
tdUnRefSmaStat(pSma, pStat); terrno = TSDB_CODE_TDB_INVALID_ACTION; - smaDebug("vgId:%d getTSmaDataImpl failed since no index %" PRIi64, SMA_VID(pSma), indexUid); + smaDebug("vgId:%d, getTSmaDataImpl failed since no index %" PRIi64, SMA_VID(pSma), indexUid); return TSDB_CODE_FAILED; } @@ -722,17 +722,17 @@ int32_t tdGetTSmaDataImpl(SSma *pSma, char *pData, int64_t indexUid, TSKEY query if (!tdSmaStatIsOK(pItem, &smaStat)) { // TODO: multiple check for large scale sma query tdUnRefSmaStat(pSma, pStat); terrno = TSDB_CODE_TDB_INVALID_SMA_STAT; - smaWarn("vgId:%d getTSmaDataImpl failed from index %" PRIi64 " since %s %" PRIi8, SMA_VID(pSma), indexUid, + smaWarn("vgId:%d, getTSmaDataImpl failed from index %" PRIi64 " since %s %" PRIi8, SMA_VID(pSma), indexUid, tstrerror(terrno), smaStat); return TSDB_CODE_FAILED; } if (taosHashGet(pItem->expiredWindows, &querySKey, sizeof(TSKEY))) { // TODO: mark this window as expired. - smaDebug("vgId:%d skey %" PRIi64 " of window exists in expired window for index %" PRIi64, SMA_VID(pSma), querySKey, + smaDebug("vgId:%d, skey %" PRIi64 " of window exists in expired window for index %" PRIi64, SMA_VID(pSma), querySKey, indexUid); } else { - smaDebug("vgId:%d skey %" PRIi64 " of window not in expired window for index %" PRIi64, SMA_VID(pSma), querySKey, + smaDebug("vgId:%d, skey %" PRIi64 " of window not in expired window for index %" PRIi64, SMA_VID(pSma), querySKey, indexUid); } @@ -750,7 +750,7 @@ int32_t tdGetTSmaDataImpl(SSma *pSma, char *pData, int64_t indexUid, TSKEY query smaDebug("### vgId:%d read from DBF %s days:%d, interval:%" PRIi64 ", storageLevel:%" PRIi8 " queryKey:%" PRIi64, SMA_VID(pSma), tReadH.dFile.path, tReadH.days, tReadH.interval, tReadH.storageLevel, querySKey); if (smaOpenDBF(pEnv->dbEnv, &tReadH.dFile) != 0) { - smaWarn("vgId:%d open DBF %s failed since %s", SMA_VID(pSma), tReadH.dFile.path, tstrerror(terrno)); + smaWarn("vgId:%d, open DBF %s failed since %s", SMA_VID(pSma), tReadH.dFile.path, tstrerror(terrno)); return TSDB_CODE_FAILED; } @@ -759,13 +759,13 @@ int32_t tdGetTSmaDataImpl(SSma *pSma, char *pData, int64_t indexUid, TSKEY query int64_t queryGroupId = 0; tdEncodeTSmaKey(queryGroupId, querySKey, (void **)&pSmaKey); - smaDebug("vgId:%d get sma data from %s: smaKey %" PRIx64 "-%" PRIx64 ", keyLen %d", SMA_VID(pSma), tReadH.dFile.path, + smaDebug("vgId:%d, get sma data from %s: smaKey %" PRIx64 "-%" PRIx64 ", keyLen %d", SMA_VID(pSma), tReadH.dFile.path, *(int64_t *)smaKey, *(int64_t *)POINTER_SHIFT(smaKey, 8), SMA_KEY_LEN); void *result = NULL; int32_t valueSize = 0; if (!(result = smaGetSmaDataByKey(&tReadH.dFile, smaKey, SMA_KEY_LEN, &valueSize))) { - smaWarn("vgId:%d get sma data failed from smaIndex %" PRIi64 ", smaKey %" PRIx64 "-%" PRIx64 " since %s", + smaWarn("vgId:%d, get sma data failed from smaIndex %" PRIi64 ", smaKey %" PRIx64 "-%" PRIx64 " since %s", SMA_VID(pSma), indexUid, *(int64_t *)smaKey, *(int64_t *)POINTER_SHIFT(smaKey, 8), tstrerror(terrno)); smaCloseDBF(&tReadH.dFile); return TSDB_CODE_FAILED; @@ -774,7 +774,7 @@ int32_t tdGetTSmaDataImpl(SSma *pSma, char *pData, int64_t indexUid, TSKEY query #ifdef _TEST_SMA_PRINT_DEBUG_LOG_ for (uint32_t v = 0; v < valueSize; v += 8) { - smaWarn("vgId:%d get sma data v[%d]=%" PRIi64, SMA_VID(pSma), v, *(int64_t *)POINTER_SHIFT(result, v)); + smaWarn("vgId:%d, get sma data v[%d]=%" PRIi64, SMA_VID(pSma), v, *(int64_t *)POINTER_SHIFT(result, v)); } #endif taosMemoryFreeClear(result); // TODO: fill the result to output @@ -828,7 +828,7 @@ int32_t tdDropTSma(SSma 
*pSma, char *pMsg) { // TODO: send msg to stream computing to drop tSma // if ((send msg to stream computing) < 0) { - // tdDestroyTSma(&vCreateSmaReq); + // tDestroyTSma(&vCreateSmaReq); // return -1; // } // @@ -888,7 +888,7 @@ static int32_t tdSetExpiredWindow(SSma *pSma, SHashObj *pItemsHash, int64_t inde terrno = TSDB_CODE_TDB_NO_SMA_INDEX_IN_META; taosHashCleanup(pItem->expiredWindows); taosMemoryFree(pItem); - smaWarn("vgId:%d set expire window, get tsma meta failed for smaIndex %" PRIi64 " since %s", SMA_VID(pSma), + smaWarn("vgId:%d, set expire window, get tsma meta failed for smaIndex %" PRIi64 " since %s", SMA_VID(pSma), indexUid, tstrerror(terrno)); return TSDB_CODE_FAILED; } @@ -915,12 +915,12 @@ static int32_t tdSetExpiredWindow(SSma *pSma, SHashObj *pItemsHash, int64_t inde taosHashCleanup(pItem->expiredWindows); taosMemoryFreeClear(pItem->pTSma); taosHashRemove(pItemsHash, &indexUid, sizeof(indexUid)); - smaWarn("vgId:%d smaIndex %" PRIi64 ", put skey %" PRIi64 " to expire window fail", SMA_VID(pSma), indexUid, + smaWarn("vgId:%d, smaIndex %" PRIi64 ", put skey %" PRIi64 " to expire window fail", SMA_VID(pSma), indexUid, winSKey); return TSDB_CODE_FAILED; } - smaDebug("vgId:%d smaIndex %" PRIi64 ", put skey %" PRIi64 " to expire window succeed", SMA_VID(pSma), indexUid, + smaDebug("vgId:%d, smaIndex %" PRIi64 ", put skey %" PRIi64 " to expire window succeed", SMA_VID(pSma), indexUid, winSKey); return TSDB_CODE_SUCCESS; } @@ -932,21 +932,21 @@ static int32_t tdSetExpiredWindow(SSma *pSma, SHashObj *pItemsHash, int64_t inde * @param msg SSubmitReq * @return int32_t */ -int32_t tdUpdateExpiredWindowImpl(SSma *pSma, SSubmitReq *pMsg, int64_t version) { +int32_t tdUpdateExpiredWindowImpl(SSma *pSma, const SSubmitReq *pMsg, int64_t version) { // no time-range-sma, just return success if (atomic_load_16(&SMA_TSMA_NUM(pSma)) <= 0) { - smaTrace("vgId:%d not update expire window since no tSma", SMA_VID(pSma)); + smaTrace("vgId:%d, not update expire window since no tSma", SMA_VID(pSma)); return TSDB_CODE_SUCCESS; } if (!SMA_META(pSma)) { terrno = TSDB_CODE_INVALID_PTR; - smaError("vgId:%d update expire window failed since no meta ptr", SMA_VID(pSma)); + smaError("vgId:%d, update expire window failed since no meta ptr", SMA_VID(pSma)); return TSDB_CODE_FAILED; } if (tdCheckAndInitSmaEnv(pSma, TSDB_SMA_TYPE_TIME_RANGE) < 0) { - smaError("vgId:%d init sma env failed since %s", SMA_VID(pSma), terrstr(terrno)); + smaError("vgId:%d, init sma env failed since %s", SMA_VID(pSma), terrstr(terrno)); terrno = TSDB_CODE_TDB_INIT_FAILED; return TSDB_CODE_FAILED; } @@ -982,25 +982,25 @@ int32_t tdUpdateExpiredWindowImpl(SSma *pSma, SSubmitReq *pMsg, int64_t version) SSubmitBlkIter blkIter = {0}; if (tInitSubmitBlkIter(&msgIter, pBlock, &blkIter) < 0) { - pSW = tdFreeTSmaWrapper(pSW, false); + pSW = tFreeTSmaWrapper(pSW, false); break; } while (true) { STSRow *row = tGetSubmitBlkNext(&blkIter); if (!row) { - pSW = tdFreeTSmaWrapper(pSW, false); + pSW = tFreeTSmaWrapper(pSW, false); break; } if (!pSW || (pTSma && (pTSma->tableUid != msgIter.suid))) { if (pSW) { - pSW = tdFreeTSmaWrapper(pSW, false); + pSW = tFreeTSmaWrapper(pSW, false); } if (!(pSW = metaGetSmaInfoByTable(SMA_META(pSma), msgIter.suid, false))) { break; } if ((pSW->number) <= 0 || !pSW->tSma) { - pSW = tdFreeTSmaWrapper(pSW, false); + pSW = tFreeTSmaWrapper(pSW, false); break; } @@ -1020,12 +1020,12 @@ int32_t tdUpdateExpiredWindowImpl(SSma *pSma, SSubmitReq *pMsg, int64_t version) if (lastWinSKey != winSKey) { lastWinSKey = winSKey; 
if (tdSetExpiredWindow(pSma, pItemsHash, pTSma->indexUid, winSKey, version) < 0) { - pSW = tdFreeTSmaWrapper(pSW, false); + pSW = tFreeTSmaWrapper(pSW, false); tdUnRefSmaStat(pSma, pStat); return TSDB_CODE_FAILED; } } else { - smaDebug("vgId:%d smaIndex %" PRIi64 ", put skey %" PRIi64 " to expire window ignore as duplicated", + smaDebug("vgId:%d, smaIndex %" PRIi64 ", put skey %" PRIi64 " to expire window ignore as duplicated", SMA_VID(pSma), pTSma->indexUid, winSKey); } } diff --git a/source/dnode/vnode/src/sma/smaTimeRange2.c b/source/dnode/vnode/src/sma/smaTimeRange2.c new file mode 100644 index 0000000000000000000000000000000000000000..5ef171c7991c47494efa265274852c48e0bac6b7 --- /dev/null +++ b/source/dnode/vnode/src/sma/smaTimeRange2.c @@ -0,0 +1,1084 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#include "sma.h" +#include "tsdb.h" + +typedef STsdbCfg STSmaKeepCfg; + +#undef _TEST_SMA_PRINT_DEBUG_LOG_ +#define SMA_STORAGE_MINUTES_MAX 86400 +#define SMA_STORAGE_MINUTES_DAY 1440 +#define SMA_STORAGE_MINUTES_MIN 1440 +#define SMA_STORAGE_TSDB_MINUTES 86400 +#define SMA_STORAGE_TSDB_TIMES 10 +#define SMA_STORAGE_SPLIT_FACTOR 14400 // least records in tsma file TODO: the feasible value? +#define SMA_KEY_LEN 16 // TSKEY+groupId 8+8 +#define SMA_DROP_EXPIRED_TIME 10 // default is 10 seconds + +#define SMA_STATE_ITEM_HASH_SLOT 32 + +typedef struct { + SSma *pSma; + SDBFile dFile; + const SArray *pDataBlocks; // sma data + int64_t interval; // interval with the precision of DB +} STSmaWriteH; + +typedef struct { + int32_t iter; + int32_t fid; +} SmaFsIter; + +typedef struct { + STsdb *pTsdb; + SSma *pSma; + SDBFile dFile; + int64_t interval; // interval with the precision of DB + int32_t blockSize; // size of SMA block item + int32_t days; + int8_t storageLevel; + SmaFsIter smaFsIter; +} STSmaReadH; + +typedef enum { + SMA_STORAGE_LEVEL_TSDB = 0, // use days of self-defined e.g. vnode${N}/tsdb/tsma/sma_index_uid/v2f200.tsma + SMA_STORAGE_LEVEL_DFILESET = 1 // use days of TS data e.g. 
vnode${N}/tsdb/tsma/sma_index_uid/v2f1906.tsma +} ESmaStorageLevel; + +// static func + +static int64_t tdGetIntervalByPrecision(int64_t interval, uint8_t intervalUnit, int8_t precision, bool adjusted); +static int32_t tdGetSmaStorageLevel(STSmaKeepCfg *pCfg, int64_t interval); +static int32_t tdInitTSmaWriteH(STSmaWriteH *pSmaH, SSma *pSma, const SArray *pDataBlocks, int64_t interval, + int8_t intervalUnit); +static int32_t tdInitTSmaReadH(STSmaReadH *pSmaH, SSma *pSma, int64_t interval, int8_t intervalUnit); +static void tdDestroyTSmaWriteH(STSmaWriteH *pSmaH); +static int32_t tdGetTSmaDays(SSma *pSma, int64_t interval, int32_t storageLevel); +static int32_t tdSetTSmaDataFile(STSmaWriteH *pSmaH, int64_t indexUid, int32_t fid); +static int32_t tdInitTSmaFile(STSmaReadH *pSmaH, int64_t indexUid, TSKEY skey); +static bool tdSetAndOpenTSmaFile(STSmaReadH *pReadH, TSKEY *queryKey); +static int32_t tdInsertTSmaBlocks(STSmaWriteH *pSmaH, void *smaKey, int32_t keyLen, void *pData, int32_t dataLen, + TXN *txn); +// expired window + +static int32_t tdSetExpiredWindow(SSma *pSma, SHashObj *pItemsHash, int64_t indexUid, int64_t winSKey, int64_t version); +static int32_t tdResetExpiredWindow(SSma *pSma, SSmaStat *pStat, int64_t indexUid, TSKEY skey); +static int32_t tdDropTSmaDataImpl(SSma *pSma, int64_t indexUid); + +/** + * @brief Judge the tsma file split days + * + * @param pCfg + * @param pCont + * @param contLen + * @param days unit is minute + * @return int32_t + */ +int32_t tdGetTSmaDaysImpl(SVnodeCfg *pCfg, void *pCont, uint32_t contLen, int32_t *days) { + SDecoder coder = {0}; + tDecoderInit(&coder, pCont, contLen); + + STSma tsma = {0}; + if (tDecodeSVCreateTSmaReq(&coder, &tsma) < 0) { + terrno = TSDB_CODE_MSG_DECODE_ERROR; + goto _err; + } + STsdbCfg *pTsdbCfg = &pCfg->tsdbCfg; + int64_t mInterval = convertTimeFromPrecisionToUnit(tsma.interval, pTsdbCfg->precision, TIME_UNIT_MINUTE); + int64_t records = pTsdbCfg->days / mInterval; + + if (records >= SMA_STORAGE_SPLIT_FACTOR) { + *days = pTsdbCfg->days; + } else { + int64_t daysPerFile = mInterval * SMA_STORAGE_MINUTES_DAY * 2; + + if (daysPerFile > SMA_STORAGE_MINUTES_MAX) { + *days = SMA_STORAGE_MINUTES_MAX; + } else { + *days = (int32_t)daysPerFile; + } + + if(*days < pTsdbCfg->days) { + *days = pTsdbCfg->days; + } + } + tDecoderClear(&coder); + return 0; +_err: + tDecoderClear(&coder); + return -1; +} + +// read data + +// implementation + +/** + * @brief + * + * @param pSmaH + * @param pSma + * @param interval + * @param intervalUnit + * @return int32_t + */ +static int32_t tdInitTSmaReadH(STSmaReadH *pSmaH, SSma *pSma, int64_t interval, int8_t intervalUnit) { + STSmaKeepCfg *pCfg = SMA_TSDB_CFG(pSma); + pSmaH->pSma = pSma; + pSmaH->interval = tdGetIntervalByPrecision(interval, intervalUnit, SMA_TSDB_CFG(pSma)->precision, true); + pSmaH->storageLevel = tdGetSmaStorageLevel(pCfg, interval); + pSmaH->days = tdGetTSmaDays(pSma, pSmaH->interval, pSmaH->storageLevel); + return TSDB_CODE_SUCCESS; +} + +/** + * @brief Init of tSma FS + * + * @param pReadH + * @param indexUid + * @param skey + * @return int32_t + */ +static int32_t tdInitTSmaFile(STSmaReadH *pSmaH, int64_t indexUid, TSKEY skey) { + SSma *pSma = pSmaH->pSma; + + int32_t fid = (int32_t)(TSDB_KEY_FID(skey, pSmaH->days, SMA_TSDB_CFG(pSma)->precision)); + char tSmaFile[TSDB_FILENAME_LEN] = {0}; + snprintf(tSmaFile, TSDB_FILENAME_LEN, "%" PRIi64 "%sv%df%d.tsma", indexUid, TD_DIRSEP, SMA_VID(pSma), fid); + pSmaH->dFile.path = strdup(tSmaFile); + pSmaH->smaFsIter.iter = 0; + 
pSmaH->smaFsIter.fid = fid; + return TSDB_CODE_SUCCESS; +} + +/** + * @brief Set and open tSma file if it has key locates in queryWin. + * + * @param pReadH + * @param param + * @param queryWin + * @return true + * @return false + */ +static bool tdSetAndOpenTSmaFile(STSmaReadH *pReadH, TSKEY *queryKey) { + // SArray *smaFs = pReadH->pTsdb->fs->cstatus->sf; + // int32_t nSmaFs = taosArrayGetSize(smaFs); + + smaCloseDBF(&pReadH->dFile); + +#if 0 + while (pReadH->smaFsIter.iter < nSmaFs) { + void *pSmaFile = taosArrayGet(smaFs, pReadH->smaFsIter.iter); + if (pSmaFile) { // match(indexName, queryWindow) + // TODO: select the file by index_name ... + pReadH->dFile = pSmaFile; + ++pReadH->smaFsIter.iter; + break; + } + ++pReadH->smaFsIter.iter; + } + + if (pReadH->pDFile) { + tdDebug("vg%d: smaFile %s matched", REPO_ID(pReadH->pTsdb), "[pSmaFile dir]"); + return true; + } +#endif + + return false; +} + +/** + * @brief Approximate value for week/month/year. + * + * @param interval + * @param intervalUnit + * @param precision + * @param adjusted Interval already adjusted according to DB precision + * @return int64_t + */ +static int64_t tdGetIntervalByPrecision(int64_t interval, uint8_t intervalUnit, int8_t precision, bool adjusted) { + if (adjusted) { + return interval; + } + + switch (intervalUnit) { + case TIME_UNIT_YEAR: // approximate value + interval *= 365 * 86400 * 1e3; + break; + case TIME_UNIT_MONTH: // approximate value + interval *= 30 * 86400 * 1e3; + break; + case TIME_UNIT_WEEK: // approximate value + interval *= 7 * 86400 * 1e3; + break; + case TIME_UNIT_DAY: // the interval for tSma calculation must <= day + interval *= 86400 * 1e3; + break; + case TIME_UNIT_HOUR: + interval *= 3600 * 1e3; + break; + case TIME_UNIT_MINUTE: + interval *= 60 * 1e3; + break; + case TIME_UNIT_SECOND: + interval *= 1e3; + break; + default: + break; + } + + switch (precision) { + case TSDB_TIME_PRECISION_MILLI: + if (TIME_UNIT_MICROSECOND == intervalUnit) { // us + return interval / 1e3; + } else if (TIME_UNIT_NANOSECOND == intervalUnit) { // nano second + return interval / 1e6; + } else { // ms + return interval; + } + break; + case TSDB_TIME_PRECISION_MICRO: + if (TIME_UNIT_MICROSECOND == intervalUnit) { // us + return interval; + } else if (TIME_UNIT_NANOSECOND == intervalUnit) { // ns + return interval / 1e3; + } else { // ms + return interval * 1e3; + } + break; + case TSDB_TIME_PRECISION_NANO: + if (TIME_UNIT_MICROSECOND == intervalUnit) { // us + return interval * 1e3; + } else if (TIME_UNIT_NANOSECOND == intervalUnit) { // ns + return interval; + } else { // ms + return interval * 1e6; + } + break; + default: // ms + if (TIME_UNIT_MICROSECOND == intervalUnit) { // us + return interval / 1e3; + } else if (TIME_UNIT_NANOSECOND == intervalUnit) { // ns + return interval / 1e6; + } else { // ms + return interval; + } + break; + } + return interval; +} + +static int32_t tdInitTSmaWriteH(STSmaWriteH *pSmaH, SSma *pSma, const SArray *pDataBlocks, int64_t interval, + int8_t intervalUnit) { + pSmaH->pSma = pSma; + pSmaH->interval = tdGetIntervalByPrecision(interval, intervalUnit, SMA_TSDB_CFG(pSma)->precision, true); + pSmaH->pDataBlocks = pDataBlocks; + pSmaH->dFile.fid = SMA_IVLD_FID; + return TSDB_CODE_SUCCESS; +} + +static void tdDestroyTSmaWriteH(STSmaWriteH *pSmaH) { + if (pSmaH) { + smaCloseDBF(&pSmaH->dFile); + } +} + +static int32_t tdSetTSmaDataFile(STSmaWriteH *pSmaH, int64_t indexUid, int32_t fid) { + SSma *pSma = pSmaH->pSma; + ASSERT(!pSmaH->dFile.path && !pSmaH->dFile.pDB); + + 
pSmaH->dFile.fid = fid; + char tSmaFile[TSDB_FILENAME_LEN] = {0}; + snprintf(tSmaFile, TSDB_FILENAME_LEN, "%" PRIi64 "%sv%df%d.tsma", indexUid, TD_DIRSEP, SMA_VID(pSma), fid); + pSmaH->dFile.path = strdup(tSmaFile); + + return TSDB_CODE_SUCCESS; +} + +/** + * @brief + * + * @param pSma + * @param interval Interval calculated by DB's precision + * @param storageLevel + * @return int32_t + */ +static int32_t tdGetTSmaDays(SSma *pSma, int64_t interval, int32_t storageLevel) { + STsdbCfg *pCfg = SMA_TSDB_CFG(pSma); + int32_t daysPerFile = pCfg->days; // unit is minute + + if (storageLevel == SMA_STORAGE_LEVEL_TSDB) { + int32_t minutes = SMA_STORAGE_TSDB_TIMES * (interval / tsTickPerMin[pCfg->precision]); + if (minutes > SMA_STORAGE_TSDB_MINUTES) { + daysPerFile = SMA_STORAGE_TSDB_MINUTES; + } + } + + return daysPerFile; +} + +/** + * @brief Judge the tSma storage level + * + * @param pCfg + * @param interval + * @return int32_t + */ +static int32_t tdGetSmaStorageLevel(STSmaKeepCfg *pCfg, int64_t interval) { + int64_t mInterval = convertTimeFromPrecisionToUnit(interval, pCfg->precision, TIME_UNIT_MINUTE); + if (pCfg->days / mInterval >= SMA_STORAGE_SPLIT_FACTOR) { + return SMA_STORAGE_LEVEL_DFILESET; + } + return SMA_STORAGE_LEVEL_TSDB; +} + +/** + * @brief Insert/Update Time-range-wise SMA data. + * - If interval < SMA_STORAGE_SPLIT_HOURS(e.g. 24), save the SMA data as a part of DFileSet to e.g. + * v3f1900.tsma.${sma_index_name}. The days is the same with that for TS data files. + * - If interval >= SMA_STORAGE_SPLIT_HOURS, save the SMA data to e.g. vnode3/tsma/v3f632.tsma.${sma_index_name}. The + * days is 30 times of the interval, and the minimum days is SMA_STORAGE_TSDB_DAYS(30d). + * - The destination file of one data block for some interval is determined by its start TS key. + * + * @param pSma + * @param msg + * @return int32_t + */ +int32_t tdProcessTSmaInsertImpl(SSma *pSma, int64_t indexUid, const char *msg) { + STsdbCfg *pCfg = SMA_TSDB_CFG(pSma); + const SArray *pDataBlocks = (const SArray *)msg; + int64_t testSkey = TSKEY_INITIAL_VAL; + + // TODO: destroy SSDataBlocks(msg) + + // For super table aggregation, the sma data is stored in vgroup calculated from the hash value of stable name. Thus + // the sma data would arrive ahead of the update-expired-window msg. 
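The tdGetSmaStorageLevel rule above reduces to one ratio: if a file's time span (in minutes) holds at least SMA_STORAGE_SPLIT_FACTOR records at the given interval, sma data can share the TS DFileSet split; otherwise it gets a dedicated, coarser-split file. A standalone worked example, with values chosen purely for illustration:

#include <stdint.h>
#include <stdio.h>

#define DEMO_SPLIT_FACTOR 14400 /* SMA_STORAGE_SPLIT_FACTOR above */
enum { DEMO_LEVEL_TSDB = 0, DEMO_LEVEL_DFILESET = 1 };

/* Mirrors tdGetSmaStorageLevel; both arguments are in minutes. */
static int demoStorageLevel(int64_t daysPerFile, int64_t interval) {
  return (daysPerFile / interval >= DEMO_SPLIT_FACTOR) ? DEMO_LEVEL_DFILESET
                                                       : DEMO_LEVEL_TSDB;
}

int main(void) {
  /* 10-day files (14400 minutes) at a 1-minute interval: 14400 records per
   * file reaches the split factor, so sma data follows the TS DFileSet. */
  printf("%d\n", demoStorageLevel(14400, 1));  /* prints 1 (DFILESET) */
  /* At a 10-minute interval only 1440 records fit, so a dedicated tsma
   * file with a coarser split is used instead. */
  printf("%d\n", demoStorageLevel(14400, 10)); /* prints 0 (TSDB) */
  return 0;
}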
+ if (tdCheckAndInitSmaEnv(pSma, TSDB_SMA_TYPE_TIME_RANGE) != TSDB_CODE_SUCCESS) { + terrno = TSDB_CODE_TDB_INIT_FAILED; + return TSDB_CODE_FAILED; + } + + if (!pDataBlocks) { + terrno = TSDB_CODE_INVALID_PTR; + smaWarn("vgId:%d insert tSma data failed since pDataBlocks is NULL", SMA_VID(pSma)); + return terrno; + } + + if (taosArrayGetSize(pDataBlocks) <= 0) { + terrno = TSDB_CODE_INVALID_PARA; + smaWarn("vgId:%d insert tSma data failed since pDataBlocks is empty", SMA_VID(pSma)); + return TSDB_CODE_FAILED; + } + + SSmaEnv *pEnv = SMA_TSMA_ENV(pSma); + SSmaStat *pStat = SMA_ENV_STAT(pEnv); + SSmaStatItem *pItem = NULL; + + tdRefSmaStat(pSma, pStat); + + if (pStat && SMA_STAT_ITEMS(pStat)) { + pItem = taosHashGet(SMA_STAT_ITEMS(pStat), &indexUid, sizeof(indexUid)); + } + + if (!pItem || !(pItem = *(SSmaStatItem **)pItem) || tdSmaStatIsDropped(pItem)) { + terrno = TSDB_CODE_TDB_INVALID_SMA_STAT; + tdUnRefSmaStat(pSma, pStat); + return TSDB_CODE_FAILED; + } + + STSma *pTSma = pItem->pTSma; + STSmaWriteH tSmaH = {0}; + + if (tdInitTSmaWriteH(&tSmaH, pSma, pDataBlocks, pTSma->interval, pTSma->intervalUnit) != 0) { + return TSDB_CODE_FAILED; + } + + char rPath[TSDB_FILENAME_LEN] = {0}; + char aPath[TSDB_FILENAME_LEN] = {0}; + snprintf(rPath, TSDB_FILENAME_LEN, "%s%s%" PRIi64, SMA_ENV_PATH(pEnv), TD_DIRSEP, indexUid); + tfsAbsoluteName(SMA_TFS(pSma), SMA_ENV_DID(pEnv), rPath, aPath); + if (!taosCheckExistFile(aPath)) { + if (tfsMkdirRecurAt(SMA_TFS(pSma), rPath, SMA_ENV_DID(pEnv)) != TSDB_CODE_SUCCESS) { + tdUnRefSmaStat(pSma, pStat); + return TSDB_CODE_FAILED; + } + } + + // Step 1: Judge the storage level and days + int32_t storageLevel = tdGetSmaStorageLevel(pCfg, tSmaH.interval); + int32_t minutePerFile = tdGetTSmaDays(pSma, tSmaH.interval, storageLevel); + + char smaKey[SMA_KEY_LEN] = {0}; // key: skey + groupId + char dataBuf[512] = {0}; // val: aggr data // TODO: handle 512 buffer? 
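The smaKey buffer declared above is the 16-byte composite key (SMA_KEY_LEN: skey plus groupId, 8 + 8 bytes) under which each window's encoded aggregates are stored, and tdGetTSmaDataImpl rebuilds the same key from querySKey and a group id to look the block up again. A minimal sketch of such an encoder; the field order inside the real tdEncodeTSmaKey is not shown in this patch, so the order below is an assumption.

#include <stdint.h>
#include <string.h>

typedef int64_t DemoTSKEY;
#define DEMO_SMA_KEY_LEN 16 /* skey + groupId, 8 + 8 */

/* Assumed layout: window start key first, then group id; adjust if the real
 * tdEncodeTSmaKey orders the fields differently. */
static void demoEncodeTSmaKey(int64_t groupId, DemoTSKEY skey, void **pKey) {
  memcpy(*pKey, &skey, sizeof(skey));
  memcpy((char *)*pKey + sizeof(skey), &groupId, sizeof(groupId));
  *pKey = (char *)*pKey + DEMO_SMA_KEY_LEN; /* leave the cursor past the key */
}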
+ void *pDataBuf = NULL; + int32_t sz = taosArrayGetSize(pDataBlocks); + for (int32_t i = 0; i < sz; ++i) { + SSDataBlock *pDataBlock = taosArrayGet(pDataBlocks, i); + int32_t colNum = pDataBlock->info.numOfCols; + int32_t rows = pDataBlock->info.rows; + int32_t rowSize = pDataBlock->info.rowSize; + int64_t groupId = pDataBlock->info.groupId; + for (int32_t j = 0; j < rows; ++j) { + printf("|"); + TSKEY skey = TSKEY_INITIAL_VAL; // the start key of TS window by interval + void *pSmaKey = &smaKey; + bool isStartKey = false; + + int32_t tlen = 0; // reset the len + pDataBuf = &dataBuf; // reset the buf + for (int32_t k = 0; k < colNum; ++k) { + SColumnInfoData *pColInfoData = taosArrayGet(pDataBlock->pDataBlock, k); + void *var = POINTER_SHIFT(pColInfoData->pData, j * pColInfoData->info.bytes); + switch (pColInfoData->info.type) { + case TSDB_DATA_TYPE_TIMESTAMP: + if (!isStartKey) { + isStartKey = true; + skey = *(TSKEY *)var; + testSkey = skey; + printf("= skey %" PRIi64 " groupId = %" PRIi64 "|", skey, groupId); + tdEncodeTSmaKey(groupId, skey, &pSmaKey); + } else { + printf(" %" PRIi64 " |", *(int64_t *)var); + tlen += taosEncodeFixedI64(&pDataBuf, *(int64_t *)var); + break; + } + break; + case TSDB_DATA_TYPE_BOOL: + case TSDB_DATA_TYPE_UTINYINT: + printf(" %15d |", *(uint8_t *)var); + tlen += taosEncodeFixedU8(&pDataBuf, *(uint8_t *)var); + break; + case TSDB_DATA_TYPE_TINYINT: + printf(" %15d |", *(int8_t *)var); + tlen += taosEncodeFixedI8(&pDataBuf, *(int8_t *)var); + break; + case TSDB_DATA_TYPE_SMALLINT: + printf(" %15d |", *(int16_t *)var); + tlen += taosEncodeFixedI16(&pDataBuf, *(int16_t *)var); + break; + case TSDB_DATA_TYPE_USMALLINT: + printf(" %15d |", *(uint16_t *)var); + tlen += taosEncodeFixedU16(&pDataBuf, *(uint16_t *)var); + break; + case TSDB_DATA_TYPE_INT: + printf(" %15d |", *(int32_t *)var); + tlen += taosEncodeFixedI32(&pDataBuf, *(int32_t *)var); + break; + case TSDB_DATA_TYPE_FLOAT: + printf(" %15f |", *(float *)var); + tlen += taosEncodeBinary(&pDataBuf, var, sizeof(float)); + break; + case TSDB_DATA_TYPE_UINT: + printf(" %15u |", *(uint32_t *)var); + tlen += taosEncodeFixedU32(&pDataBuf, *(uint32_t *)var); + break; + case TSDB_DATA_TYPE_BIGINT: + printf(" %15ld |", *(int64_t *)var); + tlen += taosEncodeFixedI64(&pDataBuf, *(int64_t *)var); + break; + case TSDB_DATA_TYPE_DOUBLE: + printf(" %15lf |", *(double *)var); + tlen += taosEncodeBinary(&pDataBuf, var, sizeof(double)); + case TSDB_DATA_TYPE_UBIGINT: + printf(" %15lu |", *(uint64_t *)var); + tlen += taosEncodeFixedU64(&pDataBuf, *(uint64_t *)var); + break; + case TSDB_DATA_TYPE_NCHAR: { + char tmpChar[100] = {0}; + strncpy(tmpChar, varDataVal(var), varDataLen(var)); + printf(" %s |", tmpChar); + tlen += taosEncodeBinary(&pDataBuf, varDataVal(var), varDataLen(var)); + break; + } + case TSDB_DATA_TYPE_VARCHAR: { // TSDB_DATA_TYPE_BINARY + char tmpChar[100] = {0}; + strncpy(tmpChar, varDataVal(var), varDataLen(var)); + printf(" %s |", tmpChar); + tlen += taosEncodeBinary(&pDataBuf, varDataVal(var), varDataLen(var)); + break; + } + case TSDB_DATA_TYPE_VARBINARY: + // TODO: add binary/varbinary + TASSERT(0); + default: + printf("the column type %" PRIi16 " is undefined\n", pColInfoData->info.type); + TASSERT(0); + break; + } + } + printf("\n"); + // if ((tlen > 0) && (skey != TSKEY_INITIAL_VAL)) { + if (tlen > 0) { + int32_t fid = (int32_t)(TSDB_KEY_FID(skey, minutePerFile, pCfg->precision)); + + // Step 2: Set the DFile for storage of SMA index, and iterate/split the TSma data and store to B+Tree index + // 
file + // - Set and open the DFile or the B+Tree file + // TODO: tsdbStartTSmaCommit(); + if (fid != tSmaH.dFile.fid) { + if (tSmaH.dFile.fid != SMA_IVLD_FID) { + tdSmaEndCommit(pEnv); + smaCloseDBF(&tSmaH.dFile); + } + tdSetTSmaDataFile(&tSmaH, indexUid, fid); + smaDebug("@@@ vgId:%d write to DBF %s, days:%d, interval:%" PRIi64 ", storageLevel:%" PRIi32 + " queryKey:%" PRIi64, + SMA_VID(pSma), tSmaH.dFile.path, minutePerFile, tSmaH.interval, storageLevel, testSkey); + if (smaOpenDBF(pEnv->dbEnv, &tSmaH.dFile) != 0) { + smaWarn("vgId:%d open DB file %s failed since %s", SMA_VID(pSma), + tSmaH.dFile.path ? tSmaH.dFile.path : "path is NULL", tstrerror(terrno)); + tdDestroyTSmaWriteH(&tSmaH); + tdUnRefSmaStat(pSma, pStat); + return TSDB_CODE_FAILED; + } + tdSmaBeginCommit(pEnv); + } + + if (tdInsertTSmaBlocks(&tSmaH, &smaKey, SMA_KEY_LEN, dataBuf, tlen, &pEnv->txn) != 0) { + smaWarn("vgId:%d insert tsma data blocks fail for index %" PRIi64 ", skey %" PRIi64 ", groupId %" PRIi64 + " since %s", + SMA_VID(pSma), indexUid, skey, groupId, tstrerror(terrno)); + tdSmaEndCommit(pEnv); + tdDestroyTSmaWriteH(&tSmaH); + tdUnRefSmaStat(pSma, pStat); + return TSDB_CODE_FAILED; + } + + smaDebug("vgId:%d insert tsma data blocks success for index %" PRIi64 ", skey %" PRIi64 ", groupId %" PRIi64, + SMA_VID(pSma), indexUid, skey, groupId); + // TODO:tsdbEndTSmaCommit(); + + // Step 3: reset the SSmaStat + tdResetExpiredWindow(pSma, pStat, indexUid, skey); + } else { + smaWarn("vgId:%d invalid data skey:%" PRIi64 ", tlen %" PRIi32 " during insert tSma data for %" PRIi64, + SMA_VID(pSma), skey, tlen, indexUid); + } + } + } + tdSmaEndCommit(pEnv); // TODO: not commit for every insert + tdDestroyTSmaWriteH(&tSmaH); + tdUnRefSmaStat(pSma, pStat); + + return TSDB_CODE_SUCCESS; +} + +int32_t tdDropTSmaData(SSma *pSma, int64_t indexUid) { + int32_t code = TSDB_CODE_SUCCESS; + if ((code = tdDropTSmaDataImpl(pSma, indexUid)) < 0) { + smaWarn("vgId:%d drop tSma data failed since %s", SMA_VID(pSma), tstrerror(terrno)); + } + return code; +} + +/** + * @brief Insert TSma data blocks to DB File build by B+Tree + * + * @param pSmaH + * @param smaKey tableUid-colId-skeyOfWindow(8-2-8) + * @param keyLen + * @param pData + * @param dataLen + * @return int32_t + */ +static int32_t tdInsertTSmaBlocks(STSmaWriteH *pSmaH, void *smaKey, int32_t keyLen, void *pData, int32_t dataLen, + TXN *txn) { + SDBFile *pDBFile = &pSmaH->dFile; + + // TODO: insert tsma data blocks into B+Tree(TTB) + if (smaSaveSmaToDB(pDBFile, smaKey, keyLen, pData, dataLen, txn) != 0) { + smaWarn("vgId:%d insert tsma data blocks into %s: smaKey %" PRIx64 "-%" PRIx64 ", dataLen %" PRIu32 " fail", + SMA_VID(pSmaH->pSma), pDBFile->path, *(int64_t *)smaKey, *(int64_t *)POINTER_SHIFT(smaKey, 8), dataLen); + return TSDB_CODE_FAILED; + } + smaDebug("vgId:%d insert tsma data blocks into %s: smaKey %" PRIx64 "-%" PRIx64 ", dataLen %" PRIu32 " succeed", + SMA_VID(pSmaH->pSma), pDBFile->path, *(int64_t *)smaKey, *(int64_t *)POINTER_SHIFT(smaKey, 8), dataLen); + +#ifdef _TEST_SMA_PRINT_DEBUG_LOG_ + uint32_t valueSize = 0; + void *data = tdGetSmaDataByKey(pDBFile, smaKey, keyLen, &valueSize); + ASSERT(data != NULL); + for (uint32_t v = 0; v < valueSize; v += 8) { + smaWarn("vgId:%d insert sma data val[%d] %" PRIi64, REPO_ID(pSmaH->pTsdb), v, *(int64_t *)POINTER_SHIFT(data, v)); + } +#endif + return TSDB_CODE_SUCCESS; +} + +/** + * @brief When sma data received from stream computing, make the relative expired window valid. 
+ * + * @param pSma + * @param pStat + * @param indexUid + * @param skey + * @return int32_t + */ +static int32_t tdResetExpiredWindow(SSma *pSma, SSmaStat *pStat, int64_t indexUid, TSKEY skey) { + SSmaStatItem *pItem = NULL; + + tdRefSmaStat(pSma, pStat); + + if (pStat && SMA_STAT_ITEMS(pStat)) { + pItem = taosHashGet(SMA_STAT_ITEMS(pStat), &indexUid, sizeof(indexUid)); + } + if ((pItem) && ((pItem = *(SSmaStatItem **)pItem))) { + // pItem resides in hash buffer all the time unless drop sma index + // TODO: multithread protect + if (taosHashRemove(pItem->expiredWindows, &skey, sizeof(TSKEY)) != 0) { + // error handling + tdUnRefSmaStat(pSma, pStat); + smaWarn("vgId:%d remove skey %" PRIi64 " from expired window for sma index %" PRIi64 " fail", SMA_VID(pSma), skey, + indexUid); + return TSDB_CODE_FAILED; + } + smaDebug("vgId:%d remove skey %" PRIi64 " from expired window for sma index %" PRIi64 " succeed", SMA_VID(pSma), + skey, indexUid); + // TODO: use a standalone interface to received state upate notification from stream computing module. + /** + * @brief state + * - When SMA env init in TSDB, its status is TSDB_SMA_STAT_OK. + * - In startup phase of stream computing module, it should notify the SMA env in TSDB to expired if needed(e.g. + * when batch data caculation not finised) + * - When TSDB_SMA_STAT_OK, the stream computing module should also notify that to the SMA env in TSDB. + */ + pItem->state = TSDB_SMA_STAT_OK; + } else { + // error handling + tdUnRefSmaStat(pSma, pStat); + smaWarn("vgId:%d expired window %" PRIi64 " not exists for sma index %" PRIi64, SMA_VID(pSma), skey, indexUid); + return TSDB_CODE_FAILED; + } + + tdUnRefSmaStat(pSma, pStat); + return TSDB_CODE_SUCCESS; +} + +/** + * @brief Drop tSma data and local cache + * - insert/query reference + * @param pSma + * @param msg + * @return int32_t + */ +static int32_t tdDropTSmaDataImpl(SSma *pSma, int64_t indexUid) { + SSmaEnv *pEnv = atomic_load_ptr(&SMA_TSMA_ENV(pSma)); + + // clear local cache + if (pEnv) { + smaDebug("vgId:%d drop tSma local cache for %" PRIi64, SMA_VID(pSma), indexUid); + + SSmaStatItem *pItem = taosHashGet(SMA_ENV_STAT_ITEMS(pEnv), &indexUid, sizeof(indexUid)); + if ((pItem) || ((pItem = *(SSmaStatItem **)pItem))) { + if (tdSmaStatIsDropped(pItem)) { + smaDebug("vgId:%d tSma stat is already dropped for %" PRIi64, SMA_VID(pSma), indexUid); + return TSDB_CODE_TDB_INVALID_ACTION; // TODO: duplicate drop msg would be intercepted by mnode + } + + tdWLockSmaEnv(pEnv); + if (tdSmaStatIsDropped(pItem)) { + tdUnLockSmaEnv(pEnv); + smaDebug("vgId:%d tSma stat is already dropped for %" PRIi64, SMA_VID(pSma), indexUid); + return TSDB_CODE_TDB_INVALID_ACTION; // TODO: duplicate drop msg would be intercepted by mnode + } + tdSmaStatSetDropped(pItem); + tdUnLockSmaEnv(pEnv); + + int32_t nSleep = 0; + int32_t refVal = INT32_MAX; + while (true) { + if ((refVal = T_REF_VAL_GET(SMA_ENV_STAT(pEnv))) <= 0) { + smaDebug("vgId:%d drop index %" PRIi64 " since refVal=%d", SMA_VID(pSma), indexUid, refVal); + break; + } + smaDebug("vgId:%d wait 1s to drop index %" PRIi64 " since refVal=%d", SMA_VID(pSma), indexUid, refVal); + taosSsleep(1); + if (++nSleep > SMA_DROP_EXPIRED_TIME) { + smaDebug("vgId:%d drop index %" PRIi64 " after wait %d (refVal=%d)", SMA_VID(pSma), indexUid, nSleep, refVal); + break; + }; + } + + tdFreeSmaStatItem(pItem); + smaDebug("vgId:%d getTSmaDataImpl failed since no index %" PRIi64 " in local cache", SMA_VID(pSma), indexUid); + } + } + // clear sma data files + // TODO: + return TSDB_CODE_SUCCESS; 
+} + +/** + * @brief + * + * @param pSma Return the data between queryWin and fill the pData. + * @param pData + * @param indexUid + * @param pQuerySKey + * @param nMaxResult The query invoker should control the nMaxResult need to return to avoid OOM. + * @return int32_t + */ +int32_t tdGetTSmaDataImpl(SSma *pSma, char *pData, int64_t indexUid, TSKEY querySKey, int32_t nMaxResult) { + SSmaEnv *pEnv = atomic_load_ptr(&SMA_TSMA_ENV(pSma)); + SSmaStat *pStat = NULL; + + if (!pEnv) { + terrno = TSDB_CODE_INVALID_PTR; + smaWarn("vgId:%d getTSmaDataImpl failed since pTSmaEnv is NULL", SMA_VID(pSma)); + return TSDB_CODE_FAILED; + } + + pStat = SMA_ENV_STAT(pEnv); + + tdRefSmaStat(pSma, pStat); + SSmaStatItem *pItem = taosHashGet(SMA_ENV_STAT_ITEMS(pEnv), &indexUid, sizeof(indexUid)); + if (!pItem || !(pItem = *(SSmaStatItem **)pItem)) { + // Normally pItem should not be NULL, mark all windows as expired and notify query module to fetch raw TS data if + // it's NULL. + tdUnRefSmaStat(pSma, pStat); + terrno = TSDB_CODE_TDB_INVALID_ACTION; + smaDebug("vgId:%d getTSmaDataImpl failed since no index %" PRIi64, SMA_VID(pSma), indexUid); + return TSDB_CODE_FAILED; + } + +#if 0 + int32_t nQueryWin = taosArrayGetSize(pQuerySKey); + for (int32_t n = 0; n < nQueryWin; ++n) { + TSKEY skey = taosArrayGet(pQuerySKey, n); + if (taosHashGet(pItem->expiredWindows, &skey, sizeof(TSKEY))) { + // TODO: mark this window as expired. + } + } +#endif + +#if 1 + int8_t smaStat = 0; + if (!tdSmaStatIsOK(pItem, &smaStat)) { // TODO: multiple check for large scale sma query + tdUnRefSmaStat(pSma, pStat); + terrno = TSDB_CODE_TDB_INVALID_SMA_STAT; + smaWarn("vgId:%d getTSmaDataImpl failed from index %" PRIi64 " since %s %" PRIi8, SMA_VID(pSma), indexUid, + tstrerror(terrno), smaStat); + return TSDB_CODE_FAILED; + } + + if (taosHashGet(pItem->expiredWindows, &querySKey, sizeof(TSKEY))) { + // TODO: mark this window as expired. 
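+    // A hit here means this window's cached sma result is stale and the
+    // caller should fall back to raw TS data, per the notes above.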
+ smaDebug("vgId:%d skey %" PRIi64 " of window exists in expired window for index %" PRIi64, SMA_VID(pSma), querySKey, + indexUid); + } else { + smaDebug("vgId:%d skey %" PRIi64 " of window not in expired window for index %" PRIi64, SMA_VID(pSma), querySKey, + indexUid); + } + + STSma *pTSma = pItem->pTSma; +#endif + +#if 1 + STSmaReadH tReadH = {0}; + tdInitTSmaReadH(&tReadH, pSma, pTSma->interval, pTSma->intervalUnit); + smaCloseDBF(&tReadH.dFile); + + tdUnRefSmaStat(pSma, pStat); + + tdInitTSmaFile(&tReadH, indexUid, querySKey); + smaDebug("### vgId:%d read from DBF %s days:%d, interval:%" PRIi64 ", storageLevel:%" PRIi8 " queryKey:%" PRIi64, + SMA_VID(pSma), tReadH.dFile.path, tReadH.days, tReadH.interval, tReadH.storageLevel, querySKey); + if (smaOpenDBF(pEnv->dbEnv, &tReadH.dFile) != 0) { + smaWarn("vgId:%d open DBF %s failed since %s", SMA_VID(pSma), tReadH.dFile.path, tstrerror(terrno)); + return TSDB_CODE_FAILED; + } + + char smaKey[SMA_KEY_LEN] = {0}; + void *pSmaKey = &smaKey; + int64_t queryGroupId = 0; + tdEncodeTSmaKey(queryGroupId, querySKey, (void **)&pSmaKey); + + smaDebug("vgId:%d get sma data from %s: smaKey %" PRIx64 "-%" PRIx64 ", keyLen %d", SMA_VID(pSma), tReadH.dFile.path, + *(int64_t *)smaKey, *(int64_t *)POINTER_SHIFT(smaKey, 8), SMA_KEY_LEN); + + void *result = NULL; + int32_t valueSize = 0; + if (!(result = smaGetSmaDataByKey(&tReadH.dFile, smaKey, SMA_KEY_LEN, &valueSize))) { + smaWarn("vgId:%d get sma data failed from smaIndex %" PRIi64 ", smaKey %" PRIx64 "-%" PRIx64 " since %s", + SMA_VID(pSma), indexUid, *(int64_t *)smaKey, *(int64_t *)POINTER_SHIFT(smaKey, 8), tstrerror(terrno)); + smaCloseDBF(&tReadH.dFile); + return TSDB_CODE_FAILED; + } +#endif + +#ifdef _TEST_SMA_PRINT_DEBUG_LOG_ + for (uint32_t v = 0; v < valueSize; v += 8) { + smaWarn("vgId:%d get sma data v[%d]=%" PRIi64, SMA_VID(pSma), v, *(int64_t *)POINTER_SHIFT(result, v)); + } +#endif + taosMemoryFreeClear(result); // TODO: fill the result to output + +#if 0 + int32_t nResult = 0; + int64_t lastKey = 0; + + while (true) { + if (nResult >= nMaxResult) { + break; + } + + // set and open the file according to the STSma param + if (tdSetAndOpenTSmaFile(&tReadH, queryWin)) { + char bTree[100] = "\0"; + while (strncmp(bTree, "has more nodes", 100) == 0) { + if (nResult >= nMaxResult) { + break; + } + // tdGetDataFromBTree(bTree, queryWin, lastKey) + // fill the pData + ++nResult; + } + } + } +#endif + // read data from file and fill the result + smaCloseDBF(&tReadH.dFile); + return TSDB_CODE_SUCCESS; +} + +int32_t tdProcessTSmaCreateImpl(SSma *pSma, int64_t version, const char *pMsg) { + SSmaCfg *pCfg = (SSmaCfg *)pMsg; + + if (metaCreateTSma(SMA_META(pSma), version, pCfg) < 0) { + return -1; + } + + tdTSmaAdd(pSma, 1); + return 0; +} + +int32_t tdDropTSma(SSma *pSma, char *pMsg) { +#if 0 + SVDropTSmaReq vDropSmaReq = {0}; + if (!tDeserializeSVDropTSmaReq(pMsg, &vDropSmaReq)) { + terrno = TSDB_CODE_OUT_OF_MEMORY; + return -1; + } + + // TODO: send msg to stream computing to drop tSma + // if ((send msg to stream computing) < 0) { + // tDestroyTSma(&vCreateSmaReq); + // return -1; + // } + // + + if (metaDropTSma(SMA_META(pSma), vDropSmaReq.indexUid) < 0) { + // TODO: handle error + return -1; + } + + if (tdDropTSmaData(pSma, vDropSmaReq.indexUid) < 0) { + // TODO: handle error + return -1; + } + + tdTSmaSub(pSma, 1); +#endif + + // TODO: return directly or go on follow steps? 
+  return TSDB_CODE_SUCCESS;
+}
+
+static SSmaStatItem *tdNewSmaStatItem(int8_t state) {
+  SSmaStatItem *pItem = NULL;
+
+  pItem = (SSmaStatItem *)taosMemoryCalloc(1, sizeof(SSmaStatItem));
+  if (!pItem) {
+    terrno = TSDB_CODE_OUT_OF_MEMORY;
+    return NULL;
+  }
+
+  pItem->state = state;
+  pItem->expiredWindows = taosHashInit(SMA_STATE_ITEM_HASH_SLOT, taosGetDefaultHashFunction(TSDB_DATA_TYPE_TIMESTAMP),
+                                       true, HASH_ENTRY_LOCK);
+  if (!pItem->expiredWindows) {
+    taosMemoryFreeClear(pItem);
+    return NULL;
+  }
+
+  return pItem;
+}
+
+static int32_t tdSetExpiredWindow(SSma *pSma, SHashObj *pItemsHash, int64_t indexUid, int64_t winSKey,
+                                  int64_t version) {
+  SSmaStatItem *pItem = taosHashGet(pItemsHash, &indexUid, sizeof(indexUid));
+  if (!pItem) {
+    // TODO: use TSDB_SMA_STAT_EXPIRED and update by stream computing later
+    pItem = tdNewSmaStatItem(TSDB_SMA_STAT_OK);  // TODO: use the real state
+    if (!pItem) {
+      // Respond to stream computing: OOM
+      // For query, if the indexUid is not found, the TSDB should tell the query module to query raw TS data.
+      return TSDB_CODE_FAILED;
+    }
+
+    // cache smaMeta
+    STSma *pTSma = metaGetSmaInfoByIndex(SMA_META(pSma), indexUid);
+    if (!pTSma) {
+      terrno = TSDB_CODE_TDB_NO_SMA_INDEX_IN_META;
+      taosHashCleanup(pItem->expiredWindows);
+      taosMemoryFree(pItem);
+      smaWarn("vgId:%d set expire window, get tsma meta failed for smaIndex %" PRIi64 " since %s", SMA_VID(pSma),
+              indexUid, tstrerror(terrno));
+      return TSDB_CODE_FAILED;
+    }
+    pItem->pTSma = pTSma;
+
+    if (taosHashPut(pItemsHash, &indexUid, sizeof(indexUid), &pItem, sizeof(pItem)) != 0) {
+      // If an error occurs while putting the smaStatItem, free the resources of pItem
+      taosHashCleanup(pItem->expiredWindows);
+      taosMemoryFree(pItem);
+      return TSDB_CODE_FAILED;
+    }
+  } else if (!(pItem = *(SSmaStatItem **)pItem)) {
+    terrno = TSDB_CODE_INVALID_PTR;
+    return TSDB_CODE_FAILED;
+  }
+
+  if (taosHashPut(pItem->expiredWindows, &winSKey, sizeof(TSKEY), &version, sizeof(version)) != 0) {
+    // If an error occurs while putting an expired window, remove the smaIndex from pSma->pSmaStat, so that the TSDB
+    // tells the query module to query raw TS data.
+    // N.B.
+    //  1) Failure of taosHashPut is assumed to be an extremely low-probability event.
+    //  2) This solves the inconsistency only to some extent, not completely, unless we record all expired windows
+    //     that failed to be put into the hash table.
+    taosHashCleanup(pItem->expiredWindows);
+    taosMemoryFreeClear(pItem->pTSma);
+    taosHashRemove(pItemsHash, &indexUid, sizeof(indexUid));
+    smaWarn("vgId:%d smaIndex %" PRIi64 ", put skey %" PRIi64 " to expire window fail", SMA_VID(pSma), indexUid,
+            winSKey);
+    return TSDB_CODE_FAILED;
+  }
+
+  smaDebug("vgId:%d smaIndex %" PRIi64 ", put skey %" PRIi64 " to expire window succeed", SMA_VID(pSma), indexUid,
+           winSKey);
+  return TSDB_CODE_SUCCESS;
+}
+
+/**
+ * @brief Update expired window according to msg from stream computing module.
+ * + * @param pSma + * @param msg SSubmitReq + * @return int32_t + */ +int32_t tdUpdateExpiredWindowImpl(SSma *pSma, const SSubmitReq *pMsg, int64_t version) { + // no time-range-sma, just return success + if (atomic_load_16(&SMA_TSMA_NUM(pSma)) <= 0) { + smaTrace("vgId:%d not update expire window since no tSma", SMA_VID(pSma)); + return TSDB_CODE_SUCCESS; + } + + if (!SMA_META(pSma)) { + terrno = TSDB_CODE_INVALID_PTR; + smaError("vgId:%d update expire window failed since no meta ptr", SMA_VID(pSma)); + return TSDB_CODE_FAILED; + } + + if (tdCheckAndInitSmaEnv(pSma, TSDB_SMA_TYPE_TIME_RANGE) < 0) { + smaError("vgId:%d init sma env failed since %s", SMA_VID(pSma), terrstr(terrno)); + terrno = TSDB_CODE_TDB_INIT_FAILED; + return TSDB_CODE_FAILED; + } + + // Firstly, assume that tSma can only be created on super table/normal table. + // getActiveTimeWindow + + SSmaEnv *pEnv = SMA_TSMA_ENV(pSma); + SSmaStat *pStat = SMA_ENV_STAT(pEnv); + SHashObj *pItemsHash = SMA_ENV_STAT_ITEMS(pEnv); + + TASSERT(pEnv && pStat && pItemsHash); + + // basic procedure + // TODO: optimization + tdRefSmaStat(pSma, pStat); + + SSubmitMsgIter msgIter = {0}; + SSubmitBlk *pBlock = NULL; + SInterval interval = {0}; + TSKEY lastWinSKey = INT64_MIN; + + if (tInitSubmitMsgIter(pMsg, &msgIter) < 0) { + return TSDB_CODE_FAILED; + } + + while (true) { + tGetSubmitMsgNext(&msgIter, &pBlock); + if (!pBlock) break; + + STSmaWrapper *pSW = NULL; + STSma *pTSma = NULL; + + SSubmitBlkIter blkIter = {0}; + if (tInitSubmitBlkIter(&msgIter, pBlock, &blkIter) < 0) { + pSW = tFreeTSmaWrapper(pSW, false); + break; + } + + while (true) { + STSRow *row = tGetSubmitBlkNext(&blkIter); + if (!row) { + pSW = tFreeTSmaWrapper(pSW, false); + break; + } + if (!pSW || (pTSma && (pTSma->tableUid != msgIter.suid))) { + if (pSW) { + pSW = tFreeTSmaWrapper(pSW, false); + } + if (!(pSW = metaGetSmaInfoByTable(SMA_META(pSma), msgIter.suid, false))) { + break; + } + if ((pSW->number) <= 0 || !pSW->tSma) { + pSW = tFreeTSmaWrapper(pSW, false); + break; + } + + pTSma = pSW->tSma; + + interval.interval = pTSma->interval; + interval.intervalUnit = pTSma->intervalUnit; + interval.offset = pTSma->offset; + interval.precision = SMA_TSDB_CFG(pSma)->precision; + interval.sliding = pTSma->sliding; + interval.slidingUnit = pTSma->slidingUnit; + } + + // TODO: process multiple tsma for one table uid + TSKEY winSKey = taosTimeTruncate(TD_ROW_KEY(row), &interval, interval.precision); + + if (lastWinSKey != winSKey) { + lastWinSKey = winSKey; + if (tdSetExpiredWindow(pSma, pItemsHash, pTSma->indexUid, winSKey, version) < 0) { + pSW = tFreeTSmaWrapper(pSW, false); + tdUnRefSmaStat(pSma, pStat); + return TSDB_CODE_FAILED; + } + } else { + smaDebug("vgId:%d smaIndex %" PRIi64 ", put skey %" PRIi64 " to expire window ignore as duplicated", + SMA_VID(pSma), pTSma->indexUid, winSKey); + } + } + } + + tdUnRefSmaStat(pSma, pStat); + + return TSDB_CODE_SUCCESS; +} \ No newline at end of file diff --git a/source/dnode/vnode/src/tq/tq.c b/source/dnode/vnode/src/tq/tq.c index 9941b00ff73cbe95a9a7ead1baf619f910bdef18..310b59b2e82c20bfbbb6dc5256f3393c67ae3ca3 100644 --- a/source/dnode/vnode/src/tq/tq.c +++ b/source/dnode/vnode/src/tq/tq.c @@ -14,7 +14,6 @@ */ #include "tq.h" -#include "tdbInt.h" int32_t tqInit() { int8_t old; @@ -47,51 +46,6 @@ void tqCleanUp() { } } -int tqExecKeyCompare(const void* pKey1, int32_t kLen1, const void* pKey2, int32_t kLen2) { - return strcmp(pKey1, pKey2); -} - -int32_t tqStoreExec(STQ* pTq, const char* key, const STqExec* pExec) { - int32_t code; 
- int32_t vlen; - tEncodeSize(tEncodeSTqExec, pExec, vlen, code); - ASSERT(code == 0); - - void* buf = taosMemoryCalloc(1, vlen); - if (buf == NULL) { - ASSERT(0); - } - - SEncoder encoder; - tEncoderInit(&encoder, buf, vlen); - - if (tEncodeSTqExec(&encoder, pExec) < 0) { - ASSERT(0); - } - - TXN txn; - - if (tdbTxnOpen(&txn, 0, tdbDefaultMalloc, tdbDefaultFree, NULL, TDB_TXN_WRITE | TDB_TXN_READ_UNCOMMITTED) < 0) { - ASSERT(0); - } - - if (tdbBegin(pTq->pMetaStore, &txn) < 0) { - ASSERT(0); - } - - if (tdbTbUpsert(pTq->pExecStore, key, (int)strlen(key), buf, vlen, &txn) < 0) { - ASSERT(0); - } - - if (tdbCommit(pTq->pMetaStore, &txn) < 0) { - ASSERT(0); - } - - tEncoderClear(&encoder); - taosMemoryFree(buf); - return 0; -} - STQ* tqOpen(const char* path, SVnode* pVnode, SWal* pWal) { STQ* pTq = taosMemoryMalloc(sizeof(STQ)); if (pTq == NULL) { @@ -102,69 +56,13 @@ STQ* tqOpen(const char* path, SVnode* pVnode, SWal* pWal) { pTq->pVnode = pVnode; pTq->pWal = pWal; - pTq->execs = taosHashInit(64, MurmurHash3_32, true, HASH_ENTRY_LOCK); + pTq->handles = taosHashInit(64, MurmurHash3_32, true, HASH_ENTRY_LOCK); pTq->pStreamTasks = taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_NO_LOCK); pTq->pushMgr = taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, HASH_ENTRY_LOCK); - if (tdbOpen(path, 16 * 1024, 1, &pTq->pMetaStore) < 0) { - ASSERT(0); - } - - if (tdbTbOpen("exec", -1, -1, tqExecKeyCompare, pTq->pMetaStore, &pTq->pExecStore) < 0) { - ASSERT(0); - } - - TXN txn; - - if (tdbTxnOpen(&txn, 0, tdbDefaultMalloc, tdbDefaultFree, NULL, 0) < 0) { - ASSERT(0); - } - - /*if (tdbBegin(pTq->pMetaStore, &txn) < 0) {*/ - /*ASSERT(0);*/ - /*}*/ - - TBC* pCur; - if (tdbTbcOpen(pTq->pExecStore, &pCur, &txn) < 0) { - ASSERT(0); - } - - void* pKey; - int kLen; - void* pVal; - int vLen; - - tdbTbcMoveToFirst(pCur); - SDecoder decoder; - while (tdbTbcNext(pCur, &pKey, &kLen, &pVal, &vLen) == 0) { - STqExec exec; - tDecoderInit(&decoder, (uint8_t*)pVal, vLen); - tDecodeSTqExec(&decoder, &exec); - exec.pWalReader = walOpenReadHandle(pTq->pVnode->pWal); - if (exec.subType == TOPIC_SUB_TYPE__TABLE) { - for (int32_t i = 0; i < 5; i++) { - exec.pExecReader[i] = tqInitSubmitMsgScanner(pTq->pVnode->pMeta); - - SReadHandle handle = { - .reader = exec.pExecReader[i], - .meta = pTq->pVnode->pMeta, - .pMsgCb = &pTq->pVnode->msgCb, - }; - exec.task[i] = qCreateStreamExecTaskInfo(exec.qmsg, &handle); - ASSERT(exec.task[i]); - } - } else { - for (int32_t i = 0; i < 5; i++) { - exec.pExecReader[i] = tqInitSubmitMsgScanner(pTq->pVnode->pMeta); - } - exec.pDropTbUid = taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK); - } - taosHashPut(pTq->execs, pKey, kLen, &exec, sizeof(STqExec)); - } - - if (tdbTxnClose(&txn) < 0) { + if (tqMetaOpen(pTq) < 0) { ASSERT(0); } @@ -174,229 +72,50 @@ STQ* tqOpen(const char* path, SVnode* pVnode, SWal* pWal) { void tqClose(STQ* pTq) { if (pTq) { taosMemoryFreeClear(pTq->path); - taosHashCleanup(pTq->execs); + taosHashCleanup(pTq->handles); taosHashCleanup(pTq->pStreamTasks); taosHashCleanup(pTq->pushMgr); - tdbClose(pTq->pMetaStore); + tqMetaClose(pTq); taosMemoryFree(pTq); } // TODO } -int32_t tEncodeSTqExec(SEncoder* pEncoder, const STqExec* pExec) { - if (tStartEncode(pEncoder) < 0) return -1; - if (tEncodeCStr(pEncoder, pExec->subKey) < 0) return -1; - if (tEncodeI64(pEncoder, pExec->consumerId) < 0) return -1; - if (tEncodeI32(pEncoder, pExec->epoch) < 0) return -1; - if (tEncodeI8(pEncoder, 
pExec->subType) < 0) return -1; - if (tEncodeI8(pEncoder, pExec->withTbName) < 0) return -1; - if (tEncodeI8(pEncoder, pExec->withSchema) < 0) return -1; - if (tEncodeI8(pEncoder, pExec->withTag) < 0) return -1; - if (pExec->subType == TOPIC_SUB_TYPE__TABLE) { - if (tEncodeCStr(pEncoder, pExec->qmsg) < 0) return -1; - } - tEndEncode(pEncoder); - return pEncoder->pos; -} - -int32_t tDecodeSTqExec(SDecoder* pDecoder, STqExec* pExec) { - if (tStartDecode(pDecoder) < 0) return -1; - if (tDecodeCStrTo(pDecoder, pExec->subKey) < 0) return -1; - if (tDecodeI64(pDecoder, &pExec->consumerId) < 0) return -1; - if (tDecodeI32(pDecoder, &pExec->epoch) < 0) return -1; - if (tDecodeI8(pDecoder, &pExec->subType) < 0) return -1; - if (tDecodeI8(pDecoder, &pExec->withTbName) < 0) return -1; - if (tDecodeI8(pDecoder, &pExec->withSchema) < 0) return -1; - if (tDecodeI8(pDecoder, &pExec->withTag) < 0) return -1; - if (pExec->subType == TOPIC_SUB_TYPE__TABLE) { - if (tDecodeCStrAlloc(pDecoder, &pExec->qmsg) < 0) return -1; - } - tEndDecode(pDecoder); - return 0; -} -int32_t tqUpdateTbUidList(STQ* pTq, const SArray* tbUidList, bool isAdd) { - void* pIter = NULL; - while (1) { - pIter = taosHashIterate(pTq->execs, pIter); - if (pIter == NULL) break; - STqExec* pExec = (STqExec*)pIter; - if (pExec->subType == TOPIC_SUB_TYPE__DB) { - if (!isAdd) { - int32_t sz = taosArrayGetSize(tbUidList); - for (int32_t i = 0; i < sz; i++) { - int64_t tbUid = *(int64_t*)taosArrayGet(tbUidList, i); - taosHashPut(pExec->pDropTbUid, &tbUid, sizeof(int64_t), NULL, 0); - } - } - } else { - for (int32_t i = 0; i < 5; i++) { - int32_t code = qUpdateQualifiedTableId(pExec->task[i], tbUidList, isAdd); - ASSERT(code == 0); - } - } - } - return 0; -} - -int32_t tqPushMsgNew(STQ* pTq, void* msg, int32_t msgLen, tmsg_t msgType, int64_t ver, SRpcHandleInfo handleInfo) { - if (msgType != TDMT_VND_SUBMIT) return 0; - void* pIter = NULL; - STqExec* pExec = NULL; - SSubmitReq* pReq = (SSubmitReq*)msg; - int32_t workerId = 4; - int64_t fetchOffset = ver; - - while (1) { - pIter = taosHashIterate(pTq->pushMgr, pIter); - if (pIter == NULL) break; - pExec = *(STqExec**)pIter; - - taosWLockLatch(&pExec->pushHandle.lock); - - SRpcMsg* pMsg = atomic_load_ptr(&pExec->pushHandle.handle); - ASSERT(pMsg); - - SMqDataBlkRsp rsp = {0}; - rsp.reqOffset = pExec->pushHandle.reqOffset; - rsp.blockData = taosArrayInit(0, sizeof(void*)); - rsp.blockDataLen = taosArrayInit(0, sizeof(int32_t)); - - if (pExec->subType == TOPIC_SUB_TYPE__TABLE) { - qTaskInfo_t task = pExec->task[workerId]; - ASSERT(task); - qSetStreamInput(task, pReq, STREAM_DATA_TYPE_SUBMIT_BLOCK); - while (1) { - SSDataBlock* pDataBlock = NULL; - uint64_t ts = 0; - if (qExecTask(task, &pDataBlock, &ts) < 0) { - ASSERT(0); - } - if (pDataBlock == NULL) break; - - ASSERT(pDataBlock->info.rows != 0); - ASSERT(pDataBlock->info.numOfCols != 0); - - int32_t dataStrLen = sizeof(SRetrieveTableRsp) + blockGetEncodeSize(pDataBlock); - void* buf = taosMemoryCalloc(1, dataStrLen); - SRetrieveTableRsp* pRetrieve = (SRetrieveTableRsp*)buf; - pRetrieve->useconds = ts; - pRetrieve->precision = TSDB_DEFAULT_PRECISION; - pRetrieve->compressed = 0; - pRetrieve->completed = 1; - pRetrieve->numOfRows = htonl(pDataBlock->info.rows); - - // TODO enable compress - int32_t actualLen = 0; - blockCompressEncode(pDataBlock, pRetrieve->data, &actualLen, pDataBlock->info.numOfCols, false); - actualLen += sizeof(SRetrieveTableRsp); - ASSERT(actualLen <= dataStrLen); - taosArrayPush(rsp.blockDataLen, &actualLen); - 
taosArrayPush(rsp.blockData, &buf); - rsp.blockNum++; - } - } else if (pExec->subType == TOPIC_SUB_TYPE__DB) { - STqReadHandle* pReader = pExec->pExecReader[workerId]; - tqReadHandleSetMsg(pReader, pReq, 0); - while (tqNextDataBlock(pReader)) { - SSDataBlock block = {0}; - if (tqRetrieveDataBlock(&block.pDataBlock, pReader, &block.info.groupId, &block.info.uid, &block.info.rows, - &block.info.numOfCols) < 0) { - ASSERT(0); - } - int32_t dataStrLen = sizeof(SRetrieveTableRsp) + blockGetEncodeSize(&block); - void* buf = taosMemoryCalloc(1, dataStrLen); - SRetrieveTableRsp* pRetrieve = (SRetrieveTableRsp*)buf; - /*pRetrieve->useconds = 0;*/ - pRetrieve->precision = TSDB_DEFAULT_PRECISION; - pRetrieve->compressed = 0; - pRetrieve->completed = 1; - pRetrieve->numOfRows = htonl(block.info.rows); - - // TODO enable compress - int32_t actualLen = 0; - blockCompressEncode(&block, pRetrieve->data, &actualLen, block.info.numOfCols, false); - actualLen += sizeof(SRetrieveTableRsp); - ASSERT(actualLen <= dataStrLen); - taosArrayPush(rsp.blockDataLen, &actualLen); - taosArrayPush(rsp.blockData, &buf); - rsp.blockNum++; - } - } else { - ASSERT(0); - } - - if (rsp.blockNum == 0) { - taosWUnLockLatch(&pExec->pushHandle.lock); - continue; - } - - ASSERT(taosArrayGetSize(rsp.blockData) == rsp.blockNum); - ASSERT(taosArrayGetSize(rsp.blockDataLen) == rsp.blockNum); - - rsp.rspOffset = fetchOffset; - - int32_t tlen = sizeof(SMqRspHead) + tEncodeSMqDataBlkRsp(NULL, &rsp); - void* buf = rpcMallocCont(tlen); - if (buf == NULL) { - pMsg->code = -1; - return -1; - } - - ((SMqRspHead*)buf)->mqMsgType = TMQ_MSG_TYPE__POLL_RSP; - ((SMqRspHead*)buf)->epoch = pExec->pushHandle.epoch; - ((SMqRspHead*)buf)->consumerId = pExec->pushHandle.consumerId; - - void* abuf = POINTER_SHIFT(buf, sizeof(SMqRspHead)); - tEncodeSMqDataBlkRsp(&abuf, &rsp); - - SRpcMsg resp = {.info = handleInfo, .pCont = buf, .contLen = tlen, .code = 0}; - tmsgSendRsp(&resp); - - atomic_store_ptr(&pExec->pushHandle.handle, NULL); - taosWUnLockLatch(&pExec->pushHandle.lock); - - tqDebug("vg %d offset %ld from consumer %ld (epoch %d) send rsp, block num: %d, reqOffset: %ld, rspOffset: %ld", - TD_VID(pTq->pVnode), fetchOffset, pExec->pushHandle.consumerId, pExec->pushHandle.epoch, rsp.blockNum, - rsp.reqOffset, rsp.rspOffset); - - // TODO destroy - taosArrayDestroy(rsp.blockData); - taosArrayDestroy(rsp.blockDataLen); +int32_t tqSendPollRsp(STQ* pTq, const SRpcMsg* pMsg, const SMqPollReq* pReq, const SMqDataBlkRsp* pRsp) { + int32_t tlen = sizeof(SMqRspHead) + tEncodeSMqDataBlkRsp(NULL, pRsp); + void* buf = rpcMallocCont(tlen); + if (buf == NULL) { + return -1; } - return 0; -} - -int tqPushMsg(STQ* pTq, void* msg, int32_t msgLen, tmsg_t msgType, int64_t ver) { - if (msgType == TDMT_VND_SUBMIT) { - if (taosHashGetSize(pTq->pStreamTasks) == 0) return 0; + ((SMqRspHead*)buf)->mqMsgType = TMQ_MSG_TYPE__POLL_RSP; + ((SMqRspHead*)buf)->epoch = pReq->epoch; + ((SMqRspHead*)buf)->consumerId = pReq->consumerId; - if (tdUpdateExpireWindow(pTq->pVnode->pSma, msg, ver) != 0) { - // TODO handle sma error - } - void* data = taosMemoryMalloc(msgLen); - if (data == NULL) { - return -1; - } - memcpy(data, msg, msgLen); + void* abuf = POINTER_SHIFT(buf, sizeof(SMqRspHead)); + tEncodeSMqDataBlkRsp(&abuf, pRsp); - tqProcessStreamTrigger(pTq, data); - } + SRpcMsg resp = { + .info = pMsg->info, + .pCont = buf, + .contLen = tlen, + .code = 0, + }; + tmsgSendRsp(&resp); - return 0; -} + tqDebug("vg %d from consumer %ld (epoch %d) send rsp, block num: %d, reqOffset: %ld, 
rspOffset: %ld", + TD_VID(pTq->pVnode), pReq->consumerId, pReq->epoch, pRsp->blockNum, pRsp->reqOffset, pRsp->rspOffset); -int tqCommit(STQ* pTq) { - // do nothing return 0; } int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg, int32_t workerId) { SMqPollReq* pReq = pMsg->pCont; int64_t consumerId = pReq->consumerId; - int64_t waitTime = pReq->waitTime; + int64_t timeout = pReq->timeout; int32_t reqEpoch = pReq->epoch; int64_t fetchOffset; + int32_t code = 0; // get offset to fetch message if (pReq->currentOffset == TMQ_CONF__RESET_OFFSET__EARLIEAST) { @@ -410,12 +129,12 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg, int32_t workerId) { tqDebug("tmq poll: consumer %ld (epoch %d) recv poll req in vg %d, req %ld %ld", consumerId, pReq->epoch, TD_VID(pTq->pVnode), pReq->currentOffset, fetchOffset); - STqExec* pExec = taosHashGet(pTq->execs, pReq->subKey, strlen(pReq->subKey)); - ASSERT(pExec); + STqHandle* pHandle = taosHashGet(pTq->handles, pReq->subKey, strlen(pReq->subKey)); + ASSERT(pHandle); - int32_t consumerEpoch = atomic_load_32(&pExec->epoch); + int32_t consumerEpoch = atomic_load_32(&pHandle->epoch); while (consumerEpoch < reqEpoch) { - consumerEpoch = atomic_val_compare_exchange_32(&pExec->epoch, consumerEpoch, reqEpoch); + consumerEpoch = atomic_val_compare_exchange_32(&pHandle->epoch, consumerEpoch, reqEpoch); } SWalHead* pHeadWithCkSum = taosMemoryMalloc(sizeof(SWalHead) + 2048); @@ -423,259 +142,103 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg, int32_t workerId) { return -1; } - walSetReaderCapacity(pExec->pWalReader, 2048); + walSetReaderCapacity(pHandle->pWalReader, 2048); SMqDataBlkRsp rsp = {0}; rsp.reqOffset = pReq->currentOffset; - rsp.withSchema = pExec->withSchema; rsp.blockData = taosArrayInit(0, sizeof(void*)); rsp.blockDataLen = taosArrayInit(0, sizeof(int32_t)); - rsp.blockSchema = taosArrayInit(0, sizeof(void*)); - rsp.blockTbName = taosArrayInit(0, sizeof(void*)); - int8_t withTbName = pExec->withTbName; - if (pReq->withTbName != -1) { - withTbName = pReq->withTbName; + rsp.withTbName = pReq->withTbName; + if (rsp.withTbName) { + rsp.blockTbName = taosArrayInit(0, sizeof(void*)); + } + if (pHandle->execHandle.subType == TOPIC_SUB_TYPE__COLUMN) { + rsp.withSchema = false; + rsp.withTag = false; + } else { + rsp.withSchema = true; + rsp.withTag = false; + rsp.blockSchema = taosArrayInit(0, sizeof(void*)); } - rsp.withTbName = withTbName; while (1) { - consumerEpoch = atomic_load_32(&pExec->epoch); + consumerEpoch = atomic_load_32(&pHandle->epoch); if (consumerEpoch > reqEpoch) { - tqDebug("tmq poll: consumer %ld (epoch %d) vg %d offset %ld, found new consumer epoch %d discard req epoch %d", - consumerId, pReq->epoch, TD_VID(pTq->pVnode), fetchOffset, consumerEpoch, reqEpoch); + tqWarn("tmq poll: consumer %ld (epoch %d) vg %d offset %ld, found new consumer epoch %d, discard req epoch %d", + consumerId, pReq->epoch, TD_VID(pTq->pVnode), fetchOffset, consumerEpoch, reqEpoch); break; } - taosThreadMutexLock(&pExec->pWalReader->mutex); - - if (walFetchHead(pExec->pWalReader, fetchOffset, pHeadWithCkSum) < 0) { - tqDebug("tmq poll: consumer %ld (epoch %d) vg %d offset %ld, no more log to return", consumerId, pReq->epoch, - TD_VID(pTq->pVnode), fetchOffset); - taosThreadMutexUnlock(&pExec->pWalReader->mutex); + if (tqFetchLog(pTq, pHandle, &fetchOffset, &pHeadWithCkSum) < 0) { + // TODO add push mgr break; } - if (pHeadWithCkSum->head.msgType != TDMT_VND_SUBMIT) { - ASSERT(walSkipFetchBody(pExec->pWalReader, pHeadWithCkSum) == 0); - } else { - 
ASSERT(walFetchBody(pExec->pWalReader, &pHeadWithCkSum) == 0); - } - SWalReadHead* pHead = &pHeadWithCkSum->head; - taosThreadMutexUnlock(&pExec->pWalReader->mutex); - -#if 0 - SWalReadHead* pHead; - if (walReadWithHandle_s(pExec->pWalReader, fetchOffset, &pHead) < 0) { - // TODO: no more log, set timer to wait blocking time - // if data inserted during waiting, launch query and - // response to user - tqDebug("tmq poll: consumer %ld (epoch %d) vg %d offset %ld, no more log to return", consumerId, pReq->epoch, - TD_VID(pTq->pVnode), fetchOffset); - -#if 0 - // add to pushMgr - taosWLockLatch(&pExec->pushHandle.lock); - - pExec->pushHandle.consumerId = consumerId; - pExec->pushHandle.epoch = reqEpoch; - pExec->pushHandle.reqOffset = rsp.reqOffset; - pExec->pushHandle.skipLogNum = rsp.skipLogNum; - pExec->pushHandle.handle = pMsg; - - taosWUnLockLatch(&pExec->pushHandle.lock); - - // TODO add timer - - // TODO: the pointer will always be valid? - taosHashPut(pTq->pushMgr, &consumerId, sizeof(int64_t), &pExec, sizeof(void*)); - taosArrayDestroy(rsp.blockData); - taosArrayDestroy(rsp.blockDataLen); - return 0; -#endif - - break; - } -#endif - tqDebug("tmq poll: consumer %ld (epoch %d) iter log, vg %d offset %ld msgType %d", consumerId, pReq->epoch, TD_VID(pTq->pVnode), fetchOffset, pHead->msgType); if (pHead->msgType == TDMT_VND_SUBMIT) { SSubmitReq* pCont = (SSubmitReq*)&pHead->body; - // table subscribe - if (pExec->subType == TOPIC_SUB_TYPE__TABLE) { - qTaskInfo_t task = pExec->task[workerId]; - ASSERT(task); - qSetStreamInput(task, pCont, STREAM_DATA_TYPE_SUBMIT_BLOCK); - while (1) { - SSDataBlock* pDataBlock = NULL; - uint64_t ts = 0; - if (qExecTask(task, &pDataBlock, &ts) < 0) { - ASSERT(0); - } - if (pDataBlock == NULL) break; - - ASSERT(pDataBlock->info.rows != 0); - ASSERT(pDataBlock->info.numOfCols != 0); - - int32_t dataStrLen = sizeof(SRetrieveTableRsp) + blockGetEncodeSize(pDataBlock); - void* buf = taosMemoryCalloc(1, dataStrLen); - SRetrieveTableRsp* pRetrieve = (SRetrieveTableRsp*)buf; - pRetrieve->useconds = ts; - pRetrieve->precision = TSDB_DEFAULT_PRECISION; - pRetrieve->compressed = 0; - pRetrieve->completed = 1; - pRetrieve->numOfRows = htonl(pDataBlock->info.rows); - - // TODO enable compress - int32_t actualLen = 0; - blockCompressEncode(pDataBlock, pRetrieve->data, &actualLen, pDataBlock->info.numOfCols, false); - actualLen += sizeof(SRetrieveTableRsp); - ASSERT(actualLen <= dataStrLen); - taosArrayPush(rsp.blockDataLen, &actualLen); - taosArrayPush(rsp.blockData, &buf); - - if (pExec->withSchema) { - SSchemaWrapper* pSW = tCloneSSchemaWrapper(pExec->pExecReader[workerId]->pSchemaWrapper); - taosArrayPush(rsp.blockSchema, &pSW); - } - - if (withTbName) { - SMetaReader mr = {0}; - metaReaderInit(&mr, pTq->pVnode->pMeta, 0); - int64_t uid = pExec->pExecReader[workerId]->msgIter.uid; - if (metaGetTableEntryByUid(&mr, uid) < 0) { - ASSERT(0); - } - char* tbName = strdup(mr.me.name); - taosArrayPush(rsp.blockTbName, &tbName); - metaReaderClear(&mr); - } - - rsp.blockNum++; - } - // db subscribe - } else if (pExec->subType == TOPIC_SUB_TYPE__DB) { - rsp.withSchema = 1; - STqReadHandle* pReader = pExec->pExecReader[workerId]; - tqReadHandleSetMsg(pReader, pCont, 0); - while (tqNextDataBlockFilterOut(pReader, pExec->pDropTbUid)) { - SSDataBlock block = {0}; - if (tqRetrieveDataBlock(&block.pDataBlock, pReader, &block.info.groupId, &block.info.uid, &block.info.rows, - &block.info.numOfCols) < 0) { - if (terrno == TSDB_CODE_TQ_TABLE_SCHEMA_NOT_FOUND) continue; - ASSERT(0); - } - 
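Each branch of this legacy poll path sizes one contiguous buffer as sizeof(SRetrieveTableRsp) plus the encoded block, stores the row count big-endian with htonl(), and records the actual length after encoding; the new tqAddBlockDataToRsp() in tqExec.c keeps the same shape. A self-contained sketch of that header-plus-payload pattern, with hypothetical Demo*/demo* names:

    #include <arpa/inet.h>  // htonl(); assumes a POSIX platform
    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>

    typedef struct {
      int32_t numOfRows;  // stored big-endian, as htonl() is used in the diff
      char    data[];     // encoded block payload follows the fixed header
    } DemoRetrieveRsp;

    // Allocate header + payload in one block and report the actual total length.
    static DemoRetrieveRsp *demoBuildRsp(const void *payload, int32_t payloadLen, int32_t nRows,
                                         int32_t *pActualLen) {
      DemoRetrieveRsp *pRsp = calloc(1, sizeof(*pRsp) + (size_t)payloadLen);
      if (pRsp == NULL) return NULL;
      pRsp->numOfRows = (int32_t)htonl((uint32_t)nRows);
      memcpy(pRsp->data, payload, (size_t)payloadLen);
      *pActualLen = (int32_t)sizeof(*pRsp) + payloadLen;
      return pRsp;
    }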
int32_t dataStrLen = sizeof(SRetrieveTableRsp) + blockGetEncodeSize(&block); - void* buf = taosMemoryCalloc(1, dataStrLen); - SRetrieveTableRsp* pRetrieve = (SRetrieveTableRsp*)buf; - /*pRetrieve->useconds = 0;*/ - pRetrieve->precision = TSDB_DEFAULT_PRECISION; - pRetrieve->compressed = 0; - pRetrieve->completed = 1; - pRetrieve->numOfRows = htonl(block.info.rows); - - // TODO enable compress - int32_t actualLen = 0; - blockCompressEncode(&block, pRetrieve->data, &actualLen, block.info.numOfCols, false); - actualLen += sizeof(SRetrieveTableRsp); - ASSERT(actualLen <= dataStrLen); - taosArrayPush(rsp.blockDataLen, &actualLen); - taosArrayPush(rsp.blockData, &buf); - if (withTbName) { - SMetaReader mr = {0}; - metaReaderInit(&mr, pTq->pVnode->pMeta, 0); - if (metaGetTableEntryByUid(&mr, block.info.uid) < 0) { - ASSERT(0); - } - char* tbName = strdup(mr.me.name); - taosArrayPush(rsp.blockTbName, &tbName); - metaReaderClear(&mr); - } - - SSchemaWrapper* pSW = tCloneSSchemaWrapper(pExec->pExecReader[workerId]->pSchemaWrapper); - taosArrayPush(rsp.blockSchema, &pSW); - - rsp.blockNum++; - } - } else { - ASSERT(0); + + if (tqDataExec(pTq, &pHandle->execHandle, pCont, &rsp, workerId) < 0) { + /*ASSERT(0);*/ } + } else { + // TODO + ASSERT(0); } // TODO batch optimization: // TODO continue scan until meeting batch requirement - if (rsp.blockNum != 0) break; - rsp.skipLogNum++; - fetchOffset++; + if (rsp.blockNum > 0 /* threshold */) { + break; + } else { + fetchOffset++; + } } taosMemoryFree(pHeadWithCkSum); + ASSERT(taosArrayGetSize(rsp.blockData) == rsp.blockNum); ASSERT(taosArrayGetSize(rsp.blockDataLen) == rsp.blockNum); - - if (rsp.blockNum != 0) - rsp.rspOffset = fetchOffset; - else - rsp.rspOffset = fetchOffset - 1; - - int32_t tlen = sizeof(SMqRspHead) + tEncodeSMqDataBlkRsp(NULL, &rsp); - void* buf = rpcMallocCont(tlen); - if (buf == NULL) { - pMsg->code = -1; - return -1; + if (rsp.withSchema) { + ASSERT(taosArrayGetSize(rsp.blockSchema) == rsp.blockNum); } - ((SMqRspHead*)buf)->mqMsgType = TMQ_MSG_TYPE__POLL_RSP; - ((SMqRspHead*)buf)->epoch = pReq->epoch; - ((SMqRspHead*)buf)->consumerId = consumerId; - - void* abuf = POINTER_SHIFT(buf, sizeof(SMqRspHead)); - tEncodeSMqDataBlkRsp(&abuf, &rsp); - - SRpcMsg resp = {.info = pMsg->info, .pCont = buf, .contLen = tlen, .code = 0}; - tmsgSendRsp(&resp); + rsp.rspOffset = fetchOffset; - tqDebug("vg %d offset %ld from consumer %ld (epoch %d) send rsp, block num: %d, reqOffset: %ld, rspOffset: %ld", - TD_VID(pTq->pVnode), fetchOffset, consumerId, pReq->epoch, rsp.blockNum, rsp.reqOffset, rsp.rspOffset); + if (tqSendPollRsp(pTq, pMsg, pReq, &rsp) < 0) { + code = -1; + } - // TODO destroy + // TODO wrap in destroy func taosArrayDestroy(rsp.blockData); taosArrayDestroy(rsp.blockDataLen); - taosArrayDestroyP(rsp.blockSchema, (FDelete)tDeleteSSchemaWrapper); - taosArrayDestroyP(rsp.blockTbName, (FDelete)taosMemoryFree); - return 0; + if (rsp.withSchema) { + taosArrayDestroyP(rsp.blockSchema, (FDelete)tDeleteSSchemaWrapper); + } + + if (rsp.withTbName) { + taosArrayDestroyP(rsp.blockTbName, (FDelete)taosMemoryFree); + } + + return code; } int32_t tqProcessVgDeleteReq(STQ* pTq, char* msg, int32_t msgLen) { SMqVDeleteReq* pReq = (SMqVDeleteReq*)msg; - int32_t code = taosHashRemove(pTq->execs, pReq->subKey, strlen(pReq->subKey)); + int32_t code = taosHashRemove(pTq->handles, pReq->subKey, strlen(pReq->subKey)); ASSERT(code == 0); - TXN txn; - - if (tdbTxnOpen(&txn, 0, tdbDefaultMalloc, tdbDefaultFree, NULL, TDB_TXN_WRITE | TDB_TXN_READ_UNCOMMITTED) < 0) { 
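The block being removed here is the same TDB transaction envelope that this patch centralizes in tqMeta.c (tqMetaSaveHandle()/tqMetaDeleteHandle()). Condensed from the calls visible in this diff — not standalone code, with pMetaStore/pStore/key/buf/vlen standing in for the caller's state:

    TXN txn;
    if (tdbTxnOpen(&txn, 0, tdbDefaultMalloc, tdbDefaultFree, NULL,
                   TDB_TXN_WRITE | TDB_TXN_READ_UNCOMMITTED) < 0) return -1;  // writable txn
    if (tdbBegin(pMetaStore, &txn) < 0) return -1;                  // begin on the meta store
    if (tdbTbUpsert(pStore, key, (int)strlen(key), buf, vlen, &txn) < 0) return -1;  // or tdbTbDelete()
    if (tdbCommit(pMetaStore, &txn) < 0) return -1;                 // make the change durable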
+ if (tqMetaDeleteHandle(pTq, pReq->subKey) < 0) { ASSERT(0); } - - if (tdbBegin(pTq->pMetaStore, &txn) < 0) { - ASSERT(0); - } - - if (tdbTbDelete(pTq->pExecStore, pReq->subKey, (int)strlen(pReq->subKey), &txn) < 0) { - /*ASSERT(0);*/ - } - - if (tdbCommit(pTq->pMetaStore, &txn) < 0) { - ASSERT(0); - } - return 0; } @@ -684,81 +247,67 @@ int32_t tqProcessVgChangeReq(STQ* pTq, char* msg, int32_t msgLen) { SMqRebVgReq req = {0}; tDecodeSMqRebVgReq(msg, &req); // todo lock - STqExec* pExec = taosHashGet(pTq->execs, req.subKey, strlen(req.subKey)); - if (pExec == NULL) { + STqHandle* pHandle = taosHashGet(pTq->handles, req.subKey, strlen(req.subKey)); + if (pHandle == NULL) { ASSERT(req.oldConsumerId == -1); ASSERT(req.newConsumerId != -1); - STqExec exec = {0}; - pExec = &exec; + STqHandle tqHandle = {0}; + pHandle = &tqHandle; /*taosInitRWLatch(&pExec->lock);*/ - memcpy(pExec->subKey, req.subKey, TSDB_SUBSCRIBE_KEY_LEN); - pExec->consumerId = req.newConsumerId; - pExec->epoch = -1; + memcpy(pHandle->subKey, req.subKey, TSDB_SUBSCRIBE_KEY_LEN); + pHandle->consumerId = req.newConsumerId; + pHandle->epoch = -1; - pExec->subType = req.subType; - pExec->withTbName = req.withTbName; - pExec->withSchema = req.withSchema; - pExec->withTag = req.withTag; + pHandle->execHandle.subType = req.subType; - pExec->qmsg = req.qmsg; - req.qmsg = NULL; - - pExec->pWalReader = walOpenReadHandle(pTq->pVnode->pWal); - if (pExec->subType == TOPIC_SUB_TYPE__TABLE) { + pHandle->pWalReader = walOpenReadHandle(pTq->pVnode->pWal); + for (int32_t i = 0; i < 5; i++) { + pHandle->execHandle.pExecReader[i] = tqInitSubmitMsgScanner(pTq->pVnode->pMeta); + } + if (pHandle->execHandle.subType == TOPIC_SUB_TYPE__COLUMN) { + pHandle->execHandle.exec.execCol.qmsg = req.qmsg; + req.qmsg = NULL; for (int32_t i = 0; i < 5; i++) { - pExec->pExecReader[i] = tqInitSubmitMsgScanner(pTq->pVnode->pMeta); - SReadHandle handle = { - .reader = pExec->pExecReader[i], + .reader = pHandle->execHandle.pExecReader[i], .meta = pTq->pVnode->pMeta, .pMsgCb = &pTq->pVnode->msgCb, }; - pExec->task[i] = qCreateStreamExecTaskInfo(pExec->qmsg, &handle); - ASSERT(pExec->task[i]); + pHandle->execHandle.exec.execCol.task[i] = + qCreateStreamExecTaskInfo(pHandle->execHandle.exec.execCol.qmsg, &handle); + ASSERT(pHandle->execHandle.exec.execCol.task[i]); + } + } else if (pHandle->execHandle.subType == TOPIC_SUB_TYPE__DB) { + pHandle->execHandle.exec.execDb.pFilterOutTbUid = + taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK); + } else if (pHandle->execHandle.subType == TOPIC_SUB_TYPE__TABLE) { + pHandle->execHandle.exec.execTb.suid = req.suid; + SArray* tbUidList = taosArrayInit(0, sizeof(int64_t)); + tsdbGetCtbIdList(pTq->pVnode->pMeta, req.suid, tbUidList); + tqDebug("vg %d, tq try get suid: %ld", pTq->pVnode->config.vgId, req.suid); + for (int32_t i = 0; i < taosArrayGetSize(tbUidList); i++) { + int64_t tbUid = *(int64_t*)taosArrayGet(tbUidList, i); + tqDebug("vg %d, idx %d, uid: %ld", pTq->pVnode->config.vgId, i, tbUid); } - } else { for (int32_t i = 0; i < 5; i++) { - pExec->pExecReader[i] = tqInitSubmitMsgScanner(pTq->pVnode->pMeta); + tqReadHandleSetTbUidList(pHandle->execHandle.pExecReader[i], tbUidList); } - pExec->pDropTbUid = taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK); - } - taosHashPut(pTq->execs, req.subKey, strlen(req.subKey), pExec, sizeof(STqExec)); - - if (tqStoreExec(pTq, req.subKey, pExec) < 0) { - // TODO + taosArrayDestroy(tbUidList); } - return 0; + 
taosHashPut(pTq->handles, req.subKey, strlen(req.subKey), pHandle, sizeof(STqHandle)); } else { /*ASSERT(pExec->consumerId == req.oldConsumerId);*/ // TODO handle qmsg and exec modification - atomic_store_32(&pExec->epoch, -1); - atomic_store_64(&pExec->consumerId, req.newConsumerId); - atomic_add_fetch_32(&pExec->epoch, 1); - - if (tqStoreExec(pTq, req.subKey, pExec) < 0) { - // TODO - } - return 0; + atomic_store_32(&pHandle->epoch, -1); + atomic_store_64(&pHandle->consumerId, req.newConsumerId); + atomic_add_fetch_32(&pHandle->epoch, 1); } -} - -void tqTableSink(SStreamTask* pTask, void* vnode, int64_t ver, void* data) { - const SArray* pRes = (const SArray*)data; - SVnode* pVnode = (SVnode*)vnode; - - ASSERT(pTask->tbSink.pTSchema); - SSubmitReq* pReq = tdBlockToSubmit(pRes, pTask->tbSink.pTSchema, true, pTask->tbSink.stbUid, - pTask->tbSink.stbFullName, pVnode->config.vgId); - /*tPrintFixedSchemaSubmitReq(pReq, pTask->tbSink.pTSchema);*/ - // build write msg - SRpcMsg msg = { - .msgType = TDMT_VND_SUBMIT, - .pCont = pReq, - .contLen = ntohl(pReq->length), - }; - ASSERT(tmsgPutToQueue(&pVnode->msgCb, WRITE_QUEUE, &msg) == 0); + if (tqMetaSaveHandle(pTq, req.subKey, pHandle) < 0) { + // TODO + } + return 0; } int32_t tqProcessTaskDeploy(STQ* pTq, char* msg, int32_t msgLen) { @@ -851,9 +400,11 @@ int32_t tqProcessStreamTrigger(STQ* pTq, SSubmitReq* pReq) { continue; } - streamDataSubmitRefInc(pSubmit); - SStreamDataSubmit* pSubmitClone = taosAllocateQitem(sizeof(SStreamDataSubmit), DEF_QITEM); - memcpy(pSubmitClone, pSubmit, sizeof(SStreamDataSubmit)); + SStreamDataSubmit* pSubmitClone = streamSubmitRefClone(pSubmit); + if (pSubmitClone == NULL) { + atomic_store_8(&pTask->inputStatus, TASK_INPUT_STATUS__FAILED); + continue; + } taosWriteQitem(pTask->inputQ, pSubmitClone); int8_t execStatus = atomic_load_8(&pTask->status); diff --git a/source/dnode/vnode/src/tq/tqCommit.c b/source/dnode/vnode/src/tq/tqCommit.c index e31566f3faca14b0955b851f654247355f500630..7b116bff2e942bf1a461458ea443548e708756eb 100644 --- a/source/dnode/vnode/src/tq/tqCommit.c +++ b/source/dnode/vnode/src/tq/tqCommit.c @@ -14,3 +14,8 @@ */ #include "tq.h" + +int tqCommit(STQ* pTq) { + // do nothing + return 0; +} diff --git a/source/dnode/vnode/src/tq/tqExec.c b/source/dnode/vnode/src/tq/tqExec.c new file mode 100644 index 0000000000000000000000000000000000000000..b8fec34b57f49ed732f3a2f3820ec50b367937fb --- /dev/null +++ b/source/dnode/vnode/src/tq/tqExec.c @@ -0,0 +1,124 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +#include "tq.h" + +static int32_t tqAddBlockDataToRsp(const SSDataBlock* pBlock, SMqDataBlkRsp* pRsp) { + int32_t dataStrLen = sizeof(SRetrieveTableRsp) + blockGetEncodeSize(pBlock); + void* buf = taosMemoryCalloc(1, dataStrLen); + if (buf == NULL) return -1; + + SRetrieveTableRsp* pRetrieve = (SRetrieveTableRsp*)buf; + pRetrieve->useconds = 0; + pRetrieve->precision = TSDB_DEFAULT_PRECISION; + pRetrieve->compressed = 0; + pRetrieve->completed = 1; + pRetrieve->numOfRows = htonl(pBlock->info.rows); + + // TODO enable compress + int32_t actualLen = 0; + blockCompressEncode(pBlock, pRetrieve->data, &actualLen, pBlock->info.numOfCols, false); + actualLen += sizeof(SRetrieveTableRsp); + ASSERT(actualLen <= dataStrLen); + taosArrayPush(pRsp->blockDataLen, &actualLen); + taosArrayPush(pRsp->blockData, &buf); + return 0; +} + +static int32_t tqAddBlockSchemaToRsp(const STqExecHandle* pExec, int32_t workerId, SMqDataBlkRsp* pRsp) { + SSchemaWrapper* pSW = tCloneSSchemaWrapper(pExec->pExecReader[workerId]->pSchemaWrapper); + taosArrayPush(pRsp->blockSchema, &pSW); + return 0; +} + +static int32_t tqAddTbNameToRsp(const STQ* pTq, const STqExecHandle* pExec, SMqDataBlkRsp* pRsp, int32_t workerId) { + SMetaReader mr = {0}; + metaReaderInit(&mr, pTq->pVnode->pMeta, 0); + int64_t uid = pExec->pExecReader[workerId]->msgIter.uid; + if (metaGetTableEntryByUid(&mr, uid) < 0) { + ASSERT(0); + return -1; + } + char* tbName = strdup(mr.me.name); + taosArrayPush(pRsp->blockTbName, &tbName); + metaReaderClear(&mr); + return 0; +} + +int32_t tqDataExec(STQ* pTq, STqExecHandle* pExec, SSubmitReq* pReq, SMqDataBlkRsp* pRsp, int32_t workerId) { + if (pExec->subType == TOPIC_SUB_TYPE__COLUMN) { + qTaskInfo_t task = pExec->exec.execCol.task[workerId]; + ASSERT(task); + qSetStreamInput(task, pReq, STREAM_DATA_TYPE_SUBMIT_BLOCK, false); + while (1) { + SSDataBlock* pDataBlock = NULL; + uint64_t ts = 0; + if (qExecTask(task, &pDataBlock, &ts) < 0) { + ASSERT(0); + } + if (pDataBlock == NULL) break; + + ASSERT(pDataBlock->info.rows != 0); + ASSERT(pDataBlock->info.numOfCols != 0); + + tqAddBlockDataToRsp(pDataBlock, pRsp); + if (pRsp->withTbName) { + tqAddTbNameToRsp(pTq, pExec, pRsp, workerId); + } + pRsp->blockNum++; + } + } else if (pExec->subType == TOPIC_SUB_TYPE__TABLE) { + pRsp->withSchema = 1; + STqReadHandle* pReader = pExec->pExecReader[workerId]; + tqReadHandleSetMsg(pReader, pReq, 0); + while (tqNextDataBlock(pReader)) { + SSDataBlock block = {0}; + if (tqRetrieveDataBlock(&block.pDataBlock, pReader, &block.info.groupId, &block.info.uid, &block.info.rows, + &block.info.numOfCols) < 0) { + if (terrno == TSDB_CODE_TQ_TABLE_SCHEMA_NOT_FOUND) continue; + ASSERT(0); + } + tqAddBlockDataToRsp(&block, pRsp); + if (pRsp->withTbName) { + tqAddTbNameToRsp(pTq, pExec, pRsp, workerId); + } + tqAddBlockSchemaToRsp(pExec, workerId, pRsp); + pRsp->blockNum++; + } + } else if (pExec->subType == TOPIC_SUB_TYPE__DB) { + pRsp->withSchema = 1; + STqReadHandle* pReader = pExec->pExecReader[workerId]; + tqReadHandleSetMsg(pReader, pReq, 0); + while (tqNextDataBlockFilterOut(pReader, pExec->exec.execDb.pFilterOutTbUid)) { + SSDataBlock block = {0}; + if (tqRetrieveDataBlock(&block.pDataBlock, pReader, &block.info.groupId, &block.info.uid, &block.info.rows, + &block.info.numOfCols) < 0) { + if (terrno == TSDB_CODE_TQ_TABLE_SCHEMA_NOT_FOUND) continue; + ASSERT(0); + } + tqAddBlockDataToRsp(&block, pRsp); + if (pRsp->withTbName) { + tqAddTbNameToRsp(pTq, pExec, pRsp, workerId); + } + tqAddBlockSchemaToRsp(pExec, workerId, 
pRsp); + pRsp->blockNum++; + } + } + if (pRsp->blockNum == 0) { + pRsp->skipLogNum++; + return -1; + } + return 0; +} diff --git a/source/dnode/vnode/src/tq/tqMeta.c b/source/dnode/vnode/src/tq/tqMeta.c new file mode 100644 index 0000000000000000000000000000000000000000..9447c4007b87cd9dd256c555df1ac4eb431edaee --- /dev/null +++ b/source/dnode/vnode/src/tq/tqMeta.c @@ -0,0 +1,174 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ +#include "tdbInt.h" +#include "tq.h" + +static int32_t tEncodeSTqHandle(SEncoder* pEncoder, const STqHandle* pHandle) { + if (tStartEncode(pEncoder) < 0) return -1; + if (tEncodeCStr(pEncoder, pHandle->subKey) < 0) return -1; + if (tEncodeI64(pEncoder, pHandle->consumerId) < 0) return -1; + if (tEncodeI32(pEncoder, pHandle->epoch) < 0) return -1; + if (tEncodeI8(pEncoder, pHandle->execHandle.subType) < 0) return -1; + if (pHandle->execHandle.subType == TOPIC_SUB_TYPE__COLUMN) { + if (tEncodeCStr(pEncoder, pHandle->execHandle.exec.execCol.qmsg) < 0) return -1; + } + tEndEncode(pEncoder); + return pEncoder->pos; +} + +static int32_t tDecodeSTqHandle(SDecoder* pDecoder, STqHandle* pHandle) { + if (tStartDecode(pDecoder) < 0) return -1; + if (tDecodeCStrTo(pDecoder, pHandle->subKey) < 0) return -1; + if (tDecodeI64(pDecoder, &pHandle->consumerId) < 0) return -1; + if (tDecodeI32(pDecoder, &pHandle->epoch) < 0) return -1; + if (tDecodeI8(pDecoder, &pHandle->execHandle.subType) < 0) return -1; + if (pHandle->execHandle.subType == TOPIC_SUB_TYPE__COLUMN) { + if (tDecodeCStrAlloc(pDecoder, &pHandle->execHandle.exec.execCol.qmsg) < 0) return -1; + } + tEndDecode(pDecoder); + return 0; +} + +int tqExecKeyCompare(const void* pKey1, int32_t kLen1, const void* pKey2, int32_t kLen2) { + return strcmp(pKey1, pKey2); +} + +int32_t tqMetaOpen(STQ* pTq) { + if (tdbOpen(pTq->path, 16 * 1024, 1, &pTq->pMetaStore) < 0) { + ASSERT(0); + } + + if (tdbTbOpen("handles", -1, -1, tqExecKeyCompare, pTq->pMetaStore, &pTq->pExecStore) < 0) { + ASSERT(0); + } + + TXN txn; + + if (tdbTxnOpen(&txn, 0, tdbDefaultMalloc, tdbDefaultFree, NULL, 0) < 0) { + ASSERT(0); + } + + TBC* pCur; + if (tdbTbcOpen(pTq->pExecStore, &pCur, &txn) < 0) { + ASSERT(0); + } + + void* pKey; + int kLen; + void* pVal; + int vLen; + + tdbTbcMoveToFirst(pCur); + SDecoder decoder; + + while (tdbTbcNext(pCur, &pKey, &kLen, &pVal, &vLen) == 0) { + STqHandle handle; + tDecoderInit(&decoder, (uint8_t*)pVal, vLen); + tDecodeSTqHandle(&decoder, &handle); + handle.pWalReader = walOpenReadHandle(pTq->pVnode->pWal); + for (int32_t i = 0; i < 5; i++) { + handle.execHandle.pExecReader[i] = tqInitSubmitMsgScanner(pTq->pVnode->pMeta); + } + if (handle.execHandle.subType == TOPIC_SUB_TYPE__COLUMN) { + for (int32_t i = 0; i < 5; i++) { + SReadHandle reader = { + .reader = handle.execHandle.pExecReader[i], + .meta = pTq->pVnode->pMeta, + .pMsgCb = &pTq->pVnode->msgCb, + }; + handle.execHandle.exec.execCol.task[i] = + qCreateStreamExecTaskInfo(handle.execHandle.exec.execCol.qmsg, &reader); + 
ASSERT(handle.execHandle.exec.execCol.task[i]); + } + } else { + handle.execHandle.exec.execDb.pFilterOutTbUid = + taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK); + } + taosHashPut(pTq->handles, pKey, kLen, &handle, sizeof(STqHandle)); + } + + if (tdbTxnClose(&txn) < 0) { + ASSERT(0); + } + return 0; +} + +int32_t tqMetaClose(STQ* pTq) { + tdbClose(pTq->pMetaStore); + return 0; +} + +int32_t tqMetaSaveHandle(STQ* pTq, const char* key, const STqHandle* pHandle) { + int32_t code; + int32_t vlen; + tEncodeSize(tEncodeSTqHandle, pHandle, vlen, code); + ASSERT(code == 0); + + void* buf = taosMemoryCalloc(1, vlen); + if (buf == NULL) { + ASSERT(0); + } + + SEncoder encoder; + tEncoderInit(&encoder, buf, vlen); + + if (tEncodeSTqHandle(&encoder, pHandle) < 0) { + ASSERT(0); + } + + TXN txn; + + if (tdbTxnOpen(&txn, 0, tdbDefaultMalloc, tdbDefaultFree, NULL, TDB_TXN_WRITE | TDB_TXN_READ_UNCOMMITTED) < 0) { + ASSERT(0); + } + + if (tdbBegin(pTq->pMetaStore, &txn) < 0) { + ASSERT(0); + } + + if (tdbTbUpsert(pTq->pExecStore, key, (int)strlen(key), buf, vlen, &txn) < 0) { + ASSERT(0); + } + + if (tdbCommit(pTq->pMetaStore, &txn) < 0) { + ASSERT(0); + } + + tEncoderClear(&encoder); + taosMemoryFree(buf); + return 0; +} + +int32_t tqMetaDeleteHandle(STQ* pTq, const char* key) { + TXN txn; + + if (tdbTxnOpen(&txn, 0, tdbDefaultMalloc, tdbDefaultFree, NULL, TDB_TXN_WRITE | TDB_TXN_READ_UNCOMMITTED) < 0) { + ASSERT(0); + } + + if (tdbBegin(pTq->pMetaStore, &txn) < 0) { + ASSERT(0); + } + + if (tdbTbDelete(pTq->pExecStore, key, (int)strlen(key), &txn) < 0) { + /*ASSERT(0);*/ + } + + if (tdbCommit(pTq->pMetaStore, &txn) < 0) { + ASSERT(0); + } + + return 0; +} diff --git a/source/dnode/vnode/src/tq/tqOffset.c b/source/dnode/vnode/src/tq/tqOffset.c index 90f512611b1100bc79a6e85784ad87ebe10380c2..4d83a67579f89c24bde1c4724fdaacd1666bcfdd 100644 --- a/source/dnode/vnode/src/tq/tqOffset.c +++ b/source/dnode/vnode/src/tq/tqOffset.c @@ -30,7 +30,7 @@ struct STqOffsetStore { SHashObj* pHash; // SHashObj }; -STqOffsetStore* STqOffsetOpen(STqOffsetCfg* pCfg) { +STqOffsetStore* tqOffsetOpen(STqOffsetCfg* pCfg) { STqOffsetStore* pStore = taosMemoryMalloc(sizeof(STqOffsetStore)); if (pStore == NULL) { return NULL; diff --git a/source/dnode/vnode/src/tq/tqPush.c b/source/dnode/vnode/src/tq/tqPush.c index f2f48bbc8a69a022d0fc6b8a88c5a9a55d0b4ad6..d94c3e387a09dd891825a6d6ed11b96a248d9605 100644 --- a/source/dnode/vnode/src/tq/tqPush.c +++ b/source/dnode/vnode/src/tq/tqPush.c @@ -12,3 +12,244 @@ * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . 
*/ + +#include "tq.h" + +void tqTmrRspFunc(void* param, void* tmrId) { + STqHandle* pHandle = (STqHandle*)param; + atomic_store_8(&pHandle->pushHandle.tmrStopped, 1); +} + +static int32_t tqLoopExecFromQueue(STQ* pTq, STqHandle* pHandle, SStreamDataSubmit** ppSubmit, SMqDataBlkRsp* pRsp) { + SStreamDataSubmit* pSubmit = *ppSubmit; + while (pSubmit != NULL) { + ASSERT(pSubmit->ver == pHandle->pushHandle.processedVer + 1); + if (tqDataExec(pTq, &pHandle->execHandle, pSubmit->data, pRsp, 0) < 0) { + /*ASSERT(0);*/ + } + // update processed + atomic_store_64(&pHandle->pushHandle.processedVer, pSubmit->ver); + streamQSetSuccess(&pHandle->pushHandle.inputQ); + streamDataSubmitRefDec(pSubmit); + if (pRsp->blockNum > 0) { + *ppSubmit = pSubmit; + return 0; + } else { + pSubmit = streamQNextItem(&pHandle->pushHandle.inputQ); + } + } + *ppSubmit = pSubmit; + return -1; +} + +int32_t tqExecFromInputQ(STQ* pTq, STqHandle* pHandle) { + SMqDataBlkRsp rsp = {0}; + // 1. guard and set status executing + int8_t execStatus = + atomic_val_compare_exchange_8(&pHandle->pushHandle.execStatus, TASK_STATUS__IDLE, TASK_STATUS__EXECUTING); + if (execStatus == TASK_STATUS__IDLE) { + SStreamDataSubmit* pSubmit = NULL; + // 2. check processedVer + // 2.1. if not missed, get msg from queue + // 2.2. if missed, scan wal + pSubmit = streamQNextItem(&pHandle->pushHandle.inputQ); + while (pHandle->pushHandle.processedVer <= pSubmit->ver) { + // read from wal + } + while (pHandle->pushHandle.processedVer > pSubmit->ver + 1) { + streamQSetSuccess(&pHandle->pushHandle.inputQ); + streamDataSubmitRefDec(pSubmit); + pSubmit = streamQNextItem(&pHandle->pushHandle.inputQ); + if (pSubmit == NULL) break; + } + // 3. exec, after each success, update processed ver + // first run + if (tqLoopExecFromQueue(pTq, pHandle, &pSubmit, &rsp) == 0) { + goto SEND_RSP; + } + // set exec status closing + atomic_store_8(&pHandle->pushHandle.execStatus, TASK_STATUS__CLOSING); + // second run + if (tqLoopExecFromQueue(pTq, pHandle, &pSubmit, &rsp) == 0) { + goto SEND_RSP; + } + // set exec status idle + atomic_store_8(&pHandle->pushHandle.execStatus, TASK_STATUS__IDLE); + } +SEND_RSP: + // 4. 
if get result + // 4.1 set exec input status blocked and exec status idle + atomic_store_8(&pHandle->pushHandle.execStatus, TASK_STATUS__IDLE); + // 4.2 rpc send + rsp.rspOffset = pHandle->pushHandle.processedVer; + /*if (tqSendPollRsp(pTq, pMsg, pReq, &rsp) < 0) {*/ + /*return -1;*/ + /*}*/ + // 4.3 clear rpc info + memset(&pHandle->pushHandle.rpcInfo, 0, sizeof(SRpcHandleInfo)); + return 0; +} + +int32_t tqOpenPushHandle(STQ* pTq, STqHandle* pHandle) { + memset(&pHandle->pushHandle, 0, sizeof(STqPushHandle)); + pHandle->pushHandle.inputQ.queue = taosOpenQueue(); + pHandle->pushHandle.inputQ.qall = taosAllocateQall(); + if (pHandle->pushHandle.inputQ.queue == NULL || pHandle->pushHandle.inputQ.qall == NULL) { + if (pHandle->pushHandle.inputQ.queue) { + taosCloseQueue(pHandle->pushHandle.inputQ.queue); + } + if (pHandle->pushHandle.inputQ.qall) { + taosFreeQall(pHandle->pushHandle.inputQ.qall); + } + return -1; + } + return 0; +} + +int32_t tqPreparePush(STQ* pTq, STqHandle* pHandle, int64_t reqId, const SRpcHandleInfo* pInfo, int64_t processedVer, + int64_t timeout) { + memcpy(&pHandle->pushHandle.rpcInfo, pInfo, sizeof(SRpcHandleInfo)); + atomic_store_64(&pHandle->pushHandle.reqId, reqId); + atomic_store_64(&pHandle->pushHandle.processedVer, processedVer); + atomic_store_8(&pHandle->pushHandle.inputStatus, TASK_INPUT_STATUS__NORMAL); + atomic_store_8(&pHandle->pushHandle.tmrStopped, 0); + taosTmrReset(tqTmrRspFunc, (int32_t)timeout, pHandle, tqMgmt.timer, &pHandle->pushHandle.timerId); + return 0; +} + +int32_t tqEnqueue(STqHandle* pHandle, SStreamDataSubmit* pSubmit) { + int8_t inputStatus = atomic_load_8(&pHandle->pushHandle.inputStatus); + if (inputStatus == TASK_INPUT_STATUS__NORMAL) { + SStreamDataSubmit* pSubmitClone = streamSubmitRefClone(pSubmit); + if (pSubmitClone == NULL) { + return -1; + } + taosWriteQitem(pHandle->pushHandle.inputQ.queue, pSubmitClone); + return 0; + } + return -1; +} + +int32_t tqSendExecReq(STQ* pTq, STqHandle* pHandle) { + // + return 0; +} + +int32_t tqEnqueueAll(STQ* pTq, SSubmitReq* pReq) { + void* pIter = NULL; + SStreamDataSubmit* pSubmit = streamDataSubmitNew(pReq); + if (pSubmit == NULL) { + return -1; + } + + while (1) { + pIter = taosHashIterate(pTq->handles, pIter); + if (pIter == NULL) break; + STqHandle* pHandle = (STqHandle*)pIter; + if (tqEnqueue(pHandle, pSubmit) < 0) { + continue; + } + int8_t execStatus = atomic_load_8(&pHandle->pushHandle.execStatus); + if (execStatus == TASK_STATUS__IDLE || execStatus == TASK_STATUS__CLOSING) { + tqSendExecReq(pTq, pHandle); + } + } + + streamDataSubmitRefDec(pSubmit); + + return 0; +} + +int32_t tqPushMsgNew(STQ* pTq, void* msg, int32_t msgLen, tmsg_t msgType, int64_t ver, SRpcHandleInfo handleInfo) { + if (msgType != TDMT_VND_SUBMIT) return 0; + void* pIter = NULL; + STqHandle* pHandle = NULL; + SSubmitReq* pReq = (SSubmitReq*)msg; + int32_t workerId = 4; + int64_t fetchOffset = ver; + + while (1) { + pIter = taosHashIterate(pTq->pushMgr, pIter); + if (pIter == NULL) break; + pHandle = *(STqHandle**)pIter; + + taosWLockLatch(&pHandle->pushHandle.lock); + + SMqDataBlkRsp rsp = {0}; + rsp.reqOffset = pHandle->pushHandle.reqOffset; + rsp.blockData = taosArrayInit(0, sizeof(void*)); + rsp.blockDataLen = taosArrayInit(0, sizeof(int32_t)); + + if (msgType == TDMT_VND_SUBMIT) { + tqDataExec(pTq, &pHandle->execHandle, pReq, &rsp, workerId); + } else { + // TODO + ASSERT(0); + } + + if (rsp.blockNum == 0) { + taosWUnLockLatch(&pHandle->pushHandle.lock); + continue; + } + + 
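tqPushMsgNew() continues below by measuring the response with tEncodeSMqDataBlkRsp(NULL, &rsp), allocating exactly sizeof(SMqRspHead) plus that many bytes via rpcMallocCont(), and then encoding for real into the advancing pointer abuf. The same two-pass idiom, reduced to a runnable sketch with a hypothetical encoder:

    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>

    // Hypothetical encoder following the tEncode* convention used in the diff:
    // with buf == NULL it only measures; otherwise it writes and advances *buf.
    static int32_t demoEncodeI64(void **buf, int64_t v) {
      if (buf != NULL) {
        memcpy(*buf, &v, sizeof(v));
        *buf = (char *)*buf + sizeof(v);
      }
      return (int32_t)sizeof(v);
    }

    int main(void) {
      int64_t rspOffset = 42;
      int32_t tlen = demoEncodeI64(NULL, rspOffset);  // pass 1: size only
      void   *pBuf = malloc((size_t)tlen);
      if (pBuf == NULL) return 1;
      void *abuf = pBuf;                              // pass 2: encode into the buffer
      demoEncodeI64(&abuf, rspOffset);
      free(pBuf);
      return 0;
    }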
ASSERT(taosArrayGetSize(rsp.blockData) == rsp.blockNum); + ASSERT(taosArrayGetSize(rsp.blockDataLen) == rsp.blockNum); + + rsp.rspOffset = fetchOffset; + + int32_t tlen = sizeof(SMqRspHead) + tEncodeSMqDataBlkRsp(NULL, &rsp); + void* buf = rpcMallocCont(tlen); + if (buf == NULL) { + // todo free + return -1; + } + + ((SMqRspHead*)buf)->mqMsgType = TMQ_MSG_TYPE__POLL_RSP; + ((SMqRspHead*)buf)->epoch = pHandle->pushHandle.epoch; + ((SMqRspHead*)buf)->consumerId = pHandle->pushHandle.consumerId; + + void* abuf = POINTER_SHIFT(buf, sizeof(SMqRspHead)); + tEncodeSMqDataBlkRsp(&abuf, &rsp); + + SRpcMsg resp = { + .info = pHandle->pushHandle.rpcInfo, + .pCont = buf, + .contLen = tlen, + .code = 0, + }; + tmsgSendRsp(&resp); + + memset(&pHandle->pushHandle.rpcInfo, 0, sizeof(SRpcHandleInfo)); + taosWUnLockLatch(&pHandle->pushHandle.lock); + + tqDebug("vg %d offset %ld from consumer %ld (epoch %d) send rsp, block num: %d, reqOffset: %ld, rspOffset: %ld", + TD_VID(pTq->pVnode), fetchOffset, pHandle->pushHandle.consumerId, pHandle->pushHandle.epoch, rsp.blockNum, + rsp.reqOffset, rsp.rspOffset); + + // TODO destroy + taosArrayDestroy(rsp.blockData); + taosArrayDestroy(rsp.blockDataLen); + } + + return 0; +} + +int tqPushMsg(STQ* pTq, void* msg, int32_t msgLen, tmsg_t msgType, int64_t ver) { + if (msgType == TDMT_VND_SUBMIT) { + if (taosHashGetSize(pTq->pStreamTasks) == 0) return 0; + + if (tdUpdateExpireWindow(pTq->pVnode->pSma, msg, ver) != 0) { + // TODO handle sma error + } + void* data = taosMemoryMalloc(msgLen); + if (data == NULL) { + return -1; + } + memcpy(data, msg, msgLen); + + tqProcessStreamTrigger(pTq, data); + } + + return 0; +} + diff --git a/source/dnode/vnode/src/tq/tqRead.c b/source/dnode/vnode/src/tq/tqRead.c index 9f4c5fc81e05f7a39cd76612af0809f42f01700e..8909a00c72faf0e7ea9df06819c571af28921da8 100644 --- a/source/dnode/vnode/src/tq/tqRead.c +++ b/source/dnode/vnode/src/tq/tqRead.c @@ -15,6 +15,48 @@ #include "tq.h" +int64_t tqFetchLog(STQ* pTq, STqHandle* pHandle, int64_t* fetchOffset, SWalHead** ppHeadWithCkSum) { + int32_t code = 0; + taosThreadMutexLock(&pHandle->pWalReader->mutex); + int64_t offset = *fetchOffset; + + while (1) { + if (walFetchHead(pHandle->pWalReader, offset, *ppHeadWithCkSum) < 0) { + tqDebug("tmq poll: consumer %ld (epoch %d) vg %d offset %ld, no more log to return", pHandle->consumerId, + pHandle->epoch, TD_VID(pTq->pVnode), offset); + *fetchOffset = offset - 1; + code = -1; + goto END; + } + + if ((*ppHeadWithCkSum)->head.msgType == TDMT_VND_SUBMIT) { + code = walFetchBody(pHandle->pWalReader, ppHeadWithCkSum); + + if (code < 0) { + ASSERT(0); + *fetchOffset = offset; + code = -1; + goto END; + } + *fetchOffset = offset; + code = 0; + goto END; + } else { + code = walSkipFetchBody(pHandle->pWalReader, *ppHeadWithCkSum); + if (code < 0) { + ASSERT(0); + *fetchOffset = offset; + code = -1; + goto END; + } + offset++; + } + } +END: + taosThreadMutexUnlock(&pHandle->pWalReader->mutex); + return code; +} + STqReadHandle* tqInitSubmitMsgScanner(SMeta* pMeta) { STqReadHandle* pReadHandle = taosMemoryMalloc(sizeof(STqReadHandle)); if (pReadHandle == NULL) { @@ -24,7 +66,7 @@ STqReadHandle* tqInitSubmitMsgScanner(SMeta* pMeta) { pReadHandle->pMsg = NULL; pReadHandle->ver = -1; pReadHandle->pColIdList = NULL; - pReadHandle->sver = -1; + pReadHandle->cachedSchemaVer = -1; pReadHandle->cachedSchemaUid = -1; pReadHandle->pSchema = NULL; pReadHandle->pSchemaWrapper = NULL; @@ -88,11 +130,11 @@ int32_t tqRetrieveDataBlock(SArray** ppCols, STqReadHandle* pHandle, 
uint64_t* p // TODO set to real sversion /*int32_t sversion = 1;*/ int32_t sversion = htonl(pHandle->pBlock->sversion); - if (pHandle->sver != sversion || pHandle->cachedSchemaUid != pHandle->msgIter.suid) { + if (pHandle->cachedSchemaVer != sversion || pHandle->cachedSchemaUid != pHandle->msgIter.suid) { pHandle->pSchema = metaGetTbTSchema(pHandle->pVnodeMeta, pHandle->msgIter.uid, sversion); if (pHandle->pSchema == NULL) { tqWarn("cannot found tsschema for table: uid: %ld (suid: %ld), version %d, possibly dropped table", - pHandle->msgIter.uid, pHandle->msgIter.suid, pHandle->sver); + pHandle->msgIter.uid, pHandle->msgIter.suid, pHandle->cachedSchemaVer); /*ASSERT(0);*/ terrno = TSDB_CODE_TQ_TABLE_SCHEMA_NOT_FOUND; return -1; @@ -102,12 +144,12 @@ int32_t tqRetrieveDataBlock(SArray** ppCols, STqReadHandle* pHandle, uint64_t* p pHandle->pSchemaWrapper = metaGetTableSchema(pHandle->pVnodeMeta, pHandle->msgIter.suid, sversion, true); if (pHandle->pSchemaWrapper == NULL) { tqWarn("cannot found schema wrapper for table: suid: %ld, version %d, possibly dropped table", - pHandle->msgIter.suid, pHandle->sver); + pHandle->msgIter.suid, pHandle->cachedSchemaVer); /*ASSERT(0);*/ terrno = TSDB_CODE_TQ_TABLE_SCHEMA_NOT_FOUND; return -1; } - pHandle->sver = sversion; + pHandle->cachedSchemaVer = sversion; pHandle->cachedSchemaUid = pHandle->msgIter.suid; } @@ -256,3 +298,38 @@ int tqReadHandleRemoveTbUidList(STqReadHandle* pHandle, const SArray* tbUidList) return 0; } + +int32_t tqUpdateTbUidList(STQ* pTq, const SArray* tbUidList, bool isAdd) { + void* pIter = NULL; + while (1) { + pIter = taosHashIterate(pTq->handles, pIter); + if (pIter == NULL) break; + STqHandle* pExec = (STqHandle*)pIter; + if (pExec->execHandle.subType == TOPIC_SUB_TYPE__COLUMN) { + for (int32_t i = 0; i < 5; i++) { + int32_t code = qUpdateQualifiedTableId(pExec->execHandle.exec.execCol.task[i], tbUidList, isAdd); + ASSERT(code == 0); + } + } else if (pExec->execHandle.subType == TOPIC_SUB_TYPE__DB) { + if (!isAdd) { + int32_t sz = taosArrayGetSize(tbUidList); + for (int32_t i = 0; i < sz; i++) { + int64_t tbUid = *(int64_t*)taosArrayGet(tbUidList, i); + taosHashPut(pExec->execHandle.exec.execDb.pFilterOutTbUid, &tbUid, sizeof(int64_t), NULL, 0); + } + } + } else { + // tq update id + } + } + while (1) { + pIter = taosHashIterate(pTq->pStreamTasks, pIter); + if (pIter == NULL) break; + SStreamTask* pTask = (SStreamTask*)pIter; + if (pTask->inputType == STREAM_INPUT__DATA_SUBMIT) { + int32_t code = qUpdateQualifiedTableId(pTask->exec.executor, tbUidList, isAdd); + ASSERT(code == 0); + } + } + return 0; +} diff --git a/source/dnode/vnode/src/tq/tqSink.c b/source/dnode/vnode/src/tq/tqSink.c new file mode 100644 index 0000000000000000000000000000000000000000..5c0bf971fb8702ffbb73ed92feb8c97d1f4032d1 --- /dev/null +++ b/source/dnode/vnode/src/tq/tqSink.c @@ -0,0 +1,34 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +#include "tq.h" + +void tqTableSink(SStreamTask* pTask, void* vnode, int64_t ver, void* data) { + const SArray* pRes = (const SArray*)data; + SVnode* pVnode = (SVnode*)vnode; + + ASSERT(pTask->tbSink.pTSchema); + SSubmitReq* pReq = tdBlockToSubmit(pRes, pTask->tbSink.pTSchema, true, pTask->tbSink.stbUid, + pTask->tbSink.stbFullName, pVnode->config.vgId); + /*tPrintFixedSchemaSubmitReq(pReq, pTask->tbSink.pTSchema);*/ + // build write msg + SRpcMsg msg = { + .msgType = TDMT_VND_SUBMIT, + .pCont = pReq, + .contLen = ntohl(pReq->length), + }; + + ASSERT(tmsgPutToQueue(&pVnode->msgCb, WRITE_QUEUE, &msg) == 0); +} diff --git a/source/dnode/vnode/src/tsdb/tsdbCommit.c b/source/dnode/vnode/src/tsdb/tsdbCommit.c index d462b7e046c0ace1f869ab5e0d0788ab43b9a915..86929fe6d5e51c7371e5ecd2dd3d4b45cfa89080 100644 --- a/source/dnode/vnode/src/tsdb/tsdbCommit.c +++ b/source/dnode/vnode/src/tsdb/tsdbCommit.c @@ -84,10 +84,22 @@ static int tsdbMergeBlockData(SCommitH *pCommith, SCommitIter *pIter, SDataCols static void tsdbResetCommitTable(SCommitH *pCommith); static void tsdbCloseCommitFile(SCommitH *pCommith, bool hasError); static bool tsdbCanAddSubBlock(SCommitH *pCommith, SBlock *pBlock, SMergeInfo *pInfo); -static void tsdbLoadAndMergeFromCache(STsdb *pTsdb, SDataCols *pDataCols, int *iter, SCommitIter *pCommitIter, SDataCols *pTarget, - TSKEY maxKey, int maxRows, int8_t update); +static void tsdbLoadAndMergeFromCache(STsdb *pTsdb, SDataCols *pDataCols, int *iter, SCommitIter *pCommitIter, + SDataCols *pTarget, TSKEY maxKey, int maxRows, int8_t update); int tsdbWriteBlockIdx(SDFile *pHeadf, SArray *pIdxA, void **ppBuf); +int tsdbBegin(STsdb *pTsdb) { + if (!pTsdb) return 0; + + STsdbMemTable *pMem; + + if (tsdbMemTableCreate(pTsdb, &pTsdb->mem) < 0) { + return -1; + } + + return 0; +} + int tsdbApplyRtnOnFSet(STsdb *pRepo, SDFileSet *pSet, SRtn *pRtn) { SDiskID did; SDFileSet nSet = {0}; @@ -108,7 +120,7 @@ int tsdbApplyRtnOnFSet(STsdb *pRepo, SDFileSet *pSet, SRtn *pRtn) { tsdbInitDFileSet(pRepo, &nSet, did, pSet->fid, FS_TXN_VERSION(pfs)); if (tsdbCopyDFileSet(pSet, &nSet) < 0) { - tsdbError("vgId:%d failed to copy FSET %d from level %d to level %d since %s", REPO_ID(pRepo), pSet->fid, + tsdbError("vgId:%d, failed to copy FSET %d from level %d to level %d since %s", REPO_ID(pRepo), pSet->fid, TSDB_FSET_LEVEL(pSet), did.level, tstrerror(terrno)); return -1; } @@ -117,7 +129,7 @@ int tsdbApplyRtnOnFSet(STsdb *pRepo, SDFileSet *pSet, SRtn *pRtn) { return -1; } - tsdbInfo("vgId:%d FSET %d is copied from level %d disk id %d to level %d disk id %d", REPO_ID(pRepo), pSet->fid, + tsdbInfo("vgId:%d, FSET %d is copied from level %d disk id %d to level %d disk id %d", REPO_ID(pRepo), pSet->fid, TSDB_FSET_LEVEL(pSet), TSDB_FSET_ID(pSet), did.level, did.id); } else { // On a correct level @@ -158,7 +170,7 @@ int tsdbCommit(STsdb *pRepo) { tsdbSeekCommitIter(&commith, commith.rtn.minKey); while ((pSet = tsdbFSIterNext(&(commith.fsIter)))) { if (pSet->fid < commith.rtn.minFid) { - tsdbInfo("vgId:%d FSET %d on level %d disk id %d expires, remove it", REPO_ID(pRepo), pSet->fid, + tsdbInfo("vgId:%d, FSET %d on level %d disk id %d expires, remove it", REPO_ID(pRepo), pSet->fid, TSDB_FSET_LEVEL(pSet), TSDB_FSET_ID(pSet)); } else { break; @@ -224,23 +236,23 @@ void tsdbGetRtnSnap(STsdb *pRepo, SRtn *pRtn) { pRtn->minFid = (int)(TSDB_KEY_FID(minKey, pCfg->days, pCfg->precision)); pRtn->midFid = (int)(TSDB_KEY_FID(midKey, pCfg->days, pCfg->precision)); pRtn->maxFid = (int)(TSDB_KEY_FID(maxKey, pCfg->days, 
pCfg->precision)); - tsdbDebug("vgId:%d now:%" PRId64 " minKey:%" PRId64 " minFid:%d, midFid:%d, maxFid:%d", REPO_ID(pRepo), now, minKey, + tsdbDebug("vgId:%d, now:%" PRId64 " minKey:%" PRId64 " minFid:%d, midFid:%d, maxFid:%d", REPO_ID(pRepo), now, minKey, pRtn->minFid, pRtn->midFid, pRtn->maxFid); } static void tsdbStartCommit(STsdb *pRepo) { STsdbMemTable *pMem = pRepo->imem; - tsdbInfo("vgId:%d start to commit", REPO_ID(pRepo)); + tsdbInfo("vgId:%d, start to commit", REPO_ID(pRepo)); tsdbStartFSTxn(pRepo, 0, 0); } static void tsdbEndCommit(STsdb *pTsdb, int eno) { tsdbEndFSTxn(pTsdb); - tsdbMemTableDestroy(pTsdb, pTsdb->imem); + tsdbMemTableDestroy(pTsdb->imem); pTsdb->imem = NULL; - tsdbInfo("vgId:%d commit over, %s", REPO_ID(pTsdb), (eno == TSDB_CODE_SUCCESS) ? "succeed" : "failed"); + tsdbInfo("vgId:%d, commit over, %s", REPO_ID(pTsdb), (eno == TSDB_CODE_SUCCESS) ? "succeed" : "failed"); } static int tsdbInitCommitH(SCommitH *pCommith, STsdb *pRepo) { @@ -413,7 +425,7 @@ static int tsdbCommitToFile(SCommitH *pCommith, SDFileSet *pSet, int fid) { if (tsdbWriteBlockIdx(TSDB_COMMIT_HEAD_FILE(pCommith), pCommith->aBlkIdx, (void **)(&(TSDB_COMMIT_BUF(pCommith)))) < 0) { - tsdbError("vgId:%d failed to write SBlockIdx part to FSET %d since %s", REPO_ID(pRepo), fid, tstrerror(terrno)); + tsdbError("vgId:%d, failed to write SBlockIdx part to FSET %d since %s", REPO_ID(pRepo), fid, tstrerror(terrno)); tsdbCloseCommitFile(pCommith, true); // revert the file change tsdbApplyDFileSetChange(TSDB_COMMIT_WRITE_FSET(pCommith), pSet); @@ -421,7 +433,7 @@ static int tsdbCommitToFile(SCommitH *pCommith, SDFileSet *pSet, int fid) { } if (tsdbUpdateDFileSetHeader(&(pCommith->wSet)) < 0) { - tsdbError("vgId:%d failed to update FSET %d header since %s", REPO_ID(pRepo), fid, tstrerror(terrno)); + tsdbError("vgId:%d, failed to update FSET %d header since %s", REPO_ID(pRepo), fid, tstrerror(terrno)); tsdbCloseCommitFile(pCommith, true); // revert the file change tsdbApplyDFileSetChange(TSDB_COMMIT_WRITE_FSET(pCommith), pSet); @@ -466,7 +478,7 @@ static int tsdbCreateCommitIters(SCommitH *pCommith) { pTbData = (STbData *)pNode->pData; pCommitIter = pCommith->iters + i; - pTSchema = metaGetTbTSchema(REPO_META(pRepo), pTbData->uid, 1); // TODO: schema version + pTSchema = metaGetTbTSchema(REPO_META(pRepo), pTbData->uid, -1); if (pTSchema) { pCommitIter->pIter = tSkipListCreateIter(pTbData->pData); @@ -475,7 +487,8 @@ static int tsdbCreateCommitIters(SCommitH *pCommith) { pCommitIter->pTable = (STable *)taosMemoryMalloc(sizeof(STable)); pCommitIter->pTable->uid = pTbData->uid; pCommitIter->pTable->tid = pTbData->uid; - pCommitIter->pTable->pSchema = pTSchema; // metaGetTbTSchema(REPO_META(pRepo), pTbData->uid, 0); + pCommitIter->pTable->pSchema = pTSchema; + pCommitIter->pTable->pCacheSchema = NULL; } } tSkipListDestroyIter(pSlIter); @@ -490,6 +503,7 @@ static void tsdbDestroyCommitIters(SCommitH *pCommith) { tSkipListDestroyIter(pCommith->iters[i].pIter); if (pCommith->iters[i].pTable) { tdFreeSchema(pCommith->iters[i].pTable->pSchema); + tdFreeSchema(pCommith->iters[i].pTable->pCacheSchema); taosMemoryFreeClear(pCommith->iters[i].pTable); } } @@ -529,7 +543,7 @@ static int tsdbSetAndOpenCommitFile(SCommitH *pCommith, SDFileSet *pSet, int fid return -1; } - tsdbDebug("vgId:%d FSET %d at level %d disk id %d is opened to read to commit", REPO_ID(pRepo), TSDB_FSET_FID(pSet), + tsdbDebug("vgId:%d, FSET %d at level %d disk id %d is opened to read to commit", REPO_ID(pRepo), TSDB_FSET_FID(pSet), TSDB_FSET_LEVEL(pSet), 
TSDB_FSET_ID(pSet)); } else { pCommith->isRFileSet = false; @@ -541,7 +555,7 @@ static int tsdbSetAndOpenCommitFile(SCommitH *pCommith, SDFileSet *pSet, int fid tsdbInitDFileSet(pRepo, pWSet, did, fid, FS_TXN_VERSION(REPO_FS(pRepo))); if (tsdbCreateDFileSet(pRepo, pWSet, true) < 0) { - tsdbError("vgId:%d failed to create FSET %d at level %d disk id %d since %s", REPO_ID(pRepo), + tsdbError("vgId:%d, failed to create FSET %d at level %d disk id %d since %s", REPO_ID(pRepo), TSDB_FSET_FID(pWSet), TSDB_FSET_LEVEL(pWSet), TSDB_FSET_ID(pWSet), tstrerror(terrno)); if (pCommith->isRFileSet) { tsdbCloseAndUnsetFSet(&(pCommith->readh)); @@ -552,7 +566,7 @@ static int tsdbSetAndOpenCommitFile(SCommitH *pCommith, SDFileSet *pSet, int fid pCommith->isDFileSame = false; pCommith->isLFileSame = false; - tsdbDebug("vgId:%d FSET %d at level %d disk id %d is created to commit", REPO_ID(pRepo), TSDB_FSET_FID(pWSet), + tsdbDebug("vgId:%d, FSET %d at level %d disk id %d is created to commit", REPO_ID(pRepo), TSDB_FSET_FID(pWSet), TSDB_FSET_LEVEL(pWSet), TSDB_FSET_ID(pWSet)); } else { did.level = TSDB_FSET_LEVEL(pSet); @@ -565,7 +579,7 @@ static int tsdbSetAndOpenCommitFile(SCommitH *pCommith, SDFileSet *pSet, int fid SDFile *pWHeadf = TSDB_COMMIT_HEAD_FILE(pCommith); tsdbInitDFile(pRepo, pWHeadf, did, fid, FS_TXN_VERSION(REPO_FS(pRepo)), TSDB_FILE_HEAD); if (tsdbCreateDFile(pRepo, pWHeadf, true, TSDB_FILE_HEAD) < 0) { - tsdbError("vgId:%d failed to create file %s to commit since %s", REPO_ID(pRepo), TSDB_FILE_FULL_NAME(pWHeadf), + tsdbError("vgId:%d, failed to create file %s to commit since %s", REPO_ID(pRepo), TSDB_FILE_FULL_NAME(pWHeadf), tstrerror(terrno)); if (pCommith->isRFileSet) { @@ -580,7 +594,7 @@ static int tsdbSetAndOpenCommitFile(SCommitH *pCommith, SDFileSet *pSet, int fid tsdbInitDFileEx(pWDataf, pRDataf); // if (tsdbOpenDFile(pWDataf, O_WRONLY) < 0) { if (tsdbOpenDFile(pWDataf, TD_FILE_WRITE) < 0) { - tsdbError("vgId:%d failed to open file %s to commit since %s", REPO_ID(pRepo), TSDB_FILE_FULL_NAME(pWDataf), + tsdbError("vgId:%d, failed to open file %s to commit since %s", REPO_ID(pRepo), TSDB_FILE_FULL_NAME(pWDataf), tstrerror(terrno)); tsdbCloseDFileSet(pWSet); @@ -601,7 +615,7 @@ static int tsdbSetAndOpenCommitFile(SCommitH *pCommith, SDFileSet *pSet, int fid // if (tsdbOpenDFile(pWLastf, O_WRONLY) < 0) { if (tsdbOpenDFile(pWLastf, TD_FILE_WRITE) < 0) { - tsdbError("vgId:%d failed to open file %s to commit since %s", REPO_ID(pRepo), TSDB_FILE_FULL_NAME(pWLastf), + tsdbError("vgId:%d, failed to open file %s to commit since %s", REPO_ID(pRepo), TSDB_FILE_FULL_NAME(pWLastf), tstrerror(terrno)); tsdbCloseDFileSet(pWSet); @@ -616,7 +630,7 @@ static int tsdbSetAndOpenCommitFile(SCommitH *pCommith, SDFileSet *pSet, int fid pCommith->isLFileSame = false; if (tsdbCreateDFile(pRepo, pWLastf, true, TSDB_FILE_LAST) < 0) { - tsdbError("vgId:%d failed to create file %s to commit since %s", REPO_ID(pRepo), TSDB_FILE_FULL_NAME(pWLastf), + tsdbError("vgId:%d, failed to create file %s to commit since %s", REPO_ID(pRepo), TSDB_FILE_FULL_NAME(pWLastf), tstrerror(terrno)); tsdbCloseDFileSet(pWSet); @@ -633,11 +647,11 @@ static int tsdbSetAndOpenCommitFile(SCommitH *pCommith, SDFileSet *pSet, int fid SDFile *pWSmadF = TSDB_COMMIT_SMAD_FILE(pCommith); if (!taosCheckExistFile(TSDB_FILE_FULL_NAME(pRSmadF))) { - tsdbDebug("vgId:%d create data file %s as not exist", REPO_ID(pRepo), TSDB_FILE_FULL_NAME(pRSmadF)); + tsdbDebug("vgId:%d, create data file %s as not exist", REPO_ID(pRepo), TSDB_FILE_FULL_NAME(pRSmadF)); 
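The SMAD/SMAL handling in tsdbSetAndOpenCommitFile (continued in the hunks below) follows a create-if-missing, open-if-present split. A minimal standalone sketch of that pattern, with plain POSIX calls standing in for taosCheckExistFile, tsdbCreateDFile, and tsdbOpenDFile; the file name and the openOrCreate helper are illustrative only, not part of the codebase:

#include <stdio.h>
#include <unistd.h>

/* Create the file when it does not exist yet, otherwise open it
 * read-write so previously written contents stay intact. */
static FILE *openOrCreate(const char *path) {
  if (access(path, F_OK) != 0) {
    return fopen(path, "w+b"); /* missing: create a fresh file */
  }
  return fopen(path, "r+b");   /* present: open without truncating */
}

int main(void) {
  FILE *f = openOrCreate("v2f1900.smad");
  if (f == NULL) {
    perror("openOrCreate");
    return 1;
  }
  fclose(f);
  return 0;
}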
tsdbInitDFile(pRepo, pWSmadF, did, fid, FS_TXN_VERSION(REPO_FS(pRepo)), TSDB_FILE_SMAD); if (tsdbCreateDFile(pRepo, pWSmadF, true, TSDB_FILE_SMAD) < 0) { - tsdbError("vgId:%d failed to create file %s to commit since %s", REPO_ID(pRepo), TSDB_FILE_FULL_NAME(pWSmadF), + tsdbError("vgId:%d, failed to create file %s to commit since %s", REPO_ID(pRepo), TSDB_FILE_FULL_NAME(pWSmadF), tstrerror(terrno)); tsdbCloseDFileSet(pWSet); @@ -650,7 +664,7 @@ static int tsdbSetAndOpenCommitFile(SCommitH *pCommith, SDFileSet *pSet, int fid } else { tsdbInitDFileEx(pWSmadF, pRSmadF); if (tsdbOpenDFile(pWSmadF, O_RDWR) < 0) { - tsdbError("vgId:%d failed to open file %s to commit since %s", REPO_ID(pRepo), TSDB_FILE_FULL_NAME(pWSmadF), + tsdbError("vgId:%d, failed to open file %s to commit since %s", REPO_ID(pRepo), TSDB_FILE_FULL_NAME(pWSmadF), tstrerror(terrno)); tsdbCloseDFileSet(pWSet); @@ -669,7 +683,7 @@ static int tsdbSetAndOpenCommitFile(SCommitH *pCommith, SDFileSet *pSet, int fid if ((pCommith->isLFileSame) && taosCheckExistFile(TSDB_FILE_FULL_NAME(pRSmalF))) { tsdbInitDFileEx(pWSmalF, pRSmalF); if (tsdbOpenDFile(pWSmalF, O_RDWR) < 0) { - tsdbError("vgId:%d failed to open file %s to commit since %s", REPO_ID(pRepo), TSDB_FILE_FULL_NAME(pWSmalF), + tsdbError("vgId:%d, failed to open file %s to commit since %s", REPO_ID(pRepo), TSDB_FILE_FULL_NAME(pWSmalF), tstrerror(terrno)); tsdbCloseDFileSet(pWSet); @@ -680,11 +694,11 @@ static int tsdbSetAndOpenCommitFile(SCommitH *pCommith, SDFileSet *pSet, int fid } } } else { - tsdbDebug("vgId:%d create data file %s as not exist", REPO_ID(pRepo), TSDB_FILE_FULL_NAME(pRSmalF)); + tsdbDebug("vgId:%d, create data file %s as not exist", REPO_ID(pRepo), TSDB_FILE_FULL_NAME(pRSmalF)); tsdbInitDFile(pRepo, pWSmalF, did, fid, FS_TXN_VERSION(REPO_FS(pRepo)), TSDB_FILE_SMAL); if (tsdbCreateDFile(pRepo, pWSmalF, true, TSDB_FILE_SMAL) < 0) { - tsdbError("vgId:%d failed to create file %s to commit since %s", REPO_ID(pRepo), TSDB_FILE_FULL_NAME(pWSmalF), + tsdbError("vgId:%d, failed to create file %s to commit since %s", REPO_ID(pRepo), TSDB_FILE_FULL_NAME(pWSmalF), tstrerror(terrno)); tsdbCloseDFileSet(pWSet); @@ -885,7 +899,7 @@ static int tsdbCommitToTable(SCommitH *pCommith, int tid) { } if (tsdbWriteBlockInfo(pCommith) < 0) { - tsdbError("vgId:%d failed to write SBlockInfo part into file %s since %s", TSDB_COMMIT_REPO_ID(pCommith), + tsdbError("vgId:%d, failed to write SBlockInfo part into file %s since %s", TSDB_COMMIT_REPO_ID(pCommith), TSDB_FILE_FULL_NAME(TSDB_COMMIT_HEAD_FILE(pCommith)), tstrerror(terrno)); return -1; } @@ -914,7 +928,7 @@ static int tsdbMoveBlkIdx(SCommitH *pCommith, SBlockIdx *pIdx) { while (bidx < nBlocks) { if (!pTSchema && !tsdbCommitIsSameFile(pCommith, bidx)) { // Set commit table - pTSchema = metaGetTbTSchema(REPO_META(pTsdb), pIdx->uid, 1); // TODO: schema version + pTSchema = metaGetTbTSchema(REPO_META(pTsdb), pIdx->uid, -1); // TODO: schema version if (!pTSchema) { terrno = TSDB_CODE_OUT_OF_MEMORY; return -1; @@ -927,7 +941,7 @@ static int tsdbMoveBlkIdx(SCommitH *pCommith, SBlockIdx *pIdx) { } if (tsdbMoveBlock(pCommith, bidx) < 0) { - tsdbError("vgId:%d failed to move block into file %s since %s", TSDB_COMMIT_REPO_ID(pCommith), + tsdbError("vgId:%d, failed to move block into file %s since %s", TSDB_COMMIT_REPO_ID(pCommith), TSDB_FILE_FULL_NAME(TSDB_COMMIT_HEAD_FILE(pCommith)), tstrerror(terrno)); taosMemoryFreeClear(pTSchema); return -1; @@ -937,7 +951,7 @@ static int tsdbMoveBlkIdx(SCommitH *pCommith, SBlockIdx *pIdx) { } if 
(tsdbWriteBlockInfo(pCommith) < 0) { - tsdbError("vgId:%d failed to write SBlockInfo part into file %s since %s", TSDB_COMMIT_REPO_ID(pCommith), + tsdbError("vgId:%d, failed to write SBlockInfo part into file %s since %s", TSDB_COMMIT_REPO_ID(pCommith), TSDB_FILE_FULL_NAME(TSDB_COMMIT_HEAD_FILE(pCommith)), tstrerror(terrno)); taosMemoryFreeClear(pTSchema); return -1; @@ -948,7 +962,7 @@ static int tsdbMoveBlkIdx(SCommitH *pCommith, SBlockIdx *pIdx) { } static int tsdbSetCommitTable(SCommitH *pCommith, STable *pTable) { - STSchema *pSchema = tsdbGetTableSchemaImpl(TSDB_COMMIT_REPO(pCommith),pTable, false, false, -1); + STSchema *pSchema = tsdbGetTableSchemaImpl(TSDB_COMMIT_REPO(pCommith), pTable, false, false, -1); pCommith->pTable = pTable; @@ -1207,7 +1221,7 @@ int tsdbWriteBlockImpl(STsdb *pRepo, STable *pTable, SDFile *pDFile, SDFile *pDF pBlock->blkVer = SBlockVerLatest; pBlock->aggrOffset = (uint64_t)offsetAggr; - tsdbDebug("vgId:%d uid:%" PRId64 " a block of data is written to file %s, offset %" PRId64 + tsdbDebug("vgId:%d, uid:%" PRId64 " a block of data is written to file %s, offset %" PRId64 " numOfRows %d len %d numOfCols %" PRId16 " keyFirst %" PRId64 " keyLast %" PRId64, REPO_ID(pRepo), TABLE_UID(pTable), TSDB_FILE_FULL_NAME(pDFile), offset, rowsToWrite, pBlock->len, pBlock->numOfCols, pBlock->keyFirst, pBlock->keyLast); @@ -1422,8 +1436,8 @@ static int tsdbMergeBlockData(SCommitH *pCommith, SCommitIter *pIter, SDataCols int biter = 0; while (true) { - tsdbLoadAndMergeFromCache(TSDB_COMMIT_REPO(pCommith), pCommith->readh.pDCols[0], &biter, pIter, pCommith->pDataCols, keyLimit, defaultRows, - pCfg->update); + tsdbLoadAndMergeFromCache(TSDB_COMMIT_REPO(pCommith), pCommith->readh.pDCols[0], &biter, pIter, pCommith->pDataCols, + keyLimit, defaultRows, pCfg->update); if (pCommith->pDataCols->numOfRows == 0) break; @@ -1447,8 +1461,8 @@ static int tsdbMergeBlockData(SCommitH *pCommith, SCommitIter *pIter, SDataCols return 0; } -static void tsdbLoadAndMergeFromCache(STsdb *pTsdb, SDataCols *pDataCols, int *iter, SCommitIter *pCommitIter, SDataCols *pTarget, - TSKEY maxKey, int maxRows, int8_t update) { +static void tsdbLoadAndMergeFromCache(STsdb *pTsdb, SDataCols *pDataCols, int *iter, SCommitIter *pCommitIter, + SDataCols *pTarget, TSKEY maxKey, int maxRows, int8_t update) { TSKEY key1 = INT64_MAX; TSKEY key2 = INT64_MAX; TSKEY lastKey = TSKEY_INITIAL_VAL; diff --git a/source/dnode/vnode/src/tsdb/tsdbCommit2.c b/source/dnode/vnode/src/tsdb/tsdbDelete.c similarity index 76% rename from source/dnode/vnode/src/tsdb/tsdbCommit2.c rename to source/dnode/vnode/src/tsdb/tsdbDelete.c index 844cfc094b5650899373152ef7168f6ed4909129..6dea4a4e57392be988126c579648f39a8270b9bf 100644 --- a/source/dnode/vnode/src/tsdb/tsdbCommit2.c +++ b/source/dnode/vnode/src/tsdb/tsdbDelete.c @@ -11,18 +11,4 @@ * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */ - -#include "tsdb.h" - -int tsdbBegin(STsdb *pTsdb) { - if (!pTsdb) return 0; - - STsdbMemTable *pMem; - - if (tsdbMemTableCreate(pTsdb, &pTsdb->mem) < 0) { - return -1; - } - - return 0; -} + */ \ No newline at end of file diff --git a/source/dnode/vnode/src/tsdb/tsdbFS.c b/source/dnode/vnode/src/tsdb/tsdbFS.c index 6dfd73158ea15b3f36b23158b0de54a7a904725c..c0ca2f9594e4f30038ad2ad6636c97263805de00 100644 --- a/source/dnode/vnode/src/tsdb/tsdbFS.c +++ b/source/dnode/vnode/src/tsdb/tsdbFS.c @@ -260,7 +260,7 @@ int tsdbOpenFS(STsdb *pRepo) { tsdbGetRtnSnap(pRepo, &pRepo->rtn); if (taosCheckExistFile(current)) { if (tsdbOpenFSFromCurrent(pRepo) < 0) { - tsdbError("vgId:%d failed to open FS since %s", REPO_ID(pRepo), tstrerror(terrno)); + tsdbError("vgId:%d, failed to open FS since %s", REPO_ID(pRepo), tstrerror(terrno)); return -1; } @@ -271,19 +271,19 @@ int tsdbOpenFS(STsdb *pRepo) { } else { // should skip expired fileset inside of the function if (tsdbRestoreCurrent(pRepo) < 0) { - tsdbError("vgId:%d failed to restore current file since %s", REPO_ID(pRepo), tstrerror(terrno)); + tsdbError("vgId:%d, failed to restore current file since %s", REPO_ID(pRepo), tstrerror(terrno)); return -1; } } if (tsdbScanAndTryFixFS(pRepo) < 0) { - tsdbError("vgId:%d failed to scan and fix FS since %s", REPO_ID(pRepo), tstrerror(terrno)); + tsdbError("vgId:%d, failed to scan and fix FS since %s", REPO_ID(pRepo), tstrerror(terrno)); return -1; } // // Load meta cache if has meta file // if ((!(pRepo->state & TSDB_STATE_BAD_META)) && tsdbLoadMetaCache(pRepo, true) < 0) { - // tsdbError("vgId:%d failed to open FS while loading meta cache since %s", REPO_ID(pRepo), tstrerror(terrno)); + // tsdbError("vgId:%d, failed to open FS while loading meta cache since %s", REPO_ID(pRepo), tstrerror(terrno)); // return -1; // } @@ -607,7 +607,7 @@ static int tsdbOpenFSFromCurrent(STsdb *pRepo) { // current file exists, try to recover pFile = taosOpenFile(current, TD_FILE_READ); if (pFile == NULL) { - tsdbError("vgId:%d failed to open file %s since %s", REPO_ID(pRepo), current, strerror(errno)); + tsdbError("vgId:%d, failed to open file %s since %s", REPO_ID(pRepo), current, strerror(errno)); terrno = TAOS_SYSTEM_ERROR(errno); goto _err; } @@ -618,20 +618,20 @@ static int tsdbOpenFSFromCurrent(STsdb *pRepo) { int nread = (int)taosReadFile(pFile, buffer, TSDB_FILE_HEAD_SIZE); if (nread < 0) { - tsdbError("vgId:%d failed to read %d bytes from file %s since %s", REPO_ID(pRepo), TSDB_FILENAME_LEN, current, + tsdbError("vgId:%d, failed to read %d bytes from file %s since %s", REPO_ID(pRepo), TSDB_FILENAME_LEN, current, strerror(errno)); terrno = TAOS_SYSTEM_ERROR(errno); goto _err; } if (nread < TSDB_FILE_HEAD_SIZE) { - tsdbError("vgId:%d failed to read header of file %s, read bytes:%d", REPO_ID(pRepo), current, nread); + tsdbError("vgId:%d, failed to read header of file %s, read bytes:%d", REPO_ID(pRepo), current, nread); terrno = TSDB_CODE_TDB_FILE_CORRUPTED; goto _err; } if (!taosCheckChecksumWhole((uint8_t *)buffer, TSDB_FILE_HEAD_SIZE)) { - tsdbError("vgId:%d header of file %s failed checksum check", REPO_ID(pRepo), current); + tsdbError("vgId:%d, header of file %s failed checksum check", REPO_ID(pRepo), current); terrno = TSDB_CODE_TDB_FILE_CORRUPTED; goto _err; } @@ -652,19 +652,19 @@ static int tsdbOpenFSFromCurrent(STsdb *pRepo) { nread = (int)taosReadFile(pFile, buffer, fsheader.len); if (nread < 0) { - tsdbError("vgId:%d failed to read file %s since %s", REPO_ID(pRepo), current, strerror(errno)); + 
tsdbError("vgId:%d, failed to read file %s since %s", REPO_ID(pRepo), current, strerror(errno)); terrno = TAOS_SYSTEM_ERROR(errno); goto _err; } if (nread < fsheader.len) { - tsdbError("vgId:%d failed to read %d bytes from file %s", REPO_ID(pRepo), fsheader.len, current); + tsdbError("vgId:%d, failed to read %d bytes from file %s", REPO_ID(pRepo), fsheader.len, current); terrno = TSDB_CODE_TDB_FILE_CORRUPTED; goto _err; } if (!taosCheckChecksumWhole((uint8_t *)buffer, fsheader.len)) { - tsdbError("vgId:%d file %s is corrupted since wrong checksum", REPO_ID(pRepo), current); + tsdbError("vgId:%d, file %s is corrupted since wrong checksum", REPO_ID(pRepo), current); terrno = TSDB_CODE_TDB_FILE_CORRUPTED; goto _err; } @@ -694,7 +694,7 @@ static int tsdbScanAndTryFixFS(STsdb *pRepo) { SFSStatus *pStatus = pfs->cstatus; // if (tsdbScanAndTryFixMFile(pRepo) < 0) { - // tsdbError("vgId:%d failed to fix MFile since %s", REPO_ID(pRepo), tstrerror(terrno)); + // tsdbError("vgId:%d, failed to fix MFile since %s", REPO_ID(pRepo), tstrerror(terrno)); // return -1; // } @@ -704,7 +704,7 @@ static int tsdbScanAndTryFixFS(STsdb *pRepo) { SDFileSet *pSet = (SDFileSet *)taosArrayGet(pStatus->df, i); if (tsdbScanAndTryFixDFileSet(pRepo, pSet) < 0) { - tsdbError("vgId:%d failed to fix MFile since %s", REPO_ID(pRepo), tstrerror(terrno)); + tsdbError("vgId:%d, failed to fix MFile since %s", REPO_ID(pRepo), tstrerror(terrno)); return -1; } } @@ -724,7 +724,7 @@ static int tsdbScanRootDir(STsdb *pRepo) { tsdbGetRootDir(REPO_ID(pRepo), pRepo->dir, rootDir); STfsDir *tdir = tfsOpendir(REPO_TFS(pRepo), rootDir); if (tdir == NULL) { - tsdbError("vgId:%d failed to open directory %s since %s", REPO_ID(pRepo), rootDir, tstrerror(terrno)); + tsdbError("vgId:%d, failed to open directory %s since %s", REPO_ID(pRepo), rootDir, tstrerror(terrno)); return -1; } @@ -741,7 +741,7 @@ static int tsdbScanRootDir(STsdb *pRepo) { // } (void)tfsRemoveFile(pf); - tsdbDebug("vgId:%d invalid file %s is removed", REPO_ID(pRepo), pf->aname); + tsdbDebug("vgId:%d, invalid file %s is removed", REPO_ID(pRepo), pf->aname); } tfsClosedir(tdir); @@ -758,7 +758,7 @@ static int tsdbScanDataDir(STsdb *pRepo) { tsdbGetDataDir(REPO_ID(pRepo), pRepo->dir, dataDir); STfsDir *tdir = tfsOpendir(REPO_TFS(pRepo), dataDir); if (tdir == NULL) { - tsdbError("vgId:%d failed to open directory %s since %s", REPO_ID(pRepo), dataDir, tstrerror(terrno)); + tsdbError("vgId:%d, failed to open directory %s since %s", REPO_ID(pRepo), dataDir, tstrerror(terrno)); return -1; } @@ -767,7 +767,7 @@ static int tsdbScanDataDir(STsdb *pRepo) { if (!tsdbIsTFileInFS(pfs, pf)) { (void)tfsRemoveFile(pf); - tsdbDebug("vgId:%d invalid file %s is removed", REPO_ID(pRepo), pf->aname); + tsdbDebug("vgId:%d, invalid file %s is removed", REPO_ID(pRepo), pf->aname); } } @@ -811,7 +811,7 @@ static int tsdbRestoreDFileSet(STsdb *pRepo) { fArray = taosArrayInit(1024, sizeof(STfsFile)); if (fArray == NULL) { terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; - tsdbError("vgId:%d failed to restore DFileSet while open directory %s since %s", REPO_ID(pRepo), dataDir, + tsdbError("vgId:%d, failed to restore DFileSet while open directory %s since %s", REPO_ID(pRepo), dataDir, tstrerror(terrno)); regfree(®ex); return -1; @@ -819,7 +819,7 @@ static int tsdbRestoreDFileSet(STsdb *pRepo) { tdir = tfsOpendir(REPO_TFS(pRepo), dataDir); if (tdir == NULL) { - tsdbError("vgId:%d failed to restore DFileSet while open directory %s since %s", REPO_ID(pRepo), dataDir, + tsdbError("vgId:%d, failed to restore DFileSet 
while open directory %s since %s", REPO_ID(pRepo), dataDir, tstrerror(terrno)); taosArrayDestroy(fArray); regfree(®ex); @@ -840,12 +840,12 @@ static int tsdbRestoreDFileSet(STsdb *pRepo) { } } else if (code == REG_NOMATCH) { // Not match - tsdbInfo("vgId:%d invalid file %s exists, remove it", REPO_ID(pRepo), pf->aname); + tsdbInfo("vgId:%d, invalid file %s exists, remove it", REPO_ID(pRepo), pf->aname); (void)tfsRemoveFile(pf); continue; } else { // Has other error - tsdbError("vgId:%d failed to restore DFileSet Array while run regexec since %s", REPO_ID(pRepo), strerror(code)); + tsdbError("vgId:%d, failed to restore DFileSet Array while run regexec since %s", REPO_ID(pRepo), strerror(code)); terrno = TAOS_SYSTEM_ERROR(code); tfsClosedir(tdir); taosArrayDestroy(fArray); @@ -876,7 +876,7 @@ static int tsdbRestoreDFileSet(STsdb *pRepo) { SDFile *pDFile = TSDB_DFILE_IN_SET(&fset, ftype); if (index >= taosArrayGetSize(fArray)) { - tsdbError("vgId:%d incomplete DFileSet, fid:%d", REPO_ID(pRepo), fset.fid); + tsdbError("vgId:%d, incomplete DFileSet, fid:%d", REPO_ID(pRepo), fset.fid); taosArrayDestroy(fArray); return -1; } @@ -902,14 +902,14 @@ static int tsdbRestoreDFileSet(STsdb *pRepo) { fset.fid = tfid; } else { if (tfid != fset.fid) { - tsdbError("vgId:%d incomplete dFileSet, fid:%d", REPO_ID(pRepo), fset.fid); + tsdbError("vgId:%d, incomplete dFileSet, fid:%d", REPO_ID(pRepo), fset.fid); taosArrayDestroy(fArray); return -1; } } if (ttype != ftype) { - tsdbError("vgId:%d incomplete dFileSet, fid:%d", REPO_ID(pRepo), fset.fid); + tsdbError("vgId:%d, incomplete dFileSet, fid:%d", REPO_ID(pRepo), fset.fid); taosArrayDestroy(fArray); return -1; } @@ -918,14 +918,14 @@ static int tsdbRestoreDFileSet(STsdb *pRepo) { // if (tsdbOpenDFile(pDFile, O_RDONLY) < 0) { if (tsdbOpenDFile(pDFile, TD_FILE_READ) < 0) { - tsdbError("vgId:%d failed to open DFile %s since %s", REPO_ID(pRepo), TSDB_FILE_FULL_NAME(pDFile), + tsdbError("vgId:%d, failed to open DFile %s since %s", REPO_ID(pRepo), TSDB_FILE_FULL_NAME(pDFile), tstrerror(terrno)); taosArrayDestroy(fArray); return -1; } if (tsdbLoadDFileHeader(pDFile, &(pDFile->info)) < 0) { - tsdbError("vgId:%d failed to load DFile %s header since %s", REPO_ID(pRepo), TSDB_FILE_FULL_NAME(pDFile), + tsdbError("vgId:%d, failed to load DFile %s header since %s", REPO_ID(pRepo), TSDB_FILE_FULL_NAME(pDFile), tstrerror(terrno)); taosArrayDestroy(fArray); return -1; @@ -943,7 +943,7 @@ static int tsdbRestoreDFileSet(STsdb *pRepo) { if (pDFile->info.size != file_size) { int64_t tfsize = pDFile->info.size; pDFile->info.size = file_size; - tsdbInfo("vgId:%d file %s header size is changed from %" PRId64 " to %" PRId64, REPO_ID(pRepo), + tsdbInfo("vgId:%d, file %s header size is changed from %" PRId64 " to %" PRId64, REPO_ID(pRepo), TSDB_FILE_FULL_NAME(pDFile), tfsize, pDFile->info.size); } } @@ -952,7 +952,7 @@ static int tsdbRestoreDFileSet(STsdb *pRepo) { index++; } - tsdbInfo("vgId:%d FSET %d is restored", REPO_ID(pRepo), fset.fid); + tsdbInfo("vgId:%d, FSET %d is restored", REPO_ID(pRepo), fset.fid); taosArrayPush(pfs->cstatus->df, &fset); } @@ -965,18 +965,18 @@ static int tsdbRestoreDFileSet(STsdb *pRepo) { static int tsdbRestoreCurrent(STsdb *pRepo) { // // Loop to recover mfile // if (tsdbRestoreMeta(pRepo) < 0) { - // tsdbError("vgId:%d failed to restore current since %s", REPO_ID(pRepo), tstrerror(terrno)); + // tsdbError("vgId:%d, failed to restore current since %s", REPO_ID(pRepo), tstrerror(terrno)); // return -1; // } // Loop to recover dfile set if 
(tsdbRestoreDFileSet(pRepo) < 0) { - tsdbError("vgId:%d failed to restore DFileSet since %s", REPO_ID(pRepo), tstrerror(terrno)); + tsdbError("vgId:%d, failed to restore DFileSet since %s", REPO_ID(pRepo), tstrerror(terrno)); return -1; } if (tsdbSaveFSStatus(pRepo, pRepo->fs->cstatus) < 0) { - tsdbError("vgId:%d failed to restore current since %s", REPO_ID(pRepo), tstrerror(terrno)); + tsdbError("vgId:%d, failed to restore current since %s", REPO_ID(pRepo), tstrerror(terrno)); return -1; } @@ -1024,11 +1024,11 @@ static void tsdbScanAndTryFixDFilesHeader(STsdb *pRepo, int32_t *nExpired) { if (fset.fid < pRepo->rtn.minFid) { ++*nExpired; } - tsdbDebug("vgId:%d scan DFileSet %d header", REPO_ID(pRepo), fset.fid); + tsdbDebug("vgId:%d, scan DFileSet %d header", REPO_ID(pRepo), fset.fid); // if (tsdbOpenDFileSet(&fset, O_RDWR) < 0) { if (tsdbOpenDFileSet(&fset, TD_FILE_WRITE | TD_FILE_READ) < 0) { - tsdbError("vgId:%d failed to open DFileSet %d since %s, continue", REPO_ID(pRepo), fset.fid, tstrerror(terrno)); + tsdbError("vgId:%d, failed to open DFileSet %d since %s, continue", REPO_ID(pRepo), fset.fid, tstrerror(terrno)); continue; } @@ -1038,14 +1038,14 @@ static void tsdbScanAndTryFixDFilesHeader(STsdb *pRepo, int32_t *nExpired) { if ((tsdbLoadDFileHeader(pDFile, &info) < 0) || pDFile->info.size != info.size || pDFile->info.magic != info.magic) { if (tsdbUpdateDFileHeader(pDFile) < 0) { - tsdbError("vgId:%d failed to update DFile header of %s since %s, continue", REPO_ID(pRepo), + tsdbError("vgId:%d, failed to update DFile header of %s since %s, continue", REPO_ID(pRepo), TSDB_FILE_FULL_NAME(pDFile), tstrerror(terrno)); } else { - tsdbInfo("vgId:%d DFile header of %s is updated", REPO_ID(pRepo), TSDB_FILE_FULL_NAME(pDFile)); + tsdbInfo("vgId:%d, DFile header of %s is updated", REPO_ID(pRepo), TSDB_FILE_FULL_NAME(pDFile)); TSDB_FILE_FSYNC(pDFile); } } else { - tsdbDebug("vgId:%d DFile header of %s is correct", REPO_ID(pRepo), TSDB_FILE_FULL_NAME(pDFile)); + tsdbDebug("vgId:%d, DFile header of %s is correct", REPO_ID(pRepo), TSDB_FILE_FULL_NAME(pDFile)); } } diff --git a/source/dnode/vnode/src/tsdb/tsdbFile.c b/source/dnode/vnode/src/tsdb/tsdbFile.c index 04be2a48deb36ea343a55fa66a74a03ad79acce9..4198a94655835dcf6a33dfbf399767add97b6365 100644 --- a/source/dnode/vnode/src/tsdb/tsdbFile.c +++ b/source/dnode/vnode/src/tsdb/tsdbFile.c @@ -181,7 +181,7 @@ static int tsdbScanAndTryFixDFile(STsdb *pRepo, SDFile *pDFile) { tsdbInitDFileEx(&df, pDFile); if (!taosCheckExistFile(TSDB_FILE_FULL_NAME(pDFile))) { - tsdbError("vgId:%d data file %s not exit, report to upper layer to fix it", REPO_ID(pRepo), + tsdbError("vgId:%d, data file %s not exit, report to upper layer to fix it", REPO_ID(pRepo), TSDB_FILE_FULL_NAME(pDFile)); // pRepo->state |= TSDB_STATE_BAD_DATA; TSDB_FILE_SET_STATE(pDFile, TSDB_FILE_STATE_BAD); @@ -211,17 +211,17 @@ static int tsdbScanAndTryFixDFile(STsdb *pRepo, SDFile *pDFile) { } tsdbCloseDFile(&df); - tsdbInfo("vgId:%d file %s is truncated from %" PRId64 " to %" PRId64, REPO_ID(pRepo), TSDB_FILE_FULL_NAME(pDFile), + tsdbInfo("vgId:%d, file %s is truncated from %" PRId64 " to %" PRId64, REPO_ID(pRepo), TSDB_FILE_FULL_NAME(pDFile), file_size, pDFile->info.size); } else if (pDFile->info.size > file_size) { - tsdbError("vgId:%d data file %s has wrong size %" PRId64 " expected %" PRId64 ", report to upper layer to fix it", + tsdbError("vgId:%d, data file %s has wrong size %" PRId64 " expected %" PRId64 ", report to upper layer to fix it", REPO_ID(pRepo), TSDB_FILE_FULL_NAME(pDFile), 
file_size, pDFile->info.size); // pRepo->state |= TSDB_STATE_BAD_DATA; TSDB_FILE_SET_STATE(pDFile, TSDB_FILE_STATE_BAD); terrno = TSDB_CODE_TDB_FILE_CORRUPTED; return 0; } else { - tsdbDebug("vgId:%d file %s passes the scan", REPO_ID(pRepo), TSDB_FILE_FULL_NAME(pDFile)); + tsdbDebug("vgId:%d, file %s passes the scan", REPO_ID(pRepo), TSDB_FILE_FULL_NAME(pDFile)); } return 0; diff --git a/source/dnode/vnode/src/tsdb/tsdbMemTable.c b/source/dnode/vnode/src/tsdb/tsdbMemTable.c index 9b9a431b5008c806adfbbf3172f61830129c3bdb..350e7235413cbca9f0dd0bdb1df0a938b389a5f8 100644 --- a/source/dnode/vnode/src/tsdb/tsdbMemTable.c +++ b/source/dnode/vnode/src/tsdb/tsdbMemTable.c @@ -60,7 +60,7 @@ int tsdbMemTableCreate(STsdb *pTsdb, STsdbMemTable **ppMemTable) { return 0; } -void tsdbMemTableDestroy(STsdb *pTsdb, STsdbMemTable *pMemTable) { +void tsdbMemTableDestroy(STsdbMemTable *pMemTable) { if (pMemTable) { taosHashCleanup(pMemTable->pHashIdx); SSkipListIterator *pIter = tSkipListCreateIter(pMemTable->pSlIdx); @@ -142,69 +142,6 @@ int tsdbLoadDataFromCache(STsdb *pTsdb, STable *pTable, SSkipListIterator *pIter } else { fKey = tdGetKey(filterKeys[filterIter]); } -#if 0 - } else if (fKey > rowKey) { - if (isRowDel) { - pMergeInfo->rowsDeleteFailed++; - } else { - if (pMergeInfo->rowsInserted - pMergeInfo->rowsDeleteSucceed >= maxRowsToRead) break; - if (pCols && pMergeInfo->nOperations >= pCols->maxPoints) break; - - pMergeInfo->rowsInserted++; - pMergeInfo->nOperations++; - pMergeInfo->keyFirst = TMIN(pMergeInfo->keyFirst, rowKey); - pMergeInfo->keyLast = TMAX(pMergeInfo->keyLast, rowKey); - tsdbAppendTableRowToCols(pTable, pCols, &pSchema, row); - } - - tSkipListIterNext(pIter); - row = tsdbNextIterRow(pIter); - if (row == NULL || TD_ROW_KEY(row) > maxKey) { - rowKey = INT64_MAX; - isRowDel = false; - } else { - rowKey = TD_ROW_KEY(row); - isRowDel = TD_ROW_IS_DELETED(row); - } - } else { - if (isRowDel) { - ASSERT(!keepDup); - if (pCols && pMergeInfo->nOperations >= pCols->maxPoints) break; - pMergeInfo->rowsDeleteSucceed++; - pMergeInfo->nOperations++; - tsdbAppendTableRowToCols(pTable, pCols, &pSchema, row); - } else { - if (keepDup) { - if (pCols && pMergeInfo->nOperations >= pCols->maxPoints) break; - pMergeInfo->rowsUpdated++; - pMergeInfo->nOperations++; - pMergeInfo->keyFirst = TMIN(pMergeInfo->keyFirst, rowKey); - pMergeInfo->keyLast = TMAX(pMergeInfo->keyLast, rowKey); - tsdbAppendTableRowToCols(pTable, pCols, &pSchema, row); - } else { - pMergeInfo->keyFirst = TMIN(pMergeInfo->keyFirst, fKey); - pMergeInfo->keyLast = TMAX(pMergeInfo->keyLast, fKey); - } - } - - tSkipListIterNext(pIter); - row = tsdbNextIterRow(pIter); - if (row == NULL || TD_ROW_KEY(row) > maxKey) { - rowKey = INT64_MAX; - isRowDel = false; - } else { - rowKey = TD_ROW_KEY(row); - isRowDel = TD_ROW_IS_DELETED(row); - } - - filterIter++; - if (filterIter >= nFilterKeys) { - fKey = INT64_MAX; - } else { - fKey = tdGetKey(filterKeys[filterIter]); - } - } -#endif #if 1 } else if (fKey > rowKey) { if (isRowDel) { @@ -321,7 +258,7 @@ int tsdbInsertTableData(STsdb *pTsdb, SSubmitMsgIter *pMsgIter, SSubmitBlk *pBlo terrno = TSDB_CODE_PAR_TABLE_NOT_EXIST; return -1; } - if(pRsp->tblFName) strcat(pRsp->tblFName, mr.me.name); + if (pRsp->tblFName) strcat(pRsp->tblFName, mr.me.name); if (mr.me.type == TSDB_NORMAL_TABLE) { sverNew = mr.me.ntbEntry.schemaRow.version; diff --git a/source/dnode/vnode/src/tsdb/tsdbMemTable2.c b/source/dnode/vnode/src/tsdb/tsdbMemTable2.c index 
025b2ab580163cf3e9b9031b24f1b07881d3ec61..f4d4d9414201bac528b39d27a04a7315a331ea6d 100644 --- a/source/dnode/vnode/src/tsdb/tsdbMemTable2.c +++ b/source/dnode/vnode/src/tsdb/tsdbMemTable2.c @@ -15,366 +15,382 @@ #include "tsdb.h" -typedef struct SMemTable SMemTable; -typedef struct SMemData SMemData; -typedef struct SMemSkipList SMemSkipList; -typedef struct SMemSkipListNode SMemSkipListNode; -typedef struct SMemSkipListCurosr SMemSkipListCurosr; - -#define SL_MAX_LEVEL 5 - -struct SMemTable { - STsdb *pTsdb; - TSKEY minKey; - TSKEY maxKey; - int64_t minVer; - int64_t maxVer; - int64_t nRows; - int32_t nHash; - int32_t nBucket; - SMemData **pBuckets; - SMemSkipListCurosr *pSlc; -}; +typedef struct SMemData SMemData; +typedef struct SMemSkipList SMemSkipList; +typedef struct SMemSkipListNode SMemSkipListNode; struct SMemSkipListNode { int8_t level; - SMemSkipListNode *forwards[1]; // Windows does not allow 0 + SMemSkipListNode *forwards[0]; }; struct SMemSkipList { - uint32_t seed; - int8_t maxLevel; - int8_t level; - int32_t size; - SMemSkipListNode pHead[1]; // Windows does not allow 0 + uint32_t seed; + int32_t size; + int8_t maxLevel; + int8_t level; + SMemSkipListNode *pHead; + SMemSkipListNode *pTail; }; struct SMemData { - SMemData *pHashNext; tb_uid_t suid; tb_uid_t uid; - TSKEY minKey; - TSKEY maxKey; - int64_t minVer; - int64_t maxVer; - int64_t nRows; + TSDBKEY minKey; + TSDBKEY maxKey; + SDelOp *delOpHead; + SDelOp *delOpTail; SMemSkipList sl; }; -struct SMemSkipListCurosr { - SMemSkipList *pSl; - SMemSkipListNode *pNodes[SL_MAX_LEVEL]; +struct SMemTable { + STsdb *pTsdb; + int32_t nRef; + TSDBKEY minKey; + TSDBKEY maxKey; + int64_t nRows; + SArray *pArray; // SArray }; -typedef struct { - int64_t version; - uint32_t szRow; - const STSRow *pRow; -} STsdbRow; - -#define HASH_BUCKET(SUID, UID, NBUCKET) (TABS((SUID) + (UID)) % (NBUCKET)) +#define SL_MAX_LEVEL 5 #define SL_NODE_SIZE(l) (sizeof(SMemSkipListNode) + sizeof(SMemSkipListNode *) * (l)*2) -#define SL_NODE_HALF_SIZE(l) (sizeof(SMemSkipListNode) + sizeof(SMemSkipListNode *) * (l)) #define SL_NODE_FORWARD(n, l) ((n)->forwards[l]) #define SL_NODE_BACKWARD(n, l) ((n)->forwards[(n)->level + (l)]) #define SL_NODE_DATA(n) (&SL_NODE_BACKWARD(n, (n)->level)) -#define SL_HEAD_NODE(sl) ((sl)->pHead) -#define SL_TAIL_NODE(sl) ((SMemSkipListNode *)&SL_NODE_FORWARD(SL_HEAD_NODE(sl), (sl)->maxLevel)) -#define SL_HEAD_NODE_FORWARD(n, l) SL_NODE_FORWARD(n, l) -#define SL_TAIL_NODE_BACKWARD(n, l) SL_NODE_FORWARD(n, l) - +static int32_t tsdbGetOrCreateMemData(SMemTable *pMemTable, tb_uid_t suid, tb_uid_t uid, SMemData **ppMemData); +static int memDataPCmprFn(const void *p1, const void *p2); +static int32_t tPutTSDBRow(uint8_t *p, TSDBROW *pRow); +static int32_t tGetTSDBRow(uint8_t *p, TSDBROW *pRow); static int8_t tsdbMemSkipListRandLevel(SMemSkipList *pSl); -static int32_t tsdbEncodeRow(SEncoder *pEncoder, const STsdbRow *pRow); -static int32_t tsdbDecodeRow(SDecoder *pDecoder, STsdbRow *pRow); -static int32_t tsdbMemSkipListCursorCreate(int8_t maxLevel, SMemSkipListCurosr **ppSlc); -static void tsdbMemSkipListCursorDestroy(SMemSkipListCurosr *pSlc); -static void tsdbMemSkipListCursorInit(SMemSkipListCurosr *pSlc, SMemSkipList *pSl); -static void tsdbMemSkipListCursorPut(SMemSkipListCurosr *pSlc, SMemSkipListNode *pNode); -static int32_t tsdbMemSkipListCursorMoveTo(SMemSkipListCurosr *pSlc, int64_t version, TSKEY ts, int32_t flags); -static void tsdbMemSkipListCursorMoveToFirst(SMemSkipListCurosr *pSlc); -static void 
tsdbMemSkipListCursorMoveToLast(SMemSkipListCurosr *pSlc); -static int32_t tsdbMemSkipListCursorMoveToNext(SMemSkipListCurosr *pSlc); -static int32_t tsdbMemSkipListCursorMoveToPrev(SMemSkipListCurosr *pSlc); -static SMemSkipListNode *tsdbMemSkipListNodeCreate(SVBufPool *pPool, SMemSkipList *pSl, const STsdbRow *pTRow); - -// SMemTable -int32_t tsdbMemTableCreate2(STsdb *pTsdb, SMemTable **ppMemTb) { - SMemTable *pMemTb = NULL; - - pMemTb = taosMemoryCalloc(1, sizeof(*pMemTb)); - if (pMemTb == NULL) { - terrno = TSDB_CODE_OUT_OF_MEMORY; - return -1; +static void memDataMovePos(SMemData *pMemData, TSDBROW *pRow, int8_t isForward, SMemSkipListNode **pos); +static int32_t memDataPutRow(SVBufPool *pPool, SMemData *pMemData, TSDBROW *pRow, int8_t isForward, + SMemSkipListNode **pos); + +// SMemTable ============================================== +int32_t tsdbMemTableCreate2(STsdb *pTsdb, SMemTable **ppMemTable) { + int32_t code = 0; + SMemTable *pMemTable = NULL; + + pMemTable = (SMemTable *)taosMemoryCalloc(1, sizeof(*pMemTable)); + if (pMemTable == NULL) { + code = TSDB_CODE_OUT_OF_MEMORY; + goto _err; } - - pMemTb->pTsdb = pTsdb; - pMemTb->minKey = TSKEY_MAX; - pMemTb->maxKey = TSKEY_MIN; - pMemTb->minVer = -1; - pMemTb->maxVer = -1; - pMemTb->nRows = 0; - pMemTb->nHash = 0; - pMemTb->nBucket = 1024; - pMemTb->pBuckets = taosMemoryCalloc(pMemTb->nBucket, sizeof(*pMemTb->pBuckets)); - if (pMemTb->pBuckets == NULL) { - terrno = TSDB_CODE_OUT_OF_MEMORY; - taosMemoryFree(pMemTb); - return -1; - } - if (tsdbMemSkipListCursorCreate(pTsdb->pVnode->config.tsdbCfg.slLevel, &pMemTb->pSlc) < 0) { - terrno = TSDB_CODE_OUT_OF_MEMORY; - taosMemoryFree(pMemTb->pBuckets); - taosMemoryFree(pMemTb); + pMemTable->pTsdb = pTsdb; + pMemTable->nRef = 1; + pMemTable->minKey = (TSDBKEY){.version = INT64_MAX, .ts = TSKEY_MAX}; + pMemTable->maxKey = (TSDBKEY){.version = -1, .ts = TSKEY_MIN}; + pMemTable->nRows = 0; + pMemTable->pArray = taosArrayInit(512, sizeof(SMemData *)); + if (pMemTable->pArray == NULL) { + taosMemoryFree(pMemTable); + code = TSDB_CODE_OUT_OF_MEMORY; + goto _err; } - *ppMemTb = pMemTb; - return 0; + *ppMemTable = pMemTable; + return code; + +_err: + *ppMemTable = NULL; + return code; } -int32_t tsdbMemTableDestroy2(STsdb *pTsdb, SMemTable *pMemTb) { - if (pMemTb) { - // loop to destroy the contents (todo) - tsdbMemSkipListCursorDestroy(pMemTb->pSlc); - taosMemoryFree(pMemTb->pBuckets); - taosMemoryFree(pMemTb); - } - return 0; +void tsdbMemTableDestroy2(SMemTable *pMemTable) { + taosArrayDestroyEx(pMemTable->pArray, NULL /*TODO*/); + taosMemoryFree(pMemTable); } -int32_t tsdbInsertData2(SMemTable *pMemTb, int64_t version, const SVSubmitBlk *pSubmitBlk) { +int32_t tsdbInsertTableData2(STsdb *pTsdb, int64_t version, SVSubmitBlk *pSubmitBlk) { + int32_t code = 0; + SMemTable *pMemTable = (SMemTable *)pTsdb->mem; // TODO SMemData *pMemData; - STsdb *pTsdb = pMemTb->pTsdb; - SVnode *pVnode = pTsdb->pVnode; - SVBufPool *pPool = pVnode->inUse; - tb_uid_t suid = pSubmitBlk->suid; - tb_uid_t uid = pSubmitBlk->uid; - int32_t iBucket; - - // search SMemData by hash - iBucket = HASH_BUCKET(suid, uid, pMemTb->nBucket); - for (pMemData = pMemTb->pBuckets[iBucket]; pMemData; pMemData = pMemData->pHashNext) { - if (pMemData->suid == suid && pMemData->uid == uid) break; - } + TSDBROW row = {.version = version}; - // create pMemData if need - if (pMemData == NULL) { - int8_t maxLevel = pVnode->config.tsdbCfg.slLevel; - int32_t tsize = sizeof(*pMemData) + SL_NODE_HALF_SIZE(maxLevel) * 2; - SMemSkipListNode *pHead, 
*pTail; - - pMemData = vnodeBufPoolMalloc(pPool, tsize); - if (pMemData == NULL) { - terrno = TSDB_CODE_OUT_OF_MEMORY; - return -1; - } + ASSERT(pMemTable); + ASSERT(pSubmitBlk->nData > 0); - pMemData->pHashNext = NULL; - pMemData->suid = suid; - pMemData->uid = uid; - pMemData->minKey = TSKEY_MAX; - pMemData->maxKey = TSKEY_MIN; - pMemData->minVer = -1; - pMemData->maxVer = -1; - pMemData->nRows = 0; - pMemData->sl.seed = taosRand(); - pMemData->sl.maxLevel = maxLevel; - pMemData->sl.level = 0; - pMemData->sl.size = 0; - pHead = SL_HEAD_NODE(&pMemData->sl); - pTail = SL_TAIL_NODE(&pMemData->sl); - pHead->level = maxLevel; - pTail->level = maxLevel; - for (int iLevel = 0; iLevel < maxLevel; iLevel++) { - SL_HEAD_NODE_FORWARD(pHead, iLevel) = pTail; - SL_TAIL_NODE_BACKWARD(pTail, iLevel) = pHead; - } + { + // check if table exists (todo) + } - // add to hash - if (pMemTb->nHash >= pMemTb->nBucket) { - // rehash (todo) - } - iBucket = HASH_BUCKET(suid, uid, pMemTb->nBucket); - pMemData->pHashNext = pMemTb->pBuckets[iBucket]; - pMemTb->pBuckets[iBucket] = pMemData; - pMemTb->nHash++; + code = tsdbGetOrCreateMemData(pMemTable, pSubmitBlk->suid, pSubmitBlk->uid, &pMemData); + if (code) { + tsdbError("vgId:%d, failed to create/get table data since %s", TD_VID(pTsdb->pVnode), tstrerror(code)); + goto _err; + } + + // do insert + int32_t nt; + int32_t n = 0; + uint8_t *p = pSubmitBlk->pData; + int32_t nRow = 0; + SMemSkipListNode *pos[SL_MAX_LEVEL] = {0}; - // sort organize (todo) + for (int8_t iLevel = 0; iLevel < SL_MAX_LEVEL; iLevel++) { + pos[iLevel] = pMemData->sl.pTail; } + while (n < pSubmitBlk->nData) { + nt = tGetTSRow(p + n, &row.tsRow); + n += nt; - // do insert data to SMemData - SMemSkipListNode *forwards[SL_MAX_LEVEL]; - SMemSkipListNode *pNode; - int32_t iRow; - STsdbRow tRow = {.version = version}; - SEncoder ec = {0}; - SDecoder dc = {0}; - - tDecoderInit(&dc, pSubmitBlk->pData, pSubmitBlk->nData); - tsdbMemSkipListCursorInit(pMemTb->pSlc, &pMemData->sl); - for (iRow = 0;; iRow++) { - if (tDecodeIsEnd(&dc)) break; - - // decode row - if (tDecodeBinary(&dc, (uint8_t **)&tRow.pRow, &tRow.szRow) < 0) { - terrno = TSDB_CODE_INVALID_MSG; - return -1; + ASSERT(n <= pSubmitBlk->nData); + + memDataMovePos(pMemData, &row, nRow ? 1 : 0, pos); + code = memDataPutRow(pTsdb->pVnode->inUse, pMemData, &row, nRow ? 
1 : 0, pos); + if (code) { + goto _err; } - // move cursor - tsdbMemSkipListCursorMoveTo(pMemTb->pSlc, version, tRow.pRow->ts, 0); + nRow++; + } - // encode row - pNode = tsdbMemSkipListNodeCreate(pPool, &pMemData->sl, &tRow); - if (pNode == NULL) { - terrno = TSDB_CODE_OUT_OF_MEMORY; - return -1; - } + return code; - // put the node - tsdbMemSkipListCursorPut(pMemTb->pSlc, pNode); +_err: + return code; +} - // update status - if (tRow.pRow->ts < pMemData->minKey) pMemData->minKey = tRow.pRow->ts; - if (tRow.pRow->ts > pMemData->maxKey) pMemData->maxKey = tRow.pRow->ts; - } - tDecoderClear(&dc); +int32_t tsdbDeleteTableData2(STsdb *pTsdb, int64_t version, tb_uid_t suid, tb_uid_t uid, TSKEY sKey, TSKEY eKey) { + int32_t code = 0; + SMemTable *pMemTable = (SMemTable *)pTsdb->mem; // TODO + SMemData *pMemData; + SVBufPool *pPool = pTsdb->pVnode->inUse; - // update status - if (pMemData->minVer == -1) pMemData->minVer = version; - if (pMemData->maxVer == -1 || pMemData->maxVer < version) pMemData->maxVer = version; + ASSERT(pMemTable); - if (pMemTb->minKey < pMemData->minKey) pMemTb->minKey = pMemData->minKey; - if (pMemTb->maxKey < pMemData->maxKey) pMemTb->maxKey = pMemData->maxKey; - if (pMemTb->minVer == -1) pMemTb->minVer = version; - if (pMemTb->maxVer == -1 || pMemTb->maxVer < version) pMemTb->maxVer = version; + { + // check if table exists (todo) + } - return 0; -} + code = tsdbGetOrCreateMemData(pMemTable, suid, uid, &pMemData); + if (code) { + goto _err; + } -static FORCE_INLINE int8_t tsdbMemSkipListRandLevel(SMemSkipList *pSl) { - int8_t level = 1; - int8_t tlevel = TMIN(pSl->maxLevel, pSl->level + 1); - const uint32_t factor = 4; + // do delete + SDelOp *pDelOp = (SDelOp *)vnodeBufPoolMalloc(pPool, sizeof(*pDelOp)); + if (pDelOp == NULL) { + code = TSDB_CODE_OUT_OF_MEMORY; + goto _err; + } + pDelOp->version = version; + pDelOp->sKey = sKey; + pDelOp->eKey = eKey; + pDelOp->pNext = NULL; + if (pMemData->delOpHead == NULL) { + ASSERT(pMemData->delOpTail == NULL); + pMemData->delOpHead = pMemData->delOpTail = pDelOp; + } else { + pMemData->delOpTail->pNext = pDelOp; + pMemData->delOpTail = pDelOp; + } - while ((taosRandR(&pSl->seed) % factor) == 0 && level < tlevel) { - level++; + { + // update the state of pMemTable, pMemData, last and lastrow (todo) } - return level; -} + tsdbDebug("vgId:%d, delete data from table suid:%" PRId64 " uid:%" PRId64 " sKey:%" PRId64 " eKey:%" PRId64 + " since %s", + TD_VID(pTsdb->pVnode), suid, uid, sKey, eKey, tstrerror(code)); + return code; -static FORCE_INLINE int32_t tsdbEncodeRow(SEncoder *pEncoder, const STsdbRow *pRow) { - if (tEncodeI64(pEncoder, pRow->version) < 0) return -1; - if (tEncodeBinary(pEncoder, (const uint8_t *)pRow->pRow, pRow->szRow) < 0) return -1; - return 0; +_err: + tsdbError("vgId:%d, failed to delete data from table suid:%" PRId64 " uid:%" PRId64 " sKey:%" PRId64 " eKey:%" PRId64 + " since %s", + TD_VID(pTsdb->pVnode), suid, uid, sKey, eKey, tstrerror(code)); + return code; } -static FORCE_INLINE int32_t tsdbDecodeRow(SDecoder *pDecoder, STsdbRow *pRow) { - if (tDecodeI64(pDecoder, &pRow->version) < 0) return -1; - if (tDecodeBinary(pDecoder, (uint8_t **)&pRow->pRow, &pRow->szRow) < 0) return -1; - return 0; -} +static int32_t tsdbGetOrCreateMemData(SMemTable *pMemTable, tb_uid_t suid, tb_uid_t uid, SMemData **ppMemData) { + int32_t code = 0; + int32_t idx = 0; + SMemData *pMemDataT = &(SMemData){.suid = suid, .uid = uid}; + SMemData *pMemData = NULL; + SVBufPool *pPool = pMemTable->pTsdb->pVnode->inUse; + int8_t maxLevel = 
pMemTable->pTsdb->pVnode->config.tsdbCfg.slLevel; + + // get + idx = taosArraySearchIdx(pMemTable->pArray, &pMemDataT, memDataPCmprFn, TD_GE); + if (idx >= 0) { + pMemData = (SMemData *)taosArrayGet(pMemTable->pArray, idx); + if (memDataPCmprFn(&pMemDataT, &pMemData) == 0) goto _exit; + } -static int32_t tsdbMemSkipListCursorCreate(int8_t maxLevel, SMemSkipListCurosr **ppSlc) { - *ppSlc = (SMemSkipListCurosr *)taosMemoryCalloc(1, sizeof(**ppSlc) + sizeof(SMemSkipListNode *) * maxLevel); - if (*ppSlc == NULL) { - return -1; + // create + pMemData = vnodeBufPoolMalloc(pPool, sizeof(*pMemData) + SL_NODE_SIZE(maxLevel) * 2); + if (pMemData == NULL) { + code = TSDB_CODE_OUT_OF_MEMORY; + goto _err; + } + pMemData->suid = suid; + pMemData->uid = uid; + pMemData->minKey = (TSDBKEY){.version = INT64_MAX, .ts = TSKEY_MAX}; + pMemData->maxKey = (TSDBKEY){.version = -1, .ts = TSKEY_MIN}; + pMemData->delOpHead = pMemData->delOpTail = NULL; + pMemData->sl.seed = taosRand(); + pMemData->sl.size = 0; + pMemData->sl.maxLevel = maxLevel; + pMemData->sl.level = 0; + pMemData->sl.pHead = (SMemSkipListNode *)&pMemData[1]; + pMemData->sl.pTail = (SMemSkipListNode *)POINTER_SHIFT(pMemData->sl.pHead, SL_NODE_SIZE(maxLevel)); + pMemData->sl.pHead->level = maxLevel; + pMemData->sl.pTail->level = maxLevel; + + for (int8_t iLevel = 0; iLevel < pMemData->sl.maxLevel; iLevel++) { + SL_NODE_FORWARD(pMemData->sl.pHead, iLevel) = pMemData->sl.pTail; + SL_NODE_BACKWARD(pMemData->sl.pHead, iLevel) = NULL; + SL_NODE_BACKWARD(pMemData->sl.pTail, iLevel) = pMemData->sl.pHead; + SL_NODE_FORWARD(pMemData->sl.pTail, iLevel) = NULL; } - return 0; -} -static void tsdbMemSkipListCursorDestroy(SMemSkipListCurosr *pSlc) { taosMemoryFree(pSlc); } + if (idx < 0) idx = 0; + if (taosArrayInsert(pMemTable->pArray, idx, &pMemData) == NULL) { + code = TSDB_CODE_OUT_OF_MEMORY; + goto _err; + } -static void tsdbMemSkipListCursorInit(SMemSkipListCurosr *pSlc, SMemSkipList *pSl) { - SMemSkipListNode *pHead = SL_HEAD_NODE(pSl); - pSlc->pSl = pSl; - // for (int8_t iLevel = 0; iLevel < pSl->maxLevel; iLevel++) { - // pSlc->forwards[iLevel] = pHead; - // } -} +_exit: + *ppMemData = pMemData; + return code; -static void tsdbMemSkipListCursorPut(SMemSkipListCurosr *pSlc, SMemSkipListNode *pNode) { - SMemSkipList *pSl = pSlc->pSl; - SMemSkipListNode *pNodeNext; +_err: + *ppMemData = NULL; + return code; +} - for (int8_t iLevel = 0; iLevel < pNode->level; iLevel++) { - // todo +static int memDataPCmprFn(const void *p1, const void *p2) { + SMemData *pMemData1 = *(SMemData **)p1; + SMemData *pMemData2 = *(SMemData **)p2; - ASSERT(0); + if (pMemData1->suid < pMemData2->suid) { + return -1; + } else if (pMemData1->suid > pMemData2->suid) { + return 1; } - if (pSl->level < pNode->level) { - pSl->level = pNode->level; + if (pMemData1->uid < pMemData2->uid) { + return -1; + } else if (pMemData1->uid > pMemData2->uid) { + return 1; } - pSl->size += 1; + return 0; } -static int32_t tsdbMemSkipListCursorMoveTo(SMemSkipListCurosr *pSlc, int64_t version, TSKEY ts, int32_t flags) { - SMemSkipListNode **pForwards = NULL; - SMemSkipList *pSl = pSlc->pSl; - int8_t maxLevel = pSl->maxLevel; - SMemSkipListNode *pHead = SL_HEAD_NODE(pSl); - SMemSkipListNode *pTail = SL_TAIL_NODE(pSl); +static int32_t tPutTSDBRow(uint8_t *p, TSDBROW *pRow) { + int32_t n = 0; - if (pSl->size == 0) { - for (int8_t iLevel = 0; iLevel < pSl->maxLevel; iLevel++) { - pForwards[iLevel] = pHead; - } - } + n += tPutI64(p ? p + n : p, pRow->version); + n += tPutTSRow(p ? 
p + n : p, &pRow->tsRow); - return 0; + return n; } -static void tsdbMemSkipListCursorMoveToFirst(SMemSkipListCurosr *pSlc) { - SMemSkipList *pSl = pSlc->pSl; - SMemSkipListNode *pHead = SL_HEAD_NODE(pSl); +static int32_t tGetTSDBRow(uint8_t *p, TSDBROW *pRow) { + int32_t n = 0; - for (int8_t iLevel = 0; iLevel < pSl->maxLevel; iLevel++) { - pSlc->pNodes[iLevel] = pHead; - } + n += tGetI64(p + n, &pRow->version); + n += tGetTSRow(p + n, &pRow->tsRow); - tsdbMemSkipListCursorMoveToNext(pSlc); + return n; } -static void tsdbMemSkipListCursorMoveToLast(SMemSkipListCurosr *pSlc) { - SMemSkipList *pSl = pSlc->pSl; - SMemSkipListNode *pTail = SL_TAIL_NODE(pSl); +static FORCE_INLINE int8_t tsdbMemSkipListRandLevel(SMemSkipList *pSl) { + int8_t level = 1; + int8_t tlevel = TMIN(pSl->maxLevel, pSl->level + 1); + const uint32_t factor = 4; - for (int8_t iLevel = 0; iLevel < pSl->maxLevel; iLevel++) { - pSlc->pNodes[iLevel] = pTail; + while ((taosRandR(&pSl->seed) % factor) == 0 && level < tlevel) { + level++; } - tsdbMemSkipListCursorMoveToPrev(pSlc); + return level; } -static int32_t tsdbMemSkipListCursorMoveToNext(SMemSkipListCurosr *pSlc) { - // TODO - return 0; +static void memDataMovePos(SMemData *pMemData, TSDBROW *pRow, int8_t isForward, SMemSkipListNode **pos) { + TSDBKEY *pKey; + int c; + + if (isForward) { + // TODO + } else { + SMemSkipListNode *px = pMemData->sl.pTail; + + for (int8_t iLevel = pMemData->sl.maxLevel - 1; iLevel >= 0; iLevel--) { + if (iLevel < pMemData->sl.level) { + SMemSkipListNode *p = SL_NODE_BACKWARD(px, iLevel); + + while (p != pMemData->sl.pHead) { + pKey = (TSDBKEY *)SL_NODE_DATA(p); + + c = tsdbKeyCmprFn(pKey, pRow); + if (c <= 0) { + break; + } else { + px = p; + p = SL_NODE_BACKWARD(px, iLevel); + } + } + + pos[iLevel] = px; + } + } + } } -static int32_t tsdbMemSkipListCursorMoveToPrev(SMemSkipListCurosr *pSlc) { - // TODO - return 0; +static void memMovePosFrom(SMemData *pMemData, SMemSkipListNode *pNode, TSDBROW *pRow, int8_t isForward, + SMemSkipListNode **pos) { + SMemSkipListNode *px = pNode; + TSDBKEY *pKey; + SMemSkipListNode *p; + int c; + + if (isForward) { + } else { + ASSERT(pNode != pMemData->sl.pHead); + + for (int8_t iLevel = pMemData->sl.maxLevel - 1; iLevel >= 0; iLevel--) { + p = SL_NODE_BACKWARD(px, iLevel); + while (p != pMemData->sl.pHead) { + pKey = (TSDBKEY *)SL_NODE_DATA(p); + + c = tsdbKeyCmprFn(pKey, pRow); + if (c <= 0) { + break; + } else { + px = p; + p = SL_NODE_BACKWARD(px, iLevel); + } + } + + pos[iLevel] = px; + } + } } -static SMemSkipListNode *tsdbMemSkipListNodeCreate(SVBufPool *pPool, SMemSkipList *pSl, const STsdbRow *pTRow) { - int32_t tsize; - int32_t ret; - int8_t level = tsdbMemSkipListRandLevel(pSl); - SMemSkipListNode *pNode = NULL; - SEncoder ec = {0}; - - tEncodeSize(tsdbEncodeRow, pTRow, tsize, ret); - pNode = vnodeBufPoolMalloc(pPool, tsize + SL_NODE_SIZE(level)); - if (pNode) { - pNode->level = level; - tEncoderInit(&ec, (uint8_t *)SL_NODE_DATA(pNode), tsize); - tsdbEncodeRow(&ec, pTRow); - tEncoderClear(&ec); +static int32_t memDataPutRow(SVBufPool *pPool, SMemData *pMemData, TSDBROW *pRow, int8_t isForward, + SMemSkipListNode **pos) { + int32_t code = 0; + int8_t level; + SMemSkipListNode *pNode; + + level = tsdbMemSkipListRandLevel(&pMemData->sl); + pNode = (SMemSkipListNode *)vnodeBufPoolMalloc(pPool, SL_NODE_SIZE(level) + tPutTSDBRow(NULL, pRow)); + if (pNode == NULL) { + code = TSDB_CODE_OUT_OF_MEMORY; + goto _exit; + } + + // do the read put + if (isForward) { + // TODO + } else { + // TODO } - return 
pNode; +_exit: + return code; } \ No newline at end of file diff --git a/source/dnode/vnode/src/tsdb/tsdbOpen.c b/source/dnode/vnode/src/tsdb/tsdbOpen.c index fa54c811ffc158339fda4b34cad47ba7c4f2fdac..943263e1a3c65ebf980b353e6a1b69ba52868a22 100644 --- a/source/dnode/vnode/src/tsdb/tsdbOpen.c +++ b/source/dnode/vnode/src/tsdb/tsdbOpen.c @@ -74,7 +74,7 @@ int tsdbOpen(SVnode *pVnode, STsdb **ppTsdb, const char *dir, STsdbKeepCfg *pKee goto _err; } - tsdbDebug("vgId:%d tsdb is opened for %s, days:%d, keep:%d,%d,%d", TD_VID(pVnode), pTsdb->path, pTsdb->keepCfg.days, + tsdbDebug("vgId:%d, tsdb is opened for %s, days:%d, keep:%d,%d,%d", TD_VID(pVnode), pTsdb->path, pTsdb->keepCfg.days, pTsdb->keepCfg.keep0, pTsdb->keepCfg.keep1, pTsdb->keepCfg.keep2); *ppTsdb = pTsdb; @@ -99,7 +99,7 @@ int tsdbClose(STsdb **pTsdb) { int tsdbLockRepo(STsdb *pTsdb) { int code = taosThreadMutexLock(&pTsdb->mutex); if (code != 0) { - tsdbError("vgId:%d failed to lock tsdb since %s", REPO_ID(pTsdb), strerror(errno)); + tsdbError("vgId:%d, failed to lock tsdb since %s", REPO_ID(pTsdb), strerror(errno)); terrno = TAOS_SYSTEM_ERROR(code); return -1; } @@ -112,7 +112,7 @@ int tsdbUnlockRepo(STsdb *pTsdb) { pTsdb->repoLocked = false; int code = taosThreadMutexUnlock(&pTsdb->mutex); if (code != 0) { - tsdbError("vgId:%d failed to unlock tsdb since %s", REPO_ID(pTsdb), strerror(errno)); + tsdbError("vgId:%d, failed to unlock tsdb since %s", REPO_ID(pTsdb), strerror(errno)); terrno = TAOS_SYSTEM_ERROR(code); return -1; } diff --git a/source/dnode/vnode/src/tsdb/tsdbRead.c b/source/dnode/vnode/src/tsdb/tsdbRead.c index c0b97f7536963d28045d8391273ffcf0a1b15876..5f2ea80078fa676ab29752ec33dd07883e3e7802 100644 --- a/source/dnode/vnode/src/tsdb/tsdbRead.c +++ b/source/dnode/vnode/src/tsdb/tsdbRead.c @@ -14,6 +14,7 @@ */ #include "tsdb.h" +#include "vnode.h" #define EXTRA_BYTES 2 #define ASCENDING_TRAVERSE(o) (o == TSDB_ORDER_ASC) @@ -140,12 +141,6 @@ typedef struct STsdbReadHandle { STSchema* pSchema; } STsdbReadHandle; -typedef struct STableGroupSupporter { - int32_t numOfCols; - SColIndex* pCols; - SSchema* pTagSchema; -} STableGroupSupporter; - static STimeWindow updateLastrowForEachGroup(STableListInfo* pList); static int32_t checkForCachedLastRow(STsdbReadHandle* pTsdbReadHandle, STableListInfo* pList); static int32_t checkForCachedLast(STsdbReadHandle* pTsdbReadHandle); @@ -211,12 +206,6 @@ int64_t tsdbGetNumOfRowsInMemTable(tsdbReaderT* pHandle) { return rows; } - // STableData* pMem = NULL; - // STableData* pIMem = NULL; - - // SMemTable* pMemT = pMemRef->snapshot.mem; - // SMemTable* pIMemT = pMemRef->snapshot.imem; - size_t size = taosArrayGetSize(pTsdbReadHandle->pTableCheckInfo); for (int32_t i = 0; i < size; ++i) { STableCheckInfo* pCheckInfo = taosArrayGet(pTsdbReadHandle->pTableCheckInfo, i); @@ -259,8 +248,8 @@ static SArray* createCheckInfoFromTableGroup(STsdbReadHandle* pTsdbReadHandle, S } taosArrayPush(pTableCheckInfo, &info); - tsdbDebug("%p check table uid:%" PRId64 " from lastKey:%" PRId64 " %s", pTsdbReadHandle, info.tableId, - info.lastKey, pTsdbReadHandle->idStr); + tsdbDebug("%p check table uid:%" PRId64 " from lastKey:%" PRId64 " %s", pTsdbReadHandle, info.tableId, info.lastKey, + pTsdbReadHandle->idStr); } // TODO group table according to the tag value. 
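The tsdbRead.c hunks below replace the reader's single pCond->twindow with a twindows array selected by tWinIdx, and setQueryTimewindow clamps the chosen window against the earliest timestamp still covered by retention. A minimal sketch of that clamping logic, with STimeWindow and the ascending/descending convention reduced to self-contained stand-ins; the clampWindow helper and the sample values are illustrative only:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef struct { int64_t skey, ekey; } STimeWindow;

/* Clamp one query window so it never reaches before the earliest key
 * still retained on disk; returns true when the window was adjusted. */
static bool clampWindow(STimeWindow *w, int64_t earliestTs, bool ascending) {
  if (ascending) {
    if (earliestTs > w->skey) { w->skey = earliestTs; return true; }
  } else {
    if (earliestTs > w->ekey) { w->ekey = earliestTs; return true; }
  }
  return false;
}

int main(void) {
  STimeWindow wins[2] = {{100, 500}, {900, 1500}};
  int64_t earliest = 1000; /* would come from the keep/retention config */
  for (int i = 0; i < 2; i++) {
    bool clamped = clampWindow(&wins[i], earliest, true);
    printf("win[%d] = [%lld, %lld]%s\n", i, (long long)wins[i].skey,
           (long long)wins[i].ekey, clamped ? " (clamped)" : "");
  }
  return 0;
}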
@@ -317,34 +306,34 @@ static int64_t getEarliestValidTimestamp(STsdb* pTsdb) { return now - (tsTickPerMin[pCfg->precision] * pCfg->keep2) + 1; // needs to add one tick } -static void setQueryTimewindow(STsdbReadHandle* pTsdbReadHandle, SQueryTableDataCond* pCond) { - pTsdbReadHandle->window = pCond->twindow; +static void setQueryTimewindow(STsdbReadHandle* pTsdbReadHandle, SQueryTableDataCond* pCond, int32_t tWinIdx) { + pTsdbReadHandle->window = pCond->twindows[tWinIdx]; bool updateTs = false; int64_t startTs = getEarliestValidTimestamp(pTsdbReadHandle->pTsdb); if (ASCENDING_TRAVERSE(pTsdbReadHandle->order)) { if (startTs > pTsdbReadHandle->window.skey) { pTsdbReadHandle->window.skey = startTs; - pCond->twindow.skey = startTs; + pCond->twindows[tWinIdx].skey = startTs; updateTs = true; } } else { if (startTs > pTsdbReadHandle->window.ekey) { pTsdbReadHandle->window.ekey = startTs; - pCond->twindow.ekey = startTs; + pCond->twindows[tWinIdx].ekey = startTs; updateTs = true; } } if (updateTs) { tsdbDebug("%p update the query time window, old:%" PRId64 " - %" PRId64 ", new:%" PRId64 " - %" PRId64 ", %s", - pTsdbReadHandle, pCond->twindow.skey, pCond->twindow.ekey, pTsdbReadHandle->window.skey, - pTsdbReadHandle->window.ekey, pTsdbReadHandle->idStr); + pTsdbReadHandle, pCond->twindows[tWinIdx].skey, pCond->twindows[tWinIdx].ekey, + pTsdbReadHandle->window.skey, pTsdbReadHandle->window.ekey, pTsdbReadHandle->idStr); } } static STsdb* getTsdbByRetentions(SVnode* pVnode, STsdbReadHandle* pReadHandle, TSKEY winSKey, SRetention* retentions) { - if (vnodeIsRollup(pVnode)) { + if (VND_IS_RSMA(pVnode)) { int level = 0; int64_t now = taosGetTimestamp(pVnode->config.tsdbCfg.precision); @@ -363,13 +352,16 @@ static STsdb* getTsdbByRetentions(SVnode* pVnode, STsdbReadHandle* pReadHandle, } if (level == TSDB_RETENTION_L0) { - tsdbDebug("vgId:%d read handle %p rsma level %d is selected to query", TD_VID(pVnode), pReadHandle, TSDB_RETENTION_L0); + tsdbDebug("vgId:%d, read handle %p rsma level %d is selected to query", TD_VID(pVnode), pReadHandle, + TSDB_RETENTION_L0); return VND_RSMA0(pVnode); } else if (level == TSDB_RETENTION_L1) { - tsdbDebug("vgId:%d read handle %p rsma level %d is selected to query", TD_VID(pVnode), pReadHandle, TSDB_RETENTION_L1); + tsdbDebug("vgId:%d, read handle %p rsma level %d is selected to query", TD_VID(pVnode), pReadHandle, + TSDB_RETENTION_L1); return VND_RSMA1(pVnode); } else { - tsdbDebug("vgId:%d read handle %p rsma level %d is selected to query", TD_VID(pVnode), pReadHandle, TSDB_RETENTION_L2); + tsdbDebug("vgId:%d, read handle %p rsma level %d is selected to query", TD_VID(pVnode), pReadHandle, + TSDB_RETENTION_L2); return VND_RSMA2(pVnode); } } @@ -382,7 +374,7 @@ static STsdbReadHandle* tsdbQueryTablesImpl(SVnode* pVnode, SQueryTableDataCond* goto _end; } - STsdb* pTsdb = getTsdbByRetentions(pVnode, pReadHandle, pCond->twindow.skey, pVnode->config.tsdbCfg.retentions); + STsdb* pTsdb = getTsdbByRetentions(pVnode, pReadHandle, pCond->twindows[0].skey, pVnode->config.tsdbCfg.retentions); pReadHandle->order = pCond->order; pReadHandle->pTsdb = pTsdb; @@ -408,11 +400,11 @@ static STsdbReadHandle* tsdbQueryTablesImpl(SVnode* pVnode, SQueryTableDataCond* } assert(pCond != NULL); - setQueryTimewindow(pReadHandle, pCond); + setQueryTimewindow(pReadHandle, pCond, 0); if (pCond->numOfCols > 0) { int32_t rowLen = 0; - for(int32_t i = 0; i < pCond->numOfCols; ++i) { + for (int32_t i = 0; i < pCond->numOfCols; ++i) { rowLen += pCond->colList[i].bytes; } @@ -447,10 +439,10 @@ static 
STsdbReadHandle* tsdbQueryTablesImpl(SVnode* pVnode, SQueryTableDataCond* } pReadHandle->suppInfo.defaultLoadColumn = getDefaultLoadColumns(pReadHandle, true); - pReadHandle->suppInfo.slotIds = - taosMemoryMalloc(sizeof(int32_t) * taosArrayGetSize(pReadHandle->suppInfo.defaultLoadColumn)); - pReadHandle->suppInfo.plist = - taosMemoryCalloc(taosArrayGetSize(pReadHandle->suppInfo.defaultLoadColumn), POINTER_BYTES); + + size_t size = taosArrayGetSize(pReadHandle->suppInfo.defaultLoadColumn); + pReadHandle->suppInfo.slotIds = taosMemoryCalloc(size, sizeof(int32_t)); + pReadHandle->suppInfo.plist = taosMemoryCalloc(size, POINTER_BYTES); } pReadHandle->pDataCols = tdNewDataCols(1000, pVnode->config.tsdbCfg.maxRows); @@ -471,6 +463,39 @@ _end: return NULL; } +static int32_t setCurrentSchema(SVnode* pVnode, STsdbReadHandle* pTsdbReadHandle) { + STableCheckInfo* pCheckInfo = taosArrayGet(pTsdbReadHandle->pTableCheckInfo, 0); + + int32_t sversion = 1; + + SMetaReader mr = {0}; + metaReaderInit(&mr, pVnode->pMeta, 0); + int32_t code = metaGetTableEntryByUid(&mr, pCheckInfo->tableId); + if (code != TSDB_CODE_SUCCESS) { + terrno = TSDB_CODE_TDB_INVALID_TABLE_ID; + metaReaderClear(&mr); + return terrno; + } + + if (mr.me.type == TSDB_CHILD_TABLE) { + tb_uid_t suid = mr.me.ctbEntry.suid; + code = metaGetTableEntryByUid(&mr, suid); + if (code != TSDB_CODE_SUCCESS) { + terrno = TSDB_CODE_TDB_INVALID_TABLE_ID; + metaReaderClear(&mr); + return terrno; + } + sversion = mr.me.stbEntry.schemaRow.version; + } else { + ASSERT(mr.me.type == TSDB_NORMAL_TABLE); + sversion = mr.me.ntbEntry.schemaRow.version; + } + + metaReaderClear(&mr); + pTsdbReadHandle->pSchema = metaGetTbTSchema(pVnode->pMeta, pCheckInfo->tableId, sversion); + return TSDB_CODE_SUCCESS; +} + tsdbReaderT* tsdbQueryTables(SVnode* pVnode, SQueryTableDataCond* pCond, STableListInfo* tableList, uint64_t qId, uint64_t taskId) { STsdbReadHandle* pTsdbReadHandle = tsdbQueryTablesImpl(pVnode, pCond, qId, taskId); @@ -490,9 +515,12 @@ tsdbReaderT* tsdbQueryTables(SVnode* pVnode, SQueryTableDataCond* pCond, STableL return NULL; } - STableCheckInfo* pCheckInfo = taosArrayGet(pTsdbReadHandle->pTableCheckInfo, 0); + int32_t code = setCurrentSchema(pVnode, pTsdbReadHandle); + if (code != TSDB_CODE_SUCCESS) { + terrno = code; + return NULL; + } - pTsdbReadHandle->pSchema = metaGetTbTSchema(pVnode->pMeta, pCheckInfo->tableId, 1); int32_t numOfCols = taosArrayGetSize(pTsdbReadHandle->suppInfo.defaultLoadColumn); int16_t* ids = pTsdbReadHandle->suppInfo.defaultLoadColumn->pData; @@ -520,7 +548,7 @@ tsdbReaderT* tsdbQueryTables(SVnode* pVnode, SQueryTableDataCond* pCond, STableL return (tsdbReaderT)pTsdbReadHandle; } -void tsdbResetReadHandle(tsdbReaderT queryHandle, SQueryTableDataCond* pCond) { +void tsdbResetReadHandle(tsdbReaderT queryHandle, SQueryTableDataCond* pCond, int32_t tWinIdx) { STsdbReadHandle* pTsdbReadHandle = queryHandle; if (emptyQueryTimewindow(pTsdbReadHandle)) { @@ -533,7 +561,7 @@ void tsdbResetReadHandle(tsdbReaderT queryHandle, SQueryTableDataCond* pCond) { } pTsdbReadHandle->order = pCond->order; - pTsdbReadHandle->window = pCond->twindow; + setQueryTimewindow(pTsdbReadHandle, pCond, tWinIdx); pTsdbReadHandle->type = TSDB_QUERY_TYPE_ALL; pTsdbReadHandle->cur.fid = -1; pTsdbReadHandle->cur.win = TSWINDOW_INITIALIZER; @@ -558,11 +586,12 @@ void tsdbResetReadHandle(tsdbReaderT queryHandle, SQueryTableDataCond* pCond) { resetCheckInfo(pTsdbReadHandle); } -void tsdbResetQueryHandleForNewTable(tsdbReaderT queryHandle, SQueryTableDataCond* pCond, 
STableListInfo* tableList) { +void tsdbResetQueryHandleForNewTable(tsdbReaderT queryHandle, SQueryTableDataCond* pCond, STableListInfo* tableList, + int32_t tWinIdx) { STsdbReadHandle* pTsdbReadHandle = queryHandle; pTsdbReadHandle->order = pCond->order; - pTsdbReadHandle->window = pCond->twindow; + pTsdbReadHandle->window = pCond->twindows[tWinIdx]; pTsdbReadHandle->type = TSDB_QUERY_TYPE_ALL; pTsdbReadHandle->cur.fid = -1; pTsdbReadHandle->cur.win = TSWINDOW_INITIALIZER; @@ -602,7 +631,7 @@ void tsdbResetQueryHandleForNewTable(tsdbReaderT queryHandle, SQueryTableDataCon tsdbReaderT tsdbQueryLastRow(SVnode* pVnode, SQueryTableDataCond* pCond, STableListInfo* pList, uint64_t qId, uint64_t taskId) { - pCond->twindow = updateLastrowForEachGroup(pList); + pCond->twindows[0] = updateLastrowForEachGroup(pList); // no qualified table if (taosArrayGetSize(pList->pTableList) == 0) { @@ -620,7 +649,7 @@ tsdbReaderT tsdbQueryLastRow(SVnode* pVnode, SQueryTableDataCond* pCond, STableL return NULL; } - assert(pCond->order == TSDB_ORDER_ASC && pCond->twindow.skey <= pCond->twindow.ekey); + assert(pCond->order == TSDB_ORDER_ASC && pCond->twindows[0].skey <= pCond->twindows[0].ekey); if (pTsdbReadHandle->cachelastrow) { pTsdbReadHandle->type = TSDB_QUERY_TYPE_LAST; } @@ -660,7 +689,7 @@ SArray* tsdbGetQueriedTableList(tsdbReaderT* pHandle) { } // leave only one table for each group -//static STableGroupInfo* trimTableGroup(STimeWindow* window, STableGroupInfo* pGroupList) { +// static STableGroupInfo* trimTableGroup(STimeWindow* window, STableGroupInfo* pGroupList) { // assert(pGroupList); // size_t numOfGroup = taosArrayGetSize(pGroupList->pGroupList); // @@ -692,7 +721,7 @@ SArray* tsdbGetQueriedTableList(tsdbReaderT* pHandle) { // return pNew; //} -//tsdbReaderT tsdbQueryRowsInExternalWindow(SVnode* pVnode, SQueryTableDataCond* pCond, STableGroupInfo* groupList, +// tsdbReaderT tsdbQueryRowsInExternalWindow(SVnode* pVnode, SQueryTableDataCond* pCond, STableGroupInfo* groupList, // uint64_t qId, uint64_t taskId) { // STableGroupInfo* pNew = trimTableGroup(&pCond->twindow, groupList); // @@ -1299,7 +1328,6 @@ static int32_t handleDataMergeIfNeeded(STsdbReadHandle* pTsdbReadHandle, SBlock* if ((ascScan && (key != TSKEY_INITIAL_VAL && key <= binfo.window.ekey)) || (!ascScan && (key != TSKEY_INITIAL_VAL && key >= binfo.window.skey))) { - bool cacheDataInFileBlockHole = (ascScan && (key != TSKEY_INITIAL_VAL && key < binfo.window.skey)) || (!ascScan && (key != TSKEY_INITIAL_VAL && key > binfo.window.ekey)); if (cacheDataInFileBlockHole) { @@ -1342,7 +1370,7 @@ static int32_t handleDataMergeIfNeeded(STsdbReadHandle* pTsdbReadHandle, SBlock* pTsdbReadHandle->realNumOfRows = binfo.rows; cur->rows = binfo.rows; - cur->win = binfo.window; + cur->win = binfo.window; cur->mixBlock = false; cur->blockCompleted = true; @@ -1353,9 +1381,9 @@ static int32_t handleDataMergeIfNeeded(STsdbReadHandle* pTsdbReadHandle, SBlock* cur->lastKey = binfo.window.skey - 1; cur->pos = -1; } - } else { // partially copy to dest buffer + } else { // partially copy to dest buffer // make sure to only load once - bool firstTimeExtract = ((cur->pos == 0 && ascScan) || (cur->pos == binfo.rows -1 && (!ascScan))); + bool firstTimeExtract = ((cur->pos == 0 && ascScan) || (cur->pos == binfo.rows - 1 && (!ascScan))); if (pTsdbReadHandle->outputCapacity < binfo.rows && firstTimeExtract) { code = doLoadFileDataBlock(pTsdbReadHandle, pBlock, pCheckInfo, cur->slot); if (code != TSDB_CODE_SUCCESS) { @@ -1368,7 +1396,7 @@ static int32_t 
handleDataMergeIfNeeded(STsdbReadHandle* pTsdbReadHandle, SBlock* } if (pTsdbReadHandle->outputCapacity >= binfo.rows) { - ASSERT(cur->blockCompleted); + ASSERT(cur->blockCompleted || cur->mixBlock); } if (cur->rows == binfo.rows) { @@ -1864,7 +1892,7 @@ static void copyAllRemainRowsFromFileBlock(STsdbReadHandle* pTsdbReadHandle, STa bool ascScan = ASCENDING_TRAVERSE(pTsdbReadHandle->order); - int32_t step = ascScan? 1 : -1; + int32_t step = ascScan ? 1 : -1; int32_t start = cur->pos; int32_t end = endPos; @@ -1879,8 +1907,8 @@ static void copyAllRemainRowsFromFileBlock(STsdbReadHandle* pTsdbReadHandle, STa // the time window should always be ascending order: skey <= ekey cur->win = (STimeWindow){.skey = tsArray[start], .ekey = tsArray[end]}; cur->mixBlock = (numOfRows != pBlockInfo->rows); - cur->lastKey = tsArray[endPos] + step; - cur->blockCompleted = (ascScan? (endPos == pBlockInfo->rows - 1):(endPos == 0)); + cur->lastKey = tsArray[endPos] + step; + cur->blockCompleted = (ascScan ? (endPos == pBlockInfo->rows - 1) : (endPos == 0)); // The value of pos may be -1 or pBlockInfo->rows, and it is invalid in both cases. int32_t pos = endPos + step; @@ -1896,7 +1924,7 @@ int32_t getEndPosInDataBlock(STsdbReadHandle* pTsdbReadHandle, SDataBlockInfo* p // NOTE: reverse the order to find the end position in data block int32_t endPos = -1; bool ascScan = ASCENDING_TRAVERSE(pTsdbReadHandle->order); - int32_t order = ascScan? TSDB_ORDER_DESC : TSDB_ORDER_ASC; + int32_t order = ascScan ? TSDB_ORDER_DESC : TSDB_ORDER_ASC; SQueryFilePos* cur = &pTsdbReadHandle->cur; SDataCols* pCols = pTsdbReadHandle->rhelper.pDCols[0]; @@ -1956,7 +1984,7 @@ static void doMergeTwoLevelData(STsdbReadHandle* pTsdbReadHandle, STableCheckInf assert(pCols->numOfRows == pBlock->numOfRows && tsArray[0] == pBlock->keyFirst && tsArray[pBlock->numOfRows - 1] == pBlock->keyLast); - bool ascScan = ASCENDING_TRAVERSE(pTsdbReadHandle->order); + bool ascScan = ASCENDING_TRAVERSE(pTsdbReadHandle->order); int32_t step = ascScan ? 1 : -1; // for search the endPos, so the order needs to reverse @@ -1967,8 +1995,9 @@ static void doMergeTwoLevelData(STsdbReadHandle* pTsdbReadHandle, STableCheckInf STimeWindow* pWin = &blockInfo.window; tsdbDebug("%p uid:%" PRIu64 " start merge data block, file block range:%" PRIu64 "-%" PRIu64 - " rows:%d, start:%d, end:%d, %s", pTsdbReadHandle, pCheckInfo->tableId, pWin->skey, pWin->ekey, blockInfo.rows, - cur->pos, endPos, pTsdbReadHandle->idStr); + " rows:%d, start:%d, end:%d, %s", + pTsdbReadHandle, pCheckInfo->tableId, pWin->skey, pWin->ekey, blockInfo.rows, cur->pos, endPos, + pTsdbReadHandle->idStr); // compared with the data from in-memory buffer, to generate the correct timestamp array list int32_t numOfRows = 0; @@ -2087,8 +2116,9 @@ static void doMergeTwoLevelData(STsdbReadHandle* pTsdbReadHandle, STableCheckInf } // still assign data into current row - numOfRows += mergeTwoRowFromMem(pTsdbReadHandle, pTsdbReadHandle->outputCapacity, &curRow, row1, row2, numOfCols, - pCheckInfo->tableId, pSchema1, pSchema2, pCfg->update, &lastKeyAppend); + numOfRows += + mergeTwoRowFromMem(pTsdbReadHandle, pTsdbReadHandle->outputCapacity, &curRow, row1, row2, numOfCols, + pCheckInfo->tableId, pSchema1, pSchema2, pCfg->update, &lastKeyAppend); if (cur->win.skey == TSKEY_INITIAL_VAL) { cur->win.skey = key; @@ -2153,8 +2183,7 @@ static void doMergeTwoLevelData(STsdbReadHandle* pTsdbReadHandle, STableCheckInf * if cache is empty, load remain file block data. 
In contrast, if there are remain data in cache, do NOT * copy them all to result buffer, since it may be overlapped with file data block. */ - if (node == NULL || - ((TD_ROW_KEY((STSRow*)SL_GET_NODE_DATA(node)) > pTsdbReadHandle->window.ekey) && ascScan) || + if (node == NULL || ((TD_ROW_KEY((STSRow*)SL_GET_NODE_DATA(node)) > pTsdbReadHandle->window.ekey) && ascScan) || ((TD_ROW_KEY((STSRow*)SL_GET_NODE_DATA(node)) < pTsdbReadHandle->window.ekey) && !ascScan)) { // no data in cache or data in cache is greater than the ekey of time window, load data from file block if (cur->win.skey == TSKEY_INITIAL_VAL) { @@ -2175,7 +2204,7 @@ static void doMergeTwoLevelData(STsdbReadHandle* pTsdbReadHandle, STableCheckInf } cur->blockCompleted = (((pos > endPos || cur->lastKey > pTsdbReadHandle->window.ekey) && ascScan) || - ((pos < endPos || cur->lastKey < pTsdbReadHandle->window.ekey) && !ascScan)); + ((pos < endPos || cur->lastKey < pTsdbReadHandle->window.ekey) && !ascScan)); if (!ascScan) { TSWAP(cur->win.skey, cur->win.ekey); @@ -2794,6 +2823,12 @@ static int tsdbReadRowsFromCache(STableCheckInfo* pCheckInfo, TSKEY maxKey, int return numOfRows; } +void* tsdbGetIdx(SMeta* pMeta) { + if (pMeta == NULL) { + return NULL; + } + return metaGetIdx(pMeta); +} int32_t tsdbGetAllTableList(SMeta* pMeta, uint64_t uid, SArray* list) { SMCtbCursor* pCur = metaOpenCtbCursor(pMeta, uid); @@ -2811,6 +2846,22 @@ int32_t tsdbGetAllTableList(SMeta* pMeta, uint64_t uid, SArray* list) { return TSDB_CODE_SUCCESS; } +int32_t tsdbGetCtbIdList(SMeta* pMeta, int64_t suid, SArray* list) { + SMCtbCursor* pCur = metaOpenCtbCursor(pMeta, suid); + + while (1) { + tb_uid_t id = metaCtbCursorNext(pCur); + if (id == 0) { + break; + } + + taosArrayPush(list, &id); + } + + metaCloseCtbCursor(pCur); + return TSDB_CODE_SUCCESS; +} + static void destroyHelper(void* param) { if (param == NULL) { return; @@ -3382,65 +3433,65 @@ int32_t checkForCachedLast(STsdbReadHandle* pTsdbReadHandle) { STimeWindow updateLastrowForEachGroup(STableListInfo* pList) { STimeWindow window = {INT64_MAX, INT64_MIN}; -// int32_t totalNumOfTable = 0; -// SArray* emptyGroup = taosArrayInit(16, sizeof(int32_t)); -// -// // NOTE: starts from the buffer in case of descending timestamp order check data blocks -// size_t numOfGroups = taosArrayGetSize(groupList->pGroupList); -// for (int32_t j = 0; j < numOfGroups; ++j) { -// SArray* pGroup = taosArrayGetP(groupList->pGroupList, j); -// TSKEY key = TSKEY_INITIAL_VAL; -// -// STableKeyInfo keyInfo = {0}; -// -// size_t numOfTables = taosArrayGetSize(pGroup); -// for (int32_t i = 0; i < numOfTables; ++i) { -// STableKeyInfo* pInfo = (STableKeyInfo*)taosArrayGet(pGroup, i); -// -// // if the lastKey equals to INT64_MIN, there is no data in this table -// TSKEY lastKey = 0; //((STable*)(pInfo->pTable))->lastKey; -// if (key < lastKey) { -// key = lastKey; -// -// // keyInfo.pTable = pInfo->pTable; -// keyInfo.lastKey = key; -// pInfo->lastKey = key; -// -// if (key < window.skey) { -// window.skey = key; -// } -// -// if (key > window.ekey) { -// window.ekey = key; -// } -// } -// } -// -// // more than one table in each group, only one table left for each group -// // if (keyInfo.pTable != NULL) { -// // totalNumOfTable++; -// // if (taosArrayGetSize(pGroup) == 1) { -// // // do nothing -// // } else { -// // taosArrayClear(pGroup); -// // taosArrayPush(pGroup, &keyInfo); -// // } -// // } else { // mark all the empty groups, and remove it later -// // taosArrayDestroy(pGroup); -// // taosArrayPush(emptyGroup, &j); -// // 
} -// } -// -// // window does not being updated, so set the original -// if (window.skey == INT64_MAX && window.ekey == INT64_MIN) { -// window = TSWINDOW_INITIALIZER; -// assert(totalNumOfTable == 0 && taosArrayGetSize(groupList->pGroupList) == numOfGroups); -// } -// -// taosArrayRemoveBatch(groupList->pGroupList, TARRAY_GET_START(emptyGroup), (int32_t)taosArrayGetSize(emptyGroup)); -// taosArrayDestroy(emptyGroup); -// -// groupList->numOfTables = totalNumOfTable; + // int32_t totalNumOfTable = 0; + // SArray* emptyGroup = taosArrayInit(16, sizeof(int32_t)); + // + // // NOTE: starts from the buffer in case of descending timestamp order check data blocks + // size_t numOfGroups = taosArrayGetSize(groupList->pGroupList); + // for (int32_t j = 0; j < numOfGroups; ++j) { + // SArray* pGroup = taosArrayGetP(groupList->pGroupList, j); + // TSKEY key = TSKEY_INITIAL_VAL; + // + // STableKeyInfo keyInfo = {0}; + // + // size_t numOfTables = taosArrayGetSize(pGroup); + // for (int32_t i = 0; i < numOfTables; ++i) { + // STableKeyInfo* pInfo = (STableKeyInfo*)taosArrayGet(pGroup, i); + // + // // if the lastKey equals to INT64_MIN, there is no data in this table + // TSKEY lastKey = 0; //((STable*)(pInfo->pTable))->lastKey; + // if (key < lastKey) { + // key = lastKey; + // + // // keyInfo.pTable = pInfo->pTable; + // keyInfo.lastKey = key; + // pInfo->lastKey = key; + // + // if (key < window.skey) { + // window.skey = key; + // } + // + // if (key > window.ekey) { + // window.ekey = key; + // } + // } + // } + // + // // more than one table in each group, only one table left for each group + // // if (keyInfo.pTable != NULL) { + // // totalNumOfTable++; + // // if (taosArrayGetSize(pGroup) == 1) { + // // // do nothing + // // } else { + // // taosArrayClear(pGroup); + // // taosArrayPush(pGroup, &keyInfo); + // // } + // // } else { // mark all the empty groups, and remove it later + // // taosArrayDestroy(pGroup); + // // taosArrayPush(emptyGroup, &j); + // // } + // } + // + // // window does not being updated, so set the original + // if (window.skey == INT64_MAX && window.ekey == INT64_MIN) { + // window = TSWINDOW_INITIALIZER; + // assert(totalNumOfTable == 0 && taosArrayGetSize(groupList->pGroupList) == numOfGroups); + // } + // + // taosArrayRemoveBatch(groupList->pGroupList, TARRAY_GET_START(emptyGroup), (int32_t)taosArrayGetSize(emptyGroup)); + // taosArrayDestroy(emptyGroup); + // + // groupList->numOfTables = totalNumOfTable; return window; } @@ -3471,7 +3522,6 @@ void tsdbRetrieveDataBlockInfo(tsdbReaderT* pTsdbReadHandle, SDataBlockInfo* pDa pDataBlockInfo->rows = cur->rows; pDataBlockInfo->window = cur->win; - // ASSERT(pDataBlockInfo->numOfCols >= (int32_t)(QH_GET_NUM_OF_COLS(pHandle)); } /* @@ -3505,7 +3555,7 @@ int32_t tsdbRetrieveDataBlockStatisInfo(tsdbReaderT* pTsdbReadHandle, SColumnDat return TSDB_CODE_SUCCESS; } - tsdbDebug("vgId:%d succeed to load block statis part for uid %" PRIu64, REPO_ID(pHandle->pTsdb), + tsdbDebug("vgId:%d, succeed to load block statis part for uid %" PRIu64, REPO_ID(pHandle->pTsdb), TSDB_READ_TABLE_UID(&pHandle->rhelper)); int16_t* colIds = pHandle->suppInfo.defaultLoadColumn->pData; @@ -3537,9 +3587,9 @@ int32_t tsdbRetrieveDataBlockStatisInfo(tsdbReaderT* pTsdbReadHandle, SColumnDat if (IS_BSMA_ON(&(pHandle->pSchema->columns[slotIds[i]]))) { if (pHandle->suppInfo.pstatis[i].numOfNull == -1) { // set the column data are all NULL pHandle->suppInfo.pstatis[i].numOfNull = pBlockInfo->compBlock->numOfRows; - } else { - pHandle->suppInfo.plist[i] = 
&pHandle->suppInfo.pstatis[i]; } + + pHandle->suppInfo.plist[i] = &pHandle->suppInfo.pstatis[i]; } else { *allHave = false; } @@ -3588,108 +3638,6 @@ SArray* tsdbRetrieveDataBlock(tsdbReaderT* pTsdbReadHandle, SArray* pIdList) { } } } -#if 0 -void filterPrepare(void* expr, void* param) { - tExprNode* pExpr = (tExprNode*)expr; - if (pExpr->_node.info != NULL) { - return; - } - - pExpr->_node.info = taosMemoryCalloc(1, sizeof(tQueryInfo)); - - STSchema* pTSSchema = (STSchema*) param; - tQueryInfo* pInfo = pExpr->_node.info; - tVariant* pCond = pExpr->_node.pRight->pVal; - SSchema* pSchema = pExpr->_node.pLeft->pSchema; - - pInfo->sch = *pSchema; - pInfo->optr = pExpr->_node.optr; - pInfo->compare = getComparFunc(pInfo->sch.type, pInfo->optr); - pInfo->indexed = pTSSchema->columns->colId == pInfo->sch.colId; - - if (pInfo->optr == TSDB_RELATION_IN) { - int dummy = -1; - SHashObj *pObj = NULL; - if (pInfo->sch.colId == TSDB_TBNAME_COLUMN_INDEX) { - pObj = taosHashInit(256, taosGetDefaultHashFunction(pInfo->sch.type), true, false); - SArray *arr = (SArray *)(pCond->arr); - for (size_t i = 0; i < taosArrayGetSize(arr); i++) { - char* p = taosArrayGetP(arr, i); - strntolower_s(varDataVal(p), varDataVal(p), varDataLen(p)); - taosHashPut(pObj, varDataVal(p), varDataLen(p), &dummy, sizeof(dummy)); - } - } else { - buildFilterSetFromBinary((void **)&pObj, pCond->pz, pCond->nLen); - } - pInfo->q = (char *)pObj; - } else if (pCond != NULL) { - uint32_t size = pCond->nLen * TSDB_NCHAR_SIZE; - if (size < (uint32_t)pSchema->bytes) { - size = pSchema->bytes; - } - // to make sure tonchar does not cause invalid write, since the '\0' needs at least sizeof(TdUcs4) space. - pInfo->q = taosMemoryCalloc(1, size + TSDB_NCHAR_SIZE + VARSTR_HEADER_SIZE); - tVariantDump(pCond, pInfo->q, pSchema->type, true); - } -} - -#endif - -static int32_t tableGroupComparFn(const void* p1, const void* p2, const void* param) { -#if 0 - STableGroupSupporter* pTableGroupSupp = (STableGroupSupporter*) param; - STable* pTable1 = ((STableKeyInfo*) p1)->uid; - STable* pTable2 = ((STableKeyInfo*) p2)->uid; - - for (int32_t i = 0; i < pTableGroupSupp->numOfCols; ++i) { - SColIndex* pColIndex = &pTableGroupSupp->pCols[i]; - int32_t colIndex = pColIndex->colIndex; - - assert(colIndex >= TSDB_TBNAME_COLUMN_INDEX); - - char * f1 = NULL; - char * f2 = NULL; - int32_t type = 0; - int32_t bytes = 0; - - if (colIndex == TSDB_TBNAME_COLUMN_INDEX) { - f1 = (char*) TABLE_NAME(pTable1); - f2 = (char*) TABLE_NAME(pTable2); - type = TSDB_DATA_TYPE_BINARY; - bytes = tGetTbnameColumnSchema()->bytes; - } else { - if (pTableGroupSupp->pTagSchema && colIndex < pTableGroupSupp->pTagSchema->numOfCols) { - STColumn* pCol = schemaColAt(pTableGroupSupp->pTagSchema, colIndex); - bytes = pCol->bytes; - type = pCol->type; - f1 = tdGetKVRowValOfCol(pTable1->tagVal, pCol->colId); - f2 = tdGetKVRowValOfCol(pTable2->tagVal, pCol->colId); - } - } - - // this tags value may be NULL - if (f1 == NULL && f2 == NULL) { - continue; - } - - if (f1 == NULL) { - return -1; - } - - if (f2 == NULL) { - return 1; - } - - int32_t ret = doCompare(f1, f2, type, bytes); - if (ret == 0) { - continue; - } else { - return ret; - } - } -#endif - return 0; -} static int tsdbCheckInfoCompar(const void* key1, const void* key2) { if (((STableCheckInfo*)key1)->tableId < ((STableCheckInfo*)key2)->tableId) { @@ -3702,170 +3650,6 @@ static int tsdbCheckInfoCompar(const void* key1, const void* key2) { } } -void createTableGroupImpl(SArray* pGroups, SArray* pTableList, size_t numOfTables, TSKEY 
skey, - STableGroupSupporter* pSupp, __ext_compar_fn_t compareFn) { - STable* pTable = taosArrayGetP(pTableList, 0); - SArray* g = taosArrayInit(16, sizeof(STableKeyInfo)); - - STableKeyInfo info = {.lastKey = skey}; - taosArrayPush(g, &info); - - for (int32_t i = 1; i < numOfTables; ++i) { - STable** prev = taosArrayGet(pTableList, i - 1); - STable** p = taosArrayGet(pTableList, i); - - int32_t ret = compareFn(prev, p, pSupp); - assert(ret == 0 || ret == -1); - - if (ret == 0) { - STableKeyInfo info1 = {.lastKey = skey}; - taosArrayPush(g, &info1); - } else { - taosArrayPush(pGroups, &g); // current group is ended, start a new group - g = taosArrayInit(16, sizeof(STableKeyInfo)); - - STableKeyInfo info1 = {.lastKey = skey}; - taosArrayPush(g, &info1); - } - } - - taosArrayPush(pGroups, &g); -} - -SArray* createTableGroup(SArray* pTableList, SSchemaWrapper* pTagSchema, SColIndex* pCols, int32_t numOfOrderCols, - TSKEY skey) { - assert(pTableList != NULL); - SArray* pTableGroup = taosArrayInit(1, POINTER_BYTES); - - size_t size = taosArrayGetSize(pTableList); - if (size == 0) { - tsdbDebug("no qualified tables"); - return pTableGroup; - } - - if (numOfOrderCols == 0 || size == 1) { // no group by tags clause or only one table - SArray* sa = taosArrayDup(pTableList); - if (sa == NULL) { - taosArrayDestroy(pTableGroup); - return NULL; - } - - taosArrayPush(pTableGroup, &sa); - tsdbDebug("all %" PRIzu " tables belong to one group", size); - } else { - STableGroupSupporter sup = {0}; - sup.numOfCols = numOfOrderCols; - sup.pTagSchema = pTagSchema->pSchema; - sup.pCols = pCols; - - taosqsort(pTableList->pData, size, sizeof(STableKeyInfo), &sup, tableGroupComparFn); - createTableGroupImpl(pTableGroup, pTableList, size, skey, &sup, tableGroupComparFn); - } - - return pTableGroup; -} - -// static bool tableFilterFp(const void* pNode, void* param) { -// tQueryInfo* pInfo = (tQueryInfo*) param; -// -// STable* pTable = (STable*)(SL_GET_NODE_DATA((SSkipListNode*)pNode)); -// -// char* val = NULL; -// if (pInfo->sch.colId == TSDB_TBNAME_COLUMN_INDEX) { -// val = (char*) TABLE_NAME(pTable); -// } else { -// val = tdGetKVRowValOfCol(pTable->tagVal, pInfo->sch.colId); -// } -// -// if (pInfo->optr == TSDB_RELATION_ISNULL || pInfo->optr == TSDB_RELATION_NOTNULL) { -// if (pInfo->optr == TSDB_RELATION_ISNULL) { -// return (val == NULL) || isNull(val, pInfo->sch.type); -// } else if (pInfo->optr == TSDB_RELATION_NOTNULL) { -// return (val != NULL) && (!isNull(val, pInfo->sch.type)); -// } -// } else if (pInfo->optr == TSDB_RELATION_IN) { -// int type = pInfo->sch.type; -// if (type == TSDB_DATA_TYPE_BOOL || IS_SIGNED_NUMERIC_TYPE(type) || type == TSDB_DATA_TYPE_TIMESTAMP) { -// int64_t v; -// GET_TYPED_DATA(v, int64_t, pInfo->sch.type, val); -// return NULL != taosHashGet((SHashObj *)pInfo->q, (char *)&v, sizeof(v)); -// } else if (IS_UNSIGNED_NUMERIC_TYPE(type)) { -// uint64_t v; -// GET_TYPED_DATA(v, uint64_t, pInfo->sch.type, val); -// return NULL != taosHashGet((SHashObj *)pInfo->q, (char *)&v, sizeof(v)); -// } -// else if (type == TSDB_DATA_TYPE_DOUBLE || type == TSDB_DATA_TYPE_FLOAT) { -// double v; -// GET_TYPED_DATA(v, double, pInfo->sch.type, val); -// return NULL != taosHashGet((SHashObj *)pInfo->q, (char *)&v, sizeof(v)); -// } else if (type == TSDB_DATA_TYPE_BINARY || type == TSDB_DATA_TYPE_NCHAR){ -// return NULL != taosHashGet((SHashObj *)pInfo->q, varDataVal(val), varDataLen(val)); -// } -// -// } -// -// int32_t ret = 0; -// if (val == NULL) { //the val is possible to be null, so check it 
out carefully -// ret = -1; // val is missing in table tags value pairs -// } else { -// ret = pInfo->compare(val, pInfo->q); -// } -// -// switch (pInfo->optr) { -// case TSDB_RELATION_EQUAL: { -// return ret == 0; -// } -// case TSDB_RELATION_NOT_EQUAL: { -// return ret != 0; -// } -// case TSDB_RELATION_GREATER_EQUAL: { -// return ret >= 0; -// } -// case TSDB_RELATION_GREATER: { -// return ret > 0; -// } -// case TSDB_RELATION_LESS_EQUAL: { -// return ret <= 0; -// } -// case TSDB_RELATION_LESS: { -// return ret < 0; -// } -// case TSDB_RELATION_LIKE: { -// return ret == 0; -// } -// case TSDB_RELATION_MATCH: { -// return ret == 0; -// } -// case TSDB_RELATION_NMATCH: { -// return ret == 0; -// } -// case TSDB_RELATION_IN: { -// return ret == 1; -// } -// -// default: -// assert(false); -// } -// -// return true; -//} - -// static void getTableListfromSkipList(tExprNode *pExpr, SSkipList *pSkipList, SArray *result, SExprTraverseSupp -// *param); - -// static int32_t doQueryTableList(STable* pSTable, SArray* pRes, tExprNode* pExpr) { -// // // query according to the expression tree -// SExprTraverseSupp supp = { -// .nodeFilterFn = (__result_filter_fn_t)tableFilterFp, -// .setupInfoFn = filterPrepare, -// .pExtInfo = pSTable->tagSchema, -// }; -// -// getTableListfromSkipList(pExpr, pSTable->pIndex, pRes, &supp); -// tExprTreeDestroy(pExpr, destroyHelper); -// return TSDB_CODE_SUCCESS; -//} - static void* doFreeColumnInfoData(SArray* pColumnInfoData) { if (pColumnInfoData == NULL) { return NULL; @@ -3934,263 +3718,3 @@ void tsdbCleanupReadHandle(tsdbReaderT queryHandle) { taosMemoryFreeClear(pTsdbReadHandle); } - -#if 0 - -static void applyFilterToSkipListNode(SSkipList *pSkipList, tExprNode *pExpr, SArray *pResult, SExprTraverseSupp *param) { - SSkipListIterator* iter = tSkipListCreateIter(pSkipList); - - // Scan each node in the skiplist by using iterator - while (tSkipListIterNext(iter)) { - SSkipListNode *pNode = tSkipListIterGet(iter); - if (exprTreeApplyFilter(pExpr, pNode, param)) { - taosArrayPush(pResult, &(SL_GET_NODE_DATA(pNode))); - } - } - - tSkipListDestroyIter(iter); -} - -typedef struct { - char* v; - int32_t optr; -} SEndPoint; - -typedef struct { - SEndPoint* start; - SEndPoint* end; -} SQueryCond; - -// todo check for malloc failure -static int32_t setQueryCond(tQueryInfo *queryColInfo, SQueryCond* pCond) { - int32_t optr = queryColInfo->optr; - - if (optr == TSDB_RELATION_GREATER || optr == TSDB_RELATION_GREATER_EQUAL || - optr == TSDB_RELATION_EQUAL || optr == TSDB_RELATION_NOT_EQUAL) { - pCond->start = taosMemoryCalloc(1, sizeof(SEndPoint)); - pCond->start->optr = queryColInfo->optr; - pCond->start->v = queryColInfo->q; - } else if (optr == TSDB_RELATION_LESS || optr == TSDB_RELATION_LESS_EQUAL) { - pCond->end = taosMemoryCalloc(1, sizeof(SEndPoint)); - pCond->end->optr = queryColInfo->optr; - pCond->end->v = queryColInfo->q; - } else if (optr == TSDB_RELATION_IN) { - pCond->start = taosMemoryCalloc(1, sizeof(SEndPoint)); - pCond->start->optr = queryColInfo->optr; - pCond->start->v = queryColInfo->q; - } else if (optr == TSDB_RELATION_LIKE) { - assert(0); - } else if (optr == TSDB_RELATION_MATCH) { - assert(0); - } else if (optr == TSDB_RELATION_NMATCH) { - assert(0); - } - - return TSDB_CODE_SUCCESS; -} - -static void queryIndexedColumn(SSkipList* pSkipList, tQueryInfo* pQueryInfo, SArray* result) { - SSkipListIterator* iter = NULL; - - SQueryCond cond = {0}; - if (setQueryCond(pQueryInfo, &cond) != TSDB_CODE_SUCCESS) { - //todo handle error - } - - if (cond.start 
!= NULL) { - iter = tSkipListCreateIterFromVal(pSkipList, (char*) cond.start->v, pSkipList->type, TSDB_ORDER_ASC); - } else { - iter = tSkipListCreateIterFromVal(pSkipList, (char*)(cond.end ? cond.end->v: NULL), pSkipList->type, TSDB_ORDER_DESC); - } - - if (cond.start != NULL) { - int32_t optr = cond.start->optr; - - if (optr == TSDB_RELATION_EQUAL) { // equals - while(tSkipListIterNext(iter)) { - SSkipListNode* pNode = tSkipListIterGet(iter); - - int32_t ret = pQueryInfo->compare(SL_GET_NODE_KEY(pSkipList, pNode), cond.start->v); - if (ret != 0) { - break; - } - - STableKeyInfo info = {.pTable = (void*)SL_GET_NODE_DATA(pNode), .lastKey = TSKEY_INITIAL_VAL}; - taosArrayPush(result, &info); - } - } else if (optr == TSDB_RELATION_GREATER || optr == TSDB_RELATION_GREATER_EQUAL) { // greater equal - bool comp = true; - int32_t ret = 0; - - while(tSkipListIterNext(iter)) { - SSkipListNode* pNode = tSkipListIterGet(iter); - - if (comp) { - ret = pQueryInfo->compare(SL_GET_NODE_KEY(pSkipList, pNode), cond.start->v); - assert(ret >= 0); - } - - if (ret == 0 && optr == TSDB_RELATION_GREATER) { - continue; - } else { - STableKeyInfo info = {.pTable = (void*)SL_GET_NODE_DATA(pNode), .lastKey = TSKEY_INITIAL_VAL}; - taosArrayPush(result, &info); - comp = false; - } - } - } else if (optr == TSDB_RELATION_NOT_EQUAL) { // not equal - bool comp = true; - - while(tSkipListIterNext(iter)) { - SSkipListNode* pNode = tSkipListIterGet(iter); - comp = comp && (pQueryInfo->compare(SL_GET_NODE_KEY(pSkipList, pNode), cond.start->v) == 0); - if (comp) { - continue; - } - - STableKeyInfo info = {.pTable = (void*)SL_GET_NODE_DATA(pNode), .lastKey = TSKEY_INITIAL_VAL}; - taosArrayPush(result, &info); - } - - tSkipListDestroyIter(iter); - - comp = true; - iter = tSkipListCreateIterFromVal(pSkipList, (char*) cond.start->v, pSkipList->type, TSDB_ORDER_DESC); - while(tSkipListIterNext(iter)) { - SSkipListNode* pNode = tSkipListIterGet(iter); - comp = comp && (pQueryInfo->compare(SL_GET_NODE_KEY(pSkipList, pNode), cond.start->v) == 0); - if (comp) { - continue; - } - - STableKeyInfo info = {.pTable = (void*)SL_GET_NODE_DATA(pNode), .lastKey = TSKEY_INITIAL_VAL}; - taosArrayPush(result, &info); - } - - } else if (optr == TSDB_RELATION_IN) { - while(tSkipListIterNext(iter)) { - SSkipListNode* pNode = tSkipListIterGet(iter); - - int32_t ret = pQueryInfo->compare(SL_GET_NODE_KEY(pSkipList, pNode), cond.start->v); - if (ret != 0) { - break; - } - - STableKeyInfo info = {.pTable = (void*)SL_GET_NODE_DATA(pNode), .lastKey = TSKEY_INITIAL_VAL}; - taosArrayPush(result, &info); - } - - } else { - assert(0); - } - } else { - int32_t optr = cond.end ? 
cond.end->optr : TSDB_RELATION_INVALID; - if (optr == TSDB_RELATION_LESS || optr == TSDB_RELATION_LESS_EQUAL) { - bool comp = true; - int32_t ret = 0; - - while (tSkipListIterNext(iter)) { - SSkipListNode *pNode = tSkipListIterGet(iter); - - if (comp) { - ret = pQueryInfo->compare(SL_GET_NODE_KEY(pSkipList, pNode), cond.end->v); - assert(ret <= 0); - } - - if (ret == 0 && optr == TSDB_RELATION_LESS) { - continue; - } else { - STableKeyInfo info = {.pTable = (void *)SL_GET_NODE_DATA(pNode), .lastKey = TSKEY_INITIAL_VAL}; - taosArrayPush(result, &info); - comp = false; // no need to compare anymore - } - } - } else { - assert(pQueryInfo->optr == TSDB_RELATION_ISNULL || pQueryInfo->optr == TSDB_RELATION_NOTNULL); - - while (tSkipListIterNext(iter)) { - SSkipListNode *pNode = tSkipListIterGet(iter); - - bool isnull = isNull(SL_GET_NODE_KEY(pSkipList, pNode), pQueryInfo->sch.type); - if ((pQueryInfo->optr == TSDB_RELATION_ISNULL && isnull) || - (pQueryInfo->optr == TSDB_RELATION_NOTNULL && (!isnull))) { - STableKeyInfo info = {.pTable = (void *)SL_GET_NODE_DATA(pNode), .lastKey = TSKEY_INITIAL_VAL}; - taosArrayPush(result, &info); - } - } - } - } - - taosMemoryFree(cond.start); - taosMemoryFree(cond.end); - tSkipListDestroyIter(iter); -} - -static void queryIndexlessColumn(SSkipList* pSkipList, tQueryInfo* pQueryInfo, SArray* res, __result_filter_fn_t filterFp) { - SSkipListIterator* iter = tSkipListCreateIter(pSkipList); - - while (tSkipListIterNext(iter)) { - bool addToResult = false; - - SSkipListNode *pNode = tSkipListIterGet(iter); - - char *pData = SL_GET_NODE_DATA(pNode); - tstr *name = (tstr*) tsdbGetTableName((void*) pData); - - // todo speed up by using hash - if (pQueryInfo->sch.colId == TSDB_TBNAME_COLUMN_INDEX) { - if (pQueryInfo->optr == TSDB_RELATION_IN) { - addToResult = pQueryInfo->compare(name, pQueryInfo->q); - } else if (pQueryInfo->optr == TSDB_RELATION_LIKE || - pQueryInfo->optr == TSDB_RELATION_MATCH || - pQueryInfo->optr == TSDB_RELATION_NMATCH) { - addToResult = !pQueryInfo->compare(name, pQueryInfo->q); - } - } else { - addToResult = filterFp(pNode, pQueryInfo); - } - - if (addToResult) { - STableKeyInfo info = {.pTable = (void*)pData, .lastKey = TSKEY_INITIAL_VAL}; - taosArrayPush(res, &info); - } - } - - tSkipListDestroyIter(iter); -} - -// Apply the filter expression to each node in the skiplist to acquire the qualified nodes in skip list -//void getTableListfromSkipList(tExprNode *pExpr, SSkipList *pSkipList, SArray *result, SExprTraverseSupp *param) { -// if (pExpr == NULL) { -// return; -// } -// -// tExprNode *pLeft = pExpr->_node.pLeft; -// tExprNode *pRight = pExpr->_node.pRight; -// -// // column project -// if (pLeft->nodeType != TSQL_NODE_EXPR && pRight->nodeType != TSQL_NODE_EXPR) { -// assert(pLeft->nodeType == TSQL_NODE_COL && (pRight->nodeType == TSQL_NODE_VALUE || pRight->nodeType == TSQL_NODE_DUMMY)); -// -// param->setupInfoFn(pExpr, param->pExtInfo); -// -// tQueryInfo *pQueryInfo = pExpr->_node.info; -// if (pQueryInfo->indexed && (pQueryInfo->optr != TSDB_RELATION_LIKE -// && pQueryInfo->optr != TSDB_RELATION_MATCH && pQueryInfo->optr != TSDB_RELATION_NMATCH -// && pQueryInfo->optr != TSDB_RELATION_IN)) { -// queryIndexedColumn(pSkipList, pQueryInfo, result); -// } else { -// queryIndexlessColumn(pSkipList, pQueryInfo, result, param->nodeFilterFn); -// } -// -// return; -// } -// -// // The value of hasPK is always 0. 
-// uint8_t weight = pLeft->_node.hasPK + pRight->_node.hasPK; -// assert(weight == 0 && pSkipList != NULL && taosArrayGetSize(result) == 0); -// -// //apply the hierarchical filter expression to every node in skiplist to find the qualified nodes -// applyFilterToSkipListNode(pSkipList, pExpr, result, param); -//} -#endif diff --git a/source/dnode/vnode/src/tsdb/tsdbReadImpl.c b/source/dnode/vnode/src/tsdb/tsdbReadImpl.c index d51521c41c954821163d17a1eddf4a4ddee7f5ad..a6f2ff139437a45edbc9c42e41d1f1de16555d97 100644 --- a/source/dnode/vnode/src/tsdb/tsdbReadImpl.c +++ b/source/dnode/vnode/src/tsdb/tsdbReadImpl.c @@ -87,7 +87,7 @@ int tsdbSetAndOpenReadFSet(SReadH *pReadh, SDFileSet *pSet) { TSDB_FSET_SET_CLOSED(TSDB_READ_FSET(pReadh)); // if (tsdbOpenDFileSet(TSDB_READ_FSET(pReadh), O_RDONLY) < 0) { if (tsdbOpenDFileSet(TSDB_READ_FSET(pReadh), TD_FILE_READ) < 0) { - tsdbError("vgId:%d failed to open file set %d since %s", TSDB_READ_REPO_ID(pReadh), TSDB_FSET_FID(pSet), + tsdbError("vgId:%d, failed to open file set %d since %s", TSDB_READ_REPO_ID(pReadh), TSDB_FSET_FID(pSet), tstrerror(terrno)); return -1; } @@ -107,7 +107,7 @@ int tsdbLoadBlockIdx(SReadH *pReadh) { if (pHeadf->info.offset <= 0) return 0; if (tsdbSeekDFile(pHeadf, pHeadf->info.offset, SEEK_SET) < 0) { - tsdbError("vgId:%d failed to load SBlockIdx part while seek file %s since %s, offset:%u len :%u", + tsdbError("vgId:%d, failed to load SBlockIdx part while seek file %s since %s, offset:%u len :%u", TSDB_READ_REPO_ID(pReadh), TSDB_FILE_FULL_NAME(pHeadf), tstrerror(terrno), pHeadf->info.offset, pHeadf->info.len); return -1; @@ -117,7 +117,7 @@ int tsdbLoadBlockIdx(SReadH *pReadh) { int64_t nread = tsdbReadDFile(pHeadf, TSDB_READ_BUF(pReadh), pHeadf->info.len); if (nread < 0) { - tsdbError("vgId:%d failed to load SBlockIdx part while read file %s since %s, offset:%u len :%u", + tsdbError("vgId:%d, failed to load SBlockIdx part while read file %s since %s, offset:%u len :%u", TSDB_READ_REPO_ID(pReadh), TSDB_FILE_FULL_NAME(pHeadf), tstrerror(terrno), pHeadf->info.offset, pHeadf->info.len); return -1; @@ -125,14 +125,14 @@ int tsdbLoadBlockIdx(SReadH *pReadh) { if (nread < pHeadf->info.len) { terrno = TSDB_CODE_TDB_FILE_CORRUPTED; - tsdbError("vgId:%d SBlockIdx part in file %s is corrupted, offset:%u expected bytes:%u read bytes: %" PRId64, + tsdbError("vgId:%d, SBlockIdx part in file %s is corrupted, offset:%u expected bytes:%u read bytes: %" PRId64, TSDB_READ_REPO_ID(pReadh), TSDB_FILE_FULL_NAME(pHeadf), pHeadf->info.offset, pHeadf->info.len, nread); return -1; } if (!taosCheckChecksumWhole((uint8_t *)TSDB_READ_BUF(pReadh), pHeadf->info.len)) { terrno = TSDB_CODE_TDB_FILE_CORRUPTED; - tsdbError("vgId:%d SBlockIdx part in file %s is corrupted since wrong checksum, offset:%u len :%u", + tsdbError("vgId:%d, SBlockIdx part in file %s is corrupted since wrong checksum, offset:%u len :%u", TSDB_READ_REPO_ID(pReadh), TSDB_FILE_FULL_NAME(pHeadf), pHeadf->info.offset, pHeadf->info.len); return -1; } @@ -209,7 +209,7 @@ int tsdbLoadBlockInfo(SReadH *pReadh, void *pTarget) { SBlockIdx *pBlkIdx = pReadh->pBlkIdx; if (tsdbSeekDFile(pHeadf, pBlkIdx->offset, SEEK_SET) < 0) { - tsdbError("vgId:%d failed to load SBlockInfo part while seek file %s since %s, offset:%u len:%u", + tsdbError("vgId:%d, failed to load SBlockInfo part while seek file %s since %s, offset:%u len:%u", TSDB_READ_REPO_ID(pReadh), TSDB_FILE_FULL_NAME(pHeadf), tstrerror(terrno), pBlkIdx->offset, pBlkIdx->len); return -1; } @@ -218,21 +218,21 @@ int tsdbLoadBlockInfo(SReadH 
*pReadh, void *pTarget) { int64_t nread = tsdbReadDFile(pHeadf, (void *)(pReadh->pBlkInfo), pBlkIdx->len); if (nread < 0) { - tsdbError("vgId:%d failed to load SBlockInfo part while read file %s since %s, offset:%u len :%u", + tsdbError("vgId:%d, failed to load SBlockInfo part while read file %s since %s, offset:%u len :%u", TSDB_READ_REPO_ID(pReadh), TSDB_FILE_FULL_NAME(pHeadf), tstrerror(terrno), pBlkIdx->offset, pBlkIdx->len); return -1; } if (nread < pBlkIdx->len) { terrno = TSDB_CODE_TDB_FILE_CORRUPTED; - tsdbError("vgId:%d SBlockInfo part in file %s is corrupted, offset:%u expected bytes:%u read bytes:%" PRId64, + tsdbError("vgId:%d, SBlockInfo part in file %s is corrupted, offset:%u expected bytes:%u read bytes:%" PRId64, TSDB_READ_REPO_ID(pReadh), TSDB_FILE_FULL_NAME(pHeadf), pBlkIdx->offset, pBlkIdx->len, nread); return -1; } if (!taosCheckChecksumWhole((uint8_t *)(pReadh->pBlkInfo), pBlkIdx->len)) { terrno = TSDB_CODE_TDB_FILE_CORRUPTED; - tsdbError("vgId:%d SBlockInfo part in file %s is corrupted since wrong checksum, offset:%u len :%u", + tsdbError("vgId:%d, SBlockInfo part in file %s is corrupted since wrong checksum, offset:%u len :%u", TSDB_READ_REPO_ID(pReadh), TSDB_FILE_FULL_NAME(pHeadf), pBlkIdx->offset, pBlkIdx->len); return -1; } @@ -467,7 +467,7 @@ int tsdbLoadBlockStatis(SReadH *pReadh, SBlock *pBlock) { ASSERT(pBlock->numOfSubBlocks <= 1); if (!pBlock->aggrStat) { - tsdbDebug("vgId:%d no need to load block statis part for uid %" PRIu64 " since not exist", REPO_ID(pReadh->pRepo), + tsdbDebug("vgId:%d, no need to load block statis part for uid %" PRIu64 " since not exist", REPO_ID(pReadh->pRepo), TSDB_READ_TABLE_UID(pReadh)); return TSDB_STATIS_NONE; } @@ -475,7 +475,7 @@ int tsdbLoadBlockStatis(SReadH *pReadh, SBlock *pBlock) { SDFile *pDFileAggr = pBlock->last ? 
TSDB_READ_SMAL_FILE(pReadh) : TSDB_READ_SMAD_FILE(pReadh); if (tsdbSeekDFile(pDFileAggr, pBlock->aggrOffset, SEEK_SET) < 0) { - tsdbError("vgId:%d failed to load block statis part for uid %" PRIu64 " while seek file %s to offset %" PRIu64 + tsdbError("vgId:%d, failed to load block statis part for uid %" PRIu64 " while seek file %s to offset %" PRIu64 " since %s", TSDB_READ_REPO_ID(pReadh), TSDB_READ_TABLE_UID(pReadh), TSDB_FILE_FULL_NAME(pDFileAggr), (uint64_t)pBlock->aggrOffset, tstrerror(terrno)); @@ -487,7 +487,7 @@ int tsdbLoadBlockStatis(SReadH *pReadh, SBlock *pBlock) { int64_t nreadAggr = tsdbReadDFile(pDFileAggr, (void *)(pReadh->pAggrBlkData), sizeAggr); if (nreadAggr < 0) { - tsdbError("vgId:%d failed to load block statis part for uid %" PRIu64 + tsdbError("vgId:%d, failed to load block statis part for uid %" PRIu64 " while read file %s since %s, offset:%" PRIu64 " len :%" PRIzu, TSDB_READ_REPO_ID(pReadh), TSDB_READ_TABLE_UID(pReadh), TSDB_FILE_FULL_NAME(pDFileAggr), tstrerror(terrno), (uint64_t)pBlock->aggrOffset, sizeAggr); @@ -496,7 +496,7 @@ int tsdbLoadBlockStatis(SReadH *pReadh, SBlock *pBlock) { if (nreadAggr < sizeAggr) { terrno = TSDB_CODE_TDB_FILE_CORRUPTED; - tsdbError("vgId:%d block statis part for uid %" PRIu64 " in file %s is corrupted, offset:%" PRIu64 + tsdbError("vgId:%d, block statis part for uid %" PRIu64 " in file %s is corrupted, offset:%" PRIu64 " expected bytes:%" PRIzu " read bytes: %" PRId64, TSDB_READ_REPO_ID(pReadh), TSDB_READ_TABLE_UID(pReadh), TSDB_FILE_FULL_NAME(pDFileAggr), (uint64_t)pBlock->aggrOffset, sizeAggr, nreadAggr); @@ -505,7 +505,7 @@ int tsdbLoadBlockStatis(SReadH *pReadh, SBlock *pBlock) { if (!taosCheckChecksumWhole((uint8_t *)(pReadh->pAggrBlkData), (uint32_t)sizeAggr)) { terrno = TSDB_CODE_TDB_FILE_CORRUPTED; - tsdbError("vgId:%d block statis part for uid %" PRIu64 + tsdbError("vgId:%d, block statis part for uid %" PRIu64 "in file %s is corrupted since wrong checksum, offset:%" PRIu64 " len :%" PRIzu, TSDB_READ_REPO_ID(pReadh), TSDB_READ_TABLE_UID(pReadh), TSDB_FILE_FULL_NAME(pDFileAggr), (uint64_t)pBlock->aggrOffset, sizeAggr); @@ -518,7 +518,7 @@ static int tsdbLoadBlockOffset(SReadH *pReadh, SBlock *pBlock) { ASSERT(pBlock->numOfSubBlocks <= 1); SDFile *pDFile = (pBlock->last) ? 
TSDB_READ_LAST_FILE(pReadh) : TSDB_READ_DATA_FILE(pReadh); if (tsdbSeekDFile(pDFile, pBlock->offset, SEEK_SET) < 0) { - tsdbError("vgId:%d failed to load block head part while seek file %s to offset %" PRId64 " since %s", + tsdbError("vgId:%d, failed to load block head part while seek file %s to offset %" PRId64 " since %s", TSDB_READ_REPO_ID(pReadh), TSDB_FILE_FULL_NAME(pDFile), (int64_t)pBlock->offset, tstrerror(terrno)); return -1; } @@ -528,14 +528,14 @@ static int tsdbLoadBlockOffset(SReadH *pReadh, SBlock *pBlock) { int64_t nread = tsdbReadDFile(pDFile, (void *)(pReadh->pBlkData), size); if (nread < 0) { - tsdbError("vgId:%d failed to load block head part while read file %s since %s, offset:%" PRId64 " len :%" PRIzu, + tsdbError("vgId:%d, failed to load block head part while read file %s since %s, offset:%" PRId64 " len :%" PRIzu, TSDB_READ_REPO_ID(pReadh), TSDB_FILE_FULL_NAME(pDFile), tstrerror(terrno), (int64_t)pBlock->offset, size); return -1; } if (nread < size) { terrno = TSDB_CODE_TDB_FILE_CORRUPTED; - tsdbError("vgId:%d block head part in file %s is corrupted, offset:%" PRId64 " expected bytes:%" PRIzu + tsdbError("vgId:%d, block head part in file %s is corrupted, offset:%" PRId64 " expected bytes:%" PRIzu " read bytes: %" PRId64, TSDB_READ_REPO_ID(pReadh), TSDB_FILE_FULL_NAME(pDFile), (int64_t)pBlock->offset, size, nread); return -1; @@ -543,7 +543,7 @@ static int tsdbLoadBlockOffset(SReadH *pReadh, SBlock *pBlock) { if (!taosCheckChecksumWhole((uint8_t *)(pReadh->pBlkData), (uint32_t)size)) { terrno = TSDB_CODE_TDB_FILE_CORRUPTED; - tsdbError("vgId:%d block head part in file %s is corrupted since wrong checksum, offset:%" PRId64 " len :%" PRIzu, + tsdbError("vgId:%d, block head part in file %s is corrupted since wrong checksum, offset:%" PRId64 " len :%" PRIzu, TSDB_READ_REPO_ID(pReadh), TSDB_FILE_FULL_NAME(pDFile), (int64_t)pBlock->offset, size); return -1; } @@ -671,14 +671,14 @@ static int tsdbLoadBlockDataImpl(SReadH *pReadh, SBlock *pBlock, SDataCols *pDat SBlockData *pBlockData = (SBlockData *)TSDB_READ_BUF(pReadh); if (tsdbSeekDFile(pDFile, pBlock->offset, SEEK_SET) < 0) { - tsdbError("vgId:%d failed to load block data part while seek file %s to offset %" PRId64 " since %s", + tsdbError("vgId:%d, failed to load block data part while seek file %s to offset %" PRId64 " since %s", TSDB_READ_REPO_ID(pReadh), TSDB_FILE_FULL_NAME(pDFile), (int64_t)pBlock->offset, tstrerror(terrno)); return -1; } int64_t nread = tsdbReadDFile(pDFile, TSDB_READ_BUF(pReadh), pBlock->len); if (nread < 0) { - tsdbError("vgId:%d failed to load block data part while read file %s since %s, offset:%" PRId64 " len :%d", + tsdbError("vgId:%d, failed to load block data part while read file %s since %s, offset:%" PRId64 " len :%d", TSDB_READ_REPO_ID(pReadh), TSDB_FILE_FULL_NAME(pDFile), tstrerror(terrno), (int64_t)pBlock->offset, pBlock->len); return -1; @@ -686,7 +686,7 @@ static int tsdbLoadBlockDataImpl(SReadH *pReadh, SBlock *pBlock, SDataCols *pDat if (nread < pBlock->len) { terrno = TSDB_CODE_TDB_FILE_CORRUPTED; - tsdbError("vgId:%d block data part in file %s is corrupted, offset:%" PRId64 + tsdbError("vgId:%d, block data part in file %s is corrupted, offset:%" PRId64 " expected bytes:%d read bytes: %" PRId64, TSDB_READ_REPO_ID(pReadh), TSDB_FILE_FULL_NAME(pDFile), (int64_t)pBlock->offset, pBlock->len, nread); return -1; @@ -695,7 +695,7 @@ static int tsdbLoadBlockDataImpl(SReadH *pReadh, SBlock *pBlock, SDataCols *pDat int32_t tsize = (int32_t)tsdbBlockStatisSize(pBlock->numOfCols, 
(uint32_t)pBlock->blkVer); if (!taosCheckChecksumWhole((uint8_t *)TSDB_READ_BUF(pReadh), tsize)) { terrno = TSDB_CODE_TDB_FILE_CORRUPTED; - tsdbError("vgId:%d block head part in file %s is corrupted since wrong checksum, offset:%" PRId64 " len :%d", + tsdbError("vgId:%d, block head part in file %s is corrupted since wrong checksum, offset:%" PRId64 " len :%d", TSDB_READ_REPO_ID(pReadh), TSDB_FILE_FULL_NAME(pDFile), (int64_t)pBlock->offset, tsize); return -1; } @@ -750,7 +750,7 @@ static int tsdbLoadBlockDataImpl(SReadH *pReadh, SBlock *pBlock, SDataCols *pDat pBlockCol ? pBlockCol->blen : 0, pBlock->algorithm, pBlock->numOfRows, tLenBitmap, pDataCols->maxPoints, TSDB_READ_COMP_BUF(pReadh), (int)taosTSizeof(TSDB_READ_COMP_BUF(pReadh))) < 0) { - tsdbError("vgId:%d file %s is broken at column %d block offset %" PRId64 " column offset %u", + tsdbError("vgId:%d, file %s is broken at column %d block offset %" PRId64 " column offset %u", TSDB_READ_REPO_ID(pReadh), TSDB_FILE_FULL_NAME(pDFile), tcolId, (int64_t)pBlock->offset, toffset); return -1; } @@ -945,21 +945,21 @@ static int tsdbLoadColData(SReadH *pReadh, SDFile *pDFile, SBlock *pBlock, SBloc int64_t offset = pBlock->offset + tsdbBlockStatisSize(pBlock->numOfCols, (uint32_t)pBlock->blkVer) + tsdbGetBlockColOffset(pBlockCol); if (tsdbSeekDFile(pDFile, offset, SEEK_SET) < 0) { - tsdbError("vgId:%d failed to load block column data while seek file %s to offset %" PRId64 " since %s", + tsdbError("vgId:%d, failed to load block column data while seek file %s to offset %" PRId64 " since %s", TSDB_READ_REPO_ID(pReadh), TSDB_FILE_FULL_NAME(pDFile), offset, tstrerror(terrno)); return -1; } int64_t nread = tsdbReadDFile(pDFile, TSDB_READ_BUF(pReadh), pBlockCol->len); if (nread < 0) { - tsdbError("vgId:%d failed to load block column data while read file %s since %s, offset:%" PRId64 " len :%d", + tsdbError("vgId:%d, failed to load block column data while read file %s since %s, offset:%" PRId64 " len :%d", TSDB_READ_REPO_ID(pReadh), TSDB_FILE_FULL_NAME(pDFile), tstrerror(terrno), offset, pBlockCol->len); return -1; } if (nread < pBlockCol->len) { terrno = TSDB_CODE_TDB_FILE_CORRUPTED; - tsdbError("vgId:%d block column data in file %s is corrupted, offset:%" PRId64 " expected bytes:%d" PRIzu + tsdbError("vgId:%d, block column data in file %s is corrupted, offset:%" PRId64 " expected bytes:%d" PRIzu " read bytes: %" PRId64, TSDB_READ_REPO_ID(pReadh), TSDB_FILE_FULL_NAME(pDFile), offset, pBlockCol->len, nread); return -1; @@ -968,7 +968,7 @@ static int tsdbLoadColData(SReadH *pReadh, SDFile *pDFile, SBlock *pBlock, SBloc if (tsdbCheckAndDecodeColumnData(pDataCol, pReadh->pBuf, pBlockCol->len, pBlockCol->blen, pBlock->algorithm, pBlock->numOfRows, tLenBitmap, pCfg->maxRows, pReadh->pCBuf, (int32_t)taosTSizeof(pReadh->pCBuf)) < 0) { - tsdbError("vgId:%d file %s is broken at column %d offset %" PRId64, REPO_ID(pRepo), TSDB_FILE_FULL_NAME(pDFile), + tsdbError("vgId:%d, file %s is broken at column %d offset %" PRId64, REPO_ID(pRepo), TSDB_FILE_FULL_NAME(pDFile), pBlockCol->colId, offset); return -1; } diff --git a/source/dnode/vnode/src/tsdb/tsdbSma.c b/source/dnode/vnode/src/tsdb/tsdbSma.c deleted file mode 100644 index 18cf18dbad32bb1a780d098c0343c8c7894f700b..0000000000000000000000000000000000000000 --- a/source/dnode/vnode/src/tsdb/tsdbSma.c +++ /dev/null @@ -1,2203 +0,0 @@ -/* - * Copyright (c) 2019 TAOS Data, Inc. 
- * - * This program is free software: you can use, redistribute, and/or modify - * it under the terms of the GNU Affero General Public License, version 3 - * or later ("AGPL"), as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . - */ - -#include "tsdbSma.h" -#include "tsdb.h" - -static const char *TSDB_SMA_DNAME[] = { - "", // TSDB_SMA_TYPE_BLOCK - "tsma", // TSDB_SMA_TYPE_TIME_RANGE - "rsma", // TSDB_SMA_TYPE_ROLLUP -}; - -#undef _TEST_SMA_PRINT_DEBUG_LOG_ -#define SMA_STORAGE_TSDB_DAYS 30 -#define SMA_STORAGE_TSDB_TIMES 10 -#define SMA_STORAGE_SPLIT_HOURS 24 -#define SMA_KEY_LEN 16 // TSKEY+groupId 8+8 -#define SMA_DROP_EXPIRED_TIME 10 // default is 10 seconds - -#define SMA_STATE_HASH_SLOT 4 -#define SMA_STATE_ITEM_HASH_SLOT 32 - -#define SMA_TEST_INDEX_NAME "smaTestIndexName" // TODO: just for test -#define SMA_TEST_INDEX_UID 2000000001 // TODO: just for test - -typedef struct SRSmaInfo SRSmaInfo; -typedef enum { - SMA_STORAGE_LEVEL_TSDB = 0, // use days of self-defined e.g. vnode${N}/tsdb/tsma/sma_index_uid/v2f200.tsma - SMA_STORAGE_LEVEL_DFILESET = 1 // use days of TS data e.g. vnode${N}/tsdb/tsma/sma_index_uid/v2f1906.tsma -} ESmaStorageLevel; - -typedef struct SPoolMem { - int64_t size; - struct SPoolMem *prev; - struct SPoolMem *next; -} SPoolMem; - -struct SSmaEnv { - TdThreadRwlock lock; - int8_t type; - TXN txn; - SPoolMem *pPool; - SDiskID did; - TDB *dbEnv; // TODO: If it's better to put it in smaIndex level? - char *path; // relative path - SSmaStat *pStat; -}; - -#define SMA_ENV_LOCK(env) ((env)->lock) -#define SMA_ENV_TYPE(env) ((env)->type) -#define SMA_ENV_DID(env) ((env)->did) -#define SMA_ENV_ENV(env) ((env)->dbEnv) -#define SMA_ENV_PATH(env) ((env)->path) -#define SMA_ENV_STAT(env) ((env)->pStat) -#define SMA_ENV_STAT_ITEMS(env) ((env)->pStat->smaStatItems) - -typedef struct { - STsdb *pTsdb; - SDBFile dFile; - const SArray *pDataBlocks; // sma data - int32_t interval; // interval with the precision of DB -} STSmaWriteH; - -typedef struct { - int32_t iter; - int32_t fid; -} SmaFsIter; - -typedef struct { - STsdb *pTsdb; - SDBFile dFile; - int32_t interval; // interval with the precision of DB - int32_t blockSize; // size of SMA block item - int8_t storageLevel; - int8_t days; - SmaFsIter smaFsIter; -} STSmaReadH; - -typedef struct { - /** - * @brief The field 'state' is here to demonstrate if one smaIndex is ready to provide service. - * - TSDB_SMA_STAT_OK: 1) The sma calculation of history data is finished; 2) Or recevied information from - * Streaming Module or TSDB local persistence. - * - TSDB_SMA_STAT_EXPIRED: 1) If sma calculation of history TS data is not finished; 2) Or if the TSDB is open, - * without information about its previous state. - * - TSDB_SMA_STAT_DROPPED: 1)sma dropped - * N.B. 
only applicable to tsma - */ - int8_t state; // ETsdbSmaStat - SHashObj *expiredWindows; // key: skey of time window, value: N/A - STSma *pSma; // cache schema -} SSmaStatItem; - -#define RSMA_TASK_INFO_HASH_SLOT 8 -struct SRSmaInfo { - void *taskInfo[TSDB_RETENTION_L2]; // qTaskInfo_t -}; - -struct SSmaStat { - union { - SHashObj *smaStatItems; // key: indexUid, value: SSmaStatItem for tsma - SHashObj *rsmaInfoHash; // key: stbUid, value: SRSmaInfo; - }; - T_REF_DECLARE() -}; -#define SMA_STAT_ITEMS(s) ((s)->smaStatItems) -#define SMA_STAT_INFO_HASH(s) ((s)->rsmaInfoHash) - -static FORCE_INLINE void tsdbFreeTaskHandle(qTaskInfo_t *taskHandle) { - // Note: free/kill may in RC - qTaskInfo_t otaskHandle = atomic_load_ptr(taskHandle); - if (otaskHandle && atomic_val_compare_exchange_ptr(taskHandle, otaskHandle, NULL)) { - qDestroyTask(otaskHandle); - } -} - -static FORCE_INLINE void *tsdbFreeRSmaInfo(SRSmaInfo *pInfo) { - for (int32_t i = 0; i < TSDB_RETENTION_MAX; ++i) { - if (pInfo->taskInfo[i]) { - tsdbFreeTaskHandle(pInfo->taskInfo[i]); - } - } - return NULL; -} - -// declaration of static functions - -// expired window -static int32_t tsdbUpdateExpiredWindowImpl(STsdb *pTsdb, SSubmitReq *pMsg, int64_t version); -static int32_t tsdbSetExpiredWindow(STsdb *pTsdb, SHashObj *pItemsHash, int64_t indexUid, int64_t winSKey, - int64_t version); -static int32_t tsdbInitSmaStat(SSmaStat **pSmaStat, int8_t smaType); -static void *tsdbFreeSmaStatItem(SSmaStatItem *pSmaStatItem); -static int32_t tsdbDestroySmaState(SSmaStat *pSmaStat, int8_t smaType); -static SSmaEnv *tsdbNewSmaEnv(const STsdb *pTsdb, int8_t smaType, const char *path, SDiskID did); -static int32_t tsdbInitSmaEnv(STsdb *pTsdb, int8_t smaType, const char *path, SDiskID did, SSmaEnv **pEnv); -static int32_t tsdbResetExpiredWindow(STsdb *pTsdb, SSmaStat *pStat, int64_t indexUid, TSKEY skey); -static int32_t tsdbRefSmaStat(STsdb *pTsdb, SSmaStat *pStat); -static int32_t tsdbUnRefSmaStat(STsdb *pTsdb, SSmaStat *pStat); - -// read data -// TODO: This is the basic params, and should wrap the params to a queryHandle. 
-static int32_t tsdbGetTSmaDataImpl(STsdb *pTsdb, char *pData, int64_t indexUid, TSKEY querySKey, int32_t nMaxResult); - -// insert data -static int32_t tsdbInitTSmaWriteH(STSmaWriteH *pSmaH, STsdb *pTsdb, const SArray *pDataBlocks, int64_t interval, - int8_t intervalUnit); -static void tsdbDestroyTSmaWriteH(STSmaWriteH *pSmaH); -static int32_t tsdbInitTSmaReadH(STSmaReadH *pSmaH, STsdb *pTsdb, int64_t interval, int8_t intervalUnit); -static int32_t tsdbGetSmaStorageLevel(int64_t interval, int8_t intervalUnit); -static int32_t tsdbSetRSmaDataFile(STSmaWriteH *pSmaH, int32_t fid); -static int32_t tsdbInsertTSmaBlocks(STSmaWriteH *pSmaH, void *smaKey, int32_t keyLen, void *pData, int32_t dataLen, - TXN *txn); -static int64_t tsdbGetIntervalByPrecision(int64_t interval, uint8_t intervalUnit, int8_t precision, bool adjusted); -static int32_t tsdbGetTSmaDays(STsdb *pTsdb, int64_t interval, int32_t storageLevel); -static int32_t tsdbSetTSmaDataFile(STSmaWriteH *pSmaH, int64_t indexUid, int32_t fid); -static int32_t tsdbInitTSmaFile(STSmaReadH *pSmaH, int64_t indexUid, TSKEY skey); -static bool tsdbSetAndOpenTSmaFile(STSmaReadH *pReadH, TSKEY *queryKey); -static void tsdbGetSmaDir(int32_t vgId, ETsdbSmaType smaType, char dirName[]); -static int32_t tsdbInsertTSmaDataImpl(STsdb *pTsdb, int64_t indexUid, const char *msg); -static int32_t tsdbInsertRSmaDataImpl(STsdb *pTsdb, const char *msg); - -static FORCE_INLINE int32_t tsdbUidStorePut(STbUidStore *pStore, tb_uid_t suid, tb_uid_t *uid); -static FORCE_INLINE int32_t tsdbUpdateTbUidListImpl(STsdb *pTsdb, tb_uid_t *suid, SArray *tbUids); -static FORCE_INLINE int32_t tsdbExecuteRSmaImpl(STsdb *pTsdb, const void *pMsg, int32_t inputType, - qTaskInfo_t *taskInfo, STSchema *pTSchema, tb_uid_t suid, tb_uid_t uid, - int8_t level); -// mgmt interface -static int32_t tsdbDropTSmaDataImpl(STsdb *pTsdb, int64_t indexUid); - -// Pool Memory -static SPoolMem *openPool(); -static void clearPool(SPoolMem *pPool); -static void closePool(SPoolMem *pPool); -static void *poolMalloc(void *arg, size_t size); -static void poolFree(void *arg, void *ptr); - -static int tsdbSmaBeginCommit(SSmaEnv *pEnv); -static int tsdbSmaEndCommit(SSmaEnv *pEnv); - -// implementation -static FORCE_INLINE int16_t tsdbTSmaAdd(STsdb *pTsdb, int16_t n) { - return atomic_add_fetch_16(&REPO_TSMA_NUM(pTsdb), n); -} -static FORCE_INLINE int16_t tsdbTSmaSub(STsdb *pTsdb, int16_t n) { - return atomic_sub_fetch_16(&REPO_TSMA_NUM(pTsdb), n); -} - -static FORCE_INLINE int32_t tsdbRLockSma(SSmaEnv *pEnv) { - int code = taosThreadRwlockRdlock(&(pEnv->lock)); - if (code != 0) { - terrno = TAOS_SYSTEM_ERROR(code); - return -1; - } - return 0; -} - -static FORCE_INLINE int32_t tsdbWLockSma(SSmaEnv *pEnv) { - int code = taosThreadRwlockWrlock(&(pEnv->lock)); - if (code != 0) { - terrno = TAOS_SYSTEM_ERROR(code); - return -1; - } - return 0; -} - -static FORCE_INLINE int32_t tsdbUnLockSma(SSmaEnv *pEnv) { - int code = taosThreadRwlockUnlock(&(pEnv->lock)); - if (code != 0) { - terrno = TAOS_SYSTEM_ERROR(code); - return -1; - } - return 0; -} - -static SPoolMem *openPool() { - SPoolMem *pPool = (SPoolMem *)taosMemoryMalloc(sizeof(*pPool)); - - pPool->prev = pPool->next = pPool; - pPool->size = 0; - - return pPool; -} - -static void clearPool(SPoolMem *pPool) { - if (!pPool) return; - - SPoolMem *pMem; - - do { - pMem = pPool->next; - - if (pMem == pPool) break; - - pMem->next->prev = pMem->prev; - pMem->prev->next = pMem->next; - pPool->size -= pMem->size; - - taosMemoryFree(pMem); - } while (1); - - 
assert(pPool->size == 0); -} - -static void closePool(SPoolMem *pPool) { - if (pPool) { - clearPool(pPool); - taosMemoryFree(pPool); - } -} - -static void *poolMalloc(void *arg, size_t size) { - void *ptr = NULL; - SPoolMem *pPool = (SPoolMem *)arg; - SPoolMem *pMem; - - pMem = (SPoolMem *)taosMemoryMalloc(sizeof(*pMem) + size); - if (!pMem) { - assert(0); - } - - pMem->size = sizeof(*pMem) + size; - pMem->next = pPool->next; - pMem->prev = pPool; - - pPool->next->prev = pMem; - pPool->next = pMem; - pPool->size += pMem->size; - - ptr = (void *)(&pMem[1]); - return ptr; -} - -static void poolFree(void *arg, void *ptr) { - SPoolMem *pPool = (SPoolMem *)arg; - SPoolMem *pMem; - - pMem = &(((SPoolMem *)ptr)[-1]); - - pMem->next->prev = pMem->prev; - pMem->prev->next = pMem->next; - pPool->size -= pMem->size; - - taosMemoryFree(pMem); -} - -int32_t tsdbInitSma(STsdb *pTsdb) { - // tSma - int32_t numOfTSma = taosArrayGetSize(metaGetSmaTbUids(REPO_META(pTsdb), false)); - if (numOfTSma > 0) { - atomic_store_16(&REPO_TSMA_NUM(pTsdb), (int16_t)numOfTSma); - } - // TODO: rSma - return TSDB_CODE_SUCCESS; -} - -static FORCE_INLINE int8_t tsdbSmaStat(SSmaStatItem *pStatItem) { - if (pStatItem) { - return atomic_load_8(&pStatItem->state); - } - return TSDB_SMA_STAT_UNKNOWN; -} - -static FORCE_INLINE bool tsdbSmaStatIsOK(SSmaStatItem *pStatItem, int8_t *state) { - if (!pStatItem) { - return false; - } - - if (state) { - *state = atomic_load_8(&pStatItem->state); - return *state == TSDB_SMA_STAT_OK; - } - return atomic_load_8(&pStatItem->state) == TSDB_SMA_STAT_OK; -} - -static FORCE_INLINE bool tsdbSmaStatIsExpired(SSmaStatItem *pStatItem) { - return pStatItem ? (atomic_load_8(&pStatItem->state) & TSDB_SMA_STAT_EXPIRED) : true; -} - -static FORCE_INLINE bool tsdbSmaStatIsDropped(SSmaStatItem *pStatItem) { - return pStatItem ? 
(atomic_load_8(&pStatItem->state) & TSDB_SMA_STAT_DROPPED) : true; -} - -static FORCE_INLINE void tsdbSmaStatSetOK(SSmaStatItem *pStatItem) { - if (pStatItem) { - atomic_store_8(&pStatItem->state, TSDB_SMA_STAT_OK); - } -} - -static FORCE_INLINE void tsdbSmaStatSetExpired(SSmaStatItem *pStatItem) { - if (pStatItem) { - atomic_or_fetch_8(&pStatItem->state, TSDB_SMA_STAT_EXPIRED); - } -} - -static FORCE_INLINE void tsdbSmaStatSetDropped(SSmaStatItem *pStatItem) { - if (pStatItem) { - atomic_or_fetch_8(&pStatItem->state, TSDB_SMA_STAT_DROPPED); - } -} - -static void tsdbGetSmaDir(int32_t vgId, ETsdbSmaType smaType, char dirName[]) { - snprintf(dirName, TSDB_FILENAME_LEN, "vnode%svnode%d%s%s", TD_DIRSEP, vgId, TD_DIRSEP, TSDB_SMA_DNAME[smaType]); -} - -static SSmaEnv *tsdbNewSmaEnv(const STsdb *pTsdb, int8_t smaType, const char *path, SDiskID did) { - SSmaEnv *pEnv = NULL; - - pEnv = (SSmaEnv *)taosMemoryCalloc(1, sizeof(SSmaEnv)); - if (!pEnv) { - terrno = TSDB_CODE_OUT_OF_MEMORY; - return NULL; - } - - SMA_ENV_TYPE(pEnv) = smaType; - - int code = taosThreadRwlockInit(&(pEnv->lock), NULL); - if (code) { - terrno = TAOS_SYSTEM_ERROR(code); - taosMemoryFree(pEnv); - return NULL; - } - - ASSERT(path && (strlen(path) > 0)); - SMA_ENV_PATH(pEnv) = strdup(path); - if (!SMA_ENV_PATH(pEnv)) { - tsdbFreeSmaEnv(pEnv); - return NULL; - } - - SMA_ENV_DID(pEnv) = did; - - if (tsdbInitSmaStat(&SMA_ENV_STAT(pEnv), smaType) != TSDB_CODE_SUCCESS) { - tsdbFreeSmaEnv(pEnv); - return NULL; - } - - char aname[TSDB_FILENAME_LEN] = {0}; - tfsAbsoluteName(REPO_TFS(pTsdb), did, path, aname); - if (tsdbOpenDBEnv(&pEnv->dbEnv, aname) != TSDB_CODE_SUCCESS) { - tsdbFreeSmaEnv(pEnv); - return NULL; - } - - if (!(pEnv->pPool = openPool())) { - tsdbFreeSmaEnv(pEnv); - return NULL; - } - - return pEnv; -} - -static int32_t tsdbInitSmaEnv(STsdb *pTsdb, int8_t smaType, const char *path, SDiskID did, SSmaEnv **pEnv) { - if (!pEnv) { - terrno = TSDB_CODE_INVALID_PTR; - return TSDB_CODE_FAILED; - } - - if (!(*pEnv)) { - if (!(*pEnv = tsdbNewSmaEnv(pTsdb, smaType, path, did))) { - return TSDB_CODE_FAILED; - } - } - - return TSDB_CODE_SUCCESS; -} - -/** - * @brief Release resources allocated for its member fields, not including itself. - * - * @param pSmaEnv - * @return int32_t - */ -void tsdbDestroySmaEnv(SSmaEnv *pSmaEnv) { - if (pSmaEnv) { - tsdbDestroySmaState(pSmaEnv->pStat, SMA_ENV_TYPE(pSmaEnv)); - taosMemoryFreeClear(pSmaEnv->pStat); - taosMemoryFreeClear(pSmaEnv->path); - taosThreadRwlockDestroy(&(pSmaEnv->lock)); - tsdbCloseDBEnv(pSmaEnv->dbEnv); - closePool(pSmaEnv->pPool); - } -} - -void *tsdbFreeSmaEnv(SSmaEnv *pSmaEnv) { - tsdbDestroySmaEnv(pSmaEnv); - taosMemoryFreeClear(pSmaEnv); - return NULL; -} - -static int32_t tsdbRefSmaStat(STsdb *pTsdb, SSmaStat *pStat) { - if (!pStat) return 0; - - int ref = T_REF_INC(pStat); - tsdbDebug("vgId:%d ref sma stat:%p, val:%d", REPO_ID(pTsdb), pStat, ref); - return 0; -} - -static int32_t tsdbUnRefSmaStat(STsdb *pTsdb, SSmaStat *pStat) { - if (!pStat) return 0; - - int ref = T_REF_DEC(pStat); - tsdbDebug("vgId:%d unref sma stat:%p, val:%d", REPO_ID(pTsdb), pStat, ref); - return 0; -} - -static int32_t tsdbInitSmaStat(SSmaStat **pSmaStat, int8_t smaType) { - ASSERT(pSmaStat != NULL); - - if (*pSmaStat) { // no lock - return TSDB_CODE_SUCCESS; - } - - /** - * 1. Lazy mode utilized when init SSmaStat to update expired window(or hungry mode when tsdbNew). - * 2. 
Currently, there is mutex lock when init SSmaEnv, thus no need add lock on SSmaStat, and please add lock if - * tsdbInitSmaStat invoked in other multithread environment later. - */ - if (!(*pSmaStat)) { - *pSmaStat = (SSmaStat *)taosMemoryCalloc(1, sizeof(SSmaStat)); - if (!(*pSmaStat)) { - terrno = TSDB_CODE_OUT_OF_MEMORY; - return TSDB_CODE_FAILED; - } - - if (smaType == TSDB_SMA_TYPE_ROLLUP) { - SMA_STAT_INFO_HASH(*pSmaStat) = taosHashInit( - RSMA_TASK_INFO_HASH_SLOT, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, HASH_ENTRY_LOCK); - - if (!SMA_STAT_INFO_HASH(*pSmaStat)) { - taosMemoryFreeClear(*pSmaStat); - return TSDB_CODE_FAILED; - } - } else if (smaType == TSDB_SMA_TYPE_TIME_RANGE) { - SMA_STAT_ITEMS(*pSmaStat) = - taosHashInit(SMA_STATE_HASH_SLOT, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_ENTRY_LOCK); - - if (!SMA_STAT_ITEMS(*pSmaStat)) { - taosMemoryFreeClear(*pSmaStat); - return TSDB_CODE_FAILED; - } - } else { - ASSERT(0); - } - } - return TSDB_CODE_SUCCESS; -} - -static SSmaStatItem *tsdbNewSmaStatItem(int8_t state) { - SSmaStatItem *pItem = NULL; - - pItem = (SSmaStatItem *)taosMemoryCalloc(1, sizeof(SSmaStatItem)); - if (pItem) { - pItem->state = state; - pItem->expiredWindows = taosHashInit(SMA_STATE_ITEM_HASH_SLOT, taosGetDefaultHashFunction(TSDB_DATA_TYPE_TIMESTAMP), - true, HASH_ENTRY_LOCK); - if (!pItem->expiredWindows) { - taosMemoryFreeClear(pItem); - } - } - return pItem; -} - -static void *tsdbFreeSmaStatItem(SSmaStatItem *pSmaStatItem) { - if (pSmaStatItem) { - tdDestroyTSma(pSmaStatItem->pSma); - taosMemoryFreeClear(pSmaStatItem->pSma); - taosHashCleanup(pSmaStatItem->expiredWindows); - taosMemoryFreeClear(pSmaStatItem); - } - return NULL; -} - -/** - * @brief Release resources allocated for its member fields, not including itself. - * - * @param pSmaStat - * @return int32_t - */ -int32_t tsdbDestroySmaState(SSmaStat *pSmaStat, int8_t smaType) { - if (pSmaStat) { - // TODO: use taosHashSetFreeFp when taosHashSetFreeFp is ready. - if (smaType == TSDB_SMA_TYPE_TIME_RANGE) { - void *item = taosHashIterate(SMA_STAT_ITEMS(pSmaStat), NULL); - while (item) { - SSmaStatItem *pItem = *(SSmaStatItem **)item; - tsdbFreeSmaStatItem(pItem); - item = taosHashIterate(SMA_STAT_ITEMS(pSmaStat), item); - } - taosHashCleanup(SMA_STAT_ITEMS(pSmaStat)); - } else if (smaType == TSDB_SMA_TYPE_ROLLUP) { - void *infoHash = taosHashIterate(SMA_STAT_INFO_HASH(pSmaStat), NULL); - while (infoHash) { - SRSmaInfo *pInfoHash = *(SRSmaInfo **)infoHash; - tsdbFreeRSmaInfo(pInfoHash); - infoHash = taosHashIterate(SMA_STAT_INFO_HASH(pSmaStat), infoHash); - } - taosHashCleanup(SMA_STAT_INFO_HASH(pSmaStat)); - } else { - ASSERT(0); - } - } - return TSDB_CODE_SUCCESS; -} - -static int32_t tsdbCheckAndInitSmaEnv(STsdb *pTsdb, int8_t smaType) { - SSmaEnv *pEnv = NULL; - - // return if already init - switch (smaType) { - case TSDB_SMA_TYPE_TIME_RANGE: - if ((pEnv = (SSmaEnv *)atomic_load_ptr(&REPO_TSMA_ENV(pTsdb)))) { - return TSDB_CODE_SUCCESS; - } - break; - case TSDB_SMA_TYPE_ROLLUP: - if ((pEnv = (SSmaEnv *)atomic_load_ptr(&REPO_RSMA_ENV(pTsdb)))) { - return TSDB_CODE_SUCCESS; - } - break; - default: - terrno = TSDB_CODE_INVALID_PARA; - return TSDB_CODE_FAILED; - } - - // init sma env - tsdbLockRepo(pTsdb); - pEnv = (smaType == TSDB_SMA_TYPE_TIME_RANGE) ? 
atomic_load_ptr(&REPO_TSMA_ENV(pTsdb))
-                                                   : atomic_load_ptr(&REPO_RSMA_ENV(pTsdb));
-  if (!pEnv) {
-    char rname[TSDB_FILENAME_LEN] = {0};
-
-    SDiskID did = {0};
-    tfsAllocDisk(REPO_TFS(pTsdb), TFS_PRIMARY_LEVEL, &did);
-    if (did.level < 0 || did.id < 0) {
-      tsdbUnlockRepo(pTsdb);
-      return TSDB_CODE_FAILED;
-    }
-    tsdbGetSmaDir(REPO_ID(pTsdb), smaType, rname);
-
-    if (tfsMkdirRecurAt(REPO_TFS(pTsdb), rname, did) != TSDB_CODE_SUCCESS) {
-      tsdbUnlockRepo(pTsdb);
-      return TSDB_CODE_FAILED;
-    }
-
-    if (tsdbInitSmaEnv(pTsdb, smaType, rname, did, &pEnv) != TSDB_CODE_SUCCESS) {
-      tsdbUnlockRepo(pTsdb);
-      return TSDB_CODE_FAILED;
-    }
-
-    (smaType == TSDB_SMA_TYPE_TIME_RANGE) ? atomic_store_ptr(&REPO_TSMA_ENV(pTsdb), pEnv)
-                                          : atomic_store_ptr(&REPO_RSMA_ENV(pTsdb), pEnv);
-  }
-  tsdbUnlockRepo(pTsdb);
-
-  return TSDB_CODE_SUCCESS;
-};
-
-static int32_t tsdbSetExpiredWindow(STsdb *pTsdb, SHashObj *pItemsHash, int64_t indexUid, int64_t winSKey,
-                                    int64_t version) {
-  SSmaStatItem *pItem = taosHashGet(pItemsHash, &indexUid, sizeof(indexUid));
-  if (!pItem) {
-    // TODO: use TSDB_SMA_STAT_EXPIRED and update by stream computing later
-    pItem = tsdbNewSmaStatItem(TSDB_SMA_STAT_OK);  // TODO: use the real state
-    if (!pItem) {
-      // Response to stream computing: OOM
-      // For query, if the indexUid is not found, the TSDB should tell the query module to query raw TS data.
-      return TSDB_CODE_FAILED;
-    }
-
-    // cache smaMeta
-    STSma *pSma = metaGetSmaInfoByIndex(REPO_META(pTsdb), indexUid, true);
-    if (!pSma) {
-      terrno = TSDB_CODE_TDB_NO_SMA_INDEX_IN_META;
-      taosHashCleanup(pItem->expiredWindows);
-      taosMemoryFree(pItem);
-      tsdbWarn("vgId:%d update expired window failed for smaIndex %" PRIi64 " since %s", REPO_ID(pTsdb), indexUid,
-               tstrerror(terrno));
-      return TSDB_CODE_FAILED;
-    }
-    pItem->pSma = pSma;
-
-    if (taosHashPut(pItemsHash, &indexUid, sizeof(indexUid), &pItem, sizeof(pItem)) != 0) {
-      // If an error occurs while putting the smaStatItem, free the resources of pItem
-      taosHashCleanup(pItem->expiredWindows);
-      taosMemoryFree(pItem);
-      return TSDB_CODE_FAILED;
-    }
-  } else if (!(pItem = *(SSmaStatItem **)pItem)) {
-    terrno = TSDB_CODE_INVALID_PTR;
-    return TSDB_CODE_FAILED;
-  }
-
-  if (taosHashPut(pItem->expiredWindows, &winSKey, sizeof(TSKEY), &version, sizeof(version)) != 0) {
-    // If an error occurs while putting the expired window, remove the smaIndex from pTsdb->pSmaStat, so TSDB would
-    // tell the query module to query raw TS data.
-    // N.B.
-    //  1) It is assumed that failure of taosHashPut is an extremely low-probability event.
-    //  2) This would solve the inconsistency to some extent, but not completely, unless we record all expired
-    // windows that failed to be put into the hash table.
-    taosHashCleanup(pItem->expiredWindows);
-    taosMemoryFreeClear(pItem->pSma);
-    taosHashRemove(pItemsHash, &indexUid, sizeof(indexUid));
-    tsdbWarn("vgId:%d smaIndex %" PRIi64 ", put skey %" PRIi64 " to expire window fail", REPO_ID(pTsdb), indexUid,
-             winSKey);
-    return TSDB_CODE_FAILED;
-  }
-
-  tsdbDebug("vgId:%d smaIndex %" PRIi64 ", put skey %" PRIi64 " to expire window succeed", REPO_ID(pTsdb), indexUid,
-            winSKey);
-  return TSDB_CODE_SUCCESS;
-}
-
-/**
- * @brief Update expired window according to msg from stream computing module.
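Reviewer note: tsdbCheckAndInitSmaEnv above is a check-lock-check initialization: an atomic load as the lock-free fast path, then a re-check under the repo lock before the env pointer is published. A compilable sketch of that shape, with C11 atomics and a pthread mutex standing in for the engine's atomic_* wrappers and tsdbLockRepo (all names are illustrative):

#include <stdatomic.h>
#include <stdlib.h>
#include <pthread.h>

typedef struct { int inited; } SmaEnv;

static _Atomic(SmaEnv *) gEnv;                 /* REPO_TSMA_ENV stand-in */
static pthread_mutex_t   gRepoLock = PTHREAD_MUTEX_INITIALIZER;

static SmaEnv *check_and_init_env(void) {
  SmaEnv *env = atomic_load(&gEnv);
  if (env) return env;                         /* fast path, no lock taken */

  pthread_mutex_lock(&gRepoLock);
  env = atomic_load(&gEnv);                    /* re-check under the lock */
  if (!env) {
    env = calloc(1, sizeof(*env));             /* dirs, tdb env, stat, ... */
    if (env) {
      env->inited = 1;
      atomic_store(&gEnv, env);                /* publish only when ready */
    }
  }
  pthread_mutex_unlock(&gRepoLock);
  return env;
}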
- * - * @param pTsdb - * @param msg SSubmitReq - * @return int32_t - */ -int32_t tsdbUpdateExpiredWindowImpl(STsdb *pTsdb, SSubmitReq *pMsg, int64_t version) { - // no time-range-sma, just return success - if (atomic_load_16(&REPO_TSMA_NUM(pTsdb)) <= 0) { - tsdbTrace("vgId:%d not update expire window since no tSma", REPO_ID(pTsdb)); - return TSDB_CODE_SUCCESS; - } - - if (!REPO_META(pTsdb)) { - terrno = TSDB_CODE_INVALID_PTR; - return TSDB_CODE_FAILED; - } - - if (tsdbCheckAndInitSmaEnv(pTsdb, TSDB_SMA_TYPE_TIME_RANGE) != TSDB_CODE_SUCCESS) { - terrno = TSDB_CODE_TDB_INIT_FAILED; - return TSDB_CODE_FAILED; - } - - // Firstly, assume that tSma can only be created on super table/normal table. - // getActiveTimeWindow - - SSmaEnv *pEnv = REPO_TSMA_ENV(pTsdb); - SSmaStat *pStat = SMA_ENV_STAT(pEnv); - SHashObj *pItemsHash = SMA_ENV_STAT_ITEMS(pEnv); - - TASSERT(pEnv && pStat && pItemsHash); - - // basic procedure - // TODO: optimization - tsdbRefSmaStat(pTsdb, pStat); - - SSubmitMsgIter msgIter = {0}; - SSubmitBlk *pBlock = NULL; - SInterval interval = {0}; - TSKEY lastWinSKey = INT64_MIN; - - if (tInitSubmitMsgIter(pMsg, &msgIter) != TSDB_CODE_SUCCESS) { - return TSDB_CODE_FAILED; - } - - while (true) { - tGetSubmitMsgNext(&msgIter, &pBlock); - if (!pBlock) break; - - STSmaWrapper *pSW = NULL; - STSma *pTSma = NULL; - - SSubmitBlkIter blkIter = {0}; - if (tInitSubmitBlkIter(&msgIter, pBlock, &blkIter) != TSDB_CODE_SUCCESS) { - pSW = tdFreeTSmaWrapper(pSW); - break; - } - - while (true) { - STSRow *row = tGetSubmitBlkNext(&blkIter); - if (!row) { - tdFreeTSmaWrapper(pSW); - break; - } - if (!pSW || (pTSma->tableUid != pBlock->suid)) { - if (pSW) { - pSW = tdFreeTSmaWrapper(pSW); - } - if (!(pSW = metaGetSmaInfoByTable(REPO_META(pTsdb), pBlock->suid))) { - break; - } - if ((pSW->number) <= 0 || !pSW->tSma) { - pSW = tdFreeTSmaWrapper(pSW); - break; - } - - pTSma = pSW->tSma; - - interval.interval = pTSma->interval; - interval.intervalUnit = pTSma->intervalUnit; - interval.offset = pTSma->offset; - interval.precision = REPO_CFG(pTsdb)->precision; - interval.sliding = pTSma->sliding; - interval.slidingUnit = pTSma->slidingUnit; - } - - TSKEY winSKey = taosTimeTruncate(TD_ROW_KEY(row), &interval, interval.precision); - - if (lastWinSKey != winSKey) { - lastWinSKey = winSKey; - tsdbSetExpiredWindow(pTsdb, pItemsHash, pTSma->indexUid, winSKey, version); - } else { - tsdbDebug("vgId:%d smaIndex %" PRIi64 ", put skey %" PRIi64 " to expire window ignore as duplicated", - REPO_ID(pTsdb), pTSma->indexUid, winSKey); - } - } - } - - tsdbUnRefSmaStat(pTsdb, pStat); - - return TSDB_CODE_SUCCESS; -} - -/** - * @brief When sma data received from stream computing, make the relative expired window valid. 
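Reviewer note: the per-block loop in tsdbUpdateExpiredWindowImpl above maps each row timestamp to the start key of its time window via taosTimeTruncate and skips consecutive rows that land in the same window (the lastWinSKey check). For a plain fixed-length interval, ignoring the calendar-unit and precision handling the real function performs, the truncation reduces to a floor division (a sketch, not the engine's implementation):

#include <stdint.h>

typedef int64_t TSKEY;

static TSKEY win_start(TSKEY ts, int64_t interval, int64_t offset) {
  int64_t t = ts - offset;
  if (t < 0) t -= interval - 1;   /* make the division floor, not truncate */
  return (t / interval) * interval + offset;
}
/* e.g. win_start(10050, 1000, 0) == 10000; rows 10050 and 10700 share it. */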
- *
- * @param pTsdb
- * @param pStat
- * @param indexUid
- * @param skey
- * @return int32_t
- */
-static int32_t tsdbResetExpiredWindow(STsdb *pTsdb, SSmaStat *pStat, int64_t indexUid, TSKEY skey) {
-  SSmaStatItem *pItem = NULL;
-
-  tsdbRefSmaStat(pTsdb, pStat);
-
-  if (pStat && SMA_STAT_ITEMS(pStat)) {
-    pItem = taosHashGet(SMA_STAT_ITEMS(pStat), &indexUid, sizeof(indexUid));
-  }
-  if ((pItem) && ((pItem = *(SSmaStatItem **)pItem))) {
-    // pItem resides in the hash buffer all the time unless the sma index is dropped
-    // TODO: multithread protect
-    if (taosHashRemove(pItem->expiredWindows, &skey, sizeof(TSKEY)) != 0) {
-      // error handling
-      tsdbUnRefSmaStat(pTsdb, pStat);
-      tsdbWarn("vgId:%d remove skey %" PRIi64 " from expired window for sma index %" PRIi64 " fail", REPO_ID(pTsdb),
-               skey, indexUid);
-      return TSDB_CODE_FAILED;
-    }
-    tsdbDebug("vgId:%d remove skey %" PRIi64 " from expired window for sma index %" PRIi64 " succeed", REPO_ID(pTsdb),
-              skey, indexUid);
-    // TODO: use a standalone interface to receive state update notifications from the stream computing module.
-    /**
-     * @brief state
-     *  - When the SMA env is initialized in TSDB, its status is TSDB_SMA_STAT_OK.
-     *  - In the startup phase of the stream computing module, it should notify the SMA env in TSDB to be expired if
-     * needed (e.g. when batch data calculation is not finished).
-     *  - When TSDB_SMA_STAT_OK, the stream computing module should also notify that to the SMA env in TSDB.
-     */
-    pItem->state = TSDB_SMA_STAT_OK;
-  } else {
-    // error handling
-    tsdbUnRefSmaStat(pTsdb, pStat);
-    tsdbWarn("vgId:%d expired window %" PRIi64 " does not exist for sma index %" PRIi64, REPO_ID(pTsdb), skey, indexUid);
-    return TSDB_CODE_FAILED;
-  }
-
-  tsdbUnRefSmaStat(pTsdb, pStat);
-  return TSDB_CODE_SUCCESS;
-}
-
-/**
- * @brief Judge the tSma storage level
- *
- * @param interval
- * @param intervalUnit
- * @return int32_t
- */
-static int32_t tsdbGetSmaStorageLevel(int64_t interval, int8_t intervalUnit) {
-  // TODO: configurable for SMA_STORAGE_SPLIT_HOURS?
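/* Reviewer note: every case below converts the SMA_STORAGE_SPLIT_HOURS
 * threshold (e.g. 24, per the storage comment later in this hunk) into the
 * interval's own unit before comparing. With a 24h split, a 30-minute
 * interval stays at SMA_STORAGE_LEVEL_DFILESET since 30 < 24 * 60, while a
 * 90000-second interval falls through to SMA_STORAGE_LEVEL_TSDB since
 * 90000 >= 24 * 3600. */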
- switch (intervalUnit) { - case TIME_UNIT_HOUR: - if (interval < SMA_STORAGE_SPLIT_HOURS) { - return SMA_STORAGE_LEVEL_DFILESET; - } - break; - case TIME_UNIT_MINUTE: - if (interval < 60 * SMA_STORAGE_SPLIT_HOURS) { - return SMA_STORAGE_LEVEL_DFILESET; - } - break; - case TIME_UNIT_SECOND: - if (interval < 3600 * SMA_STORAGE_SPLIT_HOURS) { - return SMA_STORAGE_LEVEL_DFILESET; - } - break; - case TIME_UNIT_MILLISECOND: - if (interval < 3600 * 1e3 * SMA_STORAGE_SPLIT_HOURS) { - return SMA_STORAGE_LEVEL_DFILESET; - } - break; - case TIME_UNIT_MICROSECOND: - if (interval < 3600 * 1e6 * SMA_STORAGE_SPLIT_HOURS) { - return SMA_STORAGE_LEVEL_DFILESET; - } - break; - case TIME_UNIT_NANOSECOND: - if (interval < 3600 * 1e9 * SMA_STORAGE_SPLIT_HOURS) { - return SMA_STORAGE_LEVEL_DFILESET; - } - break; - default: - break; - } - return SMA_STORAGE_LEVEL_TSDB; -} - -/** - * @brief Insert TSma data blocks to DB File build by B+Tree - * - * @param pSmaH - * @param smaKey tableUid-colId-skeyOfWindow(8-2-8) - * @param keyLen - * @param pData - * @param dataLen - * @return int32_t - */ -static int32_t tsdbInsertTSmaBlocks(STSmaWriteH *pSmaH, void *smaKey, int32_t keyLen, void *pData, int32_t dataLen, - TXN *txn) { - SDBFile *pDBFile = &pSmaH->dFile; - - // TODO: insert tsma data blocks into B+Tree(TTB) - if (tsdbSaveSmaToDB(pDBFile, smaKey, keyLen, pData, dataLen, txn) != 0) { - tsdbWarn("vgId:%d insert tsma data blocks into %s: smaKey %" PRIx64 "-%" PRIx64 ", dataLen %" PRIu32 " fail", - REPO_ID(pSmaH->pTsdb), pDBFile->path, *(int64_t *)smaKey, *(int64_t *)POINTER_SHIFT(smaKey, 8), dataLen); - return TSDB_CODE_FAILED; - } - tsdbDebug("vgId:%d insert tsma data blocks into %s: smaKey %" PRIx64 "-%" PRIx64 ", dataLen %" PRIu32 " succeed", - REPO_ID(pSmaH->pTsdb), pDBFile->path, *(int64_t *)smaKey, *(int64_t *)POINTER_SHIFT(smaKey, 8), dataLen); - -#ifdef _TEST_SMA_PRINT_DEBUG_LOG_ - uint32_t valueSize = 0; - void *data = tsdbGetSmaDataByKey(pDBFile, smaKey, keyLen, &valueSize); - ASSERT(data != NULL); - for (uint32_t v = 0; v < valueSize; v += 8) { - tsdbWarn("vgId:%d insert sma data val[%d] %" PRIi64, REPO_ID(pSmaH->pTsdb), v, *(int64_t *)POINTER_SHIFT(data, v)); - } -#endif - return TSDB_CODE_SUCCESS; -} - -/** - * @brief Approximate value for week/month/year. 
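Reviewer note: the header comment of tsdbInsertTSmaBlocks above still describes smaKey as tableUid-colId-skeyOfWindow (8-2-8), but the write path later in this hunk builds a 16-byte key from groupId plus window start key (see the smaKey declaration and the tsdbEncodeTSmaKey calls); the stale comment is worth reconciling. A sketch of the two-field layout, assuming groupId-then-skey order per the encode call's argument order (illustrative only; tsdbEncodeTSmaKey defines the real layout):

#include <stdint.h>
#include <string.h>

enum { SMA_KEY_LEN = 16 };

/* Illustrative only: the real layout is defined by tsdbEncodeTSmaKey. */
static void encode_sma_key(uint8_t key[SMA_KEY_LEN], int64_t groupId, int64_t skey) {
  memcpy(key, &groupId, sizeof(groupId));   /* bytes 0..7  */
  memcpy(key + 8, &skey, sizeof(skey));     /* bytes 8..15 */
}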
- * - * @param interval - * @param intervalUnit - * @param precision - * @param adjusted Interval already adjusted according to DB precision - * @return int64_t - */ -static int64_t tsdbGetIntervalByPrecision(int64_t interval, uint8_t intervalUnit, int8_t precision, bool adjusted) { - if (adjusted) { - return interval; - } - - switch (intervalUnit) { - case TIME_UNIT_YEAR: // approximate value - interval *= 365 * 86400 * 1e3; - break; - case TIME_UNIT_MONTH: // approximate value - interval *= 30 * 86400 * 1e3; - break; - case TIME_UNIT_WEEK: // approximate value - interval *= 7 * 86400 * 1e3; - break; - case TIME_UNIT_DAY: // the interval for tSma calculation must <= day - interval *= 86400 * 1e3; - break; - case TIME_UNIT_HOUR: - interval *= 3600 * 1e3; - break; - case TIME_UNIT_MINUTE: - interval *= 60 * 1e3; - break; - case TIME_UNIT_SECOND: - interval *= 1e3; - break; - default: - break; - } - - switch (precision) { - case TSDB_TIME_PRECISION_MILLI: - if (TIME_UNIT_MICROSECOND == intervalUnit) { // us - return interval / 1e3; - } else if (TIME_UNIT_NANOSECOND == intervalUnit) { // nano second - return interval / 1e6; - } else { // ms - return interval; - } - break; - case TSDB_TIME_PRECISION_MICRO: - if (TIME_UNIT_MICROSECOND == intervalUnit) { // us - return interval; - } else if (TIME_UNIT_NANOSECOND == intervalUnit) { // ns - return interval / 1e3; - } else { // ms - return interval * 1e3; - } - break; - case TSDB_TIME_PRECISION_NANO: - if (TIME_UNIT_MICROSECOND == intervalUnit) { // us - return interval * 1e3; - } else if (TIME_UNIT_NANOSECOND == intervalUnit) { // ns - return interval; - } else { // ms - return interval * 1e6; - } - break; - default: // ms - if (TIME_UNIT_MICROSECOND == intervalUnit) { // us - return interval / 1e3; - } else if (TIME_UNIT_NANOSECOND == intervalUnit) { // ns - return interval / 1e6; - } else { // ms - return interval; - } - break; - } - return interval; -} - -static int32_t tsdbInitTSmaWriteH(STSmaWriteH *pSmaH, STsdb *pTsdb, const SArray *pDataBlocks, int64_t interval, - int8_t intervalUnit) { - pSmaH->pTsdb = pTsdb; - pSmaH->interval = tsdbGetIntervalByPrecision(interval, intervalUnit, REPO_CFG(pTsdb)->precision, true); - pSmaH->pDataBlocks = pDataBlocks; - pSmaH->dFile.fid = TSDB_IVLD_FID; - return TSDB_CODE_SUCCESS; -} - -static void tsdbDestroyTSmaWriteH(STSmaWriteH *pSmaH) { - if (pSmaH) { - tsdbCloseDBF(&pSmaH->dFile); - } -} - -static int32_t tsdbSetTSmaDataFile(STSmaWriteH *pSmaH, int64_t indexUid, int32_t fid) { - STsdb *pTsdb = pSmaH->pTsdb; - ASSERT(!pSmaH->dFile.path && !pSmaH->dFile.pDB); - - pSmaH->dFile.fid = fid; - char tSmaFile[TSDB_FILENAME_LEN] = {0}; - snprintf(tSmaFile, TSDB_FILENAME_LEN, "%" PRIi64 "%sv%df%d.tsma", indexUid, TD_DIRSEP, REPO_ID(pTsdb), fid); - pSmaH->dFile.path = strdup(tSmaFile); - - return TSDB_CODE_SUCCESS; -} - -/** - * @brief - * - * @param pTsdb - * @param interval Interval calculated by DB's precision - * @param storageLevel - * @return int32_t - */ -static int32_t tsdbGetTSmaDays(STsdb *pTsdb, int64_t interval, int32_t storageLevel) { - STsdbKeepCfg *pCfg = REPO_KEEP_CFG(pTsdb); - int32_t daysPerFile = pCfg->days; - - if (storageLevel == SMA_STORAGE_LEVEL_TSDB) { - int32_t days = SMA_STORAGE_TSDB_TIMES * (interval / tsTickPerMin[pCfg->precision]); - daysPerFile = days > SMA_STORAGE_TSDB_DAYS ? 
days : SMA_STORAGE_TSDB_DAYS; - } - - return daysPerFile; -} - -static int tsdbSmaBeginCommit(SSmaEnv *pEnv) { - TXN *pTxn = &pEnv->txn; - // start a new txn - tdbTxnOpen(pTxn, 0, poolMalloc, poolFree, pEnv->pPool, TDB_TXN_WRITE | TDB_TXN_READ_UNCOMMITTED); - if (tdbBegin(pEnv->dbEnv, pTxn) != 0) { - tsdbWarn("tsdbSma tdb begin commit fail"); - return -1; - } - return 0; -} - -static int tsdbSmaEndCommit(SSmaEnv *pEnv) { - TXN *pTxn = &pEnv->txn; - - // Commit current txn - if (tdbCommit(pEnv->dbEnv, pTxn) != 0) { - tsdbWarn("tsdbSma tdb end commit fail"); - return -1; - } - tdbTxnClose(pTxn); - clearPool(pEnv->pPool); - return 0; -} - -/** - * @brief Insert/Update Time-range-wise SMA data. - * - If interval < SMA_STORAGE_SPLIT_HOURS(e.g. 24), save the SMA data as a part of DFileSet to e.g. - * v3f1900.tsma.${sma_index_name}. The days is the same with that for TS data files. - * - If interval >= SMA_STORAGE_SPLIT_HOURS, save the SMA data to e.g. vnode3/tsma/v3f632.tsma.${sma_index_name}. The - * days is 30 times of the interval, and the minimum days is SMA_STORAGE_TSDB_DAYS(30d). - * - The destination file of one data block for some interval is determined by its start TS key. - * - * @param pTsdb - * @param msg - * @return int32_t - */ -static int32_t tsdbInsertTSmaDataImpl(STsdb *pTsdb, int64_t indexUid, const char *msg) { - STsdbCfg *pCfg = REPO_CFG(pTsdb); - const SArray *pDataBlocks = (const SArray *)msg; - - // TODO: destroy SSDataBlocks(msg) - - // For super table aggregation, the sma data is stored in vgroup calculated from the hash value of stable name. Thus - // the sma data would arrive ahead of the update-expired-window msg. - if (tsdbCheckAndInitSmaEnv(pTsdb, TSDB_SMA_TYPE_TIME_RANGE) != TSDB_CODE_SUCCESS) { - terrno = TSDB_CODE_TDB_INIT_FAILED; - return TSDB_CODE_FAILED; - } - - if (!pDataBlocks) { - terrno = TSDB_CODE_INVALID_PTR; - tsdbWarn("vgId:%d insert tSma data failed since pDataBlocks is NULL", REPO_ID(pTsdb)); - return terrno; - } - - if (taosArrayGetSize(pDataBlocks) <= 0) { - terrno = TSDB_CODE_INVALID_PARA; - tsdbWarn("vgId:%d insert tSma data failed since pDataBlocks is empty", REPO_ID(pTsdb)); - return TSDB_CODE_FAILED; - } - - SSmaEnv *pEnv = REPO_TSMA_ENV(pTsdb); - SSmaStat *pStat = SMA_ENV_STAT(pEnv); - SSmaStatItem *pItem = NULL; - - tsdbRefSmaStat(pTsdb, pStat); - - if (pStat && SMA_STAT_ITEMS(pStat)) { - pItem = taosHashGet(SMA_STAT_ITEMS(pStat), &indexUid, sizeof(indexUid)); - } - - if (!pItem || !(pItem = *(SSmaStatItem **)pItem) || tsdbSmaStatIsDropped(pItem)) { - terrno = TSDB_CODE_TDB_INVALID_SMA_STAT; - tsdbUnRefSmaStat(pTsdb, pStat); - return TSDB_CODE_FAILED; - } - - STSma *pSma = pItem->pSma; - STSmaWriteH tSmaH = {0}; - - if (tsdbInitTSmaWriteH(&tSmaH, pTsdb, pDataBlocks, pSma->interval, pSma->intervalUnit) != 0) { - return TSDB_CODE_FAILED; - } - - char rPath[TSDB_FILENAME_LEN] = {0}; - char aPath[TSDB_FILENAME_LEN] = {0}; - snprintf(rPath, TSDB_FILENAME_LEN, "%s%s%" PRIi64, SMA_ENV_PATH(pEnv), TD_DIRSEP, indexUid); - tfsAbsoluteName(REPO_TFS(pTsdb), SMA_ENV_DID(pEnv), rPath, aPath); - if (!taosCheckExistFile(aPath)) { - if (tfsMkdirRecurAt(REPO_TFS(pTsdb), rPath, SMA_ENV_DID(pEnv)) != TSDB_CODE_SUCCESS) { - tsdbUnRefSmaStat(pTsdb, pStat); - return TSDB_CODE_FAILED; - } - } - - // Step 1: Judge the storage level and days - int32_t storageLevel = tsdbGetSmaStorageLevel(pSma->interval, pSma->intervalUnit); - int32_t daysPerFile = tsdbGetTSmaDays(pTsdb, tSmaH.interval, storageLevel); - - char smaKey[SMA_KEY_LEN] = {0}; // key: skey + groupId - char 
dataBuf[512] = {0}; // val: aggr data // TODO: handle 512 buffer? - void *pDataBuf = NULL; - int32_t sz = taosArrayGetSize(pDataBlocks); - for (int32_t i = 0; i < sz; ++i) { - SSDataBlock *pDataBlock = taosArrayGet(pDataBlocks, i); - int32_t colNum = pDataBlock->info.numOfCols; - int32_t rows = pDataBlock->info.rows; - int32_t rowSize = pDataBlock->info.rowSize; - int64_t groupId = pDataBlock->info.groupId; - for (int32_t j = 0; j < rows; ++j) { - printf("|"); - TSKEY skey = TSKEY_INITIAL_VAL; // the start key of TS window by interval - void *pSmaKey = &smaKey; - bool isStartKey = false; - - int32_t tlen = 0; // reset the len - pDataBuf = &dataBuf; // reset the buf - for (int32_t k = 0; k < colNum; ++k) { - SColumnInfoData *pColInfoData = taosArrayGet(pDataBlock->pDataBlock, k); - void *var = POINTER_SHIFT(pColInfoData->pData, j * pColInfoData->info.bytes); - switch (pColInfoData->info.type) { - case TSDB_DATA_TYPE_TIMESTAMP: - if (!isStartKey) { - isStartKey = true; - skey = *(TSKEY *)var; - printf("= skey %" PRIi64 " groupId = %" PRIi64 "|", skey, groupId); - tsdbEncodeTSmaKey(groupId, skey, &pSmaKey); - } else { - printf(" %" PRIi64 " |", *(int64_t *)var); - tlen += taosEncodeFixedI64(&pDataBuf, *(int64_t *)var); - break; - } - break; - case TSDB_DATA_TYPE_BOOL: - case TSDB_DATA_TYPE_UTINYINT: - printf(" %15d |", *(uint8_t *)var); - tlen += taosEncodeFixedU8(&pDataBuf, *(uint8_t *)var); - break; - case TSDB_DATA_TYPE_TINYINT: - printf(" %15d |", *(int8_t *)var); - tlen += taosEncodeFixedI8(&pDataBuf, *(int8_t *)var); - break; - case TSDB_DATA_TYPE_SMALLINT: - printf(" %15d |", *(int16_t *)var); - tlen += taosEncodeFixedI16(&pDataBuf, *(int16_t *)var); - break; - case TSDB_DATA_TYPE_USMALLINT: - printf(" %15d |", *(uint16_t *)var); - tlen += taosEncodeFixedU16(&pDataBuf, *(uint16_t *)var); - break; - case TSDB_DATA_TYPE_INT: - printf(" %15d |", *(int32_t *)var); - tlen += taosEncodeFixedI32(&pDataBuf, *(int32_t *)var); - break; - case TSDB_DATA_TYPE_FLOAT: - printf(" %15f |", *(float *)var); - tlen += taosEncodeBinary(&pDataBuf, var, sizeof(float)); - break; - case TSDB_DATA_TYPE_UINT: - printf(" %15u |", *(uint32_t *)var); - tlen += taosEncodeFixedU32(&pDataBuf, *(uint32_t *)var); - break; - case TSDB_DATA_TYPE_BIGINT: - printf(" %15ld |", *(int64_t *)var); - tlen += taosEncodeFixedI64(&pDataBuf, *(int64_t *)var); - break; - case TSDB_DATA_TYPE_DOUBLE: - printf(" %15lf |", *(double *)var); - tlen += taosEncodeBinary(&pDataBuf, var, sizeof(double)); - case TSDB_DATA_TYPE_UBIGINT: - printf(" %15lu |", *(uint64_t *)var); - tlen += taosEncodeFixedU64(&pDataBuf, *(uint64_t *)var); - break; - case TSDB_DATA_TYPE_NCHAR: { - char tmpChar[100] = {0}; - strncpy(tmpChar, varDataVal(var), varDataLen(var)); - printf(" %s |", tmpChar); - tlen += taosEncodeBinary(&pDataBuf, varDataVal(var), varDataLen(var)); - break; - } - case TSDB_DATA_TYPE_VARCHAR: { // TSDB_DATA_TYPE_BINARY - char tmpChar[100] = {0}; - strncpy(tmpChar, varDataVal(var), varDataLen(var)); - printf(" %s |", tmpChar); - tlen += taosEncodeBinary(&pDataBuf, varDataVal(var), varDataLen(var)); - break; - } - case TSDB_DATA_TYPE_VARBINARY: - // TODO: add binary/varbinary - TASSERT(0); - default: - printf("the column type %" PRIi16 " is undefined\n", pColInfoData->info.type); - TASSERT(0); - break; - } - } - // if ((tlen > 0) && (skey != TSKEY_INITIAL_VAL)) { - if (tlen > 0) { - int32_t fid = (int32_t)(TSDB_KEY_FID(skey, daysPerFile, pCfg->precision)); - - // Step 2: Set the DFile for storage of SMA index, and iterate/split the TSma data 
and store to B+Tree index - // file - // - Set and open the DFile or the B+Tree file - // TODO: tsdbStartTSmaCommit(); - if (fid != tSmaH.dFile.fid) { - if (tSmaH.dFile.fid != TSDB_IVLD_FID) { - tsdbSmaEndCommit(pEnv); - tsdbCloseDBF(&tSmaH.dFile); - } - tsdbSetTSmaDataFile(&tSmaH, indexUid, fid); - if (tsdbOpenDBF(pEnv->dbEnv, &tSmaH.dFile) != 0) { - tsdbWarn("vgId:%d open DB file %s failed since %s", REPO_ID(pTsdb), - tSmaH.dFile.path ? tSmaH.dFile.path : "path is NULL", tstrerror(terrno)); - tsdbDestroyTSmaWriteH(&tSmaH); - tsdbUnRefSmaStat(pTsdb, pStat); - return TSDB_CODE_FAILED; - } - tsdbSmaBeginCommit(pEnv); - } - - if (tsdbInsertTSmaBlocks(&tSmaH, &smaKey, SMA_KEY_LEN, dataBuf, tlen, &pEnv->txn) != 0) { - tsdbWarn("vgId:%d insert tsma data blocks fail for index %" PRIi64 ", skey %" PRIi64 ", groupId %" PRIi64 - " since %s", - REPO_ID(pTsdb), indexUid, skey, groupId, tstrerror(terrno)); - tsdbSmaEndCommit(pEnv); - tsdbDestroyTSmaWriteH(&tSmaH); - tsdbUnRefSmaStat(pTsdb, pStat); - return TSDB_CODE_FAILED; - } - tsdbDebug("vgId:%d insert tsma data blocks success for index %" PRIi64 ", skey %" PRIi64 ", groupId %" PRIi64, - REPO_ID(pTsdb), indexUid, skey, groupId); - // TODO:tsdbEndTSmaCommit(); - - // Step 3: reset the SSmaStat - tsdbResetExpiredWindow(pTsdb, pStat, indexUid, skey); - } else { - tsdbWarn("vgId:%d invalid data skey:%" PRIi64 ", tlen %" PRIi32 " during insert tSma data for %" PRIi64, - REPO_ID(pTsdb), skey, tlen, indexUid); - } - - printf("\n"); - } - } - tsdbSmaEndCommit(pEnv); // TODO: not commit for every insert - tsdbDestroyTSmaWriteH(&tSmaH); - tsdbUnRefSmaStat(pTsdb, pStat); - - return TSDB_CODE_SUCCESS; -} - -/** - * @brief Drop tSma data and local cache - * - insert/query reference - * @param pTsdb - * @param msg - * @return int32_t - */ -static int32_t tsdbDropTSmaDataImpl(STsdb *pTsdb, int64_t indexUid) { - SSmaEnv *pEnv = atomic_load_ptr(&REPO_TSMA_ENV(pTsdb)); - - // clear local cache - if (pEnv) { - tsdbDebug("vgId:%d drop tSma local cache for %" PRIi64, REPO_ID(pTsdb), indexUid); - - SSmaStatItem *pItem = taosHashGet(SMA_ENV_STAT_ITEMS(pEnv), &indexUid, sizeof(indexUid)); - if ((pItem) || ((pItem = *(SSmaStatItem **)pItem))) { - if (tsdbSmaStatIsDropped(pItem)) { - tsdbDebug("vgId:%d tSma stat is already dropped for %" PRIi64, REPO_ID(pTsdb), indexUid); - return TSDB_CODE_TDB_INVALID_ACTION; // TODO: duplicate drop msg would be intercepted by mnode - } - - tsdbWLockSma(pEnv); - if (tsdbSmaStatIsDropped(pItem)) { - tsdbUnLockSma(pEnv); - tsdbDebug("vgId:%d tSma stat is already dropped for %" PRIi64, REPO_ID(pTsdb), indexUid); - return TSDB_CODE_TDB_INVALID_ACTION; // TODO: duplicate drop msg would be intercepted by mnode - } - tsdbSmaStatSetDropped(pItem); - tsdbUnLockSma(pEnv); - - int32_t nSleep = 0; - int32_t refVal = INT32_MAX; - while (true) { - if ((refVal = T_REF_VAL_GET(SMA_ENV_STAT(pEnv))) <= 0) { - tsdbDebug("vgId:%d drop index %" PRIi64 " since refVal=%d", REPO_ID(pTsdb), indexUid, refVal); - break; - } - tsdbDebug("vgId:%d wait 1s to drop index %" PRIi64 " since refVal=%d", REPO_ID(pTsdb), indexUid, refVal); - taosSsleep(1); - if (++nSleep > SMA_DROP_EXPIRED_TIME) { - tsdbDebug("vgId:%d drop index %" PRIi64 " after wait %d (refVal=%d)", REPO_ID(pTsdb), indexUid, nSleep, - refVal); - break; - }; - } - - tsdbFreeSmaStatItem(pItem); - tsdbDebug("vgId:%d getTSmaDataImpl failed since no index %" PRIi64 " in local cache", REPO_ID(pTsdb), indexUid); - } - } - // clear sma data files - // TODO: - return TSDB_CODE_SUCCESS; -} - -static int32_t 
tsdbSetRSmaDataFile(STSmaWriteH *pSmaH, int32_t fid) { - STsdb *pTsdb = pSmaH->pTsdb; - - char tSmaFile[TSDB_FILENAME_LEN] = {0}; - snprintf(tSmaFile, TSDB_FILENAME_LEN, "v%df%d.rsma", REPO_ID(pTsdb), fid); - pSmaH->dFile.path = strdup(tSmaFile); - - return TSDB_CODE_SUCCESS; -} - -static int32_t tsdbInsertRSmaDataImpl(STsdb *pTsdb, const char *msg) { - STsdbCfg *pCfg = REPO_CFG(pTsdb); - const SArray *pDataBlocks = (const SArray *)msg; - SSmaEnv *pEnv = atomic_load_ptr(&REPO_RSMA_ENV(pTsdb)); - int64_t indexUid = SMA_TEST_INDEX_UID; - - if (!pEnv) { - terrno = TSDB_CODE_INVALID_PTR; - tsdbWarn("vgId:%d insert rSma data failed since pTSmaEnv is NULL", REPO_ID(pTsdb)); - return terrno; - } - - if (!pDataBlocks) { - terrno = TSDB_CODE_INVALID_PTR; - tsdbWarn("vgId:%d insert rSma data failed since pDataBlocks is NULL", REPO_ID(pTsdb)); - return terrno; - } - - if (taosArrayGetSize(pDataBlocks) <= 0) { - terrno = TSDB_CODE_INVALID_PARA; - tsdbWarn("vgId:%d insert rSma data failed since pDataBlocks is empty", REPO_ID(pTsdb)); - return TSDB_CODE_FAILED; - } - - SSmaStat *pStat = SMA_ENV_STAT(pEnv); - SSmaStatItem *pItem = NULL; - - tsdbRefSmaStat(pTsdb, pStat); - - if (pStat && SMA_STAT_ITEMS(pStat)) { - pItem = taosHashGet(SMA_STAT_ITEMS(pStat), &indexUid, sizeof(indexUid)); - } - - if (!pItem || !(pItem = *(SSmaStatItem **)pItem) || tsdbSmaStatIsDropped(pItem)) { - terrno = TSDB_CODE_TDB_INVALID_SMA_STAT; - tsdbUnRefSmaStat(pTsdb, pStat); - return TSDB_CODE_FAILED; - } - - STSma *pSma = pItem->pSma; - - STSmaWriteH tSmaH = {0}; - - if (tsdbInitTSmaWriteH(&tSmaH, pTsdb, pDataBlocks, pSma->interval, pSma->intervalUnit) != 0) { - return TSDB_CODE_FAILED; - } - - char rPath[TSDB_FILENAME_LEN] = {0}; - char aPath[TSDB_FILENAME_LEN] = {0}; - snprintf(rPath, TSDB_FILENAME_LEN, "%s%s%" PRIi64, SMA_ENV_PATH(pEnv), TD_DIRSEP, indexUid); - tfsAbsoluteName(REPO_TFS(pTsdb), SMA_ENV_DID(pEnv), rPath, aPath); - if (!taosCheckExistFile(aPath)) { - if (tfsMkdirRecurAt(REPO_TFS(pTsdb), rPath, SMA_ENV_DID(pEnv)) != TSDB_CODE_SUCCESS) { - return TSDB_CODE_FAILED; - } - } - - // Step 1: Judge the storage level and days - int32_t storageLevel = tsdbGetSmaStorageLevel(pSma->interval, pSma->intervalUnit); - int32_t daysPerFile = tsdbGetTSmaDays(pTsdb, tSmaH.interval, storageLevel); -#if 0 - int32_t fid = (int32_t)(TSDB_KEY_FID(pData->skey, daysPerFile, pCfg->precision)); - - // Step 2: Set the DFile for storage of SMA index, and iterate/split the TSma data and store to B+Tree index file - // - Set and open the DFile or the B+Tree file - // TODO: tsdbStartTSmaCommit(); - tsdbSetTSmaDataFile(&tSmaH, pData, indexUid, fid); - if (tsdbOpenDBF(pTsdb->pTSmaEnv->dbEnv, &tSmaH.dFile) != 0) { - tsdbWarn("vgId:%d open DB file %s failed since %s", REPO_ID(pTsdb), - tSmaH.dFile.path ? 
tSmaH.dFile.path : "path is NULL", tstrerror(terrno)); - tsdbDestroyTSmaWriteH(&tSmaH); - return TSDB_CODE_FAILED; - } - - if (tsdbInsertTSmaDataSection(&tSmaH, pData) != 0) { - tsdbWarn("vgId:%d insert tSma data section failed since %s", REPO_ID(pTsdb), tstrerror(terrno)); - tsdbDestroyTSmaWriteH(&tSmaH); - return TSDB_CODE_FAILED; - } - // TODO:tsdbEndTSmaCommit(); - - // Step 3: reset the SSmaStat - tsdbResetExpiredWindow(pTsdb, SMA_ENV_STAT(pTsdb->pTSmaEnv), pData->indexUid, pData->skey); -#endif - - tsdbDestroyTSmaWriteH(&tSmaH); - tsdbUnRefSmaStat(pTsdb, pStat); - return TSDB_CODE_SUCCESS; -} - -/** - * @brief - * - * @param pSmaH - * @param pTsdb - * @param interval - * @param intervalUnit - * @return int32_t - */ -static int32_t tsdbInitTSmaReadH(STSmaReadH *pSmaH, STsdb *pTsdb, int64_t interval, int8_t intervalUnit) { - pSmaH->pTsdb = pTsdb; - pSmaH->interval = tsdbGetIntervalByPrecision(interval, intervalUnit, REPO_CFG(pTsdb)->precision, true); - pSmaH->storageLevel = tsdbGetSmaStorageLevel(interval, intervalUnit); - pSmaH->days = tsdbGetTSmaDays(pTsdb, pSmaH->interval, pSmaH->storageLevel); - return TSDB_CODE_SUCCESS; -} - -/** - * @brief Init of tSma FS - * - * @param pReadH - * @param indexUid - * @param skey - * @return int32_t - */ -static int32_t tsdbInitTSmaFile(STSmaReadH *pSmaH, int64_t indexUid, TSKEY skey) { - STsdb *pTsdb = pSmaH->pTsdb; - - int32_t fid = (int32_t)(TSDB_KEY_FID(skey, pSmaH->days, REPO_CFG(pTsdb)->precision)); - char tSmaFile[TSDB_FILENAME_LEN] = {0}; - snprintf(tSmaFile, TSDB_FILENAME_LEN, "%" PRIi64 "%sv%df%d.tsma", indexUid, TD_DIRSEP, REPO_ID(pTsdb), fid); - pSmaH->dFile.path = strdup(tSmaFile); - pSmaH->smaFsIter.iter = 0; - pSmaH->smaFsIter.fid = fid; - return TSDB_CODE_SUCCESS; -} - -/** - * @brief Set and open tSma file if it has key locates in queryWin. - * - * @param pReadH - * @param param - * @param queryWin - * @return true - * @return false - */ -static bool tsdbSetAndOpenTSmaFile(STSmaReadH *pReadH, TSKEY *queryKey) { - SArray *smaFs = pReadH->pTsdb->fs->cstatus->sf; - int32_t nSmaFs = taosArrayGetSize(smaFs); - - tsdbCloseDBF(&pReadH->dFile); - -#if 0 - while (pReadH->smaFsIter.iter < nSmaFs) { - void *pSmaFile = taosArrayGet(smaFs, pReadH->smaFsIter.iter); - if (pSmaFile) { // match(indexName, queryWindow) - // TODO: select the file by index_name ... - pReadH->dFile = pSmaFile; - ++pReadH->smaFsIter.iter; - break; - } - ++pReadH->smaFsIter.iter; - } - - if (pReadH->pDFile) { - tsdbDebug("vg%d: smaFile %s matched", REPO_ID(pReadH->pTsdb), "[pSmaFile dir]"); - return true; - } -#endif - - return false; -} - -/** - * @brief - * - * @param pTsdb Return the data between queryWin and fill the pData. - * @param pData - * @param indexUid - * @param pQuerySKey - * @param nMaxResult The query invoker should control the nMaxResult need to return to avoid OOM. 
- * @return int32_t - */ -static int32_t tsdbGetTSmaDataImpl(STsdb *pTsdb, char *pData, int64_t indexUid, TSKEY querySKey, int32_t nMaxResult) { - SSmaEnv *pEnv = atomic_load_ptr(&REPO_TSMA_ENV(pTsdb)); - SSmaStat *pStat = NULL; - - if (!pEnv) { - terrno = TSDB_CODE_INVALID_PTR; - tsdbWarn("vgId:%d getTSmaDataImpl failed since pTSmaEnv is NULL", REPO_ID(pTsdb)); - return TSDB_CODE_FAILED; - } - - pStat = SMA_ENV_STAT(pEnv); - - tsdbRefSmaStat(pTsdb, pStat); - SSmaStatItem *pItem = taosHashGet(SMA_ENV_STAT_ITEMS(pEnv), &indexUid, sizeof(indexUid)); - if (!pItem || !(pItem = *(SSmaStatItem **)pItem)) { - // Normally pItem should not be NULL, mark all windows as expired and notify query module to fetch raw TS data if - // it's NULL. - tsdbUnRefSmaStat(pTsdb, pStat); - terrno = TSDB_CODE_TDB_INVALID_ACTION; - tsdbDebug("vgId:%d getTSmaDataImpl failed since no index %" PRIi64, REPO_ID(pTsdb), indexUid); - return TSDB_CODE_FAILED; - } - -#if 0 - int32_t nQueryWin = taosArrayGetSize(pQuerySKey); - for (int32_t n = 0; n < nQueryWin; ++n) { - TSKEY skey = taosArrayGet(pQuerySKey, n); - if (taosHashGet(pItem->expiredWindows, &skey, sizeof(TSKEY))) { - // TODO: mark this window as expired. - } - } -#endif - -#if 1 - int8_t smaStat = 0; - if (!tsdbSmaStatIsOK(pItem, &smaStat)) { // TODO: multiple check for large scale sma query - tsdbUnRefSmaStat(pTsdb, pStat); - terrno = TSDB_CODE_TDB_INVALID_SMA_STAT; - tsdbWarn("vgId:%d getTSmaDataImpl failed from index %" PRIi64 " since %s %" PRIi8, REPO_ID(pTsdb), indexUid, - tstrerror(terrno), smaStat); - return TSDB_CODE_FAILED; - } - - if (taosHashGet(pItem->expiredWindows, &querySKey, sizeof(TSKEY))) { - // TODO: mark this window as expired. - tsdbDebug("vgId:%d skey %" PRIi64 " of window exists in expired window for index %" PRIi64, REPO_ID(pTsdb), - querySKey, indexUid); - } else { - tsdbDebug("vgId:%d skey %" PRIi64 " of window not in expired window for index %" PRIi64, REPO_ID(pTsdb), querySKey, - indexUid); - } - - STSma *pTSma = pItem->pSma; -#endif - - STSmaReadH tReadH = {0}; - tsdbInitTSmaReadH(&tReadH, pTsdb, pTSma->interval, pTSma->intervalUnit); - tsdbCloseDBF(&tReadH.dFile); - - tsdbUnRefSmaStat(pTsdb, pStat); - - tsdbInitTSmaFile(&tReadH, indexUid, querySKey); - if (tsdbOpenDBF(pEnv->dbEnv, &tReadH.dFile) != 0) { - tsdbWarn("vgId:%d open DBF %s failed since %s", REPO_ID(pTsdb), tReadH.dFile.path, tstrerror(terrno)); - return TSDB_CODE_FAILED; - } - - char smaKey[SMA_KEY_LEN] = {0}; - void *pSmaKey = &smaKey; - int64_t queryGroupId = 1; - tsdbEncodeTSmaKey(queryGroupId, querySKey, (void **)&pSmaKey); - - tsdbDebug("vgId:%d get sma data from %s: smaKey %" PRIx64 "-%" PRIx64 ", keyLen %d", REPO_ID(pTsdb), - tReadH.dFile.path, *(int64_t *)smaKey, *(int64_t *)POINTER_SHIFT(smaKey, 8), SMA_KEY_LEN); - - void *result = NULL; - int32_t valueSize = 0; - if (!(result = tsdbGetSmaDataByKey(&tReadH.dFile, smaKey, SMA_KEY_LEN, &valueSize))) { - tsdbWarn("vgId:%d get sma data failed from smaIndex %" PRIi64 ", smaKey %" PRIx64 "-%" PRIx64 " since %s", - REPO_ID(pTsdb), indexUid, *(int64_t *)smaKey, *(int64_t *)POINTER_SHIFT(smaKey, 8), tstrerror(terrno)); - tsdbCloseDBF(&tReadH.dFile); - return TSDB_CODE_FAILED; - } - -#ifdef _TEST_SMA_PRINT_DEBUG_LOG_ - for (uint32_t v = 0; v < valueSize; v += 8) { - tsdbWarn("vgId:%d get sma data v[%d]=%" PRIi64, REPO_ID(pTsdb), v, *(int64_t *)POINTER_SHIFT(result, v)); - } -#endif - taosMemoryFreeClear(result); // TODO: fill the result to output - -#if 0 - int32_t nResult = 0; - int64_t lastKey = 0; - - while (true) { - if 
(nResult >= nMaxResult) { - break; - } - - // set and open the file according to the STSma param - if (tsdbSetAndOpenTSmaFile(&tReadH, queryWin)) { - char bTree[100] = "\0"; - while (strncmp(bTree, "has more nodes", 100) == 0) { - if (nResult >= nMaxResult) { - break; - } - // tsdbGetDataFromBTree(bTree, queryWin, lastKey) - // fill the pData - ++nResult; - } - } - } -#endif - // read data from file and fill the result - tsdbCloseDBF(&tReadH.dFile); - return TSDB_CODE_SUCCESS; -} - -int32_t tsdbCreateTSma(STsdb *pTsdb, char *pMsg) { - SSmaCfg vCreateSmaReq = {0}; - if (!tDeserializeSVCreateTSmaReq(pMsg, &vCreateSmaReq)) { - terrno = TSDB_CODE_OUT_OF_MEMORY; - tsdbWarn("vgId:%d tsma create msg received but deserialize failed since %s", REPO_ID(pTsdb), terrstr(terrno)); - return -1; - } - - tsdbDebug("vgId:%d tsma create msg %s:%" PRIi64 " for table %" PRIi64 " received", REPO_ID(pTsdb), - vCreateSmaReq.tSma.indexName, vCreateSmaReq.tSma.indexUid, vCreateSmaReq.tSma.tableUid); - - // record current timezone of server side - vCreateSmaReq.tSma.timezoneInt = tsTimezone; - - if (metaCreateTSma(REPO_META(pTsdb), &vCreateSmaReq) < 0) { - // TODO: handle error - tsdbWarn("vgId:%d tsma %s:%" PRIi64 " create failed for table %" PRIi64 " since %s", REPO_ID(pTsdb), - vCreateSmaReq.tSma.indexName, vCreateSmaReq.tSma.indexUid, vCreateSmaReq.tSma.tableUid, terrstr(terrno)); - tdDestroyTSma(&vCreateSmaReq.tSma); - return -1; - } - - tsdbTSmaAdd(pTsdb, 1); - - tdDestroyTSma(&vCreateSmaReq.tSma); - // TODO: return directly or go on follow steps? - return TSDB_CODE_SUCCESS; -} - -int32_t tsdbDropTSma(STsdb *pTsdb, char *pMsg) { - SVDropTSmaReq vDropSmaReq = {0}; - if (!tDeserializeSVDropTSmaReq(pMsg, &vDropSmaReq)) { - terrno = TSDB_CODE_OUT_OF_MEMORY; - return -1; - } - - // TODO: send msg to stream computing to drop tSma - // if ((send msg to stream computing) < 0) { - // tdDestroyTSma(&vCreateSmaReq); - // return -1; - // } - // - - if (metaDropTSma(REPO_META(pTsdb), vDropSmaReq.indexUid) < 0) { - // TODO: handle error - return -1; - } - - if (tsdbDropTSmaData(pTsdb, vDropSmaReq.indexUid) < 0) { - // TODO: handle error - return -1; - } - - tsdbTSmaSub(pTsdb, 1); - - // TODO: return directly or go on follow steps? - return TSDB_CODE_SUCCESS; -} - -/** - * @brief Check and init qTaskInfo_t, only applicable to stable with SRSmaParam. 
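Reviewer note: tsdbCreateTSma and tsdbDropTSma above keep REPO_TSMA_NUM up to date through tsdbTSmaAdd/tsdbTSmaSub so the hot write path (tsdbUpdateExpiredWindowImpl) can skip all sma bookkeeping with a single atomic load when the vnode has no tsma. The gate, sketched with C11 atomics standing in for the engine's wrappers:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

static _Atomic int16_t tsmaNum;                  /* REPO_TSMA_NUM stand-in */

static void tsma_created(void)   { atomic_fetch_add(&tsmaNum, 1); }
static void tsma_dropped(void)   { atomic_fetch_sub(&tsmaNum, 1); }
static bool tsma_fast_skip(void) { return atomic_load(&tsmaNum) <= 0; }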
- * - * @param pTsdb - * @param pMeta - * @param pReq - * @return int32_t - */ -int32_t tsdbRegisterRSma(STsdb *pTsdb, SMeta *pMeta, SVCreateStbReq *pReq, SMsgCb *pMsgCb) { - if (!pReq->rollup) { - tsdbDebug("vgId:%d return directly since no rollup for stable %s %" PRIi64, REPO_ID(pTsdb), pReq->name, pReq->suid); - return TSDB_CODE_SUCCESS; - } - - SRSmaParam *param = &pReq->pRSmaParam; - - if ((param->qmsg1Len == 0) && (param->qmsg2Len == 0)) { - tsdbWarn("vgId:%d no qmsg1/qmsg2 for rollup stable %s %" PRIi64, REPO_ID(pTsdb), pReq->name, pReq->suid); - return TSDB_CODE_SUCCESS; - } - - if (tsdbCheckAndInitSmaEnv(pTsdb, TSDB_SMA_TYPE_ROLLUP) != TSDB_CODE_SUCCESS) { - terrno = TSDB_CODE_TDB_INIT_FAILED; - return TSDB_CODE_FAILED; - } - - SSmaEnv *pEnv = REPO_RSMA_ENV(pTsdb); - SSmaStat *pStat = SMA_ENV_STAT(pEnv); - SRSmaInfo *pRSmaInfo = NULL; - - pRSmaInfo = taosHashGet(SMA_STAT_INFO_HASH(pStat), &pReq->suid, sizeof(tb_uid_t)); - if (pRSmaInfo) { - tsdbWarn("vgId:%d rsma info already exists for stb: %s, %" PRIi64, REPO_ID(pTsdb), pReq->name, pReq->suid); - return TSDB_CODE_SUCCESS; - } - - pRSmaInfo = (SRSmaInfo *)taosMemoryCalloc(1, sizeof(SRSmaInfo)); - if (!pRSmaInfo) { - terrno = TSDB_CODE_OUT_OF_MEMORY; - return TSDB_CODE_FAILED; - } - - STqReadHandle *pReadHandle = tqInitSubmitMsgScanner(pMeta); - if (!pReadHandle) { - taosMemoryFree(pRSmaInfo); - terrno = TSDB_CODE_OUT_OF_MEMORY; - return TSDB_CODE_FAILED; - } - - SReadHandle handle = { - .reader = pReadHandle, - .meta = pMeta, - .pMsgCb = pMsgCb, - }; - - if (param->qmsg1) { - pRSmaInfo->taskInfo[0] = qCreateStreamExecTaskInfo(param->qmsg1, &handle); - if (!pRSmaInfo->taskInfo[0]) { - taosMemoryFree(pRSmaInfo); - taosMemoryFree(pReadHandle); - return TSDB_CODE_FAILED; - } - } - - if (param->qmsg2) { - pRSmaInfo->taskInfo[1] = qCreateStreamExecTaskInfo(param->qmsg2, &handle); - if (!pRSmaInfo->taskInfo[1]) { - taosMemoryFree(pRSmaInfo); - taosMemoryFree(pReadHandle); - return TSDB_CODE_FAILED; - } - } - - if (taosHashPut(SMA_STAT_INFO_HASH(pStat), &pReq->suid, sizeof(tb_uid_t), &pRSmaInfo, sizeof(pRSmaInfo)) != - TSDB_CODE_SUCCESS) { - return TSDB_CODE_FAILED; - } else { - tsdbDebug("vgId:%d register rsma info succeed for suid:%" PRIi64, REPO_ID(pTsdb), pReq->suid); - } - - return TSDB_CODE_SUCCESS; -} - -/** - * @brief store suid/[uids], prefer to use array and then hash - * - * @param pStore - * @param suid - * @param uid - * @return int32_t - */ -static int32_t tsdbUidStorePut(STbUidStore *pStore, tb_uid_t suid, tb_uid_t *uid) { - // prefer to store suid/uids in array - if ((suid == pStore->suid) || (pStore->suid == 0)) { - if (pStore->suid == 0) { - pStore->suid = suid; - } - if (uid) { - if (!pStore->tbUids) { - if (!(pStore->tbUids = taosArrayInit(1, sizeof(tb_uid_t)))) { - terrno = TSDB_CODE_OUT_OF_MEMORY; - return TSDB_CODE_FAILED; - } - } - if (!taosArrayPush(pStore->tbUids, uid)) { - return TSDB_CODE_FAILED; - } - } - } else { - // store other suid/uids in hash when multiple stable/table included in 1 batch of request - if (!pStore->uidHash) { - pStore->uidHash = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_ENTRY_LOCK); - if (!pStore->uidHash) { - return TSDB_CODE_FAILED; - } - } - if (uid) { - SArray *uidArray = taosHashGet(pStore->uidHash, &suid, sizeof(tb_uid_t)); - if (uidArray && ((uidArray = *(SArray **)uidArray))) { - taosArrayPush(uidArray, uid); - } else { - SArray *pUidArray = taosArrayInit(1, sizeof(tb_uid_t)); - if (!pUidArray) { - terrno = TSDB_CODE_OUT_OF_MEMORY; - return 
TSDB_CODE_FAILED; - } - if (!taosArrayPush(pUidArray, uid)) { - terrno = TSDB_CODE_OUT_OF_MEMORY; - return TSDB_CODE_FAILED; - } - if (taosHashPut(pStore->uidHash, &suid, sizeof(suid), &pUidArray, sizeof(pUidArray)) != 0) { - return TSDB_CODE_FAILED; - } - } - } else { - if (taosHashPut(pStore->uidHash, &suid, sizeof(suid), NULL, 0) != 0) { - return TSDB_CODE_FAILED; - } - } - } - return TSDB_CODE_SUCCESS; -} - -void tsdbUidStoreDestory(STbUidStore *pStore) { - if (pStore) { - if (pStore->uidHash) { - if (pStore->tbUids) { - // When pStore->tbUids not NULL, the pStore->uidHash has k/v; otherwise pStore->uidHash only has keys. - void *pIter = taosHashIterate(pStore->uidHash, NULL); - while (pIter) { - SArray *arr = *(SArray **)pIter; - taosArrayDestroy(arr); - pIter = taosHashIterate(pStore->uidHash, pIter); - } - } - taosHashCleanup(pStore->uidHash); - } - taosArrayDestroy(pStore->tbUids); - } -} - -void *tsdbUidStoreFree(STbUidStore *pStore) { - if (pStore) { - tsdbUidStoreDestory(pStore); - taosMemoryFree(pStore); - } - return NULL; -} - -/** - * @brief fetch suid/uids when create child tables of rollup SMA - * - * @param pTsdb - * @param ppStore - * @param suid - * @param uid - * @return int32_t - */ -int32_t tsdbFetchTbUidList(STsdb *pTsdb, STbUidStore **ppStore, tb_uid_t suid, tb_uid_t uid) { - SSmaEnv *pEnv = REPO_RSMA_ENV((STsdb *)pTsdb); - - // only applicable to rollup SMA ctables - if (!pEnv) { - return TSDB_CODE_SUCCESS; - } - - SSmaStat *pStat = SMA_ENV_STAT(pEnv); - SHashObj *infoHash = NULL; - if (!pStat || !(infoHash = SMA_STAT_INFO_HASH(pStat))) { - terrno = TSDB_CODE_TDB_INVALID_SMA_STAT; - return TSDB_CODE_FAILED; - } - - // info cached when create rsma stable and return directly for non-rsma ctables - if (!taosHashGet(infoHash, &suid, sizeof(tb_uid_t))) { - return TSDB_CODE_SUCCESS; - } - - ASSERT(ppStore != NULL); - - if (!(*ppStore)) { - if (tsdbUidStoreInit(ppStore) != 0) { - return TSDB_CODE_FAILED; - } - } - - if (tsdbUidStorePut(*ppStore, suid, &uid) != 0) { - *ppStore = tsdbUidStoreFree(*ppStore); - return TSDB_CODE_FAILED; - } - - return TSDB_CODE_SUCCESS; -} - -static FORCE_INLINE int32_t tsdbUpdateTbUidListImpl(STsdb *pTsdb, tb_uid_t *suid, SArray *tbUids) { - SSmaEnv *pEnv = REPO_RSMA_ENV(pTsdb); - SSmaStat *pStat = SMA_ENV_STAT(pEnv); - SRSmaInfo *pRSmaInfo = NULL; - - if (!suid || !tbUids) { - terrno = TSDB_CODE_INVALID_PTR; - tsdbError("vgId:%d failed to get rsma info for uid:%" PRIi64 " since %s", REPO_ID(pTsdb), *suid, terrstr(terrno)); - return TSDB_CODE_FAILED; - } - - pRSmaInfo = taosHashGet(SMA_STAT_INFO_HASH(pStat), suid, sizeof(tb_uid_t)); - if (!pRSmaInfo || !(pRSmaInfo = *(SRSmaInfo **)pRSmaInfo)) { - tsdbError("vgId:%d failed to get rsma info for uid:%" PRIi64, REPO_ID(pTsdb), *suid); - terrno = TSDB_CODE_TDB_INVALID_SMA_STAT; - return TSDB_CODE_FAILED; - } - - if (pRSmaInfo->taskInfo[0] && (qUpdateQualifiedTableId(pRSmaInfo->taskInfo[0], tbUids, true) != 0)) { - tsdbError("vgId:%d update tbUidList failed for uid:%" PRIi64 " since %s", REPO_ID(pTsdb), *suid, terrstr(terrno)); - return TSDB_CODE_FAILED; - } else { - tsdbDebug("vgId:%d update tbUidList succeed for qTaskInfo:%p with suid:%" PRIi64 ", uid:%" PRIi64, REPO_ID(pTsdb), - pRSmaInfo->taskInfo[0], *suid, *(int64_t *)taosArrayGet(tbUids, 0)); - } - - if (pRSmaInfo->taskInfo[1] && (qUpdateQualifiedTableId(pRSmaInfo->taskInfo[1], tbUids, true) != 0)) { - tsdbError("vgId:%d update tbUidList failed for uid:%" PRIi64 " since %s", REPO_ID(pTsdb), *suid, terrstr(terrno)); - return TSDB_CODE_FAILED; 
- } else { - tsdbDebug("vgId:%d update tbUidList succeed for qTaskInfo:%p with suid:%" PRIi64 ", uid:%" PRIi64, REPO_ID(pTsdb), - pRSmaInfo->taskInfo[1], *suid, *(int64_t *)taosArrayGet(tbUids, 0)); - } - - return TSDB_CODE_SUCCESS; -} - -int32_t tsdbUpdateTbUidList(STsdb *pTsdb, STbUidStore *pStore) { - if (!pStore || (taosArrayGetSize(pStore->tbUids) == 0)) { - return TSDB_CODE_SUCCESS; - } - - if (tsdbUpdateTbUidListImpl(pTsdb, &pStore->suid, pStore->tbUids) != TSDB_CODE_SUCCESS) { - return TSDB_CODE_FAILED; - } - - void *pIter = taosHashIterate(pStore->uidHash, NULL); - while (pIter) { - tb_uid_t *pTbSuid = (tb_uid_t *)taosHashGetKey(pIter, NULL); - SArray *pTbUids = *(SArray **)pIter; - - if (tsdbUpdateTbUidListImpl(pTsdb, pTbSuid, pTbUids) != TSDB_CODE_SUCCESS) { - taosHashCancelIterate(pStore->uidHash, pIter); - return TSDB_CODE_FAILED; - } - - pIter = taosHashIterate(pStore->uidHash, pIter); - } - return TSDB_CODE_SUCCESS; -} - -static int32_t tsdbProcessSubmitReq(STsdb *pTsdb, int64_t version, void *pReq) { - if (!pReq) { - terrno = TSDB_CODE_INVALID_PTR; - return TSDB_CODE_FAILED; - } - - SSubmitReq *pSubmitReq = (SSubmitReq *)pReq; - - if (tsdbInsertData(pTsdb, version, pSubmitReq, NULL) < 0) { - return TSDB_CODE_FAILED; - } - - return TSDB_CODE_SUCCESS; -} - -static int32_t tsdbFetchSubmitReqSuids(SSubmitReq *pMsg, STbUidStore *pStore) { - ASSERT(pMsg != NULL); - SSubmitMsgIter msgIter = {0}; - SSubmitBlk *pBlock = NULL; - SSubmitBlkIter blkIter = {0}; - STSRow *row = NULL; - - terrno = TSDB_CODE_SUCCESS; - - if (tInitSubmitMsgIter(pMsg, &msgIter) < 0) return -1; - while (true) { - if (tGetSubmitMsgNext(&msgIter, &pBlock) < 0) return -1; - - if (!pBlock) break; - tsdbUidStorePut(pStore, msgIter.suid, NULL); - pStore->uid = msgIter.uid; // TODO: remove, just for debugging - } - - if (terrno != TSDB_CODE_SUCCESS) return -1; - return 0; -} - -static FORCE_INLINE int32_t tsdbExecuteRSmaImpl(STsdb *pTsdb, const void *pMsg, int32_t inputType, - qTaskInfo_t *taskInfo, STSchema *pTSchema, tb_uid_t suid, tb_uid_t uid, - int8_t level) { - SArray *pResult = NULL; - - if (!taskInfo) { - tsdbDebug("vgId:%d no qTaskInfo to execute rsma %" PRIi8 " task for suid:%" PRIu64, REPO_ID(pTsdb), level, suid); - return TSDB_CODE_SUCCESS; - } - - tsdbDebug("vgId:%d execute rsma %" PRIi8 " task for qTaskInfo:%p suid:%" PRIu64, REPO_ID(pTsdb), level, taskInfo, - suid); - - qSetStreamInput(taskInfo, pMsg, inputType); - while (1) { - SSDataBlock *output = NULL; - uint64_t ts; - if (qExecTask(taskInfo, &output, &ts) < 0) { - ASSERT(false); - } - if (!output) { - break; - } - if (!pResult) { - pResult = taosArrayInit(0, sizeof(SSDataBlock)); - if (!pResult) { - terrno = TSDB_CODE_OUT_OF_MEMORY; - return TSDB_CODE_FAILED; - } - } - - taosArrayPush(pResult, output); - } - - if (taosArrayGetSize(pResult) > 0) { - blockDebugShowData(pResult); - STsdb *sinkTsdb = (level == TSDB_RETENTION_L1 ? 
pTsdb->pVnode->pRSma1 : pTsdb->pVnode->pRSma2); - SSubmitReq *pReq = NULL; - if (buildSubmitReqFromDataBlock(&pReq, pResult, pTSchema, TD_VID(pTsdb->pVnode), uid, suid) != 0) { - taosArrayDestroy(pResult); - return TSDB_CODE_FAILED; - } - if (tsdbProcessSubmitReq(sinkTsdb, INT64_MAX, pReq) != 0) { - taosArrayDestroy(pResult); - taosMemoryFreeClear(pReq); - return TSDB_CODE_FAILED; - } - taosMemoryFreeClear(pReq); - } else { - tsdbWarn("vgId:%d no rsma % " PRIi8 " data generated since %s", REPO_ID(pTsdb), level, tstrerror(terrno)); - } - - taosArrayDestroy(pResult); - - return TSDB_CODE_SUCCESS; -} - -static int32_t tsdbExecuteRSma(STsdb *pTsdb, const void *pMsg, int32_t inputType, tb_uid_t suid, tb_uid_t uid) { - SSmaEnv *pEnv = REPO_RSMA_ENV(pTsdb); - if (!pEnv) { - // only applicable when rsma env exists - return TSDB_CODE_SUCCESS; - } - - ASSERT(uid != 0); // TODO: remove later - - SSmaStat *pStat = SMA_ENV_STAT(pEnv); - SRSmaInfo *pRSmaInfo = NULL; - - pRSmaInfo = taosHashGet(SMA_STAT_INFO_HASH(pStat), &suid, sizeof(tb_uid_t)); - - if (!pRSmaInfo || !(pRSmaInfo = *(SRSmaInfo **)pRSmaInfo)) { - tsdbDebug("vgId:%d no rsma info for suid:%" PRIu64, REPO_ID(pTsdb), suid); - return TSDB_CODE_SUCCESS; - } - if (!pRSmaInfo->taskInfo[0]) { - tsdbDebug("vgId:%d no rsma qTaskInfo for suid:%" PRIu64, REPO_ID(pTsdb), suid); - return TSDB_CODE_SUCCESS; - } - - if (inputType == STREAM_DATA_TYPE_SUBMIT_BLOCK) { - // TODO: use the proper schema instead of 0, and cache STSchema in cache - STSchema *pTSchema = metaGetTbTSchema(pTsdb->pVnode->pMeta, suid, 1); - if (!pTSchema) { - terrno = TSDB_CODE_TDB_IVD_TB_SCHEMA_VERSION; - return TSDB_CODE_FAILED; - } - tsdbExecuteRSmaImpl(pTsdb, pMsg, inputType, pRSmaInfo->taskInfo[0], pTSchema, suid, uid, TSDB_RETENTION_L1); - tsdbExecuteRSmaImpl(pTsdb, pMsg, inputType, pRSmaInfo->taskInfo[1], pTSchema, suid, uid, TSDB_RETENTION_L2); - taosMemoryFree(pTSchema); - } - - return TSDB_CODE_SUCCESS; -} - -int32_t tsdbTriggerRSma(STsdb *pTsdb, void *pMsg, int32_t inputType) { - SSmaEnv *pEnv = REPO_RSMA_ENV(pTsdb); - if (!pEnv) { - // only applicable when rsma env exists - return TSDB_CODE_SUCCESS; - } - - if (inputType == STREAM_DATA_TYPE_SUBMIT_BLOCK) { - STbUidStore uidStore = {0}; - tsdbFetchSubmitReqSuids(pMsg, &uidStore); - - if (uidStore.suid != 0) { - tsdbExecuteRSma(pTsdb, pMsg, inputType, uidStore.suid, uidStore.uid); - - void *pIter = taosHashIterate(uidStore.uidHash, NULL); - while (pIter) { - tb_uid_t *pTbSuid = (tb_uid_t *)taosHashGetKey(pIter, NULL); - tsdbExecuteRSma(pTsdb, pMsg, inputType, *pTbSuid, 0); - pIter = taosHashIterate(uidStore.uidHash, pIter); - } - - tsdbUidStoreDestory(&uidStore); - } - } - return TSDB_CODE_SUCCESS; -} - -#if 0 -/** - * @brief Get the start TS key of the last data block of one interval/sliding. - * - * @param pTsdb - * @param param - * @param result - * @return int32_t - * 1) Return 0 and fill the result if the check procedure is normal; - * 2) Return -1 if error occurs during the check procedure. - */ -int32_t tsdbGetTSmaStatus(STsdb *pTsdb, void *smaIndex, void *result) { - const char *procedure = ""; - if (strncmp(procedure, "get the start TS key of the last data block", 100) != 0) { - return -1; - } - // fill the result - return TSDB_CODE_SUCCESS; -} - -/** - * @brief Remove the tSma data files related to param between pWin. 
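Reviewer note: tsdbExecuteRSmaImpl above follows a push-one, pull-until-dry shape: feed the submit message to the stream task, collect result blocks until qExecTask yields no output, then convert the batch into a submit request for the level-1 or level-2 sink tsdb. A self-contained sketch of that loop, with stub types standing in for qTaskInfo_t and SSDataBlock:

#include <stdint.h>
#include <stddef.h>

typedef struct { int done; } Task;    /* stand-in for qTaskInfo_t */
typedef struct { int rows; } Block;   /* stand-in for SSDataBlock */

static Block demoBlock = { .rows = 1 };

static void q_set_stream_input(Task *t, const void *msg, int type) {
  (void)msg; (void)type;
  t->done = 0;                        /* new input: the task has work again */
}

static int q_exec_task(Task *t, Block **out, uint64_t *ts) {
  *ts = 0;
  if (t->done) { *out = NULL; return 0; }   /* drained: no more output */
  t->done = 1;
  *out = &demoBlock;                  /* emit a single block for the demo */
  return 0;
}

/* Push one input message, pull result blocks until the task runs dry,
 * hand each block to the sink; mirrors tsdbExecuteRSmaImpl's while(1). */
static int drain_task(Task *t, const void *msg, int type,
                      int (*sink)(Block *, void *), void *arg) {
  q_set_stream_input(t, msg, type);
  for (;;) {
    Block   *out = NULL;
    uint64_t ts  = 0;
    if (q_exec_task(t, &out, &ts) < 0) return -1;
    if (out == NULL) return 0;
    if (sink(out, arg) < 0) return -1;
  }
}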
- * - * @param pTsdb - * @param param - * @param pWin - * @return int32_t - */ -int32_t tsdbRemoveTSmaData(STsdb *pTsdb, void *smaIndex, STimeWindow *pWin) { - // for ("tSmaFiles of param-interval-sliding between pWin") { - // // remove the tSmaFile - // } - return TSDB_CODE_SUCCESS; -} -#endif - -// TODO: Who is responsible for resource allocate and release? -int32_t tsdbInsertTSmaData(STsdb *pTsdb, int64_t indexUid, const char *msg) { - int32_t code = TSDB_CODE_SUCCESS; - if ((code = tsdbInsertTSmaDataImpl(pTsdb, indexUid, msg)) < 0) { - tsdbWarn("vgId:%d insert tSma data failed since %s", REPO_ID(pTsdb), tstrerror(terrno)); - } - // TODO: destroy SSDataBlocks(msg) - return code; -} - -int32_t tsdbUpdateSmaWindow(STsdb *pTsdb, SSubmitReq *pMsg, int64_t version) { - int32_t code = TSDB_CODE_SUCCESS; - if ((code = tsdbUpdateExpiredWindowImpl(pTsdb, pMsg, version)) < 0) { - tsdbWarn("vgId:%d update expired sma window failed since %s", REPO_ID(pTsdb), tstrerror(terrno)); - } - return code; -} - -int32_t tsdbInsertRSmaData(STsdb *pTsdb, char *msg) { - int32_t code = TSDB_CODE_SUCCESS; - if ((code = tsdbInsertRSmaDataImpl(pTsdb, msg)) < 0) { - tsdbWarn("vgId:%d insert rSma data failed since %s", REPO_ID(pTsdb), tstrerror(terrno)); - } - return code; -} - -int32_t tsdbGetTSmaData(STsdb *pTsdb, char *pData, int64_t indexUid, TSKEY querySKey, int32_t nMaxResult) { - int32_t code = TSDB_CODE_SUCCESS; - if ((code = tsdbGetTSmaDataImpl(pTsdb, pData, indexUid, querySKey, nMaxResult)) < 0) { - tsdbWarn("vgId:%d get tSma data failed since %s", REPO_ID(pTsdb), tstrerror(terrno)); - } - return code; -} - -int32_t tsdbDropTSmaData(STsdb *pTsdb, int64_t indexUid) { - int32_t code = TSDB_CODE_SUCCESS; - if ((code = tsdbDropTSmaDataImpl(pTsdb, indexUid)) < 0) { - tsdbWarn("vgId:%d drop tSma data failed since %s", REPO_ID(pTsdb), tstrerror(terrno)); - } - return code; -} \ No newline at end of file diff --git a/source/dnode/vnode/src/tsdb/tsdbTDBImpl.c b/source/dnode/vnode/src/tsdb/tsdbTDBImpl.c deleted file mode 100644 index a553f32bee0ad4d0df24ca844ad2616e5c4157ae..0000000000000000000000000000000000000000 --- a/source/dnode/vnode/src/tsdb/tsdbTDBImpl.c +++ /dev/null @@ -1,128 +0,0 @@ -/* - * Copyright (c) 2019 TAOS Data, Inc. - * - * This program is free software: you can use, redistribute, and/or modify - * it under the terms of the GNU Affero General Public License, version 3 - * or later ("AGPL"), as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */ - -#define ALLOW_FORBID_FUNC - -#include "tsdb.h" - -int32_t tsdbOpenDBEnv(TDB **ppEnv, const char *path) { - int ret = 0; - - if (path == NULL) return -1; - - ret = tdbOpen(path, 4096, 256, ppEnv); // use as param - - if (ret != 0) { - tsdbError("Failed to create tsdb db env, ret = %d", ret); - return -1; - } - - return 0; -} - -int32_t tsdbCloseDBEnv(TDB *pEnv) { return tdbClose(pEnv); } - -static inline int tsdbSmaKeyCmpr(const void *arg1, int len1, const void *arg2, int len2) { - const SSmaKey *pKey1 = (const SSmaKey *)arg1; - const SSmaKey *pKey2 = (const SSmaKey *)arg2; - - ASSERT(len1 == len2 && len1 == sizeof(SSmaKey)); - - if (pKey1->skey < pKey2->skey) { - return -1; - } else if (pKey1->skey > pKey2->skey) { - return 1; - } - if (pKey1->groupId < pKey2->groupId) { - return -1; - } else if (pKey1->groupId > pKey2->groupId) { - return 1; - } - - return 0; -} - -static int32_t tsdbOpenDBDb(TTB **ppDB, TDB *pEnv, const char *pFName) { - int ret; - tdb_cmpr_fn_t compFunc; - - // Create a database - compFunc = tsdbSmaKeyCmpr; - ret = tdbTbOpen(pFName, -1, -1, compFunc, pEnv, ppDB); - - return 0; -} - -static int32_t tsdbCloseDBDb(TTB *pDB) { return tdbTbClose(pDB); } - -int32_t tsdbOpenDBF(TDB *pEnv, SDBFile *pDBF) { - // TEnv is shared by a group of SDBFile - if (!pEnv || !pDBF) { - terrno = TSDB_CODE_INVALID_PTR; - return -1; - } - - // Open DBF - if (tsdbOpenDBDb(&(pDBF->pDB), pEnv, pDBF->path) < 0) { - terrno = TSDB_CODE_TDB_INIT_FAILED; - tsdbCloseDBDb(pDBF->pDB); - return -1; - } - - return 0; -} - -int32_t tsdbCloseDBF(SDBFile *pDBF) { - int32_t ret = 0; - if (pDBF->pDB) { - ret = tsdbCloseDBDb(pDBF->pDB); - pDBF->pDB = NULL; - } - taosMemoryFreeClear(pDBF->path); - return ret; -} - -int32_t tsdbSaveSmaToDB(SDBFile *pDBF, void *pKey, int32_t keyLen, void *pVal, int32_t valLen, TXN *txn) { - int32_t ret; - - ret = tdbTbInsert(pDBF->pDB, pKey, keyLen, pVal, valLen, txn); - if (ret < 0) { - tsdbError("Failed to create insert sma data into db, ret = %d", ret); - return -1; - } - - return 0; -} - -void *tsdbGetSmaDataByKey(SDBFile *pDBF, const void *pKey, int32_t keyLen, int32_t *valLen) { - void *pVal = NULL; - int ret; - - ret = tdbTbGet(pDBF->pDB, pKey, keyLen, &pVal, valLen); - - if (ret < 0) { - tsdbError("Failed to get sma data from db, ret = %d", ret); - return NULL; - } - - ASSERT(*valLen >= 0); - - // TODO: lock? - // TODO: Would the key/value be destoryed during return the data? - // TODO: How about the key is updated while value length is changed? The original value buffer would be freed - // automatically? 
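  // A minimal caller sketch answering the TODOs above, assuming the buffer
  // returned through tdbTbGet() is TDB-allocated and owned by the caller,
  // with tdbFree() as the matching release call (both are assumptions here):
  //
  //   int32_t vLen = 0;
  //   void   *pVal = tsdbGetSmaDataByKey(pDBF, &smaKey, sizeof(smaKey), &vLen);
  //   if (pVal != NULL) {
  //     // ... consume vLen bytes of SMA payload ...
  //     tdbFree(pVal);
  //   }
  //
  // Under that ownership model, a concurrent update of the same key only
  // changes what later lookups observe; it cannot shrink or free a buffer
  // that has already been handed out.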
- - return pVal; -} \ No newline at end of file diff --git a/source/dnode/vnode/src/tsdb/tsdbWrite.c b/source/dnode/vnode/src/tsdb/tsdbWrite.c index aab4da26a37119c6e0044849b1494ede2f33e552..6faf6bd1679c36dd0c9fdc0bed538f74cafc13cd 100644 --- a/source/dnode/vnode/src/tsdb/tsdbWrite.c +++ b/source/dnode/vnode/src/tsdb/tsdbWrite.c @@ -28,7 +28,7 @@ int tsdbInsertData(STsdb *pTsdb, int64_t version, SSubmitReq *pMsg, SSubmitRsp * // scan and convert if (tsdbScanAndConvertSubmitMsg(pTsdb, pMsg) < 0) { if (terrno != TSDB_CODE_TDB_TABLE_RECONFIGURE) { - tsdbError("vgId:%d failed to insert data since %s", REPO_ID(pTsdb), tstrerror(terrno)); + tsdbError("vgId:%d, failed to insert data since %s", REPO_ID(pTsdb), tstrerror(terrno)); } return -1; } @@ -59,7 +59,7 @@ static FORCE_INLINE int tsdbCheckRowRange(STsdb *pTsdb, STable *pTable, STSRow * TSKEY now) { TSKEY rowKey = TD_ROW_KEY(row); if (rowKey < minKey || rowKey > maxKey) { - tsdbError("vgId:%d table %s tid %d uid %" PRIu64 " timestamp is out of range! now %" PRId64 " minKey %" PRId64 + tsdbError("vgId:%d, table %s tid %d uid %" PRIu64 " timestamp is out of range! now %" PRId64 " minKey %" PRId64 " maxKey %" PRId64 " row key %" PRId64, REPO_ID(pTsdb), TABLE_CHAR_NAME(pTable), TABLE_TID(pTable), TABLE_UID(pTable), now, minKey, maxKey, rowKey); @@ -75,7 +75,7 @@ static FORCE_INLINE int tsdbCheckRowRange(STsdb *pTsdb, tb_uid_t uid, STSRow *ro TSKEY now) { TSKEY rowKey = TD_ROW_KEY(row); if (rowKey < minKey || rowKey > maxKey) { - tsdbError("vgId:%d table uid %" PRIu64 " timestamp is out of range! now %" PRId64 " minKey %" PRId64 + tsdbError("vgId:%d, table uid %" PRIu64 " timestamp is out of range! now %" PRId64 " minKey %" PRId64 " maxKey %" PRId64 " row key %" PRId64, REPO_ID(pTsdb), uid, now, minKey, maxKey, rowKey); terrno = TSDB_CODE_TDB_TIMESTAMP_OUT_OF_RANGE; @@ -115,7 +115,7 @@ int tsdbScanAndConvertSubmitMsg(STsdb *pTsdb, SSubmitReq *pMsg) { #if 0 if (pBlock->tid <= 0 || pBlock->tid >= pMeta->maxTables) { - tsdbError("vgId:%d failed to get table to insert data, uid %" PRIu64 " tid %d", REPO_ID(pTsdb), pBlock->uid, + tsdbError("vgId:%d, failed to get table to insert data, uid %" PRIu64 " tid %d", REPO_ID(pTsdb), pBlock->uid, pBlock->tid); terrno = TSDB_CODE_TDB_INVALID_TABLE_ID; return -1; @@ -123,14 +123,14 @@ int tsdbScanAndConvertSubmitMsg(STsdb *pTsdb, SSubmitReq *pMsg) { STable *pTable = pMeta->tables[pBlock->tid]; if (pTable == NULL || TABLE_UID(pTable) != pBlock->uid) { - tsdbError("vgId:%d failed to get table to insert data, uid %" PRIu64 " tid %d", REPO_ID(pTsdb), pBlock->uid, + tsdbError("vgId:%d, failed to get table to insert data, uid %" PRIu64 " tid %d", REPO_ID(pTsdb), pBlock->uid, pBlock->tid); terrno = TSDB_CODE_TDB_INVALID_TABLE_ID; return -1; } if (TABLE_TYPE(pTable) == TSDB_SUPER_TABLE) { - tsdbError("vgId:%d invalid action trying to insert a super table %s", REPO_ID(pTsdb), TABLE_CHAR_NAME(pTable)); + tsdbError("vgId:%d, invalid action trying to insert a super table %s", REPO_ID(pTsdb), TABLE_CHAR_NAME(pTable)); terrno = TSDB_CODE_TDB_INVALID_ACTION; return -1; } diff --git a/source/dnode/vnode/src/vnd/vnodeBufPool.c b/source/dnode/vnode/src/vnd/vnodeBufPool.c index 9122913cda69d05889e1f575a5da4b61ef4a03a9..9ca4dd6efb981acdf2ff271635b7e146052c7a40 100644 --- a/source/dnode/vnode/src/vnd/vnodeBufPool.c +++ b/source/dnode/vnode/src/vnd/vnodeBufPool.c @@ -30,7 +30,7 @@ int vnodeOpenBufPool(SVnode *pVnode, int64_t size) { // create pool ret = vnodeBufPoolCreate(size, &pPool); if (ret < 0) { - vError("vgId:%d failed to open 
vnode buffer pool since %s", TD_VID(pVnode), tstrerror(terrno)); + vError("vgId:%d, failed to open vnode buffer pool since %s", TD_VID(pVnode), tstrerror(terrno)); vnodeCloseBufPool(pVnode); return -1; } @@ -40,7 +40,7 @@ int vnodeOpenBufPool(SVnode *pVnode, int64_t size) { pVnode->pPool = pPool; } - vDebug("vgId:%d vnode buffer pool is opened, pool size: %" PRId64, TD_VID(pVnode), size); + vDebug("vgId:%d, vnode buffer pool is opened, pool size: %" PRId64, TD_VID(pVnode), size); return 0; } @@ -53,7 +53,7 @@ int vnodeCloseBufPool(SVnode *pVnode) { vnodeBufPoolDestroy(pPool); } - vDebug("vgId:%d vnode buffer pool is closed", TD_VID(pVnode)); + vDebug("vgId:%d, vnode buffer pool is closed", TD_VID(pVnode)); return 0; } diff --git a/source/dnode/vnode/src/vnd/vnodeCfg.c b/source/dnode/vnode/src/vnd/vnodeCfg.c index a66ecc493d7cbef19370349568398d084dc5bc27..e8fa2ed3c140312d3f64d42fbf5449178c67a772 100644 --- a/source/dnode/vnode/src/vnd/vnodeCfg.c +++ b/source/dnode/vnode/src/vnd/vnodeCfg.c @@ -56,6 +56,8 @@ int vnodeEncodeConfig(const void *pObj, SJson *pJson) { if (tjsonAddIntegerToObject(pJson, "szBuf", pCfg->szBuf) < 0) return -1; if (tjsonAddIntegerToObject(pJson, "isHeap", pCfg->isHeap) < 0) return -1; if (tjsonAddIntegerToObject(pJson, "isWeak", pCfg->isWeak) < 0) return -1; + if (tjsonAddIntegerToObject(pJson, "isTsma", pCfg->isTsma) < 0) return -1; + if (tjsonAddIntegerToObject(pJson, "isRsma", pCfg->isRsma) < 0) return -1; if (tjsonAddIntegerToObject(pJson, "precision", pCfg->tsdbCfg.precision) < 0) return -1; if (tjsonAddIntegerToObject(pJson, "update", pCfg->tsdbCfg.update) < 0) return -1; if (tjsonAddIntegerToObject(pJson, "compression", pCfg->tsdbCfg.compression) < 0) return -1; @@ -130,6 +132,10 @@ int vnodeDecodeConfig(const SJson *pJson, void *pObj) { if(code < 0) return -1; tjsonGetNumberValue(pJson, "isWeak", pCfg->isWeak, code); if(code < 0) return -1; + tjsonGetNumberValue(pJson, "isTsma", pCfg->isTsma, code); + if(code < 0) return -1; + tjsonGetNumberValue(pJson, "isRsma", pCfg->isRsma, code); + if(code < 0) return -1; tjsonGetNumberValue(pJson, "precision", pCfg->tsdbCfg.precision, code); if(code < 0) return -1; tjsonGetNumberValue(pJson, "update", pCfg->tsdbCfg.update, code); diff --git a/source/dnode/vnode/src/vnd/vnodeCommit.c b/source/dnode/vnode/src/vnd/vnodeCommit.c index b4fbd01c633c87ae8d14c707a9bfba2cbb0511a0..3715866bb88f3ae030d5f65c7ce938e69120f466 100644 --- a/source/dnode/vnode/src/vnd/vnodeCommit.c +++ b/source/dnode/vnode/src/vnd/vnodeCommit.c @@ -42,29 +42,29 @@ int vnodeBegin(SVnode *pVnode) { // begin meta if (metaBegin(pVnode->pMeta) < 0) { - vError("vgId:%d failed to begin meta since %s", TD_VID(pVnode), tstrerror(terrno)); + vError("vgId:%d, failed to begin meta since %s", TD_VID(pVnode), tstrerror(terrno)); return -1; } // begin tsdb if (pVnode->pSma) { if (tsdbBegin(VND_RSMA0(pVnode)) < 0) { - vError("vgId:%d failed to begin rsma0 since %s", TD_VID(pVnode), tstrerror(terrno)); + vError("vgId:%d, failed to begin rsma0 since %s", TD_VID(pVnode), tstrerror(terrno)); return -1; } if (tsdbBegin(VND_RSMA1(pVnode)) < 0) { - vError("vgId:%d failed to begin rsma1 since %s", TD_VID(pVnode), tstrerror(terrno)); + vError("vgId:%d, failed to begin rsma1 since %s", TD_VID(pVnode), tstrerror(terrno)); return -1; } if (tsdbBegin(VND_RSMA2(pVnode)) < 0) { - vError("vgId:%d failed to begin rsma2 since %s", TD_VID(pVnode), tstrerror(terrno)); + vError("vgId:%d, failed to begin rsma2 since %s", TD_VID(pVnode), tstrerror(terrno)); return -1; } } else { if 
(tsdbBegin(pVnode->pTsdb) < 0) { - vError("vgId:%d failed to begin tsdb since %s", TD_VID(pVnode), tstrerror(terrno)); + vError("vgId:%d, failed to begin tsdb since %s", TD_VID(pVnode), tstrerror(terrno)); return -1; } } @@ -110,7 +110,7 @@ int vnodeSaveInfo(const char *dir, const SVnodeInfo *pInfo) { // free info binary taosMemoryFree(data); - vInfo("vgId:%d vnode info is saved, fname: %s", pInfo->config.vgId, fname); + vInfo("vgId:%d, vnode info is saved, fname: %s", pInfo->config.vgId, fname); return 0; @@ -132,7 +132,7 @@ int vnodeCommitInfo(const char *dir, const SVnodeInfo *pInfo) { return -1; } - vInfo("vgId:%d vnode info is committed", pInfo->config.vgId); + vInfo("vgId:%d, vnode info is committed", pInfo->config.vgId); return 0; } @@ -210,7 +210,7 @@ int vnodeCommit(SVnode *pVnode) { SVnodeInfo info = {0}; char dir[TSDB_FILENAME_LEN]; - vInfo("vgId:%d start to commit, version: %" PRId64, TD_VID(pVnode), pVnode->state.applied); + vInfo("vgId:%d, start to commit, version: %" PRId64, TD_VID(pVnode), pVnode->state.applied); pVnode->onCommit = pVnode->inUse; pVnode->inUse = NULL; @@ -230,7 +230,7 @@ int vnodeCommit(SVnode *pVnode) { return -1; } - if(vnodeIsRollup(pVnode)) { + if (VND_IS_RSMA(pVnode)) { if (tsdbCommit(VND_RSMA0(pVnode)) < 0) { ASSERT(0); return -1; @@ -250,7 +250,6 @@ int vnodeCommit(SVnode *pVnode) { } } - if (tqCommit(pVnode->pTq) < 0) { ASSERT(0); return -1; @@ -269,7 +268,7 @@ int vnodeCommit(SVnode *pVnode) { pVnode->pPool = pVnode->onCommit; pVnode->onCommit = NULL; - vInfo("vgId:%d commit over", TD_VID(pVnode)); + vInfo("vgId:%d, commit over", TD_VID(pVnode)); return 0; } diff --git a/source/dnode/vnode/src/vnd/vnodeModule.c b/source/dnode/vnode/src/vnd/vnodeModule.c index efae74b55a95525c105c7a8c3de3e887a0f3b2d2..d0aede145eb8640e1e9031160d5ab7573d4a74e8 100644 --- a/source/dnode/vnode/src/vnd/vnodeModule.c +++ b/source/dnode/vnode/src/vnd/vnodeModule.c @@ -69,6 +69,9 @@ int vnodeInit(int nthreads) { if (walInit() < 0) { return -1; } + if (tqInit() < 0) { + return -1; + } return 0; } @@ -94,6 +97,9 @@ void vnodeCleanup() { taosMemoryFreeClear(vnodeGlobal.threads); taosThreadCondDestroy(&(vnodeGlobal.hasTask)); taosThreadMutexDestroy(&(vnodeGlobal.mutex)); + + walCleanUp(); + tqCleanUp(); } int vnodeScheduleTask(int (*execute)(void*), void* arg) { @@ -155,4 +161,4 @@ static void* loop(void* arg) { } return NULL; -} \ No newline at end of file +} diff --git a/source/dnode/vnode/src/vnd/vnodeOpen.c b/source/dnode/vnode/src/vnd/vnodeOpen.c index a90bb7afcb6847ba0cb803d7a7e58720159bf10f..a85b8306165326bc07f643718e9b67201e668de6 100644 --- a/source/dnode/vnode/src/vnd/vnodeOpen.c +++ b/source/dnode/vnode/src/vnd/vnodeOpen.c @@ -23,13 +23,13 @@ int vnodeCreate(const char *path, SVnodeCfg *pCfg, STfs *pTfs) { // check config if (vnodeCheckCfg(pCfg) < 0) { - vError("vgId:%d failed to create vnode since: %s", pCfg->vgId, tstrerror(terrno)); + vError("vgId:%d, failed to create vnode since: %s", pCfg->vgId, tstrerror(terrno)); return -1; } // create vnode env if (tfsMkdir(pTfs, path) < 0) { - vError("vgId:%d failed to create vnode since: %s", pCfg->vgId, tstrerror(terrno)); + vError("vgId:%d, failed to create vnode since: %s", pCfg->vgId, tstrerror(terrno)); return -1; } @@ -39,11 +39,11 @@ int vnodeCreate(const char *path, SVnodeCfg *pCfg, STfs *pTfs) { info.state.applied = -1; if (vnodeSaveInfo(dir, &info) < 0 || vnodeCommitInfo(dir, &info) < 0) { - vError("vgId:%d failed to save vnode config since %s", pCfg->vgId, tstrerror(terrno)); + vError("vgId:%d, failed to save 
vnode config since %s", pCfg->vgId, tstrerror(terrno)); return -1; } - vInfo("vgId:%d vnode is created", pCfg->vgId); + vInfo("vgId:%d, vnode is created", pCfg->vgId); return 0; } @@ -70,7 +70,7 @@ SVnode *vnodeOpen(const char *path, STfs *pTfs, SMsgCb msgCb) { pVnode = (SVnode *)taosMemoryCalloc(1, sizeof(*pVnode) + strlen(path) + 1); if (pVnode == NULL) { terrno = TSDB_CODE_OUT_OF_MEMORY; - vError("vgId:%d failed to open vnode since %s", info.config.vgId, tstrerror(terrno)); + vError("vgId:%d, failed to open vnode since %s", info.config.vgId, tstrerror(terrno)); return NULL; } @@ -86,25 +86,25 @@ SVnode *vnodeOpen(const char *path, STfs *pTfs, SMsgCb msgCb) { // open buffer pool if (vnodeOpenBufPool(pVnode, pVnode->config.isHeap ? 0 : pVnode->config.szBuf / 3) < 0) { - vError("vgId:%d failed to open vnode buffer pool since %s", TD_VID(pVnode), tstrerror(terrno)); + vError("vgId:%d, failed to open vnode buffer pool since %s", TD_VID(pVnode), tstrerror(terrno)); goto _err; } // open meta if (metaOpen(pVnode, &pVnode->pMeta) < 0) { - vError("vgId:%d failed to open vnode meta since %s", TD_VID(pVnode), tstrerror(terrno)); + vError("vgId:%d, failed to open vnode meta since %s", TD_VID(pVnode), tstrerror(terrno)); goto _err; } // open tsdb - if (!vnodeIsRollup(pVnode) && tsdbOpen(pVnode, &VND_TSDB(pVnode), VNODE_TSDB_DIR, TSDB_TYPE_TSDB) < 0) { - vError("vgId:%d failed to open vnode tsdb since %s", TD_VID(pVnode), tstrerror(terrno)); + if (!VND_IS_RSMA(pVnode) && tsdbOpen(pVnode, &VND_TSDB(pVnode), VNODE_TSDB_DIR, NULL) < 0) { + vError("vgId:%d, failed to open vnode tsdb since %s", TD_VID(pVnode), tstrerror(terrno)); goto _err; } // open sma if (smaOpen(pVnode)) { - vError("vgId:%d failed to open vnode sma since %s", TD_VID(pVnode), tstrerror(terrno)); + vError("vgId:%d, failed to open vnode sma since %s", TD_VID(pVnode), tstrerror(terrno)); goto _err; } @@ -113,7 +113,7 @@ SVnode *vnodeOpen(const char *path, STfs *pTfs, SMsgCb msgCb) { taosRealPath(tdir, NULL, sizeof(tdir)); pVnode->pWal = walOpen(tdir, &(pVnode->config.walCfg)); if (pVnode->pWal == NULL) { - vError("vgId:%d failed to open vnode wal since %s", TD_VID(pVnode), tstrerror(terrno)); + vError("vgId:%d, failed to open vnode wal since %s", TD_VID(pVnode), tstrerror(terrno)); goto _err; } @@ -122,27 +122,27 @@ SVnode *vnodeOpen(const char *path, STfs *pTfs, SMsgCb msgCb) { taosRealPath(tdir, NULL, sizeof(tdir)); pVnode->pTq = tqOpen(tdir, pVnode, pVnode->pWal); if (pVnode->pTq == NULL) { - vError("vgId:%d failed to open vnode tq since %s", TD_VID(pVnode), tstrerror(terrno)); + vError("vgId:%d, failed to open vnode tq since %s", TD_VID(pVnode), tstrerror(terrno)); goto _err; } // open query if (vnodeQueryOpen(pVnode)) { - vError("vgId:%d failed to open vnode query since %s", TD_VID(pVnode), tstrerror(terrno)); + vError("vgId:%d, failed to open vnode query since %s", TD_VID(pVnode), tstrerror(terrno)); terrno = TSDB_CODE_OUT_OF_MEMORY; goto _err; } // vnode begin if (vnodeBegin(pVnode) < 0) { - vError("vgId:%d failed to begin since %s", TD_VID(pVnode), tstrerror(terrno)); + vError("vgId:%d, failed to begin since %s", TD_VID(pVnode), tstrerror(terrno)); terrno = TSDB_CODE_OUT_OF_MEMORY; goto _err; } // open sync if (vnodeSyncOpen(pVnode, dir)) { - vError("vgId:%d failed to open sync since %s", TD_VID(pVnode), tstrerror(terrno)); + vError("vgId:%d, failed to open sync since %s", TD_VID(pVnode), tstrerror(terrno)); terrno = TSDB_CODE_OUT_OF_MEMORY; goto _err; } @@ -189,4 +189,4 @@ void vnodeStop(SVnode *pVnode) {} int64_t 
vnodeGetSyncHandle(SVnode *pVnode) { return pVnode->sync; } -void vnodeGetSnapshot(SVnode *pVnode, SSnapshot *pSnapshot) { pSnapshot->lastApplyIndex = pVnode->state.committed; } \ No newline at end of file +void vnodeGetSnapshot(SVnode *pVnode, SSnapshot *pSnapshot) { pSnapshot->lastApplyIndex = pVnode->state.committed; } diff --git a/source/dnode/vnode/src/vnd/vnodeSvr.c b/source/dnode/vnode/src/vnd/vnodeSvr.c index ae7ec5a950dfe91bc967e99de441ce648e8c4e10..47eaf2d5a5e270cabbb33290682bd9c7f096b5c3 100644 --- a/source/dnode/vnode/src/vnd/vnodeSvr.c +++ b/source/dnode/vnode/src/vnd/vnodeSvr.c @@ -22,9 +22,10 @@ static int vnodeProcessCreateTbReq(SVnode *pVnode, int64_t version, void *pReq, static int vnodeProcessAlterTbReq(SVnode *pVnode, int64_t version, void *pReq, int32_t len, SRpcMsg *pRsp); static int vnodeProcessDropTbReq(SVnode *pVnode, int64_t version, void *pReq, int32_t len, SRpcMsg *pRsp); static int vnodeProcessSubmitReq(SVnode *pVnode, int64_t version, void *pReq, int32_t len, SRpcMsg *pRsp); -static int vnodeProcessCreateTSmaReq(SVnode *pVnode, int64_t version, void *pReq, int len, SRpcMsg *pRsp); +static int vnodeProcessCreateTSmaReq(SVnode *pVnode, int64_t version, void *pReq, int32_t len, SRpcMsg *pRsp); int32_t vnodePreprocessReq(SVnode *pVnode, SRpcMsg *pMsg) { + int32_t code = 0; SDecoder dc = {0}; switch (pMsg->msgType) { @@ -38,9 +39,11 @@ int32_t vnodePreprocessReq(SVnode *pVnode, SRpcMsg *pMsg) { tDecodeI32v(&dc, &nReqs); for (int32_t iReq = 0; iReq < nReqs; iReq++) { tb_uid_t uid = tGenIdPI64(); + char *name = NULL; tStartDecode(&dc); tDecodeI32v(&dc, NULL); + tDecodeCStr(&dc, &name); *(int64_t *)(dc.data + dc.pos) = uid; *(int64_t *)(dc.data + dc.pos + 8) = ctime; @@ -64,12 +67,18 @@ int32_t vnodePreprocessReq(SVnode *pVnode, SRpcMsg *pMsg) { if (pBlock == NULL) break; if (msgIter.schemaLen > 0) { - uid = tGenIdPI64(); + char *name = NULL; tDecoderInit(&dc, pBlock->data, msgIter.schemaLen); tStartDecode(&dc); tDecodeI32v(&dc, NULL); + tDecodeCStr(&dc, &name); + + uid = metaGetTableEntryUidByName(pVnode->pMeta, name); + if (uid == 0) { + uid = tGenIdPI64(); + } *(int64_t *)(dc.data + dc.pos) = uid; *(int64_t *)(dc.data + dc.pos + 8) = ctime; pBlock->uid = htobe64(uid); @@ -80,11 +89,14 @@ int32_t vnodePreprocessReq(SVnode *pVnode, SRpcMsg *pMsg) { } } break; + case TDMT_VND_ALTER_REPLICA: { + code = vnodeSyncAlter(pVnode, pMsg); + } break; default: break; } - return 0; + return code; } int vnodeProcessWriteReq(SVnode *pVnode, SRpcMsg *pMsg, int64_t version, SRpcMsg *pRsp) { @@ -93,7 +105,7 @@ int vnodeProcessWriteReq(SVnode *pVnode, SRpcMsg *pMsg, int64_t version, SRpcMsg int len; int ret; - vTrace("vgId:%d start to process write request %s, version %" PRId64, TD_VID(pVnode), TMSG_INFO(pMsg->msgType), + vTrace("vgId:%d, start to process write request %s, version %" PRId64, TD_VID(pVnode), TMSG_INFO(pMsg->msgType), version); pVnode->state.applied = version; @@ -146,23 +158,23 @@ int vnodeProcessWriteReq(SVnode *pVnode, SRpcMsg *pMsg, int64_t version, SRpcMsg pMsg->contLen - sizeof(SMsgHead)) < 0) { } } break; - case TDMT_VND_ALTER_VNODE: + case TDMT_VND_ALTER_CONFIG: break; default: ASSERT(0); break; } - vDebug("vgId:%d process %s request success, version: %" PRId64, TD_VID(pVnode), TMSG_INFO(pMsg->msgType), version); + vTrace("vgId:%d, process %s request success, version: %" PRId64, TD_VID(pVnode), TMSG_INFO(pMsg->msgType), version); if (tqPushMsg(pVnode->pTq, pMsg->pCont, pMsg->contLen, pMsg->msgType, version) < 0) { - vError("vgId:%d failed to push msg to TQ since 
%s", TD_VID(pVnode), tstrerror(terrno)); + vError("vgId:%d, failed to push msg to TQ since %s", TD_VID(pVnode), tstrerror(terrno)); return -1; } // commit if need if (vnodeShouldCommit(pVnode)) { - vInfo("vgId:%d commit at version %" PRId64, TD_VID(pVnode), version); + vInfo("vgId:%d, commit at version %" PRId64, TD_VID(pVnode), version); // commit current change vnodeCommit(pVnode); @@ -173,7 +185,7 @@ int vnodeProcessWriteReq(SVnode *pVnode, SRpcMsg *pMsg, int64_t version, SRpcMsg return 0; _err: - vDebug("vgId:%d process %s request failed since %s, version: %" PRId64, TD_VID(pVnode), TMSG_INFO(pMsg->msgType), + vError("vgId:%d, process %s request failed since %s, version: %" PRId64, TD_VID(pVnode), TMSG_INFO(pMsg->msgType), tstrerror(terrno), version); return -1; } @@ -183,9 +195,9 @@ int vnodeProcessQueryMsg(SVnode *pVnode, SRpcMsg *pMsg) { SReadHandle handle = {.meta = pVnode->pMeta, .config = &pVnode->config, .vnode = pVnode, .pMsgCb = &pVnode->msgCb}; switch (pMsg->msgType) { case TDMT_VND_QUERY: - return qWorkerProcessQueryMsg(&handle, pVnode->pQuery, pMsg); + return qWorkerProcessQueryMsg(&handle, pVnode->pQuery, pMsg, 0); case TDMT_VND_QUERY_CONTINUE: - return qWorkerProcessCQueryMsg(&handle, pVnode->pQuery, pMsg); + return qWorkerProcessCQueryMsg(&handle, pVnode->pQuery, pMsg, 0); default: vError("unknown msg type:%d in query queue", pMsg->msgType); return TSDB_CODE_VND_APP_ERROR; @@ -198,17 +210,16 @@ int vnodeProcessFetchMsg(SVnode *pVnode, SRpcMsg *pMsg, SQueueInfo *pInfo) { int32_t msgLen = pMsg->contLen - sizeof(SMsgHead); switch (pMsg->msgType) { case TDMT_VND_FETCH: - return qWorkerProcessFetchMsg(pVnode, pVnode->pQuery, pMsg); + return qWorkerProcessFetchMsg(pVnode, pVnode->pQuery, pMsg, 0); case TDMT_VND_FETCH_RSP: - return qWorkerProcessFetchRsp(pVnode, pVnode->pQuery, pMsg); - case TDMT_VND_RES_READY: - return qWorkerProcessReadyMsg(pVnode, pVnode->pQuery, pMsg); - case TDMT_VND_TASKS_STATUS: - return qWorkerProcessStatusMsg(pVnode, pVnode->pQuery, pMsg); + return qWorkerProcessFetchRsp(pVnode, pVnode->pQuery, pMsg, 0); case TDMT_VND_CANCEL_TASK: - return qWorkerProcessCancelMsg(pVnode, pVnode->pQuery, pMsg); + return qWorkerProcessCancelMsg(pVnode, pVnode->pQuery, pMsg, 0); case TDMT_VND_DROP_TASK: - return qWorkerProcessDropMsg(pVnode, pVnode->pQuery, pMsg); + return qWorkerProcessDropMsg(pVnode, pVnode->pQuery, pMsg, 0); + case TDMT_VND_QUERY_HEARTBEAT: + return qWorkerProcessHbMsg(pVnode, pVnode->pQuery, pMsg, 0); + case TDMT_VND_TABLE_META: return vnodeGetTableMeta(pVnode, pMsg); case TDMT_VND_CONSUME: @@ -227,9 +238,6 @@ int vnodeProcessFetchMsg(SVnode *pVnode, SRpcMsg *pMsg, SQueueInfo *pInfo) { return tqProcessTaskDispatchRsp(pVnode->pTq, pMsg); case TDMT_VND_TASK_RECOVER_RSP: return tqProcessTaskRecoverRsp(pVnode->pTq, pMsg); - - case TDMT_VND_QUERY_HEARTBEAT: - return qWorkerProcessHbMsg(pVnode, pVnode->pQuery, pMsg); default: vError("unknown msg type:%d in fetch queue", pMsg->msgType); return TSDB_CODE_VND_APP_ERROR; @@ -244,6 +252,13 @@ void smaHandleRes(void *pVnode, int64_t smaId, const SArray *data) { tdProcessTSmaInsert(((SVnode *)pVnode)->pSma, smaId, (const char *)data); } +void vnodeUpdateMetaRsp(SVnode *pVnode, STableMetaRsp *pMetaRsp) { + strcpy(pMetaRsp->dbFName, pVnode->config.dbname); + pMetaRsp->dbId = pVnode->config.dbId; + pMetaRsp->vgId = TD_VID(pVnode); + pMetaRsp->precision = pVnode->config.tsdbCfg.precision; +} + int vnodeProcessSyncReq(SVnode *pVnode, SRpcMsg *pMsg, SRpcMsg **pRsp) { int32_t ret = TAOS_SYNC_PROPOSE_OTHER_ERROR; @@ 
-256,7 +271,7 @@ int vnodeProcessSyncReq(SVnode *pVnode, SRpcMsg *pMsg, SRpcMsg **pRsp) { SMsgHead *pHead = pMsg->pCont; - char logBuf[512]; + char logBuf[512] = {0}; char *syncNodeStr = sync2SimpleStr(pVnode->sync); snprintf(logBuf, sizeof(logBuf), "==vnodeProcessSyncReq== msgType:%d, syncNode: %s", pMsg->msgType, syncNodeStr); syncRpcMsgLog2(logBuf, pMsg); @@ -356,7 +371,7 @@ static int vnodeProcessCreateStbReq(SVnode *pVnode, int64_t version, void *pReq, goto _err; } - tdProcessRSmaCreate(pVnode->pSma, pVnode->pMeta, &req, &pVnode->msgCb); + tdProcessRSmaCreate(pVnode, &req); tDecoderClear(&coder); return 0; @@ -513,12 +528,13 @@ _exit: } static int vnodeProcessAlterTbReq(SVnode *pVnode, int64_t version, void *pReq, int32_t len, SRpcMsg *pRsp) { - SVAlterTbReq vAlterTbReq = {0}; - SVAlterTbRsp vAlterTbRsp = {0}; - SDecoder dc = {0}; - int rcode = 0; - int ret; - SEncoder ec = {0}; + SVAlterTbReq vAlterTbReq = {0}; + SVAlterTbRsp vAlterTbRsp = {0}; + SDecoder dc = {0}; + int rcode = 0; + int ret; + SEncoder ec = {0}; + STableMetaRsp vMetaRsp = {0}; pRsp->msgType = TDMT_VND_ALTER_TABLE_RSP; pRsp->pCont = NULL; @@ -536,7 +552,7 @@ static int vnodeProcessAlterTbReq(SVnode *pVnode, int64_t version, void *pReq, i } // process - if (metaAlterTable(pVnode->pMeta, version, &vAlterTbReq) < 0) { + if (metaAlterTable(pVnode->pMeta, version, &vAlterTbReq, &vMetaRsp) < 0) { vAlterTbRsp.code = TSDB_CODE_INVALID_MSG; tDecoderClear(&dc); rcode = -1; @@ -544,6 +560,11 @@ static int vnodeProcessAlterTbReq(SVnode *pVnode, int64_t version, void *pReq, i } tDecoderClear(&dc); + if (NULL != vMetaRsp.pSchemas) { + vnodeUpdateMetaRsp(pVnode, &vMetaRsp); + vAlterTbRsp.pMeta = &vMetaRsp; + } + _exit: tEncodeSize(tEncodeSVAlterTbRsp, &vAlterTbRsp, pRsp->contLen, ret); pRsp->pCont = rpcMallocCont(pRsp->contLen); @@ -674,6 +695,7 @@ static int vnodeProcessSubmitReq(SVnode *pVnode, int64_t version, void *pReq, in int32_t nRows; int32_t tsize, ret; SEncoder encoder = {0}; + SArray *newTbUids = NULL; terrno = TSDB_CODE_SUCCESS; pRsp->code = 0; @@ -694,6 +716,7 @@ static int vnodeProcessSubmitReq(SVnode *pVnode, int64_t version, void *pReq, in } submitRsp.pArray = taosArrayInit(pSubmitReq->numOfBlocks, sizeof(SSubmitBlkRsp)); + newTbUids = taosArrayInit(pSubmitReq->numOfBlocks, sizeof(int64_t)); if (!submitRsp.pArray) { pRsp->code = TSDB_CODE_OUT_OF_MEMORY; goto _exit; @@ -723,6 +746,7 @@ static int vnodeProcessSubmitReq(SVnode *pVnode, int64_t version, void *pReq, in goto _exit; } } + taosArrayPush(newTbUids, &createTbReq.uid); submitBlkRsp.uid = createTbReq.uid; submitBlkRsp.tblFName = taosMemoryMalloc(strlen(pVnode->config.dbname) + strlen(createTbReq.name) + 2); @@ -750,8 +774,10 @@ static int vnodeProcessSubmitReq(SVnode *pVnode, int64_t version, void *pReq, in submitRsp.affectedRows += submitBlkRsp.affectedRows; taosArrayPush(submitRsp.pArray, &submitBlkRsp); } + tqUpdateTbUidList(pVnode->pTq, newTbUids, true); _exit: + taosArrayDestroy(newTbUids); tEncodeSize(tEncodeSSubmitRsp, &submitRsp, tsize, ret); pRsp->pCont = rpcMallocCont(tsize); pRsp->contLen = tsize; @@ -767,28 +793,30 @@ _exit: // TODO: the partial success scenario and the error case // TODO: refactor - if ((terrno == TSDB_CODE_SUCCESS || terrno == TSDB_CODE_TDB_TABLE_ALREADY_EXIST) && - (pRsp->code == TSDB_CODE_SUCCESS)) { + if ((terrno == TSDB_CODE_SUCCESS) && (pRsp->code == TSDB_CODE_SUCCESS)) { tdProcessRSmaSubmit(pVnode->pSma, pReq, STREAM_DATA_TYPE_SUBMIT_BLOCK); } return 0; } -static int vnodeProcessCreateTSmaReq(SVnode *pVnode, int64_t version, 
void *pReq, int len, SRpcMsg *pRsp) { +static int vnodeProcessCreateTSmaReq(SVnode *pVnode, int64_t version, void *pReq, int32_t len, SRpcMsg *pRsp) { SVCreateTSmaReq req = {0}; SDecoder coder; - pRsp->msgType = TDMT_VND_CREATE_SMA_RSP; - pRsp->code = TSDB_CODE_SUCCESS; - pRsp->pCont = NULL; - pRsp->contLen = 0; + if (pRsp) { + pRsp->msgType = TDMT_VND_CREATE_SMA_RSP; + pRsp->code = TSDB_CODE_SUCCESS; + pRsp->pCont = NULL; + pRsp->contLen = 0; + } // decode and process req tDecoderInit(&coder, pReq, len); if (tDecodeSVCreateTSmaReq(&coder, &req) < 0) { - pRsp->code = terrno; + terrno = TSDB_CODE_MSG_DECODE_ERROR; + if (pRsp) pRsp->code = terrno; goto _err; } @@ -796,18 +824,30 @@ static int vnodeProcessCreateTSmaReq(SVnode *pVnode, int64_t version, void *pReq req.timezoneInt = tsTimezone; if (tdProcessTSmaCreate(pVnode->pSma, version, (const char *)&req) < 0) { - pRsp->code = terrno; + if (pRsp) pRsp->code = terrno; goto _err; } tDecoderClear(&coder); - vDebug("vgId:%d success to create tsma %s:%" PRIi64 " for table %" PRIi64, TD_VID(pVnode), req.indexName, - req.indexUid, req.tableUid); + vDebug("vgId:%d, success to create tsma %s:%" PRIi64 " version %" PRIi64 " for table %" PRIi64, TD_VID(pVnode), + req.indexName, req.indexUid, version, req.tableUid); return 0; _err: tDecoderClear(&coder); - vError("vgId:%d failed to create tsma %s:%" PRIi64 " for table %" PRIi64 " since %s", TD_VID(pVnode), req.indexName, - req.indexUid, req.tableUid, terrstr(terrno)); + vError("vgId:%d, failed to create tsma %s:%" PRIi64 " version %" PRIi64 "for table %" PRIi64 " since %s", + TD_VID(pVnode), req.indexName, req.indexUid, version, req.tableUid, terrstr(terrno)); return -1; } + +/** + * @brief specific for smaDstVnode + * + * @param pVnode + * @param pCont + * @param contLen + * @return int32_t + */ +int32_t vnodeProcessCreateTSma(SVnode *pVnode, void *pCont, uint32_t contLen) { + return vnodeProcessCreateTSmaReq(pVnode, 1, pCont, contLen, NULL); +} diff --git a/source/dnode/vnode/src/vnd/vnodeSync.c b/source/dnode/vnode/src/vnd/vnodeSync.c index d8f3110a16fbd118e966a34d2d8d8d8c58519f54..37f765d786bf14476f64643bb3803a9e21690b51 100644 --- a/source/dnode/vnode/src/vnd/vnodeSync.c +++ b/source/dnode/vnode/src/vnd/vnodeSync.c @@ -27,6 +27,7 @@ static int32_t vnodeSyncGetSnapshot(SSyncFSM *pFsm, SSnapshot *pSnapshot); int32_t vnodeSyncOpen(SVnode *pVnode, char *path) { SSyncInfo syncInfo = { .vgId = pVnode->config.vgId, + .isStandBy = pVnode->config.standby, .syncCfg = pVnode->config.syncCfg, .pWal = pVnode->pWal, .msgcb = NULL, @@ -49,28 +50,74 @@ int32_t vnodeSyncOpen(SVnode *pVnode, char *path) { return 0; } +int32_t vnodeSyncAlter(SVnode *pVnode, SRpcMsg *pMsg) { + SAlterVnodeReq req = {0}; + if (tDeserializeSAlterVnodeReq((char *)pMsg->pCont + sizeof(SMsgHead), pMsg->contLen - sizeof(SMsgHead), &req) != 0) { + terrno = TSDB_CODE_INVALID_MSG; + return TSDB_CODE_INVALID_MSG; + } + + vInfo("vgId:%d, start to alter vnode replica to %d", TD_VID(pVnode), req.replica); + SSyncCfg cfg = {.replicaNum = req.replica, .myIndex = req.selfIndex}; + for (int32_t r = 0; r < req.replica; ++r) { + SNodeInfo *pNode = &cfg.nodeInfo[r]; + tstrncpy(pNode->nodeFqdn, req.replicas[r].fqdn, sizeof(pNode->nodeFqdn)); + pNode->nodePort = req.replicas[r].port; + vInfo("vgId:%d, replica:%d %s:%u", TD_VID(pVnode), r, pNode->nodeFqdn, pNode->nodePort); + } + + int32_t code = syncReconfig(pVnode->sync, &cfg); + if (code == TAOS_SYNC_PROPOSE_SUCCESS) { + // todo refactor + SRpcMsg rsp = {.info = pMsg->info, .code = terrno}; + 
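    // A successful proposal completes asynchronously inside the sync module,
    // so the client RPC is answered here and TSDB_CODE_ACTION_IN_PROGRESS is
    // returned, presumably so the regular write path does not reply a second
    // time; rsp.code is taken from terrno, which on this path should still
    // hold 0 (success) rather than an explicitly cleared value.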
tmsgSendRsp(&rsp); + return TSDB_CODE_ACTION_IN_PROGRESS; + } + + return code; +} + void vnodeSyncStart(SVnode *pVnode) { syncSetMsgCb(pVnode->sync, &pVnode->msgCb); - syncStart(pVnode->sync); + if (pVnode->config.standby) { + syncStartStandBy(pVnode->sync); + } else { + syncStart(pVnode->sync); + } } void vnodeSyncClose(SVnode *pVnode) { syncStop(pVnode->sync); } -int32_t vnodeSyncEqMsg(const SMsgCb *msgcb, SRpcMsg *pMsg) { +int32_t vnodeSyncEqMsg(const SMsgCb *msgcb, SRpcMsg *pMsg) { int32_t code = tmsgPutToQueue(msgcb, SYNC_QUEUE, pMsg); if (code != 0) { rpcFreeCont(pMsg->pCont); + pMsg->pCont = NULL; } return code; } -int32_t vnodeSyncSendMsg(const SEpSet *pEpSet, SRpcMsg *pMsg) { return tmsgSendReq(pEpSet, pMsg); } +int32_t vnodeSyncSendMsg(const SEpSet *pEpSet, SRpcMsg *pMsg) { + int32_t code = tmsgSendReq(pEpSet, pMsg); + if (code != 0) { + rpcFreeCont(pMsg->pCont); + pMsg->pCont = NULL; + } + return code; +} int32_t vnodeSyncGetSnapshot(SSyncFSM *pFsm, SSnapshot *pSnapshot) { vnodeGetSnapshot(pFsm->data, pSnapshot); return 0; } +void vnodeSyncReconfig(struct SSyncFSM *pFsm, SSyncCfg newCfg, SReConfigCbMeta cbMeta) { + SVnode *pVnode = pFsm->data; + vInfo("vgId:%d, sync reconfig is confirmed", TD_VID(pVnode)); + + // todo rpc response here +} + void vnodeSyncCommitMsg(SSyncFSM *pFsm, const SRpcMsg *pMsg, SFsmCbMeta cbMeta) { SyncIndex beginIndex = SYNC_INDEX_INVALID; if (pFsm->FpGetSnapshot != NULL) { @@ -80,27 +127,19 @@ void vnodeSyncCommitMsg(SSyncFSM *pFsm, const SRpcMsg *pMsg, SFsmCbMeta cbMeta) } if (cbMeta.index > beginIndex) { - char logBuf[256]; + char logBuf[256] = {0}; snprintf( logBuf, sizeof(logBuf), "==callback== ==CommitCb== execute, pFsm:%p, index:%ld, isWeak:%d, code:%d, state:%d %s, beginIndex :%ld\n", pFsm, cbMeta.index, cbMeta.isWeak, cbMeta.code, cbMeta.state, syncUtilState2String(cbMeta.state), beginIndex); syncRpcMsgLog2(logBuf, (SRpcMsg *)pMsg); - SVnode *pVnode = (SVnode *)(pFsm->data); + SVnode *pVnode = pFsm->data; SyncApplyMsg *pSyncApplyMsg = syncApplyMsgBuild2(pMsg, pVnode->config.vgId, &cbMeta); SRpcMsg applyMsg; syncApplyMsg2RpcMsg(pSyncApplyMsg, &applyMsg); syncApplyMsgDestroy(pSyncApplyMsg); - /* - SRpcMsg applyMsg; - applyMsg = *pMsg; - applyMsg.pCont = rpcMallocCont(applyMsg.contLen); - assert(applyMsg.contLen == pMsg->contLen); - memcpy(applyMsg.pCont, pMsg->pCont, applyMsg.contLen); - */ - // recover handle for response SRpcMsg saveRpcMsg; int32_t ret = syncGetAndDelRespRpc(pVnode->sync, cbMeta.seqNum, &saveRpcMsg); @@ -115,7 +154,7 @@ void vnodeSyncCommitMsg(SSyncFSM *pFsm, const SRpcMsg *pMsg, SFsmCbMeta cbMeta) tmsgPutToQueue(&(pVnode->msgCb), APPLY_QUEUE, &applyMsg); } else { - char logBuf[256]; + char logBuf[256] = {0}; snprintf(logBuf, sizeof(logBuf), "==callback== ==CommitCb== do not execute, pFsm:%p, index:%ld, isWeak:%d, code:%d, state:%d %s, " "beginIndex :%ld\n", @@ -126,7 +165,7 @@ void vnodeSyncCommitMsg(SSyncFSM *pFsm, const SRpcMsg *pMsg, SFsmCbMeta cbMeta) } void vnodeSyncPreCommitMsg(SSyncFSM *pFsm, const SRpcMsg *pMsg, SFsmCbMeta cbMeta) { - char logBuf[256]; + char logBuf[256] = {0}; snprintf(logBuf, sizeof(logBuf), "==callback== ==PreCommitCb== pFsm:%p, index:%ld, isWeak:%d, code:%d, state:%d %s \n", pFsm, cbMeta.index, cbMeta.isWeak, cbMeta.code, cbMeta.state, syncUtilState2String(cbMeta.state)); @@ -134,7 +173,7 @@ void vnodeSyncPreCommitMsg(SSyncFSM *pFsm, const SRpcMsg *pMsg, SFsmCbMeta cbMet } void vnodeSyncRollBackMsg(SSyncFSM *pFsm, const SRpcMsg *pMsg, SFsmCbMeta cbMeta) { - char logBuf[256]; + char logBuf[256] = {0}; 
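  // Zero-initialising the buffer guarantees logBuf is a valid, NUL-terminated
  // (empty) string even if a code path reads it without a successful
  // snprintf(); snprintf() itself always NUL-terminates its output whenever
  // the size argument is greater than zero.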
snprintf(logBuf, sizeof(logBuf), "==callback== ==RollBackCb== pFsm:%p, index:%ld, isWeak:%d, code:%d, state:%d %s \n", pFsm, cbMeta.index, cbMeta.isWeak, cbMeta.code, cbMeta.state, syncUtilState2String(cbMeta.state)); syncRpcMsgLog2(logBuf, (SRpcMsg *)pMsg); @@ -148,9 +187,7 @@ SSyncFSM *vnodeSyncMakeFsm(SVnode *pVnode) { pFsm->FpRollBackCb = vnodeSyncRollBackMsg; pFsm->FpGetSnapshot = vnodeSyncGetSnapshot; pFsm->FpRestoreFinishCb = NULL; - pFsm->FpSnapshotRead = NULL; - pFsm->FpSnapshotApply = NULL; - pFsm->FpReConfigCb = NULL; + pFsm->FpReConfigCb = vnodeSyncReconfig; return pFsm; } \ No newline at end of file diff --git a/source/dnode/vnode/test/tsdbSmaTest.cpp b/source/dnode/vnode/test/tsdbSmaTest.cpp index ab617cb18660bc6663b500d7ef9da60a5c2d9fa5..3b8c94e413ee866441f5d7514e13986d31fc4137 100644 --- a/source/dnode/vnode/test/tsdbSmaTest.cpp +++ b/source/dnode/vnode/test/tsdbSmaTest.cpp @@ -147,8 +147,8 @@ TEST(testCase, tSma_Meta_Encode_Decode_Test) { // resource release taosMemoryFreeClear(pSW); - tdDestroyTSma(&tSma); - tdDestroyTSmaWrapper(&dstTSmaWrapper); + tDestroyTSma(&tSma); + tDestroyTSmaWrapper(&dstTSmaWrapper); } #endif @@ -218,7 +218,7 @@ TEST(testCase, tSma_metaDB_Put_Get_Del_Test) { printf("tagsFilter1 = %s\n", qSmaCfg->tagsFilter != NULL ? qSmaCfg->tagsFilter : ""); EXPECT_STRCASEEQ(qSmaCfg->indexName, smaIndexName1); EXPECT_EQ(qSmaCfg->tableUid, tSma.tableUid); - tdDestroyTSma(qSmaCfg); + tDestroyTSma(qSmaCfg); taosMemoryFreeClear(qSmaCfg); qSmaCfg = metaGetSmaInfoByIndex(pMeta, indexUid2, true); @@ -229,7 +229,7 @@ TEST(testCase, tSma_metaDB_Put_Get_Del_Test) { printf("tagsFilter2 = %s\n", qSmaCfg->tagsFilter != NULL ? qSmaCfg->tagsFilter : ""); EXPECT_STRCASEEQ(qSmaCfg->indexName, smaIndexName2); EXPECT_EQ(qSmaCfg->interval, tSma.interval); - tdDestroyTSma(qSmaCfg); + tDestroyTSma(qSmaCfg); taosMemoryFreeClear(qSmaCfg); // get index name by table uid @@ -265,7 +265,7 @@ TEST(testCase, tSma_metaDB_Put_Get_Del_Test) { EXPECT_EQ((pSW->tSma + 1)->indexUid, indexUid2); EXPECT_EQ((pSW->tSma + 1)->tableUid, tbUid); - tdDestroyTSmaWrapper(pSW); + tDestroyTSmaWrapper(pSW); taosMemoryFreeClear(pSW); // get all sma table uids @@ -282,7 +282,7 @@ TEST(testCase, tSma_metaDB_Put_Get_Del_Test) { metaRemoveSmaFromDb(pMeta, indexUid1); metaRemoveSmaFromDb(pMeta, indexUid2); - tdDestroyTSma(&tSma); + tDestroyTSma(&tSma); metaClose(pMeta); } #endif @@ -368,7 +368,7 @@ TEST(testCase, tSma_Data_Insert_Query_Test) { SDiskCfg pDisks = {0}; pDisks.level = 0; pDisks.primary = 1; - strncpy(pDisks.dir, "/var/lib/taos", TSDB_FILENAME_LEN); + strncpy(pDisks.dir, TD_DATA_DIR_PATH, TSDB_FILENAME_LEN); int32_t numOfDisks = 1; pTsdb->pTfs = tfsOpen(&pDisks, numOfDisks); EXPECT_NE(pTsdb->pTfs, nullptr); @@ -576,7 +576,7 @@ TEST(testCase, tSma_Data_Insert_Query_Test) { taosArrayDestroy(pDataBlocks); // release meta - tdDestroyTSma(&tSma); + tDestroyTSma(&tSma); tfsClose(pTsdb->pTfs); tsdbClose(pTsdb); metaClose(pMeta); diff --git a/source/libs/catalog/inc/catalogInt.h b/source/libs/catalog/inc/catalogInt.h index 40bd3659a300b58b707774cce0a45728c9755ee3..239d719fa80eb3b4fbf9b075e165dc50051b3e52 100644 --- a/source/libs/catalog/inc/catalogInt.h +++ b/source/libs/catalog/inc/catalogInt.h @@ -49,19 +49,21 @@ enum { }; enum { - CTG_ACT_UPDATE_VG = 0, - CTG_ACT_UPDATE_TBL, - CTG_ACT_REMOVE_DB, - CTG_ACT_REMOVE_STB, - CTG_ACT_REMOVE_TBL, - CTG_ACT_UPDATE_USER, - CTG_ACT_MAX + CTG_OP_UPDATE_VGROUP = 0, + CTG_OP_UPDATE_TB_META, + CTG_OP_DROP_DB_CACHE, + CTG_OP_DROP_STB_META, + CTG_OP_DROP_TB_META, + 
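  // NB: these CTG_OP_* values also index the gCtgCacheOperation[] table in
  // ctgCache.c, so the enum order and the table order must stay in lockstep,
  // with CTG_OP_MAX as the shared bound; CTG_OP_UPDATE_VG_EPSET / "update
  // epset" is the newest such pair.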
CTG_OP_UPDATE_USER, + CTG_OP_UPDATE_VG_EPSET, + CTG_OP_MAX }; typedef enum { CTG_TASK_GET_QNODE = 0, CTG_TASK_GET_DB_VGROUP, CTG_TASK_GET_DB_CFG, + CTG_TASK_GET_DB_INFO, CTG_TASK_GET_TB_META, CTG_TASK_GET_TB_HASH, CTG_TASK_GET_INDEX, @@ -98,6 +100,10 @@ typedef struct SCtgDbCfgCtx { char dbFName[TSDB_DB_FNAME_LEN]; } SCtgDbCfgCtx; +typedef struct SCtgDbInfoCtx { + char dbFName[TSDB_DB_FNAME_LEN]; +} SCtgDbInfoCtx; + typedef struct SCtgTbHashCtx { char dbFName[TSDB_DB_FNAME_LEN]; SName* pName; @@ -182,6 +188,7 @@ typedef struct SCtgJob { int32_t dbCfgNum; int32_t indexNum; int32_t userNum; + int32_t dbInfoNum; } SCtgJob; typedef struct SCtgMsgCtx { @@ -285,16 +292,22 @@ typedef struct SCtgUpdateUserMsg { SGetUserAuthRsp userAuth; } SCtgUpdateUserMsg; +typedef struct SCtgUpdateEpsetMsg { + SCatalog* pCtg; + char dbFName[TSDB_DB_FNAME_LEN]; + int32_t vgId; + SEpSet epSet; +} SCtgUpdateEpsetMsg; -typedef struct SCtgMetaAction { - int32_t act; +typedef struct SCtgCacheOperation { + int32_t opId; void *data; - bool syncReq; + bool syncOp; uint64_t seqId; -} SCtgMetaAction; +} SCtgCacheOperation; typedef struct SCtgQNode { - SCtgMetaAction action; + SCtgCacheOperation op; struct SCtgQNode *next; } SCtgQNode; @@ -321,24 +334,24 @@ typedef struct SCatalogMgmt { } SCatalogMgmt; typedef uint32_t (*tableNameHashFp)(const char *, uint32_t); -typedef int32_t (*ctgActFunc)(SCtgMetaAction *); +typedef int32_t (*ctgOpFunc)(SCtgCacheOperation *); -typedef struct SCtgAction { - int32_t actId; +typedef struct SCtgOperation { + int32_t opId; char name[32]; - ctgActFunc func; -} SCtgAction; + ctgOpFunc func; +} SCtgOperation; -#define CTG_QUEUE_ADD() atomic_add_fetch_64(&gCtgMgmt.queue.qRemainNum, 1) -#define CTG_QUEUE_SUB() atomic_sub_fetch_64(&gCtgMgmt.queue.qRemainNum, 1) +#define CTG_QUEUE_INC() atomic_add_fetch_64(&gCtgMgmt.queue.qRemainNum, 1) +#define CTG_QUEUE_DEC() atomic_sub_fetch_64(&gCtgMgmt.queue.qRemainNum, 1) -#define CTG_STAT_ADD(_item, _n) atomic_add_fetch_64(&(_item), _n) -#define CTG_STAT_SUB(_item, _n) atomic_sub_fetch_64(&(_item), _n) +#define CTG_STAT_INC(_item, _n) atomic_add_fetch_64(&(_item), _n) +#define CTG_STAT_DEC(_item, _n) atomic_sub_fetch_64(&(_item), _n) #define CTG_STAT_GET(_item) atomic_load_64(&(_item)) -#define CTG_RUNTIME_STAT_ADD(item, n) (CTG_STAT_ADD(gCtgMgmt.stat.runtime.item, n)) -#define CTG_CACHE_STAT_ADD(item, n) (CTG_STAT_ADD(gCtgMgmt.stat.cache.item, n)) -#define CTG_CACHE_STAT_SUB(item, n) (CTG_STAT_SUB(gCtgMgmt.stat.cache.item, n)) +#define CTG_RT_STAT_INC(item, n) (CTG_STAT_INC(gCtgMgmt.stat.runtime.item, n)) +#define CTG_CACHE_STAT_INC(item, n) (CTG_STAT_INC(gCtgMgmt.stat.cache.item, n)) +#define CTG_CACHE_STAT_DEC(item, n) (CTG_STAT_DEC(gCtgMgmt.stat.cache.item, n)) #define CTG_IS_META_NULL(type) ((type) == META_TYPE_NULL_TABLE) #define CTG_IS_META_CTABLE(type) ((type) == META_TYPE_CTABLE) @@ -435,12 +448,13 @@ int32_t ctgdShowCacheInfo(void); int32_t ctgRemoveTbMetaFromCache(SCatalog* pCtg, SName* pTableName, bool syncReq); int32_t ctgGetTbMetaFromCache(CTG_PARAMS, SCtgTbMetaCtx* ctx, STableMeta** pTableMeta); -int32_t ctgActUpdateVg(SCtgMetaAction *action); -int32_t ctgActUpdateTb(SCtgMetaAction *action); -int32_t ctgActRemoveDB(SCtgMetaAction *action); -int32_t ctgActRemoveStb(SCtgMetaAction *action); -int32_t ctgActRemoveTb(SCtgMetaAction *action); -int32_t ctgActUpdateUser(SCtgMetaAction *action); +int32_t ctgOpUpdateVgroup(SCtgCacheOperation *action); +int32_t ctgOpUpdateTbMeta(SCtgCacheOperation *action); +int32_t ctgOpDropDbCache(SCtgCacheOperation 
*action); +int32_t ctgOpDropStbMeta(SCtgCacheOperation *action); +int32_t ctgOpDropTbMeta(SCtgCacheOperation *action); +int32_t ctgOpUpdateUser(SCtgCacheOperation *action); +int32_t ctgOpUpdateEpset(SCtgCacheOperation *operation); int32_t ctgAcquireVgInfoFromCache(SCatalog* pCtg, const char *dbFName, SCtgDBCache **pCache); void ctgReleaseDBCache(SCatalog *pCtg, SCtgDBCache *dbCache); void ctgReleaseVgInfo(SCtgDBCache *dbCache); @@ -449,12 +463,13 @@ int32_t ctgTbMetaExistInCache(SCatalog* pCtg, char *dbFName, char* tbName, int32 int32_t ctgReadTbMetaFromCache(SCatalog* pCtg, SCtgTbMetaCtx* ctx, STableMeta** pTableMeta); int32_t ctgReadTbVerFromCache(SCatalog *pCtg, const SName *pTableName, int32_t *sver, int32_t *tver, int32_t *tbType, uint64_t *suid, char *stbName); int32_t ctgChkAuthFromCache(SCatalog* pCtg, const char* user, const char* dbFName, AUTH_TYPE type, bool *inCache, bool *pass); -int32_t ctgPutRmDBToQueue(SCatalog* pCtg, const char *dbFName, int64_t dbId); -int32_t ctgPutRmStbToQueue(SCatalog* pCtg, const char *dbFName, int64_t dbId, const char *stbName, uint64_t suid, bool syncReq); -int32_t ctgPutRmTbToQueue(SCatalog* pCtg, const char *dbFName, int64_t dbId, const char *tbName, bool syncReq); -int32_t ctgPutUpdateVgToQueue(SCatalog* pCtg, const char *dbFName, int64_t dbId, SDBVgInfo* dbInfo, bool syncReq); -int32_t ctgPutUpdateTbToQueue(SCatalog* pCtg, STableMetaOutput *output, bool syncReq); -int32_t ctgPutUpdateUserToQueue(SCatalog* pCtg, SGetUserAuthRsp *pAuth, bool syncReq); +int32_t ctgDropDbCacheEnqueue(SCatalog* pCtg, const char *dbFName, int64_t dbId); +int32_t ctgDropStbMetaEnqueue(SCatalog* pCtg, const char *dbFName, int64_t dbId, const char *stbName, uint64_t suid, bool syncReq); +int32_t ctgDropTbMetaEnqueue(SCatalog* pCtg, const char *dbFName, int64_t dbId, const char *tbName, bool syncReq); +int32_t ctgUpdateVgroupEnqueue(SCatalog* pCtg, const char *dbFName, int64_t dbId, SDBVgInfo* dbInfo, bool syncReq); +int32_t ctgUpdateTbMetaEnqueue(SCatalog* pCtg, STableMetaOutput *output, bool syncReq); +int32_t ctgUpdateUserEnqueue(SCatalog* pCtg, SGetUserAuthRsp *pAuth, bool syncReq); +int32_t ctgUpdateVgEpsetEnqueue(SCatalog* pCtg, char *dbFName, int32_t vgId, SEpSet* pEpSet); int32_t ctgMetaRentInit(SCtgRentMgmt *mgmt, uint32_t rentSec, int8_t type); int32_t ctgMetaRentAdd(SCtgRentMgmt *mgmt, void *meta, int64_t id, int32_t size); int32_t ctgMetaRentGet(SCtgRentMgmt *mgmt, void **res, uint32_t *num, int32_t size); diff --git a/source/libs/catalog/src/catalog.c b/source/libs/catalog/src/catalog.c index 861de1ab607cef30b7edcfed61e2cae4461e2a90..7e0efe22dbfa2dd61a5c662c1103a1b98f579da8 100644 --- a/source/libs/catalog/src/catalog.c +++ b/source/libs/catalog/src/catalog.c @@ -41,9 +41,9 @@ int32_t ctgRemoveTbMetaFromCache(SCatalog* pCtg, SName* pTableName, bool syncReq tNameGetFullDbName(pTableName, dbFName); if (TSDB_SUPER_TABLE == tblMeta->tableType) { - CTG_ERR_JRET(ctgPutRmStbToQueue(pCtg, dbFName, tbCtx.tbInfo.dbId, pTableName->tname, tblMeta->suid, syncReq)); + CTG_ERR_JRET(ctgDropStbMetaEnqueue(pCtg, dbFName, tbCtx.tbInfo.dbId, pTableName->tname, tblMeta->suid, syncReq)); } else { - CTG_ERR_JRET(ctgPutRmTbToQueue(pCtg, dbFName, tbCtx.tbInfo.dbId, pTableName->tname, syncReq)); + CTG_ERR_JRET(ctgDropTbMetaEnqueue(pCtg, dbFName, tbCtx.tbInfo.dbId, pTableName->tname, syncReq)); } _return: @@ -72,7 +72,7 @@ int32_t ctgGetDBVgInfo(SCatalog* pCtg, void *pTrans, const SEpSet* pMgmtEps, con CTG_ERR_JRET(ctgCloneVgInfo(DbOut.dbVgroup, pInfo)); - 
CTG_ERR_RET(ctgPutUpdateVgToQueue(pCtg, dbFName, DbOut.dbId, DbOut.dbVgroup, false)); + CTG_ERR_RET(ctgUpdateVgroupEnqueue(pCtg, dbFName, DbOut.dbId, DbOut.dbVgroup, false)); return TSDB_CODE_SUCCESS; @@ -108,13 +108,13 @@ int32_t ctgRefreshDBVgInfo(SCatalog* pCtg, void *pTrans, const SEpSet* pMgmtEps, if (code) { if (CTG_DB_NOT_EXIST(code) && (NULL != dbCache)) { ctgDebug("db no longer exist, dbFName:%s, dbId:%" PRIx64, input.db, input.dbId); - ctgPutRmDBToQueue(pCtg, input.db, input.dbId); + ctgDropDbCacheEnqueue(pCtg, input.db, input.dbId); } CTG_ERR_RET(code); } - CTG_ERR_RET(ctgPutUpdateVgToQueue(pCtg, dbFName, DbOut.dbId, DbOut.dbVgroup, true)); + CTG_ERR_RET(ctgUpdateVgroupEnqueue(pCtg, dbFName, DbOut.dbId, DbOut.dbVgroup, true)); return TSDB_CODE_SUCCESS; } @@ -201,7 +201,7 @@ int32_t ctgRefreshTbMeta(CTG_PARAMS, SCtgTbMetaCtx* ctx, STableMetaOutput **pOut CTG_ERR_JRET(ctgCloneMetaOutput(output, pOutput)); } - CTG_ERR_JRET(ctgPutUpdateTbToQueue(pCtg, output, syncReq)); + CTG_ERR_JRET(ctgUpdateTbMetaEnqueue(pCtg, output, syncReq)); return TSDB_CODE_SUCCESS; @@ -298,9 +298,9 @@ _return: } if (TSDB_SUPER_TABLE == ctx->tbInfo.tbType) { - ctgPutRmStbToQueue(pCtg, dbFName, ctx->tbInfo.dbId, ctx->pName->tname, ctx->tbInfo.suid, false); + ctgDropStbMetaEnqueue(pCtg, dbFName, ctx->tbInfo.dbId, ctx->pName->tname, ctx->tbInfo.suid, false); } else { - ctgPutRmTbToQueue(pCtg, dbFName, ctx->tbInfo.dbId, ctx->pName->tname, false); + ctgDropTbMetaEnqueue(pCtg, dbFName, ctx->tbInfo.dbId, ctx->pName->tname, false); } } @@ -314,6 +314,36 @@ _return: CTG_RET(code); } +int32_t ctgUpdateTbMeta(SCatalog* pCtg, STableMetaRsp *rspMsg, bool syncOp) { + STableMetaOutput *output = taosMemoryCalloc(1, sizeof(STableMetaOutput)); + if (NULL == output) { + ctgError("malloc %d failed", (int32_t)sizeof(STableMetaOutput)); + CTG_ERR_RET(TSDB_CODE_CTG_MEM_ERROR); + } + + int32_t code = 0; + + strcpy(output->dbFName, rspMsg->dbFName); + strcpy(output->tbName, rspMsg->tbName); + + output->dbId = rspMsg->dbId; + + SET_META_TYPE_TABLE(output->metaType); + + CTG_ERR_JRET(queryCreateTableMetaFromMsg(rspMsg, rspMsg->tableType == TSDB_SUPER_TABLE, &output->tbMeta)); + + CTG_ERR_JRET(ctgUpdateTbMetaEnqueue(pCtg, output, syncOp)); + + return TSDB_CODE_SUCCESS; + +_return: + + taosMemoryFreeClear(output->tbMeta); + taosMemoryFreeClear(output); + + CTG_RET(code); +} + int32_t ctgChkAuth(SCatalog* pCtg, void *pTrans, const SEpSet* pMgmtEps, const char* user, const char* dbFName, AUTH_TYPE type, bool *pass) { bool inCache = false; @@ -348,7 +378,7 @@ int32_t ctgChkAuth(SCatalog* pCtg, void *pTrans, const SEpSet* pMgmtEps, const c _return: - ctgPutUpdateUserToQueue(pCtg, &authRsp, false); + ctgUpdateUserEnqueue(pCtg, &authRsp, false); return TSDB_CODE_SUCCESS; } @@ -558,7 +588,7 @@ int32_t catalogGetHandle(uint64_t clusterId, SCatalog** catalogHandle) { *catalogHandle = clusterCtg; - CTG_CACHE_STAT_ADD(clusterNum, 1); + CTG_CACHE_STAT_INC(clusterNum, 1); return TSDB_CODE_SUCCESS; @@ -579,7 +609,7 @@ void catalogFreeHandle(SCatalog* pCtg) { return; } - CTG_CACHE_STAT_SUB(clusterNum, 1); + CTG_CACHE_STAT_DEC(clusterNum, 1); uint64_t clusterId = pCtg->clusterId; @@ -670,7 +700,7 @@ int32_t catalogUpdateDBVgInfo(SCatalog* pCtg, const char* dbFName, uint64_t dbId CTG_ERR_JRET(TSDB_CODE_CTG_INVALID_INPUT); } - code = ctgPutUpdateVgToQueue(pCtg, dbFName, dbId, dbInfo, false); + code = ctgUpdateVgroupEnqueue(pCtg, dbFName, dbId, dbInfo, false); _return: @@ -691,7 +721,7 @@ int32_t catalogRemoveDB(SCatalog* pCtg, const char* dbFName, 
uint64_t dbId) { CTG_API_LEAVE(TSDB_CODE_SUCCESS); } - CTG_ERR_JRET(ctgPutRmDBToQueue(pCtg, dbFName, dbId)); + CTG_ERR_JRET(ctgDropDbCacheEnqueue(pCtg, dbFName, dbId)); CTG_API_LEAVE(TSDB_CODE_SUCCESS); @@ -701,7 +731,19 @@ _return: } int32_t catalogUpdateVgEpSet(SCatalog* pCtg, const char* dbFName, int32_t vgId, SEpSet *epSet) { - return 0; + CTG_API_ENTER(); + + int32_t code = 0; + + if (NULL == pCtg || NULL == dbFName || NULL == epSet) { + CTG_API_LEAVE(TSDB_CODE_CTG_INVALID_INPUT); + } + + CTG_ERR_JRET(ctgUpdateVgEpsetEnqueue(pCtg, (char*)dbFName, vgId, epSet)); + +_return: + + CTG_API_LEAVE(code); } int32_t catalogRemoveTableMeta(SCatalog* pCtg, SName* pTableName) { @@ -738,7 +780,7 @@ int32_t catalogRemoveStbMeta(SCatalog* pCtg, const char* dbFName, uint64_t dbId, CTG_API_LEAVE(TSDB_CODE_SUCCESS); } - CTG_ERR_JRET(ctgPutRmStbToQueue(pCtg, dbFName, dbId, stbName, suid, true)); + CTG_ERR_JRET(ctgDropStbMetaEnqueue(pCtg, dbFName, dbId, stbName, suid, true)); CTG_API_LEAVE(TSDB_CODE_SUCCESS); @@ -767,38 +809,17 @@ int32_t catalogGetSTableMeta(SCatalog* pCtg, void * pTrans, const SEpSet* pMgmtE CTG_API_LEAVE(ctgGetTbMeta(CTG_PARAMS_LIST(), &ctx, pTableMeta)); } -int32_t catalogUpdateSTableMeta(SCatalog* pCtg, STableMetaRsp *rspMsg) { +int32_t catalogUpdateTableMeta(SCatalog* pCtg, STableMetaRsp *pMsg) { CTG_API_ENTER(); - if (NULL == pCtg || NULL == rspMsg) { + if (NULL == pCtg || NULL == pMsg) { CTG_API_LEAVE(TSDB_CODE_CTG_INVALID_INPUT); } - STableMetaOutput *output = taosMemoryCalloc(1, sizeof(STableMetaOutput)); - if (NULL == output) { - ctgError("malloc %d failed", (int32_t)sizeof(STableMetaOutput)); - CTG_API_LEAVE(TSDB_CODE_CTG_MEM_ERROR); - } - int32_t code = 0; - - strcpy(output->dbFName, rspMsg->dbFName); - strcpy(output->tbName, rspMsg->tbName); - - output->dbId = rspMsg->dbId; - - SET_META_TYPE_TABLE(output->metaType); - - CTG_ERR_JRET(queryCreateTableMetaFromMsg(rspMsg, true, &output->tbMeta)); - - CTG_ERR_JRET(ctgPutUpdateTbToQueue(pCtg, output, false)); - - CTG_API_LEAVE(code); + CTG_ERR_JRET(ctgUpdateTbMeta(pCtg, pMsg, true)); _return: - - taosMemoryFreeClear(output->tbMeta); - taosMemoryFreeClear(output); CTG_API_LEAVE(code); } @@ -978,7 +999,7 @@ int32_t catalogGetAllMeta(SCatalog* pCtg, void *pTrans, const SEpSet* pMgmtEps, } if (pReq->qNodeRequired) { - pRsp->pQnodeList = taosArrayInit(10, sizeof(SQueryNodeAddr)); + pRsp->pQnodeList = taosArrayInit(10, sizeof(SQueryNodeLoad)); CTG_ERR_JRET(ctgGetQnodeListFromMnode(CTG_PARAMS_LIST(), pRsp->pQnodeList, NULL)); } @@ -1152,7 +1173,7 @@ int32_t catalogUpdateUserAuthInfo(SCatalog* pCtg, SGetUserAuthRsp* pAuth) { CTG_API_LEAVE(TSDB_CODE_CTG_INVALID_INPUT); } - CTG_API_LEAVE(ctgPutUpdateUserToQueue(pCtg, pAuth, false)); + CTG_API_LEAVE(ctgUpdateUserEnqueue(pCtg, pAuth, false)); } @@ -1194,7 +1215,7 @@ void catalogDestroy(void) { taosHashCleanup(gCtgMgmt.pCluster); gCtgMgmt.pCluster = NULL; - CTG_UNLOCK(CTG_WRITE, &gCtgMgmt.lock); + if (CTG_IS_LOCKED(&gCtgMgmt.lock) == TD_RWLATCH_WRITE_FLAG_COPY) CTG_UNLOCK(CTG_WRITE, &gCtgMgmt.lock); qInfo("catalog destroyed"); } diff --git a/source/libs/catalog/src/ctgAsync.c b/source/libs/catalog/src/ctgAsync.c index 0341c3638bfeb6018326d1cbad86ca1363024ad9..eb84bf00a444fb6bc57652ee32abdf44035a0426 100644 --- a/source/libs/catalog/src/ctgAsync.c +++ b/source/libs/catalog/src/ctgAsync.c @@ -95,6 +95,30 @@ int32_t ctgInitGetDbCfgTask(SCtgJob *pJob, int32_t taskIdx, char *dbFName) { return TSDB_CODE_SUCCESS; } +int32_t ctgInitGetDbInfoTask(SCtgJob *pJob, int32_t taskIdx, char *dbFName) { + 
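  // Mirrors the other ctgInit*Task() helpers: build an SCtgTask on the stack,
  // attach a freshly allocated type-specific context (SCtgDbInfoCtx carries
  // just the db name), then push the task by value into pJob->pTasks, which
  // owns the copy from then on.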
SCtgTask task = {0};
+
+  task.type = CTG_TASK_GET_DB_INFO;
+  task.taskId = taskIdx;
+  task.pJob = pJob;
+
+  task.taskCtx = taosMemoryCalloc(1, sizeof(SCtgDbInfoCtx));
+  if (NULL == task.taskCtx) {
+    CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY);
+  }
+
+  SCtgDbInfoCtx* ctx = task.taskCtx;
+
+  memcpy(ctx->dbFName, dbFName, sizeof(ctx->dbFName));
+
+  taosArrayPush(pJob->pTasks, &task);
+
+  qDebug("QID:%" PRIx64 " task %d type %d initialized, dbFName:%s", pJob->queryId, taskIdx, task.type, dbFName);
+
+  return TSDB_CODE_SUCCESS;
+}
+
+
 int32_t ctgInitGetTbHashTask(SCtgJob *pJob, int32_t taskIdx, SName *name) {
   SCtgTask task = {0};
@@ -219,8 +243,9 @@ int32_t ctgInitJob(CTG_PARAMS, SCtgJob** job, uint64_t reqId, const SCatalogReq*
   int32_t dbCfgNum = (int32_t)taosArrayGetSize(pReq->pDbCfg);
   int32_t indexNum = (int32_t)taosArrayGetSize(pReq->pIndex);
   int32_t userNum = (int32_t)taosArrayGetSize(pReq->pUser);
+  int32_t dbInfoNum = (int32_t)taosArrayGetSize(pReq->pDbInfo);

-  int32_t taskNum = tbMetaNum + dbVgNum + udfNum + tbHashNum + qnodeNum + dbCfgNum + indexNum + userNum;
+  int32_t taskNum = tbMetaNum + dbVgNum + udfNum + tbHashNum + qnodeNum + dbCfgNum + indexNum + userNum + dbInfoNum;
   if (taskNum <= 0) {
     ctgError("empty input for job, taskNum:%d", taskNum);
     CTG_ERR_RET(TSDB_CODE_CTG_INVALID_INPUT);
@@ -249,6 +274,7 @@ int32_t ctgInitJob(CTG_PARAMS, SCtgJob** job, uint64_t reqId, const SCatalogReq*
   pJob->dbCfgNum = dbCfgNum;
   pJob->indexNum = indexNum;
   pJob->userNum = userNum;
+  pJob->dbInfoNum = dbInfoNum;

   pJob->pTasks = taosArrayInit(taskNum, sizeof(SCtgTask));
@@ -268,6 +294,11 @@ int32_t ctgInitJob(CTG_PARAMS, SCtgJob** job, uint64_t reqId, const SCatalogReq*
     CTG_ERR_JRET(ctgInitGetDbCfgTask(pJob, taskIdx++, dbFName));
   }

+  for (int32_t i = 0; i < dbInfoNum; ++i) {
+    char *dbFName = taosArrayGet(pReq->pDbInfo, i);
+    CTG_ERR_JRET(ctgInitGetDbInfoTask(pJob, taskIdx++, dbFName));
+  }
+
   for (int32_t i = 0; i < tbMetaNum; ++i) {
     SName *name = taosArrayGet(pReq->pTableMeta, i);
     CTG_ERR_JRET(ctgInitGetTbMetaTask(pJob, taskIdx++, name));
@@ -395,6 +426,20 @@ int32_t ctgDumpDbCfgRes(SCtgTask* pTask) {
   return TSDB_CODE_SUCCESS;
 }

+int32_t ctgDumpDbInfoRes(SCtgTask* pTask) {
+  SCtgJob* pJob = pTask->pJob;
+  if (NULL == pJob->jobRes.pDbInfo) {
+    pJob->jobRes.pDbInfo = taosArrayInit(pJob->dbInfoNum, sizeof(SDbInfo));
+    if (NULL == pJob->jobRes.pDbInfo) {
+      CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY);
+    }
+  }
+
+  taosArrayPush(pJob->jobRes.pDbInfo, pTask->res);
+
+  return TSDB_CODE_SUCCESS;
+}
+
 int32_t ctgDumpUdfRes(SCtgTask* pTask) {
   SCtgJob* pJob = pTask->pJob;
   if (NULL == pJob->jobRes.pUdfList) {
@@ -620,7 +665,7 @@ int32_t ctgHandleGetDbVgRsp(SCtgTask* pTask, int32_t reqType, const SDataBuf *pM
       CTG_ERR_JRET(ctgGenerateVgList(pCtg, pOut->dbVgroup->vgHash, (SArray**)&pTask->res));

-      CTG_ERR_JRET(ctgPutUpdateVgToQueue(pCtg, ctx->dbFName, pOut->dbId, pOut->dbVgroup, false));
+      CTG_ERR_JRET(ctgUpdateVgroupEnqueue(pCtg, ctx->dbFName, pOut->dbId, pOut->dbVgroup, false));
       pOut->dbVgroup = NULL;

       break;
@@ -659,7 +704,7 @@ int32_t ctgHandleGetTbHashRsp(SCtgTask* pTask, int32_t reqType, const SDataBuf *
       CTG_ERR_JRET(ctgGetVgInfoFromHashValue(pCtg, pOut->dbVgroup, ctx->pName, (SVgroupInfo*)pTask->res));

-      CTG_ERR_JRET(ctgPutUpdateVgToQueue(pCtg, ctx->dbFName, pOut->dbId, pOut->dbVgroup, false));
+      CTG_ERR_JRET(ctgUpdateVgroupEnqueue(pCtg, ctx->dbFName, pOut->dbId, pOut->dbVgroup, false));
       pOut->dbVgroup = NULL;

       break;
@@ -691,6 +736,11 @@ _return:
   CTG_RET(code);
 }

+int32_t ctgHandleGetDbInfoRsp(SCtgTask* pTask, int32_t reqType, const SDataBuf *pMsg, int32_t rspCode) {
+  CTG_RET(TSDB_CODE_APP_ERROR);
+}
+
+
 int32_t ctgHandleGetQnodeRsp(SCtgTask* pTask, int32_t reqType, const SDataBuf *pMsg, int32_t rspCode) {
   int32_t code = 0;
   CTG_ERR_JRET(ctgProcessRspMsg(pTask->msgCtx.out, reqType, pMsg->pData, pMsg->len, rspCode, pTask->msgCtx.target));
@@ -769,7 +819,7 @@ _return:
     }
   }

-  ctgPutUpdateUserToQueue(pCtg, pOut, false);
+  ctgUpdateUserEnqueue(pCtg, pOut, false);
   taosMemoryFreeClear(pTask->msgCtx.out);

   ctgHandleTaskEnd(pTask, code);
@@ -933,6 +983,41 @@ int32_t ctgLaunchGetDbCfgTask(SCtgTask *pTask) {
   return TSDB_CODE_SUCCESS;
 }

+int32_t ctgLaunchGetDbInfoTask(SCtgTask *pTask) {
+  int32_t code = 0;
+  SCatalog* pCtg = pTask->pJob->pCtg;
+  void *pTrans = pTask->pJob->pTrans;
+  const SEpSet* pMgmtEps = &pTask->pJob->pMgmtEps;
+  SCtgDBCache *dbCache = NULL;
+  SCtgDbInfoCtx* pCtx = (SCtgDbInfoCtx*)pTask->taskCtx;
+
+  pTask->res = taosMemoryCalloc(1, sizeof(SDbInfo));
+  if (NULL == pTask->res) {
+    CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY);
+  }
+
+  SDbInfo* pInfo = (SDbInfo*)pTask->res;
+  CTG_ERR_RET(ctgAcquireVgInfoFromCache(pCtg, pCtx->dbFName, &dbCache));
+  if (NULL != dbCache) {
+    pInfo->vgVer = dbCache->vgInfo->vgVersion;
+    pInfo->dbId = dbCache->dbId;
+    pInfo->tbNum = dbCache->vgInfo->numOfTable;
+  } else {
+    pInfo->vgVer = CTG_DEFAULT_INVALID_VERSION;
+  }
+
+  CTG_ERR_JRET(ctgHandleTaskEnd(pTask, 0));
+
+_return:
+
+  if (dbCache) {
+    ctgReleaseVgInfo(dbCache);
+    ctgReleaseDBCache(pCtg, dbCache);
+  }
+
+  CTG_RET(code);
+}
+
 int32_t ctgLaunchGetIndexTask(SCtgTask *pTask) {
   SCatalog* pCtg = pTask->pJob->pCtg;
   void *pTrans = pTask->pJob->pTrans;
@@ -992,6 +1077,7 @@ SCtgAsyncFps gCtgAsyncFps[] = {
   {ctgLaunchGetQnodeTask, ctgHandleGetQnodeRsp, ctgDumpQnodeRes},
   {ctgLaunchGetDbVgTask, ctgHandleGetDbVgRsp, ctgDumpDbVgRes},
   {ctgLaunchGetDbCfgTask, ctgHandleGetDbCfgRsp, ctgDumpDbCfgRes},
+  {ctgLaunchGetDbInfoTask, ctgHandleGetDbInfoRsp, ctgDumpDbInfoRes},
   {ctgLaunchGetTbMetaTask, ctgHandleGetTbMetaRsp, ctgDumpTbMetaRes},
   {ctgLaunchGetTbHashTask, ctgHandleGetTbHashRsp, ctgDumpTbHashRes},
   {ctgLaunchGetIndexTask, ctgHandleGetIndexRsp, ctgDumpIndexRes},
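[Note: the gCtgAsyncFps row added above only works because the table is a parallel array indexed by task type — CTG_TASK_GET_DB_INFO must occupy the same slot in the enum as its {launch, handle-rsp, dump} row does here. A minimal standalone sketch of that dispatch pattern; every Demo* name is hypothetical, not from the TDengine tree:]

```c
#include <stdint.h>
#include <stdio.h>

/* Hypothetical task types mirroring the catalog enum; the table below must
 * stay in the same order as this enum, exactly like gCtgAsyncFps. */
typedef enum { DEMO_TASK_GET_DB_VGROUP = 0, DEMO_TASK_GET_DB_INFO, DEMO_TASK_MAX } EDemoTaskType;

typedef struct SDemoTask { EDemoTaskType type; void *res; } SDemoTask;

typedef int32_t (*demoLaunchFp)(SDemoTask *pTask);
typedef int32_t (*demoDumpFp)(SDemoTask *pTask);

static int32_t launchGetDbVg(SDemoTask *pTask)   { printf("launch db-vgroup task\n"); return 0; }
static int32_t dumpDbVgRes(SDemoTask *pTask)     { printf("dump db-vgroup result\n"); return 0; }
static int32_t launchGetDbInfo(SDemoTask *pTask) { printf("launch db-info task\n"); return 0; }
static int32_t dumpDbInfoRes(SDemoTask *pTask)   { printf("dump db-info result\n"); return 0; }

/* Jump table indexed by task type: adding a task type means appending one
 * row here and one enum value, and keeping the two aligned. */
static struct { demoLaunchFp launch; demoDumpFp dump; } gDemoFps[DEMO_TASK_MAX] = {
    {launchGetDbVg,   dumpDbVgRes},
    {launchGetDbInfo, dumpDbInfoRes},
};

int main(void) {
  SDemoTask task = {.type = DEMO_TASK_GET_DB_INFO};
  (*gDemoFps[task.type].launch)(&task);  // dispatch without a switch statement
  (*gDemoFps[task.type].dump)(&task);
  return 0;
}
```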
diff --git a/source/libs/catalog/src/ctgCache.c b/source/libs/catalog/src/ctgCache.c
index 9161c7cb3237f0272ce9ffee71a8c6718c7e1dc9..0f1344c3432b2540c2daa33de33c2f8c570658f0 100644
--- a/source/libs/catalog/src/ctgCache.c
+++ b/source/libs/catalog/src/ctgCache.c
@@ -19,37 +19,43 @@
 #include "catalogInt.h"
 #include "systable.h"

-SCtgAction gCtgAction[CTG_ACT_MAX] = {
+SCtgOperation gCtgCacheOperation[CTG_OP_MAX] = {
   {
-    CTG_ACT_UPDATE_VG,
+    CTG_OP_UPDATE_VGROUP,
     "update vgInfo",
-    ctgActUpdateVg
+    ctgOpUpdateVgroup
   },
   {
-    CTG_ACT_UPDATE_TBL,
+    CTG_OP_UPDATE_TB_META,
     "update tbMeta",
-    ctgActUpdateTb
+    ctgOpUpdateTbMeta
   },
   {
-    CTG_ACT_REMOVE_DB,
-    "remove DB",
-    ctgActRemoveDB
+    CTG_OP_DROP_DB_CACHE,
+    "drop DB",
+    ctgOpDropDbCache
   },
   {
-    CTG_ACT_REMOVE_STB,
-    "remove stbMeta",
-    ctgActRemoveStb
+    CTG_OP_DROP_STB_META,
+    "drop stbMeta",
+    ctgOpDropStbMeta
   },
   {
-    CTG_ACT_REMOVE_TBL,
-    "remove tbMeta",
-    ctgActRemoveTb
+    CTG_OP_DROP_TB_META,
+    "drop tbMeta",
+    ctgOpDropTbMeta
   },
   {
-    CTG_ACT_UPDATE_USER,
+    CTG_OP_UPDATE_USER,
     "update user",
-    ctgActUpdateUser
+    ctgOpUpdateUser
+  },
+  {
+    CTG_OP_UPDATE_VG_EPSET,
+    "update epset",
+    ctgOpUpdateEpset
   }
+
 };
@@ -172,7 +178,7 @@ int32_t ctgAcquireVgInfoFromCache(SCatalog* pCtg, const char *dbFName, SCtgDBCac
   *pCache = dbCache;

-  CTG_CACHE_STAT_ADD(vgHitNum, 1);
+  CTG_CACHE_STAT_INC(vgHitNum, 1);

   ctgDebug("Got db vgInfo from cache, dbFName:%s", dbFName);
@@ -186,7 +192,7 @@ _return:

   *pCache = NULL;

-  CTG_CACHE_STAT_ADD(vgMissNum, 1);
+  CTG_CACHE_STAT_INC(vgMissNum, 1);

   return TSDB_CODE_SUCCESS;
 }
@@ -273,7 +279,7 @@ int32_t ctgReadTbMetaFromCache(SCatalog* pCtg, SCtgTbMetaCtx* ctx, STableMeta**
       ctgReleaseDBCache(pCtg, dbCache);
       ctgDebug("Got meta from cache, type:%d, dbFName:%s, tbName:%s", tbMeta->tableType, dbFName, ctx->pName->tname);

-      CTG_CACHE_STAT_ADD(tblHitNum, 1);
+      CTG_CACHE_STAT_INC(tblHitNum, 1);

       return TSDB_CODE_SUCCESS;
     }
@@ -306,7 +312,7 @@ int32_t ctgReadTbMetaFromCache(SCatalog* pCtg, SCtgTbMetaCtx* ctx, STableMeta**

   ctgReleaseDBCache(pCtg, dbCache);

-  CTG_CACHE_STAT_ADD(tblHitNum, 1);
+  CTG_CACHE_STAT_INC(tblHitNum, 1);

   ctgDebug("Got tbmeta from cache, dbFName:%s, tbName:%s", dbFName, ctx->pName->tname);
@@ -317,7 +323,7 @@ _return:

   ctgReleaseDBCache(pCtg, dbCache);
   taosMemoryFreeClear(*pTableMeta);

-  CTG_CACHE_STAT_ADD(tblMissNum, 1);
+  CTG_CACHE_STAT_INC(tblMissNum, 1);

   CTG_RET(code);
 }
@@ -405,7 +411,7 @@ int32_t ctgReadTbVerFromCache(SCatalog *pCtg, const SName *pTableName, int32_t *
 }

-int32_t ctgGetTbTypeFromCache(SCatalog* pCtg, const char* dbFName, const char *tableName, int32_t *tbType) {
+int32_t ctgReadTbTypeFromCache(SCatalog* pCtg, const char* dbFName, const char *tableName, int32_t *tbType) {
   if (NULL == pCtg->dbCache) {
     ctgWarn("empty db cache, dbFName:%s, tbName:%s", dbFName, tableName);
     return TSDB_CODE_SUCCESS;
@@ -456,7 +462,7 @@ int32_t ctgChkAuthFromCache(SCatalog* pCtg, const char* user, const char* dbFNam
   *inCache = true;
   ctgDebug("Got user from cache, user:%s", user);

-  CTG_CACHE_STAT_ADD(userHitNum, 1);
+  CTG_CACHE_STAT_INC(userHitNum, 1);

   if (pUser->superUser) {
     *pass = true;
@@ -485,13 +491,13 @@ int32_t ctgChkAuthFromCache(SCatalog* pCtg, const char* user, const char* dbFNam
 _return:

   *inCache = false;
-  CTG_CACHE_STAT_ADD(userMissNum, 1);
+  CTG_CACHE_STAT_INC(userMissNum, 1);

   return TSDB_CODE_SUCCESS;
 }

-void ctgWaitAction(SCtgMetaAction *action) {
+void ctgWaitOpDone(SCtgCacheOperation *action) {
   while (true) {
     tsem_wait(&gCtgMgmt.queue.rspSem);
@@ -509,54 +515,54 @@
   }
 }

-void ctgPopAction(SCtgMetaAction **action) {
+void ctgDequeue(SCtgCacheOperation **op) {
   SCtgQNode *orig = gCtgMgmt.queue.head;

   SCtgQNode *node = gCtgMgmt.queue.head->next;
   gCtgMgmt.queue.head = gCtgMgmt.queue.head->next;

-  CTG_QUEUE_SUB();
+  CTG_QUEUE_DEC();

   taosMemoryFreeClear(orig);

-  *action = &node->action;
+  *op = &node->op;
 }

-int32_t ctgPushAction(SCatalog* pCtg, SCtgMetaAction *action) {
+int32_t ctgEnqueue(SCatalog* pCtg, SCtgCacheOperation *operation) {
   SCtgQNode *node = taosMemoryCalloc(1, sizeof(SCtgQNode));
   if (NULL == node) {
     qError("calloc %d failed", (int32_t)sizeof(SCtgQNode));
     CTG_RET(TSDB_CODE_CTG_MEM_ERROR);
   }

-  action->seqId = atomic_add_fetch_64(&gCtgMgmt.queue.seqId, 1);
+  operation->seqId = atomic_add_fetch_64(&gCtgMgmt.queue.seqId, 1);

-  node->action = *action;
+  node->op = *operation;

   CTG_LOCK(CTG_WRITE, &gCtgMgmt.queue.qlock);
   gCtgMgmt.queue.tail->next = node;
   gCtgMgmt.queue.tail = node;
   CTG_UNLOCK(CTG_WRITE, &gCtgMgmt.queue.qlock);

-  CTG_QUEUE_ADD();
-  CTG_RUNTIME_STAT_ADD(qNum, 1);
+  CTG_QUEUE_INC();
+  CTG_RT_STAT_INC(qNum, 1);

   tsem_post(&gCtgMgmt.queue.reqSem);

-  ctgDebug("action [%s] added into queue", gCtgAction[action->act].name);
+  ctgDebug("action [%s] added into queue", gCtgCacheOperation[operation->opId].name);

-  if (action->syncReq) {
-    ctgWaitAction(action);
+  if (operation->syncOp) {
+    ctgWaitOpDone(operation);
   }

   return TSDB_CODE_SUCCESS;
 }

-int32_t ctgPutRmDBToQueue(SCatalog* pCtg, const char *dbFName, int64_t dbId) {
+int32_t ctgDropDbCacheEnqueue(SCatalog* pCtg, const char *dbFName, int64_t dbId) {
   int32_t code = 0;
-  SCtgMetaAction action= {.act = CTG_ACT_REMOVE_DB};
+  SCtgCacheOperation action= {.opId = CTG_OP_DROP_DB_CACHE};
   SCtgRemoveDBMsg *msg = taosMemoryMalloc(sizeof(SCtgRemoveDBMsg));
   if (NULL == msg) {
     ctgError("malloc %d failed", (int32_t)sizeof(SCtgRemoveDBMsg));
@@ -574,7 +580,7 @@ int32_t ctgPutRmDBToQueue(SCatalog* pCtg, const char *dbFName, int64_t dbId) {

   action.data = msg;

-  CTG_ERR_JRET(ctgPushAction(pCtg, &action));
+  CTG_ERR_JRET(ctgEnqueue(pCtg, &action));

   return TSDB_CODE_SUCCESS;
@@ -585,9 +591,9 @@ _return:
 }

-int32_t ctgPutRmStbToQueue(SCatalog* pCtg, const char *dbFName, int64_t dbId, const char *stbName, uint64_t suid, bool syncReq) {
+int32_t ctgDropStbMetaEnqueue(SCatalog* pCtg, const char *dbFName, int64_t dbId, const char *stbName, uint64_t suid, bool syncOp) {
   int32_t code = 0;
-  SCtgMetaAction action= {.act = CTG_ACT_REMOVE_STB, .syncReq = syncReq};
+  SCtgCacheOperation action= {.opId = CTG_OP_DROP_STB_META, .syncOp = syncOp};
   SCtgRemoveStbMsg *msg = taosMemoryMalloc(sizeof(SCtgRemoveStbMsg));
   if (NULL == msg) {
     ctgError("malloc %d failed", (int32_t)sizeof(SCtgRemoveStbMsg));
@@ -602,7 +608,7 @@ int32_t ctgPutRmStbToQueue(SCatalog* pCtg, const char *dbFName, int64_t dbId, co

   action.data = msg;

-  CTG_ERR_JRET(ctgPushAction(pCtg, &action));
+  CTG_ERR_JRET(ctgEnqueue(pCtg, &action));

   return TSDB_CODE_SUCCESS;
@@ -614,9 +620,9 @@ _return:


-int32_t ctgPutRmTbToQueue(SCatalog* pCtg, const char *dbFName, int64_t dbId, const char *tbName, bool syncReq) {
+int32_t ctgDropTbMetaEnqueue(SCatalog* pCtg, const char *dbFName, int64_t dbId, const char *tbName, bool syncOp) {
   int32_t code = 0;
-  SCtgMetaAction action= {.act = CTG_ACT_REMOVE_TBL, .syncReq = syncReq};
+  SCtgCacheOperation action= {.opId = CTG_OP_DROP_TB_META, .syncOp = syncOp};
   SCtgRemoveTblMsg *msg = taosMemoryMalloc(sizeof(SCtgRemoveTblMsg));
   if (NULL == msg) {
     ctgError("malloc %d failed", (int32_t)sizeof(SCtgRemoveTblMsg));
@@ -630,7 +636,7 @@ int32_t ctgPutRmTbToQueue(SCatalog* pCtg, const char *dbFName, int64_t dbId, con

   action.data = msg;

-  CTG_ERR_JRET(ctgPushAction(pCtg, &action));
+  CTG_ERR_JRET(ctgEnqueue(pCtg, &action));

   return TSDB_CODE_SUCCESS;
@@ -640,9 +646,9 @@ _return:

   CTG_RET(code);
 }

-int32_t ctgPutUpdateVgToQueue(SCatalog* pCtg, const char *dbFName, int64_t dbId, SDBVgInfo* dbInfo, bool syncReq) {
+int32_t ctgUpdateVgroupEnqueue(SCatalog* pCtg, const char *dbFName, int64_t dbId, SDBVgInfo* dbInfo, bool syncOp) {
   int32_t code = 0;
-  SCtgMetaAction action= {.act = CTG_ACT_UPDATE_VG, .syncReq = syncReq};
+  SCtgCacheOperation action= {.opId = CTG_OP_UPDATE_VGROUP, .syncOp = syncOp};
   SCtgUpdateVgMsg *msg = taosMemoryMalloc(sizeof(SCtgUpdateVgMsg));
   if (NULL == msg) {
     ctgError("malloc %d failed", (int32_t)sizeof(SCtgUpdateVgMsg));
@@ -662,7 +668,7 @@ int32_t ctgPutUpdateVgToQueue(SCatalog* pCtg, const char *dbFName, int64_t dbId,

   action.data = msg;

-  CTG_ERR_JRET(ctgPushAction(pCtg, &action));
+  CTG_ERR_JRET(ctgEnqueue(pCtg, &action));

   return TSDB_CODE_SUCCESS;
@@ -673,9 +679,9 @@ _return:

   CTG_RET(code);
 }

-int32_t ctgPutUpdateTbToQueue(SCatalog* pCtg, STableMetaOutput *output, bool syncReq) {
+int32_t ctgUpdateTbMetaEnqueue(SCatalog* pCtg, STableMetaOutput *output, bool syncOp) {
   int32_t code = 0;
-  SCtgMetaAction action= {.act = CTG_ACT_UPDATE_TBL, .syncReq = syncReq};
+  SCtgCacheOperation action= {.opId = CTG_OP_UPDATE_TB_META, .syncOp = syncOp};
   SCtgUpdateTblMsg *msg = taosMemoryMalloc(sizeof(SCtgUpdateTblMsg));
   if (NULL == msg) {
     ctgError("malloc %d failed", (int32_t)sizeof(SCtgUpdateTblMsg));
@@ -692,7 +698,34 @@ int32_t ctgPutUpdateTbToQueue(SCatalog* pCtg, STableMetaOutput *output, bool syn

   action.data = msg;

-  CTG_ERR_JRET(ctgPushAction(pCtg, &action));
+  CTG_ERR_JRET(ctgEnqueue(pCtg, &action));
+
+  return TSDB_CODE_SUCCESS;
+
+_return:
+
+  taosMemoryFreeClear(msg);
+
+  CTG_RET(code);
+}
+
+int32_t ctgUpdateVgEpsetEnqueue(SCatalog* pCtg, char *dbFName, int32_t vgId, SEpSet* pEpSet) {
+  int32_t code = 0;
+  SCtgCacheOperation operation= {.opId = CTG_OP_UPDATE_VG_EPSET};
+  SCtgUpdateEpsetMsg *msg = taosMemoryMalloc(sizeof(SCtgUpdateEpsetMsg));
+  if (NULL == msg) {
+    ctgError("malloc %d failed", (int32_t)sizeof(SCtgUpdateEpsetMsg));
+    CTG_ERR_RET(TSDB_CODE_CTG_MEM_ERROR);
+  }
+
+  msg->pCtg = pCtg;
+  strcpy(msg->dbFName, dbFName);
+  msg->vgId = vgId;
+  msg->epSet = *pEpSet;
+
+  operation.data = msg;
+
+  CTG_ERR_JRET(ctgEnqueue(pCtg, &operation));

   return TSDB_CODE_SUCCESS;
@@ -703,9 +736,11 @@ _return:

   CTG_RET(code);
 }

-int32_t ctgPutUpdateUserToQueue(SCatalog* pCtg, SGetUserAuthRsp *pAuth, bool syncReq) {
+
+
+int32_t ctgUpdateUserEnqueue(SCatalog* pCtg, SGetUserAuthRsp *pAuth, bool syncOp) {
   int32_t code = 0;
-  SCtgMetaAction action= {.act = CTG_ACT_UPDATE_USER, .syncReq = syncReq};
+  SCtgCacheOperation action= {.opId = CTG_OP_UPDATE_USER, .syncOp = syncOp};
   SCtgUpdateUserMsg *msg = taosMemoryMalloc(sizeof(SCtgUpdateUserMsg));
   if (NULL == msg) {
     ctgError("malloc %d failed", (int32_t)sizeof(SCtgUpdateUserMsg));
@@ -717,7 +752,7 @@ int32_t ctgPutUpdateUserToQueue(SCatalog* pCtg, SGetUserAuthRsp *pAuth, bool syn

   action.data = msg;

-  CTG_ERR_JRET(ctgPushAction(pCtg, &action));
+  CTG_ERR_JRET(ctgEnqueue(pCtg, &action));

   return TSDB_CODE_SUCCESS;
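[Note: every *Enqueue wrapper above follows one shape — allocate a message, fill it, push a SCtgCacheOperation through ctgEnqueue, which appends to a singly linked queue under a write lock and posts a semaphore for the single update thread. A minimal sketch of that producer/consumer queue, assuming exactly one consumer and using POSIX primitives in place of TDengine's CTG_LOCK/tsem wrappers; all Demo* names are hypothetical:]

```c
#include <pthread.h>
#include <semaphore.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Queue with a dummy head node and a tail pointer, like gCtgMgmt.queue. */
typedef struct SDemoNode { int32_t opId; struct SDemoNode *next; } SDemoNode;
static struct { SDemoNode *head, *tail; pthread_mutex_t lock; sem_t reqSem; } gQueue;

static int32_t demoEnqueue(int32_t opId) {
  SDemoNode *node = calloc(1, sizeof(SDemoNode));
  if (NULL == node) return -1;
  node->opId = opId;
  pthread_mutex_lock(&gQueue.lock);   // producers contend only on the tail
  gQueue.tail->next = node;
  gQueue.tail = node;
  pthread_mutex_unlock(&gQueue.lock);
  sem_post(&gQueue.reqSem);           // wake the single consumer thread
  return 0;
}

static void demoDequeue(int32_t *opId) {
  sem_wait(&gQueue.reqSem);
  SDemoNode *orig = gQueue.head;      // pop via the dummy head, as ctgDequeue does
  gQueue.head = gQueue.head->next;
  *opId = gQueue.head->opId;
  free(orig);
}

int main(void) {
  gQueue.head = gQueue.tail = calloc(1, sizeof(SDemoNode));  // dummy node
  pthread_mutex_init(&gQueue.lock, NULL);
  sem_init(&gQueue.reqSem, 0, 0);
  demoEnqueue(42);
  int32_t opId = 0;
  demoDequeue(&opId);
  printf("processed op %d\n", opId);
  return 0;
}
```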
@@ -953,7 +988,7 @@ int32_t ctgAddNewDBCache(SCatalog *pCtg, const char *dbFName, uint64_t dbId) {
     CTG_ERR_JRET(TSDB_CODE_CTG_MEM_ERROR);
   }

-  CTG_CACHE_STAT_ADD(dbNum, 1);
+  CTG_CACHE_STAT_INC(dbNum, 1);

   SDbVgVersion vgVersion = {.dbId = newDBCache.dbId, .vgVersion = -1};
   strncpy(vgVersion.dbFName, dbFName, sizeof(vgVersion.dbFName));
@@ -1013,7 +1048,7 @@ int32_t ctgRemoveDBFromCache(SCatalog* pCtg, SCtgDBCache *dbCache, const char* d
     CTG_ERR_RET(TSDB_CODE_CTG_DB_DROPPED);
   }

-  CTG_CACHE_STAT_SUB(dbNum, 1);
+  CTG_CACHE_STAT_DEC(dbNum, 1);

   ctgInfo("db removed from cache, dbFName:%s, dbId:%"PRIx64, dbFName, dbId);
@@ -1152,7 +1187,7 @@ int32_t ctgWriteTbMetaToCache(SCatalog *pCtg, SCtgDBCache *dbCache, char *dbFNam
     if (taosHashRemove(tbCache->stbCache, &orig->suid, sizeof(orig->suid))) {
       ctgError("stb not exist in stbCache, dbFName:%s, stb:%s, suid:%"PRIx64, dbFName, tbName, orig->suid);
     } else {
-      CTG_CACHE_STAT_SUB(stblNum, 1);
+      CTG_CACHE_STAT_DEC(stblNum, 1);
     }

     CTG_UNLOCK(CTG_WRITE, &tbCache->stbLock);
@@ -1179,7 +1214,7 @@ int32_t ctgWriteTbMetaToCache(SCatalog *pCtg, SCtgDBCache *dbCache, char *dbFNam
   }

   if (NULL == orig) {
-    CTG_CACHE_STAT_ADD(tblNum, 1);
+    CTG_CACHE_STAT_INC(tblNum, 1);
   }

   ctgDebug("tbmeta updated to cache, dbFName:%s, tbName:%s, tbType:%d", dbFName, tbName, meta->tableType);
@@ -1198,7 +1233,7 @@ int32_t ctgWriteTbMetaToCache(SCatalog *pCtg, SCtgDBCache *dbCache, char *dbFNam
       CTG_ERR_RET(TSDB_CODE_CTG_MEM_ERROR);
     }

-    CTG_CACHE_STAT_ADD(stblNum, 1);
+    CTG_CACHE_STAT_INC(stblNum, 1);

     CTG_UNLOCK(CTG_WRITE, &tbCache->stbLock);
@@ -1219,7 +1254,7 @@ int32_t ctgUpdateTbMetaToCache(SCatalog* pCtg, STableMetaOutput* pOut, bool sync
   int32_t code = 0;

   CTG_ERR_RET(ctgCloneMetaOutput(pOut, &pOutput));
-  CTG_ERR_JRET(ctgPutUpdateTbToQueue(pCtg, pOutput, syncReq));
+  CTG_ERR_JRET(ctgUpdateTbMetaEnqueue(pCtg, pOutput, syncReq));

   return TSDB_CODE_SUCCESS;
@@ -1230,9 +1265,9 @@ _return:

 }

-int32_t ctgActUpdateVg(SCtgMetaAction *action) {
+int32_t ctgOpUpdateVgroup(SCtgCacheOperation *operation) {
   int32_t code = 0;
-  SCtgUpdateVgMsg *msg = action->data;
+  SCtgUpdateVgMsg *msg = operation->data;

   CTG_ERR_JRET(ctgWriteDBVgInfoToCache(msg->pCtg, msg->dbFName, msg->dbId, &msg->dbInfo));
@@ -1244,9 +1279,9 @@ _return:

   CTG_RET(code);
 }

-int32_t ctgActRemoveDB(SCtgMetaAction *action) {
+int32_t ctgOpDropDbCache(SCtgCacheOperation *operation) {
   int32_t code = 0;
-  SCtgRemoveDBMsg *msg = action->data;
+  SCtgRemoveDBMsg *msg = operation->data;
   SCatalog* pCtg = msg->pCtg;

   SCtgDBCache *dbCache = NULL;
@@ -1270,9 +1305,9 @@ _return:

 }

-int32_t ctgActUpdateTb(SCtgMetaAction *action) {
+int32_t ctgOpUpdateTbMeta(SCtgCacheOperation *operation) {
   int32_t code = 0;
-  SCtgUpdateTblMsg *msg = action->data;
+  SCtgUpdateTblMsg *msg = operation->data;
   SCatalog* pCtg = msg->pCtg;
   STableMetaOutput* output = msg->output;
   SCtgDBCache *dbCache = NULL;
@@ -1316,9 +1351,9 @@ _return:

 }

-int32_t ctgActRemoveStb(SCtgMetaAction *action) {
+int32_t ctgOpDropStbMeta(SCtgCacheOperation *operation) {
   int32_t code = 0;
-  SCtgRemoveStbMsg *msg = action->data;
+  SCtgRemoveStbMsg *msg = operation->data;
   SCatalog* pCtg = msg->pCtg;

   SCtgDBCache *dbCache = NULL;
@@ -1336,14 +1371,14 @@ int32_t ctgActRemoveStb(SCtgMetaAction *action) {
   if (taosHashRemove(dbCache->tbCache.stbCache, &msg->suid, sizeof(msg->suid))) {
     ctgDebug("stb not exist in stbCache, may be removed, dbFName:%s, stb:%s, suid:%"PRIx64, msg->dbFName, msg->stbName, msg->suid);
   } else {
-    CTG_CACHE_STAT_SUB(stblNum, 1);
+    CTG_CACHE_STAT_DEC(stblNum, 1);
   }

   CTG_LOCK(CTG_READ, &dbCache->tbCache.metaLock);
   if (taosHashRemove(dbCache->tbCache.metaCache, msg->stbName, strlen(msg->stbName))) {
     ctgError("stb not exist in cache, dbFName:%s, stb:%s, suid:%"PRIx64, msg->dbFName, msg->stbName, msg->suid);
   } else {
-    CTG_CACHE_STAT_SUB(tblNum, 1);
+    CTG_CACHE_STAT_DEC(tblNum, 1);
   }
   CTG_UNLOCK(CTG_READ, &dbCache->tbCache.metaLock);
@@ -1362,9 +1397,9 @@ _return:

   CTG_RET(code);
 }

-int32_t ctgActRemoveTb(SCtgMetaAction *action) {
+int32_t ctgOpDropTbMeta(SCtgCacheOperation *operation) {
   int32_t code = 0;
-  SCtgRemoveTblMsg *msg = action->data;
+  SCtgRemoveTblMsg *msg = operation->data;
   SCatalog* pCtg = msg->pCtg;

   SCtgDBCache *dbCache = NULL;
@@ -1384,7 +1419,7 @@ int32_t ctgActRemoveTb(SCtgMetaAction *action) {
     ctgError("stb not exist in cache, dbFName:%s, tbName:%s", msg->dbFName, msg->tbName);
     CTG_ERR_RET(TSDB_CODE_CTG_INTERNAL_ERROR);
   } else {
-    CTG_CACHE_STAT_SUB(tblNum, 1);
+    CTG_CACHE_STAT_DEC(tblNum, 1);
   }
   CTG_UNLOCK(CTG_READ, &dbCache->tbCache.metaLock);
@@ -1397,9 +1432,9 @@ _return:

   CTG_RET(code);
 }

-int32_t ctgActUpdateUser(SCtgMetaAction *action) {
+int32_t ctgOpUpdateUser(SCtgCacheOperation *operation) {
   int32_t code = 0;
-  SCtgUpdateUserMsg *msg = action->data;
+  SCtgUpdateUserMsg *msg = operation->data;
   SCatalog* pCtg = msg->pCtg;

   if (NULL == pCtg->userCache) {
@@ -1460,14 +1495,60 @@ _return:

   CTG_RET(code);
 }

-void ctgUpdateThreadFuncUnexpectedStopped(void) {
+int32_t ctgOpUpdateEpset(SCtgCacheOperation *operation) {
+  int32_t code = 0;
+  SCtgUpdateEpsetMsg *msg = operation->data;
+  SCatalog* pCtg = msg->pCtg;
+
+  SCtgDBCache *dbCache = NULL;
+  CTG_ERR_RET(ctgAcquireDBCache(pCtg, msg->dbFName, &dbCache));
+  if (NULL == dbCache) {
+    ctgDebug("db %s not exist, ignore epset update", msg->dbFName);
+    goto _return;
+  }
+
+  SDBVgInfo *vgInfo = NULL;
+  CTG_ERR_RET(ctgWAcquireVgInfo(pCtg, dbCache));
+
+  if (NULL == dbCache->vgInfo) {
+    ctgWReleaseVgInfo(dbCache);
+    ctgDebug("vgroup in db %s not cached, ignore epset update", msg->dbFName);
+    goto _return;
+  }
+
+  SVgroupInfo* pInfo = taosHashGet(dbCache->vgInfo->vgHash, &msg->vgId, sizeof(msg->vgId));
+  if (NULL == pInfo) {
+    ctgWReleaseVgInfo(dbCache);
+    ctgDebug("no vgroup %d in db %s, ignore epset update", msg->vgId, msg->dbFName);
+    goto _return;
+  }
+
+  pInfo->epSet = msg->epSet;
+
+  ctgDebug("epset in vgroup %d updated, dbFName:%s", pInfo->vgId, msg->dbFName);
+
+  ctgWReleaseVgInfo(dbCache);
+
+_return:
+
+  if (dbCache) {
+    ctgReleaseDBCache(msg->pCtg, dbCache);
+  }
+
+  taosMemoryFreeClear(msg);
+
+  CTG_RET(code);
+}
+
+
+void ctgUpdateThreadUnexpectedStopped(void) {
   if (CTG_IS_LOCKED(&gCtgMgmt.lock) > 0) CTG_UNLOCK(CTG_READ, &gCtgMgmt.lock);
 }

 void* ctgUpdateThreadFunc(void* param) {
   setThreadName("catalog");
 #ifdef WINDOWS
-  atexit(ctgUpdateThreadFuncUnexpectedStopped);
+  atexit(ctgUpdateThreadUnexpectedStopped);
 #endif
   qInfo("catalog update thread started");
@@ -1483,21 +1564,21 @@ void* ctgUpdateThreadFunc(void* param) {
       break;
     }

-    SCtgMetaAction *action = NULL;
-    ctgPopAction(&action);
-    SCatalog *pCtg = ((SCtgUpdateMsgHeader *)action->data)->pCtg;
+    SCtgCacheOperation *operation = NULL;
+    ctgDequeue(&operation);
+    SCatalog *pCtg = ((SCtgUpdateMsgHeader *)operation->data)->pCtg;

-    ctgDebug("process [%s] action", gCtgAction[action->act].name);
+    ctgDebug("process [%s] operation", gCtgCacheOperation[operation->opId].name);

-    (*gCtgAction[action->act].func)(action);
+    (*gCtgCacheOperation[operation->opId].func)(operation);

-    gCtgMgmt.queue.seqDone = action->seqId;
+    gCtgMgmt.queue.seqDone = operation->seqId;

-    if (action->syncReq) {
+    if (operation->syncOp) {
       tsem_post(&gCtgMgmt.queue.rspSem);
     }

-    CTG_RUNTIME_STAT_ADD(qDoneNum, 1);
+    CTG_RT_STAT_INC(qDoneNum, 1);

     ctgdShowClusterCache(pCtg);
   }
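[Note: ctgOpUpdateEpset above funnels every early exit through _return so the DB-cache reference taken at the top is released exactly once. A compact illustration of that goto-cleanup idiom with stand-in types — nothing below is TDengine API:]

```c
#include <stdio.h>

/* Hypothetical cache entry standing in for SCtgDBCache. */
typedef struct { int refs; int vgId; } SDemoCache;
static SDemoCache gCache = {0, 3};

static SDemoCache *demoAcquireDbCache(const char *dbFName) {
  gCache.refs++;                        // pin the entry while we touch it
  return &gCache;
}
static void demoReleaseDbCache(SDemoCache *p) { p->refs--; }

/* Every early exit jumps to _return, so the pin is dropped exactly once. */
static int demoUpdateEpset(const char *dbFName, int vgId) {
  int code = 0;
  SDemoCache *dbCache = demoAcquireDbCache(dbFName);
  if (NULL == dbCache) goto _return;

  if (dbCache->vgId != vgId) {
    printf("no vgroup %d in db %s, ignore epset update\n", vgId, dbFName);
    goto _return;
  }
  printf("epset in vgroup %d updated, dbFName:%s\n", vgId, dbFName);

_return:
  if (dbCache) demoReleaseDbCache(dbCache);
  return code;
}

int main(void) { return demoUpdateEpset("1.db1", 3); }
```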
diff --git a/source/libs/catalog/src/ctgDbg.c b/source/libs/catalog/src/ctgDbg.c
index 849c66fd126dcbb0b0bdee1de1ec54ea8bd3697c..fdab50db0f65fd67d16d6f5b134f847dc0f882bc 100644
--- a/source/libs/catalog/src/ctgDbg.c
+++ b/source/libs/catalog/src/ctgDbg.c
@@ -71,6 +71,16 @@ void ctgdUserCallback(SMetaData* pResult, void* param, int32_t code) {
     qDebug("empty db vgroup");
   }

+  if (pResult->pDbInfo && taosArrayGetSize(pResult->pDbInfo) > 0) {
+    num = taosArrayGetSize(pResult->pDbInfo);
+    for (int32_t i = 0; i < num; ++i) {
+      SDbInfo *pDb = taosArrayGet(pResult->pDbInfo, i);
+      qDebug("db %d dbInfo: vgVer:%d, tbNum:%d, dbId:%" PRIx64, i, pDb->vgVer, pDb->tbNum, pDb->dbId);
+    }
+  } else {
+    qDebug("empty db info");
+  }
+
   if (pResult->pTableHash && taosArrayGetSize(pResult->pTableHash) > 0) {
     num = taosArrayGetSize(pResult->pTableHash);
     for (int32_t i = 0; i < num; ++i) {
@@ -127,6 +137,7 @@ int32_t ctgdLaunchAsyncCall(SCatalog* pCtg, void *pTrans, const SEpSet* pMgmtEps
   SCatalogReq req = {0};
   req.pTableMeta = taosArrayInit(2, sizeof(SName));
   req.pDbVgroup = taosArrayInit(2, TSDB_DB_FNAME_LEN);
+  req.pDbInfo = taosArrayInit(2, TSDB_DB_FNAME_LEN);
   req.pTableHash = taosArrayInit(2, sizeof(SName));
   req.pUdf = taosArrayInit(2, TSDB_FUNC_NAME_LEN);
   req.pDbCfg = taosArrayInit(2, TSDB_DB_FNAME_LEN);
@@ -149,9 +160,11 @@ int32_t ctgdLaunchAsyncCall(SCatalog* pCtg, void *pTrans, const SEpSet* pMgmtEps
   strcpy(dbFName, "1.db1");
   taosArrayPush(req.pDbVgroup, dbFName);
   taosArrayPush(req.pDbCfg, dbFName);
+  taosArrayPush(req.pDbInfo, dbFName);
   strcpy(dbFName, "1.db2");
   taosArrayPush(req.pDbVgroup, dbFName);
   taosArrayPush(req.pDbCfg, dbFName);
+  taosArrayPush(req.pDbInfo, dbFName);

   strcpy(funcName, "udf1");
   taosArrayPush(req.pUdf, funcName);
diff --git a/source/libs/catalog/src/ctgRemote.c b/source/libs/catalog/src/ctgRemote.c
index 4def1fff4f3c2185de569a706f59ace1c215d488..b16a082f75ff54946bdb20ef8c25989e8f597ec0 100644
--- a/source/libs/catalog/src/ctgRemote.c
+++ b/source/libs/catalog/src/ctgRemote.c
@@ -275,7 +275,7 @@ int32_t ctgGetQnodeListFromMnode(CTG_PARAMS, SArray *out, SCtgTask* pTask) {
   }

   if (pTask) {
-    void* pOut = taosArrayInit(4, sizeof(struct SQueryNodeAddr));
+    void* pOut = taosArrayInit(4, sizeof(SQueryNodeLoad));
     if (NULL == pOut) {
       CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY);
     }
diff --git a/source/libs/catalog/src/ctgUtil.c b/source/libs/catalog/src/ctgUtil.c
index 1f78a97733614fcf7cbbf48a1a90be62dfa61ce9..4625203dd8d20a6a96af8ea8b748533d4b0a1534 100644
--- a/source/libs/catalog/src/ctgUtil.c
+++ b/source/libs/catalog/src/ctgUtil.c
@@ -42,6 +42,9 @@ void ctgFreeSMetaData(SMetaData* pData) {
   }
   taosArrayDestroy(pData->pDbCfg);
   pData->pDbCfg = NULL;
+
+  taosArrayDestroy(pData->pDbInfo);
+  pData->pDbInfo = NULL;

   taosArrayDestroy(pData->pIndex);
   pData->pIndex = NULL;
@@ -82,7 +85,7 @@ void ctgFreeTbMetaCache(SCtgTbMetaCache *cache) {
     int32_t stblNum = taosHashGetSize(cache->stbCache);
     taosHashCleanup(cache->stbCache);
     cache->stbCache = NULL;
-    CTG_CACHE_STAT_SUB(stblNum, stblNum);
+    CTG_CACHE_STAT_DEC(stblNum, stblNum);
   }
   CTG_UNLOCK(CTG_WRITE, &cache->stbLock);
@@ -91,7 +94,7 @@ void ctgFreeTbMetaCache(SCtgTbMetaCache *cache) {
     int32_t tblNum = taosHashGetSize(cache->metaCache);
     taosHashCleanup(cache->metaCache);
     cache->metaCache = NULL;
-    CTG_CACHE_STAT_SUB(tblNum, tblNum);
+    CTG_CACHE_STAT_DEC(tblNum, tblNum);
   }
   CTG_UNLOCK(CTG_WRITE, &cache->metaLock);
 }
@@ -142,7 +145,7 @@ void ctgFreeHandle(SCatalog* pCtg) {

     taosHashCleanup(pCtg->dbCache);

-    CTG_CACHE_STAT_SUB(dbNum, dbNum);
+    CTG_CACHE_STAT_DEC(dbNum, dbNum);
   }

   if (pCtg->userCache) {
@@ -159,7 +162,7 @@ void ctgFreeHandle(SCatalog* pCtg) {

     taosHashCleanup(pCtg->userCache);

-    CTG_CACHE_STAT_SUB(userNum, userNum);
+    CTG_CACHE_STAT_DEC(userNum, userNum);
   }

   taosMemoryFree(pCtg);
@@ -293,9 +296,12 @@ void ctgFreeTask(SCtgTask* pTask) {
     }
     case CTG_TASK_GET_DB_CFG: {
       taosMemoryFreeClear(pTask->taskCtx);
-      if (pTask->res) {
-        taosMemoryFreeClear(pTask->res);
-      }
+      taosMemoryFreeClear(pTask->res);
+      break;
+    }
+    case CTG_TASK_GET_DB_INFO: {
+      taosMemoryFreeClear(pTask->taskCtx);
+      taosMemoryFreeClear(pTask->res);
       break;
     }
     case CTG_TASK_GET_TB_HASH: {
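[Note: the ctgFreeTask/ctgFreeSMetaData cleanup above leans on taosMemoryFreeClear, which frees and then NULLs the pointer, so the NULL guard around the old free could be dropped and repeated cleanup stays harmless. A tiny self-contained equivalent; demoFreeClear is a hypothetical stand-in, not the TDengine macro itself:]

```c
#include <stdio.h>
#include <stdlib.h>

/* Free-and-NULL helper in the spirit of taosMemoryFreeClear: because the
 * pointer is reset, a second cleanup pass becomes a no-op instead of a
 * double free. */
#define demoFreeClear(p) \
  do {                   \
    free(p);             \
    (p) = NULL;          \
  } while (0)

int main(void) {
  char *res = malloc(16);
  demoFreeClear(res);  // frees and NULLs
  demoFreeClear(res);  // safe: free(NULL) is defined to do nothing
  printf("res=%p\n", (void *)res);
  return 0;
}
```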
diff --git a/source/libs/catalog/test/catalogTests.cpp b/source/libs/catalog/test/catalogTests.cpp
index 6c7d1ac4ca554e69b92bcde3e4c64f20a46d0dcb..e4ae2c004f412e356feba406fca07f1c83863abe 100644
--- a/source/libs/catalog/test/catalogTests.cpp
+++ b/source/libs/catalog/test/catalogTests.cpp
@@ -41,7 +41,6 @@ namespace {

 extern "C" int32_t ctgdGetClusterCacheNum(struct SCatalog* pCatalog, int32_t type);
-extern "C" int32_t ctgActUpdateTb(SCtgMetaAction *action);
 extern "C" int32_t ctgdEnableDebug(char *option);
 extern "C" int32_t ctgdGetStatNum(char *option, void *res);
@@ -138,7 +137,7 @@ void ctgTestInitLogFile() {
   tsAsyncLog = 0;
   qDebugFlag = 159;

-  strcpy(tsLogDir, "/var/log/taos");
+  strcpy(tsLogDir, TD_LOG_DIR_PATH);

   ctgdEnableDebug("api");
   ctgdEnableDebug("meta");
@@ -888,9 +887,9 @@ void *ctgTestSetCtableMetaThread(void *param) {
   int32_t n = 0;
   STableMetaOutput *output = NULL;

-  SCtgMetaAction action = {0};
+  SCtgCacheOperation operation = {0};

-  action.act = CTG_ACT_UPDATE_TBL;
+  operation.opId = CTG_OP_UPDATE_TB_META;

   while (!ctgTestStop) {
     output = (STableMetaOutput *)taosMemoryMalloc(sizeof(STableMetaOutput));
@@ -899,9 +898,9 @@ void *ctgTestSetCtableMetaThread(void *param) {
     SCtgUpdateTblMsg *msg = (SCtgUpdateTblMsg *)taosMemoryMalloc(sizeof(SCtgUpdateTblMsg));
     msg->pCtg = pCtg;
     msg->output = output;
-    action.data = msg;
+    operation.data = msg;

-    code = ctgActUpdateTb(&action);
+    code = ctgOpUpdateTbMeta(&operation);
     if (code) {
       assert(0);
     }
@@ -1381,7 +1380,7 @@ TEST(tableMeta, updateStbMeta) {

   STableMetaRsp rsp = {0};
   ctgTestBuildSTableMetaRsp(&rsp);

-  code = catalogUpdateSTableMeta(pCtg, &rsp);
+  code = catalogUpdateTableMeta(pCtg, &rsp);
   ASSERT_EQ(code, 0);

   taosMemoryFreeClear(rsp.pSchemas);
diff --git a/source/libs/command/src/explain.c b/source/libs/command/src/explain.c
index 26a0f3bf6cf85bfe4d81a0ab5d8913d7e1767eeb..831b7017b2632a3e52e3050c08b2c29ffa463eeb 100644
--- a/source/libs/command/src/explain.c
+++ b/source/libs/command/src/explain.c
@@ -560,8 +560,10 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i
         QRY_ERR_RET(qExplainBufAppendExecInfo(pResNode->pExecInfo, tbuf, &tlen));
         EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT);
       }
-      EXPLAIN_ROW_APPEND(EXPLAIN_FUNCTIONS_FORMAT, pAggNode->pAggFuncs->length);
-      EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT);
+      if (pAggNode->pAggFuncs) {
+        EXPLAIN_ROW_APPEND(EXPLAIN_FUNCTIONS_FORMAT, pAggNode->pAggFuncs->length);
+        EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT);
+      }
       EXPLAIN_ROW_APPEND(EXPLAIN_WIDTH_FORMAT, pAggNode->node.pOutputDataBlockDesc->totalRowSize);
       if (pAggNode->pGroupKeys) {
         EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT);
diff --git a/source/libs/executor/inc/dataSinkInt.h b/source/libs/executor/inc/dataSinkInt.h
index 85356a862ce282ac53aaad4ee72f0a77b19f115c..8f49440105c813b512835717e861d3da1b2065df 100644
--- a/source/libs/executor/inc/dataSinkInt.h
+++ b/source/libs/executor/inc/dataSinkInt.h
@@ -37,6 +37,7 @@ typedef void (*FEndPut)(struct SDataSinkHandle* pHandle, uint64_t useconds);
 typedef void (*FGetDataLength)(struct SDataSinkHandle* pHandle, int32_t* pLen, bool* pQueryEnd);
 typedef int32_t (*FGetDataBlock)(struct SDataSinkHandle* pHandle, SOutputData* pOutput);
 typedef int32_t (*FDestroyDataSinker)(struct SDataSinkHandle* pHandle);
+typedef int32_t (*FGetCacheSize)(struct SDataSinkHandle* pHandle, uint64_t* size);

 typedef struct SDataSinkHandle {
   FPutDataBlock fPut;
@@ -44,6 +45,7 @@ typedef struct SDataSinkHandle {
   FGetDataLength fGetLen;
   FGetDataBlock fGetData;
   FDestroyDataSinker fDestroy;
+  FGetCacheSize fGetCacheSize;
 } SDataSinkHandle;

 int32_t createDataDispatcher(SDataSinkManager* pManager, const SDataSinkNode* pDataSink, DataSinkHandle* pHandle);
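[Note: dataSinkInt.h above grows the sink vtable by one slot, fGetCacheSize. Each concrete sink embeds SDataSinkHandle as its first member, so the handle pointer doubles as the object pointer. A minimal sketch of that embedding trick — all Demo* names are hypothetical:]

```c
#include <stdint.h>
#include <stdio.h>

/* Handle-with-vtable pattern like SDataSinkHandle. */
typedef struct SDemoSinkHandle {
  int32_t (*fGetCacheSize)(struct SDemoSinkHandle *pHandle, uint64_t *size);
} SDemoSinkHandle;

typedef struct {
  SDemoSinkHandle sink;  // must be first: handle pointer == object pointer
  uint64_t        cachedSize;
} SDemoDispatcher;

static int32_t demoGetCacheSize(SDemoSinkHandle *pHandle, uint64_t *size) {
  *size = ((SDemoDispatcher *)pHandle)->cachedSize;  // downcast via first member
  return 0;
}

int main(void) {
  SDemoDispatcher d = {.sink = {.fGetCacheSize = demoGetCacheSize}, .cachedSize = 4096};
  SDemoSinkHandle *h = &d.sink;   // callers only ever see the handle
  uint64_t size = 0;
  h->fGetCacheSize(h, &size);
  printf("cached: %llu bytes\n", (unsigned long long)size);
  return 0;
}
```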
diff --git a/source/libs/executor/inc/executil.h b/source/libs/executor/inc/executil.h
index 63c398618f38446260124978a803b2a63c6f0688..b8975854c9446eab43cd4a7d8c3ccb6e38b93016 100644
--- a/source/libs/executor/inc/executil.h
+++ b/source/libs/executor/inc/executil.h
@@ -75,15 +75,15 @@ typedef struct SResultRowInfo {
   int32_t size;      // number of result set
   int32_t capacity;  // max capacity
   SResultRowPosition cur;
+  SList* openWindow;
 } SResultRowInfo;

 struct SqlFunctionCtx;

-size_t getResultRowSize(struct SqlFunctionCtx* pCtx, int32_t numOfOutput);
+size_t getResultRowSize(struct SqlFunctionCtx* pCtx, int32_t numOfOutput);
 int32_t initResultRowInfo(SResultRowInfo* pResultRowInfo, int32_t size);
 void cleanupResultRowInfo(SResultRowInfo* pResultRowInfo);

-int32_t numOfClosedResultRows(SResultRowInfo* pResultRowInfo);
 void closeAllResultRows(SResultRowInfo* pResultRowInfo);

 void initResultRow(SResultRow *pResultRow);
@@ -92,15 +92,6 @@ bool isResultRowClosed(SResultRow* pResultRow);

 struct SResultRowEntryInfo* getResultCell(const SResultRow* pRow, int32_t index, const int32_t* offset);

-static FORCE_INLINE SResultRow *getResultRow(SDiskbasedBuf* pBuf, SResultRowInfo *pResultRowInfo, int32_t slot) {
-  ASSERT(pResultRowInfo != NULL && slot >= 0 && slot < pResultRowInfo->size);
-  SResultRowPosition* pos = &pResultRowInfo->pPosition[slot];
-
-  SFilePage* bufPage = (SFilePage*) getBufPage(pBuf, pos->pageId);
-  SResultRow* pRow = (SResultRow*)((char*)bufPage + pos->offset);
-  return pRow;
-}
-
 static FORCE_INLINE SResultRow *getResultRowByPos(SDiskbasedBuf* pBuf, SResultRowPosition* pos) {
   SFilePage* bufPage = (SFilePage*) getBufPage(pBuf, pos->pageId);
   SResultRow* pRow = (SResultRow*)((char*)bufPage + pos->offset);
diff --git a/source/libs/executor/inc/executorInt.h b/source/libs/executor/inc/executorInt.h
index 85572a9e1770ff485270e5222268aa120eaced7d..88f308710ec5022c1ad1b83a38bfa0e4dd4e53a6 100644
--- a/source/libs/executor/inc/executorInt.h
+++ b/source/libs/executor/inc/executorInt.h
@@ -29,7 +29,7 @@ typedef struct {

 int32_t initGroupOptrInfo(SArray** pGroupColVals, int32_t* keyLen, char** keyBuf, const SArray* pGroupColList);
 uint64_t calcGroupId(char* pData, int32_t len);
-void recordNewGroupKeys(SArray* pGroupCols, SArray* pGroupColVals, SSDataBlock* pBlock, int32_t rowIndex, int32_t numOfGroupCols);
+void recordNewGroupKeys(SArray* pGroupCols, SArray* pGroupColVals, SSDataBlock* pBlock, int32_t rowIndex);
 int32_t buildGroupKeys(void* pKey, const SArray* pGroupColVals);
 #ifdef __cplusplus
 }
diff --git a/source/libs/executor/inc/executorimpl.h b/source/libs/executor/inc/executorimpl.h
index aaa2cccf47a5638236d2e7795b82c9be55de3ed4..60be28167d53cd0b23e65787d37f91d38281e95a 100644
--- a/source/libs/executor/inc/executorimpl.h
+++ b/source/libs/executor/inc/executorimpl.h
@@ -156,13 +156,11 @@ typedef struct STaskAttr {
 } STaskAttr;

 struct SOperatorInfo;
-struct SAggSupporter;
-struct SOptrBasicInfo;
+//struct SAggSupporter;
+//struct SOptrBasicInfo;

-typedef void (*__optr_encode_fn_t)(struct SOperatorInfo* pOperator, struct SAggSupporter* pSup,
-                                   struct SOptrBasicInfo* pInfo, char** result, int32_t* length);
-typedef bool (*__optr_decode_fn_t)(struct SOperatorInfo* pOperator, struct SAggSupporter* pSup,
-                                   struct SOptrBasicInfo* pInfo, char* result, int32_t length);
+typedef int32_t (*__optr_encode_fn_t)(struct SOperatorInfo* pOperator, char** result, int32_t* length);
+typedef int32_t (*__optr_decode_fn_t)(struct SOperatorInfo* pOperator, char* result);

 typedef int32_t (*__optr_open_fn_t)(struct SOperatorInfo* pOptr);
 typedef SSDataBlock* (*__optr_fn_t)(struct SOperatorInfo* pOptr);
@@ -340,6 +338,8 @@ typedef struct STableScanInfo {
   char*           keyBuf;        // group by keys for hash
   int32_t         groupKeyLen;   // total group by column width
   SHashObj*       pGroupSet;     // quick locate the window object for each result
+
+  int32_t         curTWinIdx;
 } STableScanInfo;

 typedef struct STagScanInfo {
@@ -377,31 +377,35 @@ typedef struct SessionWindowSupporter {
   SStreamAggSupporter* pStreamAggSup;
   int64_t              gap;
 } SessionWindowSupporter;
+
 typedef struct SStreamBlockScanInfo {
-  SArray*      pBlockLists;       // multiple SSDatablock.
-  SSDataBlock* pRes;              // result SSDataBlock
-  SSDataBlock* pUpdateRes;        // update SSDataBlock
-  int32_t      updateResIndex;
-  int32_t      blockType;         // current block type
-  int32_t      validBlockIndex;   // Is current data has returned?
-  SColumnInfo* pCols;             // the output column info
-  uint64_t     numOfRows;         // total scanned rows
-  uint64_t     numOfExec;         // execution times
-  void*        streamBlockReader; // stream block reader handle
-  SArray*      pColMatchInfo;     //
-  SNode*       pCondition;
-  SArray*      tsArray;
-  SUpdateInfo* pUpdateInfo;
-  int32_t      primaryTsIndex;    // primary time stamp slot id
-  void*        pDataReader;
-  SReadHandle  readHandle;
-  uint64_t     tableUid;          // queried super table uid
+  SArray*      pBlockLists;       // multiple SSDatablock.
+  SSDataBlock* pRes;              // result SSDataBlock
+  SSDataBlock* pUpdateRes;        // update SSDataBlock
+  int32_t      updateResIndex;
+  int32_t      blockType;         // current block type
+  int32_t      validBlockIndex;   // Is current data has returned?
+  SColumnInfo* pCols;             // the output column info
+  uint64_t     numOfExec;         // execution times
+  void*        streamBlockReader; // stream block reader handle
+  SArray*      pColMatchInfo;     //
+  SNode*       pCondition;
+  SArray*      tsArray;
+  SUpdateInfo* pUpdateInfo;
+
+  SExprInfo*   pPseudoExpr;
+  int32_t      numOfPseudoExpr;
+
+  int32_t      primaryTsIndex;    // primary time stamp slot id
+  void*        pDataReader;
+  SReadHandle  readHandle;
+  uint64_t     tableUid;          // queried super table uid
   EStreamScanMode scanMode;
   SOperatorInfo* pOperatorDumy;
   SInterval      interval;        // if the upstream is an interval operator, the interval info is also kept here.
-  SCatchSupporter childAggSup;
-  SArray*        childIds;
+  SArray*        childIds;
   SessionWindowSupporter sessionSup;
+  bool           assignBlockUid;  // assign block uid to groupId, temporarily used for generating rollup SMA.
 } SStreamBlockScanInfo;

 typedef struct SSysTableScanInfo {
@@ -440,41 +444,50 @@ typedef struct SAggSupporter {

 typedef struct STimeWindowSupp {
   int8_t           calTrigger;
   int64_t          waterMark;
+  TSKEY            maxTs;
   SColumnInfoData  timeWindowData;  // query time window info for scalar function execution.
+  SHashObj        *winMap;
 } STimeWindowAggSupp;

 typedef struct SIntervalAggOperatorInfo {
+  // SOptrBasicInfo should be first, SAggSupporter should be second for stream encode
   SOptrBasicInfo     binfo;               // basic info
+  SAggSupporter      aggSup;              // aggregate supporter
+
   SGroupResInfo      groupResInfo;        // multiple results build supporter
   SInterval          interval;            // interval info
   int32_t            primaryTsIndex;      // primary time stamp slot id from result of downstream operator.
   STimeWindow        win;                 // query time range
   bool               timeWindowInterpo;   // interpolation needed or not
   char**             pRow;                // previous row/tuple of already processed datablock
-  SAggSupporter      aggSup;              // aggregate supporter
+  SArray*            pInterpCols;         // interpolation columns
   STableQueryInfo*   pCurrent;            // current tableQueryInfo struct
   int32_t            order;               // current SSDataBlock scan order
   EOPTR_EXEC_MODEL   execModel;           // operator execution model [batch model|stream model]
   SArray*            pUpdatedWindow;      // updated time window due to the input data block from the downstream operator.
   STimeWindowAggSupp twAggSup;
-  struct SFillInfo*  pFillInfo;           // fill info
   bool               invertible;
+  SArray*            pPrevValues;         // SArray used to keep the previous not null value for interpolation.
 } SIntervalAggOperatorInfo;

 typedef struct SStreamFinalIntervalOperatorInfo {
+  // SOptrBasicInfo should be first, SAggSupporter should be second for stream encode
   SOptrBasicInfo     binfo;               // basic info
+  SAggSupporter      aggSup;              // aggregate supporter
+  SGroupResInfo      groupResInfo;        // multiple results build supporter
   SInterval          interval;            // interval info
   int32_t            primaryTsIndex;      // primary time stamp slot id from result of downstream operator.
-  SAggSupporter      aggSup;              // aggregate supporter
   int32_t            order;               // current SSDataBlock scan order
   STimeWindowAggSupp twAggSup;
   SArray*            pChildren;
 } SStreamFinalIntervalOperatorInfo;

 typedef struct SAggOperatorInfo {
+  // SOptrBasicInfo should be first, SAggSupporter should be second for stream encode
   SOptrBasicInfo binfo;
   SAggSupporter  aggSup;
+
   STableQueryInfo   *current;
   uint64_t           groupId;
   SGroupResInfo      groupResInfo;
@@ -487,8 +500,10 @@ typedef struct SAggOperatorInfo {
 } SAggOperatorInfo;

 typedef struct SProjectOperatorInfo {
+  // SOptrBasicInfo should be first, SAggSupporter should be second for stream encode
   SOptrBasicInfo binfo;
   SAggSupporter  aggSup;
+
   SSDataBlock*   existDataBlock;
   SArray*        pPseudoColInfo;
   SLimit         limit;
@@ -512,7 +527,10 @@ typedef struct SFillOperatorInfo {
 } SFillOperatorInfo;

 typedef struct SGroupbyOperatorInfo {
+  // SOptrBasicInfo should be first, SAggSupporter should be second for stream encode
   SOptrBasicInfo binfo;
+  SAggSupporter  aggSup;
+
   SArray*        pGroupCols;     // group by columns, SArray
   SArray*        pGroupColVals;  // current group column values, SArray
   SNode*         pCondition;
@@ -520,7 +538,6 @@ typedef struct SGroupbyOperatorInfo {
   char*          keyBuf;         // group by keys for hash
   int32_t        groupKeyLen;    // total group by column width
   SGroupResInfo  groupResInfo;
-  SAggSupporter  aggSup;
   SExprInfo*     pScalarExprInfo;
   int32_t        numOfScalarExpr;  // the number of scalar expression in group operator
   SqlFunctionCtx* pScalarFuncCtx;
@@ -557,8 +574,10 @@ typedef struct SWindowRowsSup {
 } SWindowRowsSup;

 typedef struct SSessionAggOperatorInfo {
+  // SOptrBasicInfo should be first, SAggSupporter should be second for stream encode
   SOptrBasicInfo binfo;
   SAggSupporter  aggSup;
+
   SGroupResInfo  groupResInfo;
   SWindowRowsSup winSup;
   bool           reptScan;  // next round scan
@@ -571,6 +590,7 @@ typedef struct SResultWindowInfo {
   SResultRowPosition pos;
   STimeWindow        win;
   bool               isOutput;
+  bool               isClosed;
 } SResultWindowInfo;

 typedef struct SStreamSessionAggOperatorInfo {
@@ -596,8 +616,10 @@ typedef struct STimeSliceOperatorInfo {
 } STimeSliceOperatorInfo;

 typedef struct SStateWindowOperatorInfo {
+  // SOptrBasicInfo should be first, SAggSupporter should be second for stream encode
   SOptrBasicInfo binfo;
   SAggSupporter  aggSup;
+
   SGroupResInfo  groupResInfo;
   SWindowRowsSup winSup;
   SColumn        stateCol;  // start row index
@@ -609,8 +631,10 @@ typedef struct SStateWindowOperatorInfo {
 } SStateWindowOperatorInfo;

 typedef struct SSortedMergeOperatorInfo {
-
+  // SOptrBasicInfo should be first, SAggSupporter should be second for stream encode
   SOptrBasicInfo binfo;
+  SAggSupporter  aggSup;
+
   SArray*        pSortInfo;
   int32_t        numOfSources;
   SSortHandle   *pSortHandle;
@@ -622,7 +646,6 @@ typedef struct SSortedMergeOperatorInfo {
   int32_t        numOfResPerPage;
   char**         groupVal;
   SArray        *groupInfo;
-  SAggSupporter  aggSup;
 } SSortedMergeOperatorInfo;

 typedef struct SSortOperatorInfo {
@@ -740,10 +763,11 @@ SOperatorInfo* createGroupOperatorInfo(SOperatorInfo* downstream, SExprInfo* pEx
                                        SSDataBlock* pResultBlock, SArray* pGroupColList, SNode* pCondition,
                                        SExprInfo* pScalarExprInfo, int32_t numOfScalarExpr, SExecTaskInfo* pTaskInfo);
 SOperatorInfo* createDataBlockInfoScanOperator(void* dataReader, SExecTaskInfo* pTaskInfo);
-SOperatorInfo* createStreamScanOperatorInfo(void* streamReadHandle, void* pDataReader, SReadHandle* pHandle,
-                                            uint64_t uid, SSDataBlock* pResBlock, SArray* pColList,
-                                            SArray* pTableIdList, SExecTaskInfo* pTaskInfo, SNode* pCondition,
-                                            SOperatorInfo* pOperatorDumy);
+
+SOperatorInfo* createStreamScanOperatorInfo(void* pDataReader, SReadHandle* pHandle,
+                                            SArray* pTableIdList, STableScanPhysiNode* pTableScanNode, SExecTaskInfo* pTaskInfo,
+                                            STimeWindowAggSupp* pTwSup);
+
 SOperatorInfo* createFillOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExpr, int32_t numOfCols,
                                       SInterval* pInterval, STimeWindow* pWindow, SSDataBlock* pResBlock, int32_t fillType, SNodeListNode* fillVal,
@@ -783,31 +807,48 @@ void queryCostStatis(SExecTaskInfo* pTaskInfo);
 void doDestroyTask(SExecTaskInfo* pTaskInfo);
 int32_t getMaximumIdleDurationSec();

+/*
+ * ops:    root operator
+ * data:   *data save the result of encode, need to be freed by caller
+ * length: *length save the length of *data
+ * return: result code, 0 means success
+ */
+int32_t encodeOperator(SOperatorInfo* ops, char** data, int32_t *length);
+
+/*
+ * ops:    root operator, created by caller
+ * data:   save the result of decode
+ * length: the length of data
+ * return: result code, 0 means success
+ */
+int32_t decodeOperator(SOperatorInfo* ops, char* data, int32_t length);
+
 void setTaskStatus(SExecTaskInfo* pTaskInfo, int8_t status);
 int32_t createExecTaskInfoImpl(SSubplan* pPlan, SExecTaskInfo** pTaskInfo, SReadHandle* pHandle, uint64_t taskId,
                                EOPTR_EXEC_MODEL model);
 int32_t getOperatorExplainExecInfo(SOperatorInfo* operatorInfo, SExplainExecInfo** pRes, int32_t* capacity,
                                    int32_t* resNum);
-bool aggDecodeResultRow(SOperatorInfo* pOperator, SAggSupporter* pSup, SOptrBasicInfo* pInfo, char* result,
-                        int32_t length);
-void aggEncodeResultRow(SOperatorInfo* pOperator, SAggSupporter* pSup, SOptrBasicInfo* pInfo, char** result,
-                        int32_t* length);
+int32_t aggDecodeResultRow(SOperatorInfo* pOperator, char* result);
+int32_t aggEncodeResultRow(SOperatorInfo* pOperator, char** result, int32_t* length);
+
 STimeWindow getActiveTimeWindow(SDiskbasedBuf* pBuf, SResultRowInfo* pResultRowInfo, int64_t ts, SInterval* pInterval,
                                 int32_t precision, STimeWindow* win);
 int32_t getNumOfRowsInTimeWindow(SDataBlockInfo* pDataBlockInfo, TSKEY* pPrimaryColumn, int32_t startPos, TSKEY ekey,
                                  __block_search_fn_t searchFn, STableQueryInfo* item, int32_t order);
 int32_t binarySearchForKey(char* pValue, int num, TSKEY key, int order);
-int32_t initCacheSupporter(SCatchSupporter* pCatchSup, size_t rowSize, const char* pKey,
-                           const char* pDir);
 int32_t initStreamAggSupporter(SStreamAggSupporter* pSup, const char* pKey);
-SResultRow* getNewResultRow_rv(SDiskbasedBuf* pResultBuf, int64_t tableGroupId, int32_t interBufSize);
+SResultRow* getNewResultRow(SDiskbasedBuf* pResultBuf, int64_t tableGroupId, int32_t interBufSize);
 SResultWindowInfo* getSessionTimeWindow(SArray* pWinInfos, TSKEY ts, int64_t gap, int32_t* pIndex);
 int32_t updateSessionWindowInfo(SResultWindowInfo* pWinInfo, TSKEY* pTs, int32_t rows,
                                 int32_t start, int64_t gap, SHashObj* pStDeleted);
 bool functionNeedToExecute(SqlFunctionCtx* pCtx);
+int64_t getSmaWaterMark(int64_t interval, double filesFactor);
+bool isSmaStream(int8_t triggerType);
+
+int32_t compareTimeWindow(const void* p1, const void* p2, const void* param);

 #ifdef __cplusplus
 }
 #endif
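[Note: the encodeOperator/decodeOperator comments above spell out the ownership contract — encode allocates *data for the caller to free, decode consumes a caller-supplied buffer into an operator tree the caller already built. A stub round-trip that honors the same contract; the demo functions just move one int and are not the real implementations:]

```c
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Toy operator state standing in for the serialized result rows. */
typedef struct { int32_t resultRows; } SDemoOperator;

/* Encode allocates *data and reports its length, as the header documents. */
static int32_t demoEncodeOperator(SDemoOperator *ops, char **data, int32_t *length) {
  *length = (int32_t)sizeof(ops->resultRows);
  *data = malloc(*length);
  if (NULL == *data) return -1;
  memcpy(*data, &ops->resultRows, *length);
  return 0;
}

/* Decode restores state into an operator the caller created beforehand. */
static int32_t demoDecodeOperator(SDemoOperator *ops, char *data, int32_t length) {
  if (length < (int32_t)sizeof(ops->resultRows)) return -1;
  memcpy(&ops->resultRows, data, sizeof(ops->resultRows));
  return 0;
}

int main(void) {
  SDemoOperator src = {.resultRows = 7}, dst = {0};
  char   *buf = NULL;
  int32_t len = 0;
  if (demoEncodeOperator(&src, &buf, &len) == 0 && demoDecodeOperator(&dst, buf, len) == 0) {
    printf("restored %d result rows\n", dst.resultRows);
  }
  free(buf);  // the buffer is owned by the caller, per the contract above
  return 0;
}
```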
diff --git a/source/libs/executor/src/dataDispatcher.c b/source/libs/executor/src/dataDispatcher.c
index fa9e27a5f810268f057a53d10b4d946dbd6825ea..080cf5c2ad44f31f11f0fce0e2350fe121c2c1fb 100644
--- a/source/libs/executor/src/dataDispatcher.c
+++ b/source/libs/executor/src/dataDispatcher.c
@@ -22,6 +22,8 @@
 #include "tglobal.h"
 #include "tqueue.h"

+extern SDataSinkStat gDataSinkStat;
+
 typedef struct SDataDispatchBuf {
   int32_t useSize;
   int32_t allocSize;
@@ -45,6 +47,7 @@ typedef struct SDataDispatchHandle {
   int32_t             status;
   bool                queryEnd;
   uint64_t            useconds;
+  uint64_t            cachedSize;
   TdThreadMutex       mutex;
 } SDataDispatchHandle;
@@ -71,7 +74,7 @@ static bool needCompress(const SSDataBlock* pData, int32_t numOfCols) {
 // +----------------+--------------+----------+--------------------------------------+-------------+-----------+-------------+-----------+
 // The length of bitmap is decided by number of rows of this data block, and the length of each column data is
 // recorded in the first segment, next to the struct header
-static void toDataCacheEntry(const SDataDispatchHandle* pHandle, const SInputData* pInput, SDataDispatchBuf* pBuf) {
+static void toDataCacheEntry(SDataDispatchHandle* pHandle, const SInputData* pInput, SDataDispatchBuf* pBuf) {
   int32_t numOfCols = LIST_LENGTH(pHandle->pSchema->pSlots);

   SDataCacheEntry* pEntry = (SDataCacheEntry*)pBuf->pData;
@@ -84,6 +87,9 @@ static void toDataCacheEntry(const SDataDispatchHandle* pHandle, const SInputDat
   blockCompressEncode(pInput->pData, pEntry->data, &pEntry->dataLen, numOfCols, pEntry->compressed);

   pBuf->useSize += pEntry->dataLen;
+
+  atomic_add_fetch_64(&pHandle->cachedSize, pEntry->dataLen);
+  atomic_add_fetch_64(&gDataSinkStat.cachedSize, pEntry->dataLen);
 }

 static bool allocBuf(SDataDispatchHandle* pDispatcher, const SInputData* pInput, SDataDispatchBuf* pBuf) {
@@ -156,6 +162,7 @@ static void getDataLength(SDataSinkHandle* pHandle, int32_t* pLen, bool* pQueryE
   taosFreeQitem(pBuf);
   *pLen = ((SDataCacheEntry*)(pDispatcher->nextOutput.pData))->dataLen;
   *pQueryEnd = pDispatcher->queryEnd;
+  qDebug("got data len %d, row num %d in sink", *pLen, ((SDataCacheEntry*)(pDispatcher->nextOutput.pData))->numOfRows);
 }

 static int32_t getDataBlock(SDataSinkHandle* pHandle, SOutputData* pOutput) {
@@ -173,6 +180,10 @@ static int32_t getDataBlock(SDataSinkHandle* pHandle, SOutputData* pOutput) {
   pOutput->numOfRows = pEntry->numOfRows;
   pOutput->numOfCols = pEntry->numOfCols;
   pOutput->compressed = pEntry->compressed;
+
+  atomic_sub_fetch_64(&pDispatcher->cachedSize, pEntry->dataLen);
+  atomic_sub_fetch_64(&gDataSinkStat.cachedSize, pEntry->dataLen);
+
   taosMemoryFreeClear(pDispatcher->nextOutput.pData);  // todo persistent
   pOutput->bufStatus = updateStatus(pDispatcher);
   taosThreadMutexLock(&pDispatcher->mutex);
@@ -180,11 +191,14 @@ static int32_t getDataBlock(SDataSinkHandle* pHandle, SOutputData* pOutput) {
   pOutput->useconds = pDispatcher->useconds;
   pOutput->precision = pDispatcher->pSchema->precision;
   taosThreadMutexUnlock(&pDispatcher->mutex);
+
+  return TSDB_CODE_SUCCESS;
 }

 static int32_t destroyDataSinker(SDataSinkHandle* pHandle) {
   SDataDispatchHandle* pDispatcher = (SDataDispatchHandle*)pHandle;
+  atomic_sub_fetch_64(&gDataSinkStat.cachedSize, pDispatcher->cachedSize);
   taosMemoryFreeClear(pDispatcher->nextOutput.pData);
   while (!taosQueueEmpty(pDispatcher->pDataBlocks)) {
     SDataDispatchBuf* pBuf = NULL;
@@ -197,6 +211,13 @@ static int32_t destroyDataSinker(SDataSinkHandle* pHandle) {
   return TSDB_CODE_SUCCESS;
 }

+int32_t getCacheSize(struct SDataSinkHandle* pHandle, uint64_t* size) {
+  SDataDispatchHandle* pDispatcher = (SDataDispatchHandle*)pHandle;
+
+  *size = atomic_load_64(&pDispatcher->cachedSize);
+  return TSDB_CODE_SUCCESS;
+}
+
 int32_t createDataDispatcher(SDataSinkManager* pManager, const SDataSinkNode* pDataSink, DataSinkHandle* pHandle) {
   SDataDispatchHandle* dispatcher = taosMemoryCalloc(1, sizeof(SDataDispatchHandle));
   if (NULL == dispatcher) {
@@ -208,6 +229,7 @@ int32_t createDataDispatcher(SDataSinkManager* pManager, const SDataSinkNode* pD
   dispatcher->sink.fGetLen = getDataLength;
   dispatcher->sink.fGetData = getDataBlock;
   dispatcher->sink.fDestroy = destroyDataSinker;
+  dispatcher->sink.fGetCacheSize = getCacheSize;
   dispatcher->pManager = pManager;
   dispatcher->pSchema = pDataSink->pInputDataBlockDesc;
   dispatcher->status = DS_BUF_EMPTY;
diff --git a/source/libs/executor/src/dataSinkMgt.c b/source/libs/executor/src/dataSinkMgt.c
index 64206fc10aac0ab9835d65333322657a0ccaecbf..9016ca274a3567d8cbc45d522d5e1cb93b176e68 100644
--- a/source/libs/executor/src/dataSinkMgt.c
+++ b/source/libs/executor/src/dataSinkMgt.c
@@ -19,6 +19,7 @@
 #include "planner.h"

 static SDataSinkManager gDataSinkManager = {0};
+SDataSinkStat gDataSinkStat = {0};

 int32_t dsDataSinkMgtInit(SDataSinkMgtCfg *cfg) {
   gDataSinkManager.cfg = *cfg;
@@ -26,6 +27,13 @@ int32_t dsDataSinkMgtInit(SDataSinkMgtCfg *cfg) {
   return 0;  // to avoid compiler eror
 }

+int32_t dsDataSinkGetCacheSize(SDataSinkStat *pStat) {
+  pStat->cachedSize = atomic_load_64(&gDataSinkStat.cachedSize);
+
+  return 0;
+}
+
+
 int32_t dsCreateDataSinker(const SDataSinkNode *pDataSink, DataSinkHandle* pHandle) {
   if (QUERY_NODE_PHYSICAL_PLAN_DISPATCH == nodeType(pDataSink)) {
     return createDataDispatcher(&gDataSinkManager, pDataSink, pHandle);
@@ -53,6 +61,12 @@ int32_t dsGetDataBlock(DataSinkHandle handle, SOutputData* pOutput) {
   return pHandleImpl->fGetData(pHandleImpl, pOutput);
 }

+int32_t dsGetCacheSize(DataSinkHandle handle, uint64_t *pSize) {
+  SDataSinkHandle* pHandleImpl = (SDataSinkHandle*)handle;
+  return pHandleImpl->fGetCacheSize(pHandleImpl, pSize);
+}
+
+
 void dsScheduleProcess(void* ahandle, void* pItem) {
   // todo
 }
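[Note: the dispatcher above keeps two counters in lockstep — a per-handle cachedSize and the global gDataSinkStat.cachedSize — adjusted with atomic add/sub on every cache, fetch, and destroy, so they can never drift apart. A sketch of that two-level accounting using C11 atomics in place of TDengine's atomic_*_fetch_64 wrappers; Demo* names are hypothetical:]

```c
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Global counter, like gDataSinkStat.cachedSize. */
static _Atomic uint64_t gSinkCachedSize;

typedef struct { _Atomic uint64_t cachedSize; } SDemoDispatcher;

static void demoCacheBlock(SDemoDispatcher *d, uint64_t dataLen) {
  atomic_fetch_add(&d->cachedSize, dataLen);   // per-handle accounting
  atomic_fetch_add(&gSinkCachedSize, dataLen); // global accounting, same delta
}

static void demoFetchBlock(SDemoDispatcher *d, uint64_t dataLen) {
  atomic_fetch_sub(&d->cachedSize, dataLen);
  atomic_fetch_sub(&gSinkCachedSize, dataLen);
}

int main(void) {
  SDemoDispatcher d = {0};
  demoCacheBlock(&d, 512);
  demoCacheBlock(&d, 256);
  demoFetchBlock(&d, 512);
  printf("handle: %llu, global: %llu\n",
         (unsigned long long)atomic_load(&d.cachedSize),
         (unsigned long long)atomic_load(&gSinkCachedSize));
  return 0;
}
```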
diff --git a/source/libs/executor/src/executil.c b/source/libs/executor/src/executil.c
index 5a02547f58aa4cf73c5297dda771ba0900bce141..1c45e38b632d29340472c1955d2b097377478ce0 100644
--- a/source/libs/executor/src/executil.c
+++ b/source/libs/executor/src/executil.c
@@ -101,20 +101,8 @@ void resetResultRowInfo(STaskRuntimeEnv *pRuntimeEnv, SResultRowInfo *pResultRow
   pResultRowInfo->size = 0;
 }

-int32_t numOfClosedResultRows(SResultRowInfo *pResultRowInfo) {
-  int32_t i = 0;
-//  while (i < pResultRowInfo->size && pResultRowInfo->pResult[i]->closed) {
-//    ++i;
-//  }
-
-  return i;
-}
-
 void closeAllResultRows(SResultRowInfo *pResultRowInfo) {
-  assert(pResultRowInfo->size >= 0 && pResultRowInfo->capacity >= pResultRowInfo->size);
-
-  for (int32_t i = 0; i < pResultRowInfo->size; ++i) {
-  }
+// do nothing
 }

 bool isResultRowClosed(SResultRow* pRow) {
@@ -233,7 +221,7 @@ void initGroupedResultInfo(SGroupResInfo* pGroupResInfo, SHashObj* pHashmap, int

 void initMultiResInfoFromArrayList(SGroupResInfo* pGroupResInfo, SArray* pArrayList) {
   if (pGroupResInfo->pRows != NULL) {
-    taosArrayDestroy(pGroupResInfo->pRows);
+    taosArrayDestroyP(pGroupResInfo->pRows, taosMemoryFree);
   }

   pGroupResInfo->pRows = pArrayList;
@@ -258,32 +246,6 @@ int32_t getNumOfTotalRes(SGroupResInfo* pGroupResInfo) {
   return (int32_t) taosArrayGetSize(pGroupResInfo->pRows);
 }

-static int64_t getNumOfResultWindowRes(STaskRuntimeEnv* pRuntimeEnv, SResultRowPosition *pos, int32_t* rowCellInfoOffset) {
-  STaskAttr* pQueryAttr = pRuntimeEnv->pQueryAttr;
-  ASSERT(0);
-
-  for (int32_t j = 0; j < pQueryAttr->numOfOutput; ++j) {
-    int32_t functionId = 0;//pQueryAttr->pExpr1[j].base.functionId;
-
-    /*
-     * ts, tag, tagprj function can not decide the output number of current query
-     * the number of output result is decided by main output
-     */
-    if (functionId == FUNCTION_TS || functionId == FUNCTION_TAG || functionId == FUNCTION_TAGPRJ) {
-      continue;
-    }
-
-//    SResultRowEntryInfo *pResultInfo = getResultCell(pResultRow, j, rowCellInfoOffset);
-//    assert(pResultInfo != NULL);
-//
-//    if (pResultInfo->numOfRes > 0) {
-//      return pResultInfo->numOfRes;
-//    }
-  }
-
-  return 0;
-}
-
 static int32_t tableResultComparFn(const void *pLeft, const void *pRight, void *param) {
   int32_t left = *(int32_t *)pLeft;
   int32_t right = *(int32_t *)pRight;
@@ -381,7 +343,7 @@ static int32_t mergeIntoGroupResultImplRv(STaskRuntimeEnv *pRuntimeEnv, SGroupRe
     }

-    int64_t num = getNumOfResultWindowRes(pRuntimeEnv, &pResultRowCell->pos, rowCellInfoOffset);
+    int64_t num = 0;//getNumOfResultWindowRes(pRuntimeEnv, &pResultRowCell->pos, rowCellInfoOffset);
     if (num <= 0) {
       continue;
     }
diff --git a/source/libs/executor/src/executor.c b/source/libs/executor/src/executor.c
index 2811c8dce84918bc61339597150b15f56690b99d..fd62849e56805c22472a5ea438140ec655e20df0 100644
--- a/source/libs/executor/src/executor.c
+++ b/source/libs/executor/src/executor.c
@@ -19,7 +19,7 @@
 #include "tdatablock.h"
 #include "vnode.h"

-static int32_t doSetStreamBlock(SOperatorInfo* pOperator, void* input, size_t numOfBlocks, int32_t type, char* id) {
+static int32_t doSetStreamBlock(SOperatorInfo* pOperator, void* input, size_t numOfBlocks, int32_t type, bool assignUid, char* id) {
   ASSERT(pOperator != NULL);
   if (pOperator->operatorType != QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN) {
     if (pOperator->numOfDownstream == 0) {
@@ -32,11 +32,12 @@ static int32_t doSetStreamBlock(SOperatorInfo* pOperator, void* input, size_t nu
       return TSDB_CODE_QRY_APP_ERROR;
     }
     pOperator->status = OP_NOT_OPENED;
-    return doSetStreamBlock(pOperator->pDownstream[0], input, numOfBlocks, type, id);
+    return doSetStreamBlock(pOperator->pDownstream[0], input, numOfBlocks, type, assignUid, id);
   } else {
     pOperator->status = OP_NOT_OPENED;

     SStreamBlockScanInfo* pInfo = pOperator->info;
+    pInfo->assignBlockUid = assignUid;

     // the block type can not be changed in the streamscan operators
     if (pInfo->blockType == 0) {
@@ -67,11 +68,11 @@ static int32_t doSetStreamBlock(SOperatorInfo* pOperator, void* input, size_t nu
   }
 }

-int32_t qSetStreamInput(qTaskInfo_t tinfo, const void* input, int32_t type) {
-  return qSetMultiStreamInput(tinfo, input, 1, type);
+int32_t qSetStreamInput(qTaskInfo_t tinfo, const void* input, int32_t type, bool assignUid) {
+  return qSetMultiStreamInput(tinfo, input, 1, type, assignUid);
 }

-int32_t qSetMultiStreamInput(qTaskInfo_t tinfo, const void* pBlocks, size_t numOfBlocks, int32_t type) {
+int32_t qSetMultiStreamInput(qTaskInfo_t tinfo, const void* pBlocks, size_t numOfBlocks, int32_t type, bool assignUid) {
   if (tinfo == NULL) {
     return TSDB_CODE_QRY_APP_ERROR;
   }
@@ -82,7 +83,7 @@ int32_t qSetMultiStreamInput(qTaskInfo_t tinfo, const void* pBlocks, size_t numO

   SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo;

-  int32_t code = doSetStreamBlock(pTaskInfo->pRoot, (void**)pBlocks, numOfBlocks, type, GET_TASKID(pTaskInfo));
+  int32_t code = doSetStreamBlock(pTaskInfo->pRoot, (void**)pBlocks, numOfBlocks, type, assignUid, GET_TASKID(pTaskInfo));
   if (code != TSDB_CODE_SUCCESS) {
     qError("%s failed to set the stream block data", GET_TASKID(pTaskInfo));
   } else {
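[Note: doSetStreamBlock above now threads the new assignUid flag down the operator tree until the stream-scan leaf records it (pInfo->assignBlockUid in the real code). A simplified model of that recursion with types reduced to the bare minimum — SDemoOp is hypothetical, not the real SOperatorInfo:]

```c
#include <stdbool.h>
#include <stdio.h>

typedef struct SDemoOp {
  bool isStreamScan;
  bool assignBlockUid;
  struct SDemoOp *downstream;
} SDemoOp;

/* Non-scan operators forward the flag to their first downstream child; the
 * stream-scan leaf stores it for later use when assigning block->groupId. */
static int demoSetStreamBlock(SDemoOp *op, bool assignUid) {
  if (!op->isStreamScan) {
    if (op->downstream == NULL) return -1;  // no scan operator below: error
    return demoSetStreamBlock(op->downstream, assignUid);
  }
  op->assignBlockUid = assignUid;
  return 0;
}

int main(void) {
  SDemoOp scan = {.isStreamScan = true};
  SDemoOp proj = {.isStreamScan = false, .downstream = &scan};
  demoSetStreamBlock(&proj, true);
  printf("scan.assignBlockUid = %d\n", scan.assignBlockUid);
  return 0;
}
```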
diff --git a/source/libs/executor/src/executorimpl.c b/source/libs/executor/src/executorimpl.c
index 96b3b9f9e361601a61d5e16fe91f103e92001553..8388253975fe69c7c328003adbcb61975bd66fbd 100644
--- a/source/libs/executor/src/executorimpl.c
+++ b/source/libs/executor/src/executorimpl.c
@@ -28,13 +28,13 @@
 #include "ttime.h"

 #include "executorimpl.h"
+#include "index.h"
 #include "query.h"
 #include "tcompare.h"
 #include "tcompression.h"
 #include "thash.h"
 #include "ttypes.h"
 #include "vnode.h"
-#include "index.h"

 #define IS_MAIN_SCAN(runtime) ((runtime)->scanFlag == MAIN_SCAN)
 #define IS_REVERSE_SCAN(runtime) ((runtime)->scanFlag == REVERSE_SCAN)
@@ -87,7 +87,7 @@ static UNUSED_FUNC void* u_realloc(void* p, size_t __size) {
 #define realloc u_realloc
 #endif

-#define CLEAR_QUERY_STATUS(q, st) ((q)->status &= (~(st)))
+#define CLEAR_QUERY_STATUS(q, st)   ((q)->status &= (~(st)))
 //#define GET_NUM_OF_TABLEGROUP(q) taosArrayGetSize((q)->tableqinfoGroupInfo.pGroupList)
 #define QUERY_IS_INTERVAL_QUERY(_q) ((_q)->interval.interval > 0)
@@ -239,36 +239,6 @@ static bool hasNull(SColumn* pColumn, SColumnDataAgg* pStatis) {
   return true;
 }

-static void prepareResultListBuffer(SResultRowInfo* pResultRowInfo, jmp_buf env) {
-  int64_t newCapacity = 0;
-
-  // more than the capacity, reallocate the resources
-  if (pResultRowInfo->size < pResultRowInfo->capacity) {
-    return;
-  }
-
-  if (pResultRowInfo->capacity > 10000) {
-    newCapacity = (int64_t)(pResultRowInfo->capacity * 1.25);
-  } else {
-    newCapacity = (int64_t)(pResultRowInfo->capacity * 1.5);
-  }
-
-  if (newCapacity <= pResultRowInfo->capacity) {
-    newCapacity += 4;
-  }
-
-  char* p = taosMemoryRealloc(pResultRowInfo->pPosition, newCapacity * sizeof(SResultRowPosition));
-  if (p == NULL) {
-    longjmp(env, TSDB_CODE_OUT_OF_MEMORY);
-  }
-
-  pResultRowInfo->pPosition = (SResultRowPosition*)p;
-
-  int32_t inc = (int32_t)newCapacity - pResultRowInfo->capacity;
-  memset(&pResultRowInfo->pPosition[pResultRowInfo->capacity], 0, sizeof(SResultRowPosition) * inc);
-  pResultRowInfo->capacity = (int32_t)newCapacity;
-}
-
 static bool chkResultRowFromKey(STaskRuntimeEnv* pRuntimeEnv, SResultRowInfo* pResultRowInfo, char* pData,
                                 int16_t bytes, bool masterscan, uint64_t uid) {
   bool existed = false;
@@ -306,7 +276,7 @@ static bool chkResultRowFromKey(STaskRuntimeEnv* pRuntimeEnv, SResultRowInfo* pR
   return p1 != NULL;
 }

-SResultRow* getNewResultRow_rv(SDiskbasedBuf* pResultBuf, int64_t tableGroupId, int32_t interBufSize) {
+SResultRow* getNewResultRow(SDiskbasedBuf* pResultBuf, int64_t tableGroupId, int32_t interBufSize) {
   SFilePage* pData = NULL;

   // in the first scan, new space needed for results
@@ -375,6 +345,8 @@ SResultRow* doSetResultOutBufByKey(SDiskbasedBuf* pResultBuf, SResultRowInfo* pR
   // In case of group by column query, the required SResultRow object must be existInCurrentResusltRowInfo in the
   // pResultRowInfo object.
   if (p1 != NULL) {
+
+    // todo
     pResult = getResultRowByPos(pResultBuf, p1);
     ASSERT(pResult->pageId == p1->pageId && pResult->offset == p1->offset);
   }
@@ -383,34 +355,28 @@ SResultRow* doSetResultOutBufByKey(SDiskbasedBuf* pResultBuf, SResultRowInfo* pR
   // 1. close current opened time window
   if (pResultRowInfo->cur.pageId != -1 && ((pResult == NULL) || (pResult->pageId != pResultRowInfo->cur.pageId &&
                                                                  pResult->offset != pResultRowInfo->cur.offset))) {
-    // todo extract function
     SResultRowPosition pos = pResultRowInfo->cur;
-    SFilePage*         pPage = getBufPage(pResultBuf, pos.pageId);
-    SResultRow*        pRow = (SResultRow*)((char*)pPage + pos.offset);
-    closeResultRow(pRow);
+    SFilePage*         pPage = getBufPage(pResultBuf, pos.pageId);
     releaseBufPage(pResultBuf, pPage);
   }

   // allocate a new buffer page
-  prepareResultListBuffer(pResultRowInfo, pTaskInfo->env);
   if (pResult == NULL) {
     ASSERT(pSup->resultRowSize > 0);
-    pResult = getNewResultRow_rv(pResultBuf, groupId, pSup->resultRowSize);
+    pResult = getNewResultRow(pResultBuf, groupId, pSup->resultRowSize);
+
     initResultRow(pResult);

     // add a new result set for a new group
     SResultRowPosition pos = {.pageId = pResult->pageId, .offset = pResult->offset};
-    taosHashPut(pSup->pResultRowHashTable, pSup->keyBuf, GET_RES_WINDOW_KEY_LEN(bytes), &pos,
-                sizeof(SResultRowPosition));
+    taosHashPut(pSup->pResultRowHashTable, pSup->keyBuf, GET_RES_WINDOW_KEY_LEN(bytes), &pos, sizeof(SResultRowPosition));
   }

   // 2. set the new time window to be the new active time window
-  pResultRowInfo->pPosition[pResultRowInfo->size++] =
-      (SResultRowPosition){.pageId = pResult->pageId, .offset = pResult->offset};
   pResultRowInfo->cur = (SResultRowPosition){.pageId = pResult->pageId, .offset = pResult->offset};

   // too many time window in query
-  if (pResultRowInfo->size > MAX_INTERVAL_TIME_WINDOW) {
+  if (taosHashGetSize(pSup->pResultRowHashTable) > MAX_INTERVAL_TIME_WINDOW) {
     longjmp(pTaskInfo->env, TSDB_CODE_QRY_TOO_MANY_TIMEWINDOW);
   }
@@ -585,11 +551,13 @@ void initExecTimeWindowInfo(SColumnInfoData* pColData, STimeWindow* pQueryWindow
   colDataAppendInt64(pColData, 4, &pQueryWindow->ekey);
 }

+
 void doApplyFunctions(SExecTaskInfo* taskInfo, SqlFunctionCtx* pCtx, STimeWindow* pWin, SColumnInfoData* pTimeWindowData,
                       int32_t offset, int32_t forwardStep, TSKEY* tsCol, int32_t numOfTotal,
                       int32_t numOfOutput, int32_t order) {
   for (int32_t k = 0; k < numOfOutput; ++k) {
     // keep it temporarily
+    // todo no need this??
     bool    hasAgg = pCtx[k].input.colDataAggIsSet;
     int32_t numOfRows = pCtx[k].input.numOfRows;
     int32_t startOffset = pCtx[k].input.startRowIndex;
@@ -609,7 +577,8 @@ void doApplyFunctions(SExecTaskInfo* taskInfo, SqlFunctionCtx* pCtx, STimeWindow

     if (fmIsWindowPseudoColumnFunc(pCtx[k].functionId)) {
       SResultRowEntryInfo* pEntryInfo = GET_RES_INFO(&pCtx[k]);
-      char* p = GET_ROWCELL_INTERBUF(pEntryInfo);
+
+      char* p = GET_ROWCELL_INTERBUF(pEntryInfo);

       SColumnInfoData idata = {0};
       idata.info.type = TSDB_DATA_TYPE_BIGINT;
@@ -620,22 +589,23 @@ void doApplyFunctions(SExecTaskInfo* taskInfo, SqlFunctionCtx* pCtx, STimeWindow
       SScalarParam tw = {.numOfRows = 5, .columnData = pTimeWindowData};
       pCtx[k].sfp.process(&tw, 1, &out);
       pEntryInfo->numOfRes = 1;
-      continue;
-    }

-    int32_t code = TSDB_CODE_SUCCESS;
-    if (functionNeedToExecute(&pCtx[k]) && pCtx[k].fpSet.process != NULL) {
-      code = pCtx[k].fpSet.process(&pCtx[k]);
-      if (code != TSDB_CODE_SUCCESS) {
-        qError("%s apply functions error, code: %s", GET_TASKID(taskInfo), tstrerror(code));
-        taskInfo->code = code;
-        longjmp(taskInfo->env, code);
+    } else {
+      int32_t code = TSDB_CODE_SUCCESS;
+      if (functionNeedToExecute(&pCtx[k]) && pCtx[k].fpSet.process != NULL) {
+        code = pCtx[k].fpSet.process(&pCtx[k]);
+
+        if (code != TSDB_CODE_SUCCESS) {
+          qError("%s apply functions error, code: %s", GET_TASKID(taskInfo), tstrerror(code));
+          taskInfo->code = code;
+          longjmp(taskInfo->env, code);
+        }
       }
-    }

-    // restore it
-    pCtx[k].input.colDataAggIsSet = hasAgg;
-    pCtx[k].input.startRowIndex = startOffset;
-    pCtx[k].input.numOfRows = numOfRows;
+      // restore it
+      pCtx[k].input.colDataAggIsSet = hasAgg;
+      pCtx[k].input.startRowIndex = startOffset;
+      pCtx[k].input.numOfRows = numOfRows;
+    }
   }
 }
@@ -774,12 +744,14 @@ static int32_t doAggregateImpl(SOperatorInfo* pOperator, TSKEY startTs, SqlFunct
   for (int32_t k = 0; k < pOperator->numOfExprs; ++k) {
     if (functionNeedToExecute(&pCtx[k])) {
       // todo add a dummy funtion to avoid process check
-      if (pCtx[k].fpSet.process != NULL) {
-        int32_t code = pCtx[k].fpSet.process(&pCtx[k]);
-        if (code != TSDB_CODE_SUCCESS) {
-          qError("%s aggregate function error happens, code: %s", GET_TASKID(pOperator->pTaskInfo), tstrerror(code));
-          return code;
-        }
+      if (pCtx[k].fpSet.process == NULL) {
+        continue;
+      }
+
+      int32_t code = pCtx[k].fpSet.process(&pCtx[k]);
+      if (code != TSDB_CODE_SUCCESS) {
+        qError("%s aggregate function error happens, code: %s", GET_TASKID(pOperator->pTaskInfo), tstrerror(code));
+        return code;
       }
     }
   }
@@ -1218,7 +1190,6 @@ static void* destroySqlFunctionCtx(SqlFunctionCtx* pCtx, int32_t numOfOutput) {
       taosVariantDestroy(&pCtx[i].param[j].param);
     }

-    taosVariantDestroy(&pCtx[i].tag);
    taosMemoryFreeClear(pCtx[i].subsidiaries.pCtx);
    taosMemoryFree(pCtx[i].input.pData);
    taosMemoryFree(pCtx[i].input.pColumnDataAgg);
@@ -1248,9 +1219,9 @@ void setTaskKilled(SExecTaskInfo* pTaskInfo) { pTaskInfo->code = TSDB_CODE_TSC_Q
 static bool isCachedLastQuery(STaskAttr* pQueryAttr) {
   for (int32_t i = 0; i < pQueryAttr->numOfOutput; ++i) {
     int32_t functionId = getExprFunctionId(&pQueryAttr->pExpr1[i]);
-    if (functionId == FUNCTION_LAST || functionId == FUNCTION_LAST_DST) {
-      continue;
-    }
+//    if (functionId == FUNCTION_LAST || functionId == FUNCTION_LAST_DST) {
+//      continue;
+//    }

     return false;
   }
@@ -1300,7 +1271,7 @@ static int32_t updateBlockLoadStatus(STaskAttr* pQuery, int32_t status) {

   for (int32_t i = 0; i < pQuery->numOfOutput; ++i) {
     int32_t functionId = getExprFunctionId(&pQuery->pExpr1[i]);
-
+#if 0
     if (functionId == FUNCTION_TS
|| functionId == FUNCTION_TS_DUMMY || functionId == FUNCTION_TAG || functionId == FUNCTION_TAG_DUMMY) { continue; @@ -1311,6 +1282,8 @@ static int32_t updateBlockLoadStatus(STaskAttr* pQuery, int32_t status) { } else { hasOtherFunc = true; } +#endif + } if (hasFirstLastFunc && status == BLK_DATA_NOT_LOAD) { @@ -1786,41 +1759,13 @@ void updateOutputBuf(SOptrBasicInfo* pBInfo, int32_t* bufCapacity, int32_t numOf // set the correct pointer after the memory buffer reallocated. int32_t functionId = pBInfo->pCtx[i].functionId; - +#if 0 if (functionId == FUNCTION_TOP || functionId == FUNCTION_BOTTOM || functionId == FUNCTION_DIFF || functionId == FUNCTION_DERIVATIVE) { // if (i > 0) pBInfo->pCtx[i].pTsOutput = pBInfo->pCtx[i - 1].pOutput; } - } -} - -void copyTsColoum(SSDataBlock* pRes, SqlFunctionCtx* pCtx, int32_t numOfOutput) { - bool needCopyTs = false; - int32_t tsNum = 0; - char* src = NULL; - for (int32_t i = 0; i < numOfOutput; i++) { - int32_t functionId = pCtx[i].functionId; - if (functionId == FUNCTION_DIFF || functionId == FUNCTION_DERIVATIVE) { - needCopyTs = true; - if (i > 0 && pCtx[i - 1].functionId == FUNCTION_TS_DUMMY) { - SColumnInfoData* pColRes = taosArrayGet(pRes->pDataBlock, i - 1); // find ts data - src = pColRes->pData; - } - } else if (functionId == FUNCTION_TS_DUMMY) { - tsNum++; - } - } - - if (!needCopyTs) return; - if (tsNum < 2) return; - if (src == NULL) return; +#endif - for (int32_t i = 0; i < numOfOutput; i++) { - int32_t functionId = pCtx[i].functionId; - if (functionId == FUNCTION_TS_DUMMY) { - SColumnInfoData* pColRes = taosArrayGet(pRes->pDataBlock, i); - memcpy(pColRes->pData, src, pColRes->info.bytes * pRes->info.rows); - } } } @@ -2577,46 +2522,7 @@ int32_t setSDataBlockFromFetchRsp(SSDataBlock* pRes, SLoadRemoteDataInfo* pLoadI int32_t compLen, int32_t numOfOutput, int64_t startTs, uint64_t* total, SArray* pColList) { if (pColList == NULL) { // data from other sources - blockDataEnsureCapacity(pRes, numOfRows); - - int32_t dataLen = *(int32_t*)pData; - pData += sizeof(int32_t); - - pRes->info.groupId = *(uint64_t*)pData; - pData += sizeof(uint64_t); - - int32_t* colLen = (int32_t*)pData; - - char* pStart = pData + sizeof(int32_t) * numOfOutput; - for (int32_t i = 0; i < numOfOutput; ++i) { - colLen[i] = htonl(colLen[i]); - ASSERT(colLen[i] >= 0); - - SColumnInfoData* pColInfoData = taosArrayGet(pRes->pDataBlock, i); - if (IS_VAR_DATA_TYPE(pColInfoData->info.type)) { - pColInfoData->varmeta.length = colLen[i]; - pColInfoData->varmeta.allocLen = colLen[i]; - - memcpy(pColInfoData->varmeta.offset, pStart, sizeof(int32_t) * numOfRows); - pStart += sizeof(int32_t) * numOfRows; - - if (colLen[i] > 0) { - pColInfoData->pData = taosMemoryMalloc(colLen[i]); - } - } else { - memcpy(pColInfoData->nullbitmap, pStart, BitmapLen(numOfRows)); - pStart += BitmapLen(numOfRows); - } - - if (colLen[i] > 0) { - memcpy(pColInfoData->pData, pStart, colLen[i]); - } - - // TODO setting this flag to true temporarily so aggregate function on stable will - // examine NULL value for non-primary key column - pColInfoData->hasNull = true; - pStart += colLen[i]; - } + blockCompressDecode(pRes, numOfOutput, numOfRows, pData); } else { // extract data according to pColList ASSERT(numOfOutput == taosArrayGetSize(pColList)); char* pStart = pData; @@ -2758,6 +2664,7 @@ static SSDataBlock* concurrentlyLoadRemoteDataImpl(SOperatorInfo* pOperator, SEx pExchangeInfo->loadInfo.totalRows); pDataInfo->status = EX_SOURCE_DATA_EXHAUSTED; completed += 1; + taosMemoryFreeClear(pDataInfo->pRsp); 
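/*
 * NOTE (annotation, not part of the patch): the taosMemoryFreeClear(pDataInfo->pRsp)
 * calls added throughout this function and in seqLoadRemoteData() plug a leak by
 * releasing the fetch response on every exit path: source exhausted, decode
 * failure, next-request failure, and the normal return. taosMemoryFreeClear()
 * frees and NULLs the pointer in one step, so repeating it per branch is safe.
 * A hedged sketch of the same idea using the single-exit goto-cleanup idiom
 * (illustrative names, not the executor's actual API):
 *
 *   #include <stdlib.h>
 *
 *   static int handleFetchRsp(void** ppRsp) {
 *     int code = 0;
 *     // ... decode *ppRsp and kick off the next fetch request ...
 *     if (code != 0) goto _end;        // error paths fall through to one free
 *     // ... success-path bookkeeping ...
 *   _end:
 *     free(*ppRsp);
 *     *ppRsp = NULL;                   // mirror FreeClear: no dangling pointer
 *     return code;
 *   }
 *
 * The patch instead frees at each branch, which keeps the early continue
 * statements in the polling loop intact.
 */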
continue; } @@ -2765,6 +2672,7 @@ static SSDataBlock* concurrentlyLoadRemoteDataImpl(SOperatorInfo* pOperator, SEx code = setSDataBlockFromFetchRsp(pExchangeInfo->pResult, pLoadInfo, pTableRsp->numOfRows, pTableRsp->data, pTableRsp->compLen, pTableRsp->numOfCols, startTs, &pDataInfo->totalRows, NULL); if (code != 0) { + taosMemoryFreeClear(pDataInfo->pRsp); goto _error; } @@ -2785,10 +2693,12 @@ static SSDataBlock* concurrentlyLoadRemoteDataImpl(SOperatorInfo* pOperator, SEx pDataInfo->status = EX_SOURCE_DATA_NOT_READY; code = doSendFetchDataRequest(pExchangeInfo, pTaskInfo, i); if (code != TSDB_CODE_SUCCESS) { + taosMemoryFreeClear(pDataInfo->pRsp); goto _error; } } + taosMemoryFreeClear(pDataInfo->pRsp); return pExchangeInfo->pResult; } @@ -2891,6 +2801,7 @@ static SSDataBlock* seqLoadRemoteData(SOperatorInfo* pOperator) { pDataInfo->status = EX_SOURCE_DATA_EXHAUSTED; pExchangeInfo->current += 1; + taosMemoryFreeClear(pDataInfo->pRsp); continue; } @@ -2916,6 +2827,7 @@ static SSDataBlock* seqLoadRemoteData(SOperatorInfo* pOperator) { } pOperator->resultInfo.totalRows += pRes->info.rows; + taosMemoryFreeClear(pDataInfo->pRsp); return pExchangeInfo->pResult; } } @@ -3377,7 +3289,7 @@ int32_t getTableScanInfo(SOperatorInfo* pOperator, int32_t* order, int32_t* scan // todo add more information about exchange operation int32_t type = pOperator->operatorType; if (type == QUERY_NODE_PHYSICAL_PLAN_EXCHANGE || type == QUERY_NODE_PHYSICAL_PLAN_SYSTABLE_SCAN || - type == QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN) { + type == QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN || type == QUERY_NODE_PHYSICAL_PLAN_TAG_SCAN) { *order = TSDB_ORDER_ASC; *scanFlag = MAIN_SCAN; return TSDB_CODE_SUCCESS; @@ -3441,14 +3353,14 @@ static int32_t doOpenAggregateOptr(SOperatorInfo* pOperator) { } #if 0 // test for encode/decode result info - if(pOperator->encodeResultRow){ + if(pOperator->fpSet.encodeResultRow){ char *result = NULL; int32_t length = 0; - SAggSupporter *pSup = &pAggInfo->aggSup; - pOperator->encodeResultRow(pOperator, pSup, pInfo, &result, &length); + pOperator->fpSet.encodeResultRow(pOperator, &result, &length); + SAggSupporter* pSup = &pAggInfo->aggSup; taosHashClear(pSup->pResultRowHashTable); pInfo->resultRowInfo.size = 0; - pOperator->decodeResultRow(pOperator, pSup, pInfo, result, length); + pOperator->fpSet.decodeResultRow(pOperator, result); if(result){ taosMemoryFree(result); } @@ -3491,17 +3403,25 @@ static SSDataBlock* getAggregateResult(SOperatorInfo* pOperator) { return (rows == 0) ? 
NULL : pInfo->pRes; } -void aggEncodeResultRow(SOperatorInfo* pOperator, SAggSupporter* pSup, SOptrBasicInfo* pInfo, char** result, - int32_t* length) { - int32_t size = taosHashGetSize(pSup->pResultRowHashTable); - size_t keyLen = sizeof(uint64_t) * 2; // estimate the key length - int32_t totalSize = sizeof(int32_t) + size * (sizeof(int32_t) + keyLen + sizeof(int32_t) + pSup->resultRowSize); - *result = taosMemoryCalloc(1, totalSize); +int32_t aggEncodeResultRow(SOperatorInfo* pOperator, char** result, int32_t* length) { + if (result == NULL || length == NULL) { + return TSDB_CODE_TSC_INVALID_INPUT; + } + SOptrBasicInfo* pInfo = (SOptrBasicInfo*)(pOperator->info); + SAggSupporter* pSup = (SAggSupporter*)POINTER_SHIFT(pOperator->info, sizeof(SOptrBasicInfo)); + int32_t size = taosHashGetSize(pSup->pResultRowHashTable); + size_t keyLen = sizeof(uint64_t) * 2; // estimate the key length + int32_t totalSize = + sizeof(int32_t) + sizeof(int32_t) + size * (sizeof(int32_t) + keyLen + sizeof(int32_t) + pSup->resultRowSize); + + *result = (char*)taosMemoryCalloc(1, totalSize); if (*result == NULL) { - longjmp(pOperator->pTaskInfo->env, TSDB_CODE_OUT_OF_MEMORY); + return TSDB_CODE_OUT_OF_MEMORY; } - *(int32_t*)(*result) = size; + int32_t offset = sizeof(int32_t); + *(int32_t*)(*result + offset) = size; + offset += sizeof(int32_t); // prepare memory SResultRowPosition* pos = &pInfo->resultRowInfo.cur; @@ -3523,12 +3443,11 @@ void aggEncodeResultRow(SOperatorInfo* pOperator, SAggSupporter* pSup, SOptrBasi // recalculate the result size int32_t realTotalSize = offset + sizeof(int32_t) + keyLen + sizeof(int32_t) + pSup->resultRowSize; if (realTotalSize > totalSize) { - char* tmp = taosMemoryRealloc(*result, realTotalSize); + char* tmp = (char*)taosMemoryRealloc(*result, realTotalSize); if (tmp == NULL) { - terrno = TSDB_CODE_OUT_OF_MEMORY; taosMemoryFree(*result); *result = NULL; - longjmp(pOperator->pTaskInfo->env, TSDB_CODE_OUT_OF_MEMORY); + return TSDB_CODE_OUT_OF_MEMORY; } else { *result = tmp; } @@ -3548,30 +3467,34 @@ void aggEncodeResultRow(SOperatorInfo* pOperator, SAggSupporter* pSup, SOptrBasi pIter = taosHashIterate(pSup->pResultRowHashTable, pIter); } - if (length) { - *length = offset; - } - return; + *(int32_t*)(*result) = offset; + *length = offset; + + return TDB_CODE_SUCCESS; } -bool aggDecodeResultRow(SOperatorInfo* pOperator, SAggSupporter* pSup, SOptrBasicInfo* pInfo, char* result, - int32_t length) { - if (!result || length <= 0) { - return false; +int32_t aggDecodeResultRow(SOperatorInfo* pOperator, char* result) { + if (result == NULL) { + return TSDB_CODE_TSC_INVALID_INPUT; } + SOptrBasicInfo* pInfo = (SOptrBasicInfo*)(pOperator->info); + SAggSupporter* pSup = (SAggSupporter*)POINTER_SHIFT(pOperator->info, sizeof(SOptrBasicInfo)); // int32_t size = taosHashGetSize(pSup->pResultRowHashTable); - int32_t count = *(int32_t*)(result); - + int32_t length = *(int32_t*)(result); int32_t offset = sizeof(int32_t); + + int32_t count = *(int32_t*)(result + offset); + offset += sizeof(int32_t); + while (count-- > 0 && length > offset) { int32_t keyLen = *(int32_t*)(result + offset); offset += sizeof(int32_t); uint64_t tableGroupId = *(uint64_t*)(result + offset); - SResultRow* resultRow = getNewResultRow_rv(pSup->pResultBuf, tableGroupId, pSup->resultRowSize); + SResultRow* resultRow = getNewResultRow(pSup->pResultBuf, tableGroupId, pSup->resultRowSize); if (!resultRow) { - longjmp(pOperator->pTaskInfo->env, TSDB_CODE_TSC_INVALID_INPUT); + return TSDB_CODE_TSC_INVALID_INPUT; } // add a new 
result set for a new group @@ -3581,7 +3504,7 @@ bool aggDecodeResultRow(SOperatorInfo* pOperator, SAggSupporter* pSup, SOptrBasi offset += keyLen; int32_t valueLen = *(int32_t*)(result + offset); if (valueLen != pSup->resultRowSize) { - longjmp(pOperator->pTaskInfo->env, TSDB_CODE_TSC_INVALID_INPUT); + return TSDB_CODE_TSC_INVALID_INPUT; } offset += sizeof(int32_t); int32_t pageId = resultRow->pageId; @@ -3592,17 +3515,13 @@ bool aggDecodeResultRow(SOperatorInfo* pOperator, SAggSupporter* pSup, SOptrBasi offset += valueLen; initResultRow(resultRow); - prepareResultListBuffer(&pInfo->resultRowInfo, pOperator->pTaskInfo->env); - // pInfo->resultRowInfo.cur = pInfo->resultRowInfo.size; - // pInfo->resultRowInfo.pPosition[pInfo->resultRowInfo.size++] = - // (SResultRowPosition){.pageId = resultRow->pageId, .offset = resultRow->offset}; pInfo->resultRowInfo.cur = (SResultRowPosition){.pageId = resultRow->pageId, .offset = resultRow->offset}; } if (offset != length) { - longjmp(pOperator->pTaskInfo->env, TSDB_CODE_TSC_INVALID_INPUT); + return TSDB_CODE_TSC_INVALID_INPUT; } - return true; + return TDB_CODE_SUCCESS; } enum { @@ -3887,18 +3806,6 @@ static SSDataBlock* doFill(SOperatorInfo* pOperator) { } } -// todo set the attribute of query scan count -static int32_t getNumOfScanTimes(STaskAttr* pQueryAttr) { - for (int32_t i = 0; i < pQueryAttr->numOfOutput; ++i) { - int32_t functionId = getExprFunctionId(&pQueryAttr->pExpr1[i]); - if (functionId == FUNCTION_STDDEV || functionId == FUNCTION_PERCT) { - return 2; - } - } - - return 1; -} - static void destroyOperatorInfo(SOperatorInfo* pOperator) { if (pOperator == NULL) { return; @@ -3960,11 +3867,11 @@ int32_t doInitAggInfoSup(SAggSupporter* pAggSup, SqlFunctionCtx* pCtx, int32_t n return TSDB_CODE_OUT_OF_MEMORY; } - uint32_t defaultPgsz = 0; + uint32_t defaultPgsz = 0; uint32_t defaultBufsz = 0; getBufferPgSize(pAggSup->resultRowSize, &defaultPgsz, &defaultBufsz); - int32_t code = createDiskbasedBuf(&pAggSup->pResultBuf, defaultPgsz, defaultBufsz, pKey, TD_TMP_DIR_PATH); + int32_t code = createDiskbasedBuf(&pAggSup->pResultBuf, defaultPgsz, defaultBufsz, pKey, TD_TMP_DIR_PATH); if (code != TSDB_CODE_SUCCESS) { return code; } @@ -4001,7 +3908,7 @@ void initResultSizeInfo(SOperatorInfo* pOperator, int32_t numOfRows) { } } -//static STableQueryInfo* initTableQueryInfo(const STableListInfo* pTableListInfo) { +// static STableQueryInfo* initTableQueryInfo(const STableListInfo* pTableListInfo) { // int32_t size = taosArrayGetSize(pTableListInfo->pTableList); // if (size == 0) { // return NULL; @@ -4434,9 +4341,11 @@ static SExecTaskInfo* createExecTaskInfo(uint64_t queryId, uint64_t taskId, EOPT } static tsdbReaderT doCreateDataReader(STableScanPhysiNode* pTableScanNode, SReadHandle* pHandle, - STableListInfo* pTableGroupInfo, uint64_t queryId, uint64_t taskId, SNode* pTagCond); + STableListInfo* pTableGroupInfo, uint64_t queryId, uint64_t taskId, + SNode* pTagCond); -static int32_t getTableList(void* metaHandle, int32_t tableType, uint64_t tableUid, STableListInfo* pListInfo, SNode* pTagCond); +static int32_t getTableList(void* metaHandle, int32_t tableType, uint64_t tableUid, STableListInfo* pListInfo, + SNode* pTagCond); static SArray* extractTableIdList(const STableListInfo* pTableGroupInfo); static SArray* extractColumnInfo(SNodeList* pNodeList); @@ -4473,7 +4382,8 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo if (QUERY_NODE_PHYSICAL_PLAN_TABLE_SCAN == type) { STableScanPhysiNode* pTableScanNode = 
(STableScanPhysiNode*)pPhyNode; - tsdbReaderT pDataReader = doCreateDataReader(pTableScanNode, pHandle, pTableListInfo, (uint64_t)queryId, taskId, pTagCond); + tsdbReaderT pDataReader = + doCreateDataReader(pTableScanNode, pHandle, pTableListInfo, (uint64_t)queryId, taskId, pTagCond); if (pDataReader == NULL && terrno != 0) { return NULL; } @@ -4492,9 +4402,8 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo } else if (QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN == type) { SScanPhysiNode* pScanPhyNode = (SScanPhysiNode*)pPhyNode; // simple child table. STableScanPhysiNode* pTableScanNode = (STableScanPhysiNode*)pPhyNode; - - int32_t numOfCols = 0; - + STimeWindowAggSupp twSup = { + .waterMark = pTableScanNode->watermark, .calTrigger = pTableScanNode->triggerType, .maxTs = INT64_MIN}; tsdbReaderT pDataReader = NULL; if (pHandle->vnode) { pDataReader = doCreateDataReader(pTableScanNode, pHandle, pTableListInfo, (uint64_t)queryId, taskId, pTagCond); @@ -4503,24 +4412,16 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo } if (pDataReader == NULL && terrno != 0) { - qDebug("pDataReader is NULL"); + qDebug("%s pDataReader is NULL", GET_TASKID(pTaskInfo)); // return NULL; } else { - qDebug("pDataReader is not NULL"); + qDebug("%s pDataReader is not NULL", GET_TASKID(pTaskInfo)); } + SArray* tableIdList = extractTableIdList(pTableListInfo); - SDataBlockDescNode* pDescNode = pScanPhyNode->node.pOutputDataBlockDesc; - SOperatorInfo* pOperatorDumy = createTableScanOperatorInfo(pTableScanNode, pDataReader, pHandle, NULL, pTaskInfo); + SOperatorInfo* pOperator = createStreamScanOperatorInfo(pDataReader, pHandle, + tableIdList, pTableScanNode, pTaskInfo, &twSup); - SArray* tableIdList = extractTableIdList(pTableListInfo); - - SSDataBlock* pResBlock = createResDataBlock(pDescNode); - SArray* pCols = - extractColMatchInfo(pScanPhyNode->pScanCols, pDescNode, &numOfCols, pTaskInfo, COL_MATCH_FROM_COL_ID); - - SOperatorInfo* pOperator = - createStreamScanOperatorInfo(pHandle->reader, pDataReader, pHandle, pScanPhyNode->uid, pResBlock, pCols, - tableIdList, pTaskInfo, pScanPhyNode->node.pConditions, pOperatorDumy); taosArrayDestroy(tableIdList); return pOperator; } else if (QUERY_NODE_PHYSICAL_PLAN_SYSTABLE_SCAN == type) { @@ -4602,8 +4503,8 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo pOptr = createGroupOperatorInfo(ops[0], pExprInfo, num, pResBlock, pColList, pAggNode->node.pConditions, pScalarExprInfo, numOfScalarExpr, pTaskInfo); } else { - pOptr = createAggregateOperatorInfo(ops[0], pExprInfo, num, pResBlock, pScalarExprInfo, numOfScalarExpr, - pTaskInfo); + pOptr = + createAggregateOperatorInfo(ops[0], pExprInfo, num, pResBlock, pScalarExprInfo, numOfScalarExpr, pTaskInfo); } } else if (QUERY_NODE_PHYSICAL_PLAN_INTERVAL == type || QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERVAL == type) { SIntervalPhysiNode* pIntervalPhyNode = (SIntervalPhysiNode*)pPhyNode; @@ -4619,7 +4520,20 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo .precision = ((SColumnNode*)pIntervalPhyNode->window.pTspk)->node.resType.precision}; STimeWindowAggSupp as = {.waterMark = pIntervalPhyNode->window.watermark, - .calTrigger = pIntervalPhyNode->window.triggerType}; + .calTrigger = pIntervalPhyNode->window.triggerType, + .maxTs = INT64_MIN, + .winMap = NULL,}; + if (isSmaStream(pIntervalPhyNode->window.triggerType)) { + if (FLT_LESS(pIntervalPhyNode->window.filesFactor, 1.000000)) { + as.calTrigger = 
STREAM_TRIGGER_AT_ONCE_SMA; + } else { + _hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_TIMESTAMP); + as.winMap = taosHashInit(64, hashFn, true, HASH_NO_LOCK); + as.waterMark = getSmaWaterMark(interval.interval, + pIntervalPhyNode->window.filesFactor); + as.calTrigger = STREAM_TRIGGER_WINDOW_CLOSE_SMA; + } + } int32_t tsSlotId = ((SColumnNode*)pIntervalPhyNode->window.pTspk)->slotId; pOptr = createIntervalOperatorInfo(ops[0], pExprInfo, num, pResBlock, &interval, tsSlotId, &as, pTaskInfo); @@ -4706,6 +4620,18 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo return pOptr; } +int32_t compareTimeWindow(const void* p1, const void* p2, const void* param) { + const SQueryTableDataCond* pCond = param; + const STimeWindow* pWin1 = p1; + const STimeWindow* pWin2 = p2; + if (pCond->order == TSDB_ORDER_ASC) { + return pWin1->skey - pWin2->skey; + } else if (pCond->order == TSDB_ORDER_DESC) { + return pWin2->skey - pWin1->skey; + } + return 0; +} + int32_t initQueryTableDataCond(SQueryTableDataCond* pCond, const STableScanPhysiNode* pTableScanNode) { pCond->loadExternalRows = false; @@ -4717,16 +4643,30 @@ int32_t initQueryTableDataCond(SQueryTableDataCond* pCond, const STableScanPhysi return terrno; } - pCond->twindow = pTableScanNode->scanRange; + // pCond->twindow = pTableScanNode->scanRange; + // TODO: get it from stable scan node + pCond->numOfTWindows = 1; + pCond->twindows = taosMemoryCalloc(pCond->numOfTWindows, sizeof(STimeWindow)); + pCond->twindows[0] = pTableScanNode->scanRange; #if 1 // todo work around a problem, remove it later - if ((pCond->order == TSDB_ORDER_ASC && pCond->twindow.skey > pCond->twindow.ekey) || - (pCond->order == TSDB_ORDER_DESC && pCond->twindow.skey < pCond->twindow.ekey)) { - TSWAP(pCond->twindow.skey, pCond->twindow.ekey); + for (int32_t i = 0; i < pCond->numOfTWindows; ++i) { + if ((pCond->order == TSDB_ORDER_ASC && pCond->twindows[i].skey > pCond->twindows[i].ekey) || + (pCond->order == TSDB_ORDER_DESC && pCond->twindows[i].skey < pCond->twindows[i].ekey)) { + TSWAP(pCond->twindows[i].skey, pCond->twindows[i].ekey); + } } #endif + for (int32_t i = 0; i < pCond->numOfTWindows; ++i) { + if ((pCond->order == TSDB_ORDER_ASC && pCond->twindows[i].skey > pCond->twindows[i].ekey) || + (pCond->order == TSDB_ORDER_DESC && pCond->twindows[i].skey < pCond->twindows[i].ekey)) { + TSWAP(pCond->twindows[i].skey, pCond->twindows[i].ekey); + } + } + taosqsort(pCond->twindows, pCond->numOfTWindows, sizeof(STimeWindow), pCond, compareTimeWindow); + pCond->type = BLOCK_LOAD_OFFSET_SEQ_ORDER; // pCond->type = pTableScanNode->scanFlag; @@ -4885,27 +4825,31 @@ SArray* extractColMatchInfo(SNodeList* pNodeList, SDataBlockDescNode* pOutputNod return pList; } -int32_t getTableList(void* metaHandle, int32_t tableType, uint64_t tableUid, - STableListInfo* pListInfo, SNode* pTagCond) { +int32_t getTableList(void* metaHandle, int32_t tableType, uint64_t tableUid, STableListInfo* pListInfo, + SNode* pTagCond) { int32_t code = TSDB_CODE_SUCCESS; pListInfo->pTableList = taosArrayInit(8, sizeof(STableKeyInfo)); if (tableType == TSDB_SUPER_TABLE) { - if(pTagCond){ + if (pTagCond) { + SIndexMetaArg metaArg = {.metaEx = metaHandle, .metaHandle = tsdbGetIdx(metaHandle), .suid = tableUid}; + SArray* res = taosArrayInit(8, sizeof(uint64_t)); - code = doFilterTag(pTagCond, res); + code = doFilterTag(pTagCond, &metaArg, res); if (code != TSDB_CODE_SUCCESS) { - qError("doFilterTag error:%d", code); + qError("failed to get tableIds, reason: %s, suid: %" 
PRIu64 "", tstrerror(code), tableUid); taosArrayDestroy(res); terrno = code; return code; + } else { + qDebug("sucess to get tableIds, size: %d, suid: %" PRIu64 "", (int)taosArrayGetSize(res), tableUid); } - for(int i = 0; i < taosArrayGetSize(res); i++){ + for (int i = 0; i < taosArrayGetSize(res); i++) { STableKeyInfo info = {.lastKey = TSKEY_INITIAL_VAL, .uid = *(uint64_t*)taosArrayGet(res, i)}; taosArrayPush(pListInfo->pTableList, &info); } taosArrayDestroy(res); - }else{ + } else { code = tsdbGetAllTableList(metaHandle, tableUid, pListInfo->pTableList); } } else { // Create one table group. @@ -4930,7 +4874,8 @@ SArray* extractTableIdList(const STableListInfo* pTableGroupInfo) { tsdbReaderT doCreateDataReader(STableScanPhysiNode* pTableScanNode, SReadHandle* pHandle, STableListInfo* pTableListInfo, uint64_t queryId, uint64_t taskId, SNode* pTagCond) { - int32_t code = getTableList(pHandle->meta, pTableScanNode->scan.tableType, pTableScanNode->scan.uid, pTableListInfo, pTagCond); + int32_t code = + getTableList(pHandle->meta, pTableScanNode->scan.tableType, pTableScanNode->scan.uid, pTableListInfo, pTagCond); if (code != TSDB_CODE_SUCCESS) { goto _error; } @@ -4954,6 +4899,93 @@ _error: return NULL; } +int32_t encodeOperator(SOperatorInfo* ops, char** result, int32_t* length) { + int32_t code = TDB_CODE_SUCCESS; + char* pCurrent = NULL; + int32_t currLength = 0; + if (ops->fpSet.encodeResultRow) { + if (result == NULL || length == NULL) { + return TSDB_CODE_TSC_INVALID_INPUT; + } + code = ops->fpSet.encodeResultRow(ops, &pCurrent, &currLength); + + if (code != TDB_CODE_SUCCESS) { + if (*result != NULL) { + taosMemoryFree(*result); + *result = NULL; + } + return code; + } + + if (*result == NULL) { + *result = (char*)taosMemoryCalloc(1, currLength + sizeof(int32_t)); + if (*result == NULL) { + taosMemoryFree(pCurrent); + return TSDB_CODE_OUT_OF_MEMORY; + } + memcpy(*result + sizeof(int32_t), pCurrent, currLength); + *(int32_t*)(*result) = currLength + sizeof(int32_t); + } else { + int32_t sizePre = *(int32_t*)(*result); + char* tmp = (char*)taosMemoryRealloc(*result, sizePre + currLength); + if (tmp == NULL) { + taosMemoryFree(pCurrent); + taosMemoryFree(*result); + *result = NULL; + return TSDB_CODE_OUT_OF_MEMORY; + } + *result = tmp; + memcpy(*result + sizePre, pCurrent, currLength); + *(int32_t*)(*result) += currLength; + } + taosMemoryFree(pCurrent); + *length = *(int32_t*)(*result); + } + + for (int32_t i = 0; i < ops->numOfDownstream; ++i) { + code = encodeOperator(ops->pDownstream[i], result, length); + if (code != TDB_CODE_SUCCESS) { + return code; + } + } + return TDB_CODE_SUCCESS; +} + +int32_t decodeOperator(SOperatorInfo* ops, char* result, int32_t length) { + int32_t code = TDB_CODE_SUCCESS; + if (ops->fpSet.decodeResultRow) { + if (result == NULL) { + return TSDB_CODE_TSC_INVALID_INPUT; + } + ASSERT(length == *(int32_t*)result); + char* data = result + sizeof(int32_t); + code = ops->fpSet.decodeResultRow(ops, data); + if (code != TDB_CODE_SUCCESS) { + return code; + } + + int32_t totalLength = *(int32_t*)result; + int32_t dataLength = *(int32_t*)data; + + if (totalLength == dataLength + sizeof(int32_t)) { // the last data + result = NULL; + length = 0; + } else { + result += dataLength; + *(int32_t*)(result) = totalLength - dataLength; + length = totalLength - dataLength; + } + } + + for (int32_t i = 0; i < ops->numOfDownstream; ++i) { + code = decodeOperator(ops->pDownstream[i], result, length); + if (code != TDB_CODE_SUCCESS) { + return code; + } + } + return 
TDB_CODE_SUCCESS; +} + int32_t createExecTaskInfoImpl(SSubplan* pPlan, SExecTaskInfo** pTaskInfo, SReadHandle* pHandle, uint64_t taskId, EOPTR_EXEC_MODEL model) { uint64_t queryId = pPlan->id.queryId; @@ -4965,8 +4997,8 @@ int32_t createExecTaskInfoImpl(SSubplan* pPlan, SExecTaskInfo** pTaskInfo, SRead goto _complete; } - (*pTaskInfo)->pRoot = - createOperatorTree(pPlan->pNode, *pTaskInfo, pHandle, queryId, taskId, &(*pTaskInfo)->tableqinfoList, pPlan->pTagCond); + (*pTaskInfo)->pRoot = createOperatorTree(pPlan->pNode, *pTaskInfo, pHandle, queryId, taskId, + &(*pTaskInfo)->tableqinfoList, pPlan->pTagCond); if (NULL == (*pTaskInfo)->pRoot) { code = terrno; goto _complete; @@ -5151,20 +5183,6 @@ int32_t getOperatorExplainExecInfo(SOperatorInfo* operatorInfo, SExplainExecInfo return TSDB_CODE_SUCCESS; } -int32_t initCacheSupporter(SCatchSupporter* pCatchSup, size_t rowSize, const char* pKey, const char* pDir) { - pCatchSup->keySize = sizeof(int64_t) + sizeof(int64_t) + sizeof(TSKEY); - pCatchSup->pKeyBuf = taosMemoryCalloc(1, pCatchSup->keySize); - _hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY); - pCatchSup->pWindowHashTable = taosHashInit(10000, hashFn, true, HASH_NO_LOCK); - if (pCatchSup->pKeyBuf == NULL || pCatchSup->pWindowHashTable == NULL) { - return TSDB_CODE_OUT_OF_MEMORY; - } - - int32_t pageSize = rowSize * 32; - int32_t bufSize = pageSize * 4096; - return createDiskbasedBuf(&pCatchSup->pDataBuf, pageSize, bufSize, pKey, pDir); -} - int32_t initStreamAggSupporter(SStreamAggSupporter* pSup, const char* pKey) { pSup->keySize = sizeof(int64_t) + sizeof(TSKEY); pSup->pKeyBuf = taosMemoryCalloc(1, pSup->keySize); @@ -5182,5 +5200,20 @@ int32_t initStreamAggSupporter(SStreamAggSupporter* pSup, const char* pKey) { if (bufSize <= pageSize) { bufSize = pageSize * 4; } - return createDiskbasedBuf(&pSup->pResultBuf, pageSize, bufSize, pKey, "/tmp/"); + return createDiskbasedBuf(&pSup->pResultBuf, pageSize, bufSize, pKey, TD_TMP_DIR_PATH); +} + +int64_t getSmaWaterMark(int64_t interval, double filesFactor) { + int64_t waterMark = 0; + ASSERT(FLT_GREATEREQUAL(filesFactor,0.000000)); + waterMark = -1 * filesFactor; + return waterMark; +} + +bool isSmaStream(int8_t triggerType) { + if (triggerType == STREAM_TRIGGER_AT_ONCE || + triggerType == STREAM_TRIGGER_WINDOW_CLOSE) { + return false; + } + return true; } diff --git a/source/libs/executor/src/groupoperator.c b/source/libs/executor/src/groupoperator.c index 18f24d76405d076dd0eed9b80d425dca9dc90c04..3691024096d869a5d8527c0a95cbf5837e558a9c 100644 --- a/source/libs/executor/src/groupoperator.c +++ b/source/libs/executor/src/groupoperator.c @@ -110,9 +110,11 @@ static bool groupKeyCompare(SArray* pGroupCols, SArray* pGroupColVals, SSDataBlo return true; } -void recordNewGroupKeys(SArray* pGroupCols, SArray* pGroupColVals, SSDataBlock* pBlock, int32_t rowIndex, int32_t numOfGroupCols) { +void recordNewGroupKeys(SArray* pGroupCols, SArray* pGroupColVals, SSDataBlock* pBlock, int32_t rowIndex) { SColumnDataAgg* pColAgg = NULL; + size_t numOfGroupCols = taosArrayGetSize(pGroupCols); + for (int32_t i = 0; i < numOfGroupCols; ++i) { SColumn* pCol = taosArrayGet(pGroupCols, i); SColumnInfoData* pColInfoData = taosArrayGet(pBlock->pDataBlock, pCol->slotId); @@ -208,7 +210,7 @@ static void doHashGroupbyAgg(SOperatorInfo* pOperator, SSDataBlock* pBlock) { for (int32_t j = 0; j < pBlock->info.rows; ++j) { // Compare with the previous row of this column, and do not set the output buffer again if they are identical. 
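/*
 * NOTE (annotation, not part of the patch): the reworked aggEncodeResultRow /
 * aggDecodeResultRow above, plus the new encodeOperator / decodeOperator
 * walkers, share a simple length-prefixed layout:
 *
 *   [int32 totalLen][int32 rowCount]
 *     { [int32 keyLen][key bytes][int32 valueLen][value bytes] } x rowCount
 *
 * and report failures through return codes (TSDB_CODE_TSC_INVALID_INPUT,
 * TSDB_CODE_OUT_OF_MEMORY) instead of longjmp, so callers can unwind normally.
 * A minimal sketch of walking such a buffer, assuming it is well formed
 * (illustrative only):
 *
 *   #include <stdint.h>
 *   #include <string.h>
 *
 *   static int walkEncodedRows(const char* buf) {
 *     int32_t totalLen, count, offset = 0;
 *     memcpy(&totalLen, buf + offset, sizeof(int32_t)); offset += sizeof(int32_t);
 *     memcpy(&count,    buf + offset, sizeof(int32_t)); offset += sizeof(int32_t);
 *     while (count-- > 0 && offset < totalLen) {
 *       int32_t keyLen, valLen;
 *       memcpy(&keyLen, buf + offset, sizeof(int32_t));
 *       offset += (int32_t)sizeof(int32_t) + keyLen;    // skip key (groupId + window key)
 *       memcpy(&valLen, buf + offset, sizeof(int32_t));
 *       offset += (int32_t)sizeof(int32_t) + valLen;    // skip serialized result row
 *     }
 *     return (offset == totalLen) ? 0 : -1;             // same sanity check as decode
 *   }
 */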
if (!pInfo->isInit) { - recordNewGroupKeys(pInfo->pGroupCols, pInfo->pGroupColVals, pBlock, j, numOfGroupCols); + recordNewGroupKeys(pInfo->pGroupCols, pInfo->pGroupColVals, pBlock, j); pInfo->isInit = true; num++; continue; @@ -223,7 +225,7 @@ static void doHashGroupbyAgg(SOperatorInfo* pOperator, SSDataBlock* pBlock) { // The first row of a new block does not belongs to the previous existed group if (j == 0) { num++; - recordNewGroupKeys(pInfo->pGroupCols, pInfo->pGroupColVals, pBlock, j, numOfGroupCols); + recordNewGroupKeys(pInfo->pGroupCols, pInfo->pGroupColVals, pBlock, j); continue; } @@ -238,7 +240,7 @@ static void doHashGroupbyAgg(SOperatorInfo* pOperator, SSDataBlock* pBlock) { // assign the group keys or user input constant values if required doAssignGroupKeys(pCtx, pOperator->numOfExprs, pBlock->info.rows, rowIndex); - recordNewGroupKeys(pInfo->pGroupCols, pInfo->pGroupColVals, pBlock, j, numOfGroupCols); + recordNewGroupKeys(pInfo->pGroupCols, pInfo->pGroupColVals, pBlock, j); num = 1; } @@ -318,7 +320,20 @@ static SSDataBlock* hashGroupbyAggregate(SOperatorInfo* pOperator) { // updateNumOfRowsInResultRows(pInfo->binfo.pCtx, pOperator->numOfExprs, &pInfo->binfo.resultRowInfo, // pInfo->binfo.rowCellInfoOffset); // } - +#if 0 + if(pOperator->fpSet.encodeResultRow){ + char *result = NULL; + int32_t length = 0; + pOperator->fpSet.encodeResultRow(pOperator, &result, &length); + SAggSupporter* pSup = &pInfo->aggSup; + taosHashClear(pSup->pResultRowHashTable); + pInfo->binfo.resultRowInfo.size = 0; + pOperator->fpSet.decodeResultRow(pOperator, result); + if(result){ + taosMemoryFree(result); + } + } +#endif blockDataEnsureCapacity(pRes, pOperator->resultInfo.capacity); initGroupedResultInfo(&pInfo->groupResInfo, pInfo->aggSup.pResultRowHashTable, 0); @@ -394,9 +409,8 @@ static void doHashPartition(SOperatorInfo* pOperator, SSDataBlock* pBlock) { SPartitionOperatorInfo* pInfo = pOperator->info; - int32_t numOfGroupCols = taosArrayGetSize(pInfo->pGroupCols); for (int32_t j = 0; j < pBlock->info.rows; ++j) { - recordNewGroupKeys(pInfo->pGroupCols, pInfo->pGroupColVals, pBlock, j, numOfGroupCols); + recordNewGroupKeys(pInfo->pGroupCols, pInfo->pGroupColVals, pBlock, j); int32_t len = buildGroupKeys(pInfo->keyBuf, pInfo->pGroupColVals); SDataGroupInfo* pGInfo = NULL; diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index 3131c27e28ce5007eb2a4b2dd324bba29fc3b3c8..b9507f9f06afa99cd24f4775714722c8cb644aaf 100644 --- a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -160,7 +160,7 @@ static bool overlapWithTimeWindow(SInterval* pInterval, SDataBlockInfo* pBlockIn return false; } -static void addTagPseudoColumnData(STableScanInfo* pTableScanInfo, SSDataBlock* pBlock); +static void addTagPseudoColumnData(SReadHandle *pHandle, SExprInfo* pPseudoExpr, int32_t numOfPseudoExpr, SSDataBlock* pBlock); static int32_t loadDataBlock(SOperatorInfo* pOperator, STableScanInfo* pTableScanInfo, SSDataBlock* pBlock, uint32_t* status) { @@ -252,7 +252,7 @@ static int32_t loadDataBlock(SOperatorInfo* pOperator, STableScanInfo* pTableSca // currently only the tbname pseudo column if (pTableScanInfo->numOfPseudoExpr > 0) { - addTagPseudoColumnData(pTableScanInfo, pBlock); + addTagPseudoColumnData(&pTableScanInfo->readHandle, pTableScanInfo->pPseudoExpr, pTableScanInfo->numOfPseudoExpr, pBlock); } int64_t st = taosGetTimestampMs(); @@ -276,58 +276,62 @@ static void prepareForDescendingScan(STableScanInfo* pTableScanInfo, 
SqlFunction switchCtxOrder(pCtx, numOfOutput); // setupQueryRangeForReverseScan(pTableScanInfo); - STimeWindow* pTWindow = &pTableScanInfo->cond.twindow; - TSWAP(pTWindow->skey, pTWindow->ekey); pTableScanInfo->cond.order = TSDB_ORDER_DESC; + for (int32_t i = 0; i < pTableScanInfo->cond.numOfTWindows; ++i) { + STimeWindow* pTWindow = &pTableScanInfo->cond.twindows[i]; + TSWAP(pTWindow->skey, pTWindow->ekey); + } + SQueryTableDataCond *pCond = &pTableScanInfo->cond; + taosqsort(pCond->twindows, + pCond->numOfTWindows, + sizeof(STimeWindow), + pCond, + compareTimeWindow); } -void addTagPseudoColumnData(STableScanInfo* pTableScanInfo, SSDataBlock* pBlock) { +void addTagPseudoColumnData(SReadHandle *pHandle, SExprInfo* pPseudoExpr, int32_t numOfPseudoExpr, SSDataBlock* pBlock) { // currently only the tbname pseudo column - if (pTableScanInfo->numOfPseudoExpr == 0) { + if (numOfPseudoExpr == 0) { return; } SMetaReader mr = {0}; - metaReaderInit(&mr, pTableScanInfo->readHandle.meta, 0); + metaReaderInit(&mr, pHandle->meta, 0); metaGetTableEntryByUid(&mr, pBlock->info.uid); - for (int32_t j = 0; j < pTableScanInfo->numOfPseudoExpr; ++j) { - SExprInfo* pExpr = &pTableScanInfo->pPseudoExpr[j]; + for (int32_t j = 0; j < numOfPseudoExpr; ++j) { + SExprInfo* pExpr = &pPseudoExpr[j]; int32_t dstSlotId = pExpr->base.resSchema.slotId; SColumnInfoData* pColInfoData = taosArrayGet(pBlock->pDataBlock, dstSlotId); + colInfoDataEnsureCapacity(pColInfoData, 0, pBlock->info.rows); + colInfoDataCleanup(pColInfoData, pBlock->info.rows); int32_t functionId = pExpr->pExpr->_function.functionId; // this is to handle the tbname if (fmIsScanPseudoColumnFunc(functionId)) { - setTbNameColData(pTableScanInfo->readHandle.meta, pBlock, pColInfoData, functionId); + setTbNameColData(pHandle->meta, pBlock, pColInfoData, functionId); } else { // these are tags - const char* p = NULL; - if (pColInfoData->info.type == TSDB_DATA_TYPE_JSON) { - const uint8_t* tmp = mr.me.ctbEntry.pTags; - - char* data = taosMemoryCalloc(kvRowLen(tmp) + 1, 1); - if (data == NULL) { - metaReaderClear(&mr); - qError("doTagScan calloc error:%d", kvRowLen(tmp) + 1); - return; - } - - *data = TSDB_DATA_TYPE_JSON; - memcpy(data + 1, tmp, kvRowLen(tmp)); - p = data; - } else { - p = metaGetTableTagVal(&mr.me, pExpr->base.pParam[0].pCol->colId); + STagVal tagVal = {0}; + tagVal.cid = pExpr->base.pParam[0].pCol->colId; + const char *p = metaGetTableTagVal(&mr.me, pColInfoData->info.type, &tagVal); + + char *data = NULL; + if(pColInfoData->info.type != TSDB_DATA_TYPE_JSON && p != NULL){ + data = tTagValToData((const STagVal *)p, false); + }else { + data = (char*)p; } for (int32_t i = 0; i < pBlock->info.rows; ++i) { - colDataAppend(pColInfoData, i, p, (p == NULL)); + colDataAppend(pColInfoData, i, data, (data == NULL)); } - if (pColInfoData->info.type == TSDB_DATA_TYPE_JSON) { - taosMemoryFree((void*)p); + if (data && (pColInfoData->info.type != TSDB_DATA_TYPE_JSON) && p != NULL && + IS_VAR_DATA_TYPE(((const STagVal*)p)->type)) { + taosMemoryFree(data); } } } @@ -371,8 +375,7 @@ static SSDataBlock* doTableScanImpl(SOperatorInfo* pOperator) { longjmp(pOperator->pTaskInfo->env, code); } - int32_t numOfGroupCols = taosArrayGetSize(pTableScanInfo->pGroupCols); - recordNewGroupKeys(pTableScanInfo->pGroupCols, pTableScanInfo->pGroupColVals, pBlock, 0, numOfGroupCols); + recordNewGroupKeys(pTableScanInfo->pGroupCols, pTableScanInfo->pGroupColVals, pBlock, 0); int32_t len = buildGroupKeys(pTableScanInfo->keyBuf, pTableScanInfo->pGroupColVals); uint64_t *groupId = 
taosHashGet(pTableScanInfo->pGroupSet, pTableScanInfo->keyBuf, len); @@ -395,7 +398,6 @@ static SSDataBlock* doTableScanImpl(SOperatorInfo* pOperator) { pOperator->cost.totalCost = pTableScanInfo->readRecorder.elapsedTime; return pBlock; } - return NULL; } @@ -410,9 +412,15 @@ static SSDataBlock* doTableScan(SOperatorInfo* pOperator) { // do the ascending order traverse in the first place. while (pTableScanInfo->scanTimes < pTableScanInfo->scanInfo.numOfAsc) { - SSDataBlock* p = doTableScanImpl(pOperator); - if (p != NULL) { - return p; + while (pTableScanInfo->curTWinIdx < pTableScanInfo->cond.numOfTWindows) { + SSDataBlock* p = doTableScanImpl(pOperator); + if (p != NULL) { + return p; + } + pTableScanInfo->curTWinIdx += 1; + if (pTableScanInfo->curTWinIdx < pTableScanInfo->cond.numOfTWindows) { + tsdbResetReadHandle(pTableScanInfo->dataReader, &pTableScanInfo->cond, pTableScanInfo->curTWinIdx); + } } pTableScanInfo->scanTimes += 1; @@ -420,14 +428,14 @@ static SSDataBlock* doTableScan(SOperatorInfo* pOperator) { if (pTableScanInfo->scanTimes < pTableScanInfo->scanInfo.numOfAsc) { setTaskStatus(pTaskInfo, TASK_NOT_COMPLETED); pTableScanInfo->scanFlag = REPEAT_SCAN; - - STimeWindow* pWin = &pTableScanInfo->cond.twindow; - qDebug("%s start to repeat ascending order scan data blocks due to query func required, qrange:%" PRId64 - "-%" PRId64, - GET_TASKID(pTaskInfo), pWin->skey, pWin->ekey); - + qDebug("%s start to repeat ascending order scan data blocks due to query func required", GET_TASKID(pTaskInfo)); + for (int32_t i = 0; i < pTableScanInfo->cond.numOfTWindows; ++i) { + STimeWindow* pWin = &pTableScanInfo->cond.twindows[i]; + qDebug("%s\t qrange:%" PRId64 "-%" PRId64, GET_TASKID(pTaskInfo), pWin->skey, pWin->ekey); + } // do prepare for the next round table scan operation - tsdbResetReadHandle(pTableScanInfo->dataReader, &pTableScanInfo->cond); + tsdbResetReadHandle(pTableScanInfo->dataReader, &pTableScanInfo->cond, 0); + pTableScanInfo->curTWinIdx = 0; } } @@ -435,31 +443,40 @@ static SSDataBlock* doTableScan(SOperatorInfo* pOperator) { if (pTableScanInfo->scanTimes < total) { if (pTableScanInfo->cond.order == TSDB_ORDER_ASC) { prepareForDescendingScan(pTableScanInfo, pTableScanInfo->pCtx, pTableScanInfo->numOfOutput); - tsdbResetReadHandle(pTableScanInfo->dataReader, &pTableScanInfo->cond); + tsdbResetReadHandle(pTableScanInfo->dataReader, &pTableScanInfo->cond, 0); + pTableScanInfo->curTWinIdx = 0; } - STimeWindow* pWin = &pTableScanInfo->cond.twindow; - qDebug("%s start to descending order scan data blocks due to query func required, qrange:%" PRId64 "-%" PRId64, - GET_TASKID(pTaskInfo), pWin->skey, pWin->ekey); - + qDebug("%s start to descending order scan data blocks due to query func required", GET_TASKID(pTaskInfo)); + for (int32_t i = 0; i < pTableScanInfo->cond.numOfTWindows; ++i) { + STimeWindow* pWin = &pTableScanInfo->cond.twindows[i]; + qDebug("%s\t qrange:%" PRId64 "-%" PRId64, GET_TASKID(pTaskInfo), pWin->skey, pWin->ekey); + } while (pTableScanInfo->scanTimes < total) { - SSDataBlock* p = doTableScanImpl(pOperator); - if (p != NULL) { - return p; + while (pTableScanInfo->curTWinIdx < pTableScanInfo->cond.numOfTWindows) { + SSDataBlock* p = doTableScanImpl(pOperator); + if (p != NULL) { + return p; + } + pTableScanInfo->curTWinIdx += 1; + if (pTableScanInfo->curTWinIdx < pTableScanInfo->cond.numOfTWindows) { + tsdbResetReadHandle(pTableScanInfo->dataReader, &pTableScanInfo->cond, pTableScanInfo->curTWinIdx); + } } pTableScanInfo->scanTimes += 1; - if 
(pTableScanInfo->scanTimes < pTableScanInfo->scanInfo.numOfAsc) { + if (pTableScanInfo->scanTimes < total) { setTaskStatus(pTaskInfo, TASK_NOT_COMPLETED); pTableScanInfo->scanFlag = REPEAT_SCAN; - qDebug("%s start to repeat descending order scan data blocks due to query func required, qrange:%" PRId64 - "-%" PRId64, - GET_TASKID(pTaskInfo), pTaskInfo->window.skey, pTaskInfo->window.ekey); - - // do prepare for the next round table scan operation - tsdbResetReadHandle(pTableScanInfo->dataReader, &pTableScanInfo->cond); + qDebug("%s start to repeat descending order scan data blocks due to query func required", GET_TASKID(pTaskInfo)); + for (int32_t i = 0; i < pTableScanInfo->cond.numOfTWindows; ++i) { + STimeWindow* pWin = &pTableScanInfo->cond.twindows[i]; + qDebug("%s\t qrange:%" PRId64 "-%" PRId64, GET_TASKID(pTaskInfo), pWin->skey, pWin->ekey); + } + tsdbResetReadHandle(pTableScanInfo->dataReader, &pTableScanInfo->cond, 0); + pTableScanInfo->curTWinIdx = 0; } } } @@ -532,7 +549,7 @@ SOperatorInfo* createTableScanOperatorInfo(STableScanPhysiNode* pTableScanNode, } pInfo->scanInfo = (SScanInfo){.numOfAsc = pTableScanNode->scanSeq[0], .numOfDesc = pTableScanNode->scanSeq[1]}; - // pInfo->scanInfo = (SScanInfo){.numOfAsc = 0, .numOfDesc = 1}; // for debug purpose +// pInfo->scanInfo = (SScanInfo){.numOfAsc = 0, .numOfDesc = 1}; // for debug purpose pInfo->readHandle = *readHandle; pInfo->interval = extractIntervalInfo(pTableScanNode); @@ -543,6 +560,7 @@ SOperatorInfo* createTableScanOperatorInfo(STableScanPhysiNode* pTableScanNode, pInfo->dataReader = pDataReader; pInfo->scanFlag = MAIN_SCAN; pInfo->pColMatchInfo = pColList; + pInfo->curTWinIdx = 0; pOperator->name = "TableScanOperator"; // for debug purpose pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_TABLE_SCAN; @@ -715,8 +733,9 @@ static bool prepareDataScan(SStreamBlockScanInfo* pInfo) { binarySearchForKey, NULL, TSDB_ORDER_ASC); } STableScanInfo* pTableScanInfo = pInfo->pOperatorDumy->info; - pTableScanInfo->cond.twindow = win; - tsdbResetReadHandle(pTableScanInfo->dataReader, &pTableScanInfo->cond); + pTableScanInfo->cond.twindows[0] = win; + pTableScanInfo->curTWinIdx = 0; + tsdbResetReadHandle(pTableScanInfo->dataReader, &pTableScanInfo->cond, 0); pTableScanInfo->scanTimes = 0; return true; } else { @@ -768,91 +787,6 @@ static SSDataBlock* getUpdateDataBlock(SStreamBlockScanInfo* pInfo, bool inverti return NULL; } -void static setSupKeyBuf(SCatchSupporter* pSup, int64_t groupId, int64_t childId, TSKEY ts) { - int64_t* pKey = (int64_t*)pSup->pKeyBuf; - pKey[0] = groupId; - pKey[1] = childId; - pKey[2] = ts; -} - -static int32_t catchWidonwInfo(SSDataBlock* pDataBlock, SCatchSupporter* pSup, int32_t pageId, int32_t tsIndex, - int64_t childId) { - SColumnInfoData* pColDataInfo = taosArrayGet(pDataBlock->pDataBlock, tsIndex); - TSKEY* tsCols = (int64_t*)pColDataInfo->pData; - for (int32_t i = 0; i < pDataBlock->info.rows; i++) { - setSupKeyBuf(pSup, pDataBlock->info.groupId, childId, tsCols[i]); - SWindowPosition* p1 = (SWindowPosition*)taosHashGet(pSup->pWindowHashTable, pSup->pKeyBuf, pSup->keySize); - if (p1 == NULL) { - SWindowPosition pos = {.pageId = pageId, .rowId = i}; - int32_t code = taosHashPut(pSup->pWindowHashTable, pSup->pKeyBuf, pSup->keySize, &pos, sizeof(SWindowPosition)); - if (code != TSDB_CODE_SUCCESS) { - return code; - } - } else { - p1->pageId = pageId; - p1->rowId = i; - } - } - return TSDB_CODE_SUCCESS; -} - -static int32_t catchDatablock(SSDataBlock* pDataBlock, SCatchSupporter* pSup, int32_t tsIndex, 
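/*
 * NOTE (annotation, not part of the patch): doTableScan() above now iterates an
 * array of time windows (cond.twindows / cond.numOfTWindows) with a cursor,
 * curTWinIdx, instead of a single cond.twindow. The control flow is a nested
 * loop: drain the reader for the current window, reset the reader onto the next
 * window, and only bump scanTimes once every window is exhausted. A condensed
 * sketch of that shape (illustrative names; the real code keeps the cursor in
 * operator state so it survives the early returns):
 *
 *   while (scanTimes < numOfScans) {
 *     while (curWin < numOfWindows) {
 *       SSDataBlock* p = readNextBlock(reader);
 *       if (p != NULL) return p;                  // yield one block at a time
 *       curWin += 1;
 *       if (curWin < numOfWindows) resetReader(reader, curWin);
 *     }
 *     scanTimes += 1;
 *     curWin = 0;                                 // repeat passes rewind to window 0
 *     if (scanTimes < numOfScans) resetReader(reader, 0);
 *   }
 *
 * The windows are pre-sorted by the new compareTimeWindow() against cond.order,
 * so the reader always advances monotonically in the scan direction.
 */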
int64_t childId) { - int32_t start = 0; - int32_t stop = 0; - int32_t pageSize = getBufPageSize(pSup->pDataBuf); - while (start < pDataBlock->info.rows) { - blockDataSplitRows(pDataBlock, pDataBlock->info.hasVarCol, start, &stop, pageSize); - SSDataBlock* pDB = blockDataExtractBlock(pDataBlock, start, stop - start + 1); - if (pDB == NULL) { - return terrno; - } - int32_t pageId = -1; - void* pPage = getNewBufPage(pSup->pDataBuf, pDataBlock->info.groupId, &pageId); - if (pPage == NULL) { - blockDataDestroy(pDB); - return terrno; - } - int32_t size = blockDataGetSize(pDB) + sizeof(int32_t) + pDB->info.numOfCols * sizeof(int32_t); - assert(size <= pageSize); - blockDataToBuf(pPage, pDB); - setBufPageDirty(pPage, true); - releaseBufPage(pSup->pDataBuf, pPage); - blockDataDestroy(pDB); - start = stop + 1; - int32_t code = catchWidonwInfo(pDB, pSup, pageId, tsIndex, childId); - if (code != TSDB_CODE_SUCCESS) { - return code; - } - } - return TSDB_CODE_SUCCESS; -} - -static SSDataBlock* getDataFromCatch(SStreamBlockScanInfo* pInfo) { - SSDataBlock* pBlock = pInfo->pUpdateRes; - if (pInfo->updateResIndex < pBlock->info.rows) { - blockDataCleanup(pInfo->pRes); - SCatchSupporter* pCSup = &pInfo->childAggSup; - SColumnInfoData* pColDataInfo = taosArrayGet(pBlock->pDataBlock, 0); - TSKEY* tsCols = (TSKEY*)pColDataInfo->pData; - int32_t size = taosArrayGetSize(pInfo->childIds); - for (int32_t i = 0; i < size; i++) { - int64_t id = *(int64_t*)taosArrayGet(pInfo->childIds, i); - setSupKeyBuf(pCSup, pBlock->info.groupId, id, tsCols[pInfo->updateResIndex]); - SWindowPosition* pos = (SWindowPosition*)taosHashGet(pCSup->pWindowHashTable, pCSup->pKeyBuf, pCSup->keySize); - void* buf = getBufPage(pCSup->pDataBuf, pos->pageId); - SSDataBlock* pDB = createOneDataBlock(pInfo->pRes, false); - blockDataFromBuf(pDB, buf); - SSDataBlock* pSub = blockDataExtractBlock(pDB, pos->rowId, 1); - blockDataMerge(pInfo->pRes, pSub); - blockDataDestroy(pDB); - blockDataDestroy(pSub); - } - pInfo->updateResIndex++; - return pInfo->pRes; - } - return NULL; -} - static SSDataBlock* doStreamBlockScan(SOperatorInfo* pOperator) { // NOTE: this operator does never check if current status is done or not SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; @@ -915,8 +849,15 @@ static SSDataBlock* doStreamBlockScan(SOperatorInfo* pOperator) { pInfo->pRes->info.uid = uid; pInfo->pRes->info.type = STREAM_NORMAL; - int32_t numOfCols = pInfo->pRes->info.numOfCols; - for (int32_t i = 0; i < numOfCols; ++i) { + // for generating rollup SMA result, each time is an independent time serie. + // TODO temporarily used, when the statement of "partition by tbname" is ready, remove this + if (pInfo->assignBlockUid) { + pInfo->pRes->info.groupId = uid; + } else { + pInfo->pRes->info.groupId = groupId; + } + + for (int32_t i = 0; i < taosArrayGetSize(pInfo->pColMatchInfo); ++i) { SColMatchInfo* pColMatchInfo = taosArrayGet(pInfo->pColMatchInfo, i); if (!pColMatchInfo->output) { continue; @@ -946,21 +887,27 @@ static SSDataBlock* doStreamBlockScan(SOperatorInfo* pOperator) { pTaskInfo->code = terrno; return NULL; } + rows = pBlockInfo->rows; + + // currently only the tbname pseudo column + if (pInfo->numOfPseudoExpr > 0) { + addTagPseudoColumnData(&pInfo->readHandle, pInfo->pPseudoExpr, pInfo->numOfPseudoExpr, pInfo->pRes); + } + doFilter(pInfo->pCondition, pInfo->pRes, NULL); blockDataUpdateTsWindow(pInfo->pRes, 0); - break; } // record the scan action. 
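/*
 * NOTE (annotation, not part of the patch): two behaviors in the stream-scan
 * hunk above are easy to miss. Tag pseudo columns (tbname and tag values) are
 * now filled by the shared addTagPseudoColumnData() helper rather than
 * scan-local code, and when assignBlockUid is set the table uid doubles as the
 * block's groupId so each table is an independent series for rollup-SMA
 * results; per the TODO, this goes away once "partition by tbname" is ready.
 * The groupId choice condenses to one line (same logic as the patch):
 *
 *   pInfo->pRes->info.groupId = pInfo->assignBlockUid ? uid : groupId;
 */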
pInfo->numOfExec++; - pInfo->numOfRows += pBlockInfo->rows; + pOperator->resultInfo.totalRows += pBlockInfo->rows; if (rows == 0) { pOperator->status = OP_EXEC_DONE; } else if (pInfo->pUpdateInfo) { - SSDataBlock* upRes = getUpdateDataBlock(pInfo, true); // TODO(liuyao) get invertible from plan + SSDataBlock* upRes = getUpdateDataBlock(pInfo, true); if (upRes) { pInfo->pUpdateRes = upRes; if (upRes->info.type == STREAM_REPROCESS) { @@ -977,10 +924,9 @@ static SSDataBlock* doStreamBlockScan(SOperatorInfo* pOperator) { } } -SOperatorInfo* createStreamScanOperatorInfo(void* streamReadHandle, void* pDataReader, SReadHandle* pHandle, - uint64_t uid, SSDataBlock* pResBlock, SArray* pColList, - SArray* pTableIdList, SExecTaskInfo* pTaskInfo, SNode* pCondition, - SOperatorInfo* pOperatorDumy) { +SOperatorInfo* createStreamScanOperatorInfo(void* pDataReader, SReadHandle* pHandle, + SArray* pTableIdList, STableScanPhysiNode* pTableScanNode, SExecTaskInfo* pTaskInfo, + STimeWindowAggSupp* pTwSup) { SStreamBlockScanInfo* pInfo = taosMemoryCalloc(1, sizeof(SStreamBlockScanInfo)); SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo)); if (pInfo == NULL || pOperator == NULL) { @@ -988,22 +934,28 @@ SOperatorInfo* createStreamScanOperatorInfo(void* streamReadHandle, void* pDataR goto _error; } - STableScanInfo* pSTInfo = (STableScanInfo*)pOperatorDumy->info; + SScanPhysiNode* pScanPhyNode = &pTableScanNode->scan; + + SDataBlockDescNode* pDescNode = pScanPhyNode->node.pOutputDataBlockDesc; + SOperatorInfo* pTableScanDummy = createTableScanOperatorInfo(pTableScanNode, pDataReader, pHandle, NULL, pTaskInfo); + + STableScanInfo* pSTInfo = (STableScanInfo*)pTableScanDummy->info; - int32_t numOfOutput = taosArrayGetSize(pColList); + int32_t numOfCols = 0; + pInfo->pColMatchInfo = extractColMatchInfo(pScanPhyNode->pScanCols, pDescNode, &numOfCols, pTaskInfo, COL_MATCH_FROM_COL_ID); - SArray* pColIds = taosArrayInit(4, sizeof(int16_t)); + int32_t numOfOutput = taosArrayGetSize(pInfo->pColMatchInfo); + SArray* pColIds = taosArrayInit(numOfOutput, sizeof(int16_t)); for (int32_t i = 0; i < numOfOutput; ++i) { - SColMatchInfo* id = taosArrayGet(pColList, i); - int16_t colId = id->colId; + SColMatchInfo* id = taosArrayGet(pInfo->pColMatchInfo, i); + + int16_t colId = id->colId; taosArrayPush(pColIds, &colId); } - pInfo->pColMatchInfo = pColList; - // set the extract column id to streamHandle - tqReadHandleSetColIdList((STqReadHandle*)streamReadHandle, pColIds); - int32_t code = tqReadHandleSetTbUidList(streamReadHandle, pTableIdList); + tqReadHandleSetColIdList((STqReadHandle*)pHandle->reader, pColIds); + int32_t code = tqReadHandleSetTbUidList(pHandle->reader, pTableIdList); if (code != 0) { goto _error; } @@ -1019,37 +971,43 @@ SOperatorInfo* createStreamScanOperatorInfo(void* streamReadHandle, void* pDataR goto _error; } - pInfo->primaryTsIndex = 0; // TODO(liuyao) get it from physical plan - if (pSTInfo->interval.interval > 0) { - pInfo->pUpdateInfo = updateInfoInitP(&pSTInfo->interval, 10000); // TODO(liuyao) get watermark from physical plan + if (isSmaStream(pTableScanNode->triggerType)) { + pTwSup->waterMark = getSmaWaterMark(pSTInfo->interval.interval, + pTableScanNode->filesFactor); + } + pInfo->primaryTsIndex = 0; // pTableScanNode->tsColId; + if (pSTInfo->interval.interval > 0 && pDataReader) { + pInfo->pUpdateInfo = updateInfoInitP(&pSTInfo->interval, pTwSup->waterMark); } else { pInfo->pUpdateInfo = NULL; } - pInfo->readHandle = *pHandle; - pInfo->tableUid = uid; - pInfo->streamBlockReader 
= streamReadHandle; - pInfo->pRes = pResBlock; - pInfo->pCondition = pCondition; - pInfo->pDataReader = pDataReader; - pInfo->scanMode = STREAM_SCAN_FROM_READERHANDLE; - pInfo->pOperatorDumy = pOperatorDumy; - pInfo->interval = pSTInfo->interval; - pInfo->sessionSup = (SessionWindowSupporter){.pStreamAggSup = NULL, .gap = -1}; - - initCacheSupporter(&pInfo->childAggSup, 1024, "StreamFinalInterval", - "/tmp/"); // TODO(liuyao) get row size from phy plan + // create the pseduo columns info + if (pTableScanNode->scan.pScanPseudoCols != NULL) { + pInfo->pPseudoExpr = createExprInfo(pTableScanNode->scan.pScanPseudoCols, NULL, &pInfo->numOfPseudoExpr); + } - pOperator->name = "StreamBlockScanOperator"; + pInfo->readHandle = *pHandle; + pInfo->tableUid = pScanPhyNode->uid; + pInfo->streamBlockReader = pHandle->reader; + pInfo->pRes = createResDataBlock(pDescNode); + pInfo->pCondition = pScanPhyNode->node.pConditions; + pInfo->pDataReader = pDataReader; + pInfo->scanMode = STREAM_SCAN_FROM_READERHANDLE; + pInfo->pOperatorDumy = pTableScanDummy; + pInfo->interval = pSTInfo->interval; + pInfo->sessionSup = (SessionWindowSupporter){.pStreamAggSup = NULL, .gap = -1}; + + pOperator->name = "StreamBlockScanOperator"; pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN; - pOperator->blocking = false; - pOperator->status = OP_NOT_OPENED; - pOperator->info = pInfo; - pOperator->numOfExprs = pResBlock->info.numOfCols; - pOperator->pTaskInfo = pTaskInfo; + pOperator->blocking = false; + pOperator->status = OP_NOT_OPENED; + pOperator->info = pInfo; + pOperator->numOfExprs = pInfo->pRes->info.numOfCols; + pOperator->pTaskInfo = pTaskInfo; - pOperator->fpSet = - createOperatorFpSet(operatorDummyOpenFn, doStreamBlockScan, NULL, NULL, operatorDummyCloseFn, NULL, NULL, NULL); + pOperator->fpSet = createOperatorFpSet(operatorDummyOpenFn, doStreamBlockScan, NULL, + NULL, operatorDummyCloseFn, NULL, NULL, NULL); return pOperator; @@ -1661,22 +1619,21 @@ static SSDataBlock* doTagScan(SOperatorInfo* pOperator) { STR_TO_VARSTR(str, mr.me.name); colDataAppend(pDst, count, str, false); } else { // it is a tag value - if (pDst->info.type == TSDB_DATA_TYPE_JSON) { - const uint8_t* tmp = mr.me.ctbEntry.pTags; - // TODO opt perf by realloc memory - char* data = taosMemoryCalloc(kvRowLen(tmp) + 1, 1); - if (data == NULL) { - qError("%s failed to malloc memory, size:%d", GET_TASKID(pTaskInfo), kvRowLen(tmp) + 1); - longjmp(pTaskInfo->env, TSDB_CODE_OUT_OF_MEMORY); - } + STagVal val = {0}; + val.cid = pExprInfo[j].base.pParam[0].pCol->colId; + const char* p = metaGetTableTagVal(&mr.me, pDst->info.type, &val); + + char *data = NULL; + if(pDst->info.type != TSDB_DATA_TYPE_JSON && p != NULL){ + data = tTagValToData((const STagVal *)p, false); + }else { + data = (char*)p; + } + colDataAppend(pDst, count, data, (data == NULL)); - *data = TSDB_DATA_TYPE_JSON; - memcpy(data + 1, tmp, kvRowLen(tmp)); - colDataAppend(pDst, count, data, false); + if(pDst->info.type != TSDB_DATA_TYPE_JSON && p != NULL + && IS_VAR_DATA_TYPE(((const STagVal *)p)->type) && data != NULL){ taosMemoryFree(data); - } else { - const char* p = metaGetTableTagVal(&mr.me, pExprInfo[j].base.pParam[0].pCol->colId); - colDataAppend(pDst, count, p, (p == NULL)); } } } @@ -1706,8 +1663,8 @@ static void destroyTagScanOperatorInfo(void* param, int32_t numOfOutput) { } SOperatorInfo* createTagScanOperatorInfo(SReadHandle* pReadHandle, SExprInfo* pExpr, int32_t numOfOutput, - SSDataBlock* pResBlock, SArray* pColMatchInfo, - STableListInfo* pTableListInfo, 
SExecTaskInfo* pTaskInfo) { + SSDataBlock* pResBlock, SArray* pColMatchInfo, STableListInfo* pTableListInfo, + SExecTaskInfo* pTaskInfo) { STagScanInfo* pInfo = taosMemoryCalloc(1, sizeof(STagScanInfo)); SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo)); if (pInfo == NULL || pOperator == NULL) { diff --git a/source/libs/executor/src/timewindowoperator.c b/source/libs/executor/src/timewindowoperator.c index 47dda8fc2b8d056a89aac0f9cba818168db2e64c..41037e9f163ff476e6ba583d316357ade84e773a 100644 --- a/source/libs/executor/src/timewindowoperator.c +++ b/source/libs/executor/src/timewindowoperator.c @@ -1,3 +1,4 @@ +#include "function.h" #include "executorimpl.h" #include "functionMgt.h" #include "tdatablock.h" @@ -11,6 +12,11 @@ typedef enum SResultTsInterpType { static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator); static SSDataBlock* doStreamSessionWindowAgg(SOperatorInfo* pOperator); +static int64_t* extractTsCol(SSDataBlock* pBlock, const SIntervalAggOperatorInfo* pInfo); + +static SResultRowPosition addToOpenWindowList(SResultRowInfo* pResultRowInfo, const SResultRow* pResult); +static void doCloseWindow(SResultRowInfo* pResultRowInfo, const SIntervalAggOperatorInfo* pInfo, SResultRow* pResult); + /* * There are two cases to handle: * @@ -21,47 +27,11 @@ static SSDataBlock* doStreamSessionWindowAgg(SOperatorInfo* pOperator); * is a previous result generated or not. */ static void setIntervalQueryRange(STableQueryInfo* pTableQueryInfo, TSKEY key, STimeWindow* pQRange) { - // SResultRowInfo* pResultRowInfo = &pTableQueryInfo->resInfo; - // if (pResultRowInfo->curPos != -1) { - // return; - // } - - // pTableQueryInfo->win.skey = key; - // STimeWindow win = {.skey = key, .ekey = pQRange->ekey}; - - /** - * In handling the both ascending and descending order super table query, we need to find the first qualified - * timestamp of this table, and then set the first qualified start timestamp. - * In ascending query, the key is the first qualified timestamp. However, in the descending order query, additional - * operations involve. - */ - // STimeWindow w = TSWINDOW_INITIALIZER; - // - // TSKEY sk = TMIN(win.skey, win.ekey); - // TSKEY ek = TMAX(win.skey, win.ekey); - // getAlignQueryTimeWindow(pQueryAttr, win.skey, sk, ek, &w); - - // if (pResultRowInfo->prevSKey == TSKEY_INITIAL_VAL) { - // if (!QUERY_IS_ASC_QUERY(pQueryAttr)) { - // assert(win.ekey == pQueryAttr->window.ekey); - // } - // - // pResultRowInfo->prevSKey = w.skey; - // } - - // pTableQueryInfo->lastKey = pTableQueryInfo->win.skey; + // do nothing } -static TSKEY getStartTsKey(STimeWindow* win, const TSKEY* tsCols, int32_t rows, bool ascQuery) { - TSKEY ts = TSKEY_INITIAL_VAL; - if (tsCols == NULL) { - ts = ascQuery ? win->skey : win->ekey; - } else { -// int32_t offset = ascQuery ? 0 : rows - 1; - ts = tsCols[0]; - } - - return ts; +static TSKEY getStartTsKey(STimeWindow* win, const TSKEY* tsCols) { + return tsCols == NULL? 
win->skey:tsCols[0]; } static void getInitialStartTimeWindow(SInterval* pInterval, int32_t precision, TSKEY ts, STimeWindow* w, @@ -134,8 +104,10 @@ static int32_t setTimeWindowOutputBuf(SResultRowInfo* pResultRowInfo, STimeWindo // set time window for current result pResultRow->win = (*win); + *pResult = pResultRow; setResultRowInitCtx(pResultRow, pCtx, numOfOutput, rowCellInfoOffset); + return TSDB_CODE_SUCCESS; } @@ -163,38 +135,38 @@ static void doKeepNewWindowStartInfo(SWindowRowsSup* pRowSup, const int64_t* tsL static FORCE_INLINE int32_t getForwardStepsInBlock(int32_t numOfRows, __block_search_fn_t searchFn, TSKEY ekey, int16_t pos, int16_t order, int64_t* pData) { - int32_t forwardStep = 0; + int32_t forwardRows = 0; if (order == TSDB_ORDER_ASC) { int32_t end = searchFn((char*)&pData[pos], numOfRows - pos, ekey, order); if (end >= 0) { - forwardStep = end; + forwardRows = end; if (pData[end + pos] == ekey) { - forwardStep += 1; + forwardRows += 1; } } } else { int32_t end = searchFn((char*)&pData[pos], numOfRows - pos, ekey, order); if (end >= 0) { - forwardStep = end; + forwardRows = end; if (pData[end + pos] == ekey) { - forwardStep += 1; + forwardRows += 1; } } // int32_t end = searchFn((char*)pData, pos + 1, ekey, order); // if (end >= 0) { -// forwardStep = pos - end; +// forwardRows = pos - end; // // if (pData[end] == ekey) { -// forwardStep += 1; +// forwardRows += 1; // } // } } - assert(forwardStep >= 0); - return forwardStep; + assert(forwardRows >= 0); + return forwardRows; } int32_t binarySearchForKey(char* pValue, int num, TSKEY key, int order) { @@ -339,34 +311,40 @@ static void getNextTimeWindow(SInterval* pInterval, int32_t precision, int32_t o tw->ekey -= 1; } -void doTimeWindowInterpolation(SOperatorInfo* pOperator, SOptrBasicInfo* pInfo, SArray* pDataBlock, TSKEY prevTs, +void doTimeWindowInterpolation(SIntervalAggOperatorInfo *pInfo, int32_t numOfExprs, SArray* pDataBlock, TSKEY prevTs, int32_t prevRowIndex, TSKEY curTs, int32_t curRowIndex, TSKEY windowKey, int32_t type) { - SExprInfo* pExpr = pOperator->pExpr; + SqlFunctionCtx* pCtx = pInfo->binfo.pCtx; - SqlFunctionCtx* pCtx = pInfo->pCtx; + int32_t index = 1; + for (int32_t k = 0; k < numOfExprs; ++k) { - for (int32_t k = 0; k < pOperator->numOfExprs; ++k) { - int32_t functionId = pCtx[k].functionId; - if (functionId != FUNCTION_TWA && functionId != FUNCTION_INTERP) { + // todo use flag instead of function name + if (strcmp(pCtx[k].pExpr->pExpr->_function.functionName, "twa") != 0) { pCtx[k].start.key = INT64_MIN; continue; } - SColIndex* pColIndex = NULL /*&pExpr[k].base.colInfo*/; - int16_t index = pColIndex->colIndex; - SColumnInfoData* pColInfo = taosArrayGet(pDataBlock, index); +// if (functionId != FUNCTION_TWA && functionId != FUNCTION_INTERP) { +// pCtx[k].start.key = INT64_MIN; +// continue; +// } - // assert(pColInfo->info.colId == pColIndex->info.colId && curTs != windowKey); - double v1 = 0, v2 = 0, v = 0; + SFunctParam* pParam = &pCtx[k].param[0]; + SColumnInfoData* pColInfo = taosArrayGet(pDataBlock, pParam->pCol->slotId); + ASSERT(pColInfo->info.colId == pParam->pCol->colId && curTs != windowKey); + + double v1 = 0, v2 = 0, v = 0; if (prevRowIndex == -1) { - // GET_TYPED_DATA(v1, double, pColInfo->info.type, (char*)pRuntimeEnv->prevRow[index]); + SGroupKeys* p = taosArrayGet(pInfo->pPrevValues, index); + GET_TYPED_DATA(v1, double, pColInfo->info.type, p->pData); } else { - GET_TYPED_DATA(v1, double, pColInfo->info.type, (char*)pColInfo->pData + prevRowIndex * pColInfo->info.bytes); + 
GET_TYPED_DATA(v1, double, pColInfo->info.type, colDataGetData(pColInfo, prevRowIndex)); } - GET_TYPED_DATA(v2, double, pColInfo->info.type, (char*)pColInfo->pData + curRowIndex * pColInfo->info.bytes); + GET_TYPED_DATA(v2, double, pColInfo->info.type, colDataGetData(pColInfo, curRowIndex)); +#if 0 if (functionId == FUNCTION_INTERP) { if (type == RESULT_ROW_START_INTERP) { pCtx[k].start.key = prevTs; @@ -386,6 +364,8 @@ void doTimeWindowInterpolation(SOperatorInfo* pOperator, SOptrBasicInfo* pInfo, } } } else if (functionId == FUNCTION_TWA) { +#endif + SPoint point1 = (SPoint){.key = prevTs, .val = &v1}; SPoint point2 = (SPoint){.key = curTs, .val = &v2}; SPoint point = (SPoint){.key = windowKey, .val = &v}; @@ -399,8 +379,13 @@ void doTimeWindowInterpolation(SOperatorInfo* pOperator, SOptrBasicInfo* pInfo, pCtx[k].end.key = point.key; pCtx[k].end.val = v; } + + index += 1; } +#if 0 } +#endif + } static void setNotInterpoWindowKey(SqlFunctionCtx* pCtx, int32_t numOfOutput, int32_t type) { @@ -415,62 +400,59 @@ static void setNotInterpoWindowKey(SqlFunctionCtx* pCtx, int32_t numOfOutput, in } } -static bool setTimeWindowInterpolationStartTs(SOperatorInfo* pOperatorInfo, SqlFunctionCtx* pCtx, int32_t pos, - int32_t numOfRows, SArray* pDataBlock, const TSKEY* tsCols, - STimeWindow* win) { - bool ascQuery = true; +static bool setTimeWindowInterpolationStartTs(SIntervalAggOperatorInfo *pInfo, SqlFunctionCtx* pCtx, int32_t numOfExprs, int32_t pos, + SSDataBlock* pBlock, const TSKEY* tsCols, STimeWindow* win) { + bool ascQuery = (pInfo->order == TSDB_ORDER_ASC); + TSKEY curTs = tsCols[pos]; - TSKEY lastTs = 0; //*(TSKEY*)pRuntimeEnv->prevRow[0]; + + SGroupKeys* pTsKey = taosArrayGet(pInfo->pPrevValues, 0); + TSKEY lastTs = *(int64_t*) pTsKey->pData; // lastTs == INT64_MIN and pos == 0 means this is the first time window, interpolation is not needed. // start exactly from this point, no need to do interpolation TSKEY key = ascQuery ? win->skey : win->ekey; if (key == curTs) { - setNotInterpoWindowKey(pCtx, pOperatorInfo->numOfExprs, RESULT_ROW_START_INTERP); + setNotInterpoWindowKey(pCtx, numOfExprs, RESULT_ROW_START_INTERP); return true; } - if (lastTs == INT64_MIN && ((pos == 0 && ascQuery) || (pos == (numOfRows - 1) && !ascQuery))) { - setNotInterpoWindowKey(pCtx, pOperatorInfo->numOfExprs, RESULT_ROW_START_INTERP); - return true; + // it is the first time window, no need to do interpolation + if (pTsKey->isNull && pos == 0) { + setNotInterpoWindowKey(pCtx, numOfExprs, RESULT_ROW_START_INTERP); + } else { + TSKEY prevTs = ((pos == 0) ? lastTs : tsCols[pos - 1]); + doTimeWindowInterpolation(pInfo, numOfExprs, pBlock->pDataBlock, prevTs, pos - 1, curTs, pos, key, + RESULT_ROW_START_INTERP); } - int32_t step = 1; // GET_FORWARD_DIRECTION_FACTOR(pQueryAttr->order.order); - TSKEY prevTs = ((pos == 0 && ascQuery) || (pos == (numOfRows - 1) && !ascQuery)) ? 
lastTs : tsCols[pos - step]; - - doTimeWindowInterpolation(pOperatorInfo, pOperatorInfo->info, pDataBlock, prevTs, pos - step, curTs, pos, key, - RESULT_ROW_START_INTERP); return true; } -static bool setTimeWindowInterpolationEndTs(SOperatorInfo* pOperatorInfo, SqlFunctionCtx* pCtx, int32_t endRowIndex, - SArray* pDataBlock, const TSKEY* tsCols, TSKEY blockEkey, - STimeWindow* win) { - int32_t order = TSDB_ORDER_ASC; - int32_t numOfOutput = pOperatorInfo->numOfExprs; +static bool setTimeWindowInterpolationEndTs(SIntervalAggOperatorInfo *pInfo, SqlFunctionCtx* pCtx, int32_t numOfExprs, int32_t endRowIndex, + SArray* pDataBlock, const TSKEY* tsCols, TSKEY blockEkey, STimeWindow* win) { + int32_t order = pInfo->order; TSKEY actualEndKey = tsCols[endRowIndex]; - TSKEY key = order ? win->ekey : win->skey; + TSKEY key = (order == TSDB_ORDER_ASC) ? win->ekey : win->skey; // not ended in current data block, do not invoke interpolation - if ((key > blockEkey /*&& QUERY_IS_ASC_QUERY(pQueryAttr)*/) || - (key < blockEkey /*&& !QUERY_IS_ASC_QUERY(pQueryAttr)*/)) { - setNotInterpoWindowKey(pCtx, numOfOutput, RESULT_ROW_END_INTERP); + if ((key > blockEkey && (order == TSDB_ORDER_ASC)) || (key < blockEkey && (order == TSDB_ORDER_DESC))) { + setNotInterpoWindowKey(pCtx, numOfExprs, RESULT_ROW_END_INTERP); return false; } - // there is actual end point of current time window, no interpolation need + // there is an actual end point of the current time window, no interpolation is needed if (key == actualEndKey) { - setNotInterpoWindowKey(pCtx, numOfOutput, RESULT_ROW_END_INTERP); + setNotInterpoWindowKey(pCtx, numOfExprs, RESULT_ROW_END_INTERP); return true; } - int32_t step = GET_FORWARD_DIRECTION_FACTOR(order); - int32_t nextRowIndex = endRowIndex + step; + int32_t nextRowIndex = endRowIndex + 1; assert(nextRowIndex >= 0); TSKEY nextKey = tsCols[nextRowIndex]; - doTimeWindowInterpolation(pOperatorInfo, pOperatorInfo->info, pDataBlock, actualEndKey, endRowIndex, nextKey, + doTimeWindowInterpolation(pInfo, numOfExprs, pDataBlock, actualEndKey, endRowIndex, nextKey, nextRowIndex, key, RESULT_ROW_END_INTERP); return true; } @@ -542,8 +524,8 @@ static int32_t getNextQualifiedWindow(SInterval* pInterval, STimeWindow* pNext, return startPos; } -static bool resultRowInterpolated(SResultRow* pResult, SResultTsInterpType type) { - assert(pResult != NULL && (type == RESULT_ROW_START_INTERP || type == RESULT_ROW_END_INTERP)); +static bool isResultRowInterpolated(SResultRow* pResult, SResultTsInterpType type) { + ASSERT(pResult != NULL && (type == RESULT_ROW_START_INTERP || type == RESULT_ROW_END_INTERP)); if (type == RESULT_ROW_START_INTERP) { return pResult->startInterp == true; } else { @@ -560,34 +542,29 @@ static void setResultRowInterpo(SResultRow* pResult, SResultTsInterpType type) { } } -static void doWindowBorderInterpolation(SOperatorInfo* pOperatorInfo, SSDataBlock* pBlock, SqlFunctionCtx* pCtx, - SResultRow* pResult, STimeWindow* win, int32_t startPos, int32_t forwardStep, - int32_t order, bool timeWindowInterpo) { - if (!timeWindowInterpo) { +static void doWindowBorderInterpolation(SIntervalAggOperatorInfo *pInfo, SSDataBlock* pBlock, int32_t numOfExprs, SqlFunctionCtx* pCtx, + SResultRow* pResult, STimeWindow* win, int32_t startPos, int32_t forwardRows) { + if (!pInfo->timeWindowInterpo) { return; } - assert(pBlock != NULL); - int32_t step = GET_FORWARD_DIRECTION_FACTOR(order); - + ASSERT(pBlock != NULL); if (pBlock->pDataBlock == NULL) { // tscError("pBlock->pDataBlock == NULL"); return; } - SColumnInfoData*
pColInfo = taosArrayGet(pBlock->pDataBlock, 0); + SColumnInfoData* pColInfo = taosArrayGet(pBlock->pDataBlock, pInfo->primaryTsIndex); TSKEY* tsCols = (TSKEY*)(pColInfo->pData); - bool done = resultRowInterpolated(pResult, RESULT_ROW_START_INTERP); + bool done = isResultRowInterpolated(pResult, RESULT_ROW_START_INTERP); if (!done) { // it is not interpolated, now start to generate the interpolated value - int32_t startRowIndex = startPos; - bool interp = setTimeWindowInterpolationStartTs(pOperatorInfo, pCtx, startRowIndex, pBlock->info.rows, - pBlock->pDataBlock, tsCols, win); + bool interp = setTimeWindowInterpolationStartTs(pInfo, pCtx, numOfExprs, startPos, pBlock, tsCols, win); if (interp) { setResultRowInterpo(pResult, RESULT_ROW_START_INTERP); } } else { - setNotInterpoWindowKey(pCtx, pOperatorInfo->numOfExprs, RESULT_ROW_START_INTERP); + setNotInterpoWindowKey(pCtx, numOfExprs, RESULT_ROW_START_INTERP); } // point interpolation does not require the end key time window interpolation. @@ -596,139 +573,261 @@ static void doWindowBorderInterpolation(SOperatorInfo* pOperatorInfo, SSDataBloc // } // interpolation query does not generate the time window end interpolation - done = resultRowInterpolated(pResult, RESULT_ROW_END_INTERP); + done = isResultRowInterpolated(pResult, RESULT_ROW_END_INTERP); if (!done) { - int32_t endRowIndex = startPos + (forwardStep - 1) * step; + int32_t endRowIndex = startPos + forwardRows - 1; - TSKEY endKey = (order == TSDB_ORDER_ASC) ? pBlock->info.window.ekey : pBlock->info.window.skey; + TSKEY endKey = (pInfo->order == TSDB_ORDER_ASC) ? pBlock->info.window.ekey : pBlock->info.window.skey; bool interp = - setTimeWindowInterpolationEndTs(pOperatorInfo, pCtx, endRowIndex, pBlock->pDataBlock, tsCols, endKey, win); + setTimeWindowInterpolationEndTs(pInfo, pCtx, numOfExprs, endRowIndex, pBlock->pDataBlock, tsCols, endKey, win); if (interp) { setResultRowInterpo(pResult, RESULT_ROW_END_INTERP); } } else { - setNotInterpoWindowKey(pCtx, pOperatorInfo->numOfExprs, RESULT_ROW_END_INTERP); + setNotInterpoWindowKey(pCtx, numOfExprs, RESULT_ROW_END_INTERP); } } -static void saveDataBlockLastRow(char** pRow, SArray* pDataBlock, int32_t rowIndex, int32_t numOfCols) { - if (pDataBlock == NULL) { +static void saveDataBlockLastRow(SArray* pPrevKeys, const SSDataBlock* pBlock, SArray* pCols) { + if (pBlock->pDataBlock == NULL) { return; } - for (int32_t k = 0; k < numOfCols; ++k) { - SColumnInfoData* pColInfo = taosArrayGet(pDataBlock, k); - memcpy(pRow[k], ((char*)pColInfo->pData) + (pColInfo->info.bytes * rowIndex), pColInfo->info.bytes); + size_t num = taosArrayGetSize(pPrevKeys); + for (int32_t k = 0; k < num; ++k) { + SColumn* pc = taosArrayGet(pCols, k); + + SColumnInfoData* pColInfo = taosArrayGet(pBlock->pDataBlock, pc->slotId); + + SGroupKeys* pkey = taosArrayGet(pPrevKeys, k); + for(int32_t i = pBlock->info.rows - 1; i >= 0; --i) { + if (colDataIsNull_s(pColInfo, i)) { + continue; + } + + char* val = colDataGetData(pColInfo, i); + if (IS_VAR_DATA_TYPE(pkey->type)) { + memcpy(pkey->pData, val, varDataTLen(val)); + ASSERT(varDataTLen(val) <= pkey->bytes); + } else { + memcpy(pkey->pData, val, pkey->bytes); + } + + break; + } } } -static SArray* hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pResultRowInfo, SSDataBlock* pBlock, - uint64_t tableGroupId) { +static void doInterpUnclosedTimeWindow(SOperatorInfo* pOperatorInfo, int32_t numOfExprs, SResultRowInfo* pResultRowInfo, + SSDataBlock* pBlock, int32_t scanFlag, int64_t* tsCols, SResultRowPosition* p)
{ + SExecTaskInfo* pTaskInfo = pOperatorInfo->pTaskInfo; + SIntervalAggOperatorInfo* pInfo = (SIntervalAggOperatorInfo*)pOperatorInfo->info; - SExecTaskInfo* pTaskInfo = pOperatorInfo->pTaskInfo; - int32_t numOfOutput = pOperatorInfo->numOfExprs; + int32_t startPos = 0; + int32_t numOfOutput = pOperatorInfo->numOfExprs; + uint64_t groupId = pBlock->info.groupId; - SArray* pUpdated = NULL; - if (pInfo->execModel == OPTR_EXEC_MODEL_STREAM) { - pUpdated = taosArrayInit(4, POINTER_BYTES); + SResultRow* pResult = NULL; + + while (1) { + SListNode* pn = tdListGetHead(pResultRowInfo->openWindow); + + SResultRowPosition* p1 = (SResultRowPosition*)pn->data; + if (p->pageId == p1->pageId && p->offset == p1->offset) { + break; + } + + SResultRow* pr = getResultRowByPos(pInfo->aggSup.pResultBuf, p1); + ASSERT(pr->offset == p1->offset && pr->pageId == p1->pageId); + + if (pr->closed) { + ASSERT(isResultRowInterpolated(pr, RESULT_ROW_START_INTERP) && isResultRowInterpolated(pr, RESULT_ROW_END_INTERP)); + tdListPopHead(pResultRowInfo->openWindow); + continue; + } + + STimeWindow w = pr->win; + int32_t ret = setTimeWindowOutputBuf(pResultRowInfo, &w, (scanFlag == MAIN_SCAN), &pResult, groupId, pInfo->binfo.pCtx, + numOfOutput, pInfo->binfo.rowCellInfoOffset, &pInfo->aggSup, pTaskInfo); + if (ret != TSDB_CODE_SUCCESS) { + longjmp(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY); + } + + ASSERT(!isResultRowInterpolated(pResult, RESULT_ROW_END_INTERP)); + + SGroupKeys *pTsKey = taosArrayGet(pInfo->pPrevValues, 0); + int64_t prevTs = *(int64_t*) pTsKey->pData; + doTimeWindowInterpolation(pInfo, numOfOutput, pBlock->pDataBlock, prevTs, -1, tsCols[startPos], startPos, + w.ekey, RESULT_ROW_END_INTERP); + + setResultRowInterpo(pResult, RESULT_ROW_END_INTERP); + setNotInterpoWindowKey(pInfo->binfo.pCtx, numOfExprs, RESULT_ROW_START_INTERP); + + doApplyFunctions(pTaskInfo, pInfo->binfo.pCtx, &w, &pInfo->twAggSup.timeWindowData, startPos, 0, tsCols, + pBlock->info.rows, numOfExprs, pInfo->order); + + if (isResultRowInterpolated(pResult, RESULT_ROW_END_INTERP)) { + closeResultRow(pr); + tdListPopHead(pResultRowInfo->openWindow); + } else { // the remaining windows cannot be closed yet.
+ break; + } } +} - int32_t step = 1; - bool ascScan = (pInfo->order == TSDB_ORDER_ASC); +typedef int64_t (*__get_value_fn_t)(void* data, int32_t index); - // int32_t prevIndex = pResultRowInfo->curPos; +int32_t binarySearch(void* keyList, int num, TSKEY key, int order, + __get_value_fn_t getValuefn) { + int firstPos = 0, lastPos = num - 1, midPos = -1; + int numOfRows = 0; - TSKEY* tsCols = NULL; - if (pBlock->pDataBlock != NULL) { - SColumnInfoData* pColDataInfo = taosArrayGet(pBlock->pDataBlock, pInfo->primaryTsIndex); - tsCols = (int64_t*)pColDataInfo->pData; + if (num <= 0) return -1; + if (order == TSDB_ORDER_DESC) { + // find the first position which is smaller than or equal to the key + while (1) { + if (key >= getValuefn(keyList, lastPos)) return lastPos; + if (key == getValuefn(keyList, firstPos)) return firstPos; + if (key < getValuefn(keyList, firstPos)) return firstPos - 1; - if (tsCols != NULL) { - blockDataUpdateTsWindow(pBlock, pInfo->primaryTsIndex); + numOfRows = lastPos - firstPos + 1; + midPos = (numOfRows >> 1) + firstPos; + + if (key < getValuefn(keyList, midPos)) { + lastPos = midPos - 1; + } else if (key > getValuefn(keyList, midPos)) { + firstPos = midPos + 1; + } else { + break; + } + } + + } else { + // find the first position which is greater than or equal to the key + while (1) { + if (key <= getValuefn(keyList, firstPos)) return firstPos; + if (key == getValuefn(keyList, lastPos)) return lastPos; + + if (key > getValuefn(keyList, lastPos)) { + lastPos = lastPos + 1; + if (lastPos >= num) + return -1; + else + return lastPos; + } + + numOfRows = lastPos - firstPos + 1; + midPos = (numOfRows >> 1) + firstPos; + + if (key < getValuefn(keyList, midPos)) { + lastPos = midPos - 1; + } else if (key > getValuefn(keyList, midPos)) { + firstPos = midPos + 1; + } else { + break; + } } } - int32_t startPos = 0; - TSKEY ts = getStartTsKey(&pBlock->info.window, tsCols, pBlock->info.rows, ascScan); + return midPos; +} + +int64_t getReskey(void* data, int32_t index) { + SArray* res = (SArray*) data; + SResKeyPos* pos = taosArrayGetP(res, index); + return *(int64_t*)pos->key; +} + +static int32_t saveResult(SResultRow* result, uint64_t groupId, SArray* pUpdated) { + int32_t size = taosArrayGetSize(pUpdated); + int32_t index = binarySearch(pUpdated, size, result->win.skey, TSDB_ORDER_DESC, getReskey); + if (index == -1) { + index = 0; + } else { + TSKEY resTs = getReskey(pUpdated, index); + if (resTs < result->win.skey) { + index++; + } else { + return TSDB_CODE_SUCCESS; + } + } + + SResKeyPos* newPos = taosMemoryMalloc(sizeof(SResKeyPos) + sizeof(uint64_t)); + if (newPos == NULL) { + return TSDB_CODE_OUT_OF_MEMORY; + } + newPos->groupId = groupId; + newPos->pos = (SResultRowPosition){.pageId = result->pageId, .offset = result->offset}; + *(int64_t*)newPos->key = result->win.skey; + if (taosArrayInsert(pUpdated, index, &newPos) == NULL ){ + return TSDB_CODE_OUT_OF_MEMORY; + } + return TSDB_CODE_SUCCESS; +} + +static void hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pResultRowInfo, SSDataBlock* pBlock, + int32_t scanFlag, SArray* pUpdated) { + SIntervalAggOperatorInfo* pInfo = (SIntervalAggOperatorInfo*)pOperatorInfo->info; + + SExecTaskInfo* pTaskInfo = pOperatorInfo->pTaskInfo; + + int32_t startPos = 0; + int32_t numOfOutput = pOperatorInfo->numOfExprs; + int64_t *tsCols = extractTsCol(pBlock, pInfo); + uint64_t tableGroupId = pBlock->info.groupId; + bool ascScan = (pInfo->order == TSDB_ORDER_ASC); + TSKEY ts = getStartTsKey(&pBlock->info.window, tsCols); +
SResultRow* pResult = NULL; STimeWindow win = getActiveTimeWindow(pInfo->aggSup.pResultBuf, pResultRowInfo, ts, &pInfo->interval, pInfo->interval.precision, &pInfo->win); - bool masterScan = true; - SResultRow* pResult = NULL; - int32_t ret = setTimeWindowOutputBuf(pResultRowInfo, &win, masterScan, &pResult, tableGroupId, pInfo->binfo.pCtx, + int32_t ret = setTimeWindowOutputBuf(pResultRowInfo, &win, (scanFlag == MAIN_SCAN), &pResult, tableGroupId, pInfo->binfo.pCtx, numOfOutput, pInfo->binfo.rowCellInfoOffset, &pInfo->aggSup, pTaskInfo); if (ret != TSDB_CODE_SUCCESS || pResult == NULL) { longjmp(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY); } if (pInfo->execModel == OPTR_EXEC_MODEL_STREAM) { - SResKeyPos* pos = taosMemoryMalloc(sizeof(SResKeyPos) + sizeof(uint64_t)); - pos->groupId = tableGroupId; - pos->pos = (SResultRowPosition){.pageId = pResult->pageId, .offset = pResult->offset}; - *(int64_t*)pos->key = pResult->win.skey; - - taosArrayPush(pUpdated, &pos); + if (pInfo->twAggSup.calTrigger == STREAM_TRIGGER_AT_ONCE || + pInfo->twAggSup.calTrigger == STREAM_TRIGGER_AT_ONCE_SMA) { + saveResult(pResult, tableGroupId, pUpdated); + } + if (pInfo->twAggSup.winMap) { + taosHashRemove(pInfo->twAggSup.winMap, &win.skey, sizeof(TSKEY)); + } } - int32_t forwardStep = 0; TSKEY ekey = ascScan? win.ekey:win.skey; - forwardStep = - getNumOfRowsInTimeWindow(&pBlock->info, tsCols, startPos, ekey, binarySearchForKey, NULL, pInfo->order); - ASSERT(forwardStep > 0); + int32_t forwardRows = getNumOfRowsInTimeWindow(&pBlock->info, tsCols, startPos, ekey, binarySearchForKey, NULL, pInfo->order); + ASSERT(forwardRows > 0); // prev time window not interpolated yet. - // int32_t curIndex = pResultRowInfo->curPos; - -#if 0 - if (prevIndex != -1 && prevIndex < curIndex && pInfo->timeWindowInterpo) { - for (int32_t j = prevIndex; j < curIndex; ++j) { // previous time window may be all closed already.
- SResultRow* pRes = getResultRow(pResultRowInfo, j); - if (pRes->closed) { - assert(resultRowInterpolated(pRes, RESULT_ROW_START_INTERP) && resultRowInterpolated(pRes, RESULT_ROW_END_INTERP)); - continue; - } - - STimeWindow w = pRes->win; - ret = setTimeWindowOutputBuf(pResultRowInfo, pBlock->info.uid, &w, masterScan, &pResult, tableGroupId, - pInfo->binfo.pCtx, numOfOutput, pInfo->binfo.rowCellInfoOffset, &pInfo->aggSup, - pTaskInfo); - if (ret != TSDB_CODE_SUCCESS) { - longjmp(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY); - } - - assert(!resultRowInterpolated(pResult, RESULT_ROW_END_INTERP)); - doTimeWindowInterpolation(pOperatorInfo, &pInfo->binfo, pBlock->pDataBlock, *(TSKEY*)pInfo->pRow[0], -1, - tsCols[startPos], startPos, w.ekey, RESULT_ROW_END_INTERP); - - setResultRowInterpo(pResult, RESULT_ROW_END_INTERP); - setNotInterpoWindowKey(pInfo->binfo.pCtx, pOperatorInfo->numOfExprs, RESULT_ROW_START_INTERP); - - doApplyFunctions(pInfo->binfo.pCtx, &w, &pInfo->timeWindowData, startPos, 0, tsCols, pBlock->info.rows, numOfOutput, TSDB_ORDER_ASC); - } + if (pInfo->timeWindowInterpo) { + SResultRowPosition pos = addToOpenWindowList(pResultRowInfo, pResult); + doInterpUnclosedTimeWindow(pOperatorInfo, numOfOutput, pResultRowInfo, pBlock, scanFlag, tsCols, &pos); // restore current time window - ret = setTimeWindowOutputBuf(pResultRowInfo, pBlock->info.uid, &win, masterScan, &pResult, tableGroupId, - pInfo->binfo.pCtx, numOfOutput, pInfo->binfo.rowCellInfoOffset, &pInfo->aggSup, - pTaskInfo); + ret = setTimeWindowOutputBuf(pResultRowInfo, &win, (scanFlag == MAIN_SCAN), &pResult, tableGroupId, pInfo->binfo.pCtx, + numOfOutput, pInfo->binfo.rowCellInfoOffset, &pInfo->aggSup, pTaskInfo); if (ret != TSDB_CODE_SUCCESS) { longjmp(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY); } - } -#endif - // window start key interpolation - doWindowBorderInterpolation(pOperatorInfo, pBlock, pInfo->binfo.pCtx, pResult, &win, startPos, forwardStep, - pInfo->order, false); + // window start key interpolation + doWindowBorderInterpolation(pInfo, pBlock, numOfOutput, pInfo->binfo.pCtx, pResult, &win, startPos, forwardRows); + } updateTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &win, true); - doApplyFunctions(pTaskInfo, pInfo->binfo.pCtx, &win, &pInfo->twAggSup.timeWindowData, startPos, forwardStep, tsCols, - pBlock->info.rows, numOfOutput, TSDB_ORDER_ASC); + doApplyFunctions(pTaskInfo, pInfo->binfo.pCtx, &win, &pInfo->twAggSup.timeWindowData, startPos, forwardRows, tsCols, + pBlock->info.rows, numOfOutput, pInfo->order); + + doCloseWindow(pResultRowInfo, pInfo, pResult); STimeWindow nextWin = win; while (1) { - int32_t prevEndPos = (forwardStep - 1) * step + startPos; + int32_t prevEndPos = forwardRows - 1 + startPos; startPos = getNextQualifiedWindow(&pInfo->interval, &nextWin, &pBlock->info, tsCols, prevEndPos, pInfo->order); if (startPos < 0) { break; @@ -736,41 +835,77 @@ static SArray* hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pRe // null data, failed to allocate more memory buffer int32_t code = - setTimeWindowOutputBuf(pResultRowInfo, &nextWin, masterScan, &pResult, tableGroupId, pInfo->binfo.pCtx, + setTimeWindowOutputBuf(pResultRowInfo, &nextWin, (scanFlag == MAIN_SCAN), &pResult, tableGroupId, pInfo->binfo.pCtx, numOfOutput, pInfo->binfo.rowCellInfoOffset, &pInfo->aggSup, pTaskInfo); if (code != TSDB_CODE_SUCCESS || pResult == NULL) { longjmp(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY); } - if (pInfo->execModel == OPTR_EXEC_MODEL_STREAM) { - SResKeyPos* pos = 
taosMemoryMalloc(sizeof(SResKeyPos) + sizeof(uint64_t)); - pos->groupId = tableGroupId; - pos->pos = (SResultRowPosition){.pageId = pResult->pageId, .offset = pResult->offset}; - *(int64_t*)pos->key = pResult->win.skey; - taosArrayPush(pUpdated, &pos); + if (pInfo->execModel == OPTR_EXEC_MODEL_STREAM) { + if (pInfo->twAggSup.calTrigger == STREAM_TRIGGER_AT_ONCE || + pInfo->twAggSup.calTrigger == STREAM_TRIGGER_AT_ONCE_SMA) { + saveResult(pResult, tableGroupId, pUpdated); + } + if (pInfo->twAggSup.winMap) { + taosHashRemove(pInfo->twAggSup.winMap, &win.skey, sizeof(TSKEY)); + } } ekey = ascScan? nextWin.ekey:nextWin.skey; - forwardStep = + forwardRows = getNumOfRowsInTimeWindow(&pBlock->info, tsCols, startPos, ekey, binarySearchForKey, NULL, pInfo->order); // window start(end) key interpolation - doWindowBorderInterpolation(pOperatorInfo, pBlock, pInfo->binfo.pCtx, pResult, &nextWin, startPos, forwardStep, - pInfo->order, false); + doWindowBorderInterpolation(pInfo, pBlock, numOfOutput, pInfo->binfo.pCtx, pResult, &nextWin, startPos, forwardRows); updateTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &nextWin, true); - doApplyFunctions(pTaskInfo, pInfo->binfo.pCtx, &nextWin, &pInfo->twAggSup.timeWindowData, startPos, forwardStep, tsCols, - pBlock->info.rows, numOfOutput, TSDB_ORDER_ASC); + doApplyFunctions(pTaskInfo, pInfo->binfo.pCtx, &nextWin, &pInfo->twAggSup.timeWindowData, startPos, forwardRows, tsCols, + pBlock->info.rows, numOfOutput, pInfo->order); + doCloseWindow(pResultRowInfo, pInfo, pResult); } if (pInfo->timeWindowInterpo) { - int32_t rowIndex = ascScan ? (pBlock->info.rows - 1) : 0; - saveDataBlockLastRow(pInfo->pRow, pBlock->pDataBlock, rowIndex, pBlock->info.numOfCols); + saveDataBlockLastRow(pInfo->pPrevValues, pBlock, pInfo->pInterpCols); + } +} + +void doCloseWindow(SResultRowInfo* pResultRowInfo, const SIntervalAggOperatorInfo* pInfo, SResultRow* pResult) { + // current result is done computing final results.
+ if (pInfo->timeWindowInterpo && isResultRowInterpolated(pResult, RESULT_ROW_END_INTERP)) { + closeResultRow(pResult); + tdListPopHead(pResultRowInfo->openWindow); } +} - return pUpdated; - // updateResultRowInfoActiveIndex(pResultRowInfo, &pInfo->win, pRuntimeEnv->current->lastKey, true, false); +SResultRowPosition addToOpenWindowList(SResultRowInfo* pResultRowInfo, const SResultRow* pResult) { + SResultRowPosition pos = (SResultRowPosition){.pageId = pResult->pageId, .offset = pResult->offset}; + SListNode* pn = tdListGetTail(pResultRowInfo->openWindow); + if (pn == NULL) { + tdListAppend(pResultRowInfo->openWindow, &pos); + return pos; + } + + SResultRowPosition* px = (SResultRowPosition*)pn->data; + if (px->pageId != pos.pageId || px->offset != pos.offset) { + tdListAppend(pResultRowInfo->openWindow, &pos); + } + + return pos; +} + +int64_t* extractTsCol(SSDataBlock* pBlock, const SIntervalAggOperatorInfo* pInfo) { + TSKEY* tsCols = NULL; + if (pBlock->pDataBlock != NULL) { + SColumnInfoData* pColDataInfo = taosArrayGet(pBlock->pDataBlock, pInfo->primaryTsIndex); + tsCols = (int64_t*)pColDataInfo->pData; + + if (tsCols != NULL) { + blockDataUpdateTsWindow(pBlock, pInfo->primaryTsIndex); + } + } + + return tsCols; } static int32_t doOpenIntervalAgg(SOperatorInfo* pOperator) { @@ -799,17 +934,17 @@ static int32_t doOpenIntervalAgg(SOperatorInfo* pOperator) { STableQueryInfo* pTableQueryInfo = pInfo->pCurrent; setIntervalQueryRange(pTableQueryInfo, pBlock->info.window.skey, &pTaskInfo->window); - hashIntervalAgg(pOperator, &pInfo->binfo.resultRowInfo, pBlock, pBlock->info.groupId); + hashIntervalAgg(pOperator, &pInfo->binfo.resultRowInfo, pBlock, scanFlag, NULL); #if 0 // test for encode/decode result info - if(pOperator->encodeResultRow){ + if(pOperator->fpSet.encodeResultRow){ char *result = NULL; int32_t length = 0; SAggSupporter *pSup = &pInfo->aggSup; - pOperator->encodeResultRow(pOperator, pSup, &pInfo->binfo, &result, &length); + pOperator->fpSet.encodeResultRow(pOperator, &result, &length); taosHashClear(pSup->pResultRowHashTable); pInfo->binfo.resultRowInfo.size = 0; - pOperator->decodeResultRow(pOperator, pSup, &pInfo->binfo, result, length); + pOperator->fpSet.decodeResultRow(pOperator, result); if(result){ taosMemoryFree(result); } @@ -930,8 +1065,9 @@ static SSDataBlock* doStateWindowAgg(SOperatorInfo* pOperator) { } SStateWindowOperatorInfo* pInfo = pOperator->info; - SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; - SOptrBasicInfo* pBInfo = &pInfo->binfo; + + SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; + SOptrBasicInfo* pBInfo = &pInfo->binfo; if (pOperator->status == OP_RES_TO_RETURN) { doBuildResultDatablock(pOperator, pBInfo, &pInfo->groupResInfo, pInfo->aggSup.pResultBuf); @@ -1040,8 +1176,7 @@ static void setInverFunction(SqlFunctionCtx* pCtx, int32_t num, EStreamType type } } -void doClearWindowImpl(SResultRowPosition* p1, SDiskbasedBuf* pResultBuf, - SOptrBasicInfo* pBinfo, int32_t numOfOutput) { +void doClearWindowImpl(SResultRowPosition* p1, SDiskbasedBuf* pResultBuf, SOptrBasicInfo* pBinfo, int32_t numOfOutput) { SResultRow* pResult = getResultRowByPos(pResultBuf, p1); SqlFunctionCtx* pCtx = pBinfo->pCtx; for (int32_t i = 0; i < numOfOutput; ++i) { @@ -1067,7 +1202,7 @@ void doClearWindow(SAggSupporter* pSup, SOptrBasicInfo* pBinfo, char* pData, } static void doClearWindows(SAggSupporter* pSup, SOptrBasicInfo* pBinfo, - SInterval* pIntrerval, int32_t tsIndex, int32_t numOfOutput, SSDataBlock* pBlock, + SInterval* pInterval, int32_t tsIndex, int32_t 
numOfOutput, SSDataBlock* pBlock, SArray* pUpWins) { SColumnInfoData* pColDataInfo = taosArrayGet(pBlock->pDataBlock, tsIndex); TSKEY *tsCols = (TSKEY*)pColDataInfo->pData; @@ -1075,8 +1210,8 @@ static void doClearWindows(SAggSupporter* pSup, SOptrBasicInfo* pBinfo, for (int32_t i = 0; i < pBlock->info.rows; i += step) { SResultRowInfo dumyInfo; dumyInfo.cur.pageId = -1; - STimeWindow win = getActiveTimeWindow(NULL, &dumyInfo, tsCols[i], pIntrerval, - pIntrerval->precision, NULL); + STimeWindow win = getActiveTimeWindow(NULL, &dumyInfo, tsCols[i], pInterval, + pInterval->precision, NULL); step = getNumOfRowsInTimeWindow(&pBlock->info, tsCols, i, win.ekey, binarySearchForKey, NULL, TSDB_ORDER_ASC); doClearWindow(pSup, pBinfo, (char*)&win.skey, sizeof(TKEY), pBlock->info.groupId, numOfOutput); @@ -1086,6 +1221,48 @@ static void doClearWindows(SAggSupporter* pSup, SOptrBasicInfo* pBinfo, } } +static int32_t closeIntervalWindow(SHashObj *pHashMap, STimeWindowAggSupp *pSup, + SInterval* pInterval, SArray* closeWins) { + void *pIte = NULL; + size_t keyLen = 0; + while((pIte = taosHashIterate(pHashMap, pIte)) != NULL) { + void* key = taosHashGetKey(pIte, &keyLen); + uint64_t groupId = *(uint64_t*) key; + ASSERT(keyLen == GET_RES_WINDOW_KEY_LEN(sizeof(TSKEY))); + TSKEY ts = *(int64_t*) ((char*)key + sizeof(uint64_t)); + SResultRowInfo dumyInfo; + dumyInfo.cur.pageId = -1; + STimeWindow win = getActiveTimeWindow(NULL, &dumyInfo, ts, pInterval, + pInterval->precision, NULL); + if (win.ekey < pSup->maxTs - pSup->waterMark) { + if (pSup->calTrigger == STREAM_TRIGGER_WINDOW_CLOSE_SMA) { + if (taosHashGet(pSup->winMap, &win.skey, sizeof(TSKEY))) { + continue; + } + } + char keyBuf[GET_RES_WINDOW_KEY_LEN(sizeof(TSKEY))]; + SET_RES_WINDOW_KEY(keyBuf, &ts, sizeof(TSKEY), groupId); + if (pSup->calTrigger != STREAM_TRIGGER_AT_ONCE_SMA && + pSup->calTrigger != STREAM_TRIGGER_WINDOW_CLOSE_SMA) { + taosHashRemove(pHashMap, keyBuf, keyLen); + } + SResKeyPos* pos = taosMemoryMalloc(sizeof(SResKeyPos) + sizeof(uint64_t)); + if (pos == NULL) { + return TSDB_CODE_OUT_OF_MEMORY; + } + pos->groupId = groupId; + pos->pos = *(SResultRowPosition*) pIte; + *(int64_t*)pos->key = ts; + if (!taosArrayPush(closeWins, &pos)) { + taosMemoryFree(pos); + return TSDB_CODE_OUT_OF_MEMORY; + } + taosHashPut(pSup->winMap, &win.skey, sizeof(TSKEY), NULL, 0); + } + } + return TSDB_CODE_SUCCESS; +} + static SSDataBlock* doStreamIntervalAgg(SOperatorInfo* pOperator) { SIntervalAggOperatorInfo* pInfo = pOperator->info; SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; @@ -1106,14 +1283,16 @@ static SSDataBlock* doStreamIntervalAgg(SOperatorInfo* pOperator) { SOperatorInfo* downstream = pOperator->pDownstream[0]; - SArray* pUpdated = NULL; + SArray* pUpdated = taosArrayInit(4, POINTER_BYTES); + SArray* pClosed = taosArrayInit(4, POINTER_BYTES); + while (1) { SSDataBlock* pBlock = downstream->fpSet.getNextFn(downstream); if (pBlock == NULL) { break; } - // The timewindows that overlaps the timestamps of the input pBlock need to be recalculated and return to the + // The timewindow that overlaps the timestamps of the input pBlock need to be recalculated and return to the // caller. Note that all the time window are not close till now. 
// the pDataBlock are always the same one, no need to call this again setInputDataBlock(pOperator, pInfo->binfo.pCtx, pBlock, pInfo->order, MAIN_SCAN, true); @@ -1128,9 +1307,20 @@ static SSDataBlock* doStreamIntervalAgg(SOperatorInfo* pOperator) { continue; } - pUpdated = hashIntervalAgg(pOperator, &pInfo->binfo.resultRowInfo, pBlock, 0); + pInfo->twAggSup.maxTs = TMAX(pInfo->twAggSup.maxTs, pBlock->info.window.ekey); + hashIntervalAgg(pOperator, &pInfo->binfo.resultRowInfo, pBlock, MAIN_SCAN, pUpdated); + } + + closeIntervalWindow(pInfo->aggSup.pResultRowHashTable, &pInfo->twAggSup, + &pInfo->interval, pClosed); + finalizeUpdatedResult(pOperator->numOfExprs, pInfo->aggSup.pResultBuf, pClosed, + pInfo->binfo.rowCellInfoOffset); + if (pInfo->twAggSup.calTrigger == STREAM_TRIGGER_WINDOW_CLOSE || + pInfo->twAggSup.calTrigger == STREAM_TRIGGER_WINDOW_CLOSE_SMA) { + taosArrayAddAll(pUpdated, pClosed); } + taosArrayDestroy(pClosed); finalizeUpdatedResult(pOperator->numOfExprs, pInfo->aggSup.pResultBuf, pUpdated, pInfo->binfo.rowCellInfoOffset); initMultiResInfoFromArrayList(&pInfo->groupResInfo, pUpdated); @@ -1169,7 +1359,7 @@ void destroyStreamFinalIntervalOperatorInfo(void* param, int32_t numOfOutput) { } } -bool allInvertible(SqlFunctionCtx* pFCtx, int32_t numOfCols) { +static bool allInvertible(SqlFunctionCtx* pFCtx, int32_t numOfCols) { for (int32_t i = 0; i < numOfCols; i++) { if (!fmIsInvertible(pFCtx[i].functionId)) { return false; @@ -1178,6 +1368,50 @@ bool allInvertible(SqlFunctionCtx* pFCtx, int32_t numOfCols) { return true; } +static bool timeWindowinterpNeeded(SqlFunctionCtx* pCtx, int32_t numOfCols, SIntervalAggOperatorInfo* pInfo) { + // the primary timestamp column + bool needed = false; + pInfo->pInterpCols = taosArrayInit(4, sizeof(SColumn)); + pInfo->pPrevValues = taosArrayInit(4, sizeof(SGroupKeys)); + + { // ts column + SColumn c = {0}; + c.colId = 1; + c.slotId = pInfo->primaryTsIndex; + c.type = TSDB_DATA_TYPE_TIMESTAMP; + c.bytes = sizeof(int64_t); + taosArrayPush(pInfo->pInterpCols, &c); + + SGroupKeys key = {0}; + key.bytes = c.bytes; + key.type = c.type; + key.isNull = true; // to denote no value is assigned yet + key.pData = taosMemoryCalloc(1, c.bytes); + taosArrayPush(pInfo->pPrevValues, &key); + } + + for(int32_t i = 0; i < numOfCols; ++i) { + SExprInfo* pExpr = pCtx[i].pExpr; + + if (strcmp(pExpr->pExpr->_function.functionName, "twa") == 0) { + SFunctParam* pParam = &pExpr->base.pParam[0]; + + SColumn c = *pParam->pCol; + taosArrayPush(pInfo->pInterpCols, &c); + needed = true; + + SGroupKeys key = {0}; + key.bytes = c.bytes; + key.type = c.type; + key.isNull = false; + key.pData = taosMemoryCalloc(1, c.bytes); + taosArrayPush(pInfo->pPrevValues, &key); + } + } + + return needed; +} + SOperatorInfo* createIntervalOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExprInfo, int32_t numOfCols, SSDataBlock* pResBlock, SInterval* pInterval, int32_t primaryTsSlotId, STimeWindowAggSupp* pTwAggSupp, SExecTaskInfo* pTaskInfo) { @@ -1187,11 +1421,12 @@ SOperatorInfo* createIntervalOperatorInfo(SOperatorInfo* downstream, SExprInfo* goto _error; } - pInfo->order = TSDB_ORDER_ASC; - pInfo->interval = *pInterval; + pInfo->win = pTaskInfo->window; + pInfo->order = TSDB_ORDER_ASC; + pInfo->interval = *pInterval; pInfo->execModel = pTaskInfo->execModel; - pInfo->win = pTaskInfo->window; - pInfo->twAggSup = *pTwAggSupp; + pInfo->twAggSup = *pTwAggSupp; + pInfo->primaryTsIndex = primaryTsSlotId; size_t keyBufSize = sizeof(int64_t) + sizeof(int64_t) + POINTER_BYTES; @@ -1201,23 
+1436,30 @@ SOperatorInfo* createIntervalOperatorInfo(SOperatorInfo* downstream, SExprInfo* initAggInfo(&pInfo->binfo, &pInfo->aggSup, pExprInfo, numOfCols, pResBlock, keyBufSize, pTaskInfo->id.str); initExecTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &pInfo->win); + pInfo->invertible = allInvertible(pInfo->binfo.pCtx, numOfCols); pInfo->invertible = false; // Todo(liuyao): Dependent TSDB API - if (code != TSDB_CODE_SUCCESS) { + pInfo->timeWindowInterpo = timeWindowinterpNeeded(pInfo->binfo.pCtx, numOfCols, pInfo); + if (pInfo->timeWindowInterpo) { + pInfo->binfo.resultRowInfo.openWindow = tdListNew(sizeof(SResultRowPosition)); + } + + // pInfo->pTableQueryInfo = initTableQueryInfo(pTableGroupInfo); + if (code != TSDB_CODE_SUCCESS /* || pInfo->pTableQueryInfo == NULL*/) { goto _error; } initResultRowInfo(&pInfo->binfo.resultRowInfo, (int32_t)1); - pOperator->name = "TimeIntervalAggOperator"; + pOperator->name = "TimeIntervalAggOperator"; pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_INTERVAL; - pOperator->blocking = true; - pOperator->status = OP_NOT_OPENED; - pOperator->pExpr = pExprInfo; - pOperator->pTaskInfo = pTaskInfo; - pOperator->numOfExprs = numOfCols; - pOperator->info = pInfo; + pOperator->blocking = true; + pOperator->status = OP_NOT_OPENED; + pOperator->pExpr = pExprInfo; + pOperator->pTaskInfo = pTaskInfo; + pOperator->numOfExprs = numOfCols; + pOperator->info = pInfo; pOperator->fpSet = createOperatorFpSet(doOpenIntervalAgg, doBuildIntervalResult, doStreamIntervalAgg, NULL, destroyIntervalOperatorInfo, aggEncodeResultRow, aggDecodeResultRow, NULL); @@ -1676,7 +1918,7 @@ static SArray* doHashInterval(SOperatorInfo* pOperatorInfo, SSDataBlock* pSDataB bool ascScan = true; TSKEY* tsCols = NULL; SResultRow* pResult = NULL; - int32_t forwardStep = 0; + int32_t forwardRows = 0; if (pSDataBlock->pDataBlock != NULL) { SColumnInfoData* pColDataInfo = taosArrayGet(pSDataBlock->pDataBlock, pInfo->primaryTsIndex); @@ -1686,7 +1928,7 @@ static SArray* doHashInterval(SOperatorInfo* pOperatorInfo, SSDataBlock* pSDataB } int32_t startPos = ascScan ? 
0 : (pSDataBlock->info.rows - 1); - TSKEY ts = getStartTsKey(&pSDataBlock->info.window, tsCols, pSDataBlock->info.rows, ascScan); + TSKEY ts = getStartTsKey(&pSDataBlock->info.window, tsCols); STimeWindow nextWin = getActiveTimeWindow(pInfo->aggSup.pResultBuf, pResultRowInfo, ts, &pInfo->interval, pInfo->interval.precision, NULL); while (1) { @@ -1701,15 +1943,15 @@ static SArray* doHashInterval(SOperatorInfo* pOperatorInfo, SSDataBlock* pSDataB pos->pos = (SResultRowPosition){.pageId = pResult->pageId, .offset = pResult->offset}; *(int64_t*)pos->key = pResult->win.skey; taosArrayPush(pUpdated, &pos); - forwardStep = + forwardRows = getNumOfRowsInTimeWindow(&pSDataBlock->info, tsCols, startPos, nextWin.ekey, binarySearchForKey, NULL, TSDB_ORDER_ASC); // window start(end) key interpolation - doWindowBorderInterpolation(pOperatorInfo, pSDataBlock, pInfo->binfo.pCtx, pResult, &nextWin, startPos, forwardStep, - pInfo->order, false); + // disable it temporarily +// doWindowBorderInterpolation(pInfo, pSDataBlock, numOfOutput, pInfo->binfo.pCtx, pResult, &nextWin, startPos, forwardRows); updateTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &nextWin, true); - doApplyFunctions(pTaskInfo, pInfo->binfo.pCtx, &nextWin, &pInfo->twAggSup.timeWindowData, startPos, forwardStep, tsCols, + doApplyFunctions(pTaskInfo, pInfo->binfo.pCtx, &nextWin, &pInfo->twAggSup.timeWindowData, startPos, forwardRows, tsCols, pSDataBlock->info.rows, numOfOutput, TSDB_ORDER_ASC); - int32_t prevEndPos = (forwardStep - 1) * step + startPos; + int32_t prevEndPos = (forwardRows - 1) * step + startPos; startPos = getNextQualifiedWindow(&pInfo->interval, &nextWin, &pSDataBlock->info, tsCols, prevEndPos, pInfo->order); if (startPos < 0) { break; @@ -1935,63 +2177,6 @@ _error: return NULL; } -typedef int64_t (*__get_value_fn_t)(void* data, int32_t index); - -int32_t binarySearch(void* keyList, int num, TSKEY key, int order, - __get_value_fn_t getValuefn) { - int firstPos = 0, lastPos = num - 1, midPos = -1; - int numOfRows = 0; - - if (num <= 0) return -1; - if (order == TSDB_ORDER_DESC) { - // find the first position which is smaller than the key - while (1) { - if (key >= getValuefn(keyList, lastPos)) return lastPos; - if (key == getValuefn(keyList, firstPos)) return firstPos; - if (key < getValuefn(keyList, firstPos)) return firstPos - 1; - - numOfRows = lastPos - firstPos + 1; - midPos = (numOfRows >> 1) + firstPos; - - if (key < getValuefn(keyList, midPos)) { - lastPos = midPos - 1; - } else if (key > getValuefn(keyList, midPos)) { - firstPos = midPos + 1; - } else { - break; - } - } - - } else { - // find the first position which is bigger than the key - while (1) { - if (key <= getValuefn(keyList, firstPos)) return firstPos; - if (key == getValuefn(keyList, lastPos)) return lastPos; - - if (key > getValuefn(keyList, lastPos)) { - lastPos = lastPos + 1; - if (lastPos >= num) - return -1; - else - return lastPos; - } - - numOfRows = lastPos - firstPos + 1; - midPos = (numOfRows >> 1) + firstPos; - - if (key < getValuefn(keyList, midPos)) { - lastPos = midPos - 1; - } else if (key > getValuefn(keyList, midPos)) { - firstPos = midPos + 1; - } else { - break; - } - } - } - - return midPos; -} - int64_t getSessionWindowEndkey(void* data, int32_t index) { SArray* pWinInfos = (SArray*) data; SResultWindowInfo* pWin = taosArrayGet(pWinInfos, index); @@ -2082,7 +2267,7 @@ static int32_t setWindowOutputBuf(SResultWindowInfo* pWinInfo, SResultRow** pRes } if (pWinInfo->pos.pageId == -1) { - *pResult = 
getNewResultRow_rv(pAggSup->pResultBuf, groupId, pAggSup->resultRowSize); + *pResult = getNewResultRow(pAggSup->pResultBuf, groupId, pAggSup->resultRowSize); if (*pResult == NULL) { return TSDB_CODE_OUT_OF_MEMORY; } @@ -2217,18 +2402,20 @@ static void doStreamSessionWindowAggImpl(SOperatorInfo* pOperator, longjmp(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY); } // window start(end) key interpolation - // doWindowBorderInterpolation(pOperatorInfo, pSDataBlock, pInfo->binfo.pCtx, pResult, &nextWin, startPos, forwardStep, + // doWindowBorderInterpolation(pOperatorInfo, pSDataBlock, pInfo->binfo.pCtx, pResult, &nextWin, startPos, forwardRows, // pInfo->order, false); int32_t winNum = getNumCompactWindow(pAggSup->pResultRows, winIndex, gap); if (winNum > 0) { compactTimeWindow(pInfo, winIndex, winNum, groupId, numOfOutput, pTaskInfo, pStUpdated, pStDeleted); } - - code = taosHashPut(pStUpdated, &pCurWin->pos, sizeof(SResultRowPosition), &(pCurWin->win.skey), sizeof(TSKEY)); - if (code != TSDB_CODE_SUCCESS) { - longjmp(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY); + pCurWin->isClosed = false; + if (pInfo->twAggSup.calTrigger == STREAM_TRIGGER_AT_ONCE) { + code = taosHashPut(pStUpdated, &pCurWin->pos, sizeof(SResultRowPosition), &(pCurWin->win.skey), sizeof(TSKEY)); + if (code != TSDB_CODE_SUCCESS) { + longjmp(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY); + } + pCurWin->isOutput = true; } - pCurWin->isOutput = true; i += winRows; } } @@ -2325,6 +2512,37 @@ bool isFinalSession(SStreamSessionAggOperatorInfo* pInfo) { return pInfo->pChildren != NULL; } +int32_t closeSessionWindow(SArray *pWins, STimeWindowAggSupp *pTwSup, SArray *pClosed, + int8_t calTrigger) { + // Todo(liuyao) save window to tdb + int32_t size = taosArrayGetSize(pWins); + for (int32_t i = 0; i < size; i++) { + SResultWindowInfo *pSeWin = taosArrayGet(pWins, i); + if (pSeWin->win.ekey < pTwSup->maxTs - pTwSup->waterMark) { + if (!pSeWin->isClosed) { + SResKeyPos* pos = taosMemoryMalloc(sizeof(SResKeyPos) + sizeof(uint64_t)); + if (pos == NULL) { + return TSDB_CODE_OUT_OF_MEMORY; + } + pos->groupId = 0; + pos->pos = pSeWin->pos; + *(int64_t*)pos->key = pSeWin->win.ekey; + if (!taosArrayPush(pClosed, &pos)) { + taosMemoryFree(pos); + return TSDB_CODE_OUT_OF_MEMORY; + } + pSeWin->isClosed = true; + if (calTrigger == STREAM_TRIGGER_WINDOW_CLOSE) { + pSeWin->isOutput = true; + } + } + continue; + } + break; + } + return TSDB_CODE_SUCCESS; +} + static SSDataBlock* doStreamSessionWindowAgg(SOperatorInfo* pOperator) { if (pOperator->status == OP_EXEC_DONE) { return NULL; @@ -2377,13 +2595,21 @@ static SSDataBlock* doStreamSessionWindowAgg(SOperatorInfo* pOperator) { doStreamSessionWindowAggImpl(pOperator, pBlock, NULL, NULL); } doStreamSessionWindowAggImpl(pOperator, pBlock, pStUpdated, pInfo->pStDeleted); + pInfo->twAggSup.maxTs = TMAX(pInfo->twAggSup.maxTs, pBlock->info.window.ekey); } - // restore the value pOperator->status = OP_RES_TO_RETURN; + + SArray* pClosed = taosArrayInit(16, POINTER_BYTES); + closeSessionWindow(pInfo->streamAggSup.pResultRows, &pInfo->twAggSup, pClosed, + pInfo->twAggSup.calTrigger); SArray* pUpdated = taosArrayInit(16, POINTER_BYTES); copyUpdateResult(pStUpdated, pUpdated, pBInfo->pRes->info.groupId); taosHashCleanup(pStUpdated); + if (pInfo->twAggSup.calTrigger == STREAM_TRIGGER_WINDOW_CLOSE) { + taosArrayAddAll(pUpdated, pClosed); + } + finalizeUpdatedResult(pOperator->numOfExprs, pInfo->streamAggSup.pResultBuf, pUpdated, pInfo->binfo.rowCellInfoOffset); initMultiResInfoFromArrayList(&pInfo->groupResInfo, 
pUpdated); diff --git a/source/libs/function/CMakeLists.txt b/source/libs/function/CMakeLists.txt index 7a4cd8092205786065015252432dcb4de0a1db41..ea401e56e5c6585b93344af99280bb450137f98f 100644 --- a/source/libs/function/CMakeLists.txt +++ b/source/libs/function/CMakeLists.txt @@ -14,7 +14,7 @@ target_include_directories( target_link_libraries( function - PRIVATE os util common nodes scalar catalog qcom transport + PRIVATE os util common nodes scalar qcom transport PUBLIC uv_a ) diff --git a/source/libs/function/inc/builtins.h b/source/libs/function/inc/builtins.h index 3bd0f35bf5f8b29cd585ec841363b091b02211c5..bc91875006b0c45162f52505084c9971b17e5429 100644 --- a/source/libs/function/inc/builtins.h +++ b/source/libs/function/inc/builtins.h @@ -26,22 +26,24 @@ typedef int32_t (*FTranslateFunc)(SFunctionNode* pFunc, char* pErrBuf, int32_t l typedef EFuncDataRequired (*FFuncDataRequired)(SFunctionNode* pFunc, STimeWindow* pTimeWindow); typedef struct SBuiltinFuncDefinition { - char name[FUNCTION_NAME_MAX_LENGTH]; - EFunctionType type; - uint64_t classification; - FTranslateFunc translateFunc; - FFuncDataRequired dataRequiredFunc; - FExecGetEnv getEnvFunc; - FExecInit initFunc; - FExecProcess processFunc; + const char* name; + EFunctionType type; + uint64_t classification; + FTranslateFunc translateFunc; + FFuncDataRequired dataRequiredFunc; + FExecGetEnv getEnvFunc; + FExecInit initFunc; + FExecProcess processFunc; FScalarExecProcess sprocessFunc; - FExecFinalize finalizeFunc; - FExecProcess invertFunc; - FExecCombine combineFunc; + FExecFinalize finalizeFunc; + FExecProcess invertFunc; + FExecCombine combineFunc; + const char* pPartialFunc; + const char* pMergeFunc; } SBuiltinFuncDefinition; extern const SBuiltinFuncDefinition funcMgtBuiltins[]; -extern const int funcMgtBuiltinsNum; +extern const int funcMgtBuiltinsNum; #ifdef __cplusplus } diff --git a/source/libs/function/inc/builtinsimpl.h b/source/libs/function/inc/builtinsimpl.h index cac86be91752575feba4dcb016eddd84564389d1..68b83f4a1955c72e119dcadd5d409ce10639e5e1 100644 --- a/source/libs/function/inc/builtinsimpl.h +++ b/source/libs/function/inc/builtinsimpl.h @@ -140,6 +140,10 @@ bool uniqueFunctionSetup(SqlFunctionCtx *pCtx, SResultRowEntryInfo* pResultInfo) int32_t uniqueFunction(SqlFunctionCtx *pCtx); //int32_t uniqueFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock); +bool getTwaFuncEnv(struct SFunctionNode* pFunc, SFuncExecEnv* pEnv); +bool twaFunctionSetup(SqlFunctionCtx *pCtx, SResultRowEntryInfo* pResultInfo); +int32_t twaFunction(SqlFunctionCtx *pCtx); +int32_t twaFinalize(struct SqlFunctionCtx *pCtx, SSDataBlock* pBlock); bool getSelectivityFuncEnv(SFunctionNode* pFunc, SFuncExecEnv* pEnv); diff --git a/source/libs/function/inc/functionMgtInt.h b/source/libs/function/inc/functionMgtInt.h index 21d277665872fc520ecea0fe6157b8338789499b..29dd0bcd90d6297ca539bad8a5c5cd78ff151d1d 100644 --- a/source/libs/function/inc/functionMgtInt.h +++ b/source/libs/function/inc/functionMgtInt.h @@ -44,9 +44,7 @@ extern "C" { #define FUNC_MGT_TEST_MASK(val, mask) (((val) & (mask)) != 0) -#define FUNC_UDF_ID_START 5000 -#define FUNC_AGGREGATE_UDF_ID 5001 -#define FUNC_SCALAR_UDF_ID 5002 +#define FUNC_UDF_ID_START 5000 extern const int funcMgtUdfNum; diff --git a/source/libs/function/inc/taggfunction.h b/source/libs/function/inc/taggfunction.h index d779cf50f4ce019ddcea41b71720347d54a34e96..c3d61d426d889cecda0723b48c6c26eae16316ff 100644 --- a/source/libs/function/inc/taggfunction.h +++ b/source/libs/function/inc/taggfunction.h @@ -52,13 
+52,6 @@ typedef struct SInterpInfoDetail { int8_t primaryCol; } SInterpInfoDetail; -typedef struct STwaInfo { - int8_t hasResult; // flag to denote has value - double dOutput; - SPoint1 p; - STimeWindow win; -} STwaInfo; - bool topbot_datablock_filter(SqlFunctionCtx *pCtx, const char *minval, const char *maxval); /** diff --git a/source/libs/function/src/builtins.c b/source/libs/function/src/builtins.c index c33d4b9b400fad73c2e6e7de81fc2151a8c689e0..c9c63169c931cc03fdf58d16f551f6abbdc8ba85 100644 --- a/source/libs/function/src/builtins.c +++ b/source/libs/function/src/builtins.c @@ -18,7 +18,6 @@ #include "querynodes.h" #include "scalar.h" #include "taoserror.h" -#include "tdatablock.h" static int32_t buildFuncErrMsg(char* pErrBuf, int32_t len, int32_t errCode, const char* pFormat, ...) { va_list vArgList; @@ -103,6 +102,28 @@ static int32_t translateInOutStr(SFunctionNode* pFunc, char* pErrBuf, int32_t le return TSDB_CODE_SUCCESS; } +static int32_t translateLogarithm(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { + int32_t numOfParams = LIST_LENGTH(pFunc->pParameterList); + if (1 != numOfParams && 2 != numOfParams) { + return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); + } + + uint8_t para1Type = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type; + if (!IS_NUMERIC_TYPE(para1Type)) { + return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); + } + + if (2 == numOfParams) { + uint8_t para2Type = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 1))->resType.type; + if (!IS_NUMERIC_TYPE(para2Type)) { + return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); + } + } + + pFunc->node.resType = (SDataType){.bytes = tDataTypes[TSDB_DATA_TYPE_DOUBLE].bytes, .type = TSDB_DATA_TYPE_DOUBLE}; + return TSDB_CODE_SUCCESS; +} + static int32_t translateCount(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { if (1 != LIST_LENGTH(pFunc->pParameterList)) { return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); @@ -156,6 +177,14 @@ static int32_t translatePercentile(SFunctionNode* pFunc, char* pErrBuf, int32_t return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); } + // param0 + SNode* pParamNode0 = nodesListGetNode(pFunc->pParameterList, 0); + if (nodeType(pParamNode0) != QUERY_NODE_COLUMN) { + return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR, + "The first parameter of PERCENTILE function can only be column"); + } + + // param1 SValueNode* pValue = (SValueNode*)nodesListGetNode(pFunc->pParameterList, 1); if (pValue->datum.i < 0 || pValue->datum.i > 100) { @@ -170,11 +199,12 @@ static int32_t translatePercentile(SFunctionNode* pFunc, char* pErrBuf, int32_t return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); } + // set result type pFunc->node.resType = (SDataType){.bytes = tDataTypes[TSDB_DATA_TYPE_DOUBLE].bytes, .type = TSDB_DATA_TYPE_DOUBLE}; return TSDB_CODE_SUCCESS; } -static bool validAperventileAlgo(const SValueNode* pVal) { +static bool validateApercentileAlgo(const SValueNode* pVal) { if (TSDB_DATA_TYPE_BINARY != pVal->node.resType.type) { return false; } @@ -188,30 +218,47 @@ static int32_t translateApercentile(SFunctionNode* pFunc, char* pErrBuf, int32_t return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); } - uint8_t para1Type = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type; - uint8_t para2Type = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 1))->resType.type; - if (!IS_NUMERIC_TYPE(para1Type) || 
!IS_INTEGER_TYPE(para2Type)) { - return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); + // param0 + SNode* pParamNode0 = nodesListGetNode(pFunc->pParameterList, 0); + if (nodeType(pParamNode0) != QUERY_NODE_COLUMN) { + return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR, + "The first parameter of APERCENTILE function can only be column"); } - SNode* pParamNode = nodesListGetNode(pFunc->pParameterList, 1); - if (nodeType(pParamNode) != QUERY_NODE_VALUE) { + // param1 + SNode* pParamNode1 = nodesListGetNode(pFunc->pParameterList, 1); + if (nodeType(pParamNode1) != QUERY_NODE_VALUE) { return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); } - SValueNode* pValue = (SValueNode*)pParamNode; + SValueNode* pValue = (SValueNode*)pParamNode1; if (pValue->datum.i < 0 || pValue->datum.i > 100) { return invaildFuncParaValueErrMsg(pErrBuf, len, pFunc->functionName); } pValue->notReserved = true; + uint8_t para1Type = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type; + uint8_t para2Type = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 1))->resType.type; + if (!IS_NUMERIC_TYPE(para1Type) || !IS_INTEGER_TYPE(para2Type)) { + return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); + } + + // param2 if (3 == numOfParams) { - SNode* pPara3 = nodesListGetNode(pFunc->pParameterList, 2); - if (QUERY_NODE_VALUE != nodeType(pPara3) || !validAperventileAlgo((SValueNode*)pPara3)) { + uint8_t para3Type = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 2))->resType.type; + if (!IS_VAR_DATA_TYPE(para3Type)) { + return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); + } + + SNode* pParamNode2 = nodesListGetNode(pFunc->pParameterList, 2); + if (QUERY_NODE_VALUE != nodeType(pParamNode2) || !validateApercentileAlgo((SValueNode*)pParamNode2)) { return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR, "Third parameter algorithm of apercentile must be 'default' or 't-digest'"); } + + pValue = (SValueNode*)pParamNode2; + pValue->notReserved = true; } pFunc->node.resType = (SDataType){.bytes = tDataTypes[TSDB_DATA_TYPE_DOUBLE].bytes, .type = TSDB_DATA_TYPE_DOUBLE}; @@ -237,14 +284,14 @@ static int32_t translateTop(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); } - //param0 + // param0 SNode* pParamNode0 = nodesListGetNode(pFunc->pParameterList, 0); if (nodeType(pParamNode0) != QUERY_NODE_COLUMN) { return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR, "The first parameter of TOP/BOTTOM function can only be column"); } - //param1 + // param1 SNode* pParamNode1 = nodesListGetNode(pFunc->pParameterList, 1); if (nodeType(pParamNode1) != QUERY_NODE_VALUE) { return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); @@ -261,7 +308,7 @@ static int32_t translateTop(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { pValue->notReserved = true; - //set result type + // set result type SDataType* pType = &((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType; pFunc->node.resType = (SDataType){.bytes = pType->bytes, .type = pType->type}; return TSDB_CODE_SUCCESS; @@ -314,7 +361,7 @@ static int32_t translateElapsed(SFunctionNode* pFunc, char* pErrBuf, int32_t len pValue->notReserved = true; - uint8_t paraType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 1))->resType.type; + paraType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 1))->resType.type; if (!IS_INTEGER_TYPE(paraType)) { return 
invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); } @@ -408,10 +455,19 @@ static int32_t translateHLL(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { "The input parameter of HYPERLOGLOG function can only be column"); } - pFunc->node.resType = (SDataType){.bytes = tDataTypes[TSDB_DATA_TYPE_UBIGINT].bytes, .type = TSDB_DATA_TYPE_UBIGINT}; + pFunc->node.resType = (SDataType){.bytes = tDataTypes[TSDB_DATA_TYPE_BIGINT].bytes, .type = TSDB_DATA_TYPE_BIGINT}; return TSDB_CODE_SUCCESS; } +static bool validateStateOper(const SValueNode* pVal) { + if (TSDB_DATA_TYPE_BINARY != pVal->node.resType.type) { + return false; + } + return (0 == strcasecmp(varDataVal(pVal->datum.p), "GT") || 0 == strcasecmp(varDataVal(pVal->datum.p), "GE") || + 0 == strcasecmp(varDataVal(pVal->datum.p), "LT") || 0 == strcasecmp(varDataVal(pVal->datum.p), "LE") || + 0 == strcasecmp(varDataVal(pVal->datum.p), "EQ") || 0 == strcasecmp(varDataVal(pVal->datum.p), "NE")); +} + static int32_t translateStateCount(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { int32_t numOfParams = LIST_LENGTH(pFunc->pParameterList); if (3 != numOfParams) { @@ -419,6 +475,11 @@ } // param0 + SNode* pParaNode0 = nodesListGetNode(pFunc->pParameterList, 0); + if (QUERY_NODE_COLUMN != nodeType(pParaNode0)) { + return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR, + "The input parameter of STATECOUNT function can only be column"); + } uint8_t colType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type; if (!IS_NUMERIC_TYPE(colType)) { return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); @@ -433,6 +494,12 @@ SValueNode* pValue = (SValueNode*)pParamNode; + if (i == 1 && !validateStateOper(pValue)) { + return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR, + "Second parameter of STATECOUNT function " + "must be one of the following: 'GE', 'GT', 'LE', 'LT', 'EQ', 'NE'"); + } + pValue->notReserved = true; } @@ -454,6 +521,11 @@ } // param0 + SNode* pParaNode0 = nodesListGetNode(pFunc->pParameterList, 0); + if (QUERY_NODE_COLUMN != nodeType(pParaNode0)) { + return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR, + "The input parameter of STATEDURATION function can only be column"); + } uint8_t colType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type; if (!IS_NUMERIC_TYPE(colType)) { return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); @@ -468,6 +540,15 @@ SValueNode* pValue = (SValueNode*)pParamNode; + if (i == 1 && !validateStateOper(pValue)) { + return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR, + "Second parameter of STATEDURATION function " + "must be one of the following: 'GE', 'GT', 'LE', 'LT', 'EQ', 'NE'"); + } else if (i == 3 && pValue->datum.i == 0) { + return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR, + "STATEDURATION function time unit parameter should be greater than db precision"); + } + pValue->notReserved = true; } @@ -489,7 +570,7 @@ static int32_t translateStateDuration(SFunctionNode* pFunc, char* pErrBuf, int32 static int32_t translateCsum(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { if (1 != LIST_LENGTH(pFunc->pParameterList)) { - return TSDB_CODE_SUCCESS; +
return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); } SNode* pPara = nodesListGetNode(pFunc->pParameterList, 0); @@ -621,8 +702,10 @@ static int32_t translateTail(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { SValueNode* pValue = (SValueNode*)pParamNode; - if (pValue->datum.i < ((i > 1) ? 0 : 1) || pValue->datum.i > 1000) { - return invaildFuncParaValueErrMsg(pErrBuf, len, pFunc->functionName); + if (pValue->datum.i < ((i > 1) ? 0 : 1) || pValue->datum.i > 100) { + return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR, + "TAIL function second parameter should be in range [1, 100], " + "third parameter should be in range [0, 100]"); } pValue->notReserved = true; @@ -665,7 +748,7 @@ static int32_t translateFirstLast(SFunctionNode* pFunc, char* pErrBuf, int32_t l static int32_t translateUnique(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { if (1 != LIST_LENGTH(pFunc->pParameterList)) { - return TSDB_CODE_SUCCESS; + return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); } SNode* pPara = nodesListGetNode(pFunc->pParameterList, 0); @@ -678,17 +761,51 @@ static int32_t translateUnique(SFunctionNode* pFunc, char* pErrBuf, int32_t len) } static int32_t translateDiff(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { - int32_t paraLen = LIST_LENGTH(pFunc->pParameterList); - if (paraLen == 0 || paraLen > 2) { + int32_t numOfParams = LIST_LENGTH(pFunc->pParameterList); + if (numOfParams == 0 || numOfParams > 2) { return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); } - SExprNode* p1 = (SExprNode*)nodesListGetNode(pFunc->pParameterList, 0); - if (!IS_SIGNED_NUMERIC_TYPE(p1->resType.type) && !IS_FLOAT_TYPE(p1->resType.type) && - TSDB_DATA_TYPE_BOOL != p1->resType.type) { + // param0 + SNode* pParamNode0 = nodesListGetNode(pFunc->pParameterList, 0); + if (nodeType(pParamNode0) != QUERY_NODE_COLUMN) { + return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR, + "The first parameter of DIFF function can only be column"); + } + + uint8_t colType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type; + if (!IS_SIGNED_NUMERIC_TYPE(colType) && !IS_FLOAT_TYPE(colType) && TSDB_DATA_TYPE_BOOL != colType) { return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); } - pFunc->node.resType = p1->resType; + + // param1 + if (numOfParams == 2) { + uint8_t paraType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 1))->resType.type; + if (!IS_INTEGER_TYPE(paraType)) { + return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); + } + + SNode* pParamNode1 = nodesListGetNode(pFunc->pParameterList, 1); + if (QUERY_NODE_VALUE != nodeType(pParamNode1)) { + return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); + } + + SValueNode* pValue = (SValueNode*)pParamNode1; + if (pValue->datum.i != 0 && pValue->datum.i != 1) { + return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR, + "Second parameter of DIFF function should be only 0 or 1"); + } + + pValue->notReserved = true; + } + + uint8_t resType; + if (IS_SIGNED_NUMERIC_TYPE(colType) || TSDB_DATA_TYPE_BOOL == colType) { + resType = TSDB_DATA_TYPE_BIGINT; + } else { + resType = TSDB_DATA_TYPE_DOUBLE; + } + pFunc->node.resType = (SDataType){.bytes = tDataTypes[resType].bytes, .type = resType}; return TSDB_CODE_SUCCESS; } @@ -716,11 +833,20 @@ static int32_t translateConcatImpl(SFunctionNode* pFunc, char* pErrBuf, int32_t int32_t resultBytes = 0; int32_t sepBytes = 0; + // concat_ws separator should be constant string + if (hasSep) { 
+ SNode* pPara = nodesListGetNode(pFunc->pParameterList, 0); + if (nodeType(pPara) != QUERY_NODE_VALUE) { + return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR, + "The first parameter of CONCAT_WS function can only be constant string"); + } + } + /* For concat/concat_ws function, if params have NCHAR type, promote the final result to NCHAR */ for (int32_t i = 0; i < numOfParams; ++i) { SNode* pPara = nodesListGetNode(pFunc->pParameterList, i); uint8_t paraType = ((SExprNode*)pPara)->resType.type; - if (!IS_VAR_DATA_TYPE(paraType)) { + if (!IS_VAR_DATA_TYPE(paraType) && !IS_NULL_TYPE(paraType)) { return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); } if (TSDB_DATA_TYPE_NCHAR == paraType) { @@ -733,6 +859,12 @@ static int32_t translateConcatImpl(SFunctionNode* pFunc, char* pErrBuf, int32_t uint8_t paraType = ((SExprNode*)pPara)->resType.type; int32_t paraBytes = ((SExprNode*)pPara)->resType.bytes; int32_t factor = 1; + if (IS_NULL_TYPE(paraType)) { + resultType = TSDB_DATA_TYPE_VARCHAR; + resultBytes = 0; + sepBytes = 0; + break; + } if (TSDB_DATA_TYPE_NCHAR == resultType && TSDB_DATA_TYPE_VARCHAR == paraType) { factor *= TSDB_NCHAR_SIZE; } @@ -779,7 +911,7 @@ static int32_t translateSubstr(SFunctionNode* pFunc, char* pErrBuf, int32_t len) if (3 == numOfParams) { SExprNode* p2 = (SExprNode*)nodesListGetNode(pFunc->pParameterList, 2); - uint8_t para2Type = p2->resType.type; + uint8_t para2Type = p2->resType.type; if (!IS_INTEGER_TYPE(para2Type)) { return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); } @@ -820,16 +952,107 @@ static int32_t translateCast(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { return TSDB_CODE_SUCCESS; } +/* Following are valid ISO-8601 timezone format: + * 1 z/Z + * 2 ±hh:mm + * 3 ±hhmm + * 4 ±hh + * + */ + +static bool validateTimezoneFormat(const SValueNode* pVal) { + if (TSDB_DATA_TYPE_BINARY != pVal->node.resType.type) { + return false; + } + + char* tz = varDataVal(pVal->datum.p); + int32_t len = varDataLen(pVal->datum.p); + + if (len == 0) { + return false; + } else if (len == 1 && (tz[0] == 'z' || tz[0] == 'Z')) { + return true; + } else if ((tz[0] == '+' || tz[0] == '-')) { + switch (len) { + case 3: + case 5: { + for (int32_t i = 1; i < len; ++i) { + if (!isdigit(tz[i])) { + return false; + } + } + break; + } + case 6: { + for (int32_t i = 1; i < len; ++i) { + if (i == 3) { + if (tz[i] != ':') { + return false; + } + continue; + } + if (!isdigit(tz[i])) { + return false; + } + } + break; + } + default: { + return false; + } + } + } else { + return false; + } + + return true; +} + +void static addTimezoneParam(SNodeList* pList) { + char buf[6] = {0}; + time_t t = taosTime(NULL); + struct tm* tmInfo = taosLocalTime(&t, NULL); + strftime(buf, sizeof(buf), "%z", tmInfo); + int32_t len = (int32_t)strlen(buf); + + SValueNode* pVal = (SValueNode*)nodesMakeNode(QUERY_NODE_VALUE); + pVal->literal = strndup(buf, len); + pVal->isDuration = false; + pVal->translate = true; + pVal->node.resType.type = TSDB_DATA_TYPE_BINARY; + pVal->node.resType.bytes = len + VARSTR_HEADER_SIZE; + pVal->node.resType.precision = TSDB_TIME_PRECISION_MILLI; + pVal->datum.p = taosMemoryCalloc(1, len + VARSTR_HEADER_SIZE + 1); + varDataSetLen(pVal->datum.p, len); + strncpy(varDataVal(pVal->datum.p), pVal->literal, len); + + nodesListAppend(pList, pVal); +} + static int32_t translateToIso8601(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { - if (1 != LIST_LENGTH(pFunc->pParameterList)) { + int32_t numOfParams = 
LIST_LENGTH(pFunc->pParameterList); + if (1 != numOfParams && 2 != numOfParams) { return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); } + // param0 uint8_t paraType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type; if (!IS_INTEGER_TYPE(paraType) && TSDB_DATA_TYPE_TIMESTAMP != paraType) { return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); } + // param1 + if (numOfParams == 2) { + SValueNode* pValue = (SValueNode*)nodesListGetNode(pFunc->pParameterList, 1); + + if (!validateTimezoneFormat(pValue)) { + return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR, "Invalid timezone format"); + } + } else { // add default client timezone + addTimezoneParam(pFunc->pParameterList); + } + + // set result type pFunc->node.resType = (SDataType){.bytes = 64, .type = TSDB_DATA_TYPE_BINARY}; return TSDB_CODE_SUCCESS; } @@ -920,6 +1143,8 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { .finalizeFunc = functionFinalize, .invertFunc = countInvertFunction, .combineFunc = combineFunction, + // .pPartialFunc = "count", + // .pMergeFunc = "sum" }, { .name = "sum", @@ -1087,6 +1312,16 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { .finalizeFunc = firstLastFinalize, .combineFunc = lastCombine, }, + { + .name = "twa", + .type = FUNCTION_TYPE_TWA, + .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_TIMELINE_FUNC, + .translateFunc = translateInNumOutDou, + .getEnvFunc = getTwaFuncEnv, + .initFunc = twaFunctionSetup, + .processFunc = twaFunction, + .finalizeFunc = twaFinalize + }, { .name = "histogram", .type = FUNCTION_TYPE_HISTOGRAM, @@ -1118,7 +1353,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { .finalizeFunc = functionFinalize }, { - .name = "state_count", + .name = "statecount", .type = FUNCTION_TYPE_STATE_COUNT, .classification = FUNC_MGT_INDEFINITE_ROWS_FUNC, .translateFunc = translateStateCount, @@ -1128,7 +1363,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { .finalizeFunc = NULL }, { - .name = "state_duration", + .name = "stateduration", .type = FUNCTION_TYPE_STATE_DURATION, .classification = FUNC_MGT_INDEFINITE_ROWS_FUNC | FUNC_MGT_TIMELINE_FUNC, .translateFunc = translateStateDuration, @@ -1201,7 +1436,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { .name = "log", .type = FUNCTION_TYPE_LOG, .classification = FUNC_MGT_SCALAR_FUNC, - .translateFunc = translateIn2NumOutDou, + .translateFunc = translateLogarithm, .getEnvFunc = NULL, .initFunc = NULL, .sprocessFunc = logFunction, diff --git a/source/libs/function/src/builtinsimpl.c b/source/libs/function/src/builtinsimpl.c index 3b453a8b1f19f5ab02e5f20489cf2281db9ec44b..be18150234c5b1d7dc4064dfb561900290e3722b 100644 --- a/source/libs/function/src/builtinsimpl.c +++ b/source/libs/function/src/builtinsimpl.c @@ -14,6 +14,7 @@ */ #include "builtinsimpl.h" +#include "tglobal.h" #include "cJSON.h" #include "function.h" #include "querynodes.h" @@ -225,6 +226,7 @@ typedef struct SUniqueInfo { int32_t numOfPoints; uint8_t colType; int16_t colBytes; + bool hasNull; //null is not hashable, handle separately SHashObj *pHash; char pItems[]; } SUniqueInfo; @@ -299,7 +301,7 @@ int32_t functionFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) { SColumnInfoData* pCol = taosArrayGet(pBlock->pDataBlock, slotId); SResultRowEntryInfo* pResInfo = GET_RES_INFO(pCtx); - //pResInfo->isNullRes = (pResInfo->numOfRes == 0) ? 1 : 0; + pResInfo->isNullRes = (pResInfo->numOfRes == 0) ?
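
/* Illustrative sketch, not part of the patch: validateTimezoneFormat above
 * accepts exactly the ISO-8601 suffixes listed in its comment -- "z"/"Z",
 * "±hh", "±hhmm" and "±hh:mm". The same predicate over a plain NUL-terminated
 * string (the real code reads length and payload from the varData header): */
#include <ctype.h>
#include <stdbool.h>
#include <string.h>
static bool isIso8601Tz(const char *tz) {
  size_t len = strlen(tz);
  if (len == 1 && (tz[0] == 'z' || tz[0] == 'Z')) return true;
  if ((len != 3 && len != 5 && len != 6) || (tz[0] != '+' && tz[0] != '-')) return false;
  for (size_t i = 1; i < len; ++i) {
    if (len == 6 && i == 3) {
      if (tz[i] != ':') return false; /* the ±hh:mm form */
      continue;
    }
    if (!isdigit((unsigned char)tz[i])) return false;
  }
  return true;
}
/* isIso8601Tz("+0800") and isIso8601Tz("Z") hold; isIso8601Tz("+8") does not,
 * so TO_ISO8601(ts, '+8') fails with "Invalid timezone format", while the
 * one-argument call falls back to the client timezone via addTimezoneParam. */
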
1 : 0; char* in = GET_ROWCELL_INTERBUF(pResInfo); colDataAppend(pCol, pBlock->info.rows, in, pResInfo->isNullRes); @@ -356,7 +358,7 @@ bool getCountFuncEnv(SFunctionNode* UNUSED_PARAM(pFunc), SFuncExecEnv* pEnv) { return true; } -static FORCE_INLINE int32_t getNumofElem(SqlFunctionCtx* pCtx) { +static FORCE_INLINE int32_t getNumOfElems(SqlFunctionCtx* pCtx) { int32_t numOfElem = 0; /* @@ -391,11 +393,12 @@ static FORCE_INLINE int32_t getNumofElem(SqlFunctionCtx* pCtx) { * count function does not use the pCtx->interResBuf to keep the intermediate buffer */ int32_t countFunction(SqlFunctionCtx* pCtx) { - int32_t numOfElem = getNumofElem(pCtx); - SResultRowEntryInfo* pResInfo = GET_RES_INFO(pCtx); + int32_t numOfElem = getNumOfElems(pCtx); + SResultRowEntryInfo* pResInfo = GET_RES_INFO(pCtx); SInputColumnInfoData* pInput = &pCtx->input; - int32_t type = pInput->pData[0]->info.type; + + int32_t type = pInput->pData[0]->info.type; char* buf = GET_ROWCELL_INTERBUF(pResInfo); if (IS_NULL_TYPE(type)) { @@ -406,12 +409,17 @@ int32_t countFunction(SqlFunctionCtx* pCtx) { *((int64_t*)buf) += numOfElem; } - SET_VAL(pResInfo, numOfElem, 1); + if (tsCountAlwaysReturnValue) { + pResInfo->numOfRes = 1; + } else { + SET_VAL(pResInfo, 1, 1); + } + return TSDB_CODE_SUCCESS; } int32_t countInvertFunction(SqlFunctionCtx* pCtx) { - int32_t numOfElem = getNumofElem(pCtx); + int32_t numOfElem = getNumOfElems(pCtx); SResultRowEntryInfo* pResInfo = GET_RES_INFO(pCtx); char* buf = GET_ROWCELL_INTERBUF(pResInfo); @@ -828,17 +836,21 @@ int32_t avgCombine(SqlFunctionCtx* pDestCtx, SqlFunctionCtx* pSourceCtx) { int32_t avgFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) { SInputColumnInfoData* pInput = &pCtx->input; - int32_t type = pInput->pData[0]->info.type; - SAvgRes* pAvgRes = GET_ROWCELL_INTERBUF(GET_RES_INFO(pCtx)); + + int32_t type = pInput->pData[0]->info.type; + SAvgRes* pAvgRes = GET_ROWCELL_INTERBUF(GET_RES_INFO(pCtx)); + if (IS_INTEGER_TYPE(type)) { pAvgRes->result = pAvgRes->sum.isum / ((double)pAvgRes->count); } else { - if (isinf(pAvgRes->sum.dsum) || isnan(pAvgRes->sum.dsum)) { - GET_RES_INFO(pCtx)->isNullRes = 1; - } pAvgRes->result = pAvgRes->sum.dsum / ((double)pAvgRes->count); } + //check for overflow + if (isinf(pAvgRes->result) || isnan(pAvgRes->result)) { + GET_RES_INFO(pCtx)->isNullRes = 1; + } + return functionFinalize(pCtx, pBlock); } @@ -1643,8 +1655,8 @@ bool leastSQRFunctionSetup(SqlFunctionCtx* pCtx, SResultRowEntryInfo* pResultInf pInfo->startVal = IS_FLOAT_TYPE(pCtx->param[1].param.nType) ? pCtx->param[1].param.d : (double)pCtx->param[1].param.i; - pInfo->stepVal = IS_FLOAT_TYPE(pCtx->param[1].param.nType) ? pCtx->param[2].param.d : - (double)pCtx->param[1].param.i; + pInfo->stepVal = IS_FLOAT_TYPE(pCtx->param[2].param.nType) ? 
pCtx->param[2].param.d : + (double)pCtx->param[2].param.i; return true; } @@ -1751,6 +1763,11 @@ int32_t leastSQRFunction(SqlFunctionCtx* pCtx) { } break; } + case TSDB_DATA_TYPE_NULL: { + GET_RES_INFO(pCtx)->isNullRes = 1; + numOfElem = 1; + break; + } default: break; @@ -1794,7 +1811,7 @@ int32_t leastSQRFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) { size_t len = snprintf(varDataVal(buf), sizeof(buf) - VARSTR_HEADER_SIZE, "{slop:%.6lf, intercept:%.6lf}", param[0][2], param[1][2]); varDataSetLen(buf, len); - colDataAppend(pCol, currentRow, buf, false); + colDataAppend(pCol, currentRow, buf, pResInfo->isNullRes); return pResInfo->numOfRes; } @@ -1824,7 +1841,7 @@ bool percentileFunctionSetup(SqlFunctionCtx* pCtx, SResultRowEntryInfo* pResultI } int32_t percentileFunction(SqlFunctionCtx* pCtx) { - int32_t notNullElems = 0; + int32_t numOfElems = 0; SResultRowEntryInfo* pResInfo = GET_RES_INFO(pCtx); SInputColumnInfoData* pInput = &pCtx->input; @@ -1902,11 +1919,11 @@ int32_t percentileFunction(SqlFunctionCtx* pCtx) { } char* data = colDataGetData(pCol, i); - notNullElems += 1; + numOfElems += 1; tMemBucketPut(pInfo->pMemBucket, data, 1); } - SET_VAL(pResInfo, notNullElems, 1); + SET_VAL(pResInfo, numOfElems, 1); } return TSDB_CODE_SUCCESS; @@ -1962,7 +1979,7 @@ bool apercentileFunctionSetup(SqlFunctionCtx* pCtx, SResultRowEntryInfo* pResult if (pCtx->numOfParams == 2) { pInfo->algo = APERCT_ALGO_DEFAULT; } else if (pCtx->numOfParams == 3) { - pInfo->algo = getApercentileAlgo(pCtx->param[2].param.pz); + pInfo->algo = getApercentileAlgo(varDataVal(pCtx->param[2].param.pz)); if (pInfo->algo == APERCT_ALGO_UNKNOWN) { return false; } @@ -1980,7 +1997,7 @@ bool apercentileFunctionSetup(SqlFunctionCtx* pCtx, SResultRowEntryInfo* pResult } int32_t apercentileFunction(SqlFunctionCtx* pCtx) { - int32_t notNullElems = 0; + int32_t numOfElems = 0; SResultRowEntryInfo* pResInfo = GET_RES_INFO(pCtx); SInputColumnInfoData* pInput = &pCtx->input; @@ -1997,7 +2014,7 @@ int32_t apercentileFunction(SqlFunctionCtx* pCtx) { if (colDataIsNull_f(pCol->nullbitmap, i)) { continue; } - notNullElems += 1; + numOfElems += 1; char* data = colDataGetData(pCol, i); double v = 0; // value @@ -2010,7 +2027,7 @@ int32_t apercentileFunction(SqlFunctionCtx* pCtx) { if (colDataIsNull_f(pCol->nullbitmap, i)) { continue; } - notNullElems += 1; + numOfElems += 1; char* data = colDataGetData(pCol, i); double v = 0; @@ -2019,7 +2036,7 @@ int32_t apercentileFunction(SqlFunctionCtx* pCtx) { } } - SET_VAL(pResInfo, notNullElems, 1); + SET_VAL(pResInfo, numOfElems, 1); return TSDB_CODE_SUCCESS; } @@ -2298,15 +2315,15 @@ static void doSetPrevVal(SDiffInfo* pDiffInfo, int32_t type, const char* pv) { } static void doHandleDiff(SDiffInfo* pDiffInfo, int32_t type, const char* pv, SColumnInfoData* pOutput, int32_t pos, int32_t order) { - int32_t factor = (order == TSDB_ORDER_ASC)? 1:-1; + int32_t factor = (order == TSDB_ORDER_ASC)? 
1:-1; switch (type) { case TSDB_DATA_TYPE_INT: { int32_t v = *(int32_t*)pv; - int32_t delta = factor*(v - pDiffInfo->prev.i64); // direct previous may be null + int64_t delta = factor*(v - pDiffInfo->prev.i64); // direct previous may be null if (delta < 0 && pDiffInfo->ignoreNegative) { colDataSetNull_f(pOutput->nullbitmap, pos); } else { - colDataAppendInt32(pOutput, pos, &delta); + colDataAppendInt64(pOutput, pos, &delta); } pDiffInfo->prev.i64 = v; break; @@ -2314,22 +2331,22 @@ static void doHandleDiff(SDiffInfo* pDiffInfo, int32_t type, const char* pv, SCo case TSDB_DATA_TYPE_BOOL: case TSDB_DATA_TYPE_TINYINT: { int8_t v = *(int8_t*)pv; - int8_t delta = factor*(v - pDiffInfo->prev.i64); // direct previous may be null + int64_t delta = factor*(v - pDiffInfo->prev.i64); // direct previous may be null if (delta < 0 && pDiffInfo->ignoreNegative) { colDataSetNull_f(pOutput->nullbitmap, pos); } else { - colDataAppendInt8(pOutput, pos, &delta); + colDataAppendInt64(pOutput, pos, &delta); } pDiffInfo->prev.i64 = v; break; } case TSDB_DATA_TYPE_SMALLINT: { int16_t v = *(int16_t*)pv; - int16_t delta = factor*(v - pDiffInfo->prev.i64); // direct previous may be null + int64_t delta = factor*(v - pDiffInfo->prev.i64); // direct previous may be null if (delta < 0 && pDiffInfo->ignoreNegative) { colDataSetNull_f(pOutput->nullbitmap, pos); } else { - colDataAppendInt16(pOutput, pos, &delta); + colDataAppendInt64(pOutput, pos, &delta); } pDiffInfo->prev.i64 = v; break; @@ -2347,11 +2364,11 @@ static void doHandleDiff(SDiffInfo* pDiffInfo, int32_t type, const char* pv, SCo } case TSDB_DATA_TYPE_FLOAT: { float v = *(float*)pv; - float delta = factor*(v - pDiffInfo->prev.d64); // direct previous may be null - if (delta < 0 && pDiffInfo->ignoreNegative) { + double delta = factor*(v - pDiffInfo->prev.d64); // direct previous may be null + if ((delta < 0 && pDiffInfo->ignoreNegative) || isinf(delta) || isnan(delta)) { //check for overflow colDataSetNull_f(pOutput->nullbitmap, pos); } else { - colDataAppendFloat(pOutput, pos, &delta); + colDataAppendDouble(pOutput, pos, &delta); } pDiffInfo->prev.d64 = v; break; @@ -2359,7 +2376,7 @@ static void doHandleDiff(SDiffInfo* pDiffInfo, int32_t type, const char* pv, SCo case TSDB_DATA_TYPE_DOUBLE: { double v = *(double*)pv; double delta = factor*(v - pDiffInfo->prev.d64); // direct previous may be null - if (delta < 0 && pDiffInfo->ignoreNegative) { + if ((delta < 0 && pDiffInfo->ignoreNegative) || isinf(delta) || isnan(delta)) { //check for overflow colDataSetNull_f(pOutput->nullbitmap, pos); } else { colDataAppendDouble(pOutput, pos, &delta); @@ -3232,13 +3249,13 @@ static uint64_t hllCountCnt(uint8_t *buckets) { z += buckethisto[j]; z *= 0.5; } + z += m * hllSigma(buckethisto[0]/(double)m); double E = (double)llroundl(HLL_ALPHA_INF*m*m/z); return (uint64_t) E; } - int32_t hllFunction(SqlFunctionCtx *pCtx) { SHLLInfo* pInfo = GET_ROWCELL_INTERBUF(GET_RES_INFO(pCtx)); @@ -3271,7 +3288,6 @@ int32_t hllFunction(SqlFunctionCtx *pCtx) { if (count > oldcount) { pInfo->buckets[index] = count; } - } SET_VAL(GET_RES_INFO(pCtx), numOfElems, 1); @@ -3279,9 +3295,13 @@ int32_t hllFunction(SqlFunctionCtx *pCtx) { } int32_t hllFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) { - SHLLInfo* pInfo = GET_ROWCELL_INTERBUF(GET_RES_INFO(pCtx)); + SResultRowEntryInfo *pInfo = GET_RES_INFO(pCtx); - pInfo->result = hllCountCnt(pInfo->buckets); + SHLLInfo* pHllInfo = GET_ROWCELL_INTERBUF(GET_RES_INFO(pCtx)); + pHllInfo->result = hllCountCnt(pHllInfo->buckets); + if 
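
/* Illustrative sketch, not part of the patch: the doHandleDiff branches above
 * now widen the delta to int64_t (or double) before appending, matching the
 * BIGINT/DOUBLE result type that translateDiff assigns. The point of the
 * widening, in isolation: */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
int main(void) {
  int32_t prev = INT32_MIN, cur = INT32_MAX;
  /* cur - prev in 32-bit arithmetic would overflow: the true difference,
   * 2^32 - 1, does not fit in int32_t -- exactly what DIFF on an INT column
   * can produce. Widening before subtracting is always safe: */
  int64_t delta = (int64_t)cur - (int64_t)prev;
  printf("delta=%" PRId64 "\n", delta); /* prints delta=4294967295 */
  return 0;
}
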
(tsCountAlwaysReturnValue && pHllInfo->result == 0) { + pInfo->numOfRes = 1; + } return functionFinalize(pCtx, pBlock); } @@ -3529,7 +3549,12 @@ int32_t csumFunction(SqlFunctionCtx* pCtx) { double v; GET_TYPED_DATA(v, double, type, data); pSumRes->dsum += v; - colDataAppend(pOutput, pos, (char *)&pSumRes->dsum, false); + //check for overflow + if (isinf(pSumRes->dsum) || isnan(pSumRes->dsum)) { + colDataAppendNULL(pOutput, pos); + } else { + colDataAppend(pOutput, pos, (char *)&pSumRes->dsum, false); + } } //TODO: remove this after pTsOutput is handled @@ -3603,7 +3628,12 @@ int32_t mavgFunction(SqlFunctionCtx* pCtx) { pInfo->points[pInfo->pos] = v; double result = pInfo->sum / pInfo->numOfPoints; - colDataAppend(pOutput, pos, (char *)&result, false); + //check for overflow + if (isinf(result) || isnan(result)) { + colDataAppendNULL(pOutput, pos); + } else { + colDataAppend(pOutput, pos, (char *)&result, false); + } //TODO: remove this after pTsOutput is handled if (pTsOutput != NULL) { @@ -3677,7 +3707,6 @@ int32_t sampleFunction(SqlFunctionCtx* pCtx) { TSKEY* tsList = (int64_t*)pInput->pPTS->pData; SColumnInfoData* pInputCol = pInput->pData[0]; - SColumnInfoData* pTsOutput = pCtx->pTsOutput; SColumnInfoData* pOutput = (SColumnInfoData*)pCtx->pOutput; int32_t startOffset = pCtx->offset; @@ -3700,24 +3729,6 @@ int32_t sampleFunction(SqlFunctionCtx* pCtx) { return pInfo->numSampled; } -//int32_t sampleFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) { -// SResultRowEntryInfo* pResInfo = GET_RES_INFO(pCtx); -// SSampleInfo* pInfo = GET_ROWCELL_INTERBUF(GET_RES_INFO(pCtx)); -// int32_t slotId = pCtx->pExpr->base.resSchema.slotId; -// SColumnInfoData* pCol = taosArrayGet(pBlock->pDataBlock, slotId); -// -// //int32_t currentRow = pBlock->info.rows; -// pResInfo->numOfRes = pInfo->numSampled; -// -// for (int32_t i = 0; i < pInfo->numSampled; ++i) { -// colDataAppend(pCol, i, pInfo->data + i * pInfo->colBytes, false); -// //TODO: handle ts output -// } -// -// return pResInfo->numOfRes; -//} - - bool getTailFuncEnv(SFunctionNode* pFunc, SFuncExecEnv* pEnv) { SColumnNode* pCol = (SColumnNode*)nodesListGetNode(pFunc->pParameterList, 0); SValueNode* pVal = (SValueNode*)nodesListGetNode(pFunc->pParameterList, 1); @@ -3763,6 +3774,7 @@ static void tailAssignResult(STailItem* pItem, char *data, int32_t colBytes, TSK if (isNull) { pItem->isNull = true; } else { + pItem->isNull = false; memcpy(pItem->data, data, colBytes); } } @@ -3793,7 +3805,6 @@ int32_t tailFunction(SqlFunctionCtx* pCtx) { TSKEY* tsList = (int64_t*)pInput->pPTS->pData; SColumnInfoData* pInputCol = pInput->pData[0]; - SColumnInfoData* pTsOutput = pCtx->pTsOutput; SColumnInfoData* pOutput = (SColumnInfoData*)pCtx->pOutput; int32_t startOffset = pCtx->offset; @@ -3869,8 +3880,22 @@ bool uniqueFunctionSetup(SqlFunctionCtx* pCtx, SResultRowEntryInfo* pResInfo) { } static void doUniqueAdd(SUniqueInfo* pInfo, char *data, TSKEY ts, bool isNull) { - int32_t hashKeyBytes = IS_VAR_DATA_TYPE(pInfo->colType) ? 
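
/* Illustrative sketch, not part of the patch: the csum/mavg hunks above adopt
 * one convention -- a running double result that has degenerated to inf or
 * nan is emitted as NULL rather than as a garbage value. Minimal demo: */
#include <float.h>
#include <math.h>
#include <stdio.h>
int main(void) {
  double sum = DBL_MAX;
  sum += DBL_MAX; /* the cumulative sum overflows to +inf */
  if (isinf(sum) || isnan(sum)) {
    puts("NULL"); /* csumFunction now calls colDataAppendNULL here */
  } else {
    printf("%f\n", sum);
  }
  return 0;
}
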
varDataTLen(data) : pInfo->colBytes; + //handle null elements + if (isNull == true) { + int32_t size = sizeof(SUniqueItem) + pInfo->colBytes; + SUniqueItem *pItem = (SUniqueItem *)(pInfo->pItems + pInfo->numOfPoints * size); + if (pInfo->hasNull == false && pItem->isNull == false) { + pItem->timestamp = ts; + pItem->isNull = true; + pInfo->numOfPoints++; + pInfo->hasNull = true; + } else if (pItem->timestamp > ts && pItem->isNull == true) { + pItem->timestamp = ts; + } + return; + } + int32_t hashKeyBytes = IS_VAR_DATA_TYPE(pInfo->colType) ? varDataTLen(data) : pInfo->colBytes; SUniqueItem *pHashItem = taosHashGet(pInfo->pHash, data, hashKeyBytes); if (pHashItem == NULL) { int32_t size = sizeof(SUniqueItem) + pInfo->colBytes; @@ -3883,7 +3908,6 @@ static void doUniqueAdd(SUniqueInfo* pInfo, char *data, TSKEY ts, bool isNull) { } else if (pHashItem->timestamp > ts) { pHashItem->timestamp = ts; } - } int32_t uniqueFunction(SqlFunctionCtx* pCtx) { @@ -3910,7 +3934,11 @@ int32_t uniqueFunction(SqlFunctionCtx* pCtx) { for (int32_t i = 0; i < pInfo->numOfPoints; ++i) { SUniqueItem *pItem = (SUniqueItem *)(pInfo->pItems + i * (sizeof(SUniqueItem) + pInfo->colBytes)); - colDataAppend(pOutput, i, pItem->data, false); + if (pItem->isNull == true) { + colDataAppendNULL(pOutput, i); + } else { + colDataAppend(pOutput, i, pItem->data, false); + } if (pTsOutput != NULL) { colDataAppendInt64(pTsOutput, i, &pItem->timestamp); } @@ -3921,7 +3949,7 @@ int32_t uniqueFunction(SqlFunctionCtx* pCtx) { int32_t uniqueFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) { SResultRowEntryInfo* pResInfo = GET_RES_INFO(pCtx); - SUniqueInfo* pInfo = GET_ROWCELL_INTERBUF(GET_RES_INFO(pCtx)); + SUniqueInfo* pInfo = GET_ROWCELL_INTERBUF(pResInfo); int32_t slotId = pCtx->pExpr->base.resSchema.slotId; SColumnInfoData* pCol = taosArrayGet(pBlock->pDataBlock, slotId); @@ -3934,3 +3962,260 @@ int32_t uniqueFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) { return pResInfo->numOfRes; } +typedef struct STwaInfo { + double dOutput; + SPoint1 p; + STimeWindow win; +} STwaInfo; + +bool getTwaFuncEnv(struct SFunctionNode* pFunc, SFuncExecEnv* pEnv) { + pEnv->calcMemSize = sizeof(STwaInfo); + return true; +} + +bool twaFunctionSetup(SqlFunctionCtx *pCtx, SResultRowEntryInfo* pResultInfo) { + if (!functionSetup(pCtx, pResultInfo)) { + return false; + } + + STwaInfo *pInfo = GET_ROWCELL_INTERBUF(GET_RES_INFO(pCtx)); + pInfo->p.key = INT64_MIN; + pInfo->win = TSWINDOW_INITIALIZER; + return true; +} + +static double twa_get_area(SPoint1 s, SPoint1 e) { + if ((s.val >= 0 && e.val >= 0)|| (s.val <=0 && e.val <= 0)) { + return (s.val + e.val) * (e.key - s.key) / 2; + } + + double x = (s.key * e.val - e.key * s.val)/(e.val - s.val); + double val = (s.val * (x - s.key) + e.val * (e.key - x)) / 2; + return val; +} + +#define INIT_INTP_POINT(_p, _k, _v) \ + do { \ + (_p).key = (_k); \ + (_p).val = (_v); \ + } while (0) + +int32_t twaFunction(SqlFunctionCtx* pCtx) { + SInputColumnInfoData* pInput = &pCtx->input; + SColumnInfoData* pInputCol = pInput->pData[0]; + + TSKEY* tsList = (int64_t*)pInput->pPTS->pData; + + SResultRowEntryInfo *pResInfo = GET_RES_INFO(pCtx); + + STwaInfo *pInfo = GET_ROWCELL_INTERBUF(pResInfo); + SPoint1 *last = &pInfo->p; + int32_t numOfElems = 0; + + int32_t i = pInput->startRowIndex; + if (pCtx->start.key != INT64_MIN) { + ASSERT((pCtx->start.key < tsList[i] && pCtx->order == TSDB_ORDER_ASC) || + (pCtx->start.key > tsList[i] && pCtx->order == TSDB_ORDER_DESC)); + + ASSERT(last->key == INT64_MIN); + last->key = 
tsList[i]; + + GET_TYPED_DATA(last->val, double, pInputCol->info.type, colDataGetData(pInputCol, i)); + + pInfo->dOutput += twa_get_area(pCtx->start, *last); + pInfo->win.skey = pCtx->start.key; + numOfElems++; + i += 1; + } else if (pInfo->p.key == INT64_MIN) { + last->key = tsList[i]; + GET_TYPED_DATA(last->val, double, pInputCol->info.type, colDataGetData(pInputCol, i)); + + pInfo->win.skey = last->key; + numOfElems++; + i += 1; + } + + SPoint1 st = {0}; + + // calculate the value of + switch(pInputCol->info.type) { + case TSDB_DATA_TYPE_TINYINT: { + int8_t *val = (int8_t*) colDataGetData(pInputCol, 0); + for (; i < pInput->numOfRows + pInput->startRowIndex; i += 1) { + if (colDataIsNull_f(pInputCol->nullbitmap, i)) { + continue; + } + + INIT_INTP_POINT(st, tsList[i], val[i]); + pInfo->dOutput += twa_get_area(pInfo->p, st); + pInfo->p = st; + } + break; + } + + case TSDB_DATA_TYPE_SMALLINT: { + int16_t *val = (int16_t*) colDataGetData(pInputCol, 0); + for (; i < pInput->numOfRows + pInput->startRowIndex; i += 1) { + if (colDataIsNull_f(pInputCol->nullbitmap, i)) { + continue; + } + + INIT_INTP_POINT(st, tsList[i], val[i]); + pInfo->dOutput += twa_get_area(pInfo->p, st); + pInfo->p = st; + } + break; + } + case TSDB_DATA_TYPE_INT: { + int32_t *val = (int32_t*) colDataGetData(pInputCol, 0); + for (; i < pInput->numOfRows + pInput->startRowIndex; i += 1) { + if (colDataIsNull_f(pInputCol->nullbitmap, i)) { + continue; + } + + INIT_INTP_POINT(st, tsList[i], val[i]); + pInfo->dOutput += twa_get_area(pInfo->p, st); + pInfo->p = st; + } + break; + } + case TSDB_DATA_TYPE_BIGINT: { + int64_t *val = (int64_t*) colDataGetData(pInputCol, 0); + for (; i < pInput->numOfRows + pInput->startRowIndex; i += 1) { + if (colDataIsNull_f(pInputCol->nullbitmap, i)) { + continue; + } + + INIT_INTP_POINT(st, tsList[i], val[i]); + pInfo->dOutput += twa_get_area(pInfo->p, st); + pInfo->p = st; + } + break; + } + case TSDB_DATA_TYPE_FLOAT: { + float *val = (float*) colDataGetData(pInputCol, 0); + for (; i < pInput->numOfRows + pInput->startRowIndex; i += 1) { + if (colDataIsNull_f(pInputCol->nullbitmap, i)) { + continue; + } + + INIT_INTP_POINT(st, tsList[i], val[i]); + pInfo->dOutput += twa_get_area(pInfo->p, st); + pInfo->p = st; + } + break; + } + case TSDB_DATA_TYPE_DOUBLE: { + double *val = (double*) colDataGetData(pInputCol, 0); + for (; i < pInput->numOfRows + pInput->startRowIndex; i += 1) { + if (colDataIsNull_f(pInputCol->nullbitmap, i)) { + continue; + } + + INIT_INTP_POINT(st, tsList[i], val[i]); + pInfo->dOutput += twa_get_area(pInfo->p, st); + pInfo->p = st; + } + break; + } + case TSDB_DATA_TYPE_UTINYINT: { + uint8_t *val = (uint8_t*) colDataGetData(pInputCol, 0); + for (; i < pInput->numOfRows + pInput->startRowIndex; i += 1) { + if (colDataIsNull_f(pInputCol->nullbitmap, i)) { + continue; + } + + INIT_INTP_POINT(st, tsList[i], val[i]); + pInfo->dOutput += twa_get_area(pInfo->p, st); + pInfo->p = st; + } + break; + } + case TSDB_DATA_TYPE_USMALLINT: { + uint16_t *val = (uint16_t*) colDataGetData(pInputCol, 0); + for (; i < pInput->numOfRows + pInput->startRowIndex; i += 1) { + if (colDataIsNull_f(pInputCol->nullbitmap, i)) { + continue; + } + + INIT_INTP_POINT(st, tsList[i], val[i]); + pInfo->dOutput += twa_get_area(pInfo->p, st); + pInfo->p = st; + } + break; + } + case TSDB_DATA_TYPE_UINT: { + uint32_t *val = (uint32_t*) colDataGetData(pInputCol, 0); + for (; i < pInput->numOfRows + pInput->startRowIndex; i += 1) { + if (colDataIsNull_f(pInputCol->nullbitmap, i)) { + continue; + } + + 
INIT_INTP_POINT(st, tsList[i], val[i]); + pInfo->dOutput += twa_get_area(pInfo->p, st); + pInfo->p = st; + } + break; + } + case TSDB_DATA_TYPE_UBIGINT: { + uint64_t *val = (uint64_t*) colDataGetData(pInputCol, 0); + for (; i < pInput->numOfRows + pInput->startRowIndex; i += 1) { + if (colDataIsNull_f(pInputCol->nullbitmap, i)) { + continue; + } + + INIT_INTP_POINT(st, tsList[i], val[i]); + pInfo->dOutput += twa_get_area(pInfo->p, st); + pInfo->p = st; + } + break; + } + + default: ASSERT(0); + } + + // the last interpolated time window value + if (pCtx->end.key != INT64_MIN) { + pInfo->dOutput += twa_get_area(pInfo->p, pCtx->end); + pInfo->p = pCtx->end; + } + + pInfo->win.ekey = pInfo->p.key; + + SET_VAL(pResInfo, numOfElems, 1); + return TSDB_CODE_SUCCESS; +} + +/* + * Copy the input to interResBuf to avoid the input buffer space being overwritten + * by the next input data. The TWA function only applies to each table, so no merge procedure + * is required; we simply copy the result to interResBuffer. + */ +//void twa_function_copy(SQLFunctionCtx *pCtx) { +// assert(pCtx->inputType == TSDB_DATA_TYPE_BINARY); +// SResultRowEntryInfo *pResInfo = GET_RES_INFO(pCtx); +// +// memcpy(GET_ROWCELL_INTERBUF(pResInfo), pCtx->pInput, (size_t)pCtx->inputBytes); +// pResInfo->hasResult = ((STwaInfo *)pCtx->pInput)->hasResult; +//} + +int32_t twaFinalize(struct SqlFunctionCtx *pCtx, SSDataBlock* pBlock) { + SResultRowEntryInfo *pResInfo = GET_RES_INFO(pCtx); + + STwaInfo *pInfo = (STwaInfo *)GET_ROWCELL_INTERBUF(pResInfo); + if (pResInfo->numOfRes == 0) { + pResInfo->isNullRes = 1; + } else { + // assert(pInfo->win.ekey == pInfo->p.key && pInfo->hasResult == pResInfo->hasResult); + if (pInfo->win.ekey == pInfo->win.skey) { + pInfo->dOutput = pInfo->p.val; + } else { + pInfo->dOutput = pInfo->dOutput / (pInfo->win.ekey - pInfo->win.skey); + } + + pResInfo->numOfRes = 1; + } + + return functionFinalize(pCtx, pBlock); +} + diff --git a/source/libs/function/src/functionMgt.c b/source/libs/function/src/functionMgt.c index 506b0eb8da98444491b2f86f0e9951b71193de75..611ae8d81fdc681c28936456b5b46c0a7e09d4c0 100644 --- a/source/libs/function/src/functionMgt.c +++ b/source/libs/function/src/functionMgt.c @@ -16,7 +16,6 @@ #include "functionMgt.h" #include "builtins.h" -#include "catalog.h" #include "functionMgtInt.h" #include "taos.h" #include "taoserror.h" @@ -65,35 +64,19 @@ static bool isSpecificClassifyFunc(int32_t funcId, uint64_t classification) { return FUNC_MGT_TEST_MASK(funcMgtBuiltins[funcId].classification, classification); } -static int32_t getUdfInfo(SFmGetFuncInfoParam* pParam, SFunctionNode* pFunc) { - SFuncInfo funcInfo = {0}; - int32_t code = catalogGetUdfInfo(pParam->pCtg, pParam->pRpc, pParam->pMgmtEps, pFunc->functionName, &funcInfo); - if (TSDB_CODE_SUCCESS != code) { - return code; - } - - pFunc->funcType = FUNCTION_TYPE_UDF; - pFunc->funcId = TSDB_FUNC_TYPE_AGGREGATE == funcInfo.funcType ?
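
/* Illustrative sketch, not part of the patch: a worked check of the TWA math
 * above. twa_get_area integrates linearly between two points (splitting at
 * the zero crossing when the sign changes) and twaFinalize divides the summed
 * area by the window span. Pt stands in for SPoint1, using a double key
 * instead of a TSKEY, for the arithmetic only: */
#include <stdio.h>
typedef struct { double key, val; } Pt;
static double area(Pt s, Pt e) {
  if ((s.val >= 0 && e.val >= 0) || (s.val <= 0 && e.val <= 0)) {
    return (s.val + e.val) * (e.key - s.key) / 2; /* plain trapezoid */
  }
  double x = (s.key * e.val - e.key * s.val) / (e.val - s.val); /* zero crossing */
  return (s.val * (x - s.key) + e.val * (e.key - x)) / 2;       /* two triangles */
}
int main(void) {
  Pt p0 = {0, 10}, p1 = {10, 20}, p2 = {20, -20};
  double total = area(p0, p1) + area(p1, p2);    /* 150 + 0 */
  printf("twa=%g\n", total / (p2.key - p0.key)); /* 150 / 20 = 7.5 */
  return 0;
}
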
FUNC_AGGREGATE_UDF_ID : FUNC_SCALAR_UDF_ID; - pFunc->node.resType.type = funcInfo.outputType; - pFunc->node.resType.bytes = funcInfo.outputLen; - pFunc->udfBufSize = funcInfo.bufSize; - tFreeSFuncInfo(&funcInfo); - return TSDB_CODE_SUCCESS; -} - int32_t fmFuncMgtInit() { taosThreadOnce(&functionHashTableInit, doInitFunctionTable); return initFunctionCode; } -int32_t fmGetFuncInfo(SFmGetFuncInfoParam* pParam, SFunctionNode* pFunc) { +int32_t fmGetFuncInfo(SFunctionNode* pFunc, char* pMsg, int32_t msgLen) { void* pVal = taosHashGet(gFunMgtService.pFuncNameHashTable, pFunc->functionName, strlen(pFunc->functionName)); if (NULL != pVal) { pFunc->funcId = *(int32_t*)pVal; pFunc->funcType = funcMgtBuiltins[pFunc->funcId].type; - return funcMgtBuiltins[pFunc->funcId].translateFunc(pFunc, pParam->pErrBuf, pParam->errBufLen); + return funcMgtBuiltins[pFunc->funcId].translateFunc(pFunc, pMsg, msgLen); } - return getUdfInfo(pParam, pFunc); + return TSDB_CODE_FUNC_NOT_BUILTIN_FUNTION; } bool fmIsBuiltinFunc(const char* pFunc) { @@ -216,3 +199,81 @@ bool fmIsInvertible(int32_t funcId) { } return res; } + +static SFunctionNode* createFunction(const char* pName, SNodeList* pParameterList) { + SFunctionNode* pFunc = nodesMakeNode(QUERY_NODE_FUNCTION); + if (NULL == pFunc) { + return NULL; + } + strcpy(pFunc->functionName, pName); + pFunc->pParameterList = pParameterList; + char msg[64] = {0}; + if (TSDB_CODE_SUCCESS != fmGetFuncInfo(pFunc, msg, sizeof(msg))) { + nodesDestroyNode(pFunc); + return NULL; + } + return pFunc; +} + +static SColumnNode* createColumnByFunc(const SFunctionNode* pFunc) { + SColumnNode* pCol = nodesMakeNode(QUERY_NODE_COLUMN); + if (NULL == pCol) { + return NULL; + } + strcpy(pCol->colName, pFunc->node.aliasName); + pCol->node.resType = pFunc->node.resType; + return pCol; +} + +bool fmIsDistExecFunc(int32_t funcId) { + if (!fmIsVectorFunc(funcId)) { + return true; + } + return (NULL != funcMgtBuiltins[funcId].pPartialFunc && NULL != funcMgtBuiltins[funcId].pMergeFunc); +} + +static int32_t createPartialFunction(const SFunctionNode* pSrcFunc, SFunctionNode** pPartialFunc) { + SNodeList* pParameterList = nodesCloneList(pSrcFunc->pParameterList); + if (NULL == pParameterList) { + return TSDB_CODE_OUT_OF_MEMORY; + } + *pPartialFunc = createFunction(funcMgtBuiltins[pSrcFunc->funcId].pPartialFunc, pParameterList); + if (NULL == *pPartialFunc) { + nodesDestroyList(pParameterList); + return TSDB_CODE_OUT_OF_MEMORY; + } + snprintf((*pPartialFunc)->node.aliasName, sizeof((*pPartialFunc)->node.aliasName), "%s.%p", + (*pPartialFunc)->functionName, pSrcFunc); + return TSDB_CODE_SUCCESS; +} + +static int32_t createMergeFunction(const SFunctionNode* pSrcFunc, const SFunctionNode* pPartialFunc, + SFunctionNode** pMergeFunc) { + SNodeList* pParameterList = NULL; + nodesListMakeStrictAppend(&pParameterList, createColumnByFunc(pPartialFunc)); + *pMergeFunc = createFunction(funcMgtBuiltins[pSrcFunc->funcId].pMergeFunc, pParameterList); + if (NULL == *pMergeFunc) { + nodesDestroyList(pParameterList); + return TSDB_CODE_OUT_OF_MEMORY; + } + strcpy((*pMergeFunc)->node.aliasName, pSrcFunc->node.aliasName); + return TSDB_CODE_SUCCESS; +} + +int32_t fmGetDistMethod(const SFunctionNode* pFunc, SFunctionNode** pPartialFunc, SFunctionNode** pMergeFunc) { + if (!fmIsDistExecFunc(pFunc->funcId)) { + return TSDB_CODE_FAILED; + } + + int32_t code = createPartialFunction(pFunc, pPartialFunc); + if (TSDB_CODE_SUCCESS == code) { + code = createMergeFunction(pFunc, *pPartialFunc, pMergeFunc); + } + + if 
(TSDB_CODE_SUCCESS != code) { + nodesDestroyNode(*pPartialFunc); + nodesDestroyNode(*pMergeFunc); + } + + return code; +} diff --git a/source/libs/function/src/taggfunction.c b/source/libs/function/src/taggfunction.c index 950655e480b2b3413f26bc56d4771461b0dc4277..e683a38cbd1fd97ac7ba081a65f2af8ac18b8fee 100644 --- a/source/libs/function/src/taggfunction.c +++ b/source/libs/function/src/taggfunction.c @@ -236,7 +236,7 @@ bool isRowEntryCompleted(struct SResultRowEntryInfo* pEntry) { bool isRowEntryInitialized(struct SResultRowEntryInfo* pEntry) { return pEntry->initialized; } - +#if 0 int32_t getResultDataInfo(int32_t dataType, int32_t dataBytes, int32_t functionId, int32_t param, SResultDataInfo* pInfo, int16_t extLength, bool isSuperTable/*, SUdfInfo* pUdfInfo*/) { if (!isValidDataType(dataType)) { @@ -470,6 +470,7 @@ int32_t getResultDataInfo(int32_t dataType, int32_t dataBytes, int32_t functionI return TSDB_CODE_SUCCESS; } +#endif static bool function_setup(SqlFunctionCtx *pCtx, SResultRowEntryInfo* pResultInfo) { if (pResultInfo->initialized) { diff --git a/source/libs/function/src/texpr.c b/source/libs/function/src/texpr.c index b91af2d1577fc994ccaa6b11b8e9044ffb88b594..703b19ced7e1abeee312a414aafe6b34b936c271 100644 --- a/source/libs/function/src/texpr.c +++ b/source/libs/function/src/texpr.c @@ -36,12 +36,7 @@ void tExprTreeDestroy(tExprNode *pNode, void (*fp)(void *)) { if (pNode->nodeType == TEXPR_BINARYEXPR_NODE || pNode->nodeType == TEXPR_UNARYEXPR_NODE) { doExprTreeDestroy(&pNode, fp); - } else if (pNode->nodeType == TEXPR_VALUE_NODE) { - taosVariantDestroy(pNode->pVal); - } else if (pNode->nodeType == TEXPR_COL_NODE) { - taosMemoryFreeClear(pNode->pSchema); } - taosMemoryFree(pNode); } @@ -49,15 +44,6 @@ static void doExprTreeDestroy(tExprNode **pExpr, void (*fp)(void *)) { if (*pExpr == NULL) { return; } - - int32_t type = (*pExpr)->nodeType; - if (type == TEXPR_VALUE_NODE) { - taosVariantDestroy((*pExpr)->pVal); - taosMemoryFree((*pExpr)->pVal); - } else if (type == TEXPR_COL_NODE) { - taosMemoryFree((*pExpr)->pSchema); - } - taosMemoryFree(*pExpr); *pExpr = NULL; } diff --git a/source/libs/function/src/tudf.c b/source/libs/function/src/tudf.c index 441648e52b2ef78326d73d1944bcfbfd0009abc6..472d67260730ca10522ee0d07fc1d608b132688e 100644 --- a/source/libs/function/src/tudf.c +++ b/source/libs/function/src/tudf.c @@ -66,7 +66,7 @@ void udfUdfdExit(uv_process_t *process, int64_t exitStatus, int termSignal) { } static int32_t udfSpawnUdfd(SUdfdData* pData) { - fnInfo("dnode start spawning udfd"); + fnInfo("start to init udfd"); uv_process_options_t options = {0}; char path[PATH_MAX] = {0}; @@ -140,6 +140,8 @@ static int32_t udfSpawnUdfd(SUdfdData* pData) { if (err != 0) { fnError("can not spawn udfd. 
path: %s, error: %s", path, uv_strerror(err)); + } else { + fnInfo("udfd is initialized"); } return err; } diff --git a/source/libs/index/CMakeLists.txt b/source/libs/index/CMakeLists.txt index e55b004972d841a2049dc0474dbf3343b1cc300a..75eac2430f70c7a4cfc215eee5515a392d1bcd40 100644 --- a/source/libs/index/CMakeLists.txt +++ b/source/libs/index/CMakeLists.txt @@ -12,6 +12,7 @@ target_link_libraries( PUBLIC os PUBLIC util PUBLIC common + PUBLIC vnode PUBLIC nodes PUBLIC scalar PUBLIC function diff --git a/source/libs/index/inc/indexCache.h b/source/libs/index/inc/indexCache.h index aff2e0e836c0f2aae9a1fe63dd984cd4f5eb7850..6e68163d74677ad0b7c9df944b73d2ebe602d93a 100644 --- a/source/libs/index/inc/indexCache.h +++ b/source/libs/index/inc/indexCache.h @@ -36,9 +36,10 @@ typedef struct MemTable { typedef struct IndexCache { T_REF_DECLARE() MemTable *mem, *imm; + int32_t merging; SIndex* index; char* colName; - int32_t version; + int64_t version; int64_t occupiedMem; int8_t type; uint64_t suid; @@ -47,12 +48,12 @@ typedef struct IndexCache { TdThreadCond finished; } IndexCache; -#define CACHE_VERSION(cache) atomic_load_32(&cache->version) +#define CACHE_VERSION(cache) atomic_load_64(&cache->version) typedef struct CacheTerm { // key char* colVal; - int32_t version; + int64_t version; // value uint64_t uid; int8_t colType; @@ -74,7 +75,7 @@ void indexCacheIteratorDestroy(Iterate* iiter); int indexCachePut(void* cache, SIndexTerm* term, uint64_t uid); // int indexCacheGet(void *cache, uint64_t *rst); -int indexCacheSearch(void* cache, SIndexTermQuery* query, SIdxTempResult* tr, STermValueType* s); +int indexCacheSearch(void* cache, SIndexTermQuery* query, SIdxTRslt* tr, STermValueType* s); void indexCacheRef(IndexCache* cache); void indexCacheUnRef(IndexCache* cache); diff --git a/source/libs/index/inc/indexComm.h b/source/libs/index/inc/indexComm.h index 3066fd1c2c57481cc80a6b19a7dc2de1a9b4d6cc..c338300b57d1c5d2d570130f596303503ee30187 100644 --- a/source/libs/index/inc/indexComm.h +++ b/source/libs/index/inc/indexComm.h @@ -33,8 +33,9 @@ typedef enum { MATCH, CONTINUE, BREAK } TExeCond; typedef TExeCond (*_cache_range_compare)(void* a, void* b, int8_t type); -TExeCond tCompare(__compar_fn_t func, int8_t cmpType, void* a, void* b, int8_t dType); -TExeCond tDoCompare(__compar_fn_t func, int8_t cmpType, void* a, void* b); +__compar_fn_t indexGetCompar(int8_t type); +TExeCond tCompare(__compar_fn_t func, int8_t cmpType, void* a, void* b, int8_t dType); +TExeCond tDoCompare(__compar_fn_t func, int8_t cmpType, void* a, void* b); _cache_range_compare indexGetCompare(RangeType ty); diff --git a/source/libs/index/inc/indexInt.h b/source/libs/index/inc/indexInt.h index 0bdcb131b69befd518b233e38a2653a17e67bde8..24a4e99970692b202ab36fd1d1a83a45a09bcaa4 100644 --- a/source/libs/index/inc/indexInt.h +++ b/source/libs/index/inc/indexInt.h @@ -34,6 +34,15 @@ extern "C" { #endif +// clang-format off +#define indexFatal(...) do { if (idxDebugFlag & DEBUG_FATAL) { taosPrintLog("INDEX FATAL ", DEBUG_FATAL, 255, __VA_ARGS__); }} while (0) +#define indexError(...) do { if (idxDebugFlag & DEBUG_ERROR) { taosPrintLog("INDEX ERROR ", DEBUG_ERROR, 255, __VA_ARGS__); }} while (0) +#define indexWarn(...) do { if (idxDebugFlag & DEBUG_WARN) { taosPrintLog("INDEX WARN ", DEBUG_WARN, 255, __VA_ARGS__); }} while (0) +#define indexInfo(...) do { if (idxDebugFlag & DEBUG_INFO) { taosPrintLog("INDEX ", DEBUG_INFO, 255, __VA_ARGS__); } } while (0) +#define indexDebug(...) 
do { if (idxDebugFlag & DEBUG_DEBUG) { taosPrintLog("INDEX ", DEBUG_DEBUG, sDebugFlag, __VA_ARGS__);} } while (0) +#define indexTrace(...) do { if (idxDebugFlag & DEBUG_TRACE) { taosPrintLog("INDEX ", DEBUG_TRACE, sDebugFlag, __VA_ARGS__);} } while (0) +// clang-format on + typedef enum { LT, LE, GT, GE } RangeType; typedef enum { kTypeValue, kTypeDeletion } STermValueType; @@ -122,8 +131,7 @@ typedef struct TFileCacheKey { char* colName; int32_t nColName; } ICacheKey; - -int indexFlushCacheToTFile(SIndex* sIdx, void*); +int indexFlushCacheToTFile(SIndex* sIdx, void*, bool quit); int64_t indexAddRef(void* p); int32_t indexRemoveRef(int64_t ref); @@ -134,15 +142,6 @@ int32_t indexSerialCacheKey(ICacheKey* key, char* buf); // int32_t indexSerialKey(ICacheKey* key, char* buf); // int32_t indexSerialTermKey(SIndexTerm* itm, char* buf); -// clang-format off -#define indexFatal(...) do { if (sDebugFlag & DEBUG_FATAL) { taosPrintLog("INDEX FATAL ", DEBUG_FATAL, 255, __VA_ARGS__); }} while (0) -#define indexError(...) do { if (sDebugFlag & DEBUG_ERROR) { taosPrintLog("INDEX ERROR ", DEBUG_ERROR, 255, __VA_ARGS__); }} while (0) -#define indexWarn(...) do { if (sDebugFlag & DEBUG_WARN) { taosPrintLog("INDEX WARN ", DEBUG_WARN, 255, __VA_ARGS__); }} while (0) -#define indexInfo(...) do { if (sDebugFlag & DEBUG_INFO) { taosPrintLog("INDEX ", DEBUG_INFO, 255, __VA_ARGS__); } } while (0) -#define indexDebug(...) do { if (sDebugFlag & DEBUG_DEBUG) { taosPrintLog("INDEX ", DEBUG_DEBUG, sDebugFlag, __VA_ARGS__);} } while (0) -#define indexTrace(...) do { if (sDebugFlag & DEBUG_TRACE) { taosPrintLog("INDEX ", DEBUG_TRACE, sDebugFlag, __VA_ARGS__);} } while (0) -// clang-format on - #define INDEX_TYPE_CONTAIN_EXTERN_TYPE(ty, exTy) (((ty >> 4) & (exTy)) != 0) #define INDEX_TYPE_GET_TYPE(ty) (ty & 0x0F) diff --git a/source/libs/index/inc/indexTfile.h b/source/libs/index/inc/indexTfile.h index 85ed397b0ac5d14984a4020b265fbdcf6951c68e..ca55aa93da5a47bcefa26bf880d115abeb46b8c8 100644 --- a/source/libs/index/inc/indexTfile.h +++ b/source/libs/index/inc/indexTfile.h @@ -28,12 +28,12 @@ extern "C" { // tfile header content // |<---suid--->|<---version--->|<-------colName------>|<---type-->|<--fstOffset->| -// |<-uint64_t->|<---int32_t--->|<--TSDB_COL_NAME_LEN-->|<-uint8_t->|<---int32_t-->| +// |<-uint64_t->|<---int64_t--->|<--TSDB_COL_NAME_LEN-->|<-uint8_t->|<---int32_t-->| #pragma pack(push, 1) typedef struct TFileHeader { uint64_t suid; - int32_t version; + int64_t version; char colName[TSDB_COL_NAME_LEN]; // uint8_t colType; int32_t fstOffset; @@ -74,9 +74,10 @@ typedef struct TFileReader { } TFileReader; typedef struct IndexTFile { - char* path; - TFileCache* cache; - TFileWriter* tw; + char* path; + TFileCache* cache; + TFileWriter* tw; + TdThreadMutex mtx; } IndexTFile; typedef struct TFileWriterOpt { @@ -101,14 +102,14 @@ void tfileCachePut(TFileCache* tcache, ICacheKey* key, TFileReader* read TFileReader* tfileGetReaderByCol(IndexTFile* tf, uint64_t suid, char* colName); -TFileReader* tfileReaderOpen(char* path, uint64_t suid, int32_t version, const char* colName); +TFileReader* tfileReaderOpen(char* path, uint64_t suid, int64_t version, const char* colName); TFileReader* tfileReaderCreate(WriterCtx* ctx); void tfileReaderDestroy(TFileReader* reader); -int tfileReaderSearch(TFileReader* reader, SIndexTermQuery* query, SIdxTempResult* tr); +int tfileReaderSearch(TFileReader* reader, SIndexTermQuery* query, SIdxTRslt* tr); void tfileReaderRef(TFileReader* reader); void tfileReaderUnRef(TFileReader* reader); 
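
/* Illustrative sketch, not part of the patch: widening version from int32_t
 * to int64_t grows the fixed tfile header by 4 bytes, so readers and writers
 * must agree on the new layout shown in the header comment above. A
 * compile-time check of a packed stand-in struct; DEMO_COL_NAME_LEN is a
 * hypothetical placeholder, not the project's TSDB_COL_NAME_LEN definition: */
#include <assert.h>
#include <stdint.h>
#define DEMO_COL_NAME_LEN 65 /* placeholder value for the arithmetic only */
#pragma pack(push, 1)
typedef struct {
  uint64_t suid;
  int64_t  version; /* was int32_t before this patch */
  char     colName[DEMO_COL_NAME_LEN];
  uint8_t  colType;
  int32_t  fstOffset;
} DemoTFileHeader;
#pragma pack(pop)
static_assert(sizeof(DemoTFileHeader) == 8 + 8 + DEMO_COL_NAME_LEN + 1 + 4,
              "packed tfile header must match the on-disk layout");
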
-TFileWriter* tfileWriterOpen(char* path, uint64_t suid, int32_t version, const char* colName, uint8_t type); +TFileWriter* tfileWriterOpen(char* path, uint64_t suid, int64_t version, const char* colName, uint8_t type); void tfileWriterClose(TFileWriter* tw); TFileWriter* tfileWriterCreate(WriterCtx* ctx, TFileHeader* header); void tfileWriterDestroy(TFileWriter* tw); @@ -119,7 +120,7 @@ int tfileWriterFinish(TFileWriter* tw); IndexTFile* indexTFileCreate(const char* path); void indexTFileDestroy(IndexTFile* tfile); int indexTFilePut(void* tfile, SIndexTerm* term, uint64_t uid); -int indexTFileSearch(void* tfile, SIndexTermQuery* query, SIdxTempResult* tr); +int indexTFileSearch(void* tfile, SIndexTermQuery* query, SIdxTRslt* tr); Iterate* tfileIteratorCreate(TFileReader* reader); void tfileIteratorDestroy(Iterate* iterator); diff --git a/source/libs/index/inc/indexUtil.h b/source/libs/index/inc/indexUtil.h index f1676ed411a5e2074667816d1746dc607dc0f44d..dbaecaa9630b04b8b50f108c1a59e499f04899dc 100644 --- a/source/libs/index/inc/indexUtil.h +++ b/source/libs/index/inc/indexUtil.h @@ -66,7 +66,7 @@ extern "C" { * [1, 4, 5] * output:[4, 5] */ -void iIntersection(SArray *interResults, SArray *finalResult); +void iIntersection(SArray *in, SArray *out); /* multi sorted result union * input: [1, 2, 4, 5] @@ -74,7 +74,7 @@ void iIntersection(SArray *interResults, SArray *finalResult); * [1, 4, 5] * output:[1, 2, 3, 4, 5] */ -void iUnion(SArray *interResults, SArray *finalResult); +void iUnion(SArray *in, SArray *out); /* see example * total: [1, 2, 4, 5, 7, 8] @@ -92,19 +92,24 @@ typedef struct { uint64_t data; } SIdxVerdata; +/* + * index temp result + * + */ typedef struct { SArray *total; - SArray *added; - SArray *deled; -} SIdxTempResult; + SArray *add; + SArray *del; +} SIdxTRslt; + +SIdxTRslt *idxTRsltCreate(); -SIdxTempResult *sIdxTempResultCreate(); +void idxTRsltClear(SIdxTRslt *tr); -void sIdxTempResultClear(SIdxTempResult *tr); +void idxTRsltDestroy(SIdxTRslt *tr); -void sIdxTempResultDestroy(SIdxTempResult *tr); +void idxTRsltMergeTo(SIdxTRslt *tr, SArray *out); -void sIdxTempResultMergeTo(SArray *result, SIdxTempResult *tr); #ifdef __cplusplus } #endif diff --git a/source/libs/index/src/index.c b/source/libs/index/src/index.c index 6add788a896f8149f49d9f224538d5b3ab4e5b57..ba3aea969f6c8a5214a3999a7d4ca2c68ec503ac 100644 --- a/source/libs/index/src/index.c +++ b/source/libs/index/src/index.c @@ -29,7 +29,7 @@ #include "lucene++/Lucene_c.h" #endif -#define INDEX_NUM_OF_THREADS 1 +#define INDEX_NUM_OF_THREADS 5 #define INDEX_QUEUE_SIZE 200 #define INDEX_DATA_BOOL_NULL 0x02 @@ -80,12 +80,12 @@ static TdThreadOnce isInit = PTHREAD_ONCE_INIT; static int indexTermSearch(SIndex* sIdx, SIndexTermQuery* term, SArray** result); static void indexInterResultsDestroy(SArray* results); -static int indexMergeFinalResults(SArray* interResults, EIndexOperatorType oType, SArray* finalResult); +static int indexMergeFinalResults(SArray* in, EIndexOperatorType oType, SArray* out); static int indexGenTFile(SIndex* index, IndexCache* cache, SArray* batch); // merge cache and tfile by opera type -static void indexMergeCacheAndTFile(SArray* result, IterateValue* icache, IterateValue* iTfv, SIdxTempResult* helper); +static void indexMergeCacheAndTFile(SArray* result, IterateValue* icache, IterateValue* iTfv, SIdxTRslt* helper); // static int32_t indexSerialTermKey(SIndexTerm* itm, char* buf); // int32_t indexSerialKey(ICacheKey* key, char* buf); @@ -150,6 +150,7 @@ void indexClose(SIndex* sIdx) { 
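
/* Illustrative sketch, not part of the patch: the iUnion contract documented
 * in indexUtil.h above -- several sorted uid arrays merged into one sorted,
 * de-duplicated output. A minimal two-input version over plain arrays (the
 * real code takes an SArray holding k inputs): */
#include <inttypes.h>
#include <stdio.h>
static int unionSorted(const uint64_t *a, int na, const uint64_t *b, int nb, uint64_t *out) {
  int i = 0, j = 0, n = 0;
  while (i < na || j < nb) {
    uint64_t v = (j >= nb || (i < na && a[i] <= b[j])) ? a[i++] : b[j++];
    if (n == 0 || out[n - 1] != v) out[n++] = v; /* drop duplicates */
  }
  return n;
}
int main(void) {
  uint64_t a[] = {1, 2, 4, 5}, b[] = {2, 3, 4, 5}, out[8];
  int n = unionSorted(a, 4, b, 4, out);
  for (int k = 0; k < n; ++k) printf("%" PRIu64 " ", out[k]);
  /* prints: 1 2 3 4 5 */
  return 0;
}
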
indexCacheForceToMerge((void*)(*pCache)); indexInfo("%s wait to merge", (*pCache)->colName); indexWait((void*)(sIdx)); + indexInfo("%s finished waiting", (*pCache)->colName); iter = taosHashIterate(sIdx->colObj, iter); indexCacheUnRef(*pCache); } @@ -201,6 +202,7 @@ int indexPut(SIndex* index, SIndexMultiTerm* fVals, uint64_t uid) { char buf[128] = {0}; ICacheKey key = {.suid = p->suid, .colName = p->colName, .nColName = strlen(p->colName), .colType = p->colType}; int32_t sz = indexSerialCacheKey(&key, buf); + indexDebug("suid: %" PRIu64 ", colName: %s, colType: %d", key.suid, key.colName, key.colType); IndexCache** cache = taosHashGet(index->colObj, buf, sz); assert(*cache != NULL); @@ -328,6 +330,7 @@ static int indexTermSearch(SIndex* sIdx, SIndexTermQuery* query, SArray** result char buf[128] = {0}; ICacheKey key = { .suid = term->suid, .colName = term->colName, .nColName = strlen(term->colName), .colType = term->colType}; + indexDebug("suid: %" PRIu64 ", colName: %s, colType: %d", key.suid, key.colName, key.colType); int32_t sz = indexSerialCacheKey(&key, buf); taosThreadMutexLock(&sIdx->mtx); @@ -341,7 +344,7 @@ static int indexTermSearch(SIndex* sIdx, SIndexTermQuery* query, SArray** result int64_t st = taosGetTimestampUs(); - SIdxTempResult* tr = sIdxTempResultCreate(); + SIdxTRslt* tr = idxTRsltCreate(); if (0 == indexCacheSearch(cache, query, tr, &s)) { if (s == kTypeDeletion) { indexInfo("col: %s already drop by", term->colName); @@ -363,12 +366,12 @@ static int indexTermSearch(SIndex* sIdx, SIndexTermQuery* query, SArray** result int64_t cost = taosGetTimestampUs() - st; indexInfo("search cost: %" PRIu64 "us", cost); - sIdxTempResultMergeTo(*result, tr); + idxTRsltMergeTo(tr, *result); - sIdxTempResultDestroy(tr); + idxTRsltDestroy(tr); return 0; END: - sIdxTempResultDestroy(tr); + idxTRsltDestroy(tr); return -1; } static void indexInterResultsDestroy(SArray* results) { @@ -384,38 +387,38 @@ static void indexInterResultsDestroy(SArray* results) { taosArrayDestroy(results); } -static int indexMergeFinalResults(SArray* interResults, EIndexOperatorType oType, SArray* fResults) { +static int indexMergeFinalResults(SArray* in, EIndexOperatorType oType, SArray* out) { // refactor, merge interResults into fResults by oType - for (int i = 0; i < taosArrayGetSize(interResults); i--) { - SArray* t = taosArrayGetP(interResults, i); + for (int i = 0; i < taosArrayGetSize(in); i++) { + SArray* t = taosArrayGetP(in, i); taosArraySort(t, uidCompare); taosArrayRemoveDuplicate(t, uidCompare, NULL); } if (oType == MUST) { - iIntersection(interResults, fResults); + iIntersection(in, out); } else if (oType == SHOULD) { - iUnion(interResults, fResults); + iUnion(in, out); } else if (oType == NOT) { // just one column index, enhance later - taosArrayAddAll(fResults, interResults); + // taosArrayAddAll(fResults, interResults); // not used currently } return 0; } -static void indexMayMergeTempToFinalResult(SArray* result, TFileValue* tfv, SIdxTempResult* tr) { +static void indexMayMergeTempToFinalResult(SArray* result, TFileValue* tfv, SIdxTRslt* tr) { int32_t sz = taosArrayGetSize(result); if (sz > 0) { TFileValue* lv = taosArrayGetP(result, sz - 1); if (tfv != NULL && strcmp(lv->colVal, tfv->colVal) != 0) { - sIdxTempResultMergeTo(lv->tableId, tr); - sIdxTempResultClear(tr); + idxTRsltMergeTo(tr, lv->tableId); + idxTRsltClear(tr); taosArrayPush(result, &tfv); } else if (tfv == NULL) { // handle last iterator - sIdxTempResultMergeTo(lv->tableId, tr); + idxTRsltMergeTo(tr, lv->tableId); } else { // temp
result saved in help tfileValueDestroy(tfv); @@ -424,7 +427,7 @@ static void indexMayMergeTempToFinalResult(SArray* result, TFileValue* tfv, SIdx taosArrayPush(result, &tfv); } } -static void indexMergeCacheAndTFile(SArray* result, IterateValue* cv, IterateValue* tv, SIdxTempResult* tr) { +static void indexMergeCacheAndTFile(SArray* result, IterateValue* cv, IterateValue* tv, SIdxTRslt* tr) { char* colVal = (cv != NULL) ? cv->colVal : tv->colVal; TFileValue* tfv = tfileValueCreate(colVal); @@ -434,9 +437,9 @@ static void indexMergeCacheAndTFile(SArray* result, IterateValue* cv, IterateVal uint64_t id = *(uint64_t*)taosArrayGet(cv->val, 0); uint32_t ver = cv->ver; if (cv->type == ADD_VALUE) { - INDEX_MERGE_ADD_DEL(tr->deled, tr->added, id) + INDEX_MERGE_ADD_DEL(tr->del, tr->add, id) } else if (cv->type == DEL_VALUE) { - INDEX_MERGE_ADD_DEL(tr->added, tr->deled, id) + INDEX_MERGE_ADD_DEL(tr->add, tr->del, id) } } if (tv != NULL) { @@ -452,7 +455,7 @@ static void indexDestroyFinalResult(SArray* result) { taosArrayDestroy(result); } -int indexFlushCacheToTFile(SIndex* sIdx, void* cache) { +int indexFlushCacheToTFile(SIndex* sIdx, void* cache, bool quit) { if (sIdx == NULL) { return -1; } @@ -460,7 +463,10 @@ int indexFlushCacheToTFile(SIndex* sIdx, void* cache) { int64_t st = taosGetTimestampUs(); - IndexCache* pCache = (IndexCache*)cache; + IndexCache* pCache = (IndexCache*)cache; + + while (quit && atomic_load_32(&pCache->merging) == 1) { + } TFileReader* pReader = tfileGetReaderByCol(sIdx->tindex, pCache->suid, pCache->colName); if (pReader == NULL) { indexWarn("empty tfile reader found"); @@ -471,9 +477,9 @@ int indexFlushCacheToTFile(SIndex* sIdx, void* cache) { indexError("%p immtable is empty, ignore merge opera", pCache); indexCacheDestroyImm(pCache); tfileReaderUnRef(pReader); - if (sIdx->quit) { + atomic_store_32(&pCache->merging, 0); + if (quit) { indexPost(sIdx); - // indexCacheBroadcast(pCache); } indexReleaseRef(sIdx->refId); return 0; @@ -489,7 +495,7 @@ int indexFlushCacheToTFile(SIndex* sIdx, void* cache) { bool cn = cacheIter ? cacheIter->next(cacheIter) : false; bool tn = tfileIter ? tfileIter->next(tfileIter) : false; - SIdxTempResult* tr = sIdxTempResultCreate(); + SIdxTRslt* tr = idxTRsltCreate(); while (cn == true || tn == true) { IterateValue* cv = (cn == true) ? cacheIter->getValue(cacheIter) : NULL; IterateValue* tv = (tn == true) ? 
tfileIter->getValue(tfileIter) : NULL;
@@ -515,7 +521,7 @@ int indexFlushCacheToTFile(SIndex* sIdx, void* cache) {
     }
   }
   indexMayMergeTempToFinalResult(result, NULL, tr);
-  sIdxTempResultDestroy(tr);
+  idxTRsltDestroy(tr);

   int ret = indexGenTFile(sIdx, pCache, result);
   indexDestroyFinalResult(result);
@@ -534,7 +540,8 @@ int indexFlushCacheToTFile(SIndex* sIdx, void* cache) {
   } else {
     indexInfo("success to merge , time cost: %" PRId64 "ms", cost / 1000);
   }
-  if (sIdx->quit) {
+  atomic_store_32(&pCache->merging, 0);
+  if (quit) {
     indexPost(sIdx);
   }
   indexReleaseRef(sIdx->refId);
@@ -557,20 +564,18 @@ void iterateValueDestroy(IterateValue* value, bool destroy) {
 static int64_t indexGetAvaialbleVer(SIndex* sIdx, IndexCache* cache) {
   ICacheKey key = {.suid = cache->suid, .colName = cache->colName, .nColName = strlen(cache->colName)};
   int64_t   ver = CACHE_VERSION(cache);
-  taosThreadMutexLock(&sIdx->mtx);
-  TFileReader* trd = tfileCacheGet(((IndexTFile*)sIdx->tindex)->cache, &key);
-  if (trd != NULL) {
-    if (ver < trd->header.version) {
-      ver = trd->header.version + 1;
-    } else {
-      ver += 1;
-    }
-    indexInfo("header: %d, ver: %" PRId64 "", trd->header.version, ver);
-    tfileReaderUnRef(trd);
-  } else {
-    indexInfo("not found reader base %p", trd);
+
+  IndexTFile* tf = (IndexTFile*)(sIdx->tindex);
+
+  taosThreadMutexLock(&tf->mtx);
+  TFileReader* rd = tfileCacheGet(tf->cache, &key);
+  taosThreadMutexUnlock(&tf->mtx);
+
+  if (rd != NULL) {
+    ver = (ver > rd->header.version ? ver : rd->header.version) + 1;
+    indexInfo("header: %" PRId64 ", ver: %" PRId64 "", rd->header.version, ver);
   }
-  taosThreadMutexUnlock(&sIdx->mtx);
+  tfileReaderUnRef(rd);
   return ver;
 }
 static int indexGenTFile(SIndex* sIdx, IndexCache* cache, SArray* batch) {
@@ -597,13 +602,15 @@ static int indexGenTFile(SIndex* sIdx, IndexCache* cache, SArray* batch) {
   }
   indexInfo("success to create tfile, reopen it, %s", reader->ctx->file.buf);

+  IndexTFile* tf = (IndexTFile*)sIdx->tindex;
+
   TFileHeader* header = &reader->header;
   ICacheKey    key = {.suid = cache->suid, .colName = header->colName, .nColName = strlen(header->colName)};

-  taosThreadMutexLock(&sIdx->mtx);
-  IndexTFile* ifile = (IndexTFile*)sIdx->tindex;
-  tfileCachePut(ifile->cache, &key, reader);
-  taosThreadMutexUnlock(&sIdx->mtx);
+  taosThreadMutexLock(&tf->mtx);
+  tfileCachePut(tf->cache, &key, reader);
+  taosThreadMutexUnlock(&tf->mtx);
+
   return ret;
 END:
   if (tw != NULL) {
diff --git a/source/libs/index/src/indexCache.c b/source/libs/index/src/indexCache.c
index d704e3876e4979cdf8c1354e9b3d2ef23bf91132..4e7be245ef7fb0a4c383a0abf0b242ebbb46522c 100644
--- a/source/libs/index/src/indexCache.c
+++ b/source/libs/index/src/indexCache.c
@@ -36,32 +36,31 @@ static char* indexCacheTermGet(const void* pData);

 static MemTable* indexInternalCacheCreate(int8_t type);

-static int32_t cacheSearchTerm(void* cache, SIndexTerm* ct, SIdxTempResult* tr, STermValueType* s);
-static int32_t cacheSearchPrefix(void* cache, SIndexTerm* ct, SIdxTempResult* tr, STermValueType* s);
-static int32_t cacheSearchSuffix(void* cache, SIndexTerm* ct, SIdxTempResult* tr, STermValueType* s);
-static int32_t cacheSearchRegex(void* cache, SIndexTerm* ct, SIdxTempResult* tr, STermValueType* s);
-static int32_t cacheSearchLessThan(void* cache, SIndexTerm* ct, SIdxTempResult* tr, STermValueType* s);
-static int32_t cacheSearchLessEqual(void* cache, SIndexTerm* ct, SIdxTempResult* tr, STermValueType* s);
-static int32_t cacheSearchGreaterThan(void* cache, SIndexTerm* ct, SIdxTempResult* tr, STermValueType* s);
-static int32_t cacheSearchGreaterEqual(void* cache, SIndexTerm* ct, SIdxTempResult* tr, STermValueType* s);
-static int32_t cacheSearchRange(void* cache, SIndexTerm* ct, SIdxTempResult* tr, STermValueType* s);
+static int32_t cacheSearchTerm(void* cache, SIndexTerm* ct, SIdxTRslt* tr, STermValueType* s);
+static int32_t cacheSearchPrefix(void* cache, SIndexTerm* ct, SIdxTRslt* tr, STermValueType* s);
+static int32_t cacheSearchSuffix(void* cache, SIndexTerm* ct, SIdxTRslt* tr, STermValueType* s);
+static int32_t cacheSearchRegex(void* cache, SIndexTerm* ct, SIdxTRslt* tr, STermValueType* s);
+static int32_t cacheSearchLessThan(void* cache, SIndexTerm* ct, SIdxTRslt* tr, STermValueType* s);
+static int32_t cacheSearchLessEqual(void* cache, SIndexTerm* ct, SIdxTRslt* tr, STermValueType* s);
+static int32_t cacheSearchGreaterThan(void* cache, SIndexTerm* ct, SIdxTRslt* tr, STermValueType* s);
+static int32_t cacheSearchGreaterEqual(void* cache, SIndexTerm* ct, SIdxTRslt* tr, STermValueType* s);
+static int32_t cacheSearchRange(void* cache, SIndexTerm* ct, SIdxTRslt* tr, STermValueType* s);
 /*comm func of compare, used in (LE/LT/GE/GT compare)*/
-static int32_t cacheSearchCompareFunc(void* cache, SIndexTerm* ct, SIdxTempResult* tr, STermValueType* s,
-                                      RangeType type);
-static int32_t cacheSearchTerm_JSON(void* cache, SIndexTerm* ct, SIdxTempResult* tr, STermValueType* s);
-static int32_t cacheSearchPrefix_JSON(void* cache, SIndexTerm* ct, SIdxTempResult* tr, STermValueType* s);
-static int32_t cacheSearchSuffix_JSON(void* cache, SIndexTerm* ct, SIdxTempResult* tr, STermValueType* s);
-static int32_t cacheSearchRegex_JSON(void* cache, SIndexTerm* ct, SIdxTempResult* tr, STermValueType* s);
-static int32_t cacheSearchLessThan_JSON(void* cache, SIndexTerm* ct, SIdxTempResult* tr, STermValueType* s);
-static int32_t cacheSearchLessEqual_JSON(void* cache, SIndexTerm* ct, SIdxTempResult* tr, STermValueType* s);
-static int32_t cacheSearchGreaterThan_JSON(void* cache, SIndexTerm* ct, SIdxTempResult* tr, STermValueType* s);
-static int32_t cacheSearchGreaterEqual_JSON(void* cache, SIndexTerm* ct, SIdxTempResult* tr, STermValueType* s);
-static int32_t cacheSearchRange_JSON(void* cache, SIndexTerm* ct, SIdxTempResult* tr, STermValueType* s);
-
-static int32_t cacheSearchCompareFunc_JSON(void* cache, SIndexTerm* term, SIdxTempResult* tr, STermValueType* s,
+static int32_t cacheSearchCompareFunc(void* cache, SIndexTerm* ct, SIdxTRslt* tr, STermValueType* s, RangeType type);
+static int32_t cacheSearchTerm_JSON(void* cache, SIndexTerm* ct, SIdxTRslt* tr, STermValueType* s);
+static int32_t cacheSearchPrefix_JSON(void* cache, SIndexTerm* ct, SIdxTRslt* tr, STermValueType* s);
+static int32_t cacheSearchSuffix_JSON(void* cache, SIndexTerm* ct, SIdxTRslt* tr, STermValueType* s);
+static int32_t cacheSearchRegex_JSON(void* cache, SIndexTerm* ct, SIdxTRslt* tr, STermValueType* s);
+static int32_t cacheSearchLessThan_JSON(void* cache, SIndexTerm* ct, SIdxTRslt* tr, STermValueType* s);
+static int32_t cacheSearchLessEqual_JSON(void* cache, SIndexTerm* ct, SIdxTRslt* tr, STermValueType* s);
+static int32_t cacheSearchGreaterThan_JSON(void* cache, SIndexTerm* ct, SIdxTRslt* tr, STermValueType* s);
+static int32_t cacheSearchGreaterEqual_JSON(void* cache, SIndexTerm* ct, SIdxTRslt* tr, STermValueType* s);
+static int32_t cacheSearchRange_JSON(void* cache, SIndexTerm* ct, SIdxTRslt* tr, STermValueType* s);
+
+static int32_t cacheSearchCompareFunc_JSON(void* cache, SIndexTerm* term, SIdxTRslt* tr, STermValueType* s,
                                            RangeType type);
-static int32_t (*cacheSearch[][QUERY_MAX])(void* cache, SIndexTerm* ct, SIdxTempResult* tr, STermValueType* s) = {
+static int32_t (*cacheSearch[][QUERY_MAX])(void* cache, SIndexTerm* ct, SIdxTRslt* tr, STermValueType* s) = {
     {cacheSearchTerm, cacheSearchPrefix, cacheSearchSuffix, cacheSearchRegex, cacheSearchLessThan,
      cacheSearchLessEqual, cacheSearchGreaterThan, cacheSearchGreaterEqual, cacheSearchRange},
     {cacheSearchTerm_JSON, cacheSearchPrefix_JSON, cacheSearchSuffix_JSON, cacheSearchRegex_JSON,
@@ -71,7 +70,7 @@ static int32_t (*cacheSearch[][QUERY_MAX])(void* cache, SIndexTerm* ct, SIdxTemp
 static void doMergeWork(SSchedMsg* msg);
 static bool indexCacheIteratorNext(Iterate* itera);

-static int32_t cacheSearchTerm(void* cache, SIndexTerm* term, SIdxTempResult* tr, STermValueType* s) {
+static int32_t cacheSearchTerm(void* cache, SIndexTerm* term, SIdxTRslt* tr, STermValueType* s) {
   if (cache == NULL) {
     return 0;
   }
@@ -80,7 +79,7 @@ static int32_t cacheSearchTerm(void* cache, SIndexTerm* term, SIdxTempResult* tr

   CacheTerm* pCt = taosMemoryCalloc(1, sizeof(CacheTerm));
   pCt->colVal = term->colVal;
-  pCt->version = atomic_load_32(&pCache->version);
+  pCt->version = atomic_load_64(&pCache->version);

   char* key = indexCacheTermGet(pCt);

@@ -93,11 +92,11 @@ static int32_t cacheSearchTerm(void* cache, SIndexTerm* term, SIdxTempResult* tr
     CacheTerm* c = (CacheTerm*)SL_GET_NODE_DATA(node);
     if (0 == strcmp(c->colVal, pCt->colVal)) {
       if (c->operaType == ADD_VALUE) {
-        INDEX_MERGE_ADD_DEL(tr->deled, tr->added, c->uid)
+        INDEX_MERGE_ADD_DEL(tr->del, tr->add, c->uid)
         // taosArrayPush(result, &c->uid);
         *s = kTypeValue;
       } else if (c->operaType == DEL_VALUE) {
-        INDEX_MERGE_ADD_DEL(tr->added, tr->deled, c->uid)
+        INDEX_MERGE_ADD_DEL(tr->add, tr->del, c->uid)
       }
     } else {
       break;
@@ -108,20 +107,19 @@ static int32_t cacheSearchTerm(void* cache, SIndexTerm* term, SIdxTempResult* tr
   tSkipListDestroyIter(iter);
   return 0;
 }
-static int32_t cacheSearchPrefix(void* cache, SIndexTerm* term, SIdxTempResult* tr, STermValueType* s) {
+static int32_t cacheSearchPrefix(void* cache, SIndexTerm* term, SIdxTRslt* tr, STermValueType* s) {
   // impl later
   return 0;
 }
-static int32_t cacheSearchSuffix(void* cache, SIndexTerm* term, SIdxTempResult* tr, STermValueType* s) {
+static int32_t cacheSearchSuffix(void* cache, SIndexTerm* term, SIdxTRslt* tr, STermValueType* s) {
   // impl later
   return 0;
 }
-static int32_t cacheSearchRegex(void* cache, SIndexTerm* term, SIdxTempResult* tr, STermValueType* s) {
+static int32_t cacheSearchRegex(void* cache, SIndexTerm* term, SIdxTRslt* tr, STermValueType* s) {
   // impl later
   return 0;
 }
-static int32_t cacheSearchCompareFunc(void* cache, SIndexTerm* term, SIdxTempResult* tr, STermValueType* s,
-                                      RangeType type) {
+static int32_t cacheSearchCompareFunc(void* cache, SIndexTerm* term, SIdxTRslt* tr, STermValueType* s, RangeType type) {
   if (cache == NULL) {
     return 0;
   }
@@ -133,7 +131,8 @@ static int32_t cacheSearchCompareFunc(void* cache, SIndexTerm* term, SIdxTempRes

   CacheTerm* pCt = taosMemoryCalloc(1, sizeof(CacheTerm));
   pCt->colVal = term->colVal;
-  pCt->version = atomic_load_32(&pCache->version);
+  pCt->colType = term->colType;
+  pCt->version = atomic_load_64(&pCache->version);

   char* key = indexCacheTermGet(pCt);

@@ -147,11 +146,11 @@ static int32_t cacheSearchCompareFunc(void* cache, SIndexTerm* term, SIdxTempRes
     TExeCond cond = cmpFn(c->colVal, pCt->colVal, pCt->colType);
     if (cond == MATCH) {
       if (c->operaType == ADD_VALUE) {
-        INDEX_MERGE_ADD_DEL(tr->deled, tr->added, c->uid)
+        INDEX_MERGE_ADD_DEL(tr->del, tr->add, c->uid)
         // taosArrayPush(result, &c->uid);
         *s = kTypeValue;
       } else if (c->operaType == DEL_VALUE) {
-        INDEX_MERGE_ADD_DEL(tr->added, tr->deled, c->uid)
+        INDEX_MERGE_ADD_DEL(tr->add, tr->del, c->uid)
       }
     } else if (cond == CONTINUE) {
       continue;
@@ -163,20 +162,20 @@ static int32_t cacheSearchCompareFunc(void* cache, SIndexTerm* term, SIdxTempRes
   tSkipListDestroyIter(iter);
   return TSDB_CODE_SUCCESS;
 }
-static int32_t cacheSearchLessThan(void* cache, SIndexTerm* term, SIdxTempResult* tr, STermValueType* s) {
+static int32_t cacheSearchLessThan(void* cache, SIndexTerm* term, SIdxTRslt* tr, STermValueType* s) {
   return cacheSearchCompareFunc(cache, term, tr, s, LT);
 }
-static int32_t cacheSearchLessEqual(void* cache, SIndexTerm* term, SIdxTempResult* tr, STermValueType* s) {
+static int32_t cacheSearchLessEqual(void* cache, SIndexTerm* term, SIdxTRslt* tr, STermValueType* s) {
   return cacheSearchCompareFunc(cache, term, tr, s, LE);
 }
-static int32_t cacheSearchGreaterThan(void* cache, SIndexTerm* term, SIdxTempResult* tr, STermValueType* s) {
+static int32_t cacheSearchGreaterThan(void* cache, SIndexTerm* term, SIdxTRslt* tr, STermValueType* s) {
   return cacheSearchCompareFunc(cache, term, tr, s, GT);
 }
-static int32_t cacheSearchGreaterEqual(void* cache, SIndexTerm* term, SIdxTempResult* tr, STermValueType* s) {
+static int32_t cacheSearchGreaterEqual(void* cache, SIndexTerm* term, SIdxTRslt* tr, STermValueType* s) {
   return cacheSearchCompareFunc(cache, term, tr, s, GE);
 }
-static int32_t cacheSearchTerm_JSON(void* cache, SIndexTerm* term, SIdxTempResult* tr, STermValueType* s) {
+static int32_t cacheSearchTerm_JSON(void* cache, SIndexTerm* term, SIdxTRslt* tr, STermValueType* s) {
   if (cache == NULL) {
     return 0;
   }
@@ -185,7 +184,7 @@ static int32_t cacheSearchTerm_JSON(void* cache, SIndexTerm* term, SIdxTempResul

   CacheTerm* pCt = taosMemoryCalloc(1, sizeof(CacheTerm));
   pCt->colVal = term->colVal;
-  pCt->version = atomic_load_32(&pCache->version);
+  pCt->version = atomic_load_64(&pCache->version);

   char* exBuf = NULL;
   if (INDEX_TYPE_CONTAIN_EXTERN_TYPE(term->colType, TSDB_DATA_TYPE_JSON)) {
@@ -204,11 +203,11 @@ static int32_t cacheSearchTerm_JSON(void* cache, SIndexTerm* term, SIdxTempResul

     if (0 == strcmp(c->colVal, pCt->colVal)) {
       if (c->operaType == ADD_VALUE) {
-        INDEX_MERGE_ADD_DEL(tr->deled, tr->added, c->uid)
+        INDEX_MERGE_ADD_DEL(tr->del, tr->add, c->uid)
         // taosArrayPush(result, &c->uid);
         *s = kTypeValue;
       } else if (c->operaType == DEL_VALUE) {
-        INDEX_MERGE_ADD_DEL(tr->added, tr->deled, c->uid)
+        INDEX_MERGE_ADD_DEL(tr->add, tr->del, c->uid)
       }
     } else {
       break;
@@ -222,32 +221,32 @@ static int32_t cacheSearchTerm_JSON(void* cache, SIndexTerm* term, SIdxTempResul
   return TSDB_CODE_SUCCESS;
 }
-static int32_t cacheSearchPrefix_JSON(void* cache, SIndexTerm* term, SIdxTempResult* tr, STermValueType* s) {
+static int32_t cacheSearchPrefix_JSON(void* cache, SIndexTerm* term, SIdxTRslt* tr, STermValueType* s) {
   return TSDB_CODE_SUCCESS;
 }
-static int32_t cacheSearchSuffix_JSON(void* cache, SIndexTerm* term, SIdxTempResult* tr, STermValueType* s) {
+static int32_t cacheSearchSuffix_JSON(void* cache, SIndexTerm* term, SIdxTRslt* tr, STermValueType* s) {
   return TSDB_CODE_SUCCESS;
 }
-static int32_t cacheSearchRegex_JSON(void* cache, SIndexTerm* term, SIdxTempResult* tr, STermValueType* s) {
+static int32_t cacheSearchRegex_JSON(void* cache, SIndexTerm* term, SIdxTRslt* tr, STermValueType* s) {
   return TSDB_CODE_SUCCESS;
 }
-static int32_t cacheSearchLessThan_JSON(void* cache, SIndexTerm* term, SIdxTempResult* tr, STermValueType* s) {
+static int32_t cacheSearchLessThan_JSON(void* cache, SIndexTerm* term, SIdxTRslt* tr, STermValueType* s) {
   return cacheSearchCompareFunc_JSON(cache, term, tr, s, LT);
 }
-static int32_t cacheSearchLessEqual_JSON(void* cache, SIndexTerm* term, SIdxTempResult* tr, STermValueType* s) {
+static int32_t cacheSearchLessEqual_JSON(void* cache, SIndexTerm* term, SIdxTRslt* tr, STermValueType* s) {
   return cacheSearchCompareFunc_JSON(cache, term, tr, s, LE);
 }
-static int32_t cacheSearchGreaterThan_JSON(void* cache, SIndexTerm* term, SIdxTempResult* tr, STermValueType* s) {
+static int32_t cacheSearchGreaterThan_JSON(void* cache, SIndexTerm* term, SIdxTRslt* tr, STermValueType* s) {
   return cacheSearchCompareFunc_JSON(cache, term, tr, s, GT);
 }
-static int32_t cacheSearchGreaterEqual_JSON(void* cache, SIndexTerm* term, SIdxTempResult* tr, STermValueType* s) {
+static int32_t cacheSearchGreaterEqual_JSON(void* cache, SIndexTerm* term, SIdxTRslt* tr, STermValueType* s) {
   return cacheSearchCompareFunc_JSON(cache, term, tr, s, GE);
 }
-static int32_t cacheSearchRange_JSON(void* cache, SIndexTerm* term, SIdxTempResult* tr, STermValueType* s) {
+static int32_t cacheSearchRange_JSON(void* cache, SIndexTerm* term, SIdxTRslt* tr, STermValueType* s) {
   return TSDB_CODE_SUCCESS;
 }

-static int32_t cacheSearchCompareFunc_JSON(void* cache, SIndexTerm* term, SIdxTempResult* tr, STermValueType* s,
+static int32_t cacheSearchCompareFunc_JSON(void* cache, SIndexTerm* term, SIdxTRslt* tr, STermValueType* s,
                                            RangeType type) {
   if (cache == NULL) {
     return 0;
@@ -259,7 +258,7 @@ static int32_t cacheSearchCompareFunc_JSON(void* cache, SIndexTerm* term, SIdxTe

   CacheTerm* pCt = taosMemoryCalloc(1, sizeof(CacheTerm));
   pCt->colVal = term->colVal;
-  pCt->version = atomic_load_32(&pCache->version);
+  pCt->version = atomic_load_64(&pCache->version);

   int8_t dType = INDEX_TYPE_GET_TYPE(term->colType);
   int    skip = 0;
@@ -289,11 +288,11 @@ static int32_t cacheSearchCompareFunc_JSON(void* cache, SIndexTerm* term, SIdxTe
     TExeCond cond = cmpFn(p + skip, term->colVal, dType);
     if (cond == MATCH) {
       if (c->operaType == ADD_VALUE) {
-        INDEX_MERGE_ADD_DEL(tr->deled, tr->added, c->uid)
+        INDEX_MERGE_ADD_DEL(tr->del, tr->add, c->uid)
         // taosArrayPush(result, &c->uid);
         *s = kTypeValue;
       } else if (c->operaType == DEL_VALUE) {
-        INDEX_MERGE_ADD_DEL(tr->added, tr->deled, c->uid)
+        INDEX_MERGE_ADD_DEL(tr->add, tr->del, c->uid)
       }
     } else if (cond == CONTINUE) {
       continue;
@@ -309,7 +308,7 @@ static int32_t cacheSearchCompareFunc_JSON(void* cache, SIndexTerm* term, SIdxTe
   return TSDB_CODE_SUCCESS;
 }
-static int32_t cacheSearchRange(void* cache, SIndexTerm* term, SIdxTempResult* tr, STermValueType* s) {
+static int32_t cacheSearchRange(void* cache, SIndexTerm* term, SIdxTRslt* tr, STermValueType* s) {
   // impl later
   return 0;
 }
@@ -356,7 +355,7 @@ void indexCacheDebug(IndexCache* cache) {
       CacheTerm* ct = (CacheTerm*)SL_GET_NODE_DATA(node);
       if (ct != NULL) {
         // TODO, add more debug info
-        indexInfo("{colVal: %s, version: %d} \t", ct->colVal, ct->version);
+        indexInfo("{colVal: %s, version: %" PRId64 "} \t", ct->colVal, ct->version);
       }
     }
     tSkipListDestroyIter(iter);
@@ -377,7 +376,7 @@ void indexCacheDebug(IndexCache* cache) {
       CacheTerm* ct = (CacheTerm*)SL_GET_NODE_DATA(node);
       if (ct != NULL) {
         // TODO, add more debug info
-        indexInfo("{colVal: %s, version: %d} \t", ct->colVal, ct->version);
+        indexInfo("{colVal: %s, version: %" PRId64 "} \t", ct->colVal, ct->version);
       }
     }
     tSkipListDestroyIter(iter);
@@ -495,16 +494,19 @@ static void indexCacheMakeRoomForWrite(IndexCache* cache) {
       // TODO: wake up by condition variable
       indexCacheWait(cache);
     } else {
-      bool notifyQuit = cache->occupiedMem >= MEM_SIGNAL_QUIT ? true : false;
+      bool quit = cache->occupiedMem >= MEM_SIGNAL_QUIT ? true : false;

       indexCacheRef(cache);
       cache->imm = cache->mem;
       cache->mem = indexInternalCacheCreate(cache->type);
       cache->mem->pCache = cache;
       cache->occupiedMem = 0;
+      if (quit == false) {
+        atomic_store_32(&cache->merging, 1);
+      }
       // sched to merge
       // unref cache in bgwork
-      indexCacheSchedToMerge(cache, notifyQuit);
+      indexCacheSchedToMerge(cache, quit);
     }
   }
 }
@@ -529,7 +531,7 @@ int indexCachePut(void* cache, SIndexTerm* term, uint64_t uid) {
     ct->colVal = (char*)taosMemoryCalloc(1, sizeof(char) * (term->nColVal + 1));
     memcpy(ct->colVal, term->colVal, term->nColVal);
   }
-  ct->version = atomic_add_fetch_32(&pCache->version, 1);
+  ct->version = atomic_add_fetch_64(&pCache->version, 1);
   // set value
   ct->uid = uid;
   ct->operaType = term->operType;
@@ -568,7 +570,7 @@ int indexCacheDel(void* cache, const char* fieldValue, int32_t fvlen, uint64_t u
   return 0;
 }

-static int32_t indexQueryMem(MemTable* mem, SIndexTermQuery* query, SIdxTempResult* tr, STermValueType* s) {
+static int32_t indexQueryMem(MemTable* mem, SIndexTermQuery* query, SIdxTRslt* tr, STermValueType* s) {
   if (mem == NULL) {
     return 0;
   }
@@ -582,7 +584,7 @@ static int32_t indexQueryMem(MemTable* mem, SIndexTermQuery* query, SIdxTempResu
     return cacheSearch[0][qtype](mem, term, tr, s);
   }
 }
-int indexCacheSearch(void* cache, SIndexTermQuery* query, SIdxTempResult* result, STermValueType* s) {
+int indexCacheSearch(void* cache, SIndexTermQuery* query, SIdxTRslt* result, STermValueType* s) {
   int64_t st = taosGetTimestampUs();
   if (cache == NULL) {
     return 0;
@@ -597,10 +599,10 @@ int indexCacheSearch(void* cache, SIndexTermQuery* query, SIdxTempResult* result
   indexMemRef(imm);
   taosThreadMutexUnlock(&pCache->mtx);

-  int ret = indexQueryMem(mem, query, result, s);
+  int ret = (mem && mem->mem) ? indexQueryMem(mem, query, result, s) : 0;
   if (ret == 0 && *s != kTypeDeletion) {
     // continue search in imm
-    ret = indexQueryMem(imm, query, result, s);
+    ret = (imm && imm->mem) ? indexQueryMem(imm, query, result, s) : 0;
   }

   indexMemUnRef(mem);
@@ -663,7 +665,11 @@ static int32_t indexCacheTermCompare(const void* l, const void* r) {
   // compare colVal
   int32_t cmp = strcmp(lt->colVal, rt->colVal);
   if (cmp == 0) {
-    return rt->version - lt->version;
+    if (rt->version == lt->version) {
+      cmp = 0;
+    } else {
+      cmp = rt->version < lt->version ? -1 : 1;
+    }
   }
   return cmp;
 }
@@ -705,7 +711,7 @@ static int32_t indexCacheJsonTermCompare(const void* l, const void* r) {
   return cmp;
 }
 static MemTable* indexInternalCacheCreate(int8_t type) {
-  int ttype = INDEX_TYPE_CONTAIN_EXTERN_TYPE(type, TSDB_DATA_TYPE_JSON) ? TSDB_DATA_TYPE_BINARY : type;
+  int ttype = INDEX_TYPE_CONTAIN_EXTERN_TYPE(type, TSDB_DATA_TYPE_JSON) ? TSDB_DATA_TYPE_BINARY : TSDB_DATA_TYPE_BINARY;
   int32_t (*cmpFn)(const void* l, const void* r) =
       INDEX_TYPE_CONTAIN_EXTERN_TYPE(type, TSDB_DATA_TYPE_JSON) ? indexCacheJsonTermCompare : indexCacheTermCompare;
@@ -722,9 +728,9 @@ static void doMergeWork(SSchedMsg* msg) {
   IndexCache* pCache = msg->ahandle;
   SIndex*     sidx = (SIndex*)pCache->index;

-  sidx->quit = msg->thandle ? true : false;
+  int quit = msg->thandle ? true : false;
   taosMemoryFree(msg->thandle);
-  indexFlushCacheToTFile(sidx, pCache);
+  indexFlushCacheToTFile(sidx, pCache, quit);
 }
 static bool indexCacheIteratorNext(Iterate* itera) {
   SSkipListIterator* iter = itera->iter;
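Why the indexCacheTermCompare() hunk above replaces "return rt->version - lt->version" with an explicit three-way compare: with version widened to 64 bits, returning the raw difference through an int32_t can truncate and even flip sign, so two distinct versions may compare equal. A minimal standalone sketch of the failure mode (hypothetical values, not part of the patch):

    #include <stdint.h>
    // Truncating the 64-bit difference to int32_t drops the high bits.
    static int32_t cmpByDiff(int64_t r, int64_t l) { return (int32_t)(r - l); }
    // The patched style: sign-safe three-way compare, no overflow possible.
    static int32_t cmpThreeWay(int64_t r, int64_t l) { return r == l ? 0 : (r < l ? -1 : 1); }
    // cmpByDiff(0, 4294967296LL) yields 0, treating distinct versions as equal;
    // cmpThreeWay(0, 4294967296LL) yields -1 as intended.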
diff --git a/source/libs/index/src/indexComm.c b/source/libs/index/src/indexComm.c
index 78c7babb681e44629281f0ffd6ea6ba835495b5b..5310e1c3451dee18bd3a31922b2ce14f752ebc1d 100644
--- a/source/libs/index/src/indexComm.c
+++ b/source/libs/index/src/indexComm.c
@@ -75,7 +75,7 @@ char* indexInt2str(int64_t val, char* dst, int radix) {
     ;
   return dst - 1;
 }
-static __compar_fn_t indexGetCompar(int8_t type) {
+__compar_fn_t indexGetCompar(int8_t type) {
   if (type == TSDB_DATA_TYPE_BINARY || type == TSDB_DATA_TYPE_NCHAR) {
     return (__compar_fn_t)strcmp;
   }
@@ -182,6 +182,9 @@ TExeCond tDoCompare(__compar_fn_t func, int8_t comparType, void* a, void* b) {
     case QUERY_GREATER_EQUAL: {
       if (ret >= 0) return MATCH;
     }
+    case QUERY_TERM: {
+      if (ret == 0) return MATCH;
+    }
   }
   return CONTINUE;
 }
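The QUERY_TERM case added to tDoCompare() above lets the same helper serve exact-match (term) queries as well as ranges: one strcmp-style comparator call, then the query operator picks which sign of the result counts as a match. A hedged sketch of that contract (local names are hypothetical, not TDengine's):

    typedef enum { SK_CONTINUE, SK_MATCH } SketchCond;
    typedef int (*sketch_cmp_fn)(const void *a, const void *b);

    // One comparator call, one sign test per operator; TERM means ret == 0.
    SketchCond sketchCompare(sketch_cmp_fn cmp, int op, void *a, void *b) {
      int ret = cmp(a, b);
      if (op == 0 /* LESS_THAN     */ && ret < 0) return SK_MATCH;
      if (op == 1 /* LESS_EQUAL    */ && ret <= 0) return SK_MATCH;
      if (op == 2 /* GREATER_THAN  */ && ret > 0) return SK_MATCH;
      if (op == 3 /* GREATER_EQUAL */ && ret >= 0) return SK_MATCH;
      if (op == 4 /* TERM          */ && ret == 0) return SK_MATCH;
      return SK_CONTINUE;
    }

Worth noting about the real switch: its cases do not break, so control falls from QUERY_GREATER_EQUAL into the new QUERY_TERM case when ret < 0. That is harmless here because the ret == 0 guard then fails, which is why the sketch uses independent if tests instead.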
diff --git a/source/libs/index/src/indexFilter.c b/source/libs/index/src/indexFilter.c
index 0273867ccf040f3d3344066270ef3b8aa6a3bae2..b41006b6dddf68e8d239e7c9c6bd348ee6177a9a 100644
--- a/source/libs/index/src/indexFilter.c
+++ b/source/libs/index/src/indexFilter.c
@@ -14,11 +14,13 @@
  */

 #include "index.h"
+#include "indexComm.h"
 #include "indexInt.h"
 #include "nodes.h"
 #include "querynodes.h"
 #include "scalar.h"
 #include "tdatablock.h"
+#include "vnode.h"

 // clang-format off
 #define SIF_ERR_RET(c) do { int32_t _code = c; if (_code != TSDB_CODE_SUCCESS) { terrno = _code; return _code; } } while (0)
@@ -37,12 +39,15 @@ typedef struct SIFParam {
   int64_t suid;  // add later
   char    dbName[TSDB_DB_NAME_LEN];
   char    colName[TSDB_COL_NAME_LEN];
+
+  SIndexMetaArg arg;
 } SIFParam;

 typedef struct SIFCtx {
-  int32_t code;
-  SHashObj *pRes; /* element is SIFParam */
-  bool noExec; // true: just iterate condition tree, and add hint to executor plan
+  int32_t       code;
+  SHashObj *    pRes;   /* element is SIFParam */
+  bool          noExec; // true: just iterate condition tree, and add hint to executor plan
+  SIndexMetaArg arg;
   // SIdxFltStatus st;
 } SIFCtx;
@@ -256,8 +261,53 @@ static int32_t sifExecFunction(SFunctionNode *node, SIFCtx *ctx, SIFParam *outpu
   indexError("index-filter not support buildin function");
   return TSDB_CODE_QRY_INVALID_INPUT;
 }
+
+typedef int (*Filter)(void *a, void *b, int16_t dtype);
+
+int sifGreaterThan(void *a, void *b, int16_t dtype) {
+  __compar_fn_t func = getComparFunc(dtype, 0);
+  return tDoCompare(func, QUERY_GREATER_THAN, a, b);
+}
+int sifGreaterEqual(void *a, void *b, int16_t dtype) {
+  __compar_fn_t func = getComparFunc(dtype, 0);
+  return tDoCompare(func, QUERY_GREATER_EQUAL, a, b);
+}
+int sifLessEqual(void *a, void *b, int16_t dtype) {
+  __compar_fn_t func = getComparFunc(dtype, 0);
+  return tDoCompare(func, QUERY_LESS_EQUAL, a, b);
+}
+int sifLessThan(void *a, void *b, int16_t dtype) {
+  __compar_fn_t func = getComparFunc(dtype, 0);
+  return (int)tDoCompare(func, QUERY_LESS_THAN, a, b);
+}
+int sifEqual(void *a, void *b, int16_t dtype) {
+  __compar_fn_t func = getComparFunc(dtype, 0);
+  //__compar_fn_t func = indexGetCompar(dtype);
+  return (int)tDoCompare(func, QUERY_TERM, a, b);
+}
+static Filter sifGetFilterFunc(EIndexQueryType type, bool *reverse) {
+  if (type == QUERY_LESS_EQUAL || type == QUERY_LESS_THAN) {
+    *reverse = true;
+  } else {
+    *reverse = false;
+  }
+  if (type == QUERY_LESS_EQUAL)
+    return sifLessEqual;
+  else if (type == QUERY_LESS_THAN)
+    return sifLessThan;
+  else if (type == QUERY_GREATER_EQUAL)
+    return sifGreaterEqual;
+  else if (type == QUERY_GREATER_THAN)
+    return sifGreaterThan;
+  else if (type == QUERY_TERM) {
+    return sifEqual;
+  }
+  return NULL;
+}
 static int32_t sifDoIndex(SIFParam *left, SIFParam *right, int8_t operType, SIFParam *output) {
-  SIndexTerm *tm = indexTermCreate(left->suid, DEFAULT, left->colValType, left->colName, strlen(left->colName),
+  SIndexMetaArg *arg = &output->arg;
+#ifdef USE_INVERTED_INDEX
+  SIndexTerm *tm = indexTermCreate(arg->suid, DEFAULT, left->colValType, left->colName, strlen(left->colName),
                                    right->condValue, strlen(right->condValue));
   if (tm == NULL) {
     return TSDB_CODE_QRY_OUT_OF_MEMORY;
@@ -268,9 +318,27 @@

   SIndexMultiTermQuery *mtm = indexMultiTermQueryCreate(MUST);
   indexMultiTermQueryAdd(mtm, tm, qtype);
-  int ret = indexSearch(NULL, mtm, output->result);
+  int ret = indexSearch(arg->metaHandle, mtm, output->result);
+  indexDebug("index filter data size: %d", (int)taosArrayGetSize(output->result));
   indexMultiTermQueryDestroy(mtm);
   return ret;
+#else
+  EIndexQueryType qtype = 0;
+  SIF_ERR_RET(sifGetFuncFromSql(operType, &qtype));
+  bool   reverse;
+  Filter filterFunc = sifGetFilterFunc(qtype, &reverse);
+
+  SMetaFltParam param = {.suid = arg->suid,
+                         .cid = left->colId,
+                         .type = left->colValType,
+                         .val = right->condValue,
+                         .reverse = reverse,
+                         .filterFunc = filterFunc};
+
+  int ret = metaFilteTableIds(arg->metaEx, &param, output->result);
+  return ret;
+#endif
+  return 0;
 }

 static int32_t sifLessThanFunc(SIFParam *left, SIFParam *right, SIFParam *output) {
@@ -330,38 +398,62 @@ static int32_t sifDefaultFunc(SIFParam *left, SIFParam *right, SIFParam *output)
   return TSDB_CODE_QRY_INVALID_INPUT;
 }

-static sif_func_t sifGetOperFn(int32_t funcId) {
+static int32_t sifGetOperFn(int32_t funcId, sif_func_t *func, SIdxFltStatus *status) {
   // impl later
+  *status = SFLT_ACCURATE_INDEX;
   switch (funcId) {
     case OP_TYPE_GREATER_THAN:
-      return sifGreaterThanFunc;
+      *func = sifGreaterThanFunc;
+      return 0;
     case OP_TYPE_GREATER_EQUAL:
-      return sifGreaterEqualFunc;
+      *func = sifGreaterEqualFunc;
+      return 0;
     case OP_TYPE_LOWER_THAN:
-      return sifLessThanFunc;
+      *func = sifLessThanFunc;
+      return 0;
    case OP_TYPE_LOWER_EQUAL:
-      return sifLessEqualFunc;
+      *func = sifLessEqualFunc;
+      return 0;
    case OP_TYPE_EQUAL:
-      return sifEqualFunc;
+      *func = sifEqualFunc;
+      return 0;
    case OP_TYPE_NOT_EQUAL:
-      return sifNotEqualFunc;
+      *status = SFLT_NOT_INDEX;
+      *func = sifNotEqualFunc;
+      return 0;
    case OP_TYPE_IN:
-      return sifInFunc;
+      *status = SFLT_NOT_INDEX;
+      *func = sifInFunc;
+      return 0;
    case OP_TYPE_NOT_IN:
-      return sifNotInFunc;
+      *status = SFLT_NOT_INDEX;
+      *func = sifNotInFunc;
+      return 0;
    case OP_TYPE_LIKE:
-      return sifLikeFunc;
+      *status = SFLT_NOT_INDEX;
+      *func = sifLikeFunc;
+      return 0;
    case OP_TYPE_NOT_LIKE:
-      return sifNotLikeFunc;
+      *status = SFLT_NOT_INDEX;
+      *func = sifNotLikeFunc;
+      return 0;
    case OP_TYPE_MATCH:
-      return sifMatchFunc;
+      *status = SFLT_NOT_INDEX;
+      *func = sifMatchFunc;
+      return 0;
    case OP_TYPE_NMATCH:
-      return sifNotMatchFunc;
+      *status = SFLT_NOT_INDEX;
+      *func = sifNotMatchFunc;
+      return 0;
    default:
-      return sifNullFunc;
+      *status = SFLT_NOT_INDEX;
+      *func = sifNullFunc;
+      return 0;
   }
-  return sifNullFunc;
+  return 0;
 }
+// typedef struct filterFuncDict {
+
 static int32_t sifExecOper(SOperatorNode *node, SIFCtx *ctx, SIFParam *output) {
   int32_t code = 0;
   int32_t nParam = sifGetOperParamNum(node->opType);
@@ -372,18 +464,16 @@ static int32_t sifExecOper(SOperatorNode *node, SIFCtx *ctx, SIFParam *output) {

   SIFParam *params = NULL;
   SIF_ERR_RET(sifInitOperParams(&params, node, ctx));

-  sif_func_t operFn = sifGetOperFn(node->opType);
-  if (ctx->noExec && operFn == NULL) {
-    output->status = SFLT_NOT_INDEX;
-  } else {
-    output->status = SFLT_ACCURATE_INDEX;
-  }
+  // ugly code, refactor later
+  output->arg = ctx->arg;
+  sif_func_t operFn = sifNullFunc;
+  code = sifGetOperFn(node->opType, &operFn, &output->status);

   if (ctx->noExec) {
     SIF_RET(code);
+  } else {
+    return operFn(&params[0], nParam > 1 ? &params[1] : NULL, output);
   }
-
-  return operFn(&params[0], nParam > 1 ? &params[1] : NULL, output);

 _return:
   taosMemoryFree(params);
   SIF_RET(code);
@@ -408,7 +498,7 @@ static int32_t sifExecLogic(SLogicConditionNode *node, SIFCtx *ctx, SIFParam *ou
       } else if (node->condType == LOGIC_COND_TYPE_OR) {
         taosArrayAddAll(output->result, params[m].result);
       } else if (node->condType == LOGIC_COND_TYPE_NOT) {
-        taosArrayAddAll(output->result, params[m].result);
+        // taosArrayAddAll(output->result, params[m].result);
       }
     }
   } else {
@@ -423,7 +513,7 @@ _return:

 static EDealRes sifWalkFunction(SNode *pNode, void *context) {
   SFunctionNode *node = (SFunctionNode *)pNode;
-  SIFParam output = {0};
+  SIFParam       output = {.result = taosArrayInit(8, sizeof(uint64_t))};

   SIFCtx *ctx = context;
   ctx->code = sifExecFunction(node, ctx, &output);
@@ -439,7 +529,8 @@ static EDealRes sifWalkFunction(SNode *pNode, void *context) {
 }
 static EDealRes sifWalkLogic(SNode *pNode, void *context) {
   SLogicConditionNode *node = (SLogicConditionNode *)pNode;
-  SIFParam output = {0};
+
+  SIFParam output = {.result = taosArrayInit(8, sizeof(uint64_t))};

   SIFCtx *ctx = context;
   ctx->code = sifExecLogic(node, ctx, &output);
@@ -455,7 +546,7 @@ static EDealRes sifWalkLogic(SNode *pNode, void *context) {
 }
 static EDealRes sifWalkOper(SNode *pNode, void *context) {
   SOperatorNode *node = (SOperatorNode *)pNode;
-  SIFParam output = {0};
+  SIFParam       output = {.result = taosArrayInit(8, sizeof(uint64_t))};

   SIFCtx *ctx = context;
   ctx->code = sifExecOper(node, ctx, &output);
@@ -507,8 +598,9 @@ static int32_t sifCalculate(SNode *pNode, SIFParam *pDst) {
     return TSDB_CODE_QRY_INVALID_INPUT;
   }
   int32_t code = 0;
-  SIFCtx ctx = {.code = 0, .noExec = false};
+  SIFCtx  ctx = {.code = 0, .noExec = false, .arg = pDst->arg};
   ctx.pRes = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK);
+
   if (NULL == ctx.pRes) {
     indexError("index-filter failed to taosHashInit");
     return TSDB_CODE_QRY_OUT_OF_MEMORY;
@@ -523,7 +615,9 @@ static int32_t sifCalculate(SNode *pNode, SIFParam *pDst) {
       indexError("no valid res in hash, node:(%p), type(%d)", (void *)&pNode, nodeType(pNode));
       SIF_ERR_RET(TSDB_CODE_QRY_APP_ERROR);
     }
-    taosArrayAddAll(pDst->result, res->result);
+    if (res->result != NULL) {
+      taosArrayAddAll(pDst->result, res->result);
+    }

     sifFreeParam(res);
     taosHashRemove(ctx.pRes, (void *)&pNode, POINTER_BYTES);
@@ -561,7 +655,7 @@ static int32_t sifGetFltHint(SNode *pNode, SIdxFltStatus *status) {
   SIF_RET(code);
 }

-int32_t doFilterTag(const SNode *pFilterNode, SArray *result) {
+int32_t doFilterTag(const SNode *pFilterNode, SIndexMetaArg *metaArg, SArray *result) {
   if (pFilterNode == NULL) {
     return TSDB_CODE_SUCCESS;
   }
@@ -570,10 +664,12 @@ int32_t doFilterTag(const SNode *pFilterNode, SArray *result) {
   // todo move to the initialization function
   // SIF_ERR_RET(filterInitFromNode((SNode *)pFilterNode, &filter, 0));

-  SIFParam param = {0};
+  SArray * output = taosArrayInit(8, sizeof(uint64_t));
+  SIFParam param = {.arg = *metaArg, .result = output};
   SIF_ERR_RET(sifCalculate((SNode *)pFilterNode, &param));

   taosArrayAddAll(result, param.result);
+  // taosArrayAddAll(result, param.result);
   sifFreeParam(&param);
   SIF_RET(TSDB_CODE_SUCCESS);
 }
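When USE_INVERTED_INDEX is off, sifDoIndex() above hands tag filtering to the vnode meta layer through SMetaFltParam: a comparator callback plus a reverse flag that tells the scan which end of the sorted tag index to start from, so LESS_THAN/LESS_EQUAL can walk ascending and stop at the first miss. A self-contained sketch of that callback contract; scanTagIndex, TagEntry and lessEqual below are hypothetical stand-ins, not TDengine APIs:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef int (*Filter)(void *a, void *b, int16_t dtype);  // shape used by the patch

    typedef struct { int64_t val; uint64_t uid; } TagEntry;  // one sorted index column

    // Walk the sorted entries from the end chosen by `reverse`, keeping uids
    // while the filter matches; sorted input means the first miss ends the scan.
    static int scanTagIndex(const TagEntry *idx, int n, void *key, Filter flt,
                            bool reverse, uint64_t *out, int cap) {
      int m = 0;
      for (int i = 0; i < n; i++) {
        const TagEntry *e = reverse ? &idx[i] : &idx[n - 1 - i];
        if (!flt((void *)&e->val, key, 0)) break;
        if (m < cap) out[m++] = e->uid;
      }
      return m;
    }

    static int lessEqual(void *a, void *b, int16_t dtype) {  // int-only toy filter
      (void)dtype;
      return *(int64_t *)a <= *(int64_t *)b;
    }

    int main(void) {
      TagEntry idx[] = {{10, 1}, {20, 2}, {30, 3}};
      int64_t  key = 20;
      uint64_t uids[3];
      printf("%d uids match\n", scanTagIndex(idx, 3, &key, lessEqual, true, uids, 3));  // 2
      return 0;
    }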
diff --git a/source/libs/index/src/indexFst.c b/source/libs/index/src/indexFst.c
index 335b0865269604432259847de072a53854286c2c..892716f38708fed46bc755548436f2477d1e91e5 100644
--- a/source/libs/index/src/indexFst.c
+++ b/source/libs/index/src/indexFst.c
@@ -1324,7 +1324,7 @@ StreamWithStateResult* streamWithStateNextWith(StreamWithState* sws, StreamCallb
       if (FST_NODE_ADDR(p->node) != fstGetRootAddr(sws->fst)) {
         taosArrayPop(sws->inp);
       }
-      // streamStateDestroy(p);
+      streamStateDestroy(p);
       continue;
     }
     FstTransition trn;
diff --git a/source/libs/index/src/indexFstUtil.c b/source/libs/index/src/indexFstUtil.c
index a980c6b740ab4f5b0e128479de342ce84c159c3c..5760b24900ef47e6a52419ade3d91cee9870709a 100644
--- a/source/libs/index/src/indexFstUtil.c
+++ b/source/libs/index/src/indexFstUtil.c
@@ -93,14 +93,15 @@ FstSlice fstSliceCreate(uint8_t* data, uint64_t len) {
 // just shallow copy
 FstSlice fstSliceCopy(FstSlice* s, int32_t start, int32_t end) {
   FstString* str = s->str;
-  str->ref++;
+  atomic_add_fetch_32(&str->ref, 1);

   FstSlice t = {.str = str, .start = start + s->start, .end = end + s->start};
   return t;
 }
 FstSlice fstSliceDeepCopy(FstSlice* s, int32_t start, int32_t end) {
-  int32_t tlen = end - start + 1;
-  int32_t slen;
+  int32_t  tlen = end - start + 1;
+  int32_t  slen;
+
   uint8_t* data = fstSliceData(s, &slen);
   assert(tlen <= slen);
@@ -129,8 +130,9 @@ uint8_t* fstSliceData(FstSlice* s, int32_t* size) {
 }
 void fstSliceDestroy(FstSlice* s) {
   FstString* str = s->str;
-  str->ref--;
-  if (str->ref == 0) {
+
+  int32_t ref = atomic_sub_fetch_32(&str->ref, 1);
+  if (ref == 0) {
     taosMemoryFree(str->data);
     taosMemoryFree(str);
     s->str = NULL;
diff --git a/source/libs/index/src/indexJson.c b/source/libs/index/src/indexJson.c
index de88ff3c8ae287eda194fd4c9d7bff7080edd15c..a2f0563d470f30cf989f71bf068c16e38b236ce4 100644
--- a/source/libs/index/src/indexJson.c
+++ b/source/libs/index/src/indexJson.c
@@ -24,8 +24,8 @@ int tIndexJsonPut(SIndexJson *index, SIndexJsonMultiTerm *terms, uint64_t uid) {
     SIndexJsonTerm *p = taosArrayGetP(terms, i);
     INDEX_TYPE_ADD_EXTERN_TYPE(p->colType, TSDB_DATA_TYPE_JSON);
   }
-  return indexPut(index, terms, uid);  // handle put
+  return indexPut(index, terms, uid);
 }
@@ -34,11 +34,11 @@ int tIndexJsonSearch(SIndexJson *index, SIndexJsonMultiTermQuery *tq, SArray *re
     SIndexJsonTerm *p = taosArrayGetP(terms, i);
     INDEX_TYPE_ADD_EXTERN_TYPE(p->colType, TSDB_DATA_TYPE_JSON);
   }
-  return indexSearch(index, tq, result);  // handle search
+  return indexSearch(index, tq, result);
 }

 void tIndexJsonClose(SIndexJson *index) {
-  return indexClose(index);  // handle close
+  return indexClose(index);
 }
diff --git a/source/libs/index/src/indexTfile.c b/source/libs/index/src/indexTfile.c
index 3d85646bd25596e7d3a666b99287d6b5e3d5e902..53dd2923ac8c1f07b62098a3663c030016b46a72 100644
--- a/source/libs/index/src/indexTfile.c
+++ b/source/libs/index/src/indexTfile.c
@@ -54,37 +54,37 @@ static SArray* tfileGetFileList(const char* path);
 static int     tfileRmExpireFile(SArray* result);
 static void    tfileDestroyFileName(void* elem);
 static int     tfileCompare(const void* a, const void* b);
-static int     tfileParseFileName(const char* filename, uint64_t* suid, char* col, int* version);
-static void    tfileGenFileName(char* filename, uint64_t suid, const char* col, int version);
-static void    tfileGenFileFullName(char* fullname, const char* path, uint64_t suid, const char* col, int32_t version);
+static int     tfileParseFileName(const char* filename, uint64_t* suid, char* col, int64_t* version);
+static void    tfileGenFileName(char* filename, uint64_t suid, const char* col, int64_t version);
+static void    tfileGenFileFullName(char* fullname, const char* path, uint64_t suid, const char* col, int64_t version);
 /*
  * search from tfile
  */
-static int32_t tfSearchTerm(void* reader, SIndexTerm* tem, SIdxTempResult* tr);
-static int32_t tfSearchPrefix(void* reader, SIndexTerm* tem, SIdxTempResult* tr);
-static int32_t tfSearchSuffix(void* reader, SIndexTerm* tem, SIdxTempResult* tr);
-static int32_t tfSearchRegex(void* reader, SIndexTerm* tem, SIdxTempResult* tr);
-static int32_t tfSearchLessThan(void* reader, SIndexTerm* tem, SIdxTempResult* tr);
-static int32_t tfSearchLessEqual(void* reader, SIndexTerm* tem, SIdxTempResult* tr);
-static int32_t tfSearchGreaterThan(void* reader, SIndexTerm* tem, SIdxTempResult* tr);
-static int32_t tfSearchGreaterEqual(void* reader, SIndexTerm* tem, SIdxTempResult* tr);
-static int32_t tfSearchRange(void* reader, SIndexTerm* tem, SIdxTempResult* tr);
-
-static int32_t tfSearchCompareFunc(void* reader, SIndexTerm* tem, SIdxTempResult* tr, RangeType ctype);
-
-static int32_t tfSearchTerm_JSON(void* reader, SIndexTerm* tem, SIdxTempResult* tr);
-static int32_t tfSearchPrefix_JSON(void* reader, SIndexTerm* tem, SIdxTempResult* tr);
-static int32_t tfSearchSuffix_JSON(void* reader, SIndexTerm* tem, SIdxTempResult* tr);
-static int32_t tfSearchRegex_JSON(void* reader, SIndexTerm* tem, SIdxTempResult* tr);
-static int32_t tfSearchLessThan_JSON(void* reader, SIndexTerm* tem, SIdxTempResult* tr);
-static int32_t tfSearchLessEqual_JSON(void* reader, SIndexTerm* tem, SIdxTempResult* tr);
-static int32_t tfSearchGreaterThan_JSON(void* reader, SIndexTerm* tem, SIdxTempResult* tr);
-static int32_t tfSearchGreaterEqual_JSON(void* reader, SIndexTerm* tem, SIdxTempResult* tr);
-static int32_t tfSearchRange_JSON(void* reader, SIndexTerm* tem, SIdxTempResult* tr);
-
-static int32_t tfSearchCompareFunc_JSON(void* reader, SIndexTerm* tem, SIdxTempResult* tr, RangeType ctype);
-
-static int32_t (*tfSearch[][QUERY_MAX])(void* reader, SIndexTerm* tem, SIdxTempResult* tr) = {
+static int32_t tfSearchTerm(void* reader, SIndexTerm* tem, SIdxTRslt* tr);
+static int32_t tfSearchPrefix(void* reader, SIndexTerm* tem, SIdxTRslt* tr);
+static int32_t tfSearchSuffix(void* reader, SIndexTerm* tem, SIdxTRslt* tr);
+static int32_t tfSearchRegex(void* reader, SIndexTerm* tem, SIdxTRslt* tr);
+static int32_t tfSearchLessThan(void* reader, SIndexTerm* tem, SIdxTRslt* tr);
+static int32_t tfSearchLessEqual(void* reader, SIndexTerm* tem, SIdxTRslt* tr);
+static int32_t tfSearchGreaterThan(void* reader, SIndexTerm* tem, SIdxTRslt* tr);
+static int32_t tfSearchGreaterEqual(void* reader, SIndexTerm* tem, SIdxTRslt* tr);
+static int32_t tfSearchRange(void* reader, SIndexTerm* tem, SIdxTRslt* tr);
+
+static int32_t tfSearchCompareFunc(void* reader, SIndexTerm* tem, SIdxTRslt* tr, RangeType ctype);
+
+static int32_t tfSearchTerm_JSON(void* reader, SIndexTerm* tem, SIdxTRslt* tr);
+static int32_t tfSearchPrefix_JSON(void* reader, SIndexTerm* tem, SIdxTRslt* tr);
+static int32_t tfSearchSuffix_JSON(void* reader, SIndexTerm* tem, SIdxTRslt* tr);
+static int32_t tfSearchRegex_JSON(void* reader, SIndexTerm* tem, SIdxTRslt* tr);
+static int32_t tfSearchLessThan_JSON(void* reader, SIndexTerm* tem, SIdxTRslt* tr);
+static int32_t tfSearchLessEqual_JSON(void* reader, SIndexTerm* tem, SIdxTRslt* tr);
+static int32_t tfSearchGreaterThan_JSON(void* reader, SIndexTerm* tem, SIdxTRslt* tr);
+static int32_t tfSearchGreaterEqual_JSON(void* reader, SIndexTerm* tem, SIdxTRslt* tr);
+static int32_t tfSearchRange_JSON(void* reader, SIndexTerm* tem, SIdxTRslt* tr);
+
+static int32_t tfSearchCompareFunc_JSON(void* reader, SIndexTerm* tem, SIdxTRslt* tr, RangeType ctype);
+
+static int32_t (*tfSearch[][QUERY_MAX])(void* reader, SIndexTerm* tem, SIdxTRslt* tr) = {
     {tfSearchTerm, tfSearchPrefix, tfSearchSuffix, tfSearchRegex, tfSearchLessThan, tfSearchLessEqual,
      tfSearchGreaterThan, tfSearchGreaterEqual, tfSearchRange},
     {tfSearchTerm_JSON, tfSearchPrefix_JSON, tfSearchSuffix_JSON, tfSearchRegex_JSON, tfSearchLessThan_JSON,
@@ -151,13 +151,10 @@ TFileReader* tfileCacheGet(TFileCache* tcache, ICacheKey* key) {
   char    buf[128] = {0};
   int32_t sz = indexSerialCacheKey(key, buf);
   assert(sz < sizeof(buf));
-  indexInfo("Try to get key: %s", buf);
   TFileReader** reader = taosHashGet(tcache->tableCache, buf, sz);
   if (reader == NULL || *reader == NULL) {
-    indexInfo("failed to get key: %s", buf);
     return NULL;
   }
-  indexInfo("Get key: %s file: %s", buf, (*reader)->ctx->file.buf);
   tfileReaderRef(*reader);

   return *reader;
@@ -168,11 +165,11 @@ void tfileCachePut(TFileCache* tcache, ICacheKey* key, TFileReader* reader) {
   // remove last version index reader
   TFileReader** p = taosHashGet(tcache->tableCache, buf, sz);
   if (p != NULL && *p != NULL) {
-    TFileReader* oldReader = *p;
+    TFileReader* oldRdr = *p;
     taosHashRemove(tcache->tableCache, buf, sz);
-    indexInfo("found %s, remove file %s", buf, oldReader->ctx->file.buf);
-    oldReader->remove = true;
-    tfileReaderUnRef(oldReader);
+    indexInfo("found %s, should remove file %s", buf, oldRdr->ctx->file.buf);
+    oldRdr->remove = true;
+    tfileReaderUnRef(oldRdr);
   }
   taosHashPut(tcache->tableCache, buf, sz, &reader, sizeof(void*));
   tfileReaderRef(reader);
@@ -214,10 +211,16 @@ void tfileReaderDestroy(TFileReader* reader) {
   }
   // T_REF_INC(reader);
   fstDestroy(reader->fst);
+  if (reader->remove) {
+    indexInfo("%s is removed", reader->ctx->file.buf);
+  } else {
+    indexInfo("%s is not removed", reader->ctx->file.buf);
+  }
   writerCtxDestroy(reader->ctx, reader->remove);
+
   taosMemoryFree(reader);
 }

-static int32_t tfSearchTerm(void* reader, SIndexTerm* tem, SIdxTempResult* tr) {
+static int32_t tfSearchTerm(void* reader, SIndexTerm* tem, SIdxTRslt* tr) {
   int      ret = 0;
   char*    p = tem->colVal;
   uint64_t sz = tem->nColVal;
@@ -240,7 +243,7 @@ static int32_t tfSearchTerm(void* reader, SIndexTerm* tem, SIdxTempResult* tr) {
   return 0;
 }

-static int32_t tfSearchPrefix(void* reader, SIndexTerm* tem, SIdxTempResult* tr) {
+static int32_t tfSearchPrefix(void* reader, SIndexTerm* tem, SIdxTRslt* tr) {
   bool     hasJson = INDEX_TYPE_CONTAIN_EXTERN_TYPE(tem->colType, TSDB_DATA_TYPE_JSON);
   char*    p = tem->colVal;
   uint64_t sz = tem->nColVal;
@@ -276,7 +279,7 @@ static int32_t tfSearchPrefix(void* reader, SIndexTerm* tem, SIdxTempResult* tr)
   }
   return 0;
 }
-static int32_t tfSearchSuffix(void* reader, SIndexTerm* tem, SIdxTempResult* tr) {
+static int32_t tfSearchSuffix(void* reader, SIndexTerm* tem, SIdxTRslt* tr) {
   bool hasJson = INDEX_TYPE_CONTAIN_EXTERN_TYPE(tem->colType, TSDB_DATA_TYPE_JSON);

   int ret = 0;
@@ -295,7 +298,7 @@ static int32_t tfSearchSuffix(void* reader, SIndexTerm* tem, SIdxTempResult* tr)
   fstSliceDestroy(&key);
   return 0;
 }
-static int32_t tfSearchRegex(void* reader, SIndexTerm* tem, SIdxTempResult* tr) {
+static int32_t tfSearchRegex(void* reader, SIndexTerm* tem, SIdxTRslt* tr) {
   bool hasJson = INDEX_TYPE_CONTAIN_EXTERN_TYPE(tem->colType, TSDB_DATA_TYPE_JSON);

   int ret = 0;
@@ -316,7 +319,7 @@ static int32_t tfSearchRegex(void* reader, SIndexTerm* tem, SIdxTempResult* tr)
   return 0;
 }

-static int32_t tfSearchCompareFunc(void* reader, SIndexTerm* tem, SIdxTempResult* tr, RangeType type) {
+static int32_t tfSearchCompareFunc(void* reader, SIndexTerm* tem, SIdxTRslt* tr, RangeType type) {
   int   ret = 0;
   char* p = tem->colVal;
   int   skip = 0;
@@ -355,19 +358,19 @@ static int32_t tfSearchCompareFunc(void* reader, SIndexTerm* tem, SIdxTempResult
   fstStreamBuilderDestroy(sb);
   return TSDB_CODE_SUCCESS;
 }
-static int32_t tfSearchLessThan(void* reader, SIndexTerm* tem, SIdxTempResult* tr) {
+static int32_t tfSearchLessThan(void* reader, SIndexTerm* tem, SIdxTRslt* tr) {
   return tfSearchCompareFunc(reader, tem, tr, LT);
 }
-static int32_t tfSearchLessEqual(void* reader, SIndexTerm* tem, SIdxTempResult* tr) {
+static int32_t tfSearchLessEqual(void* reader, SIndexTerm* tem, SIdxTRslt* tr) {
   return tfSearchCompareFunc(reader, tem, tr, LE);
 }
-static int32_t tfSearchGreaterThan(void* reader, SIndexTerm* tem, SIdxTempResult* tr) {
+static int32_t tfSearchGreaterThan(void* reader, SIndexTerm* tem, SIdxTRslt* tr) {
   return tfSearchCompareFunc(reader, tem, tr, GT);
 }
-static int32_t tfSearchGreaterEqual(void* reader, SIndexTerm* tem, SIdxTempResult* tr) {
+static int32_t tfSearchGreaterEqual(void* reader, SIndexTerm* tem, SIdxTRslt* tr) {
   return tfSearchCompareFunc(reader, tem, tr, GE);
 }
-static int32_t tfSearchRange(void* reader, SIndexTerm* tem, SIdxTempResult* tr) {
+static int32_t tfSearchRange(void* reader, SIndexTerm* tem, SIdxTRslt* tr) {
   bool  hasJson = INDEX_TYPE_CONTAIN_EXTERN_TYPE(tem->colType, TSDB_DATA_TYPE_JSON);
   int   ret = 0;
   char* p = tem->colVal;
@@ -396,7 +399,7 @@ static int32_t tfSearchRange(void* reader, SIndexTerm* tem, SIdxTempResult* tr)
   fstSliceDestroy(&key);
   return 0;
 }
-static int32_t tfSearchTerm_JSON(void* reader, SIndexTerm* tem, SIdxTempResult* tr) {
+static int32_t tfSearchTerm_JSON(void* reader, SIndexTerm* tem, SIdxTRslt* tr) {
   int   ret = 0;
   char* p = indexPackJsonData(tem);
   int   sz = strlen(p);
@@ -421,36 +424,36 @@ static int32_t tfSearchTerm_JSON(void* reader, SIndexTerm* tem, SIdxTempResult*
   // deprecate api
   return TSDB_CODE_SUCCESS;
 }
-static int32_t tfSearchPrefix_JSON(void* reader, SIndexTerm* tem, SIdxTempResult* tr) {
+static int32_t tfSearchPrefix_JSON(void* reader, SIndexTerm* tem, SIdxTRslt* tr) {
   // impl later
   return TSDB_CODE_SUCCESS;
 }
-static int32_t tfSearchSuffix_JSON(void* reader, SIndexTerm* tem, SIdxTempResult* tr) {
+static int32_t tfSearchSuffix_JSON(void* reader, SIndexTerm* tem, SIdxTRslt* tr) {
   // impl later
   return TSDB_CODE_SUCCESS;
 }
-static int32_t tfSearchRegex_JSON(void* reader, SIndexTerm* tem, SIdxTempResult* tr) {
+static int32_t tfSearchRegex_JSON(void* reader, SIndexTerm* tem, SIdxTRslt* tr) {
   // impl later
   return TSDB_CODE_SUCCESS;
 }
-static int32_t tfSearchLessThan_JSON(void* reader, SIndexTerm* tem, SIdxTempResult* tr) {
+static int32_t tfSearchLessThan_JSON(void* reader, SIndexTerm* tem, SIdxTRslt* tr) {
   return tfSearchCompareFunc_JSON(reader, tem, tr, LT);
 }
-static int32_t tfSearchLessEqual_JSON(void* reader, SIndexTerm* tem, SIdxTempResult* tr) {
+static int32_t tfSearchLessEqual_JSON(void* reader, SIndexTerm* tem, SIdxTRslt* tr) {
   return tfSearchCompareFunc_JSON(reader, tem, tr, LE);
 }
-static int32_t tfSearchGreaterThan_JSON(void* reader, SIndexTerm* tem, SIdxTempResult* tr) {
+static int32_t tfSearchGreaterThan_JSON(void* reader, SIndexTerm* tem, SIdxTRslt* tr) {
   return tfSearchCompareFunc_JSON(reader, tem, tr, GT);
 }
-static int32_t tfSearchGreaterEqual_JSON(void* reader, SIndexTerm* tem, SIdxTempResult* tr) {
+static int32_t tfSearchGreaterEqual_JSON(void* reader, SIndexTerm* tem, SIdxTRslt* tr) {
   return tfSearchCompareFunc_JSON(reader, tem, tr, GE);
 }
-static int32_t tfSearchRange_JSON(void* reader, SIndexTerm* tem, SIdxTempResult* tr) {
+static int32_t tfSearchRange_JSON(void* reader, SIndexTerm* tem, SIdxTRslt* tr) {
   // impl later
   return TSDB_CODE_SUCCESS;
 }

-static int32_t tfSearchCompareFunc_JSON(void* reader, SIndexTerm* tem, SIdxTempResult* tr, RangeType ctype) {
+static int32_t tfSearchCompareFunc_JSON(void* reader, SIndexTerm* tem, SIdxTRslt* tr, RangeType ctype) {
   int ret = 0;
   int skip = 0;
@@ -498,7 +501,7 @@ static int32_t tfSearchCompareFunc_JSON(void* reader, SIndexTerm* tem, SIdxTempR
   fstStreamBuilderDestroy(sb);
   return TSDB_CODE_SUCCESS;
 }
-int tfileReaderSearch(TFileReader* reader, SIndexTermQuery* query, SIdxTempResult* tr) {
+int tfileReaderSearch(TFileReader* reader, SIndexTermQuery* query, SIdxTRslt* tr) {
   SIndexTerm*     term = query->term;
   EIndexQueryType qtype = query->qType;
   int             ret = 0;
@@ -512,7 +515,7 @@ int tfileReaderSearch(TFileReader* reader, SIndexTermQuery* query, SIdxTempResul
   return ret;
 }

-TFileWriter* tfileWriterOpen(char* path, uint64_t suid, int32_t version, const char* colName, uint8_t colType) {
+TFileWriter* tfileWriterOpen(char* path, uint64_t suid, int64_t version, const char* colName, uint8_t colType) {
   char fullname[256] = {0};
   tfileGenFileFullName(fullname, path, suid, colName, version);
   // indexInfo("open write file name %s", fullname);
@@ -529,7 +532,7 @@ TFileWriter* tfileWriterOpen(char* path, uint64_t suid, int32_t version, const c
   return tfileWriterCreate(wcx, &tfh);
 }
-TFileReader* tfileReaderOpen(char* path, uint64_t suid, int32_t version, const char* colName) {
+TFileReader* tfileReaderOpen(char* path, uint64_t suid, int64_t version, const char* colName) {
   char fullname[256] = {0};
   tfileGenFileFullName(fullname, path, suid, colName, version);
@@ -657,7 +660,7 @@ IndexTFile* indexTFileCreate(const char* path) {
     tfileCacheDestroy(cache);
     return NULL;
   }
-
+  taosThreadMutexInit(&tfile->mtx, NULL);
   tfile->cache = cache;
   return tfile;
 }
@@ -665,11 +668,12 @@ void indexTFileDestroy(IndexTFile* tfile) {
   if (tfile == NULL) {
     return;
   }
+  taosThreadMutexDestroy(&tfile->mtx);
   tfileCacheDestroy(tfile->cache);
   taosMemoryFree(tfile);
 }

-int indexTFileSearch(void* tfile, SIndexTermQuery* query, SIdxTempResult* result) {
+int indexTFileSearch(void* tfile, SIndexTermQuery* query, SIdxTRslt* result) {
   int ret = -1;
   if (tfile == NULL) {
     return ret;
@@ -680,7 +684,10 @@ int indexTFileSearch(void* tfile, SIndexTermQuery* query, SIdxTempResult* result
   SIndexTerm* term = query->term;
   ICacheKey   key = {.suid = term->suid, .colType = term->colType, .colName = term->colName, .nColName = term->nColName};
+
+  taosThreadMutexLock(&pTfile->mtx);
   TFileReader* reader = tfileCacheGet(pTfile->cache, &key);
+  taosThreadMutexUnlock(&pTfile->mtx);
   if (reader == NULL) {
     return 0;
   }
@@ -780,8 +787,13 @@ TFileReader* tfileGetReaderByCol(IndexTFile* tf, uint64_t suid, char* colName) {
   if (tf == NULL) {
     return NULL;
   }
-  ICacheKey key = {.suid = suid, .colType = TSDB_DATA_TYPE_BINARY, .colName = colName, .nColName = strlen(colName)};
-  return tfileCacheGet(tf->cache, &key);
+  TFileReader* rd = NULL;
+  ICacheKey    key = {.suid = suid, .colType = TSDB_DATA_TYPE_BINARY, .colName = colName, .nColName = strlen(colName)};
+
+  taosThreadMutexLock(&tf->mtx);
+  rd = tfileCacheGet(tf->cache, &key);
+  taosThreadMutexUnlock(&tf->mtx);
+  return rd;
 }

 static int tfileUidCompare(const void* a, const void* b) {
@@ -1013,7 +1025,7 @@ void tfileReaderUnRef(TFileReader* reader) {
 static SArray* tfileGetFileList(const char* path) {
   char     buf[128] = {0};
   uint64_t suid;
-  uint32_t version;
+  int64_t  version;
   SArray*  files = taosArrayInit(4, sizeof(void*));

   TdDirPtr pDir = taosOpenDir(path);
@@ -1053,19 +1065,19 @@ static int tfileCompare(const void* a, const void* b) {
   return strcmp(as, bs);
 }
-static int tfileParseFileName(const char* filename, uint64_t* suid, char* col, int* version) {
-  if (3 == sscanf(filename, "%" PRIu64 "-%[^-]-%d.tindex", suid, col, version)) {
+static int tfileParseFileName(const char* filename, uint64_t* suid, char* col, int64_t* version) {
+  if (3 == sscanf(filename, "%" PRIu64 "-%[^-]-%" PRId64 ".tindex", suid, col, version)) {
     // read suid & colid & version success
     return 0;
   }
   return -1;
 }
 // tfile name suid-colId-version.tindex
-static void tfileGenFileName(char* filename, uint64_t suid, const char* col, int version) {
-  sprintf(filename, "%" PRIu64 "-%s-%d.tindex", suid, col, version);
+static void tfileGenFileName(char* filename, uint64_t suid, const char* col, int64_t version) {
+  sprintf(filename, "%" PRIu64 "-%s-%" PRId64 ".tindex", suid, col, version);
   return;
 }
-static void tfileGenFileFullName(char* fullname, const char* path, uint64_t suid, const char* col, int32_t version) {
+static void tfileGenFileFullName(char* fullname, const char* path, uint64_t suid, const char* col, int64_t version) {
   char filename[128] = {0};
   tfileGenFileName(filename, suid, col, version);
   sprintf(fullname, "%s/%s", path, filename);
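The fstSlice hunks above swap unguarded ref++/ref-- for atomic add/sub because slices are now shared between the background merge thread and concurrent searches. The subtle part is in fstSliceDestroy(): the free decision must use the value returned by the atomic decrement, never a re-read of the field. A minimal sketch with C11 atomics (SharedBuf is hypothetical; TDengine uses its own atomic_sub_fetch_32 wrapper):

    #include <stdatomic.h>
    #include <stdlib.h>

    typedef struct {
      atomic_int ref;
      char      *data;
    } SharedBuf;

    void sharedBufRelease(SharedBuf *b) {
      // atomic_fetch_sub returns the value before the decrement; freeing only
      // when it was 1 guarantees exactly one thread runs the cleanup. Testing
      // b->ref again after decrementing would let two racing threads both
      // observe 0 and double-free.
      if (atomic_fetch_sub(&b->ref, 1) == 1) {
        free(b->data);
        free(b);
      }
    }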
diff --git a/source/libs/index/src/indexUtil.c b/source/libs/index/src/indexUtil.c
index a618787fd49c96b729e782b4a01a5374c76639be..1d2027889572fcd809e378dcae13560b0bae51c1 100644
--- a/source/libs/index/src/indexUtil.c
+++ b/source/libs/index/src/indexUtil.c
@@ -36,24 +36,24 @@ static int iBinarySearch(SArray *arr, int s, int e, uint64_t k) {
   return s;
 }

-void iIntersection(SArray *inters, SArray *final) {
-  int32_t sz = (int32_t)taosArrayGetSize(inters);
+void iIntersection(SArray *in, SArray *out) {
+  int32_t sz = (int32_t)taosArrayGetSize(in);
   if (sz <= 0) {
     return;
   }
   MergeIndex *mi = taosMemoryCalloc(sz, sizeof(MergeIndex));
   for (int i = 0; i < sz; i++) {
-    SArray *t = taosArrayGetP(inters, i);
+    SArray *t = taosArrayGetP(in, i);
     mi[i].len = (int32_t)taosArrayGetSize(t);
     mi[i].idx = 0;
   }

-  SArray *base = taosArrayGetP(inters, 0);
+  SArray *base = taosArrayGetP(in, 0);
   for (int i = 0; i < taosArrayGetSize(base); i++) {
     uint64_t tgt = *(uint64_t *)taosArrayGet(base, i);
     bool     has = true;
-    for (int j = 1; j < taosArrayGetSize(inters); j++) {
-      SArray *oth = taosArrayGetP(inters, j);
+    for (int j = 1; j < taosArrayGetSize(in); j++) {
+      SArray *oth = taosArrayGetP(in, j);
       int     mid = iBinarySearch(oth, mi[j].idx, mi[j].len - 1, tgt);
       if (mid >= 0 && mid < mi[j].len) {
         uint64_t val = *(uint64_t *)taosArrayGet(oth, mid);
@@ -64,33 +64,33 @@ void iIntersection(SArray *inters, SArray *final) {
       }
     }
     if (has == true) {
-      taosArrayPush(final, &tgt);
+      taosArrayPush(out, &tgt);
     }
   }
   taosMemoryFreeClear(mi);
 }

-void iUnion(SArray *inters, SArray *final) {
-  int32_t sz = (int32_t)taosArrayGetSize(inters);
+void iUnion(SArray *in, SArray *out) {
+  int32_t sz = (int32_t)taosArrayGetSize(in);
   if (sz <= 0) {
     return;
   }
   if (sz == 1) {
-    taosArrayAddAll(final, taosArrayGetP(inters, 0));
+    taosArrayAddAll(out, taosArrayGetP(in, 0));
     return;
   }

   MergeIndex *mi = taosMemoryCalloc(sz, sizeof(MergeIndex));
   for (int i = 0; i < sz; i++) {
-    SArray *t = taosArrayGetP(inters, i);
+    SArray *t = taosArrayGetP(in, i);
     mi[i].len = (int32_t)taosArrayGetSize(t);
     mi[i].idx = 0;
   }

   while (1) {
-    uint64_t mVal = UINT_MAX;
+    uint64_t mVal = UINT64_MAX;
     int      mIdx = -1;

     for (int j = 0; j < sz; j++) {
-      SArray *t = taosArrayGetP(inters, j);
+      SArray *t = taosArrayGetP(in, j);
       if (mi[j].idx >= mi[j].len) {
         continue;
       }
@@ -102,13 +102,13 @@ void iUnion(SArray *inters, SArray *final) {
     }
     if (mIdx != -1) {
       mi[mIdx].idx++;
-      if (taosArrayGetSize(final) > 0) {
-        uint64_t lVal = *(uint64_t *)taosArrayGetLast(final);
+      if (taosArrayGetSize(out) > 0) {
+        uint64_t lVal = *(uint64_t *)taosArrayGetLast(out);
         if (lVal == mVal) {
           continue;
         }
       }
-      taosArrayPush(final, &mVal);
+      taosArrayPush(out, &mVal);
     } else {
       break;
     }
@@ -158,41 +158,44 @@ int verdataCompare(const void *a, const void *b) {
   return cmp;
 }

-SIdxTempResult *sIdxTempResultCreate() {
-  SIdxTempResult *tr = taosMemoryCalloc(1, sizeof(SIdxTempResult));
+SIdxTRslt *idxTRsltCreate() {
+  SIdxTRslt *tr = taosMemoryCalloc(1, sizeof(SIdxTRslt));

   tr->total = taosArrayInit(4, sizeof(uint64_t));
-  tr->added = taosArrayInit(4, sizeof(uint64_t));
-  tr->deled = taosArrayInit(4, sizeof(uint64_t));
+  tr->add = taosArrayInit(4, sizeof(uint64_t));
+  tr->del = taosArrayInit(4, sizeof(uint64_t));

   return tr;
 }
-void sIdxTempResultClear(SIdxTempResult *tr) {
+void idxTRsltClear(SIdxTRslt *tr) {
   if (tr == NULL) {
     return;
   }
   taosArrayClear(tr->total);
-  taosArrayClear(tr->added);
-  taosArrayClear(tr->deled);
+  taosArrayClear(tr->add);
+  taosArrayClear(tr->del);
 }
-void sIdxTempResultDestroy(SIdxTempResult *tr) {
+void idxTRsltDestroy(SIdxTRslt *tr) {
   if (tr == NULL) {
     return;
   }
   taosArrayDestroy(tr->total);
-  taosArrayDestroy(tr->added);
-  taosArrayDestroy(tr->deled);
+  taosArrayDestroy(tr->add);
+  taosArrayDestroy(tr->del);
 }
-void sIdxTempResultMergeTo(SArray *result, SIdxTempResult *tr) {
+void idxTRsltMergeTo(SIdxTRslt *tr, SArray *result) {
   taosArraySort(tr->total, uidCompare);
-  taosArraySort(tr->added, uidCompare);
-  taosArraySort(tr->deled, uidCompare);
-
-  SArray *arrs = taosArrayInit(2, sizeof(void *));
-  taosArrayPush(arrs, &tr->total);
-  taosArrayPush(arrs, &tr->added);
-
-  iUnion(arrs, result);
-  taosArrayDestroy(arrs);
-
-  iExcept(result, tr->deled);
+  taosArraySort(tr->add, uidCompare);
+  taosArraySort(tr->del, uidCompare);
+
+  if (taosArrayGetSize(tr->total) == 0 || taosArrayGetSize(tr->add) == 0) {
+    SArray *t = taosArrayGetSize(tr->total) == 0 ? tr->add : tr->total;
+    taosArrayAddAll(result, t);
+  } else {
+    SArray *arrs = taosArrayInit(2, sizeof(void *));
+    taosArrayPush(arrs, &tr->total);
+    taosArrayPush(arrs, &tr->add);
+    iUnion(arrs, result);
+    taosArrayDestroy(arrs);
+  }
+  iExcept(result, tr->del);
 }
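iUnion() above is a k-way merge: each round selects the smallest unconsumed head across the input arrays, so the selection seed must compare greater than every possible element. Seeding with 32-bit UINT_MAX, as the old code did, silently drops every uid above 4294967295 — no head tests smaller than the seed, mIdx stays -1, and the loop exits early. The new UtilEnv tests further below push UINT64_MAX - 1 precisely to pin this down. A one-screen demonstration of the sentinel arithmetic (standalone, not patch code):

    #include <limits.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void) {
      uint64_t head = UINT64_MAX - 1;   // a uid beyond the 32-bit range
      uint64_t oldSeed = UINT_MAX;      // old sentinel: head < oldSeed is false
      uint64_t newSeed = UINT64_MAX;    // new sentinel: head < newSeed is true
      printf("selected with old seed: %d\n", head < oldSeed);  // 0 -> uid dropped
      printf("selected with new seed: %d\n", head < newSeed);  // 1 -> uid kept
      return 0;
    }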
diff --git a/source/libs/index/test/CMakeLists.txt b/source/libs/index/test/CMakeLists.txt
index c0b47e74c6b0561141806dae8ce14ab4d632ec8e..2835084a81b87e358916c20ce0e6c70cf6884021 100644
--- a/source/libs/index/test/CMakeLists.txt
+++ b/source/libs/index/test/CMakeLists.txt
@@ -1,74 +1,74 @@
-add_executable(indexTest "")
-add_executable(fstTest "")
-add_executable(fstUT "")
-add_executable(UtilUT "")
-add_executable(jsonUT "")
+add_executable(idxTest "")
+add_executable(idxFstTest "")
+add_executable(idxFstUT "")
+add_executable(idxUtilUT "")
+add_executable(idxJsonUT "")

-target_sources(indexTest
+target_sources(idxTest
     PRIVATE
         "indexTests.cc"
 )
-target_sources(fstTest
+target_sources(idxFstTest
     PRIVATE
         "fstTest.cc"
 )
-target_sources(fstUT
+target_sources(idxFstUT
     PRIVATE
         "fstUT.cc"
 )
-target_sources(UtilUT
+target_sources(idxUtilUT
     PRIVATE
         "utilUT.cc"
 )
-target_sources(jsonUT
+target_sources(idxJsonUT
     PRIVATE
         "jsonUT.cc"
 )
-target_include_directories ( indexTest
+target_include_directories (idxTest
     PUBLIC
         "${TD_SOURCE_DIR}/include/libs/index"
         "${CMAKE_CURRENT_SOURCE_DIR}/../inc"
 )
-target_include_directories ( fstTest
+target_include_directories (idxFstTest
     PUBLIC
         "${TD_SOURCE_DIR}/include/libs/index"
         "${CMAKE_CURRENT_SOURCE_DIR}/../inc"
 )
-target_include_directories ( fstUT
+target_include_directories (idxFstUT
     PUBLIC
         "${TD_SOURCE_DIR}/include/libs/index"
         "${CMAKE_CURRENT_SOURCE_DIR}/../inc"
 )
-target_include_directories ( UtilUT
+target_include_directories (idxUtilUT
     PUBLIC
         "${TD_SOURCE_DIR}/include/libs/index"
         "${CMAKE_CURRENT_SOURCE_DIR}/../inc"
 )
-target_include_directories (jsonUT
+target_include_directories (idxJsonUT
     PUBLIC
         "${TD_SOURCE_DIR}/include/libs/index"
         "${CMAKE_CURRENT_SOURCE_DIR}/../inc"
 )
-target_link_libraries (indexTest
+target_link_libraries (idxTest
     os
     util
     common
     gtest_main
     index
 )
-target_link_libraries (fstTest
+target_link_libraries (idxFstTest
     os
     util
     common
     gtest_main
     index
 )
-target_link_libraries (fstUT
+target_link_libraries (idxFstUT
     os
     util
     common
@@ -76,7 +76,7 @@ target_link_libraries (fstUT
     index
 )

-target_link_libraries (UtilUT
+target_link_libraries (idxUtilUT
     os
     util
     common
@@ -84,7 +84,7 @@ target_link_libraries (UtilUT
     index
 )

-target_link_libraries (jsonUT
+target_link_libraries (idxJsonUT
     os
     util
     common
@@ -92,19 +92,21 @@ target_link_libraries (jsonUT
     index
 )

-add_test(
-    NAME idxtest
-    COMMAND indexTest
-)
-add_test(
-    NAME idxJsonUT
-    COMMAND jsonUT
-)
+if(NOT TD_WINDOWS)
+    add_test(
+        NAME idxtest
+        COMMAND idxTest
+    )
+    add_test(
+        NAME idxJsonUT
+        COMMAND idxJsonUT
+    )
+endif(NOT TD_WINDOWS)
 add_test(
     NAME idxUtilUT
-    COMMAND UtilUT
+    COMMAND idxUtilUT
 )
 add_test(
     NAME idxFstUT
-    COMMAND fstUT
+    COMMAND idxFstUT
 )
diff --git a/source/libs/index/test/indexTests.cc b/source/libs/index/test/indexTests.cc
index f848cee86b4af0376af61640eb01a07eb1c22371..74a30c3387ea3c3133e4e4f82ffd3dd8dc38f540 100644
--- a/source/libs/index/test/indexTests.cc
+++ b/source/libs/index/test/indexTests.cc
@@ -272,14 +272,14 @@ void validateFst() {
   }
   delete m;
 }
-static std::string logDir = "/tmp/log";
+static std::string logDir = TD_TMP_DIR_PATH "log";

 static void initLog() {
   const char*   defaultLogFileNamePrefix = "taoslog";
   const int32_t maxLogFileNum = 10;

   tsAsyncLog = 0;
-  sDebugFlag = 143;
+  idxDebugFlag = 143;
   strcpy(tsLogDir, logDir.c_str());
   taosRemoveDir(tsLogDir);
   taosMkDir(tsLogDir);
@@ -387,7 +387,7 @@ class TFileObj {
     std::string path(path_);
     int         colId = 2;
     char        buf[64] = {0};
-    sprintf(buf, "%" PRIu64 "-%d-%d.tindex", header.suid, colId_, header.version);
+    sprintf(buf, "%" PRIu64 "-%d-%" PRId64 ".tindex", header.suid, colId_, header.version);
     path.append("/").append(buf);

     fileName_ = path;
@@ -411,12 +411,12 @@ class TFileObj {
     //
    // }

-    SIdxTempResult* tr = sIdxTempResultCreate();
+    SIdxTRslt* tr = idxTRsltCreate();

     int ret = tfileReaderSearch(reader_, query, tr);

-    sIdxTempResultMergeTo(result, tr);
-    sIdxTempResultDestroy(tr);
+    idxTRsltMergeTo(tr, result);
+    idxTRsltDestroy(tr);
     return ret;
   }
   ~TFileObj() {
@@ -531,11 +531,11 @@ class CacheObj {
     indexCacheDebug(cache);
   }
   int Get(SIndexTermQuery* query, int16_t colId, int32_t version, SArray* result, STermValueType* s) {
-    SIdxTempResult* tr = sIdxTempResultCreate();
+    SIdxTRslt* tr = idxTRsltCreate();

     int ret = indexCacheSearch(cache, query, tr, s);

-    sIdxTempResultMergeTo(result, tr);
-    sIdxTempResultDestroy(tr);
+    idxTRsltMergeTo(tr, result);
+    idxTRsltDestroy(tr);

     if (ret != 0) {
       std::cout << "failed to get from cache:" << ret << std::endl;
@@ -794,10 +794,10 @@ class IndexObj {
     }
     int sz = taosArrayGetSize(result);
     indexMultiTermQueryDestroy(mq);
-    taosArrayDestroy(result);
     assert(sz == 1);
     uint64_t* ret = (uint64_t*)taosArrayGet(result, 0);
     assert(val = *ret);
+    taosArrayDestroy(result);
     return sz;
   }
@@ -916,7 +916,7 @@ TEST_F(IndexEnv2, testIndexOpen) {
   }
 }
 TEST_F(IndexEnv2, testEmptyIndexOpen) {
-  std::string path = "/tmp/test";
+  std::string path = TD_TMP_DIR_PATH "test";
   if (index->Init(path) != 0) {
     std::cout << "failed to init index" << std::endl;
     exit(1);
@@ -953,8 +953,8 @@ TEST_F(IndexEnv2, testIndex_TrigeFlush) {
 }

 static void single_write_and_search(IndexObj* idx) {
-  int target = idx->SearchOne("tag1", "Hello");
-  target = idx->SearchOne("tag2", "Test");
+  // int target = idx->SearchOne("tag1", "Hello");
+  // target = idx->SearchOne("tag2", "Test");
 }
 static void multi_write_and_search(IndexObj* idx) {
   idx->PutOne("tag1", "Hello");
diff --git a/source/libs/index/test/index_executor_tests.cpp b/source/libs/index/test/index_executor_tests.cpp
index b0c2a983d1b5f60b50e4f5734a8c99fb3729d80e..b88ffe5b8bdb2058a66d1e56020206643c246e42 100644
--- a/source/libs/index/test/index_executor_tests.cpp
+++ b/source/libs/index/test/index_executor_tests.cpp
@@ -24,11 +24,7 @@
 #pragma GCC diagnostic ignored "-Wunused-variable"
 #pragma GCC diagnostic ignored "-Wsign-compare"

-#include "executor.h"
-#include "executorimpl.h"
-#include "indexoperator.h"
-#include "os.h"
-
+#include "index.h"
 #include "stub.h"
 #include "taos.h"
 #include "tcompare.h"
diff --git a/source/libs/index/test/jsonUT.cc b/source/libs/index/test/jsonUT.cc
index 8a837c5700da2b8c70d083d5f282933844091673..48ce8839c459bb2c523d710f1804346f2bede33a 100644
--- a/source/libs/index/test/jsonUT.cc
+++ b/source/libs/index/test/jsonUT.cc
@@ -24,7 +24,7 @@ static void initLog() {
   const int32_t maxLogFileNum = 10;

   tsAsyncLog = 0;
-  sDebugFlag = 143;
+  idxDebugFlag = 143;
   strcpy(tsLogDir, logDir.c_str());
   taosRemoveDir(tsLogDir);
   taosMkDir(tsLogDir);
@@ -51,6 +51,7 @@ class JsonEnv : public ::testing::Test {
     tIndexJsonClose(index);
     indexOptsDestroy(opts);
     printf("destory\n");
+    taosMsleep(1000);
   }
   SIndexJsonOpts* opts;
   SIndexJson*     index;
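Several hunks above ride on the same version-widening change: once tfile versions are int64_t, every printf-family format that printed them as %d must switch to the PRId64 macro from <inttypes.h>. Passing an int64_t where %d is expected is undefined behavior on LP64 platforms and misprints anything past 2^31 - 1. A small sketch of the suid-col-version file-name convention the patch uses (values hypothetical):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void) {
      uint64_t suid = 99;               // super-table uid
      int64_t  version = 4294967298LL;  // would be mangled under "%d"
      char     name[64] = {0};
      snprintf(name, sizeof(name), "%" PRIu64 "-%s-%" PRId64 ".tindex",
               suid, "tag1", version);  // suid-colName-version, as in the patch
      puts(name);                       // 99-tag1-4294967298.tindex
      return 0;
    }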
18a2b457c41c2cd66f20a01f3690d0af4fe69d3d..4a30160244d82b8c00b3e7b031d6fd492057ec21 100644 --- a/source/libs/index/test/utilUT.cc +++ b/source/libs/index/test/utilUT.cc @@ -226,6 +226,22 @@ TEST_F(UtilEnv, 04union) { iUnion(src, rslt); assert(taosArrayGetSize(rslt) == 12); } +TEST_F(UtilEnv, 05unionExcept) { + clearSourceArray(src); + clearFinalArray(rslt); + + uint64_t arr2[] = {7}; + SArray * f = (SArray *)taosArrayGetP(src, 1); + for (int i = 0; i < sizeof(arr2) / sizeof(arr2[0]); i++) { + taosArrayPush(f, &arr2[i]); + } + + iUnion(src, rslt); + + SArray *ept = taosArrayInit(0, sizeof(uint64_t)); + iExcept(rslt, ept); + EXPECT_EQ(taosArrayGetSize(rslt), 1); +} TEST_F(UtilEnv, 01Except) { SArray *total = taosArrayInit(4, sizeof(uint64_t)); { @@ -308,16 +324,36 @@ TEST_F(UtilEnv, 01Except) { ASSERT_EQ(*(uint64_t *)taosArrayGet(total, 1), 100); } TEST_F(UtilEnv, testFill) { - for (int i = 0; i < 10000000; i++) { + for (int i = 0; i < 1000000; i++) { int64_t val = i; char buf[65] = {0}; indexInt2str(val, buf, 1); EXPECT_EQ(val, taosStr2int64(buf)); } - for (int i = 0; i < 10000000; i++) { + for (int i = 0; i < 1000000; i++) { int64_t val = 0 - i; char buf[65] = {0}; indexInt2str(val, buf, -1); EXPECT_EQ(val, taosStr2int64(buf)); } } +TEST_F(UtilEnv, TempResult) { + SIdxTRslt *relt = idxTRsltCreate(); + + SArray *f = taosArrayInit(0, sizeof(uint64_t)); + + uint64_t val = UINT64_MAX - 1; + taosArrayPush(relt->add, &val); + idxTRsltMergeTo(relt, f); + EXPECT_EQ(taosArrayGetSize(f), 1); +} +TEST_F(UtilEnv, TempResultExcept) { + SIdxTRslt *relt = idxTRsltCreate(); + + SArray *f = taosArrayInit(0, sizeof(uint64_t)); + + uint64_t val = UINT64_MAX; + taosArrayPush(relt->add, &val); + idxTRsltMergeTo(relt, f); + EXPECT_EQ(taosArrayGetSize(f), 1); +} diff --git a/source/libs/monitor/src/monMsg.c b/source/libs/monitor/src/monMsg.c index e106cbd428b48f7751785b019e21f8c5e547969c..944a7b54750c9e8850d0fe124f36561c54a6630e 100644 --- a/source/libs/monitor/src/monMsg.c +++ b/source/libs/monitor/src/monMsg.c @@ -556,4 +556,50 @@ int32_t tDeserializeSMonMloadInfo(void *buf, int32_t bufLen, SMonMloadInfo *pInf tDecoderClear(&decoder); return 0; -} \ No newline at end of file +} + + +int32_t tSerializeSQnodeLoad(void *buf, int32_t bufLen, SQnodeLoad *pInfo) { + SEncoder encoder = {0}; + tEncoderInit(&encoder, buf, bufLen); + + if (tStartEncode(&encoder) < 0) return -1; + if (tEncodeI64(&encoder, pInfo->numOfProcessedQuery) < 0) return -1; + if (tEncodeI64(&encoder, pInfo->numOfProcessedCQuery) < 0) return -1; + if (tEncodeI64(&encoder, pInfo->numOfProcessedFetch) < 0) return -1; + if (tEncodeI64(&encoder, pInfo->numOfProcessedDrop) < 0) return -1; + if (tEncodeI64(&encoder, pInfo->numOfProcessedHb) < 0) return -1; + if (tEncodeI64(&encoder, pInfo->cacheDataSize) < 0) return -1; + if (tEncodeI64(&encoder, pInfo->numOfQueryInQueue) < 0) return -1; + if (tEncodeI64(&encoder, pInfo->numOfFetchInQueue) < 0) return -1; + if (tEncodeI64(&encoder, pInfo->timeInQueryQueue) < 0) return -1; + if (tEncodeI64(&encoder, pInfo->timeInFetchQueue) < 0) return -1; + tEndEncode(&encoder); + + int32_t tlen = encoder.pos; + tEncoderClear(&encoder); + return tlen; +} + +int32_t tDeserializeSQnodeLoad(void *buf, int32_t bufLen, SQnodeLoad *pInfo) { + SDecoder decoder = {0}; + tDecoderInit(&decoder, buf, bufLen); + + if (tStartDecode(&decoder) < 0) return -1; + if (tDecodeI64(&decoder, &pInfo->numOfProcessedQuery) < 0) return -1; + if (tDecodeI64(&decoder, &pInfo->numOfProcessedCQuery) < 0) return -1; + if (tDecodeI64(&decoder, 
&pInfo->numOfProcessedFetch) < 0) return -1; + if (tDecodeI64(&decoder, &pInfo->numOfProcessedDrop) < 0) return -1; + if (tDecodeI64(&decoder, &pInfo->numOfProcessedHb) < 0) return -1; + if (tDecodeI64(&decoder, &pInfo->cacheDataSize) < 0) return -1; + if (tDecodeI64(&decoder, &pInfo->numOfQueryInQueue) < 0) return -1; + if (tDecodeI64(&decoder, &pInfo->numOfFetchInQueue) < 0) return -1; + if (tDecodeI64(&decoder, &pInfo->timeInQueryQueue) < 0) return -1; + if (tDecodeI64(&decoder, &pInfo->timeInFetchQueue) < 0) return -1; + tEndDecode(&decoder); + + tDecoderClear(&decoder); + return 0; +} + + diff --git a/source/libs/nodes/src/nodesCloneFuncs.c b/source/libs/nodes/src/nodesCloneFuncs.c index 68d3741b482105d02d4751847f01f3fbdc32986f..35b4da7013f2c9cce51c1382368cc38cd9aafa93 100644 --- a/source/libs/nodes/src/nodesCloneFuncs.c +++ b/source/libs/nodes/src/nodesCloneFuncs.c @@ -142,14 +142,16 @@ static SNode* valueNodeCopy(const SValueNode* pSrc, SValueNode* pDst) { break; case TSDB_DATA_TYPE_NCHAR: case TSDB_DATA_TYPE_VARCHAR: - case TSDB_DATA_TYPE_VARBINARY: - pDst->datum.p = taosMemoryMalloc(pSrc->node.resType.bytes + VARSTR_HEADER_SIZE + 1); + case TSDB_DATA_TYPE_VARBINARY: { + int32_t len = varDataTLen(pSrc->datum.p) + 1; + pDst->datum.p = taosMemoryCalloc(1, len); if (NULL == pDst->datum.p) { nodesDestroyNode(pDst); return NULL; } - memcpy(pDst->datum.p, pSrc->datum.p, pSrc->node.resType.bytes + VARSTR_HEADER_SIZE + 1); + memcpy(pDst->datum.p, pSrc->datum.p, len); break; + } case TSDB_DATA_TYPE_JSON: case TSDB_DATA_TYPE_DECIMAL: case TSDB_DATA_TYPE_BLOB: @@ -305,6 +307,7 @@ static SNode* logicNodeCopy(const SLogicNode* pSrc, SLogicNode* pDst) { CLONE_NODE_FIELD(pConditions); CLONE_NODE_LIST_FIELD(pChildren); COPY_SCALAR_FIELD(optimizedFlag); + COPY_SCALAR_FIELD(precision); return (SNode*)pDst; } @@ -328,6 +331,10 @@ static SNode* logicScanCopy(const SScanLogicNode* pSrc, SScanLogicNode* pDst) { COPY_SCALAR_FIELD(intervalUnit); COPY_SCALAR_FIELD(slidingUnit); CLONE_NODE_FIELD(pTagCond); + COPY_SCALAR_FIELD(triggerType); + COPY_SCALAR_FIELD(watermark); + COPY_SCALAR_FIELD(tsColId); + COPY_SCALAR_FIELD(filesFactor); return (SNode*)pDst; } @@ -366,7 +373,14 @@ static SNode* logicVnodeModifCopy(const SVnodeModifLogicNode* pSrc, SVnodeModifL static SNode* logicExchangeCopy(const SExchangeLogicNode* pSrc, SExchangeLogicNode* pDst) { COPY_BASE_OBJECT_FIELD(node, logicNodeCopy); COPY_SCALAR_FIELD(srcGroupId); - COPY_SCALAR_FIELD(precision); + return (SNode*)pDst; +} + +static SNode* logicMergeCopy(const SMergeLogicNode* pSrc, SMergeLogicNode* pDst) { + COPY_BASE_OBJECT_FIELD(node, logicNodeCopy); + CLONE_NODE_LIST_FIELD(pMergeKeys); + COPY_SCALAR_FIELD(numOfChannels); + COPY_SCALAR_FIELD(srcGroupId); return (SNode*)pDst; } @@ -384,6 +398,8 @@ static SNode* logicWindowCopy(const SWindowLogicNode* pSrc, SWindowLogicNode* pD CLONE_NODE_FIELD(pStateExpr); COPY_SCALAR_FIELD(triggerType); COPY_SCALAR_FIELD(watermark); + COPY_SCALAR_FIELD(filesFactor); + COPY_SCALAR_FIELD(stmInterAlgo); return (SNode*)pDst; } @@ -529,6 +545,8 @@ SNodeptr nodesCloneNode(const SNodeptr pNode) { return logicVnodeModifCopy((const SVnodeModifLogicNode*)pNode, (SVnodeModifLogicNode*)pDst); case QUERY_NODE_LOGIC_PLAN_EXCHANGE: return logicExchangeCopy((const SExchangeLogicNode*)pNode, (SExchangeLogicNode*)pDst); + case QUERY_NODE_LOGIC_PLAN_MERGE: + return logicMergeCopy((const SMergeLogicNode*)pNode, (SMergeLogicNode*)pDst); case QUERY_NODE_LOGIC_PLAN_WINDOW: return logicWindowCopy((const SWindowLogicNode*)pNode, 
(SWindowLogicNode*)pDst); case QUERY_NODE_LOGIC_PLAN_FILL: diff --git a/source/libs/nodes/src/nodesCodeFuncs.c b/source/libs/nodes/src/nodesCodeFuncs.c index 8887b9841ac8dc907d3a9a71360db20674278cfd..54754ace51558ed23baf56dd9155c5ca1a41dbe3 100644 --- a/source/libs/nodes/src/nodesCodeFuncs.c +++ b/source/libs/nodes/src/nodesCodeFuncs.c @@ -190,6 +190,8 @@ const char* nodesNodeName(ENodeType type) { return "LogicVnodeModif"; case QUERY_NODE_LOGIC_PLAN_EXCHANGE: return "LogicExchange"; + case QUERY_NODE_LOGIC_PLAN_MERGE: + return "LogicMerge"; case QUERY_NODE_LOGIC_PLAN_WINDOW: return "LogicWindow"; case QUERY_NODE_LOGIC_PLAN_FILL: @@ -220,12 +222,18 @@ const char* nodesNodeName(ENodeType type) { return "PhysiAgg"; case QUERY_NODE_PHYSICAL_PLAN_EXCHANGE: return "PhysiExchange"; + case QUERY_NODE_PHYSICAL_PLAN_MERGE: + return "PhysiMerge"; case QUERY_NODE_PHYSICAL_PLAN_SORT: return "PhysiSort"; case QUERY_NODE_PHYSICAL_PLAN_INTERVAL: return "PhysiInterval"; case QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERVAL: return "PhysiStreamInterval"; + case QUERY_NODE_PHYSICAL_PLAN_STREAM_FINAL_INTERVAL: + return "PhysiStreamFinalInterval"; + case QUERY_NODE_PHYSICAL_PLAN_STREAM_SEMI_INTERVAL: + return "PhysiStreamSemiInterval"; case QUERY_NODE_PHYSICAL_PLAN_FILL: return "PhysiFill"; case QUERY_NODE_PHYSICAL_PLAN_SESSION_WINDOW: @@ -596,7 +604,6 @@ static int32_t jsonToLogicProjectNode(const SJson* pJson, void* pObj) { } static const char* jkExchangeLogicPlanSrcGroupId = "SrcGroupId"; -static const char* jkExchangeLogicPlanSrcPrecision = "Precision"; static int32_t logicExchangeNodeToJson(const void* pObj, SJson* pJson) { const SExchangeLogicNode* pNode = (const SExchangeLogicNode*)pObj; @@ -605,9 +612,6 @@ static int32_t logicExchangeNodeToJson(const void* pObj, SJson* pJson) { if (TSDB_CODE_SUCCESS == code) { code = tjsonAddIntegerToObject(pJson, jkExchangeLogicPlanSrcGroupId, pNode->srcGroupId); } - if (TSDB_CODE_SUCCESS == code) { - code = tjsonAddIntegerToObject(pJson, jkExchangeLogicPlanSrcPrecision, pNode->precision); - } return code; } @@ -619,8 +623,144 @@ static int32_t jsonToLogicExchangeNode(const SJson* pJson, void* pObj) { if (TSDB_CODE_SUCCESS == code) { code = tjsonGetIntValue(pJson, jkExchangeLogicPlanSrcGroupId, &pNode->srcGroupId); } + + return code; +} + +static const char* jkMergeLogicPlanMergeKeys = "MergeKeys"; +static const char* jkMergeLogicPlanNumOfChannels = "NumOfChannels"; +static const char* jkMergeLogicPlanSrcGroupId = "SrcGroupId"; + +static int32_t logicMergeNodeToJson(const void* pObj, SJson* pJson) { + const SMergeLogicNode* pNode = (const SMergeLogicNode*)pObj; + + int32_t code = logicPlanNodeToJson(pObj, pJson); + if (TSDB_CODE_SUCCESS == code) { + code = nodeListToJson(pJson, jkMergeLogicPlanMergeKeys, pNode->pMergeKeys); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddIntegerToObject(pJson, jkMergeLogicPlanNumOfChannels, pNode->numOfChannels); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddIntegerToObject(pJson, jkMergeLogicPlanSrcGroupId, pNode->srcGroupId); + } + + return code; +} + +static int32_t jsonToLogicMergeNode(const SJson* pJson, void* pObj) { + SMergeLogicNode* pNode = (SMergeLogicNode*)pObj; + + int32_t code = jsonToLogicPlanNode(pJson, pObj); + if (TSDB_CODE_SUCCESS == code) { + code = jsonToNodeList(pJson, jkMergeLogicPlanMergeKeys, &pNode->pMergeKeys); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonGetIntValue(pJson, jkMergeLogicPlanNumOfChannels, &pNode->numOfChannels); + } + if (TSDB_CODE_SUCCESS == code) { + code = 
tjsonGetIntValue(pJson, jkMergeLogicPlanSrcGroupId, &pNode->srcGroupId); + } + + return code; +} + +static const char* jkWindowLogicPlanWinType = "WinType"; +static const char* jkWindowLogicPlanFuncs = "Funcs"; +static const char* jkWindowLogicPlanInterval = "Interval"; +static const char* jkWindowLogicPlanOffset = "Offset"; +static const char* jkWindowLogicPlanSliding = "Sliding"; +static const char* jkWindowLogicPlanIntervalUnit = "IntervalUnit"; +static const char* jkWindowLogicPlanSlidingUnit = "SlidingUnit"; +static const char* jkWindowLogicPlanSessionGap = "SessionGap"; +static const char* jkWindowLogicPlanTspk = "Tspk"; +static const char* jkWindowLogicPlanStateExpr = "StateExpr"; +static const char* jkWindowLogicPlanTriggerType = "TriggerType"; +static const char* jkWindowLogicPlanWatermark = "Watermark"; + +static int32_t logicWindowNodeToJson(const void* pObj, SJson* pJson) { + const SWindowLogicNode* pNode = (const SWindowLogicNode*)pObj; + + int32_t code = logicPlanNodeToJson(pObj, pJson); + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddIntegerToObject(pJson, jkWindowLogicPlanWinType, pNode->winType); + } + if (TSDB_CODE_SUCCESS == code) { + code = nodeListToJson(pJson, jkWindowLogicPlanFuncs, pNode->pFuncs); + } if (TSDB_CODE_SUCCESS == code) { - code = tjsonGetUTinyIntValue(pJson, jkExchangeLogicPlanSrcPrecision, &pNode->precision); + code = tjsonAddIntegerToObject(pJson, jkWindowLogicPlanInterval, pNode->interval); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddIntegerToObject(pJson, jkWindowLogicPlanOffset, pNode->offset); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddIntegerToObject(pJson, jkWindowLogicPlanSliding, pNode->sliding); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddIntegerToObject(pJson, jkWindowLogicPlanIntervalUnit, pNode->intervalUnit); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddIntegerToObject(pJson, jkWindowLogicPlanSlidingUnit, pNode->slidingUnit); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddIntegerToObject(pJson, jkWindowLogicPlanSessionGap, pNode->sessionGap); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddObject(pJson, jkWindowLogicPlanTspk, nodeToJson, pNode->pTspk); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddObject(pJson, jkWindowLogicPlanStateExpr, nodeToJson, pNode->pStateExpr); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddIntegerToObject(pJson, jkWindowLogicPlanTriggerType, pNode->triggerType); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddIntegerToObject(pJson, jkWindowLogicPlanWatermark, pNode->watermark); + } + + return code; +} + +static int32_t jsonToLogicWindowNode(const SJson* pJson, void* pObj) { + SWindowLogicNode* pNode = (SWindowLogicNode*)pObj; + + int32_t code = jsonToLogicPlanNode(pJson, pObj); + if (TSDB_CODE_SUCCESS == code) { + tjsonGetNumberValue(pJson, jkWindowLogicPlanWinType, pNode->winType, code); + } + if (TSDB_CODE_SUCCESS == code) { + code = jsonToNodeList(pJson, jkWindowLogicPlanFuncs, &pNode->pFuncs); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonGetBigIntValue(pJson, jkWindowLogicPlanInterval, &pNode->interval); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonGetBigIntValue(pJson, jkWindowLogicPlanOffset, &pNode->offset); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonGetBigIntValue(pJson, jkWindowLogicPlanSliding, &pNode->sliding); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonGetTinyIntValue(pJson, jkWindowLogicPlanIntervalUnit, &pNode->intervalUnit); + } + if (TSDB_CODE_SUCCESS == code) { + 
code = tjsonGetTinyIntValue(pJson, jkWindowLogicPlanSlidingUnit, &pNode->slidingUnit); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonGetBigIntValue(pJson, jkWindowLogicPlanSessionGap, &pNode->sessionGap); + } + if (TSDB_CODE_SUCCESS == code) { + code = jsonToNodeObject(pJson, jkWindowLogicPlanTspk, &pNode->pTspk); + } + if (TSDB_CODE_SUCCESS == code) { + code = jsonToNodeObject(pJson, jkWindowLogicPlanStateExpr, &pNode->pStateExpr); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonGetTinyIntValue(pJson, jkWindowLogicPlanTriggerType, &pNode->triggerType); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonGetBigIntValue(pJson, jkWindowLogicPlanWatermark, &pNode->watermark); } return code; @@ -1130,6 +1270,10 @@ static const char* jkTableScanPhysiPlanOffset = "Offset"; static const char* jkTableScanPhysiPlanSliding = "Sliding"; static const char* jkTableScanPhysiPlanIntervalUnit = "intervalUnit"; static const char* jkTableScanPhysiPlanSlidingUnit = "slidingUnit"; +static const char* jkTableScanPhysiPlanTriggerType = "triggerType"; +static const char* jkTableScanPhysiPlanWatermark = "watermark"; +static const char* jkTableScanPhysiPlanTsColId = "tsColId"; +static const char* jkTableScanPhysiPlanFilesFactor = "FilesFactor"; static int32_t physiTableScanNodeToJson(const void* pObj, SJson* pJson) { const STableScanPhysiNode* pNode = (const STableScanPhysiNode*)pObj; @@ -1171,6 +1315,18 @@ static int32_t physiTableScanNodeToJson(const void* pObj, SJson* pJson) { if (TSDB_CODE_SUCCESS == code) { code = tjsonAddIntegerToObject(pJson, jkTableScanPhysiPlanSlidingUnit, pNode->slidingUnit); } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddIntegerToObject(pJson, jkTableScanPhysiPlanTriggerType, pNode->triggerType); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddIntegerToObject(pJson, jkTableScanPhysiPlanWatermark, pNode->watermark); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddIntegerToObject(pJson, jkTableScanPhysiPlanTsColId, pNode->tsColId); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddDoubleToObject(pJson, jkTableScanPhysiPlanFilesFactor, pNode->filesFactor); + } return code; } @@ -1221,7 +1377,18 @@ static int32_t jsonToPhysiTableScanNode(const SJson* pJson, void* pObj) { tjsonGetNumberValue(pJson, jkTableScanPhysiPlanSlidingUnit, pNode->slidingUnit, code); ; } - + if (TSDB_CODE_SUCCESS == code) { + tjsonGetNumberValue(pJson, jkTableScanPhysiPlanTriggerType, pNode->triggerType, code); + } + if (TSDB_CODE_SUCCESS == code) { + tjsonGetNumberValue(pJson, jkTableScanPhysiPlanWatermark, pNode->watermark, code); + } + if (TSDB_CODE_SUCCESS == code) { + tjsonGetNumberValue(pJson, jkTableScanPhysiPlanTsColId, pNode->tsColId, code); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonGetDoubleValue(pJson, jkTableScanPhysiPlanFilesFactor, &pNode->filesFactor); + } return code; } @@ -1432,6 +1599,44 @@ static int32_t jsonToPhysiExchangeNode(const SJson* pJson, void* pObj) { return code; } +static const char* jkMergePhysiPlanMergeKeys = "MergeKeys"; +static const char* jkMergePhysiPlanNumOfChannels = "NumOfChannels"; +static const char* jkMergePhysiPlanSrcGroupId = "SrcGroupId"; + +static int32_t physiMergeNodeToJson(const void* pObj, SJson* pJson) { + const SMergePhysiNode* pNode = (const SMergePhysiNode*)pObj; + + int32_t code = physicPlanNodeToJson(pObj, pJson); + if (TSDB_CODE_SUCCESS == code) { + code = nodeListToJson(pJson, jkMergePhysiPlanMergeKeys, pNode->pMergeKeys); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddIntegerToObject(pJson, 
jkMergePhysiPlanNumOfChannels, pNode->numOfChannels); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddIntegerToObject(pJson, jkMergePhysiPlanSrcGroupId, pNode->srcGroupId); + } + + return code; +} + +static int32_t jsonToPhysiMergeNode(const SJson* pJson, void* pObj) { + SMergePhysiNode* pNode = (SMergePhysiNode*)pObj; + + int32_t code = jsonToPhysicPlanNode(pJson, pObj); + if (TSDB_CODE_SUCCESS == code) { + code = jsonToNodeList(pJson, jkMergePhysiPlanMergeKeys, &pNode->pMergeKeys); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonGetIntValue(pJson, jkMergePhysiPlanNumOfChannels, &pNode->numOfChannels); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonGetIntValue(pJson, jkMergePhysiPlanSrcGroupId, &pNode->srcGroupId); + } + + return code; +} + static const char* jkSortPhysiPlanExprs = "Exprs"; static const char* jkSortPhysiPlanSortKeys = "SortKeys"; static const char* jkSortPhysiPlanTargets = "Targets"; @@ -1475,6 +1680,7 @@ static const char* jkWindowPhysiPlanFuncs = "Funcs"; static const char* jkWindowPhysiPlanTsPk = "TsPk"; static const char* jkWindowPhysiPlanTriggerType = "TriggerType"; static const char* jkWindowPhysiPlanWatermark = "Watermark"; +static const char* jkWindowPhysiPlanFilesFactor = "FilesFactor"; static int32_t physiWindowNodeToJson(const void* pObj, SJson* pJson) { const SWinodwPhysiNode* pNode = (const SWinodwPhysiNode*)pObj; @@ -1495,6 +1701,9 @@ static int32_t physiWindowNodeToJson(const void* pObj, SJson* pJson) { if (TSDB_CODE_SUCCESS == code) { code = tjsonAddIntegerToObject(pJson, jkWindowPhysiPlanWatermark, pNode->watermark); } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddDoubleToObject(pJson, jkWindowPhysiPlanFilesFactor, pNode->filesFactor); + } return code; } @@ -1520,6 +1729,9 @@ static int32_t jsonToPhysiWindowNode(const SJson* pJson, void* pObj) { tjsonGetNumberValue(pJson, jkWindowPhysiPlanWatermark, pNode->watermark, code); ; } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonGetDoubleValue(pJson, jkWindowPhysiPlanFilesFactor, &pNode->filesFactor); + } return code; } @@ -2534,7 +2746,7 @@ static const char* jkSessionWindowTsPrimaryKey = "TsPrimaryKey"; static const char* jkSessionWindowGap = "Gap"; static int32_t sessionWindowNodeToJson(const void* pObj, SJson* pJson) { - const SSessionWindowNode * pNode = (const SSessionWindowNode*)pObj; + const SSessionWindowNode* pNode = (const SSessionWindowNode*)pObj; int32_t code = tjsonAddObject(pJson, jkSessionWindowTsPrimaryKey, nodeToJson, pNode->pCol); if (TSDB_CODE_SUCCESS == code) { @@ -2546,9 +2758,9 @@ static int32_t sessionWindowNodeToJson(const void* pObj, SJson* pJson) { static int32_t jsonToSessionWindowNode(const SJson* pJson, void* pObj) { SSessionWindowNode* pNode = (SSessionWindowNode*)pObj; - int32_t code = jsonToNodeObject(pJson, jkSessionWindowTsPrimaryKey, (SNode **)&pNode->pCol); + int32_t code = jsonToNodeObject(pJson, jkSessionWindowTsPrimaryKey, (SNode**)&pNode->pCol); if (TSDB_CODE_SUCCESS == code) { - code = jsonToNodeObject(pJson, jkSessionWindowGap, (SNode **)&pNode->pGap); + code = jsonToNodeObject(pJson, jkSessionWindowGap, (SNode**)&pNode->pGap); } return code; } @@ -2775,6 +2987,150 @@ static int32_t jsonToDownstreamSourceNode(const SJson* pJson, void* pObj) { return code; } +static const char* jkDatabaseOptionsBuffer = "Buffer"; +static const char* jkDatabaseOptionsCachelast = "Cachelast"; +static const char* jkDatabaseOptionsCompressionLevel = "CompressionLevel"; +static const char* jkDatabaseOptionsDaysPerFileNode = "DaysPerFileNode"; +static const char* 
jkDatabaseOptionsDaysPerFile = "DaysPerFile"; +static const char* jkDatabaseOptionsFsyncPeriod = "FsyncPeriod"; +static const char* jkDatabaseOptionsMaxRowsPerBlock = "MaxRowsPerBlock"; +static const char* jkDatabaseOptionsMinRowsPerBlock = "MinRowsPerBlock"; +static const char* jkDatabaseOptionsKeep = "Keep"; +static const char* jkDatabaseOptionsPages = "Pages"; +static const char* jkDatabaseOptionsPagesize = "Pagesize"; +static const char* jkDatabaseOptionsPrecision = "Precision"; +static const char* jkDatabaseOptionsReplica = "Replica"; +static const char* jkDatabaseOptionsStrict = "Strict"; +static const char* jkDatabaseOptionsWalLevel = "WalLevel"; +static const char* jkDatabaseOptionsNumOfVgroups = "NumOfVgroups"; +static const char* jkDatabaseOptionsSingleStable = "SingleStable"; +static const char* jkDatabaseOptionsRetentions = "Retentions"; +static const char* jkDatabaseOptionsSchemaless = "Schemaless"; + +static int32_t databaseOptionsToJson(const void* pObj, SJson* pJson) { + const SDatabaseOptions* pNode = (const SDatabaseOptions*)pObj; + + int32_t code = tjsonAddIntegerToObject(pJson, jkDatabaseOptionsBuffer, pNode->buffer); + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddIntegerToObject(pJson, jkDatabaseOptionsCachelast, pNode->cachelast); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddIntegerToObject(pJson, jkDatabaseOptionsCompressionLevel, pNode->compressionLevel); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddObject(pJson, jkDatabaseOptionsDaysPerFileNode, nodeToJson, pNode->pDaysPerFile); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddIntegerToObject(pJson, jkDatabaseOptionsDaysPerFile, pNode->daysPerFile); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddIntegerToObject(pJson, jkDatabaseOptionsFsyncPeriod, pNode->fsyncPeriod); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddIntegerToObject(pJson, jkDatabaseOptionsMaxRowsPerBlock, pNode->maxRowsPerBlock); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddIntegerToObject(pJson, jkDatabaseOptionsMinRowsPerBlock, pNode->minRowsPerBlock); + } + if (TSDB_CODE_SUCCESS == code) { + code = nodeListToJson(pJson, jkDatabaseOptionsKeep, pNode->pKeep); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddIntegerToObject(pJson, jkDatabaseOptionsPages, pNode->pages); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddIntegerToObject(pJson, jkDatabaseOptionsPagesize, pNode->pagesize); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddStringToObject(pJson, jkDatabaseOptionsPrecision, pNode->precisionStr); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddIntegerToObject(pJson, jkDatabaseOptionsReplica, pNode->replica); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddIntegerToObject(pJson, jkDatabaseOptionsStrict, pNode->strict); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddIntegerToObject(pJson, jkDatabaseOptionsWalLevel, pNode->walLevel); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddIntegerToObject(pJson, jkDatabaseOptionsNumOfVgroups, pNode->numOfVgroups); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddIntegerToObject(pJson, jkDatabaseOptionsSingleStable, pNode->singleStable); + } + if (TSDB_CODE_SUCCESS == code) { + code = nodeListToJson(pJson, jkDatabaseOptionsRetentions, pNode->pRetentions); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddIntegerToObject(pJson, jkDatabaseOptionsSchemaless, pNode->schemaless); + } + + return code; +} + +static int32_t jsonToDatabaseOptions(const SJson* pJson, void* pObj) { + 
SDatabaseOptions* pNode = (SDatabaseOptions*)pObj; + + int32_t code = tjsonGetIntValue(pJson, jkDatabaseOptionsBuffer, &pNode->buffer); + if (TSDB_CODE_SUCCESS == code) { + code = tjsonGetTinyIntValue(pJson, jkDatabaseOptionsCachelast, &pNode->cachelast); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonGetTinyIntValue(pJson, jkDatabaseOptionsCompressionLevel, &pNode->compressionLevel); + } + if (TSDB_CODE_SUCCESS == code) { + code = jsonToNodeObject(pJson, jkDatabaseOptionsDaysPerFileNode, (SNode**)&pNode->pDaysPerFile); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonGetIntValue(pJson, jkDatabaseOptionsDaysPerFile, &pNode->daysPerFile); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonGetIntValue(pJson, jkDatabaseOptionsFsyncPeriod, &pNode->fsyncPeriod); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonGetIntValue(pJson, jkDatabaseOptionsMaxRowsPerBlock, &pNode->maxRowsPerBlock); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonGetIntValue(pJson, jkDatabaseOptionsMinRowsPerBlock, &pNode->minRowsPerBlock); + } + if (TSDB_CODE_SUCCESS == code) { + code = jsonToNodeList(pJson, jkDatabaseOptionsKeep, &pNode->pKeep); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonGetIntValue(pJson, jkDatabaseOptionsPages, &pNode->pages); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonGetIntValue(pJson, jkDatabaseOptionsPagesize, &pNode->pagesize); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonGetStringValue(pJson, jkDatabaseOptionsPrecision, pNode->precisionStr); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonGetTinyIntValue(pJson, jkDatabaseOptionsReplica, &pNode->replica); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonGetTinyIntValue(pJson, jkDatabaseOptionsStrict, &pNode->strict); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonGetTinyIntValue(pJson, jkDatabaseOptionsWalLevel, &pNode->walLevel); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonGetIntValue(pJson, jkDatabaseOptionsNumOfVgroups, &pNode->numOfVgroups); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonGetTinyIntValue(pJson, jkDatabaseOptionsSingleStable, &pNode->singleStable); + } + if (TSDB_CODE_SUCCESS == code) { + code = jsonToNodeList(pJson, jkDatabaseOptionsRetentions, &pNode->pRetentions); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonGetTinyIntValue(pJson, jkDatabaseOptionsSchemaless, &pNode->schemaless); + } + + return code; +} + static const char* jkDataBlockDescDataBlockId = "DataBlockId"; static const char* jkDataBlockDescSlots = "Slots"; static const char* jkDataBlockTotalRowSize = "TotalRowSize"; @@ -2977,6 +3333,130 @@ static int32_t jsonToSelectStmt(const SJson* pJson, void* pObj) { return code; } +static const char* jkAlterDatabaseStmtDbName = "DbName"; +static const char* jkAlterDatabaseStmtOptions = "Options"; + +static int32_t alterDatabaseStmtToJson(const void* pObj, SJson* pJson) { + const SAlterDatabaseStmt* pNode = (const SAlterDatabaseStmt*)pObj; + + int32_t code = tjsonAddStringToObject(pJson, jkAlterDatabaseStmtDbName, pNode->dbName); + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddObject(pJson, jkAlterDatabaseStmtOptions, nodeToJson, pNode->pOptions); + } + + return code; +} + +static int32_t jsonToAlterDatabaseStmt(const SJson* pJson, void* pObj) { + SAlterDatabaseStmt* pNode = (SAlterDatabaseStmt*)pObj; + + int32_t code = tjsonGetStringValue(pJson, jkAlterDatabaseStmtDbName, pNode->dbName); + if (TSDB_CODE_SUCCESS == code) { + code = jsonToNodeObject(pJson, jkAlterDatabaseStmtOptions, (SNode**)&pNode->pOptions); + } + + return code; +} + 
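The databaseOptions and alterDatabaseStmt serializer/deserializer pairs above all follow one error-propagation idiom: each field is written or read only while `code` is still TSDB_CODE_SUCCESS, so the first failing tjson call latches its error into `code`, the remaining steps become no-ops, and the caller receives the first non-success code. Below is a minimal self-contained sketch of that idiom; the `encode*` stubs and the TSDB_CODE_FAILED value are hypothetical stand-ins for the real tjsonAdd*/tjsonGet* calls and error codes, not part of the tjson API.

#include <stdint.h>
#include <stdio.h>

#define TSDB_CODE_SUCCESS 0
#define TSDB_CODE_FAILED  (-1) /* hypothetical stand-in for a concrete error code */

/* Hypothetical per-field encoders standing in for the tjsonAdd*ToObject calls above. */
static int32_t encodeBuffer(void)   { return TSDB_CODE_SUCCESS; }
static int32_t encodeReplica(void)  { return TSDB_CODE_FAILED; } /* simulated failure */
static int32_t encodeWalLevel(void) { return TSDB_CODE_SUCCESS; }

static int32_t optionsToJsonSketch(void) {
  int32_t code = encodeBuffer();
  if (TSDB_CODE_SUCCESS == code) {
    code = encodeReplica(); /* fails: 'code' latches the error... */
  }
  if (TSDB_CODE_SUCCESS == code) {
    code = encodeWalLevel(); /* ...so this step is skipped */
  }
  return code; /* caller sees the first non-success code */
}

int main(void) {
  printf("code = %d\n", (int)optionsToJsonSketch()); /* prints: code = -1 */
  return 0;
}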
+static const char* jkAlterTableStmtDbName = "DbName"; +static const char* jkAlterTableStmtTableName = "TableName"; +static const char* jkAlterTableStmtAlterType = "AlterType"; +static const char* jkAlterTableStmtColName = "ColName"; +static const char* jkAlterTableStmtNewColName = "NewColName"; +static const char* jkAlterTableStmtOptions = "Options"; +static const char* jkAlterTableStmtNewDataType = "NewDataType"; +static const char* jkAlterTableStmtNewTagVal = "NewTagVal"; + +static int32_t alterTableStmtToJson(const void* pObj, SJson* pJson) { + const SAlterTableStmt* pNode = (const SAlterTableStmt*)pObj; + + int32_t code = tjsonAddStringToObject(pJson, jkAlterTableStmtDbName, pNode->dbName); + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddStringToObject(pJson, jkAlterTableStmtTableName, pNode->tableName); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddIntegerToObject(pJson, jkAlterTableStmtAlterType, pNode->alterType); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddStringToObject(pJson, jkAlterTableStmtColName, pNode->colName); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddStringToObject(pJson, jkAlterTableStmtNewColName, pNode->newColName); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddObject(pJson, jkAlterTableStmtOptions, nodeToJson, pNode->pOptions); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddObject(pJson, jkAlterTableStmtNewDataType, dataTypeToJson, &pNode->dataType); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddObject(pJson, jkAlterTableStmtNewTagVal, nodeToJson, pNode->pVal); + } + + return code; +} + +static int32_t jsonToAlterTableStmt(const SJson* pJson, void* pObj) { + SAlterTableStmt* pNode = (SAlterTableStmt*)pObj; + + int32_t code = tjsonGetStringValue(pJson, jkAlterTableStmtDbName, pNode->dbName); + if (TSDB_CODE_SUCCESS == code) { + code = tjsonGetStringValue(pJson, jkAlterTableStmtTableName, pNode->tableName); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonGetTinyIntValue(pJson, jkAlterTableStmtAlterType, &pNode->alterType); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonGetStringValue(pJson, jkAlterTableStmtColName, pNode->colName); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonGetStringValue(pJson, jkAlterTableStmtNewColName, pNode->newColName); + } + if (TSDB_CODE_SUCCESS == code) { + code = jsonToNodeObject(pJson, jkAlterTableStmtOptions, (SNode**)&pNode->pOptions); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonToObject(pJson, jkAlterTableStmtNewDataType, jsonToDataType, &pNode->dataType); + } + if (TSDB_CODE_SUCCESS == code) { + code = jsonToNodeObject(pJson, jkAlterTableStmtNewTagVal, (SNode**)&pNode->pVal); + } + + return code; +} + +static const char* jkAlterDnodeStmtDnodeId = "DnodeId"; +static const char* jkAlterDnodeStmtConfig = "Config"; +static const char* jkAlterDnodeStmtValue = "Value"; + +static int32_t alterDnodeStmtToJson(const void* pObj, SJson* pJson) { + const SAlterDnodeStmt* pNode = (const SAlterDnodeStmt*)pObj; + + int32_t code = tjsonAddIntegerToObject(pJson, jkAlterDnodeStmtDnodeId, pNode->dnodeId); + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddStringToObject(pJson, jkAlterDnodeStmtConfig, pNode->config); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddStringToObject(pJson, jkAlterDnodeStmtValue, pNode->value); + } + + return code; +} + +static int32_t jsonToAlterDnodeStmt(const SJson* pJson, void* pObj) { + SAlterDnodeStmt* pNode = (SAlterDnodeStmt*)pObj; + + int32_t code = tjsonGetIntValue(pJson, jkAlterDnodeStmtDnodeId, &pNode->dnodeId); +
if (TSDB_CODE_SUCCESS == code) { + code = tjsonGetStringValue(pJson, jkAlterDnodeStmtConfig, pNode->config); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonGetStringValue(pJson, jkAlterDnodeStmtValue, pNode->value); + } + + return code; +} + static const char* jkCreateTopicStmtTopicName = "TopicName"; static const char* jkCreateTopicStmtSubscribeDbName = "SubscribeDbName"; static const char* jkCreateTopicStmtIgnoreExists = "IgnoreExists"; @@ -2987,7 +3467,7 @@ static int32_t createTopicStmtToJson(const void* pObj, SJson* pJson) { int32_t code = tjsonAddStringToObject(pJson, jkCreateTopicStmtTopicName, pNode->topicName); if (TSDB_CODE_SUCCESS == code) { - code = tjsonAddStringToObject(pJson, jkCreateTopicStmtSubscribeDbName, pNode->subscribeDbName); + code = tjsonAddStringToObject(pJson, jkCreateTopicStmtSubscribeDbName, pNode->subDbName); } if (TSDB_CODE_SUCCESS == code) { code = tjsonAddBoolToObject(pJson, jkCreateTopicStmtIgnoreExists, pNode->ignoreExists); @@ -3004,7 +3484,7 @@ static int32_t jsonToCreateTopicStmt(const SJson* pJson, void* pObj) { int32_t code = tjsonGetStringValue(pJson, jkCreateTopicStmtTopicName, pNode->topicName); if (TSDB_CODE_SUCCESS == code) { - code = tjsonGetStringValue(pJson, jkCreateTopicStmtSubscribeDbName, pNode->subscribeDbName); + code = tjsonGetStringValue(pJson, jkCreateTopicStmtSubscribeDbName, pNode->subDbName); } if (TSDB_CODE_SUCCESS == code) { code = tjsonGetBoolValue(pJson, jkCreateTopicStmtIgnoreExists, &pNode->ignoreExists); @@ -3061,6 +3541,8 @@ static int32_t specificNodeToJson(const void* pObj, SJson* pJson) { break; case QUERY_NODE_DOWNSTREAM_SOURCE: return downstreamSourceNodeToJson(pObj, pJson); + case QUERY_NODE_DATABASE_OPTIONS: + return databaseOptionsToJson(pObj, pJson); case QUERY_NODE_LEFT_VALUE: return TSDB_CODE_SUCCESS; // SLeftValueNode has no fields to serialize. 
case QUERY_NODE_SET_OPERATOR: @@ -3069,8 +3551,17 @@ static int32_t specificNodeToJson(const void* pObj, SJson* pJson) { return selectStmtToJson(pObj, pJson); case QUERY_NODE_VNODE_MODIF_STMT: case QUERY_NODE_CREATE_DATABASE_STMT: + break; + case QUERY_NODE_ALTER_DATABASE_STMT: + return alterDatabaseStmtToJson(pObj, pJson); case QUERY_NODE_CREATE_TABLE_STMT: + break; + case QUERY_NODE_ALTER_TABLE_STMT: + return alterTableStmtToJson(pObj, pJson); case QUERY_NODE_USE_DATABASE_STMT: + break; + case QUERY_NODE_ALTER_DNODE_STMT: + return alterDnodeStmtToJson(pObj, pJson); case QUERY_NODE_SHOW_DATABASES_STMT: case QUERY_NODE_SHOW_TABLES_STMT: break; @@ -3088,6 +3579,10 @@ static int32_t specificNodeToJson(const void* pObj, SJson* pJson) { break; case QUERY_NODE_LOGIC_PLAN_EXCHANGE: return logicExchangeNodeToJson(pObj, pJson); + case QUERY_NODE_LOGIC_PLAN_MERGE: + return logicMergeNodeToJson(pObj, pJson); + case QUERY_NODE_LOGIC_PLAN_WINDOW: + return logicWindowNodeToJson(pObj, pJson); case QUERY_NODE_LOGIC_PLAN_FILL: return logicFillNodeToJson(pObj, pJson); case QUERY_NODE_LOGIC_PLAN_SORT: @@ -3114,10 +3609,14 @@ static int32_t specificNodeToJson(const void* pObj, SJson* pJson) { return physiAggNodeToJson(pObj, pJson); case QUERY_NODE_PHYSICAL_PLAN_EXCHANGE: return physiExchangeNodeToJson(pObj, pJson); + case QUERY_NODE_PHYSICAL_PLAN_MERGE: + return physiMergeNodeToJson(pObj, pJson); case QUERY_NODE_PHYSICAL_PLAN_SORT: return physiSortNodeToJson(pObj, pJson); case QUERY_NODE_PHYSICAL_PLAN_INTERVAL: case QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERVAL: + case QUERY_NODE_PHYSICAL_PLAN_STREAM_FINAL_INTERVAL: + case QUERY_NODE_PHYSICAL_PLAN_STREAM_SEMI_INTERVAL: return physiIntervalNodeToJson(pObj, pJson); case QUERY_NODE_PHYSICAL_PLAN_FILL: return physiFillNodeToJson(pObj, pJson); @@ -3177,12 +3676,20 @@ static int32_t jsonToSpecificNode(const SJson* pJson, void* pObj) { return jsonToSlotDescNode(pJson, pObj); case QUERY_NODE_DOWNSTREAM_SOURCE: return jsonToDownstreamSourceNode(pJson, pObj); + case QUERY_NODE_DATABASE_OPTIONS: + return jsonToDatabaseOptions(pJson, pObj); case QUERY_NODE_LEFT_VALUE: return TSDB_CODE_SUCCESS; // SLeftValueNode has no fields to deserialize. 
case QUERY_NODE_SET_OPERATOR: return jsonToSetOperator(pJson, pObj); case QUERY_NODE_SELECT_STMT: return jsonToSelectStmt(pJson, pObj); + case QUERY_NODE_ALTER_DATABASE_STMT: + return jsonToAlterDatabaseStmt(pJson, pObj); + case QUERY_NODE_ALTER_TABLE_STMT: + return jsonToAlterTableStmt(pJson, pObj); + case QUERY_NODE_ALTER_DNODE_STMT: + return jsonToAlterDnodeStmt(pJson, pObj); case QUERY_NODE_CREATE_TOPIC_STMT: return jsonToCreateTopicStmt(pJson, pObj); case QUERY_NODE_LOGIC_PLAN_SCAN: @@ -3191,6 +3698,10 @@ static int32_t jsonToSpecificNode(const SJson* pJson, void* pObj) { return jsonToLogicProjectNode(pJson, pObj); case QUERY_NODE_LOGIC_PLAN_EXCHANGE: return jsonToLogicExchangeNode(pJson, pObj); + case QUERY_NODE_LOGIC_PLAN_MERGE: + return jsonToLogicMergeNode(pJson, pObj); + case QUERY_NODE_LOGIC_PLAN_WINDOW: + return jsonToLogicWindowNode(pJson, pObj); case QUERY_NODE_LOGIC_PLAN_FILL: return jsonToLogicFillNode(pJson, pObj); case QUERY_NODE_LOGIC_PLAN_SORT: @@ -3217,10 +3728,14 @@ static int32_t jsonToSpecificNode(const SJson* pJson, void* pObj) { return jsonToPhysiAggNode(pJson, pObj); case QUERY_NODE_PHYSICAL_PLAN_EXCHANGE: return jsonToPhysiExchangeNode(pJson, pObj); + case QUERY_NODE_PHYSICAL_PLAN_MERGE: + return jsonToPhysiMergeNode(pJson, pObj); case QUERY_NODE_PHYSICAL_PLAN_SORT: return jsonToPhysiSortNode(pJson, pObj); case QUERY_NODE_PHYSICAL_PLAN_INTERVAL: case QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERVAL: + case QUERY_NODE_PHYSICAL_PLAN_STREAM_FINAL_INTERVAL: + case QUERY_NODE_PHYSICAL_PLAN_STREAM_SEMI_INTERVAL: return jsonToPhysiIntervalNode(pJson, pObj); case QUERY_NODE_PHYSICAL_PLAN_FILL: return jsonToPhysiFillNode(pJson, pObj); diff --git a/source/libs/nodes/src/nodesUtilFuncs.c b/source/libs/nodes/src/nodesUtilFuncs.c index e28844f2e16f07c57232b073f0052411d60a2d0f..76f15afc8e050ee57b4bc5b774f0fd1e57338972 100644 --- a/source/libs/nodes/src/nodesUtilFuncs.c +++ b/source/libs/nodes/src/nodesUtilFuncs.c @@ -86,8 +86,6 @@ SNodeptr nodesMakeNode(ENodeType type) { return makeNode(type, sizeof(SExplainOptions)); case QUERY_NODE_STREAM_OPTIONS: return makeNode(type, sizeof(SStreamOptions)); - case QUERY_NODE_TOPIC_OPTIONS: - return makeNode(type, sizeof(STopicOptions)); case QUERY_NODE_LEFT_VALUE: return makeNode(type, sizeof(SLeftValueNode)); case QUERY_NODE_SET_OPERATOR: @@ -222,6 +220,8 @@ SNodeptr nodesMakeNode(ENodeType type) { return makeNode(type, sizeof(SVnodeModifLogicNode)); case QUERY_NODE_LOGIC_PLAN_EXCHANGE: return makeNode(type, sizeof(SExchangeLogicNode)); + case QUERY_NODE_LOGIC_PLAN_MERGE: + return makeNode(type, sizeof(SMergeLogicNode)); case QUERY_NODE_LOGIC_PLAN_WINDOW: return makeNode(type, sizeof(SWindowLogicNode)); case QUERY_NODE_LOGIC_PLAN_FILL: @@ -252,12 +252,18 @@ SNodeptr nodesMakeNode(ENodeType type) { return makeNode(type, sizeof(SAggPhysiNode)); case QUERY_NODE_PHYSICAL_PLAN_EXCHANGE: return makeNode(type, sizeof(SExchangePhysiNode)); + case QUERY_NODE_PHYSICAL_PLAN_MERGE: + return makeNode(type, sizeof(SMergePhysiNode)); case QUERY_NODE_PHYSICAL_PLAN_SORT: return makeNode(type, sizeof(SSortPhysiNode)); case QUERY_NODE_PHYSICAL_PLAN_INTERVAL: return makeNode(type, sizeof(SIntervalPhysiNode)); case QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERVAL: return makeNode(type, sizeof(SStreamIntervalPhysiNode)); + case QUERY_NODE_PHYSICAL_PLAN_STREAM_FINAL_INTERVAL: + return makeNode(type, sizeof(SStreamFinalIntervalPhysiNode)); + case QUERY_NODE_PHYSICAL_PLAN_STREAM_SEMI_INTERVAL: + return makeNode(type, sizeof(SStreamSemiIntervalPhysiNode)); case 
QUERY_NODE_PHYSICAL_PLAN_FILL: return makeNode(type, sizeof(SFillPhysiNode)); case QUERY_NODE_PHYSICAL_PLAN_SESSION_WINDOW: diff --git a/source/libs/parser/inc/parAst.h b/source/libs/parser/inc/parAst.h index a1c304118bfcdc5078bf0a19b73a8bde17e3c0cf..7dd0ef2616bf3fda27192fa7099906348753c163 100644 --- a/source/libs/parser/inc/parAst.h +++ b/source/libs/parser/inc/parAst.h @@ -59,7 +59,6 @@ typedef enum EDatabaseOptionType { typedef enum ETableOptionType { TABLE_OPTION_COMMENT = 1, - TABLE_OPTION_DELAY, TABLE_OPTION_FILE_FACTOR, TABLE_OPTION_ROLLUP, TABLE_OPTION_TTL, @@ -168,7 +167,7 @@ SNode* createCreateComponentNodeStmt(SAstCreateContext* pCxt, ENodeType type, co SNode* createDropComponentNodeStmt(SAstCreateContext* pCxt, ENodeType type, const SToken* pDnodeId); SNode* createTopicOptions(SAstCreateContext* pCxt); SNode* createCreateTopicStmt(SAstCreateContext* pCxt, bool ignoreExists, const SToken* pTopicName, SNode* pQuery, - const SToken* pSubscribeDbName, SNode* pOptions); + const SToken* pSubDbName, SNode* pRealTable); SNode* createDropTopicStmt(SAstCreateContext* pCxt, bool ignoreNotExists, const SToken* pTopicName); SNode* createDropCGroupStmt(SAstCreateContext* pCxt, bool ignoreNotExists, const SToken* pCGroupId, const SToken* pTopicName); diff --git a/source/libs/parser/inc/parInt.h b/source/libs/parser/inc/parInt.h index 2ad1ebc1121d96f243fff9d55980b26bffdf6c04..8ec20cde5a07b54a5609a6097316e4ae5e538a83 100644 --- a/source/libs/parser/inc/parInt.h +++ b/source/libs/parser/inc/parInt.h @@ -24,12 +24,15 @@ extern "C" { #include "parUtil.h" #include "parser.h" +int32_t parseInsertSyntax(SParseContext* pContext, SQuery** pQuery); int32_t parseInsertSql(SParseContext* pContext, SQuery** pQuery); int32_t parse(SParseContext* pParseCxt, SQuery** pQuery); +int32_t collectMetaKey(SParseContext* pParseCxt, SQuery* pQuery); int32_t authenticate(SParseContext* pParseCxt, SQuery* pQuery); int32_t translate(SParseContext* pParseCxt, SQuery* pQuery); int32_t extractResultSchema(const SNode* pRoot, int32_t* numOfCols, SSchema** pSchema); int32_t calculateConstant(SParseContext* pParseCxt, SQuery* pQuery); +int32_t isNotSchemalessDb(SParseContext* pContext, char *dbName); #ifdef __cplusplus } diff --git a/source/libs/parser/inc/parUtil.h b/source/libs/parser/inc/parUtil.h index f82d29d27eeb2f8b80baf56ff7c065025abcc3b5..0351023f5bc8fdbae4476e810e66f7d8bfc4e71f 100644 --- a/source/libs/parser/inc/parUtil.h +++ b/source/libs/parser/inc/parUtil.h @@ -20,15 +20,16 @@ extern "C" { #endif +#include "catalog.h" #include "os.h" #include "query.h" -#define parserFatal(param, ...) qFatal("PARSER: " param, __VA_ARGS__) -#define parserError(param, ...) qError("PARSER: " param, __VA_ARGS__) -#define parserWarn(param, ...) qWarn("PARSER: " param, __VA_ARGS__) -#define parserInfo(param, ...) qInfo("PARSER: " param, __VA_ARGS__) -#define parserDebug(param, ...) qDebug("PARSER: " param, __VA_ARGS__) -#define parserTrace(param, ...) qTrace("PARSER: " param, __VA_ARGS__) +#define parserFatal(param, ...) qFatal("PARSER: " param, ##__VA_ARGS__) +#define parserError(param, ...) qError("PARSER: " param, ##__VA_ARGS__) +#define parserWarn(param, ...) qWarn("PARSER: " param, ##__VA_ARGS__) +#define parserInfo(param, ...) qInfo("PARSER: " param, ##__VA_ARGS__) +#define parserDebug(param, ...) qDebug("PARSER: " param, ##__VA_ARGS__) +#define parserTrace(param, ...) 
qTrace("PARSER: " param, ##__VA_ARGS__) #define PK_TS_COL_INTERNAL_NAME "_rowts" @@ -37,6 +38,16 @@ typedef struct SMsgBuf { char* buf; } SMsgBuf; +typedef struct SParseMetaCache { + SHashObj* pTableMeta; // key is tbFName, element is STableMeta* + SHashObj* pDbVgroup; // key is dbFName, element is SArray* + SHashObj* pTableVgroup; // key is tbFName, element is SVgroupInfo* + SHashObj* pDbCfg; // key is tbFName, element is SDbCfgInfo* + SHashObj* pDbInfo; // key is tbFName, element is SDbInfo* + SHashObj* pUserAuth; // key is SUserAuthInfo serialized string, element is bool indicating whether or not to pass + SHashObj* pUdf; // key is funcName, element is SFuncInfo* +} SParseMetaCache; + int32_t generateSyntaxErrMsg(SMsgBuf* pBuf, int32_t errCode, ...); int32_t buildInvalidOperationMsg(SMsgBuf* pMsgBuf, const char* msg); int32_t buildSyntaxErrMsg(SMsgBuf* pBuf, const char* additionalInfo, const char* sourceStr); @@ -47,10 +58,33 @@ int32_t getNumOfColumns(const STableMeta* pTableMeta); int32_t getNumOfTags(const STableMeta* pTableMeta); STableComInfo getTableInfo(const STableMeta* pTableMeta); STableMeta* tableMetaDup(const STableMeta* pTableMeta); -int parseJsontoTagData(const char* json, SKVRowBuilder* kvRowBuilder, SMsgBuf* errMsg, int16_t startColId); +int32_t parseJsontoTagData(const char* json, SArray* pTagVals, STag **ppTag, SMsgBuf* pMsgBuf); int32_t trimString(const char* src, int32_t len, char* dst, int32_t dlen); +int32_t buildCatalogReq(const SParseMetaCache* pMetaCache, SCatalogReq* pCatalogReq); +int32_t putMetaDataToCache(const SCatalogReq* pCatalogReq, const SMetaData* pMetaData, SParseMetaCache* pMetaCache); +int32_t reserveTableMetaInCache(int32_t acctId, const char* pDb, const char* pTable, SParseMetaCache* pMetaCache); +int32_t reserveTableMetaInCacheExt(const SName* pName, SParseMetaCache* pMetaCache); +int32_t reserveDbVgInfoInCache(int32_t acctId, const char* pDb, SParseMetaCache* pMetaCache); +int32_t reserveTableVgroupInCache(int32_t acctId, const char* pDb, const char* pTable, SParseMetaCache* pMetaCache); +int32_t reserveTableVgroupInCacheExt(const SName* pName, SParseMetaCache* pMetaCache); +int32_t reserveDbVgVersionInCache(int32_t acctId, const char* pDb, SParseMetaCache* pMetaCache); +int32_t reserveDbCfgInCache(int32_t acctId, const char* pDb, SParseMetaCache* pMetaCache); +int32_t reserveUserAuthInCache(int32_t acctId, const char* pUser, const char* pDb, AUTH_TYPE type, + SParseMetaCache* pMetaCache); +int32_t reserveUserAuthInCacheExt(const char* pUser, const SName* pName, AUTH_TYPE type, SParseMetaCache* pMetaCache); +int32_t reserveUdfInCache(const char* pFunc, SParseMetaCache* pMetaCache); +int32_t getTableMetaFromCache(SParseMetaCache* pMetaCache, const SName* pName, STableMeta** pMeta); +int32_t getDbVgInfoFromCache(SParseMetaCache* pMetaCache, const char* pDbFName, SArray** pVgInfo); +int32_t getTableVgroupFromCache(SParseMetaCache* pMetaCache, const SName* pName, SVgroupInfo* pVgroup); +int32_t getDbVgVersionFromCache(SParseMetaCache* pMetaCache, const char* pDbFName, int32_t* pVersion, int64_t* pDbId, + int32_t* pTableNum); +int32_t getDbCfgFromCache(SParseMetaCache* pMetaCache, const char* pDbFName, SDbCfgInfo* pInfo); +int32_t getUserAuthFromCache(SParseMetaCache* pMetaCache, const char* pUser, const char* pDbFName, AUTH_TYPE type, + bool* pPass); +int32_t getUdfInfoFromCache(SParseMetaCache* pMetaCache, const char* pFunc, SFuncInfo* pInfo); + #ifdef __cplusplus } #endif diff --git a/source/libs/parser/inc/sql.y b/source/libs/parser/inc/sql.y 
index 1fb60f83a5a822e627f8cbdf54b3a1e42c4daa5d..6c090a07901c1d96a567961b8a2ec3daabaaedf8 100644 --- a/source/libs/parser/inc/sql.y +++ b/source/libs/parser/inc/sql.y @@ -15,11 +15,15 @@ #include #include +#define ALLOW_FORBID_FUNC + #include "functionMgt.h" #include "nodes.h" #include "parToken.h" #include "ttokendef.h" #include "parAst.h" + +#define YYSTACKDEPTH 0 } %syntax_error { @@ -313,7 +317,6 @@ tags_def(A) ::= TAGS NK_LP column_def_list(B) NK_RP. table_options(A) ::= . { A = createDefaultTableOptions(pCxt); } table_options(A) ::= table_options(B) COMMENT NK_STRING(C). { A = setTableOption(pCxt, B, TABLE_OPTION_COMMENT, &C); } -table_options(A) ::= table_options(B) DELAY NK_INTEGER(C). { A = setTableOption(pCxt, B, TABLE_OPTION_DELAY, &C); } table_options(A) ::= table_options(B) FILE_FACTOR NK_FLOAT(C). { A = setTableOption(pCxt, B, TABLE_OPTION_FILE_FACTOR, &C); } table_options(A) ::= table_options(B) ROLLUP NK_LP func_name_list(C) NK_RP. { A = setTableOption(pCxt, B, TABLE_OPTION_ROLLUP, C); } table_options(A) ::= table_options(B) TTL NK_INTEGER(C). { A = setTableOption(pCxt, B, TABLE_OPTION_TTL, &C); } @@ -403,17 +406,12 @@ func_list(A) ::= func_list(B) NK_COMMA func(C). func(A) ::= function_name(B) NK_LP expression_list(C) NK_RP. { A = createFunctionNode(pCxt, &B, C); } /************************************************ create/drop topic ***************************************************/ -cmd ::= CREATE TOPIC not_exists_opt(A) - topic_name(B) topic_options(D) AS query_expression(C). { pCxt->pRootNode = createCreateTopicStmt(pCxt, A, &B, C, NULL, D); } -cmd ::= CREATE TOPIC not_exists_opt(A) - topic_name(B) topic_options(D) AS db_name(C). { pCxt->pRootNode = createCreateTopicStmt(pCxt, A, &B, NULL, &C, D); } +cmd ::= CREATE TOPIC not_exists_opt(A) topic_name(B) AS query_expression(C). { pCxt->pRootNode = createCreateTopicStmt(pCxt, A, &B, C, NULL, NULL); } +cmd ::= CREATE TOPIC not_exists_opt(A) topic_name(B) AS DATABASE db_name(C). { pCxt->pRootNode = createCreateTopicStmt(pCxt, A, &B, NULL, &C, NULL); } +cmd ::= CREATE TOPIC not_exists_opt(A) topic_name(B) + AS STABLE full_table_name(C). { pCxt->pRootNode = createCreateTopicStmt(pCxt, A, &B, NULL, NULL, C); } cmd ::= DROP TOPIC exists_opt(A) topic_name(B). { pCxt->pRootNode = createDropTopicStmt(pCxt, A, &B); } -cmd ::= DROP CGROUP exists_opt(A) cgroup_name(B) ON topic_name(C). { pCxt->pRootNode = createDropCGroupStmt(pCxt, A, &B, &C); } - -topic_options(A) ::= . { A = createTopicOptions(pCxt); } -topic_options(A) ::= topic_options(B) WITH TABLE. { ((STopicOptions*)B)->withTable = true; A = B; } -topic_options(A) ::= topic_options(B) WITH SCHEMA. { ((STopicOptions*)B)->withSchema = true; A = B; } -topic_options(A) ::= topic_options(B) WITH TAG. { ((STopicOptions*)B)->withTag = true; A = B; } +cmd ::= DROP CONSUMER GROUP exists_opt(A) cgroup_name(B) ON topic_name(C). { pCxt->pRootNode = createDropCGroupStmt(pCxt, A, &B, &C); } /************************************************ desc/describe *******************************************************/ cmd ::= DESC full_table_name(A). 
{ pCxt->pRootNode = createDescribeStmt(pCxt, A); } diff --git a/source/libs/parser/src/parAstCreater.c b/source/libs/parser/src/parAstCreater.c index 6b4c5f0ce5b0d5afb1ef3b4676fb30dc7cf822b8..72a88548d2270d6d4776e2614bf05fc8c2b7ebf6 100644 --- a/source/libs/parser/src/parAstCreater.c +++ b/source/libs/parser/src/parAstCreater.c @@ -39,6 +39,7 @@ SToken nil_token = {.type = TK_NK_NIL, .n = 0, .z = NULL}; void initAstCreateContext(SParseContext* pParseCxt, SAstCreateContext* pCxt) { + memset(pCxt, 0, sizeof(SAstCreateContext)); pCxt->pQueryCxt = pParseCxt; pCxt->msgBuf.buf = pParseCxt->pMsg; pCxt->msgBuf.len = pParseCxt->msgLen; @@ -856,7 +857,6 @@ SNode* createDefaultTableOptions(SAstCreateContext* pCxt) { CHECK_PARSER_STATUS(pCxt); STableOptions* pOptions = nodesMakeNode(QUERY_NODE_TABLE_OPTIONS); CHECK_OUT_OF_MEM(pOptions); - pOptions->delay = TSDB_DEFAULT_ROLLUP_DELAY; pOptions->filesFactor = TSDB_DEFAULT_ROLLUP_FILE_FACTOR; pOptions->ttl = TSDB_DEFAULT_TABLE_TTL; return (SNode*)pOptions; @@ -866,7 +866,6 @@ SNode* createAlterTableOptions(SAstCreateContext* pCxt) { CHECK_PARSER_STATUS(pCxt); STableOptions* pOptions = nodesMakeNode(QUERY_NODE_TABLE_OPTIONS); CHECK_OUT_OF_MEM(pOptions); - pOptions->delay = -1; pOptions->filesFactor = -1; pOptions->ttl = -1; return (SNode*)pOptions; @@ -881,11 +880,8 @@ SNode* setTableOption(SAstCreateContext* pCxt, SNode* pOptions, ETableOptionType sizeof(((STableOptions*)pOptions)->comment)); } break; - case TABLE_OPTION_DELAY: - ((STableOptions*)pOptions)->delay = taosStr2Int32(((SToken*)pVal)->z, NULL, 10); - break; case TABLE_OPTION_FILE_FACTOR: - ((STableOptions*)pOptions)->filesFactor = taosStr2Float(((SToken*)pVal)->z, NULL); + ((STableOptions*)pOptions)->filesFactor = taosStr2Double(((SToken*)pVal)->z, NULL); break; case TABLE_OPTION_ROLLUP: ((STableOptions*)pOptions)->pRollupFuncs = pVal; @@ -1265,28 +1261,22 @@ SNode* createDropComponentNodeStmt(SAstCreateContext* pCxt, ENodeType type, cons return (SNode*)pStmt; } -SNode* createTopicOptions(SAstCreateContext* pCxt) { - CHECK_PARSER_STATUS(pCxt); - STopicOptions* pOptions = nodesMakeNode(QUERY_NODE_TOPIC_OPTIONS); - CHECK_OUT_OF_MEM(pOptions); - pOptions->withTable = false; - pOptions->withSchema = false; - pOptions->withTag = false; - return (SNode*)pOptions; -} - SNode* createCreateTopicStmt(SAstCreateContext* pCxt, bool ignoreExists, const SToken* pTopicName, SNode* pQuery, - const SToken* pSubscribeDbName, SNode* pOptions) { + const SToken* pSubDbName, SNode* pRealTable) { CHECK_PARSER_STATUS(pCxt); SCreateTopicStmt* pStmt = nodesMakeNode(QUERY_NODE_CREATE_TOPIC_STMT); CHECK_OUT_OF_MEM(pStmt); strncpy(pStmt->topicName, pTopicName->z, pTopicName->n); pStmt->ignoreExists = ignoreExists; - pStmt->pQuery = pQuery; - if (NULL != pSubscribeDbName) { - strncpy(pStmt->subscribeDbName, pSubscribeDbName->z, pSubscribeDbName->n); + if (NULL != pRealTable) { + strcpy(pStmt->subDbName, ((SRealTableNode*)pRealTable)->table.dbName); + strcpy(pStmt->subSTbName, ((SRealTableNode*)pRealTable)->table.tableName); + nodesDestroyNode(pRealTable); + } else if (NULL != pSubDbName) { + strncpy(pStmt->subDbName, pSubDbName->z, pSubDbName->n); + } else { + pStmt->pQuery = pQuery; } - pStmt->pOptions = (STopicOptions*)pOptions; return (SNode*)pStmt; } diff --git a/source/libs/parser/src/parAstParser.c b/source/libs/parser/src/parAstParser.c index 5b59d1c080978217577581184834595432d6edc7..68c9684c97ac8eba986a339b7618e51bc02d7d79 100644 --- a/source/libs/parser/src/parAstParser.c +++ b/source/libs/parser/src/parAstParser.c 
@@ -13,11 +13,12 @@ * along with this program. If not, see . */ +#include "functionMgt.h" #include "os.h" -#include "parInt.h" - #include "parAst.h" +#include "parInt.h" #include "parToken.h" +#include "systable.h" typedef void* (*FMalloc)(size_t); typedef void (*FFree)(void*); @@ -86,3 +87,317 @@ abort_parse: taosArrayDestroy(cxt.pPlaceholderValues); return cxt.errCode; } + +typedef struct SCollectMetaKeyCxt { + SParseContext* pParseCxt; + SParseMetaCache* pMetaCache; +} SCollectMetaKeyCxt; + +static void destroyCollectMetaKeyCxt(SCollectMetaKeyCxt* pCxt) { + if (NULL != pCxt->pMetaCache) { + // TODO + } +} + +typedef struct SCollectMetaKeyFromExprCxt { + SCollectMetaKeyCxt* pComCxt; + int32_t errCode; +} SCollectMetaKeyFromExprCxt; + +static int32_t collectMetaKeyFromQuery(SCollectMetaKeyCxt* pCxt, SNode* pStmt); + +static EDealRes collectMetaKeyFromFunction(SCollectMetaKeyFromExprCxt* pCxt, SFunctionNode* pFunc) { + if (fmIsBuiltinFunc(pFunc->functionName)) { + return TSDB_CODE_SUCCESS; + } + return reserveUdfInCache(pFunc->functionName, pCxt->pComCxt->pMetaCache); +} + +static EDealRes collectMetaKeyFromRealTable(SCollectMetaKeyFromExprCxt* pCxt, SRealTableNode* pRealTable) { + pCxt->errCode = reserveTableMetaInCache(pCxt->pComCxt->pParseCxt->acctId, pRealTable->table.dbName, + pRealTable->table.tableName, pCxt->pComCxt->pMetaCache); + if (TSDB_CODE_SUCCESS == pCxt->errCode) { + pCxt->errCode = reserveTableVgroupInCache(pCxt->pComCxt->pParseCxt->acctId, pRealTable->table.dbName, + pRealTable->table.tableName, pCxt->pComCxt->pMetaCache); + } + if (TSDB_CODE_SUCCESS == pCxt->errCode) { + pCxt->errCode = reserveUserAuthInCache(pCxt->pComCxt->pParseCxt->acctId, pCxt->pComCxt->pParseCxt->pUser, + pRealTable->table.dbName, AUTH_TYPE_READ, pCxt->pComCxt->pMetaCache); + } + return TSDB_CODE_SUCCESS == pCxt->errCode ? DEAL_RES_CONTINUE : DEAL_RES_ERROR; +} + +static EDealRes collectMetaKeyFromTempTable(SCollectMetaKeyFromExprCxt* pCxt, STempTableNode* pTempTable) { + pCxt->errCode = collectMetaKeyFromQuery(pCxt->pComCxt, pTempTable->pSubquery); + return TSDB_CODE_SUCCESS == pCxt->errCode ? 
DEAL_RES_CONTINUE : DEAL_RES_ERROR; +} + +static EDealRes collectMetaKeyFromExprImpl(SNode* pNode, void* pContext) { + SCollectMetaKeyFromExprCxt* pCxt = pContext; + switch (nodeType(pNode)) { + case QUERY_NODE_FUNCTION: + return collectMetaKeyFromFunction(pCxt, (SFunctionNode*)pNode); + case QUERY_NODE_REAL_TABLE: + return collectMetaKeyFromRealTable(pCxt, (SRealTableNode*)pNode); + case QUERY_NODE_TEMP_TABLE: + return collectMetaKeyFromTempTable(pCxt, (STempTableNode*)pNode); + default: + break; + } + return DEAL_RES_CONTINUE; +} + +static int32_t collectMetaKeyFromExprs(SCollectMetaKeyCxt* pCxt, SNodeList* pList) { + SCollectMetaKeyFromExprCxt cxt = {.pComCxt = pCxt, .errCode = TSDB_CODE_SUCCESS}; + nodesWalkExprs(pList, collectMetaKeyFromExprImpl, &cxt); + return cxt.errCode; +} + +static int32_t collectMetaKeyFromSetOperator(SCollectMetaKeyCxt* pCxt, SSetOperator* pStmt) { + int32_t code = collectMetaKeyFromQuery(pCxt, pStmt->pLeft); + if (TSDB_CODE_SUCCESS == code) { + code = collectMetaKeyFromQuery(pCxt, pStmt->pRight); + } + if (TSDB_CODE_SUCCESS == code) { + code = collectMetaKeyFromExprs(pCxt, pStmt->pOrderByList); + } + return code; +} + +static int32_t collectMetaKeyFromSelect(SCollectMetaKeyCxt* pCxt, SSelectStmt* pStmt) { + SCollectMetaKeyFromExprCxt cxt = {.pComCxt = pCxt, .errCode = TSDB_CODE_SUCCESS}; + nodesWalkSelectStmt(pStmt, SQL_CLAUSE_FROM, collectMetaKeyFromExprImpl, &cxt); + return cxt.errCode; +} + +static int32_t collectMetaKeyFromCreateTable(SCollectMetaKeyCxt* pCxt, SCreateTableStmt* pStmt) { + if (NULL == pStmt->pTags) { + return reserveTableVgroupInCache(pCxt->pParseCxt->acctId, pStmt->dbName, pStmt->tableName, pCxt->pMetaCache); + } else { + return reserveDbCfgInCache(pCxt->pParseCxt->acctId, pStmt->dbName, pCxt->pMetaCache); + } +} + +static int32_t collectMetaKeyFromCreateMultiTable(SCollectMetaKeyCxt* pCxt, SCreateMultiTableStmt* pStmt) { + int32_t code = TSDB_CODE_SUCCESS; + SNode* pNode = NULL; + FOREACH(pNode, pStmt->pSubTables) { + SCreateSubTableClause* pClause = (SCreateSubTableClause*)pNode; + code = + reserveTableMetaInCache(pCxt->pParseCxt->acctId, pClause->useDbName, pClause->useTableName, pCxt->pMetaCache); + if (TSDB_CODE_SUCCESS == code) { + code = reserveTableVgroupInCache(pCxt->pParseCxt->acctId, pClause->dbName, pClause->tableName, pCxt->pMetaCache); + } + if (TSDB_CODE_SUCCESS != code) { + break; + } + } + return code; +} + +static int32_t collectMetaKeyFromAlterTable(SCollectMetaKeyCxt* pCxt, SAlterTableStmt* pStmt) { + int32_t code = reserveTableMetaInCache(pCxt->pParseCxt->acctId, pStmt->dbName, pStmt->tableName, pCxt->pMetaCache); + if (TSDB_CODE_SUCCESS == code) { + code = reserveTableVgroupInCache(pCxt->pParseCxt->acctId, pStmt->dbName, pStmt->tableName, pCxt->pMetaCache); + } + return code; +} + +static int32_t collectMetaKeyFromUseDatabase(SCollectMetaKeyCxt* pCxt, SUseDatabaseStmt* pStmt) { + return reserveDbVgVersionInCache(pCxt->pParseCxt->acctId, pStmt->dbName, pCxt->pMetaCache); +} + +static int32_t collectMetaKeyFromCreateIndex(SCollectMetaKeyCxt* pCxt, SCreateIndexStmt* pStmt) { + int32_t code = TSDB_CODE_SUCCESS; + if (INDEX_TYPE_SMA == pStmt->indexType) { + code = reserveTableMetaInCache(pCxt->pParseCxt->acctId, pCxt->pParseCxt->db, pStmt->tableName, pCxt->pMetaCache); + if (TSDB_CODE_SUCCESS == code) { + code = + reserveTableVgroupInCache(pCxt->pParseCxt->acctId, pCxt->pParseCxt->db, pStmt->tableName, pCxt->pMetaCache); + } + } + return code; +} + +static int32_t collectMetaKeyFromCreateTopic(SCollectMetaKeyCxt* pCxt, 
SCreateTopicStmt* pStmt) { + if (NULL != pStmt->pQuery) { + return collectMetaKeyFromQuery(pCxt, pStmt->pQuery); + } + return TSDB_CODE_SUCCESS; +} + +static int32_t collectMetaKeyFromExplain(SCollectMetaKeyCxt* pCxt, SExplainStmt* pStmt) { + return collectMetaKeyFromQuery(pCxt, pStmt->pQuery); +} + +static int32_t collectMetaKeyFromCreateStream(SCollectMetaKeyCxt* pCxt, SCreateStreamStmt* pStmt) { + return collectMetaKeyFromQuery(pCxt, pStmt->pQuery); +} + +static int32_t collectMetaKeyFromShowDnodes(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) { + return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_DNODES, + pCxt->pMetaCache); +} + +static int32_t collectMetaKeyFromShowMnodes(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) { + return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_MNODES, + pCxt->pMetaCache); +} + +static int32_t collectMetaKeyFromShowModules(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) { + return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_MODULES, + pCxt->pMetaCache); +} + +static int32_t collectMetaKeyFromShowQnodes(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) { + return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_QNODES, + pCxt->pMetaCache); +} + +static int32_t collectMetaKeyFromShowSnodes(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) { + return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_SNODES, + pCxt->pMetaCache); +} + +static int32_t collectMetaKeyFromShowBnodes(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) { + return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_BNODES, + pCxt->pMetaCache); +} + +static int32_t collectMetaKeyFromShowDatabases(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) { + return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_USER_DATABASES, + pCxt->pMetaCache); +} + +static int32_t collectMetaKeyFromShowFunctions(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) { + return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_USER_FUNCTIONS, + pCxt->pMetaCache); +} + +static int32_t collectMetaKeyFromShowIndexes(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) { + return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_USER_INDEXES, + pCxt->pMetaCache); +} + +static int32_t collectMetaKeyFromShowStables(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) { + return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_USER_STABLES, + pCxt->pMetaCache); +} + +static int32_t collectMetaKeyFromShowStreams(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) { + return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_PERFORMANCE_SCHEMA_DB, TSDB_PERFS_TABLE_STREAMS, + pCxt->pMetaCache); +} + +static int32_t collectMetaKeyFromShowTables(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) { + int32_t code = reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, + TSDB_INS_TABLE_USER_TABLES, pCxt->pMetaCache); + if (TSDB_CODE_SUCCESS == code) { + if (NULL != pStmt->pDbName) { + code = reserveDbVgInfoInCache(pCxt->pParseCxt->acctId, ((SValueNode*)pStmt->pDbName)->literal, pCxt->pMetaCache); + } else { + code = reserveDbVgInfoInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, pCxt->pMetaCache); + } + } + return code; +} + 
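
Editorial note: every `collectMetaKeyFromShow*` handler above reserves the matching `information_schema`/`performance_schema` table before any catalog round trip happens. The underlying two-phase idea — reserve keys during a single AST walk, batch-resolve them, then read from cache during translation — in a minimal standalone form; the array-backed key set below is a stand-in for `SParseMetaCache`, not the real implementation:

```c
#include <stdio.h>
#include <string.h>

#define MAX_KEYS 16

typedef struct SKeySet {
  char keys[MAX_KEYS][96];
  int  num;
} SKeySet;

/* Pass 1: record a (db, table) key once; duplicates are deduplicated. */
static int reserveKey(SKeySet* pSet, const char* pDb, const char* pTbl) {
  char key[96];
  snprintf(key, sizeof(key), "%s.%s", pDb, pTbl);
  for (int i = 0; i < pSet->num; ++i) {
    if (0 == strcmp(pSet->keys[i], key)) return 0; /* already reserved */
  }
  if (pSet->num >= MAX_KEYS) return -1;
  strcpy(pSet->keys[pSet->num++], key);
  return 0;
}

int main(void) {
  SKeySet set = {0};
  /* SHOW USERS and SHOW VGROUPS both resolve to information_schema tables. */
  reserveKey(&set, "information_schema", "user_users");
  reserveKey(&set, "information_schema", "vgroups");
  reserveKey(&set, "information_schema", "user_users"); /* no-op: duplicate */
  for (int i = 0; i < set.num; ++i) {
    printf("batch fetch meta for: %s\n", set.keys[i]); /* pass 2 */
  }
  return 0;
}
```
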
+static int32_t collectMetaKeyFromShowUsers(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) { + return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_USER_USERS, + pCxt->pMetaCache); +} + +static int32_t collectMetaKeyFromShowLicence(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) { + return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_LICENCES, + pCxt->pMetaCache); +} + +static int32_t collectMetaKeyFromShowVgroups(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) { + return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_VGROUPS, + pCxt->pMetaCache); +} + +static int32_t collectMetaKeyFromShowTopics(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) { + return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_PERFORMANCE_SCHEMA_DB, TSDB_PERFS_TABLE_TOPICS, + pCxt->pMetaCache); +} + +static int32_t collectMetaKeyFromShowTransactions(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) { + return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_PERFORMANCE_SCHEMA_DB, TSDB_PERFS_TABLE_TRANS, + pCxt->pMetaCache); +} + +static int32_t collectMetaKeyFromQuery(SCollectMetaKeyCxt* pCxt, SNode* pStmt) { + switch (nodeType(pStmt)) { + case QUERY_NODE_SET_OPERATOR: + return collectMetaKeyFromSetOperator(pCxt, (SSetOperator*)pStmt); + case QUERY_NODE_SELECT_STMT: + return collectMetaKeyFromSelect(pCxt, (SSelectStmt*)pStmt); + case QUERY_NODE_CREATE_TABLE_STMT: + return collectMetaKeyFromCreateTable(pCxt, (SCreateTableStmt*)pStmt); + case QUERY_NODE_CREATE_MULTI_TABLE_STMT: + return collectMetaKeyFromCreateMultiTable(pCxt, (SCreateMultiTableStmt*)pStmt); + case QUERY_NODE_ALTER_TABLE_STMT: + return collectMetaKeyFromAlterTable(pCxt, (SAlterTableStmt*)pStmt); + case QUERY_NODE_USE_DATABASE_STMT: + return collectMetaKeyFromUseDatabase(pCxt, (SUseDatabaseStmt*)pStmt); + case QUERY_NODE_CREATE_INDEX_STMT: + return collectMetaKeyFromCreateIndex(pCxt, (SCreateIndexStmt*)pStmt); + case QUERY_NODE_CREATE_TOPIC_STMT: + return collectMetaKeyFromCreateTopic(pCxt, (SCreateTopicStmt*)pStmt); + case QUERY_NODE_EXPLAIN_STMT: + return collectMetaKeyFromExplain(pCxt, (SExplainStmt*)pStmt); + case QUERY_NODE_CREATE_STREAM_STMT: + return collectMetaKeyFromCreateStream(pCxt, (SCreateStreamStmt*)pStmt); + case QUERY_NODE_SHOW_DNODES_STMT: + return collectMetaKeyFromShowDnodes(pCxt, (SShowStmt*)pStmt); + case QUERY_NODE_SHOW_MNODES_STMT: + return collectMetaKeyFromShowMnodes(pCxt, (SShowStmt*)pStmt); + case QUERY_NODE_SHOW_MODULES_STMT: + return collectMetaKeyFromShowModules(pCxt, (SShowStmt*)pStmt); + case QUERY_NODE_SHOW_QNODES_STMT: + return collectMetaKeyFromShowQnodes(pCxt, (SShowStmt*)pStmt); + case QUERY_NODE_SHOW_SNODES_STMT: + return collectMetaKeyFromShowSnodes(pCxt, (SShowStmt*)pStmt); + case QUERY_NODE_SHOW_BNODES_STMT: + return collectMetaKeyFromShowBnodes(pCxt, (SShowStmt*)pStmt); + case QUERY_NODE_SHOW_DATABASES_STMT: + return collectMetaKeyFromShowDatabases(pCxt, (SShowStmt*)pStmt); + case QUERY_NODE_SHOW_FUNCTIONS_STMT: + return collectMetaKeyFromShowFunctions(pCxt, (SShowStmt*)pStmt); + case QUERY_NODE_SHOW_INDEXES_STMT: + return collectMetaKeyFromShowIndexes(pCxt, (SShowStmt*)pStmt); + case QUERY_NODE_SHOW_STABLES_STMT: + return collectMetaKeyFromShowStables(pCxt, (SShowStmt*)pStmt); + case QUERY_NODE_SHOW_STREAMS_STMT: + return collectMetaKeyFromShowStreams(pCxt, (SShowStmt*)pStmt); + case QUERY_NODE_SHOW_TABLES_STMT: + return collectMetaKeyFromShowTables(pCxt, (SShowStmt*)pStmt); + case 
QUERY_NODE_SHOW_USERS_STMT: + return collectMetaKeyFromShowUsers(pCxt, (SShowStmt*)pStmt); + case QUERY_NODE_SHOW_LICENCE_STMT: + return collectMetaKeyFromShowLicence(pCxt, (SShowStmt*)pStmt); + case QUERY_NODE_SHOW_VGROUPS_STMT: + return collectMetaKeyFromShowVgroups(pCxt, (SShowStmt*)pStmt); + case QUERY_NODE_SHOW_TOPICS_STMT: + return collectMetaKeyFromShowTopics(pCxt, (SShowStmt*)pStmt); + case QUERY_NODE_SHOW_TRANSACTIONS_STMT: + return collectMetaKeyFromShowTransactions(pCxt, (SShowStmt*)pStmt); + default: + break; + } + return TSDB_CODE_SUCCESS; +} + +int32_t collectMetaKey(SParseContext* pParseCxt, SQuery* pQuery) { + SCollectMetaKeyCxt cxt = {.pParseCxt = pParseCxt, .pMetaCache = taosMemoryCalloc(1, sizeof(SParseMetaCache))}; + if (NULL == cxt.pMetaCache) { + return TSDB_CODE_OUT_OF_MEMORY; + } + int32_t code = collectMetaKeyFromQuery(&cxt, pQuery->pRoot); + if (TSDB_CODE_SUCCESS == code) { + TSWAP(pQuery->pMetaCache, cxt.pMetaCache); + } + destroyCollectMetaKeyCxt(&cxt); + return code; +} diff --git a/source/libs/parser/src/parAuthenticator.c b/source/libs/parser/src/parAuthenticator.c index 250e7910d69847a130fa4f0b2132b3dcb99da8e7..2670e5710b9f5418c401e9799678c68d82c8f29d 100644 --- a/source/libs/parser/src/parAuthenticator.c +++ b/source/libs/parser/src/parAuthenticator.c @@ -18,23 +18,30 @@ #include "parInt.h" typedef struct SAuthCxt { - SParseContext* pParseCxt; - int32_t errCode; + SParseContext* pParseCxt; + SParseMetaCache* pMetaCache; + int32_t errCode; } SAuthCxt; static int32_t authQuery(SAuthCxt* pCxt, SNode* pStmt); -static int32_t checkAuth(SParseContext* pCxt, const char* pDbName, AUTH_TYPE type) { - if (pCxt->isSuperUser) { +static int32_t checkAuth(SAuthCxt* pCxt, const char* pDbName, AUTH_TYPE type) { + SParseContext* pParseCxt = pCxt->pParseCxt; + if (pParseCxt->isSuperUser) { return TSDB_CODE_SUCCESS; } SName name; - tNameSetDbName(&name, pCxt->acctId, pDbName, strlen(pDbName)); + tNameSetDbName(&name, pParseCxt->acctId, pDbName, strlen(pDbName)); char dbFname[TSDB_DB_FNAME_LEN] = {0}; tNameGetFullDbName(&name, dbFname); + int32_t code = TSDB_CODE_SUCCESS; bool pass = false; - int32_t code = - catalogChkAuth(pCxt->pCatalog, pCxt->pTransporter, &pCxt->mgmtEpSet, pCxt->pUser, dbFname, type, &pass); + if (NULL != pCxt->pMetaCache) { + code = getUserAuthFromCache(pCxt->pMetaCache, pParseCxt->pUser, dbFname, type, &pass); + } else { + code = catalogChkAuth(pParseCxt->pCatalog, pParseCxt->pTransporter, &pParseCxt->mgmtEpSet, pParseCxt->pUser, + dbFname, type, &pass); + } return TSDB_CODE_SUCCESS == code ? (pass ? TSDB_CODE_SUCCESS : TSDB_CODE_PAR_PERMISSION_DENIED) : code; } @@ -45,7 +52,7 @@ static EDealRes authSubquery(SAuthCxt* pCxt, SNode* pStmt) { static EDealRes authSelectImpl(SNode* pNode, void* pContext) { SAuthCxt* pCxt = pContext; if (QUERY_NODE_REAL_TABLE == nodeType(pNode)) { - pCxt->errCode = checkAuth(pCxt->pParseCxt, ((SRealTableNode*)pNode)->table.dbName, AUTH_TYPE_READ); + pCxt->errCode = checkAuth(pCxt, ((SRealTableNode*)pNode)->table.dbName, AUTH_TYPE_READ); return TSDB_CODE_SUCCESS == pCxt->errCode ? 
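
Editorial note: the reworked `checkAuth()` in parAuthenticator.c consults the user-auth entries that `collectMetaKey` reserved, and falls back to `catalogChkAuth()` only when no cache is attached (synchronous parsing). A compilable sketch of that control flow; the cache struct and both lookups are illustrative stand-ins, not the TDengine API:

```c
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

typedef struct SAuthCacheSketch { char user[24]; char db[24]; bool pass; } SAuthCacheSketch;

static int cacheLookupAuth(const SAuthCacheSketch* pCache, const char* user, const char* db, bool* pPass) {
  if (strcmp(pCache->user, user) != 0 || strcmp(pCache->db, db) != 0) return -2; /* missing reservation */
  *pPass = pCache->pass;
  return 0;
}

static int rpcCheckAuth(const char* user, const char* db, bool* pPass) {
  (void)user; (void)db;
  *pPass = true; /* pretend the server granted access */
  return 0;
}

/* The pattern from the new checkAuth(): prefer the prefetched cache, fall
 * back to the catalog RPC only when no cache is attached. */
static int checkAuthSketch(const SAuthCacheSketch* pCache, const char* user, const char* db) {
  bool pass = false;
  int  code = (pCache != NULL) ? cacheLookupAuth(pCache, user, db, &pass)
                               : rpcCheckAuth(user, db, &pass);
  if (code != 0) return code;
  return pass ? 0 : -1; /* -1 plays the role of TSDB_CODE_PAR_PERMISSION_DENIED */
}

int main(void) {
  SAuthCacheSketch cache = {"root", "1.db1", true};
  printf("cached path: %d\n", checkAuthSketch(&cache, "root", "1.db1"));
  printf("rpc path:    %d\n", checkAuthSketch(NULL, "root", "1.db1"));
  return 0;
}
```
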
DEAL_RES_CONTINUE : DEAL_RES_ERROR; } else if (QUERY_NODE_TEMP_TABLE == nodeType(pNode)) { return authSubquery(pCxt, ((STempTableNode*)pNode)->pSubquery); @@ -79,87 +86,8 @@ static int32_t authQuery(SAuthCxt* pCxt, SNode* pStmt) { return authSetOperator(pCxt, (SSetOperator*)pStmt); case QUERY_NODE_SELECT_STMT: return authSelect(pCxt, (SSelectStmt*)pStmt); - case QUERY_NODE_CREATE_DATABASE_STMT: - case QUERY_NODE_DROP_DATABASE_STMT: - case QUERY_NODE_ALTER_DATABASE_STMT: - case QUERY_NODE_CREATE_TABLE_STMT: - case QUERY_NODE_CREATE_SUBTABLE_CLAUSE: - case QUERY_NODE_CREATE_MULTI_TABLE_STMT: - case QUERY_NODE_DROP_TABLE_CLAUSE: - case QUERY_NODE_DROP_TABLE_STMT: - case QUERY_NODE_DROP_SUPER_TABLE_STMT: - case QUERY_NODE_ALTER_TABLE_STMT: - case QUERY_NODE_CREATE_USER_STMT: - case QUERY_NODE_ALTER_USER_STMT: - break; - case QUERY_NODE_DROP_USER_STMT: { + case QUERY_NODE_DROP_USER_STMT: return authDropUser(pCxt, (SDropUserStmt*)pStmt); - } - case QUERY_NODE_USE_DATABASE_STMT: - case QUERY_NODE_CREATE_DNODE_STMT: - case QUERY_NODE_DROP_DNODE_STMT: - case QUERY_NODE_ALTER_DNODE_STMT: - case QUERY_NODE_CREATE_INDEX_STMT: - case QUERY_NODE_DROP_INDEX_STMT: - case QUERY_NODE_CREATE_QNODE_STMT: - case QUERY_NODE_DROP_QNODE_STMT: - case QUERY_NODE_CREATE_BNODE_STMT: - case QUERY_NODE_DROP_BNODE_STMT: - case QUERY_NODE_CREATE_SNODE_STMT: - case QUERY_NODE_DROP_SNODE_STMT: - case QUERY_NODE_CREATE_MNODE_STMT: - case QUERY_NODE_DROP_MNODE_STMT: - case QUERY_NODE_CREATE_TOPIC_STMT: - case QUERY_NODE_DROP_TOPIC_STMT: - case QUERY_NODE_ALTER_LOCAL_STMT: - case QUERY_NODE_EXPLAIN_STMT: - case QUERY_NODE_DESCRIBE_STMT: - case QUERY_NODE_RESET_QUERY_CACHE_STMT: - case QUERY_NODE_COMPACT_STMT: - case QUERY_NODE_CREATE_FUNCTION_STMT: - case QUERY_NODE_DROP_FUNCTION_STMT: - case QUERY_NODE_CREATE_STREAM_STMT: - case QUERY_NODE_DROP_STREAM_STMT: - case QUERY_NODE_MERGE_VGROUP_STMT: - case QUERY_NODE_REDISTRIBUTE_VGROUP_STMT: - case QUERY_NODE_SPLIT_VGROUP_STMT: - case QUERY_NODE_SYNCDB_STMT: - case QUERY_NODE_GRANT_STMT: - case QUERY_NODE_REVOKE_STMT: - case QUERY_NODE_SHOW_DNODES_STMT: - case QUERY_NODE_SHOW_MNODES_STMT: - case QUERY_NODE_SHOW_MODULES_STMT: - case QUERY_NODE_SHOW_QNODES_STMT: - case QUERY_NODE_SHOW_SNODES_STMT: - case QUERY_NODE_SHOW_BNODES_STMT: - case QUERY_NODE_SHOW_CLUSTER_STMT: - case QUERY_NODE_SHOW_DATABASES_STMT: - case QUERY_NODE_SHOW_FUNCTIONS_STMT: - case QUERY_NODE_SHOW_INDEXES_STMT: - case QUERY_NODE_SHOW_STABLES_STMT: - case QUERY_NODE_SHOW_STREAMS_STMT: - case QUERY_NODE_SHOW_TABLES_STMT: - case QUERY_NODE_SHOW_USERS_STMT: - case QUERY_NODE_SHOW_LICENCE_STMT: - case QUERY_NODE_SHOW_VGROUPS_STMT: - case QUERY_NODE_SHOW_TOPICS_STMT: - case QUERY_NODE_SHOW_CONSUMERS_STMT: - case QUERY_NODE_SHOW_SUBSCRIBES_STMT: - case QUERY_NODE_SHOW_SMAS_STMT: - case QUERY_NODE_SHOW_CONFIGS_STMT: - case QUERY_NODE_SHOW_CONNECTIONS_STMT: - case QUERY_NODE_SHOW_QUERIES_STMT: - case QUERY_NODE_SHOW_VNODES_STMT: - case QUERY_NODE_SHOW_APPS_STMT: - case QUERY_NODE_SHOW_SCORES_STMT: - case QUERY_NODE_SHOW_VARIABLE_STMT: - case QUERY_NODE_SHOW_CREATE_DATABASE_STMT: - case QUERY_NODE_SHOW_CREATE_TABLE_STMT: - case QUERY_NODE_SHOW_CREATE_STABLE_STMT: - case QUERY_NODE_SHOW_TRANSACTIONS_STMT: - case QUERY_NODE_KILL_CONNECTION_STMT: - case QUERY_NODE_KILL_QUERY_STMT: - case QUERY_NODE_KILL_TRANSACTION_STMT: default: break; } @@ -168,6 +96,6 @@ static int32_t authQuery(SAuthCxt* pCxt, SNode* pStmt) { } int32_t authenticate(SParseContext* pParseCxt, SQuery* pQuery) { - SAuthCxt cxt = {.pParseCxt = pParseCxt, 
.errCode = TSDB_CODE_SUCCESS}; + SAuthCxt cxt = {.pParseCxt = pParseCxt, .pMetaCache = pQuery->pMetaCache, .errCode = TSDB_CODE_SUCCESS}; return authQuery(&cxt, pQuery->pRoot); } diff --git a/source/libs/parser/src/parInsert.c b/source/libs/parser/src/parInsert.c index 49e52d873fd3f53d9b605443b9fc86e80ffad009..422c48039743185070ca5ea5f704627d26217253 100644 --- a/source/libs/parser/src/parInsert.c +++ b/source/libs/parser/src/parInsert.c @@ -54,16 +54,17 @@ typedef struct SInsertParseContext { SMsgBuf msg; // input STableMeta* pTableMeta; // each table SParsedDataColInfo tags; // each table - SKVRowBuilder tagsBuilder; // each table SVCreateTbReq createTblReq; // each table SHashObj* pVgroupsHashObj; // global SHashObj* pTableBlockHashObj; // global SHashObj* pSubTableHashObj; // global SArray* pVgDataBlocks; // global SHashObj* pTableNameHashObj; // global + SHashObj* pDbFNameHashObj; // global int32_t totalNum; SVnodeModifOpStmt* pOutput; SStmtCallback* pStmtCb; + SParseMetaCache* pMetaCache; } SInsertParseContext; typedef int32_t (*_row_append_fn_t)(SMsgBuf* pMsgBuf, const void* value, int32_t len, void* param); @@ -72,9 +73,10 @@ static uint8_t TRUE_VALUE = (uint8_t)TSDB_TRUE; static uint8_t FALSE_VALUE = (uint8_t)TSDB_FALSE; typedef struct SKvParam { - SKVRowBuilder* builder; - SSchema* schema; - char buf[TSDB_MAX_TAGS_LEN]; + int16_t pos; + SArray* pTagVals; + SSchema* schema; + char buf[TSDB_MAX_TAGS_LEN]; } SKvParam; typedef struct SMemParam { @@ -92,15 +94,15 @@ typedef struct SMemParam { } \ } while (0) -static int32_t skipInsertInto(SInsertParseContext* pCxt) { +static int32_t skipInsertInto(char** pSql, SMsgBuf* pMsg) { SToken sToken; - NEXT_TOKEN(pCxt->pSql, sToken); + NEXT_TOKEN(*pSql, sToken); if (TK_INSERT != sToken.type) { - return buildSyntaxErrMsg(&pCxt->msg, "keyword INSERT is expected", sToken.z); + return buildSyntaxErrMsg(pMsg, "keyword INSERT is expected", sToken.z); } - NEXT_TOKEN(pCxt->pSql, sToken); + NEXT_TOKEN(*pSql, sToken); if (TK_INTO != sToken.type) { - return buildSyntaxErrMsg(&pCxt->msg, "keyword INTO is expected", sToken.z); + return buildSyntaxErrMsg(pMsg, "keyword INTO is expected", sToken.z); } return TSDB_CODE_SUCCESS; } @@ -212,7 +214,7 @@ static int32_t createSName(SName* pName, SToken* pTableName, int32_t acctId, con return buildInvalidOperationMsg(pMsgBuf, msg4); } - char tbname[TSDB_TABLE_FNAME_LEN] = {0}; + char tbname[TSDB_TABLE_FNAME_LEN] = {0}; strncpy(tbname, p + 1, tbLen); /*tbLen = */ strdequote(tbname); @@ -250,25 +252,46 @@ static int32_t createSName(SName* pName, SToken* pTableName, int32_t acctId, con return code; } -static int32_t getTableMetaImpl(SInsertParseContext* pCxt, SName* name, char* dbFname, bool isStb) { +static int32_t checkAuth(SInsertParseContext* pCxt, char* pDbFname, bool* pPass) { + SParseContext* pBasicCtx = pCxt->pComCxt; + if (NULL != pCxt->pMetaCache) { + return getUserAuthFromCache(pCxt->pMetaCache, pBasicCtx->pUser, pDbFname, AUTH_TYPE_WRITE, pPass); + } + return catalogChkAuth(pBasicCtx->pCatalog, pBasicCtx->pTransporter, &pBasicCtx->mgmtEpSet, pBasicCtx->pUser, pDbFname, + AUTH_TYPE_WRITE, pPass); +} + +static int32_t getTableSchema(SInsertParseContext* pCxt, SName* pTbName, bool isStb, STableMeta** pTableMeta) { + SParseContext* pBasicCtx = pCxt->pComCxt; + if (NULL != pCxt->pMetaCache) { + return getTableMetaFromCache(pCxt->pMetaCache, pTbName, pTableMeta); + } + if (isStb) { + return catalogGetSTableMeta(pBasicCtx->pCatalog, pBasicCtx->pTransporter, &pBasicCtx->mgmtEpSet, pTbName, + pTableMeta); + } + 
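
Editorial note: `skipInsertInto()` was refactored above to take `(char** pSql, SMsgBuf*)` instead of the whole parse context, so both the full insert parser and the new syntax-only pass can share it. A minimal standalone sketch of that keyword-skipping shape; `nextToken` and the error strings are hypothetical stand-ins for `NEXT_TOKEN` and `buildSyntaxErrMsg`:

```c
#include <ctype.h>
#include <stdio.h>
#include <strings.h> /* strcasecmp (POSIX) */

/* Pull one alphabetic keyword and advance *pSql past it. */
static int nextToken(const char** pSql, char* buf, size_t cap) {
  while (isspace((unsigned char)**pSql)) (*pSql)++;
  size_t n = 0;
  while (isalpha((unsigned char)**pSql) && n + 1 < cap) buf[n++] = *(*pSql)++;
  buf[n] = '\0';
  return (int)n;
}

static int skipInsertIntoSketch(const char** pSql, const char** pErr) {
  char tok[16];
  nextToken(pSql, tok, sizeof(tok));
  if (strcasecmp(tok, "INSERT") != 0) { *pErr = "keyword INSERT is expected"; return -1; }
  nextToken(pSql, tok, sizeof(tok));
  if (strcasecmp(tok, "INTO") != 0) { *pErr = "keyword INTO is expected"; return -1; }
  return 0;
}

int main(void) {
  const char* sql = "INSERT INTO t1 VALUES (now, 1)";
  const char* err = NULL;
  if (skipInsertIntoSketch(&sql, &err) == 0) printf("rest:%s\n", sql);
  else printf("error: %s\n", err);
  return 0;
}
```
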
return catalogGetTableMeta(pBasicCtx->pCatalog, pBasicCtx->pTransporter, &pBasicCtx->mgmtEpSet, pTbName, pTableMeta); +} + +static int32_t getTableVgroup(SInsertParseContext* pCxt, SName* pTbName, SVgroupInfo* pVg) { SParseContext* pBasicCtx = pCxt->pComCxt; + if (NULL != pCxt->pMetaCache) { + return getTableVgroupFromCache(pCxt->pMetaCache, pTbName, pVg); + } + return catalogGetTableHashVgroup(pBasicCtx->pCatalog, pBasicCtx->pTransporter, &pBasicCtx->mgmtEpSet, pTbName, pVg); +} +static int32_t getTableMetaImpl(SInsertParseContext* pCxt, SName* name, char* dbFname, bool isStb) { bool pass = false; - CHECK_CODE(catalogChkAuth(pBasicCtx->pCatalog, pBasicCtx->pTransporter, &pBasicCtx->mgmtEpSet, pBasicCtx->pUser, - dbFname, AUTH_TYPE_WRITE, &pass)); + CHECK_CODE(checkAuth(pCxt, dbFname, &pass)); if (!pass) { return TSDB_CODE_PAR_PERMISSION_DENIED; } - if (isStb) { - CHECK_CODE(catalogGetSTableMeta(pBasicCtx->pCatalog, pBasicCtx->pTransporter, &pBasicCtx->mgmtEpSet, name, - &pCxt->pTableMeta)); - } else { - CHECK_CODE(catalogGetTableMeta(pBasicCtx->pCatalog, pBasicCtx->pTransporter, &pBasicCtx->mgmtEpSet, name, - &pCxt->pTableMeta)); - ASSERT(pCxt->pTableMeta->tableInfo.rowSize > 0); + + CHECK_CODE(getTableSchema(pCxt, name, isStb, &pCxt->pTableMeta)); + if (!isStb) { SVgroupInfo vg; - CHECK_CODE( - catalogGetTableHashVgroup(pBasicCtx->pCatalog, pBasicCtx->pTransporter, &pBasicCtx->mgmtEpSet, name, &vg)); + CHECK_CODE(getTableVgroup(pCxt, name, &vg)); CHECK_CODE(taosHashPut(pCxt->pVgroupsHashObj, (const char*)&vg.vgId, sizeof(vg.vgId), (char*)&vg, sizeof(vg))); } return TSDB_CODE_SUCCESS; @@ -424,7 +447,7 @@ static int parseTime(char** end, SToken* pToken, int16_t timePrec, int64_t* time return TSDB_CODE_SUCCESS; } -static FORCE_INLINE int32_t checkAndTrimValue(SToken* pToken, uint32_t type, char* tmpTokenBuf, SMsgBuf* pMsgBuf) { +static FORCE_INLINE int32_t checkAndTrimValue(SToken* pToken, char* tmpTokenBuf, SMsgBuf* pMsgBuf) { if ((pToken->type != TK_NOW && pToken->type != TK_TODAY && pToken->type != TK_NK_INTEGER && pToken->type != TK_NK_STRING && pToken->type != TK_NK_FLOAT && pToken->type != TK_NK_BOOL && pToken->type != TK_NULL && pToken->type != TK_NK_HEX && pToken->type != TK_NK_OCT && @@ -470,7 +493,7 @@ static int32_t parseValueToken(char** end, SToken* pToken, SSchema* pSchema, int uint64_t uv; char* endptr = NULL; - int32_t code = checkAndTrimValue(pToken, pSchema->type, tmpTokenBuf, pMsgBuf); + int32_t code = checkAndTrimValue(pToken, tmpTokenBuf, pMsgBuf); if (code != TSDB_CODE_SUCCESS) { return code; } @@ -619,14 +642,12 @@ static int32_t parseValueToken(char** end, SToken* pToken, SSchema* pSchema, int case TSDB_DATA_TYPE_NCHAR: { return func(pMsgBuf, pToken->z, pToken->n, param); } - case TSDB_DATA_TYPE_JSON: { if (pToken->n > (TSDB_MAX_JSON_TAG_LEN - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE) { return buildSyntaxErrMsg(pMsgBuf, "json string too long than 4095", pToken->z); } return func(pMsgBuf, pToken->z, pToken->n, param); } - case TSDB_DATA_TYPE_TIMESTAMP: { int64_t tmpVal; if (parseTime(end, pToken, timePrec, &tmpVal, pMsgBuf) != TSDB_CODE_SUCCESS) { @@ -751,99 +772,282 @@ static int32_t parseBoundColumns(SInsertParseContext* pCxt, SParsedDataColInfo* return TSDB_CODE_SUCCESS; } -static int32_t KvRowAppend(SMsgBuf* pMsgBuf, const void* value, int32_t len, void* param) { - SKvParam* pa = (SKvParam*)param; +static void buildCreateTbReq(SVCreateTbReq* pTbReq, const char* tname, STag* pTag, int64_t suid) { + pTbReq->type = TD_CHILD_TABLE; + pTbReq->name = strdup(tname); + 
pTbReq->ctb.suid = suid; + pTbReq->ctb.pTag = (uint8_t*)pTag; - int8_t type = pa->schema->type; - int16_t colId = pa->schema->colId; + return; +} - if (TSDB_DATA_TYPE_JSON == type) { - return parseJsontoTagData(value, pa->builder, pMsgBuf, colId); - } +static int32_t parseTagToken(char** end, SToken* pToken, SSchema* pSchema, int16_t timePrec, STagVal* val, + SMsgBuf* pMsgBuf) { + int64_t iv; + uint64_t uv; + char* endptr = NULL; + + if (isNullStr(pToken)) { + if (TSDB_DATA_TYPE_TIMESTAMP == pSchema->type && PRIMARYKEY_TIMESTAMP_COL_ID == pSchema->colId) { + return buildSyntaxErrMsg(pMsgBuf, "primary timestamp should not be null", pToken->z); + } - if (value == NULL) { // it is a null data - // tdAppendColValToRow(rb, pa->schema->colId, pa->schema->type, TD_VTYPE_NULL, value, false, pa->toffset, - // pa->colIdx); return TSDB_CODE_SUCCESS; } - if (TSDB_DATA_TYPE_BINARY == type) { - STR_WITH_SIZE_TO_VARSTR(pa->buf, value, len); - tdAddColToKVRow(pa->builder, colId, pa->buf, varDataTLen(pa->buf)); - } else if (TSDB_DATA_TYPE_NCHAR == type) { - // if the converted output len is over than pColumnModel->bytes, return error: 'Argument list too long' - int32_t output = 0; - if (!taosMbsToUcs4(value, len, (TdUcs4*)varDataVal(pa->buf), pa->schema->bytes - VARSTR_HEADER_SIZE, &output)) { - if (errno == E2BIG) { - return generateSyntaxErrMsg(pMsgBuf, TSDB_CODE_PAR_VALUE_TOO_LONG, pa->schema->name); + val->cid = pSchema->colId; + val->type = pSchema->type; + + switch (pSchema->type) { + case TSDB_DATA_TYPE_BOOL: { + if ((pToken->type == TK_NK_BOOL || pToken->type == TK_NK_STRING) && (pToken->n != 0)) { + if (strncmp(pToken->z, "true", pToken->n) == 0) { + *(int8_t*)(&val->i64) = TRUE_VALUE; + } else if (strncmp(pToken->z, "false", pToken->n) == 0) { + *(int8_t*)(&val->i64) = FALSE_VALUE; + } else { + return buildSyntaxErrMsg(pMsgBuf, "invalid bool data", pToken->z); + } + } else if (pToken->type == TK_NK_INTEGER) { + *(int8_t*)(&val->i64) = ((taosStr2Int64(pToken->z, NULL, 10) == 0) ? FALSE_VALUE : TRUE_VALUE); + } else if (pToken->type == TK_NK_FLOAT) { + *(int8_t*)(&val->i64) = ((taosStr2Double(pToken->z, NULL) == 0) ? 
FALSE_VALUE : TRUE_VALUE); + } else { + return buildSyntaxErrMsg(pMsgBuf, "invalid bool data", pToken->z); } - - char buf[512] = {0}; - snprintf(buf, tListLen(buf), " taosMbsToUcs4 error:%s", strerror(errno)); - return buildSyntaxErrMsg(pMsgBuf, buf, value); + break; } - varDataSetLen(pa->buf, output); - tdAddColToKVRow(pa->builder, colId, pa->buf, varDataTLen(pa->buf)); - } else { - tdAddColToKVRow(pa->builder, colId, value, TYPE_BYTES[type]); - } + case TSDB_DATA_TYPE_TINYINT: { + if (TSDB_CODE_SUCCESS != toInteger(pToken->z, pToken->n, 10, &iv)) { + return buildSyntaxErrMsg(pMsgBuf, "invalid tinyint data", pToken->z); + } else if (!IS_VALID_TINYINT(iv)) { + return buildSyntaxErrMsg(pMsgBuf, "tinyint data overflow", pToken->z); + } - return TSDB_CODE_SUCCESS; -} + *(int8_t*)(&val->i64) = iv; + break; + } -static int32_t buildCreateTbReq(SVCreateTbReq* pTbReq, const char* tname, SKVRow row, int64_t suid) { - pTbReq->type = TD_CHILD_TABLE; - pTbReq->name = strdup(tname); - pTbReq->ctb.suid = suid; - pTbReq->ctb.pTag = row; + case TSDB_DATA_TYPE_UTINYINT: { + if (TSDB_CODE_SUCCESS != toUInteger(pToken->z, pToken->n, 10, &uv)) { + return buildSyntaxErrMsg(pMsgBuf, "invalid unsigned tinyint data", pToken->z); + } else if (!IS_VALID_UTINYINT(uv)) { + return buildSyntaxErrMsg(pMsgBuf, "unsigned tinyint data overflow", pToken->z); + } + *(uint8_t*)(&val->i64) = uv; + break; + } + + case TSDB_DATA_TYPE_SMALLINT: { + if (TSDB_CODE_SUCCESS != toInteger(pToken->z, pToken->n, 10, &iv)) { + return buildSyntaxErrMsg(pMsgBuf, "invalid smallint data", pToken->z); + } else if (!IS_VALID_SMALLINT(iv)) { + return buildSyntaxErrMsg(pMsgBuf, "smallint data overflow", pToken->z); + } + *(int16_t*)(&val->i64) = iv; + break; + } + + case TSDB_DATA_TYPE_USMALLINT: { + if (TSDB_CODE_SUCCESS != toUInteger(pToken->z, pToken->n, 10, &uv)) { + return buildSyntaxErrMsg(pMsgBuf, "invalid unsigned smallint data", pToken->z); + } else if (!IS_VALID_USMALLINT(uv)) { + return buildSyntaxErrMsg(pMsgBuf, "unsigned smallint data overflow", pToken->z); + } + *(uint16_t*)(&val->i64) = uv; + break; + } + + case TSDB_DATA_TYPE_INT: { + if (TSDB_CODE_SUCCESS != toInteger(pToken->z, pToken->n, 10, &iv)) { + return buildSyntaxErrMsg(pMsgBuf, "invalid int data", pToken->z); + } else if (!IS_VALID_INT(iv)) { + return buildSyntaxErrMsg(pMsgBuf, "int data overflow", pToken->z); + } + *(int32_t*)(&val->i64) = iv; + break; + } + + case TSDB_DATA_TYPE_UINT: { + if (TSDB_CODE_SUCCESS != toUInteger(pToken->z, pToken->n, 10, &uv)) { + return buildSyntaxErrMsg(pMsgBuf, "invalid unsigned int data", pToken->z); + } else if (!IS_VALID_UINT(uv)) { + return buildSyntaxErrMsg(pMsgBuf, "unsigned int data overflow", pToken->z); + } + *(uint32_t*)(&val->i64) = uv; + break; + } + + case TSDB_DATA_TYPE_BIGINT: { + if (TSDB_CODE_SUCCESS != toInteger(pToken->z, pToken->n, 10, &iv)) { + return buildSyntaxErrMsg(pMsgBuf, "invalid bigint data", pToken->z); + } else if (!IS_VALID_BIGINT(iv)) { + return buildSyntaxErrMsg(pMsgBuf, "bigint data overflow", pToken->z); + } + + val->i64 = iv; + break; + } + + case TSDB_DATA_TYPE_UBIGINT: { + if (TSDB_CODE_SUCCESS != toUInteger(pToken->z, pToken->n, 10, &uv)) { + return buildSyntaxErrMsg(pMsgBuf, "invalid unsigned bigint data", pToken->z); + } else if (!IS_VALID_UBIGINT(uv)) { + return buildSyntaxErrMsg(pMsgBuf, "unsigned bigint data overflow", pToken->z); + } + *(uint64_t*)(&val->i64) = uv; + break; + } + + case TSDB_DATA_TYPE_FLOAT: { + double dv; + if (TK_NK_ILLEGAL == toDouble(pToken, &dv, &endptr)) { + return 
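
Editorial note: every integer branch of `parseTagToken` follows the same convert/validate/narrow steps. A standalone sketch of one branch, using a checked `strtoll` in place of the internal `toInteger()`/`IS_VALID_SMALLINT()` helpers (which are not shown here):

```c
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Copy the unterminated token, convert with strtoll, and reject trailing
 * garbage or ERANGE overflow. */
static int toIntegerChecked(const char* z, size_t n, int64_t* pVal) {
  char buf[32];
  if (n == 0 || n >= sizeof(buf)) return -1;
  memcpy(buf, z, n);
  buf[n] = '\0';
  errno = 0;
  char* end = NULL;
  *pVal = strtoll(buf, &end, 10);
  return (errno == ERANGE || end == buf || *end != '\0') ? -1 : 0;
}

int main(void) {
  const char* tok = "40000"; /* out of range for a SMALLINT tag */
  int64_t     iv = 0;
  if (toIntegerChecked(tok, strlen(tok), &iv) != 0) {
    printf("invalid smallint data\n");
  } else if (iv < INT16_MIN || iv > INT16_MAX) {
    printf("smallint data overflow\n"); /* this branch fires */
  } else {
    int16_t v = (int16_t)iv; /* safe narrowing, as in *(int16_t*)(&val->i64) = iv */
    printf("ok: %d\n", (int)v);
  }
  return 0;
}
```
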
buildSyntaxErrMsg(pMsgBuf, "illegal float data", pToken->z); + } + if (((dv == HUGE_VAL || dv == -HUGE_VAL) && errno == ERANGE) || dv > FLT_MAX || dv < -FLT_MAX || isinf(dv) || + isnan(dv)) { + return buildSyntaxErrMsg(pMsgBuf, "illegal float data", pToken->z); + } + *(float*)(&val->i64) = dv; + break; + } + + case TSDB_DATA_TYPE_DOUBLE: { + double dv; + if (TK_NK_ILLEGAL == toDouble(pToken, &dv, &endptr)) { + return buildSyntaxErrMsg(pMsgBuf, "illegal double data", pToken->z); + } + if (((dv == HUGE_VAL || dv == -HUGE_VAL) && errno == ERANGE) || isinf(dv) || isnan(dv)) { + return buildSyntaxErrMsg(pMsgBuf, "illegal double data", pToken->z); + } + + *(double*)(&val->i64) = dv; + break; + } + + case TSDB_DATA_TYPE_BINARY: { + // Too long values will raise the invalid sql error message + if (pToken->n + VARSTR_HEADER_SIZE > pSchema->bytes) { + return generateSyntaxErrMsg(pMsgBuf, TSDB_CODE_PAR_VALUE_TOO_LONG, pSchema->name); + } + val->pData = pToken->z; + val->nData = pToken->n; + break; + } + + case TSDB_DATA_TYPE_NCHAR: { + int32_t output = 0; + void* p = taosMemoryCalloc(1, pToken->n * TSDB_NCHAR_SIZE); + if (p == NULL) { + return TSDB_CODE_OUT_OF_MEMORY; + } + if (!taosMbsToUcs4(pToken->z, pToken->n, (TdUcs4*)(p), pToken->n * TSDB_NCHAR_SIZE, &output)) { + if (errno == E2BIG) { + taosMemoryFree(p); + return generateSyntaxErrMsg(pMsgBuf, TSDB_CODE_PAR_VALUE_TOO_LONG, pSchema->name); + } + char buf[512] = {0}; + snprintf(buf, tListLen(buf), " taosMbsToUcs4 error:%s", strerror(errno)); + taosMemoryFree(p); + return buildSyntaxErrMsg(pMsgBuf, buf, pToken->z); + } + val->pData = p; + val->nData = output; + break; + } + case TSDB_DATA_TYPE_TIMESTAMP: { + if (parseTime(end, pToken, timePrec, &iv, pMsgBuf) != TSDB_CODE_SUCCESS) { + return buildSyntaxErrMsg(pMsgBuf, "invalid timestamp", pToken->z); + } + + val->i64 = iv; + break; + } + } return TSDB_CODE_SUCCESS; } // pSql -> tag1_value, ...) static int32_t parseTagsClause(SInsertParseContext* pCxt, SSchema* pSchema, uint8_t precision, const char* tName) { - if (tdInitKVRowBuilder(&pCxt->tagsBuilder) < 0) { - return TSDB_CODE_TSC_OUT_OF_MEMORY; - } - - SKvParam param = {.builder = &pCxt->tagsBuilder}; - SToken sToken; - bool isParseBindParam = false; - char tmpTokenBuf[TSDB_MAX_BYTES_PER_ROW] = {0}; // used for deleting Escape character: \\, \', \" + int32_t code = TSDB_CODE_SUCCESS; + SArray* pTagVals = taosArrayInit(pCxt->tags.numOfBound, sizeof(STagVal)); + SToken sToken; + bool isParseBindParam = false; + bool isJson = false; + STag* pTag = NULL; for (int i = 0; i < pCxt->tags.numOfBound; ++i) { NEXT_TOKEN_WITH_PREV(pCxt->pSql, sToken); if (sToken.type == TK_NK_QUESTION) { isParseBindParam = true; if (NULL == pCxt->pStmtCb) { - return buildSyntaxErrMsg(&pCxt->msg, "? only used in stmt", sToken.z); + code = buildSyntaxErrMsg(&pCxt->msg, "? only used in stmt", sToken.z); + goto end; } continue; } if (isParseBindParam) { - return buildInvalidOperationMsg(&pCxt->msg, "no mix usage for ? and tag values"); + code = buildInvalidOperationMsg(&pCxt->msg, "no mix usage for ? 
and tag values"); + goto end; } SSchema* pTagSchema = &pSchema[pCxt->tags.boundColumns[i]]; - param.schema = pTagSchema; - CHECK_CODE( - parseValueToken(&pCxt->pSql, &sToken, pTagSchema, precision, tmpTokenBuf, KvRowAppend, ¶m, &pCxt->msg)); + char* tmpTokenBuf = taosMemoryCalloc(1, sToken.n); // this can be optimize with parse column + code = checkAndTrimValue(&sToken, tmpTokenBuf, &pCxt->msg); + if (code != TSDB_CODE_SUCCESS) { + taosMemoryFree(tmpTokenBuf); + goto end; + } + if (pTagSchema->type == TSDB_DATA_TYPE_JSON) { + if (sToken.n > (TSDB_MAX_JSON_TAG_LEN - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE) { + code = buildSyntaxErrMsg(&pCxt->msg, "json string too long than 4095", sToken.z); + taosMemoryFree(tmpTokenBuf); + goto end; + } + code = parseJsontoTagData(sToken.z, pTagVals, &pTag, &pCxt->msg); + taosMemoryFree(tmpTokenBuf); + if (code != TSDB_CODE_SUCCESS) { + goto end; + } + isJson = true; + } else { + STagVal val = {0}; + code = parseTagToken(&pCxt->pSql, &sToken, pTagSchema, precision, &val, &pCxt->msg); + if (TSDB_CODE_SUCCESS != code) { + taosMemoryFree(tmpTokenBuf); + goto end; + } + if (pTagSchema->type != TSDB_DATA_TYPE_BINARY) { + taosMemoryFree(tmpTokenBuf); + } + taosArrayPush(pTagVals, &val); + } } if (isParseBindParam) { - return TSDB_CODE_SUCCESS; + code = TSDB_CODE_SUCCESS; + goto end; } - SKVRow row = tdGetKVRowFromBuilder(&pCxt->tagsBuilder); - if (NULL == row) { - return buildInvalidOperationMsg(&pCxt->msg, "out of memory"); + if (!isJson && (code = tTagNew(pTagVals, 1, false, &pTag)) != TSDB_CODE_SUCCESS) { + goto end; } - tdSortKVRowByColIdx(row); - return buildCreateTbReq(&pCxt->createTblReq, tName, row, pCxt->pTableMeta->suid); + buildCreateTbReq(&pCxt->createTblReq, tName, pTag, pCxt->pTableMeta->suid); + +end: + for (int i = 0; i < taosArrayGetSize(pTagVals); ++i) { + STagVal* p = (STagVal*)taosArrayGet(pTagVals, i); + if (IS_VAR_DATA_TYPE(p->type)) { + taosMemoryFree(p->pData); + } + } + taosArrayDestroy(pTagVals); + return code; } static int32_t cloneTableMeta(STableMeta* pSrc, STableMeta** pDst) { @@ -857,10 +1061,8 @@ static int32_t cloneTableMeta(STableMeta* pSrc, STableMeta** pDst) { static int32_t storeTableMeta(SInsertParseContext* pCxt, SHashObj* pHash, SName* pTableName, const char* pName, int32_t len, STableMeta* pMeta) { - SVgroupInfo vg; - SParseContext* pBasicCtx = pCxt->pComCxt; - CHECK_CODE( - catalogGetTableHashVgroup(pBasicCtx->pCatalog, pBasicCtx->pTransporter, &pBasicCtx->mgmtEpSet, pTableName, &vg)); + SVgroupInfo vg; + CHECK_CODE(getTableVgroup(pCxt, pTableName, &vg)); CHECK_CODE(taosHashPut(pCxt->pVgroupsHashObj, (const char*)&vg.vgId, sizeof(vg.vgId), (char*)&vg, sizeof(vg))); pMeta->uid = 0; @@ -952,6 +1154,10 @@ static int parseOneRow(SInsertParseContext* pCxt, STableDataBlocks* pDataBlocks, continue; } + if (TK_NK_RP == sToken.type) { + return generateSyntaxErrMsg(&pCxt->msg, TSDB_CODE_PAR_INVALID_COLUMNS_NUM); + } + if (isParseBindParam) { return buildInvalidOperationMsg(&pCxt->msg, "no mix usage for ? 
and values"); } @@ -1062,7 +1268,6 @@ void destroyCreateSubTbReq(SVCreateTbReq* pReq) { static void destroyInsertParseContextForTable(SInsertParseContext* pCxt) { taosMemoryFreeClear(pCxt->pTableMeta); destroyBoundColumnInfo(&pCxt->tags); - tdDestroyKVRowBuilder(&pCxt->tagsBuilder); destroyCreateSubTbReq(&pCxt->createTblReq); } @@ -1071,6 +1276,7 @@ static void destroyInsertParseContext(SInsertParseContext* pCxt) { taosHashCleanup(pCxt->pVgroupsHashObj); taosHashCleanup(pCxt->pSubTableHashObj); taosHashCleanup(pCxt->pTableNameHashObj); + taosHashCleanup(pCxt->pDbFNameHashObj); destroyBlockHashmap(pCxt->pTableBlockHashObj); destroyBlockArrayList(pCxt->pVgDataBlocks); @@ -1082,9 +1288,9 @@ static void destroyInsertParseContext(SInsertParseContext* pCxt) { // VALUES (field1_value, ...) [(field1_value2, ...) ...] | FILE csv_file_path // [...]; static int32_t parseInsertBody(SInsertParseContext* pCxt) { - int32_t tbNum = 0; - char tbFName[TSDB_TABLE_FNAME_LEN]; - bool autoCreateTbl = false; + int32_t tbNum = 0; + char tbFName[TSDB_TABLE_FNAME_LEN]; + bool autoCreateTbl = false; // for each table while (1) { @@ -1129,8 +1335,13 @@ static int32_t parseInsertBody(SInsertParseContext* pCxt) { SName name; CHECK_CODE(createSName(&name, &tbnameToken, pCxt->pComCxt->acctId, pCxt->pComCxt->db, &pCxt->msg)); + CHECK_CODE(isNotSchemalessDb(pCxt->pComCxt, name.dbname)); + tNameExtractFullName(&name, tbFName); CHECK_CODE(taosHashPut(pCxt->pTableNameHashObj, tbFName, strlen(tbFName), &name, sizeof(SName))); + char dbFName[TSDB_DB_FNAME_LEN]; + tNameGetFullDbName(&name, dbFName); + CHECK_CODE(taosHashPut(pCxt->pDbFNameHashObj, dbFName, strlen(dbFName), dbFName, sizeof(dbFName))); // USING clause if (TK_USING == sToken.type) { @@ -1138,8 +1349,6 @@ static int32_t parseInsertBody(SInsertParseContext* pCxt) { NEXT_TOKEN(pCxt->pSql, sToken); autoCreateTbl = true; } else { - char dbFName[TSDB_DB_FNAME_LEN]; - tNameGetFullDbName(&name, dbFName); CHECK_CODE(getTableMeta(pCxt, &name, dbFName)); } @@ -1186,8 +1395,8 @@ static int32_t parseInsertBody(SInsertParseContext* pCxt) { return TSDB_CODE_TSC_OUT_OF_MEMORY; } memcpy(tags, &pCxt->tags, sizeof(pCxt->tags)); - (*pCxt->pStmtCb->setInfoFn)(pCxt->pStmtCb->pStmt, pCxt->pTableMeta, tags, tbFName, autoCreateTbl, pCxt->pVgroupsHashObj, - pCxt->pTableBlockHashObj); + (*pCxt->pStmtCb->setInfoFn)(pCxt->pStmtCb->pStmt, pCxt->pTableMeta, tags, tbFName, autoCreateTbl, + pCxt->pVgroupsHashObj, pCxt->pTableBlockHashObj); memset(&pCxt->tags, 0, sizeof(pCxt->tags)); pCxt->pVgroupsHashObj = NULL; @@ -1204,6 +1413,23 @@ static int32_t parseInsertBody(SInsertParseContext* pCxt) { return buildOutput(pCxt); } +int32_t isNotSchemalessDb(SParseContext* pContext, char *dbName){ + SName name; + tNameSetDbName(&name, pContext->acctId, dbName, strlen(dbName)); + char dbFname[TSDB_DB_FNAME_LEN] = {0}; + tNameGetFullDbName(&name, dbFname); + SDbCfgInfo pInfo = {0}; + int32_t code = catalogGetDBCfg(pContext->pCatalog, pContext->pTransporter, &pContext->mgmtEpSet, dbFname, &pInfo); + if (code != TSDB_CODE_SUCCESS) { + parserError("catalogGetDBCfg error, code:%s, dbFName:%s", tstrerror(code), dbFname); + return code; + } + if (pInfo.schemaless){ + parserError("can not insert into schemaless db:%s", dbFname); + return TSDB_CODE_SML_INVALID_DB_CONF; + } + return TSDB_CODE_SUCCESS; +} // INSERT INTO // tb_name // [USING stb_name [(tag1_name, ...)] TAGS (tag1_value, ...)] @@ -1218,6 +1444,7 @@ int32_t parseInsertSql(SParseContext* pContext, SQuery** pQuery) { .pTableMeta = NULL, .pSubTableHashObj = 
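
Editorial note: the new `isNotSchemalessDb()` gate above fetches the database config and refuses SQL inserts into databases created with the SCHEMALESS option. A self-contained model of that check; `SDbCfgSketch` and `getDbCfgSketch` are stand-ins for `SDbCfgInfo`/`catalogGetDBCfg`, with the lookup faked so the sketch runs standalone:

```c
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

typedef struct SDbCfgSketch { bool schemaless; } SDbCfgSketch;

static int getDbCfgSketch(const char* dbFName, SDbCfgSketch* pCfg) {
  pCfg->schemaless = (0 == strcmp(dbFName, "1.sml_db")); /* pretend catalog lookup */
  return 0;
}

static int isNotSchemalessDbSketch(const char* dbFName) {
  SDbCfgSketch cfg = {0};
  int code = getDbCfgSketch(dbFName, &cfg);
  if (code != 0) return code; /* the config fetch itself failed */
  if (cfg.schemaless) {
    fprintf(stderr, "cannot insert into schemaless db:%s\n", dbFName);
    return -1; /* plays the role of TSDB_CODE_SML_INVALID_DB_CONF */
  }
  return 0;
}

int main(void) {
  printf("normal db -> %d\n", isNotSchemalessDbSketch("1.test_db"));
  printf("sml db    -> %d\n", isNotSchemalessDbSketch("1.sml_db"));
  return 0;
}
```
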
taosHashInit(128, taosGetDefaultHashFunction(TSDB_DATA_TYPE_VARCHAR), true, HASH_NO_LOCK), .pTableNameHashObj = taosHashInit(128, taosGetDefaultHashFunction(TSDB_DATA_TYPE_VARCHAR), true, HASH_NO_LOCK), + .pDbFNameHashObj = taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_VARCHAR), true, HASH_NO_LOCK), .totalNum = 0, .pOutput = (SVnodeModifOpStmt*)nodesMakeNode(QUERY_NODE_VNODE_MODIF_STMT), .pStmtCb = pContext->pStmtCb}; @@ -1232,7 +1459,7 @@ int32_t parseInsertSql(SParseContext* pContext, SQuery** pQuery) { } if (NULL == context.pVgroupsHashObj || NULL == context.pTableBlockHashObj || NULL == context.pSubTableHashObj || - NULL == context.pTableNameHashObj || NULL == context.pOutput) { + NULL == context.pTableNameHashObj || NULL == context.pDbFNameHashObj || NULL == context.pOutput) { return TSDB_CODE_TSC_OUT_OF_MEMORY; } @@ -1245,12 +1472,11 @@ int32_t parseInsertSql(SParseContext* pContext, SQuery** pQuery) { if (NULL == *pQuery) { return TSDB_CODE_OUT_OF_MEMORY; } - - (*pQuery)->execMode = QUERY_EXEC_MODE_SCHEDULE; - (*pQuery)->haveResultSet = false; - (*pQuery)->msgType = TDMT_VND_SUBMIT; - (*pQuery)->pRoot = (SNode*)context.pOutput; } + (*pQuery)->execMode = QUERY_EXEC_MODE_SCHEDULE; + (*pQuery)->haveResultSet = false; + (*pQuery)->msgType = TDMT_VND_SUBMIT; + (*pQuery)->pRoot = (SNode*)context.pOutput; if (NULL == (*pQuery)->pTableList) { (*pQuery)->pTableList = taosArrayInit(taosHashGetSize(context.pTableNameHashObj), sizeof(SName)); @@ -1259,9 +1485,16 @@ int32_t parseInsertSql(SParseContext* pContext, SQuery** pQuery) { } } + if (NULL == (*pQuery)->pDbList) { + (*pQuery)->pDbList = taosArrayInit(taosHashGetSize(context.pDbFNameHashObj), TSDB_DB_FNAME_LEN); + if (NULL == (*pQuery)->pDbList) { + return TSDB_CODE_OUT_OF_MEMORY; + } + } + context.pOutput->payloadType = PAYLOAD_TYPE_KV; - int32_t code = skipInsertInto(&context); + int32_t code = skipInsertInto(&context.pSql, &context.msg); if (TSDB_CODE_SUCCESS == code) { code = parseInsertBody(&context); } @@ -1271,11 +1504,182 @@ int32_t parseInsertSql(SParseContext* pContext, SQuery** pQuery) { taosArrayPush((*pQuery)->pTableList, pTable); pTable = taosHashIterate(context.pTableNameHashObj, pTable); } + + char* pDb = taosHashIterate(context.pDbFNameHashObj, NULL); + while (NULL != pDb) { + taosArrayPush((*pQuery)->pDbList, pDb); + pDb = taosHashIterate(context.pDbFNameHashObj, pDb); + } } destroyInsertParseContext(&context); return code; } +typedef struct SInsertParseSyntaxCxt { + SParseContext* pComCxt; + char* pSql; + SMsgBuf msg; + SParseMetaCache* pMetaCache; +} SInsertParseSyntaxCxt; + +static int32_t skipParentheses(SInsertParseSyntaxCxt* pCxt) { + SToken sToken; + while (1) { + NEXT_TOKEN(pCxt->pSql, sToken); + if (TK_NK_RP == sToken.type) { + break; + } + if (0 == sToken.n) { + return buildSyntaxErrMsg(&pCxt->msg, ") expected", NULL); + } + } + return TSDB_CODE_SUCCESS; +} + +static int32_t skipBoundColumns(SInsertParseSyntaxCxt* pCxt) { return skipParentheses(pCxt); } + +// pSql -> (field1_value, ...) [(field1_value2, ...) ...] 
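
Editorial note: the comment above describes `skipValuesClause`, defined next; the scanning it relies on is `skipParentheses`, which simply consumes tokens until the closing `)`. A character-level analogue of the same idea — note the real code works on `SToken` units, so a quoted string containing `)` is already one token and cannot terminate the scan early:

```c
#include <stdio.h>

static int skipParenthesesSketch(const char** pSql) {
  for (; **pSql; (*pSql)++) {
    if (**pSql == ')') { (*pSql)++; return 0; }
  }
  fprintf(stderr, ") expected\n");
  return -1;
}

int main(void) {
  const char* sql = "(now, 1, 'abc') (now, 2, 'def')";
  int rows = 0;
  while (*sql) {
    while (*sql == ' ') sql++;
    if (*sql != '(') break;
    sql++; /* consume '(' as skipValuesClause does via NEXT_TOKEN_KEEP_SQL */
    if (skipParenthesesSketch(&sql) != 0) return 1;
    ++rows;
  }
  printf("skipped %d row group(s)\n", rows); /* 0 rows -> "no data points" error */
  return 0;
}
```
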
+static int32_t skipValuesClause(SInsertParseSyntaxCxt* pCxt) { + int32_t numOfRows = 0; + SToken sToken; + while (1) { + int32_t index = 0; + NEXT_TOKEN_KEEP_SQL(pCxt->pSql, sToken, index); + if (TK_NK_LP != sToken.type) { + break; + } + pCxt->pSql += index; + + CHECK_CODE(skipParentheses(pCxt)); + ++numOfRows; + } + if (0 == numOfRows) { + return buildSyntaxErrMsg(&pCxt->msg, "no data points", NULL); + } + return TSDB_CODE_SUCCESS; +} + +static int32_t skipTagsClause(SInsertParseSyntaxCxt* pCxt) { return skipParentheses(pCxt); } + +// pSql -> [(tag1_name, ...)] TAGS (tag1_value, ...) +static int32_t skipUsingClause(SInsertParseSyntaxCxt* pCxt) { + SToken sToken; + NEXT_TOKEN(pCxt->pSql, sToken); + if (TK_NK_LP == sToken.type) { + CHECK_CODE(skipBoundColumns(pCxt)); + NEXT_TOKEN(pCxt->pSql, sToken); + } + + if (TK_TAGS != sToken.type) { + return buildSyntaxErrMsg(&pCxt->msg, "TAGS is expected", sToken.z); + } + // pSql -> (tag1_value, ...) + NEXT_TOKEN(pCxt->pSql, sToken); + if (TK_NK_LP != sToken.type) { + return buildSyntaxErrMsg(&pCxt->msg, "( is expected", sToken.z); + } + CHECK_CODE(skipTagsClause(pCxt)); + + return TSDB_CODE_SUCCESS; +} + +static int32_t collectTableMetaKey(SInsertParseSyntaxCxt* pCxt, SToken* pTbToken) { + SName name; + CHECK_CODE(createSName(&name, pTbToken, pCxt->pComCxt->acctId, pCxt->pComCxt->db, &pCxt->msg)); + CHECK_CODE(reserveUserAuthInCacheExt(pCxt->pComCxt->pUser, &name, AUTH_TYPE_WRITE, pCxt->pMetaCache)); + CHECK_CODE(reserveTableMetaInCacheExt(&name, pCxt->pMetaCache)); + CHECK_CODE(reserveTableVgroupInCacheExt(&name, pCxt->pMetaCache)); + return TSDB_CODE_SUCCESS; +} + +static int32_t parseInsertBodySyntax(SInsertParseSyntaxCxt* pCxt) { + bool hasData = false; + // for each table + while (1) { + SToken sToken; + + // pSql -> tb_name ... + NEXT_TOKEN(pCxt->pSql, sToken); + + // no data in the sql string anymore. + if (sToken.n == 0) { + if (sToken.type && pCxt->pSql[0]) { + return buildSyntaxErrMsg(&pCxt->msg, "invalid character in SQL", sToken.z); + } + + if (!hasData) { + return buildInvalidOperationMsg(&pCxt->msg, "no data in sql"); + } + break; + } + + hasData = false; + + SToken tbnameToken = sToken; + NEXT_TOKEN(pCxt->pSql, sToken); + + // USING clause + if (TK_USING == sToken.type) { + NEXT_TOKEN(pCxt->pSql, sToken); + CHECK_CODE(collectTableMetaKey(pCxt, &sToken)); + CHECK_CODE(skipUsingClause(pCxt)); + NEXT_TOKEN(pCxt->pSql, sToken); + } else { + CHECK_CODE(collectTableMetaKey(pCxt, &tbnameToken)); + } + + if (TK_NK_LP == sToken.type) { + // pSql -> field1_name, ...) + CHECK_CODE(skipBoundColumns(pCxt)); + NEXT_TOKEN(pCxt->pSql, sToken); + } + + if (TK_VALUES == sToken.type) { + // pSql -> (field1_value, ...) [(field1_value2, ...) ...] 
+ CHECK_CODE(skipValuesClause(pCxt)); + hasData = true; + continue; + } + + // FILE csv_file_path + if (TK_FILE == sToken.type) { + // pSql -> csv_file_path + NEXT_TOKEN(pCxt->pSql, sToken); + if (0 == sToken.n || (TK_NK_STRING != sToken.type && TK_NK_ID != sToken.type)) { + return buildSyntaxErrMsg(&pCxt->msg, "file path is required following keyword FILE", sToken.z); + } + hasData = true; + continue; + } + + return buildSyntaxErrMsg(&pCxt->msg, "keyword VALUES or FILE is expected", sToken.z); + } + + return TSDB_CODE_SUCCESS; +} + +int32_t parseInsertSyntax(SParseContext* pContext, SQuery** pQuery) { + SInsertParseSyntaxCxt context = {.pComCxt = pContext, + .pSql = (char*)pContext->pSql, + .msg = {.buf = pContext->pMsg, .len = pContext->msgLen}, + .pMetaCache = taosMemoryCalloc(1, sizeof(SParseMetaCache))}; + if (NULL == context.pMetaCache) { + return TSDB_CODE_OUT_OF_MEMORY; + } + int32_t code = skipInsertInto(&context.pSql, &context.msg); + if (TSDB_CODE_SUCCESS == code) { + code = parseInsertBodySyntax(&context); + } + if (TSDB_CODE_SUCCESS == code) { + *pQuery = taosMemoryCalloc(1, sizeof(SQuery)); + if (NULL == *pQuery) { + return TSDB_CODE_OUT_OF_MEMORY; + } + TSWAP((*pQuery)->pMetaCache, context.pMetaCache); + } + return code; +} + int32_t qCreateSName(SName* pName, const char* pTableName, int32_t acctId, char* dbName, char* msgBuf, int32_t msgBufLen) { SMsgBuf msg = {.buf = msgBuf, .len = msgBufLen}; @@ -1332,46 +1736,93 @@ int32_t qBindStmtTagsValue(void* pBlock, void* boundTags, int64_t suid, char* tN return TSDB_CODE_QRY_APP_ERROR; } - SKVRowBuilder tagBuilder; - if (tdInitKVRowBuilder(&tagBuilder) < 0) { - return TSDB_CODE_TSC_OUT_OF_MEMORY; + SArray* pTagArray = taosArrayInit(tags->numOfBound, sizeof(STagVal)); + if (!pTagArray) { + return buildInvalidOperationMsg(&pBuf, "out of memory"); } + int32_t code = TSDB_CODE_SUCCESS; SSchema* pSchema = pDataBlock->pTableMeta->schema; - SKvParam param = {.builder = &tagBuilder}; + + bool isJson = false; + STag* pTag = NULL; for (int c = 0; c < tags->numOfBound; ++c) { if (bind[c].is_null && bind[c].is_null[0]) { - KvRowAppend(&pBuf, NULL, 0, ¶m); continue; } SSchema* pTagSchema = &pSchema[tags->boundColumns[c]]; - param.schema = pTagSchema; - - int32_t colLen = pTagSchema->bytes; + int32_t colLen = pTagSchema->bytes; if (IS_VAR_DATA_TYPE(pTagSchema->type)) { colLen = bind[c].length[0]; } + if (pTagSchema->type == TSDB_DATA_TYPE_JSON) { + if (colLen > (TSDB_MAX_JSON_TAG_LEN - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE) { + code = buildSyntaxErrMsg(&pBuf, "json string too long than 4095", bind[c].buffer); + goto end; + } - CHECK_CODE(KvRowAppend(&pBuf, (char*)bind[c].buffer, colLen, ¶m)); + isJson = true; + char* tmp = taosMemoryCalloc(1, colLen + 1); + memcpy(tmp, bind[c].buffer, colLen); + code = parseJsontoTagData(tmp, pTagArray, &pTag, &pBuf); + taosMemoryFree(tmp); + if (code != TSDB_CODE_SUCCESS) { + goto end; + } + } else { + STagVal val = {.cid = pTagSchema->colId, .type = pTagSchema->type}; + if (pTagSchema->type == TSDB_DATA_TYPE_BINARY) { + val.pData = (uint8_t*)bind[c].buffer; + val.nData = colLen; + } else if (pTagSchema->type == TSDB_DATA_TYPE_NCHAR) { + int32_t output = 0; + void* p = taosMemoryCalloc(1, colLen * TSDB_NCHAR_SIZE); + if (p == NULL) { + code = TSDB_CODE_OUT_OF_MEMORY; + goto end; + } + if (!taosMbsToUcs4(bind[c].buffer, colLen, (TdUcs4*)(p), colLen * TSDB_NCHAR_SIZE, &output)) { + if (errno == E2BIG) { + taosMemoryFree(p); + code = generateSyntaxErrMsg(&pBuf, TSDB_CODE_PAR_VALUE_TOO_LONG, pTagSchema->name); + 
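
Editorial note: every error branch in the reworked `qBindStmtTagsValue`, including the one just above, funnels into a single `end:` label (the `goto end;` continues below), which releases the temporaries exactly once. A standalone sketch of this single-exit cleanup idiom; the names are hypothetical and the buffers stand in for the tag array and conversion scratch space:

```c
#include <stdio.h>
#include <stdlib.h>

/* Any failure jumps to `end:`, and free(NULL) being a no-op makes the shared
 * cleanup safe after partial progress. */
static int buildWithSingleExit(int failAt) {
  int   code = 0;
  char* pTagVals = NULL; /* stands in for the STagVal array   */
  char* pTmpBuf  = NULL; /* stands in for a conversion buffer */

  pTagVals = malloc(64);
  if (pTagVals == NULL || failAt == 1) { code = -1; goto end; }

  pTmpBuf = malloc(64);
  if (pTmpBuf == NULL || failAt == 2) { code = -2; goto end; }

end:
  free(pTmpBuf);
  free(pTagVals);
  return code;
}

int main(void) {
  printf("%d %d %d\n", buildWithSingleExit(0), buildWithSingleExit(1), buildWithSingleExit(2));
  return 0;
}
```
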
goto end; + } + char buf[512] = {0}; + snprintf(buf, tListLen(buf), " taosMbsToUcs4 error:%s", strerror(errno)); + taosMemoryFree(p); + code = buildSyntaxErrMsg(&pBuf, buf, bind[c].buffer); + goto end; + } + val.pData = p; + val.nData = output; + } else { + memcpy(&val.i64, bind[c].buffer, colLen); + } + taosArrayPush(pTagArray, &val); + } } - SKVRow row = tdGetKVRowFromBuilder(&tagBuilder); - if (NULL == row) { - tdDestroyKVRowBuilder(&tagBuilder); - return buildInvalidOperationMsg(&pBuf, "out of memory"); + if (!isJson && (code = tTagNew(pTagArray, 1, false, &pTag)) != TSDB_CODE_SUCCESS) { + goto end; } - tdSortKVRowByColIdx(row); SVCreateTbReq tbReq = {0}; - CHECK_CODE(buildCreateTbReq(&tbReq, tName, row, suid)); - CHECK_CODE(buildCreateTbMsg(pDataBlock, &tbReq)); - + buildCreateTbReq(&tbReq, tName, pTag, suid); + code = buildCreateTbMsg(pDataBlock, &tbReq); destroyCreateSubTbReq(&tbReq); - tdDestroyKVRowBuilder(&tagBuilder); - return TSDB_CODE_SUCCESS; +end: + for (int i = 0; i < taosArrayGetSize(pTagArray); ++i) { + STagVal* p = (STagVal*)taosArrayGet(pTagArray, i); + if (p->type == TSDB_DATA_TYPE_NCHAR) { + taosMemoryFree(p->pData); + } + } + taosArrayDestroy(pTagArray); + + return code; } int32_t qBindStmtColsValue(void* pBlock, TAOS_MULTI_BIND* bind, char* msgBuf, int32_t msgBufLen) { @@ -1540,18 +1991,24 @@ int32_t qBindStmtSingleColValue(void* pBlock, TAOS_MULTI_BIND* bind, char* msgBu return TSDB_CODE_SUCCESS; } -int32_t buildBoundFields(SParsedDataColInfo* boundInfo, SSchema* pSchema, int32_t* fieldNum, TAOS_FIELD** fields) { +int32_t buildBoundFields(SParsedDataColInfo* boundInfo, SSchema* pSchema, int32_t* fieldNum, TAOS_FIELD_E** fields, + uint8_t timePrec) { if (fields) { *fields = taosMemoryCalloc(boundInfo->numOfBound, sizeof(TAOS_FIELD)); if (NULL == *fields) { return TSDB_CODE_OUT_OF_MEMORY; } + SSchema* schema = &pSchema[boundInfo->boundColumns[0]]; + if (TSDB_DATA_TYPE_TIMESTAMP == schema->type) { + (*fields)[0].precision = timePrec; + } + for (int32_t i = 0; i < boundInfo->numOfBound; ++i) { - SSchema* pTagSchema = &pSchema[boundInfo->boundColumns[i]]; - strcpy((*fields)[i].name, pTagSchema->name); - (*fields)[i].type = pTagSchema->type; - (*fields)[i].bytes = pTagSchema->bytes; + schema = &pSchema[boundInfo->boundColumns[i]]; + strcpy((*fields)[i].name, schema->name); + (*fields)[i].type = schema->type; + (*fields)[i].bytes = schema->bytes; } } @@ -1560,7 +2017,7 @@ int32_t buildBoundFields(SParsedDataColInfo* boundInfo, SSchema* pSchema, int32_ return TSDB_CODE_SUCCESS; } -int32_t qBuildStmtTagFields(void* pBlock, void* boundTags, int32_t* fieldNum, TAOS_FIELD** fields) { +int32_t qBuildStmtTagFields(void* pBlock, void* boundTags, int32_t* fieldNum, TAOS_FIELD_E** fields) { STableDataBlocks* pDataBlock = (STableDataBlocks*)pBlock; SParsedDataColInfo* tags = (SParsedDataColInfo*)boundTags; if (NULL == tags) { @@ -1575,12 +2032,12 @@ int32_t qBuildStmtTagFields(void* pBlock, void* boundTags, int32_t* fieldNum, TA return TSDB_CODE_SUCCESS; } - CHECK_CODE(buildBoundFields(tags, pSchema, fieldNum, fields)); + CHECK_CODE(buildBoundFields(tags, pSchema, fieldNum, fields, 0)); return TSDB_CODE_SUCCESS; } -int32_t qBuildStmtColFields(void* pBlock, int32_t* fieldNum, TAOS_FIELD** fields) { +int32_t qBuildStmtColFields(void* pBlock, int32_t* fieldNum, TAOS_FIELD_E** fields) { STableDataBlocks* pDataBlock = (STableDataBlocks*)pBlock; SSchema* pSchema = getTableColumnSchema(pDataBlock->pTableMeta); if (pDataBlock->boundColumnInfo.numOfBound <= 0) { @@ -1592,7 +2049,8 @@ int32_t 
qBuildStmtColFields(void* pBlock, int32_t* fieldNum, TAOS_FIELD** fields return TSDB_CODE_SUCCESS; } - CHECK_CODE(buildBoundFields(&pDataBlock->boundColumnInfo, pSchema, fieldNum, fields)); + CHECK_CODE(buildBoundFields(&pDataBlock->boundColumnInfo, pSchema, fieldNum, fields, + pDataBlock->pTableMeta->tableInfo.precision)); return TSDB_CODE_SUCCESS; } @@ -1601,7 +2059,6 @@ int32_t qBuildStmtColFields(void* pBlock, int32_t* fieldNum, TAOS_FIELD** fields typedef struct SmlExecTableHandle { SParsedDataColInfo tags; // each table - SKVRowBuilder tagsBuilder; // each table SVCreateTbReq createTblReq; // each table } SmlExecTableHandle; @@ -1613,7 +2070,6 @@ typedef struct SmlExecHandle { static void smlDestroyTableHandle(void* pHandle) { SmlExecTableHandle* handle = (SmlExecTableHandle*)pHandle; - tdDestroyKVRowBuilder(&handle->tagsBuilder); destroyBoundColumnInfo(&handle->tags); destroyCreateSubTbReq(&handle->createTblReq); } @@ -1689,30 +2145,68 @@ static int32_t smlBoundColumnData(SArray* cols, SParsedDataColInfo* pColList, SS return TSDB_CODE_SUCCESS; } -static int32_t smlBuildTagRow(SArray* cols, SKVRowBuilder* tagsBuilder, SParsedDataColInfo* tags, SSchema* pSchema, - SKVRow* row, SMsgBuf* msg) { - if (tdInitKVRowBuilder(tagsBuilder) < 0) { +/** + * @brief No json tag for schemaless + * + * @param cols + * @param tags + * @param pSchema + * @param ppTag + * @param msg + * @return int32_t + */ +static int32_t smlBuildTagRow(SArray* cols, SParsedDataColInfo* tags, SSchema* pSchema, STag** ppTag, SMsgBuf* msg) { + SArray* pTagArray = taosArrayInit(tags->numOfBound, sizeof(STagVal)); + if (!pTagArray) { return TSDB_CODE_TSC_OUT_OF_MEMORY; } - SKvParam param = {.builder = tagsBuilder}; + int32_t code = TSDB_CODE_SUCCESS; for (int i = 0; i < tags->numOfBound; ++i) { SSchema* pTagSchema = &pSchema[tags->boundColumns[i]]; - param.schema = pTagSchema; - SSmlKv* kv = taosArrayGetP(cols, i); - if (IS_VAR_DATA_TYPE(kv->type)) { - KvRowAppend(msg, kv->value, kv->length, ¶m); + SSmlKv* kv = taosArrayGetP(cols, i); + + STagVal val = {.cid = pTagSchema->colId, .type = pTagSchema->type}; + if (pTagSchema->type == TSDB_DATA_TYPE_BINARY) { + val.pData = (uint8_t*)kv->value; + val.nData = kv->length; + } else if (pTagSchema->type == TSDB_DATA_TYPE_NCHAR) { + int32_t output = 0; + void *p = taosMemoryCalloc(1, kv->length * TSDB_NCHAR_SIZE); + if(p == NULL){ + code = TSDB_CODE_OUT_OF_MEMORY; + goto end; + } + if (!taosMbsToUcs4(kv->value, kv->length, (TdUcs4*)(p), kv->length * TSDB_NCHAR_SIZE, &output)) { + if (errno == E2BIG) { + taosMemoryFree(p); + code = generateSyntaxErrMsg(msg, TSDB_CODE_PAR_VALUE_TOO_LONG, pTagSchema->name); + goto end; + } + char buf[512] = {0}; + snprintf(buf, tListLen(buf), " taosMbsToUcs4 error:%s", strerror(errno)); + taosMemoryFree(p); + code = buildSyntaxErrMsg(msg, buf, kv->value); + goto end; + } + val.pData = p; + val.nData = output; } else { - KvRowAppend(msg, &(kv->value), kv->length, ¶m); + memcpy(&val.i64, &(kv->value), kv->length); } + taosArrayPush(pTagArray, &val); } - *row = tdGetKVRowFromBuilder(tagsBuilder); - if (*row == NULL) { - return TSDB_CODE_OUT_OF_MEMORY; + code = tTagNew(pTagArray, 1, false, ppTag); +end: + for (int i = 0; i < taosArrayGetSize(pTagArray); ++i) { + STagVal* p = (STagVal*)taosArrayGet(pTagArray, i); + if (p->type == TSDB_DATA_TYPE_NCHAR) { + taosMemoryFree(p->pData); + } } - tdSortKVRowByColIdx(*row); - return TSDB_CODE_SUCCESS; + taosArrayDestroy(pTagArray); + return code; } int32_t smlBindData(void* handle, SArray* tags, SArray* colsSchema, 
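
Editorial note: `smlBuildTagRow()` above allocates temporary buffers only for NCHAR conversions, lets `tTagNew()` copy everything into the packed `STag`, then frees just the heap-owning values. A simplified, self-contained model of that ownership rule; `ValSketch`/`packTagsSketch` are stand-ins for `STagVal`/`tTagNew`, with the packing format invented for illustration:

```c
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Fixed-width values live in i64; var-data values point at buffers that may
 * be heap-owned (the NCHAR conversion case). */
typedef struct ValSketch { int ownsHeap; void* pData; int nData; int64_t i64; } ValSketch;

/* Copy every value into one contiguous blob, so the caller may free its
 * temporaries afterwards. */
static int packTagsSketch(const ValSketch* vals, int n, char** ppBlob, int* pLen) {
  int len = 0;
  for (int i = 0; i < n; ++i) len += vals[i].ownsHeap ? vals[i].nData : 8;
  char* blob = malloc(len > 0 ? len : 1);
  if (blob == NULL) return -1;
  int off = 0;
  for (int i = 0; i < n; ++i) {
    if (vals[i].ownsHeap) { memcpy(blob + off, vals[i].pData, vals[i].nData); off += vals[i].nData; }
    else                  { memcpy(blob + off, &vals[i].i64, 8); off += 8; }
  }
  *ppBlob = blob;
  *pLen = len;
  return 0;
}

int main(void) {
  char* txt = malloc(8);
  memcpy(txt, "tagtext", 8); /* simulates a taosMbsToUcs4 output buffer */
  ValSketch vals[2] = {{0, NULL, 0, 42}, {1, txt, 7, 0}};
  char* blob = NULL;
  int len = 0, code = packTagsSketch(vals, 2, &blob, &len);
  for (int i = 0; i < 2; ++i) { /* mirrors the cleanup loop at `end:` */
    if (vals[i].ownsHeap) free(vals[i].pData);
  }
  printf("code=%d packed=%d bytes\n", code, len);
  free(blob);
  return 0;
}
```
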
SArray* cols, bool format, STableMeta* pTableMeta, @@ -1728,14 +2222,13 @@ int32_t smlBindData(void* handle, SArray* tags, SArray* colsSchema, SArray* cols buildInvalidOperationMsg(&pBuf, "bound tags error"); return ret; } - SKVRow row = NULL; - ret = smlBuildTagRow(tags, &smlHandle->tableExecHandle.tagsBuilder, &smlHandle->tableExecHandle.tags, pTagsSchema, - &row, &pBuf); + STag* pTag = NULL; + ret = smlBuildTagRow(tags, &smlHandle->tableExecHandle.tags, pTagsSchema, &pTag, &pBuf); if (ret != TSDB_CODE_SUCCESS) { return ret; } - buildCreateTbReq(&smlHandle->tableExecHandle.createTblReq, tableName, row, pTableMeta->suid); + buildCreateTbReq(&smlHandle->tableExecHandle.createTblReq, tableName, pTag, pTableMeta->suid); STableDataBlocks* pDataBlock = NULL; ret = getDataBlockFromList(smlHandle->pBlockHash, &pTableMeta->uid, sizeof(pTableMeta->uid), @@ -1780,7 +2273,7 @@ int32_t smlBindData(void* handle, SArray* tags, SArray* colsSchema, SArray* cols // 1. set the parsed value from sql string for (int c = 0, j = 0; c < spd->numOfBound; ++c) { - SSchema* pColSchema = &pSchema[spd->boundColumns[c] - 1]; + SSchema* pColSchema = &pSchema[spd->boundColumns[c]]; param.schema = pColSchema; getSTSRowAppendInfo(pBuilder->rowType, spd, c, ¶m.toffset, ¶m.colIdx); diff --git a/source/libs/parser/src/parTokenizer.c b/source/libs/parser/src/parTokenizer.c index 540de2d639be9e69e798316e04bb4a46ff9dd58e..e9539073583c6d21a100efa5b33516eb9db18393 100644 --- a/source/libs/parser/src/parTokenizer.c +++ b/source/libs/parser/src/parTokenizer.c @@ -53,7 +53,6 @@ static SKeyword keywordTable[] = { {"CACHE", TK_CACHE}, {"CACHELAST", TK_CACHELAST}, {"CAST", TK_CAST}, - {"CGROUP", TK_CGROUP}, {"CLUSTER", TK_CLUSTER}, {"COLUMN", TK_COLUMN}, {"COMMENT", TK_COMMENT}, @@ -62,13 +61,13 @@ static SKeyword keywordTable[] = { {"CONNS", TK_CONNS}, {"CONNECTION", TK_CONNECTION}, {"CONNECTIONS", TK_CONNECTIONS}, + {"CONSUMER", TK_CONSUMER}, {"COUNT", TK_COUNT}, {"CREATE", TK_CREATE}, {"DATABASE", TK_DATABASE}, {"DATABASES", TK_DATABASES}, {"DAYS", TK_DAYS}, {"DBS", TK_DBS}, - {"DELAY", TK_DELAY}, {"DESC", TK_DESC}, {"DESCRIBE", TK_DESCRIBE}, {"DISTINCT", TK_DISTINCT}, @@ -156,7 +155,6 @@ static SKeyword keywordTable[] = { {"RETENTIONS", TK_RETENTIONS}, {"REVOKE", TK_REVOKE}, {"ROLLUP", TK_ROLLUP}, - {"SCHEMA", TK_SCHEMA}, {"SCHEMALESS", TK_SCHEMALESS}, {"SCORES", TK_SCORES}, {"SELECT", TK_SELECT}, @@ -214,7 +212,6 @@ static SKeyword keywordTable[] = { {"WATERMARK", TK_WATERMARK}, {"WHERE", TK_WHERE}, {"WINDOW_CLOSE", TK_WINDOW_CLOSE}, - {"WITH", TK_WITH}, {"WRITE", TK_WRITE}, {"_C0", TK_ROWTS}, {"_QENDTS", TK_QENDTS}, diff --git a/source/libs/parser/src/parTranslater.c b/source/libs/parser/src/parTranslater.c index 0e1e50cd412de2d9f9e7f4f7a8700d67438dce93..f0cba6ddc9031387db7528fc1f47706f23ee2863 100644 --- a/source/libs/parser/src/parTranslater.c +++ b/source/libs/parser/src/parTranslater.c @@ -40,6 +40,7 @@ typedef struct STranslateContext { SHashObj* pDbs; SHashObj* pTables; SExplainOptions* pExplainOpt; + SParseMetaCache* pMetaCache; } STranslateContext; typedef struct SFullDatabaseName { @@ -102,12 +103,17 @@ static int32_t collectUseTable(const SName* pName, SHashObj* pDbs) { static int32_t getTableMetaImpl(STranslateContext* pCxt, const SName* pName, STableMeta** pMeta) { SParseContext* pParCxt = pCxt->pParseCxt; - int32_t code = collectUseDatabase(pName, pCxt->pDbs); - if (TSDB_CODE_SUCCESS == code) { - code = collectUseTable(pName, pCxt->pTables); - } - if (TSDB_CODE_SUCCESS == code) { - code = 
catalogGetTableMeta(pParCxt->pCatalog, pParCxt->pTransporter, &pParCxt->mgmtEpSet, pName, pMeta); + int32_t code = TSDB_CODE_SUCCESS; + if (pParCxt->async) { + code = getTableMetaFromCache(pCxt->pMetaCache, pName, pMeta); + } else { + code = collectUseDatabase(pName, pCxt->pDbs); + if (TSDB_CODE_SUCCESS == code) { + code = collectUseTable(pName, pCxt->pTables); + } + if (TSDB_CODE_SUCCESS == code) { + code = catalogGetTableMeta(pParCxt->pCatalog, pParCxt->pTransporter, &pParCxt->mgmtEpSet, pName, pMeta); + } } if (TSDB_CODE_SUCCESS != code) { parserError("catalogGetTableMeta error, code:%s, dbName:%s, tbName:%s", tstrerror(code), pName->dbname, @@ -126,8 +132,13 @@ static int32_t refreshGetTableMeta(STranslateContext* pCxt, const char* pDbName, SParseContext* pParCxt = pCxt->pParseCxt; SName name; toName(pCxt->pParseCxt->acctId, pDbName, pTableName, &name); - int32_t code = - catalogRefreshGetTableMeta(pParCxt->pCatalog, pParCxt->pTransporter, &pParCxt->mgmtEpSet, &name, pMeta, false); + int32_t code = TSDB_CODE_SUCCESS; + if (pParCxt->async) { + code = getTableMetaFromCache(pCxt->pMetaCache, &name, pMeta); + } else { + code = + catalogRefreshGetTableMeta(pParCxt->pCatalog, pParCxt->pTransporter, &pParCxt->mgmtEpSet, &name, pMeta, false); + } if (TSDB_CODE_SUCCESS != code) { parserError("catalogRefreshGetTableMeta error, code:%s, dbName:%s, tbName:%s", tstrerror(code), pDbName, pTableName); @@ -135,29 +146,18 @@ static int32_t refreshGetTableMeta(STranslateContext* pCxt, const char* pDbName, return code; } -static int32_t getTableDistVgInfo(STranslateContext* pCxt, const SName* pName, SArray** pVgInfo) { - SParseContext* pParCxt = pCxt->pParseCxt; - int32_t code = collectUseDatabase(pName, pCxt->pDbs); - if (TSDB_CODE_SUCCESS == code) { - code = collectUseTable(pName, pCxt->pTables); - } - if (TSDB_CODE_SUCCESS == code) { - code = catalogGetTableDistVgInfo(pParCxt->pCatalog, pParCxt->pTransporter, &pParCxt->mgmtEpSet, pName, pVgInfo); - } - if (TSDB_CODE_SUCCESS != code) { - parserError("catalogGetTableDistVgInfo error, code:%s, dbName:%s, tbName:%s", tstrerror(code), pName->dbname, - pName->tname); - } - return code; -} - static int32_t getDBVgInfoImpl(STranslateContext* pCxt, const SName* pName, SArray** pVgInfo) { SParseContext* pParCxt = pCxt->pParseCxt; char fullDbName[TSDB_DB_FNAME_LEN]; tNameGetFullDbName(pName, fullDbName); - int32_t code = collectUseDatabaseImpl(fullDbName, pCxt->pDbs); - if (TSDB_CODE_SUCCESS == code) { - code = catalogGetDBVgInfo(pParCxt->pCatalog, pParCxt->pTransporter, &pParCxt->mgmtEpSet, fullDbName, pVgInfo); + int32_t code = TSDB_CODE_SUCCESS; + if (pParCxt->async) { + code = getDbVgInfoFromCache(pCxt->pMetaCache, fullDbName, pVgInfo); + } else { + code = collectUseDatabaseImpl(fullDbName, pCxt->pDbs); + if (TSDB_CODE_SUCCESS == code) { + code = catalogGetDBVgInfo(pParCxt->pCatalog, pParCxt->pTransporter, &pParCxt->mgmtEpSet, fullDbName, pVgInfo); + } } if (TSDB_CODE_SUCCESS != code) { parserError("catalogGetDBVgInfo error, code:%s, dbFName:%s", tstrerror(code), fullDbName); @@ -175,12 +175,17 @@ static int32_t getDBVgInfo(STranslateContext* pCxt, const char* pDbName, SArray* static int32_t getTableHashVgroupImpl(STranslateContext* pCxt, const SName* pName, SVgroupInfo* pInfo) { SParseContext* pParCxt = pCxt->pParseCxt; - int32_t code = collectUseDatabase(pName, pCxt->pDbs); - if (TSDB_CODE_SUCCESS == code) { - code = collectUseTable(pName, pCxt->pTables); - } - if (TSDB_CODE_SUCCESS == code) { - code = catalogGetTableHashVgroup(pParCxt->pCatalog, 
pParCxt->pTransporter, &pParCxt->mgmtEpSet, pName, pInfo); + int32_t code = TSDB_CODE_SUCCESS; + if (pParCxt->async) { + code = getTableVgroupFromCache(pCxt->pMetaCache, pName, pInfo); + } else { + code = collectUseDatabase(pName, pCxt->pDbs); + if (TSDB_CODE_SUCCESS == code) { + code = collectUseTable(pName, pCxt->pTables); + } + if (TSDB_CODE_SUCCESS == code) { + code = catalogGetTableHashVgroup(pParCxt->pCatalog, pParCxt->pTransporter, &pParCxt->mgmtEpSet, pName, pInfo); + } } if (TSDB_CODE_SUCCESS != code) { parserError("catalogGetTableHashVgroup error, code:%s, dbName:%s, tbName:%s", tstrerror(code), pName->dbname, @@ -198,9 +203,14 @@ static int32_t getTableHashVgroup(STranslateContext* pCxt, const char* pDbName, static int32_t getDBVgVersion(STranslateContext* pCxt, const char* pDbFName, int32_t* pVersion, int64_t* pDbId, int32_t* pTableNum) { SParseContext* pParCxt = pCxt->pParseCxt; - int32_t code = collectUseDatabaseImpl(pDbFName, pCxt->pDbs); - if (TSDB_CODE_SUCCESS == code) { - code = catalogGetDBVgVersion(pParCxt->pCatalog, pDbFName, pVersion, pDbId, pTableNum); + int32_t code = TSDB_CODE_SUCCESS; + if (pParCxt->async) { + code = getDbVgVersionFromCache(pCxt->pMetaCache, pDbFName, pVersion, pDbId, pTableNum); + } else { + code = collectUseDatabaseImpl(pDbFName, pCxt->pDbs); + if (TSDB_CODE_SUCCESS == code) { + code = catalogGetDBVgVersion(pParCxt->pCatalog, pDbFName, pVersion, pDbId, pTableNum); + } } if (TSDB_CODE_SUCCESS != code) { parserError("catalogGetDBVgVersion error, code:%s, dbFName:%s", tstrerror(code), pDbFName); @@ -214,9 +224,14 @@ static int32_t getDBCfg(STranslateContext* pCxt, const char* pDbName, SDbCfgInfo tNameSetDbName(&name, pCxt->pParseCxt->acctId, pDbName, strlen(pDbName)); char dbFname[TSDB_DB_FNAME_LEN] = {0}; tNameGetFullDbName(&name, dbFname); - int32_t code = collectUseDatabaseImpl(dbFname, pCxt->pDbs); - if (TSDB_CODE_SUCCESS == code) { - code = catalogGetDBCfg(pParCxt->pCatalog, pParCxt->pTransporter, &pParCxt->mgmtEpSet, dbFname, pInfo); + int32_t code = TSDB_CODE_SUCCESS; + if (pParCxt->async) { + code = getDbCfgFromCache(pCxt->pMetaCache, dbFname, pInfo); + } else { + code = collectUseDatabaseImpl(dbFname, pCxt->pDbs); + if (TSDB_CODE_SUCCESS == code) { + code = catalogGetDBCfg(pParCxt->pCatalog, pParCxt->pTransporter, &pParCxt->mgmtEpSet, dbFname, pInfo); + } } if (TSDB_CODE_SUCCESS != code) { parserError("catalogGetDBCfg error, code:%s, dbFName:%s", tstrerror(code), dbFname); @@ -224,7 +239,28 @@ static int32_t getDBCfg(STranslateContext* pCxt, const char* pDbName, SDbCfgInfo return code; } -static int32_t initTranslateContext(SParseContext* pParseCxt, STranslateContext* pCxt) { +static int32_t getUdfInfo(STranslateContext* pCxt, SFunctionNode* pFunc) { + SParseContext* pParCxt = pCxt->pParseCxt; + SFuncInfo funcInfo = {0}; + int32_t code = TSDB_CODE_SUCCESS; + if (pParCxt->async) { + code = getUdfInfoFromCache(pCxt->pMetaCache, pFunc->functionName, &funcInfo); + } else { + code = catalogGetUdfInfo(pParCxt->pCatalog, pParCxt->pTransporter, &pParCxt->mgmtEpSet, pFunc->functionName, + &funcInfo); + } + if (TSDB_CODE_SUCCESS == code) { + pFunc->funcType = FUNCTION_TYPE_UDF; + pFunc->funcId = TSDB_FUNC_TYPE_AGGREGATE == funcInfo.funcType ? 
FUNC_AGGREGATE_UDF_ID : FUNC_SCALAR_UDF_ID; + pFunc->node.resType.type = funcInfo.outputType; + pFunc->node.resType.bytes = funcInfo.outputLen; + pFunc->udfBufSize = funcInfo.bufSize; + tFreeSFuncInfo(&funcInfo); + } + return code; +} + +static int32_t initTranslateContext(SParseContext* pParseCxt, SParseMetaCache* pMetaCache, STranslateContext* pCxt) { pCxt->pParseCxt = pParseCxt; pCxt->errCode = TSDB_CODE_SUCCESS; pCxt->msgBuf.buf = pParseCxt->pMsg; @@ -232,6 +268,7 @@ static int32_t initTranslateContext(SParseContext* pParseCxt, STranslateContext* pCxt->pNsLevel = taosArrayInit(TARRAY_MIN_SIZE, POINTER_BYTES); pCxt->currLevel = 0; pCxt->currClause = 0; + pCxt->pMetaCache = pMetaCache; pCxt->pDbs = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK); pCxt->pTables = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK); if (NULL == pCxt->pNsLevel || NULL == pCxt->pDbs || NULL == pCxt->pTables) { @@ -428,20 +465,22 @@ static bool isPrimaryKey(STempTableNode* pTable, SNode* pExpr) { return isPrimaryKeyImpl(pTable, pExpr); } -static bool findAndSetColumn(SColumnNode** pColRef, const STableNode* pTable) { +static int32_t findAndSetColumn(STranslateContext* pCxt, SColumnNode** pColRef, const STableNode* pTable, + bool* pFound) { SColumnNode* pCol = *pColRef; - bool found = false; + *pFound = false; if (QUERY_NODE_REAL_TABLE == nodeType(pTable)) { const STableMeta* pMeta = ((SRealTableNode*)pTable)->pMeta; if (isInternalPrimaryKey(pCol)) { setColumnInfoBySchema((SRealTableNode*)pTable, pMeta->schema, false, pCol); - return true; + *pFound = true; + return TSDB_CODE_SUCCESS; } int32_t nums = pMeta->tableInfo.numOfTags + pMeta->tableInfo.numOfColumns; for (int32_t i = 0; i < nums; ++i) { if (0 == strcmp(pCol->colName, pMeta->schema[i].name)) { setColumnInfoBySchema((SRealTableNode*)pTable, pMeta->schema + i, (i >= pMeta->tableInfo.numOfColumns), pCol); - found = true; + *pFound = true; break; } } @@ -452,13 +491,15 @@ static bool findAndSetColumn(SColumnNode** pColRef, const STableNode* pTable) { SExprNode* pExpr = (SExprNode*)pNode; if (0 == strcmp(pCol->colName, pExpr->aliasName) || (isPrimaryKey((STempTableNode*)pTable, pNode) && isInternalPrimaryKey(pCol))) { + if (*pFound) { + return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_AMBIGUOUS_COLUMN, pCol->colName); + } setColumnInfoByExpr(pTable, pExpr, pColRef); - found = true; - break; + *pFound = true; } } } - return found; + return TSDB_CODE_SUCCESS; } static EDealRes translateColumnWithPrefix(STranslateContext* pCxt, SColumnNode** pCol) { @@ -469,7 +510,12 @@ static EDealRes translateColumnWithPrefix(STranslateContext* pCxt, SColumnNode** STableNode* pTable = taosArrayGetP(pTables, i); if (belongTable(pCxt->pParseCxt->db, (*pCol), pTable)) { foundTable = true; - if (findAndSetColumn(pCol, pTable)) { + bool foundCol = false; + pCxt->errCode = findAndSetColumn(pCxt, pCol, pTable, &foundCol); + if (TSDB_CODE_SUCCESS != pCxt->errCode) { + return DEAL_RES_ERROR; + } + if (foundCol) { break; } return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_INVALID_COLUMN, (*pCol)->colName); @@ -488,14 +534,19 @@ static EDealRes translateColumnWithoutPrefix(STranslateContext* pCxt, SColumnNod bool isInternalPk = isInternalPrimaryKey(*pCol); for (size_t i = 0; i < nums; ++i) { STableNode* pTable = taosArrayGetP(pTables, i); - if (findAndSetColumn(pCol, pTable)) { + bool foundCol = false; + pCxt->errCode = findAndSetColumn(pCxt, pCol, pTable, &foundCol); + if (TSDB_CODE_SUCCESS != 
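// Sketch (illustrative, not part of the patch): findAndSetColumn used to return a bare bool, which
// could not distinguish "no such column" from "lookup failed". It now reports the match through an
// out-parameter and reserves the return value for hard errors such as an ambiguous alias match in a
// temp table, so each caller decides for itself whether a miss is fatal:
static EDealRes resolveColumnSketch(STranslateContext* pCxt, SColumnNode** pCol, STableNode* pTable) {
  bool found = false;
  pCxt->errCode = findAndSetColumn(pCxt, pCol, pTable, &found);
  if (TSDB_CODE_SUCCESS != pCxt->errCode) {
    return DEAL_RES_ERROR;  // e.g. TSDB_CODE_PAR_AMBIGUOUS_COLUMN raised inside the lookup
  }
  if (!found) {
    return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_INVALID_COLUMN, (*pCol)->colName);
  }
  return DEAL_RES_CONTINUE;
}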
pCxt->errCode) { + return DEAL_RES_ERROR; + } + if (foundCol) { if (found) { return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_AMBIGUOUS_COLUMN, (*pCol)->colName); } found = true; - if (isInternalPk) { - break; - } + } + if (isInternalPk) { + break; } } if (!found) { @@ -661,7 +712,6 @@ static EDealRes translateValueImpl(STranslateContext* pCxt, SValueNode* pVal, SD pVal->datum.p = taosMemoryCalloc(1, targetDt.bytes + 1); if (NULL == pVal->datum.p) { return generateDealNodeErrMsg(pCxt, TSDB_CODE_OUT_OF_MEMORY); - ; } int32_t len = 0; @@ -715,18 +765,30 @@ static bool isMultiResFunc(SNode* pNode) { return (QUERY_NODE_COLUMN == nodeType(pParam) ? 0 == strcmp(((SColumnNode*)pParam)->colName, "*") : false); } -static EDealRes translateUnaryOperator(STranslateContext* pCxt, SOperatorNode* pOp) { +static int32_t rewriteNegativeOperator(SNode** pOp) { + SNode* pRes = NULL; + int32_t code = scalarCalculateConstants(*pOp, &pRes); + if (TSDB_CODE_SUCCESS == code) { + *pOp = pRes; + } + return code; +} + +static EDealRes translateUnaryOperator(STranslateContext* pCxt, SOperatorNode** pOpRef) { + SOperatorNode* pOp = *pOpRef; if (OP_TYPE_MINUS == pOp->opType) { if (!IS_MATHABLE_TYPE(((SExprNode*)(pOp->pLeft))->resType.type)) { return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_WRONG_VALUE_TYPE, ((SExprNode*)(pOp->pLeft))->aliasName); } pOp->node.resType.type = TSDB_DATA_TYPE_DOUBLE; pOp->node.resType.bytes = tDataTypes[TSDB_DATA_TYPE_DOUBLE].bytes; + + pCxt->errCode = rewriteNegativeOperator((SNode**)pOpRef); } else { pOp->node.resType.type = TSDB_DATA_TYPE_BOOL; pOp->node.resType.bytes = tDataTypes[TSDB_DATA_TYPE_BOOL].bytes; } - return DEAL_RES_CONTINUE; + return TSDB_CODE_SUCCESS == pCxt->errCode ? DEAL_RES_CONTINUE : DEAL_RES_ERROR; } static EDealRes translateArithmeticOperator(STranslateContext* pCxt, SOperatorNode* pOp) { @@ -767,7 +829,8 @@ static EDealRes translateComparisonOperator(STranslateContext* pCxt, SOperatorNo if (!IS_VAR_DATA_TYPE(((SExprNode*)(pOp->pLeft))->resType.type)) { return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_WRONG_VALUE_TYPE, ((SExprNode*)(pOp->pLeft))->aliasName); } - if (QUERY_NODE_VALUE != nodeType(pOp->pRight) || !IS_STR_DATA_TYPE(((SExprNode*)(pOp->pRight))->resType.type)) { + if (QUERY_NODE_VALUE != nodeType(pOp->pRight) || + ((!IS_STR_DATA_TYPE(((SExprNode*)(pOp->pRight))->resType.type)) && (((SExprNode*)(pOp->pRight))->resType.type != TSDB_DATA_TYPE_NULL))) { return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_WRONG_VALUE_TYPE, ((SExprNode*)(pOp->pRight))->aliasName); } } @@ -787,7 +850,9 @@ static EDealRes translateJsonOperator(STranslateContext* pCxt, SOperatorNode* pO return DEAL_RES_CONTINUE; } -static EDealRes translateOperator(STranslateContext* pCxt, SOperatorNode* pOp) { +static EDealRes translateOperator(STranslateContext* pCxt, SOperatorNode** pOpRef) { + SOperatorNode* pOp = *pOpRef; + if (isMultiResFunc(pOp->pLeft)) { return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_WRONG_VALUE_TYPE, ((SExprNode*)(pOp->pLeft))->aliasName); } @@ -796,7 +861,7 @@ static EDealRes translateOperator(STranslateContext* pCxt, SOperatorNode* pOp) { } if (nodesIsUnaryOp(pOp)) { - return translateUnaryOperator(pCxt, pOp); + return translateUnaryOperator(pCxt, pOpRef); } else if (nodesIsArithmeticOp(pOp)) { return translateArithmeticOperator(pCxt, pOp); } else if (nodesIsComparisonOp(pOp)) { @@ -857,12 +922,11 @@ static bool hasInvalidFuncNesting(SNodeList* pParameterList) { } static int32_t getFuncInfo(STranslateContext* pCxt, SFunctionNode* pFunc) { - SFmGetFuncInfoParam 
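// Sketch (illustrative, not part of the patch): a unary minus over a foldable expression is now
// collapsed during translation. translateUnaryOperator receives SOperatorNode** so that
// scalarCalculateConstants can swap the whole operator node for the computed value node; e.g. the
// "-3" in "WHERE c1 > -3" becomes a DOUBLE-typed SValueNode instead of an operator evaluated per row:
static int32_t foldNegationSketch(SNode** ppOp) {
  SNode* pRes = NULL;
  int32_t code = scalarCalculateConstants(*ppOp, &pRes);  // evaluates the constant subtree
  if (TSDB_CODE_SUCCESS == code) {
    *ppOp = pRes;  // the parent expression now owns the folded value node
  }
  return code;
}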
param = {.pCtg = pCxt->pParseCxt->pCatalog, - .pRpc = pCxt->pParseCxt->pTransporter, - .pMgmtEps = &pCxt->pParseCxt->mgmtEpSet, - .pErrBuf = pCxt->msgBuf.buf, - .errBufLen = pCxt->msgBuf.len}; - return fmGetFuncInfo(¶m, pFunc); + int32_t code = fmGetFuncInfo(pFunc, pCxt->msgBuf.buf, pCxt->msgBuf.len); + if (TSDB_CODE_FUNC_NOT_BUILTIN_FUNTION == code) { + code = getUdfInfo(pCxt, pFunc); + } + return code; } static int32_t translateAggFunc(STranslateContext* pCxt, SFunctionNode* pFunc) { @@ -956,7 +1020,7 @@ static EDealRes doTranslateExpr(SNode** pNode, void* pContext) { case QUERY_NODE_VALUE: return translateValue(pCxt, (SValueNode*)*pNode); case QUERY_NODE_OPERATOR: - return translateOperator(pCxt, (SOperatorNode*)*pNode); + return translateOperator(pCxt, (SOperatorNode**)pNode); case QUERY_NODE_FUNCTION: return translateFunction(pCxt, (SFunctionNode*)*pNode); case QUERY_NODE_LOGIC_CONDITION: @@ -1196,7 +1260,6 @@ static int32_t setSysTableVgroupList(STranslateContext* pCxt, SName* pName, SRea int32_t code = TSDB_CODE_SUCCESS; SArray* vgroupList = NULL; if ('\0' != pRealTable->qualDbName[0]) { - // todo release after mnode can be processed if (0 != strcmp(pRealTable->qualDbName, TSDB_INFORMATION_SCHEMA_DB)) { code = getDBVgInfo(pCxt, pRealTable->qualDbName, &vgroupList); } @@ -1204,7 +1267,6 @@ static int32_t setSysTableVgroupList(STranslateContext* pCxt, SName* pName, SRea code = getDBVgInfoImpl(pCxt, pName, &vgroupList); } - // todo release after mnode can be processed if (TSDB_CODE_SUCCESS == code) { code = addMnodeToVgroupList(&pCxt->pParseCxt->mgmtEpSet, &vgroupList); } @@ -1225,7 +1287,7 @@ static int32_t setTableVgroupList(STranslateContext* pCxt, SName* pName, SRealTa int32_t code = TSDB_CODE_SUCCESS; if (TSDB_SUPER_TABLE == pRealTable->pMeta->tableType) { SArray* vgroupList = NULL; - code = getTableDistVgInfo(pCxt, pName, &vgroupList); + code = getDBVgInfoImpl(pCxt, pName, &vgroupList); if (TSDB_CODE_SUCCESS == code) { code = toVgroupsInfo(vgroupList, &pRealTable->pVgroupList); } @@ -1857,9 +1919,9 @@ static int32_t translatePartitionBy(STranslateContext* pCxt, SNodeList* pPartiti return translateExprList(pCxt, pPartitionByList); } -static int32_t translateWhere(STranslateContext* pCxt, SNode* pWhere) { +static int32_t translateWhere(STranslateContext* pCxt, SNode** pWhere) { pCxt->currClause = SQL_CLAUSE_WHERE; - return translateExpr(pCxt, &pWhere); + return translateExpr(pCxt, pWhere); } static int32_t translateFrom(STranslateContext* pCxt, SSelectStmt* pSelect) { @@ -1891,7 +1953,9 @@ static int32_t createPrimaryKeyColByTable(STranslateContext* pCxt, STableNode* p } pCol->colId = PRIMARYKEY_TIMESTAMP_COL_ID; strcpy(pCol->colName, PK_TS_COL_INTERNAL_NAME); - if (!findAndSetColumn(&pCol, pTable)) { + bool found = false; + int32_t code = findAndSetColumn(pCxt, &pCol, pTable, &found); + if (TSDB_CODE_SUCCESS != code || !found) { return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_TIMELINE_FUNC); } *pPrimaryKey = (SNode*)pCol; @@ -1930,7 +1994,7 @@ static int32_t translateSelect(STranslateContext* pCxt, SSelectStmt* pSelect) { pCxt->pCurrStmt = pSelect; int32_t code = translateFrom(pCxt, pSelect); if (TSDB_CODE_SUCCESS == code) { - code = translateWhere(pCxt, pSelect->pWhere); + code = translateWhere(pCxt, &pSelect->pWhere); } if (TSDB_CODE_SUCCESS == code) { code = translatePartitionBy(pCxt, pSelect->pPartitionByList); @@ -2569,10 +2633,7 @@ static int32_t checkTableSchema(STranslateContext* pCxt, SCreateTableStmt* pStmt } static int32_t 
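// Sketch (illustrative, not part of the patch): the old translateWhere took SNode* and internally did
// translateExpr(pCxt, &pWhere), i.e. it passed the address of its own local copy, so a node
// replacement (such as the folding above) was silently lost. The SNode** signature threads the
// caller's slot all the way down; in translateSelect this becomes:
int32_t code = translateWhere(pCxt, &pSelect->pWhere);  // pSelect->pWhere itself may be rewritten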
checkCreateTable(STranslateContext* pCxt, SCreateTableStmt* pStmt) { - int32_t code = checkRangeOption(pCxt, "delay", pStmt->pOptions->delay, TSDB_MIN_ROLLUP_DELAY, TSDB_MAX_ROLLUP_DELAY); - if (TSDB_CODE_SUCCESS == code) { - code = checTableFactorOption(pCxt, pStmt->pOptions->filesFactor); - } + int32_t code = checTableFactorOption(pCxt, pStmt->pOptions->filesFactor); if (TSDB_CODE_SUCCESS == code) { code = checkTableRollupOption(pCxt, pStmt->pOptions->pRollupFuncs); } @@ -2585,6 +2646,11 @@ static int32_t checkCreateTable(STranslateContext* pCxt, SCreateTableStmt* pStmt if (TSDB_CODE_SUCCESS == code) { code = checkTableSchema(pCxt, pStmt); } + if (TSDB_CODE_SUCCESS == code) { + if(pCxt->pParseCxt->schemalessType == 0){ + code = isNotSchemalessDb(pCxt->pParseCxt, pStmt->dbName); + } + } return code; } @@ -2797,7 +2863,7 @@ static int32_t buildRollupAst(STranslateContext* pCxt, SCreateTableStmt* pStmt, for (int32_t i = 1; i < num; ++i) { SRetention* pRetension = taosArrayGet(dbCfg.pRetensions, i); STranslateContext cxt = {0}; - initTranslateContext(pCxt->pParseCxt, &cxt); + initTranslateContext(pCxt->pParseCxt, pCxt->pMetaCache, &cxt); code = getRollupAst(&cxt, pStmt, pRetension, dbCfg.precision, 1 == i ? &pReq->pAst1 : &pReq->pAst2, 1 == i ? &pReq->ast1Len : &pReq->ast2Len); destroyTranslateContext(&cxt); @@ -2813,7 +2879,6 @@ static int32_t buildRollupAst(STranslateContext* pCxt, SCreateTableStmt* pStmt, static int32_t buildCreateStbReq(STranslateContext* pCxt, SCreateTableStmt* pStmt, SMCreateStbReq* pReq) { pReq->igExists = pStmt->ignoreExists; pReq->xFilesFactor = pStmt->pOptions->filesFactor; - pReq->delay = pStmt->pOptions->delay; pReq->ttl = pStmt->pOptions->ttl; columnDefNodeToField(pStmt->pCols, &pReq->pColumns); columnDefNodeToField(pStmt->pTags, &pReq->pTags); @@ -3239,9 +3304,6 @@ static int32_t buildCreateTopicReq(STranslateContext* pCxt, SCreateTopicStmt* pS tNameSetDbName(&name, pCxt->pParseCxt->acctId, pStmt->topicName, strlen(pStmt->topicName)); tNameGetFullDbName(&name, pReq->name); pReq->igExists = pStmt->ignoreExists; - pReq->withTbName = pStmt->pOptions->withTable; - pReq->withSchema = pStmt->pOptions->withSchema; - pReq->withTag = pStmt->pOptions->withTag; pReq->sql = strdup(pCxt->pParseCxt->pSql); if (NULL == pReq->sql) { @@ -3250,19 +3312,26 @@ static int32_t buildCreateTopicReq(STranslateContext* pCxt, SCreateTopicStmt* pS int32_t code = TSDB_CODE_SUCCESS; - const char* dbName; - if (NULL != pStmt->pQuery) { - dbName = ((SRealTableNode*)(((SSelectStmt*)pStmt->pQuery)->pFromTable))->table.dbName; + if ('\0' != pStmt->subSTbName[0]) { + pReq->subType = TOPIC_SUB_TYPE__TABLE; + toName(pCxt->pParseCxt->acctId, pStmt->subDbName, pStmt->subSTbName, &name); + tNameGetFullDbName(&name, pReq->subDbName); + tNameExtractFullName(&name, pReq->subStbName); + } else if ('\0' != pStmt->subDbName[0]) { + pReq->subType = TOPIC_SUB_TYPE__DB; + tNameSetDbName(&name, pCxt->pParseCxt->acctId, pStmt->subDbName, strlen(pStmt->subDbName)); + tNameGetFullDbName(&name, pReq->subDbName); + } else { + pReq->subType = TOPIC_SUB_TYPE__COLUMN; + char* dbName = ((SRealTableNode*)(((SSelectStmt*)pStmt->pQuery)->pFromTable))->table.dbName; + tNameSetDbName(&name, pCxt->pParseCxt->acctId, dbName, strlen(dbName)); + tNameGetFullDbName(&name, pReq->subDbName); pCxt->pParseCxt->topicQuery = true; code = translateQuery(pCxt, pStmt->pQuery); if (TSDB_CODE_SUCCESS == code) { code = nodesNodeToString(pStmt->pQuery, false, &pReq->ast, NULL); } - } else { - dbName = pStmt->subscribeDbName; } - 
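// Sketch (illustrative, not part of the patch): buildCreateTopicReq now records what a topic
// subscribes to instead of the removed withTbName/withSchema/withTag flags. The three subscription
// shapes are mutually exclusive and are picked in priority order from the parsed statement (the SQL
// spellings in the comments are assumptions based on the field names):
if ('\0' != pStmt->subSTbName[0]) {
  pReq->subType = TOPIC_SUB_TYPE__TABLE;   // subscribe to one super table
} else if ('\0' != pStmt->subDbName[0]) {
  pReq->subType = TOPIC_SUB_TYPE__DB;      // subscribe to a whole database
} else {
  pReq->subType = TOPIC_SUB_TYPE__COLUMN;  // subscribe to a query; pStmt->pQuery is serialized into pReq->ast
}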
tNameSetDbName(&name, pCxt->pParseCxt->acctId, dbName, strlen(dbName)); - tNameGetFullDbName(&name, pReq->subscribeDbName); return code; } @@ -3317,7 +3386,7 @@ static int32_t translateDropCGroup(STranslateContext* pCxt, SDropCGroupStmt* pSt dropReq.igNotExists = pStmt->ignoreNotExists; strcpy(dropReq.cgroup, pStmt->cgroup); - return buildCmdMsg(pCxt, TDMT_MND_DROP_CGROUP, (FSerializeFunc)tSerializeSMDropCgroupReq, &dropReq); + return buildCmdMsg(pCxt, TDMT_MND_MQ_DROP_CGROUP, (FSerializeFunc)tSerializeSMDropCgroupReq, &dropReq); } static int32_t translateAlterLocal(STranslateContext* pCxt, SAlterLocalStmt* pStmt) { @@ -4097,8 +4166,8 @@ static int32_t rewriteCreateTable(STranslateContext* pCxt, SQuery* pQuery) { return code; } -static void addCreateTbReqIntoVgroup(int32_t acctId, SHashObj* pVgroupHashmap, SCreateSubTableClause* pStmt, SKVRow row, - uint64_t suid, SVgroupInfo* pVgInfo) { +static void addCreateTbReqIntoVgroup(int32_t acctId, SHashObj* pVgroupHashmap, SCreateSubTableClause* pStmt, + const STag* pTag, uint64_t suid, SVgroupInfo* pVgInfo) { char dbFName[TSDB_DB_FNAME_LEN] = {0}; SName name = {.type = TSDB_DB_NAME_T, .acctId = acctId}; strcpy(name.dbname, pStmt->dbName); @@ -4108,7 +4177,7 @@ static void addCreateTbReqIntoVgroup(int32_t acctId, SHashObj* pVgroupHashmap, S req.type = TD_CHILD_TABLE; req.name = strdup(pStmt->tableName); req.ctb.suid = suid; - req.ctb.pTag = row; + req.ctb.pTag = (uint8_t*)pTag; if (pStmt->ignoreExists) { req.flags |= TD_CREATE_IF_NOT_EXISTS; } @@ -4128,24 +4197,6 @@ static void addCreateTbReqIntoVgroup(int32_t acctId, SHashObj* pVgroupHashmap, S } } -static int32_t addValToKVRow(STranslateContext* pCxt, SValueNode* pVal, const SSchema* pSchema, - SKVRowBuilder* pBuilder) { - if (pSchema->type == TSDB_DATA_TYPE_JSON) { - if (pVal->literal && strlen(pVal->literal) > (TSDB_MAX_JSON_TAG_LEN - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE) { - return buildSyntaxErrMsg(&pCxt->msgBuf, "json string too long than 4095", pVal->literal); - } - - return parseJsontoTagData(pVal->literal, pBuilder, &pCxt->msgBuf, pSchema->colId); - } - - if (pVal->node.resType.type != TSDB_DATA_TYPE_NULL) { - tdAddColToKVRow(pBuilder, pSchema->colId, nodesGetValueFromNode(pVal), - IS_VAR_DATA_TYPE(pSchema->type) ? 
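// Sketch (illustrative, not part of the patch): the tag payload of a create-table request is no
// longer a SKVRow assembled column by column; it is an STag serialized once from an array of STagVal.
// A minimal non-JSON example with a single INT tag (the column id 1 and value 42 are hypothetical):
SArray* pTagArray = taosArrayInit(1, sizeof(STagVal));
int32_t v = 42;                                       // hypothetical tag value
STagVal tv = {.cid = 1, .type = TSDB_DATA_TYPE_INT};  // cid comes from the stable's tag schema
memcpy(&tv.i64, &v, sizeof(v));                       // fixed-size values are stored inline in i64
taosArrayPush(pTagArray, &tv);
STag* pTag = NULL;
int32_t code = tTagNew(pTagArray, 1, false, &pTag);   // same call shape as below; false = not a JSON tag
taosArrayDestroy(pTagArray);
// on success pTag travels in SVCreateTbReq.ctb.pTag, where a SKVRow used to go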
varDataTLen(pVal->datum.p) : TYPE_BYTES[pSchema->type]); - } - - return TSDB_CODE_SUCCESS; -} - static int32_t createValueFromFunction(STranslateContext* pCxt, SFunctionNode* pFunc, SValueNode** pVal) { int32_t code = getFuncInfo(pCxt, pFunc); if (TSDB_CODE_SUCCESS == code) { @@ -4173,15 +4224,22 @@ static int32_t translateTagVal(STranslateContext* pCxt, uint8_t precision, SSche } static int32_t buildKVRowForBindTags(STranslateContext* pCxt, SCreateSubTableClause* pStmt, STableMeta* pSuperTableMeta, - SKVRowBuilder* pBuilder) { + STag** ppTag) { int32_t numOfTags = getNumOfTags(pSuperTableMeta); if (LIST_LENGTH(pStmt->pValsOfTags) != LIST_LENGTH(pStmt->pSpecificTags) || numOfTags < LIST_LENGTH(pStmt->pValsOfTags)) { return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_TAGS_NOT_MATCHED); } + SArray* pTagArray = taosArrayInit(LIST_LENGTH(pStmt->pValsOfTags), sizeof(STagVal)); + if (!pTagArray) { + return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_TSC_OUT_OF_MEMORY); + } + int32_t code = TSDB_CODE_SUCCESS; + int16_t nTags = 0, nBufPos = 0; SSchema* pTagSchema = getTableTagSchema(pSuperTableMeta); - SNode * pTag, *pNode; + SNode * pTag = NULL, *pNode = NULL; + bool isJson = false; FORBOTH(pTag, pStmt->pSpecificTags, pNode, pStmt->pValsOfTags) { SColumnNode* pCol = (SColumnNode*)pTag; SSchema* pSchema = NULL; @@ -4192,56 +4250,125 @@ static int32_t buildKVRowForBindTags(STranslateContext* pCxt, SCreateSubTableCla } } if (NULL == pSchema) { - return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_TAG_NAME, pCol->colName); + code = generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_TAG_NAME, pCol->colName); + goto end; } SValueNode* pVal = NULL; - int32_t code = translateTagVal(pCxt, pSuperTableMeta->tableInfo.precision, pSchema, pNode, &pVal); - if (TSDB_CODE_SUCCESS == code) { - if (NULL == pVal) { - pVal = (SValueNode*)pNode; - } else { - REPLACE_LIST2_NODE(pVal); - } + code = translateTagVal(pCxt, pSuperTableMeta->tableInfo.precision, pSchema, pNode, &pVal); + if (TSDB_CODE_SUCCESS != code) { + goto end; } - if (TSDB_CODE_SUCCESS == code) { - code = addValToKVRow(pCxt, pVal, pSchema, pBuilder); + + if (NULL == pVal) { + pVal = (SValueNode*)pNode; + } else { + REPLACE_LIST2_NODE(pVal); } - if (TSDB_CODE_SUCCESS != code) { - return code; + if (pSchema->type == TSDB_DATA_TYPE_JSON) { + if (pVal->literal && strlen(pVal->literal) > (TSDB_MAX_JSON_TAG_LEN - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE) { + code = buildSyntaxErrMsg(&pCxt->msgBuf, "json string too long than 4095", pVal->literal); + goto end; + } + + isJson = true; + code = parseJsontoTagData(pVal->literal, pTagArray, ppTag, &pCxt->msgBuf); + if(code != TSDB_CODE_SUCCESS){ + goto end; + } + }else if (pVal->node.resType.type != TSDB_DATA_TYPE_NULL) { + void* nodeVal = nodesGetValueFromNode(pVal); + STagVal val = {.cid = pSchema->colId, .type = pSchema->type}; + if (IS_VAR_DATA_TYPE(pSchema->type)) { + val.pData = varDataVal(nodeVal); + val.nData = varDataLen(nodeVal); + } else { + memcpy(&val.i64, nodeVal, pSchema->bytes); + } + taosArrayPush(pTagArray, &val); + } } + if(!isJson) code = tTagNew(pTagArray, 1, false, ppTag); + +end: + if(isJson){ + for (int i = 0; i < taosArrayGetSize(pTagArray); ++i) { + STagVal *p = (STagVal *)taosArrayGet(pTagArray, i); + if(IS_VAR_DATA_TYPE(p->type)){ + taosMemoryFree(p->pData); + } + } + } + taosArrayDestroy(pTagArray); - return TSDB_CODE_SUCCESS; + return code; } static int32_t buildKVRowForAllTags(STranslateContext* pCxt, SCreateSubTableClause* pStmt, STableMeta* pSuperTableMeta, -
SKVRowBuilder* pBuilder) { + STag** ppTag) { if (getNumOfTags(pSuperTableMeta) != LIST_LENGTH(pStmt->pValsOfTags)) { return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_TAGS_NOT_MATCHED); } - SSchema* pTagSchema = getTableTagSchema(pSuperTableMeta); + SSchema* pTagSchemas = getTableTagSchema(pSuperTableMeta); SNode* pNode; + int32_t code = TSDB_CODE_SUCCESS; int32_t index = 0; + SArray* pTagArray = taosArrayInit(LIST_LENGTH(pStmt->pValsOfTags), sizeof(STagVal)); + if (!pTagArray) { + return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_TSC_OUT_OF_MEMORY); + } + + bool isJson = false; FOREACH(pNode, pStmt->pValsOfTags) { SValueNode* pVal = NULL; - int32_t code = translateTagVal(pCxt, pSuperTableMeta->tableInfo.precision, pTagSchema + index, pNode, &pVal); - if (TSDB_CODE_SUCCESS == code) { - if (NULL == pVal) { - pVal = (SValueNode*)pNode; + SSchema* pTagSchema = pTagSchemas + index; + code = translateTagVal(pCxt, pSuperTableMeta->tableInfo.precision, pTagSchema, pNode, &pVal); + if (TSDB_CODE_SUCCESS != code) { + goto end; + } + if (NULL == pVal) { + pVal = (SValueNode*)pNode; + } else { + REPLACE_NODE(pVal); + } + if (pTagSchema->type == TSDB_DATA_TYPE_JSON) { + if (pVal->literal && strlen(pVal->literal) > (TSDB_MAX_JSON_TAG_LEN - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE) { + code = buildSyntaxErrMsg(&pCxt->msgBuf, "json string too long than 4095", pVal->literal); + goto end; + } + + isJson = true; + code = parseJsontoTagData(pVal->literal, pTagArray, ppTag, &pCxt->msgBuf); + if(code != TSDB_CODE_SUCCESS){ + goto end; + } + }else if (pVal->node.resType.type != TSDB_DATA_TYPE_NULL) { + char* tmpVal = nodesGetValueFromNode(pVal); + STagVal val = {.cid = pTagSchema->colId, .type = pTagSchema->type}; + if (IS_VAR_DATA_TYPE(pTagSchema->type)) { + val.pData = varDataVal(tmpVal); + val.nData = varDataLen(tmpVal); } else { - REPLACE_NODE(pVal); + memcpy(&val.i64, tmpVal, pTagSchema->bytes); } + taosArrayPush(pTagArray, &val); } - if (TSDB_CODE_SUCCESS == code) { - code = addValToKVRow(pCxt, pVal, pTagSchema + index++, pBuilder); - } - if (TSDB_CODE_SUCCESS != code) { - return code; + ++index; + } + if(!isJson) code = tTagNew(pTagArray, 1, false, ppTag); + +end: + if(isJson){ + for (int i = 0; i < taosArrayGetSize(pTagArray); ++i) { + STagVal *p = (STagVal *)taosArrayGet(pTagArray, i); + if(IS_VAR_DATA_TYPE(p->type)){ + taosMemoryFree(p->pData); + } } } - return TSDB_CODE_SUCCESS; + taosArrayDestroy(pTagArray); + return code; } static int32_t checkCreateSubTable(STranslateContext* pCxt, SCreateSubTableClause* pStmt) { @@ -4258,26 +4385,13 @@ static int32_t rewriteCreateSubTable(STranslateContext* pCxt, SCreateSubTableCla code = getTableMeta(pCxt, pStmt->useDbName, pStmt->useTableName, &pSuperTableMeta); } - SKVRowBuilder kvRowBuilder = {0}; - if (TSDB_CODE_SUCCESS == code) { - code = tdInitKVRowBuilder(&kvRowBuilder); - } + STag* pTag = NULL; if (TSDB_CODE_SUCCESS == code) { if (NULL != pStmt->pSpecificTags) { - code = buildKVRowForBindTags(pCxt, pStmt, pSuperTableMeta, &kvRowBuilder); - } else { - code = buildKVRowForAllTags(pCxt, pStmt, pSuperTableMeta, &kvRowBuilder); - } - } - - SKVRow row = NULL; - if (TSDB_CODE_SUCCESS == code) { - row = tdGetKVRowFromBuilder(&kvRowBuilder); - if (NULL == row) { - code = TSDB_CODE_OUT_OF_MEMORY; + code = buildKVRowForBindTags(pCxt, pStmt, pSuperTableMeta, &pTag); } else { - tdSortKVRowByColIdx(row); + code = buildKVRowForAllTags(pCxt, pStmt, pSuperTableMeta, &pTag); } } @@ -4286,11 +4400,10 @@ static int32_t rewriteCreateSubTable(STranslateContext* pCxt, 
SCreateSubTableCla code = getTableHashVgroup(pCxt, pStmt->dbName, pStmt->tableName, &info); } if (TSDB_CODE_SUCCESS == code) { - addCreateTbReqIntoVgroup(pCxt->pParseCxt->acctId, pVgroupHashmap, pStmt, row, pSuperTableMeta->uid, &info); + addCreateTbReqIntoVgroup(pCxt->pParseCxt->acctId, pVgroupHashmap, pStmt, pTag, pSuperTableMeta->uid, &info); } taosMemoryFreeClear(pSuperTableMeta); - tdDestroyKVRowBuilder(&kvRowBuilder); return code; } @@ -4316,6 +4429,7 @@ static SArray* serializeVgroupsCreateTableBatch(int32_t acctId, SHashObj* pVgrou } static int32_t rewriteCreateMultiTable(STranslateContext* pCxt, SQuery* pQuery) { + SCreateMultiTableStmt* pStmt = (SCreateMultiTableStmt*)pQuery->pRoot; SHashObj* pVgroupHashmap = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), false, HASH_NO_LOCK); @@ -4326,6 +4440,10 @@ static int32_t rewriteCreateMultiTable(STranslateContext* pCxt, SQuery* pQuery) int32_t code = TSDB_CODE_SUCCESS; SNode* pNode; FOREACH(pNode, pStmt->pSubTables) { + if(pCxt->pParseCxt->schemalessType == 0 && + (code = isNotSchemalessDb(pCxt->pParseCxt, ((SCreateSubTableClause*)pNode)->dbName)) != TSDB_CODE_SUCCESS){ + return code; + } code = rewriteCreateSubTable(pCxt, (SCreateSubTableClause*)pNode, pVgroupHashmap); if (TSDB_CODE_SUCCESS != code) { taosHashCleanup(pVgroupHashmap); @@ -4512,37 +4630,41 @@ static int32_t buildUpdateTagValReq(STranslateContext* pCxt, SAlterTableStmt* pS pReq->isNull = (TSDB_DATA_TYPE_NULL == pStmt->pVal->node.resType.type); if (pStmt->pVal->node.resType.type == TSDB_DATA_TYPE_JSON) { - SKVRowBuilder kvRowBuilder = {0}; - int32_t code = tdInitKVRowBuilder(&kvRowBuilder); - - if (TSDB_CODE_SUCCESS != code) { - return TSDB_CODE_OUT_OF_MEMORY; - } if (pStmt->pVal->literal && strlen(pStmt->pVal->literal) > (TSDB_MAX_JSON_TAG_LEN - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE) { return buildSyntaxErrMsg(&pCxt->msgBuf, "json string too long than 4095", pStmt->pVal->literal); } - - code = parseJsontoTagData(pStmt->pVal->literal, &kvRowBuilder, &pCxt->msgBuf, pSchema->colId); - if (TSDB_CODE_SUCCESS != code) { - return code; + SArray *pTagVals = taosArrayInit(1, sizeof(STagVal)); + int32_t code = TSDB_CODE_SUCCESS; + STag* pTag = NULL; + do{ + code = parseJsontoTagData(pStmt->pVal->literal, pTagVals, &pTag, &pCxt->msgBuf); + if (TSDB_CODE_SUCCESS != code) { + break; + } + }while(0); + for (int i = 0; i < taosArrayGetSize(pTagVals); ++i) { + STagVal *p = (STagVal *)taosArrayGet(pTagVals, i); + if(IS_VAR_DATA_TYPE(p->type)){ + taosMemoryFree(p->pData); + } } - - SKVRow row = tdGetKVRowFromBuilder(&kvRowBuilder); - if (NULL == row) { - tdDestroyKVRowBuilder(&kvRowBuilder); - return TSDB_CODE_OUT_OF_MEMORY; + taosArrayDestroy(pTagVals); + if (code != TSDB_CODE_SUCCESS){ + return code; } - pReq->nTagVal = kvRowLen(row); - pReq->pTagVal = row; - pStmt->pVal->datum.p = row; // for free - tdDestroyKVRowBuilder(&kvRowBuilder); + pReq->nTagVal = pTag->len; + pReq->pTagVal = (uint8_t *)pTag; + pStmt->pVal->datum.p = (char*)pTag; // for free } else { pReq->nTagVal = pStmt->pVal->node.resType.bytes; - if (TSDB_DATA_TYPE_NCHAR == pStmt->pVal->node.resType.type) { - pReq->nTagVal = pReq->nTagVal * TSDB_NCHAR_SIZE; - } pReq->pTagVal = nodesGetValueFromNode(pStmt->pVal); + + // data and length are seperated for new tag format STagVal + if (IS_VAR_DATA_TYPE(pStmt->pVal->node.resType.type)) { + pReq->nTagVal = varDataLen(pReq->pTagVal); + pReq->pTagVal = varDataVal(pReq->pTagVal); + } } return TSDB_CODE_SUCCESS; @@ -4733,9 +4855,14 @@ static int32_t 
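// Sketch (illustrative, not part of the patch): with STagVal, length and bytes travel separately, so
// buildUpdateTagValReq strips the 2-byte varstring header from VARCHAR/NCHAR values instead of
// shipping the whole header-prefixed buffer. The layout behind the varData macros used there:
char buf[VARSTR_HEADER_SIZE + 3];
varDataSetLen(buf, 3);
memcpy(varDataVal(buf), "abc", 3);
// varDataLen(buf) == 3 and varDataVal(buf) points past the header, so the request
// carries (nTagVal = 3, pTagVal -> "abc"), matching STagVal's nData/pData split.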
buildModifyVnodeArray(STranslateContext* pCxt, SAlterTableStmt* p static int32_t rewriteAlterTable(STranslateContext* pCxt, SQuery* pQuery) { SAlterTableStmt* pStmt = (SAlterTableStmt*)pQuery->pRoot; + int32_t code = TSDB_CODE_SUCCESS; + if(pCxt->pParseCxt->schemalessType == 0 && + (code = isNotSchemalessDb(pCxt->pParseCxt, pStmt->dbName)) != TSDB_CODE_SUCCESS){ + return code; + } STableMeta* pTableMeta = NULL; - int32_t code = getTableMeta(pCxt, pStmt->dbName, pStmt->tableName, &pTableMeta); + code = getTableMeta(pCxt, pStmt->dbName, pStmt->tableName, &pTableMeta); if (TSDB_CODE_SUCCESS != code) { return code; } @@ -4824,6 +4951,47 @@ static int32_t rewriteQuery(STranslateContext* pCxt, SQuery* pQuery) { return code; } +static int32_t toMsgType(ENodeType type) { + switch (type) { + case QUERY_NODE_CREATE_TABLE_STMT: + return TDMT_VND_CREATE_TABLE; + case QUERY_NODE_ALTER_TABLE_STMT: + return TDMT_VND_ALTER_TABLE; + case QUERY_NODE_DROP_TABLE_STMT: + return TDMT_VND_DROP_TABLE; + default: + break; + } + return TDMT_VND_CREATE_TABLE; +} + +static int32_t setRefreshMate(STranslateContext* pCxt, SQuery* pQuery) { + if (NULL != pCxt->pDbs) { + pQuery->pDbList = taosArrayInit(taosHashGetSize(pCxt->pDbs), TSDB_DB_FNAME_LEN); + if (NULL == pQuery->pDbList) { + return TSDB_CODE_OUT_OF_MEMORY; + } + SFullDatabaseName* pDb = taosHashIterate(pCxt->pDbs, NULL); + while (NULL != pDb) { + taosArrayPush(pQuery->pDbList, pDb->fullDbName); + pDb = taosHashIterate(pCxt->pDbs, pDb); + } + } + + if (NULL != pCxt->pTables) { + pQuery->pTableList = taosArrayInit(taosHashGetSize(pCxt->pTables), sizeof(SName)); + if (NULL == pQuery->pTableList) { + return TSDB_CODE_OUT_OF_MEMORY; + } + SName* pTable = taosHashIterate(pCxt->pTables, NULL); + while (NULL != pTable) { + taosArrayPush(pQuery->pTableList, pTable); + pTable = taosHashIterate(pCxt->pTables, pTable); + } + } + return TSDB_CODE_SUCCESS; +} + static int32_t setQuery(STranslateContext* pCxt, SQuery* pQuery) { switch (nodeType(pQuery->pRoot)) { case QUERY_NODE_SELECT_STMT: @@ -4835,7 +5003,7 @@ static int32_t setQuery(STranslateContext* pCxt, SQuery* pQuery) { break; case QUERY_NODE_VNODE_MODIF_STMT: pQuery->execMode = QUERY_EXEC_MODE_SCHEDULE; - pQuery->msgType = TDMT_VND_CREATE_TABLE; + pQuery->msgType = toMsgType(((SVnodeModifOpStmt*)pQuery->pRoot)->sqlNodeType); break; case QUERY_NODE_DESCRIBE_STMT: pQuery->execMode = QUERY_EXEC_MODE_LOCAL; @@ -4863,37 +5031,13 @@ static int32_t setQuery(STranslateContext* pCxt, SQuery* pQuery) { } } - if (NULL != pCxt->pDbs) { - pQuery->pDbList = taosArrayInit(taosHashGetSize(pCxt->pDbs), TSDB_DB_FNAME_LEN); - if (NULL == pQuery->pDbList) { - return TSDB_CODE_OUT_OF_MEMORY; - } - SFullDatabaseName* pDb = taosHashIterate(pCxt->pDbs, NULL); - while (NULL != pDb) { - taosArrayPush(pQuery->pDbList, pDb->fullDbName); - pDb = taosHashIterate(pCxt->pDbs, pDb); - } - } - - if (NULL != pCxt->pTables) { - pQuery->pTableList = taosArrayInit(taosHashGetSize(pCxt->pTables), sizeof(SName)); - if (NULL == pQuery->pTableList) { - return TSDB_CODE_OUT_OF_MEMORY; - } - SName* pTable = taosHashIterate(pCxt->pTables, NULL); - while (NULL != pTable) { - taosArrayPush(pQuery->pTableList, pTable); - pTable = taosHashIterate(pCxt->pTables, pTable); - } - } - return TSDB_CODE_SUCCESS; } int32_t translate(SParseContext* pParseCxt, SQuery* pQuery) { STranslateContext cxt = {0}; - int32_t code = initTranslateContext(pParseCxt, &cxt); + int32_t code = initTranslateContext(pParseCxt, pQuery->pMetaCache, &cxt); if (TSDB_CODE_SUCCESS == code) { code = 
fmFuncMgtInit(); } @@ -4906,6 +5050,7 @@ int32_t translate(SParseContext* pParseCxt, SQuery* pQuery) { if (TSDB_CODE_SUCCESS == code) { code = setQuery(&cxt, pQuery); } + setRefreshMate(&cxt, pQuery); destroyTranslateContext(&cxt); return code; } diff --git a/source/libs/parser/src/parUtil.c b/source/libs/parser/src/parUtil.c index ff206c1a764f4dfb0dcd19cb53b718e467a6d64e..0a1915d6c2fc5c1c776f6e991a2e39ee6d8a9aa3 100644 --- a/source/libs/parser/src/parUtil.c +++ b/source/libs/parser/src/parUtil.c @@ -15,6 +15,9 @@ #include "parUtil.h" #include "cJSON.h" +#include "querynodes.h" + +#define USER_AUTH_KEY_MAX_LEN TSDB_USER_LEN + TSDB_DB_FNAME_LEN + 2 static char* getSyntaxErrFormat(int32_t errCode) { switch (errCode) { @@ -255,17 +258,8 @@ STableComInfo getTableInfo(const STableMeta* pTableMeta) { return pTableMeta->tableInfo; } -static uint32_t getTableMetaSize(const STableMeta* pTableMeta) { - int32_t totalCols = 0; - if (pTableMeta->tableInfo.numOfColumns >= 0) { - totalCols = pTableMeta->tableInfo.numOfColumns + pTableMeta->tableInfo.numOfTags; - } - - return sizeof(STableMeta) + totalCols * sizeof(SSchema); -} - STableMeta* tableMetaDup(const STableMeta* pTableMeta) { - size_t size = getTableMetaSize(pTableMeta); + size_t size = TABLE_META_SIZE(pTableMeta); STableMeta* p = taosMemoryMalloc(size); memcpy(p, pTableMeta, size); @@ -328,33 +322,35 @@ static bool isValidateTag(char* input) { return true; } -int parseJsontoTagData(const char* json, SKVRowBuilder* kvRowBuilder, SMsgBuf* pMsgBuf, int16_t startColId) { +int32_t parseJsontoTagData(const char* json, SArray* pTagVals, STag **ppTag, SMsgBuf* pMsgBuf) { + int32_t retCode = TSDB_CODE_SUCCESS; + cJSON* root = NULL; + SHashObj* keyHash = NULL; + int32_t size = 0; // set json NULL data - uint8_t jsonNULL = TSDB_DATA_TYPE_NULL; - int jsonIndex = startColId + 1; - if (!json || strtrim((char*)json) == 0 ||strcasecmp(json, TSDB_DATA_NULL_STR_L) == 0) { - tdAddColToKVRow(kvRowBuilder, jsonIndex, &jsonNULL, CHAR_BYTES); - return TSDB_CODE_SUCCESS; + if (!json || strtrim((char*)json) == 0 || strcasecmp(json, TSDB_DATA_NULL_STR_L) == 0) { + retCode = TSDB_CODE_SUCCESS; + goto end; } // set json real data - cJSON* root = cJSON_Parse(json); + root = cJSON_Parse(json); if (root == NULL) { - return buildSyntaxErrMsg(pMsgBuf, "json parse error", json); + retCode = buildSyntaxErrMsg(pMsgBuf, "json parse error", json); + goto end; } - int size = cJSON_GetArraySize(root); + size = cJSON_GetArraySize(root); if (!cJSON_IsObject(root)) { - return buildSyntaxErrMsg(pMsgBuf, "json error invalide value", json); + retCode = buildSyntaxErrMsg(pMsgBuf, "json error invalide value", json); + goto end; } - int retCode = 0; - char* tagKV = NULL; - SHashObj* keyHash = taosHashInit(8, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, false); - for (int i = 0; i < size; i++) { + keyHash = taosHashInit(8, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, false); + for (int32_t i = 0; i < size; i++) { cJSON* item = cJSON_GetArrayItem(root, i); if (!item) { - qError("json inner error:%d", i); + uError("json inner error:%d", i); retCode = buildSyntaxErrMsg(pMsgBuf, "json inner error", json); goto end; } @@ -365,86 +361,467 @@ int parseJsontoTagData(const char* json, SKVRowBuilder* kvRowBuilder, SMsgBuf* p goto end; } size_t keyLen = strlen(jsonKey); - if(keyLen > TSDB_MAX_JSON_KEY_LEN){ - qError("json key too long error"); - retCode = buildSyntaxErrMsg(pMsgBuf, "json key too long, more than 256", jsonKey); + if (keyLen > TSDB_MAX_JSON_KEY_LEN) { + 
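// Sketch (illustrative, not part of the patch): parseJsontoTagData now emits one STagVal per JSON
// member instead of appending to a SKVRowBuilder. The key is kept by name in val.pKey, and the value
// is mapped by JSON type: cJSON_String -> TSDB_DATA_TYPE_NCHAR (UCS-4 bytes in pData/nData),
// cJSON_Number -> TSDB_DATA_TYPE_DOUBLE (inline in i64), cJSON_True/False -> TSDB_DATA_TYPE_BOOL,
// cJSON_NULL -> TSDB_DATA_TYPE_NULL. For a numeric member (names mirror the surrounding loop):
STagVal val = {0};
val.pKey = jsonKey;                        // JSON tags are keyed by name, not by column id
val.type = TSDB_DATA_TYPE_DOUBLE;
*((double*)&val.i64) = item->valuedouble;  // inline storage, no separate buffer
taosArrayPush(pTagVals, &val);
// the accumulated array is serialized once at the end via tTagNew(pTagVals, 1, true, ppTag)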
uError("json key too long error"); + retCode = buildSyntaxErrMsg(pMsgBuf, "json key too long, more than 256", jsonKey); goto end; } if (keyLen == 0 || taosHashGet(keyHash, jsonKey, keyLen) != NULL) { continue; } - // key: keyLen + VARSTR_HEADER_SIZE, value type: CHAR_BYTES, value reserved: DOUBLE_BYTES - tagKV = taosMemoryCalloc(keyLen + VARSTR_HEADER_SIZE + CHAR_BYTES + DOUBLE_BYTES, 1); - if (!tagKV) { - retCode = TSDB_CODE_TSC_OUT_OF_MEMORY; - goto end; - } - strncpy(varDataVal(tagKV), jsonKey, keyLen); - varDataSetLen(tagKV, keyLen); - if (taosHashGetSize(keyHash) == 0) { - uint8_t jsonNotNULL = TSDB_DATA_TYPE_JSON; - tdAddColToKVRow(kvRowBuilder, jsonIndex++, &jsonNotNULL, CHAR_BYTES); // add json type - } - taosHashPut(keyHash, jsonKey, keyLen, &keyLen, - CHAR_BYTES); // add key to hash to remove dumplicate, value is useless + STagVal val = {0}; + val.pKey = jsonKey; + taosHashPut(keyHash, jsonKey, keyLen, &keyLen, CHAR_BYTES); // add key to hash to remove dumplicate, value is useless if (item->type == cJSON_String) { // add json value format: type|data char* jsonValue = item->valuestring; int32_t valLen = (int32_t)strlen(jsonValue); - int32_t totalLen = keyLen + VARSTR_HEADER_SIZE + valLen * TSDB_NCHAR_SIZE + VARSTR_HEADER_SIZE + CHAR_BYTES; - char* tmp = taosMemoryRealloc(tagKV, totalLen); + char* tmp = taosMemoryCalloc(1, valLen * TSDB_NCHAR_SIZE); if (!tmp) { retCode = TSDB_CODE_TSC_OUT_OF_MEMORY; goto end; } - tagKV = tmp; - char* valueType = POINTER_SHIFT(tagKV, keyLen + VARSTR_HEADER_SIZE); - char* valueData = POINTER_SHIFT(tagKV, keyLen + VARSTR_HEADER_SIZE + CHAR_BYTES); - *valueType = TSDB_DATA_TYPE_NCHAR; - if (valLen > 0 && !taosMbsToUcs4(jsonValue, valLen, (TdUcs4*)varDataVal(valueData), + val.type = TSDB_DATA_TYPE_NCHAR; + if (valLen > 0 && !taosMbsToUcs4(jsonValue, valLen, (TdUcs4*)tmp, (int32_t)(valLen * TSDB_NCHAR_SIZE), &valLen)) { - qError("charset:%s to %s. val:%s, errno:%s, convert failed.", DEFAULT_UNICODE_ENCODEC, tsCharset, jsonValue, + uError("charset:%s to %s. 
val:%s, errno:%s, convert failed.", DEFAULT_UNICODE_ENCODEC, tsCharset, jsonValue, strerror(errno)); retCode = buildSyntaxErrMsg(pMsgBuf, "charset convert json error", jsonValue); goto end; } - - varDataSetLen(valueData, valLen); - tdAddColToKVRow(kvRowBuilder, jsonIndex++, tagKV, totalLen); + val.nData = valLen; + val.pData = tmp; } else if (item->type == cJSON_Number) { if (!isfinite(item->valuedouble)) { - qError("json value is invalidate"); + uError("json value is invalid"); retCode = buildSyntaxErrMsg(pMsgBuf, "json value number is illegal", json); goto end; } - char* valueType = POINTER_SHIFT(tagKV, keyLen + VARSTR_HEADER_SIZE); - char* valueData = POINTER_SHIFT(tagKV, keyLen + VARSTR_HEADER_SIZE + CHAR_BYTES); - *valueType = TSDB_DATA_TYPE_DOUBLE; - *((double*)valueData) = item->valuedouble; - tdAddColToKVRow(kvRowBuilder, jsonIndex++, tagKV, keyLen + VARSTR_HEADER_SIZE + CHAR_BYTES + DOUBLE_BYTES); + val.type = TSDB_DATA_TYPE_DOUBLE; + *((double*)&(val.i64)) = item->valuedouble; } else if (item->type == cJSON_True || item->type == cJSON_False) { - char* valueType = POINTER_SHIFT(tagKV, keyLen + VARSTR_HEADER_SIZE); - char* valueData = POINTER_SHIFT(tagKV, keyLen + VARSTR_HEADER_SIZE + CHAR_BYTES); - *valueType = TSDB_DATA_TYPE_BOOL; - *valueData = (char)(item->valueint); - tdAddColToKVRow(kvRowBuilder, jsonIndex++, tagKV, keyLen + VARSTR_HEADER_SIZE + CHAR_BYTES + CHAR_BYTES); + val.type = TSDB_DATA_TYPE_BOOL; + *((char*)&(val.i64)) = (char)(item->valueint); } else if (item->type == cJSON_NULL) { - char* valueType = POINTER_SHIFT(tagKV, keyLen + VARSTR_HEADER_SIZE); - *valueType = TSDB_DATA_TYPE_NULL; - tdAddColToKVRow(kvRowBuilder, jsonIndex++, tagKV, keyLen + VARSTR_HEADER_SIZE + CHAR_BYTES); + val.type = TSDB_DATA_TYPE_NULL; } else { retCode = buildSyntaxErrMsg(pMsgBuf, "invalidate json value", json); goto end; } - } - - if (taosHashGetSize(keyHash) == 0) { // set json NULL true - tdAddColToKVRow(kvRowBuilder, jsonIndex, &jsonNULL, CHAR_BYTES); + taosArrayPush(pTagVals, &val); } end: - taosMemoryFree(tagKV); taosHashCleanup(keyHash); + if(retCode == TSDB_CODE_SUCCESS){ + retCode = tTagNew(pTagVals, 1, true, ppTag); + } cJSON_Delete(root); return retCode; -} \ No newline at end of file +} + +static int32_t userAuthToString(int32_t acctId, const char* pUser, const char* pDb, AUTH_TYPE type, char* pStr) { + return sprintf(pStr, "%s*%d.%s*%d", pUser, acctId, pDb, type); +} + +static int32_t userAuthToStringExt(const char* pUser, const char* pDbFName, AUTH_TYPE type, char* pStr) { + return sprintf(pStr, "%s*%s*%d", pUser, pDbFName, type); +} + +static void stringToUserAuth(const char* pStr, int32_t len, SUserAuthInfo* pUserAuth) { + char* p1 = strchr(pStr, '*'); + strncpy(pUserAuth->user, pStr, p1 - pStr); + ++p1; + char* p2 = strchr(p1, '*'); + strncpy(pUserAuth->dbFName, p1, p2 - p1); + ++p2; + char buf[10] = {0}; + strncpy(buf, p2, len - (p2 - pStr)); + pUserAuth->type = taosStr2Int32(buf, NULL, 10); +} + +static int32_t buildTableReq(SHashObj* pTablesHash, SArray** pTables) { + if (NULL != pTablesHash) { + *pTables = taosArrayInit(taosHashGetSize(pTablesHash), sizeof(SName)); + if (NULL == *pTables) { + return TSDB_CODE_OUT_OF_MEMORY; + } + void* p = taosHashIterate(pTablesHash, NULL); + while (NULL != p) { + size_t len = 0; + char* pKey = taosHashGetKey(p, &len); + char fullName[TSDB_TABLE_FNAME_LEN] = {0}; + strncpy(fullName, pKey, len); + SName name = {0}; + tNameFromString(&name, fullName, T_NAME_ACCT | T_NAME_DB | T_NAME_TABLE); + taosArrayPush(*pTables, &name); + p =
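// Sketch (illustrative, not part of the patch): user-auth cache entries are keyed by a flat string so
// they fit a plain hash table; stringToUserAuth above inverts the encoding by splitting on the two
// '*' separators. Round trip (the db full name and the AUTH_TYPE_READ constant are hypothetical):
char key[USER_AUTH_KEY_MAX_LEN] = {0};
int32_t len = userAuthToStringExt("root", "1.test", AUTH_TYPE_READ, key);  // key: "root*1.test*<type>"
SUserAuthInfo info = {0};
stringToUserAuth(key, len, &info);  // recovers user "root", dbFName "1.test" and the numeric type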
taosHashIterate(pTablesHash, p); + } + } + return TSDB_CODE_SUCCESS; +} + +static int32_t buildDbReq(SHashObj* pDbsHash, SArray** pDbs) { + if (NULL != pDbsHash) { + *pDbs = taosArrayInit(taosHashGetSize(pDbsHash), TSDB_DB_FNAME_LEN); + if (NULL == *pDbs) { + return TSDB_CODE_OUT_OF_MEMORY; + } + void* p = taosHashIterate(pDbsHash, NULL); + while (NULL != p) { + size_t len = 0; + char* pKey = taosHashGetKey(p, &len); + char fullName[TSDB_DB_FNAME_LEN] = {0}; + strncpy(fullName, pKey, len); + taosArrayPush(*pDbs, fullName); + p = taosHashIterate(pDbsHash, p); + } + } + return TSDB_CODE_SUCCESS; +} + +static int32_t buildTableMetaReq(SHashObj* pTableMetaHash, SArray** pTableMeta) { + return buildTableReq(pTableMetaHash, pTableMeta); +} + +static int32_t buildDbVgroupReq(SHashObj* pDbVgroupHash, SArray** pDbVgroup) { + return buildDbReq(pDbVgroupHash, pDbVgroup); +} + +static int32_t buildTableVgroupReq(SHashObj* pTableVgroupHash, SArray** pTableVgroup) { + return buildTableReq(pTableVgroupHash, pTableVgroup); +} + +static int32_t buildDbCfgReq(SHashObj* pDbCfgHash, SArray** pDbCfg) { return buildDbReq(pDbCfgHash, pDbCfg); } + +static int32_t buildUserAuthReq(SHashObj* pUserAuthHash, SArray** pUserAuth) { + if (NULL != pUserAuthHash) { + *pUserAuth = taosArrayInit(taosHashGetSize(pUserAuthHash), sizeof(SUserAuthInfo)); + if (NULL == *pUserAuth) { + return TSDB_CODE_OUT_OF_MEMORY; + } + void* p = taosHashIterate(pUserAuthHash, NULL); + while (NULL != p) { + size_t len = 0; + char* pKey = taosHashGetKey(p, &len); + SUserAuthInfo userAuth = {0}; + stringToUserAuth(pKey, len, &userAuth); + taosArrayPush(*pUserAuth, &userAuth); + p = taosHashIterate(pUserAuthHash, p); + } + } + return TSDB_CODE_SUCCESS; +} + +static int32_t buildUdfReq(SHashObj* pUdfHash, SArray** pUdf) { + if (NULL != pUdfHash) { + *pUdf = taosArrayInit(taosHashGetSize(pUdfHash), TSDB_FUNC_NAME_LEN); + if (NULL == *pUdf) { + return TSDB_CODE_OUT_OF_MEMORY; + } + void* p = taosHashIterate(pUdfHash, NULL); + while (NULL != p) { + size_t len = 0; + char* pFunc = taosHashGetKey(p, &len); + char func[TSDB_FUNC_NAME_LEN] = {0}; + strncpy(func, pFunc, len); + taosArrayPush(*pUdf, func); + p = taosHashIterate(pUdfHash, p); + } + } + return TSDB_CODE_SUCCESS; +} + +int32_t buildCatalogReq(const SParseMetaCache* pMetaCache, SCatalogReq* pCatalogReq) { + int32_t code = buildTableMetaReq(pMetaCache->pTableMeta, &pCatalogReq->pTableMeta); + if (TSDB_CODE_SUCCESS == code) { + code = buildDbVgroupReq(pMetaCache->pDbVgroup, &pCatalogReq->pDbVgroup); + } + if (TSDB_CODE_SUCCESS == code) { + code = buildTableVgroupReq(pMetaCache->pTableVgroup, &pCatalogReq->pTableHash); + } + if (TSDB_CODE_SUCCESS == code) { + code = buildDbCfgReq(pMetaCache->pDbCfg, &pCatalogReq->pDbCfg); + } + if (TSDB_CODE_SUCCESS == code) { + code = buildUserAuthReq(pMetaCache->pUserAuth, &pCatalogReq->pUser); + } + if (TSDB_CODE_SUCCESS == code) { + code = buildUdfReq(pMetaCache->pUdf, &pCatalogReq->pUdf); + } + return code; +} + +static int32_t putTableMetaToCache(const SArray* pTableMetaReq, const SArray* pTableMetaData, SHashObj* pTableMeta) { + int32_t ntables = taosArrayGetSize(pTableMetaReq); + for (int32_t i = 0; i < ntables; ++i) { + char fullName[TSDB_TABLE_FNAME_LEN]; + tNameExtractFullName(taosArrayGet(pTableMetaReq, i), fullName); + if (TSDB_CODE_SUCCESS != + taosHashPut(pTableMeta, fullName, strlen(fullName), taosArrayGet(pTableMetaData, i), POINTER_BYTES)) { + return TSDB_CODE_OUT_OF_MEMORY; + } + } + return TSDB_CODE_SUCCESS; +} + +static int32_t 
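// Sketch (illustrative, not part of the patch): buildCatalogReq flattens each reserved hash set into
// the arrays a SCatalogReq carries; the hashes existed only to deduplicate names during syntax
// analysis. The catalog later answers with an SMetaData whose arrays are index-aligned with the
// request, which is what the putXxxToCache functions below rely on:
SCatalogReq catalogReq = {0};
int32_t code = buildCatalogReq(pQuery->pMetaCache, &catalogReq);  // assumes a parsed SQuery* pQuery
// catalogReq.pTableMeta entry i (a SName) is answered by pMetaData->pTableMeta entry i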
putDbVgroupToCache(const SArray* pDbVgroupReq, const SArray* pDbVgroupData, SHashObj* pDbVgroup) { + int32_t nvgs = taosArrayGetSize(pDbVgroupReq); + for (int32_t i = 0; i < nvgs; ++i) { + char* pDbFName = taosArrayGet(pDbVgroupReq, i); + if (TSDB_CODE_SUCCESS != + taosHashPut(pDbVgroup, pDbFName, strlen(pDbFName), taosArrayGet(pDbVgroupData, i), POINTER_BYTES)) { + return TSDB_CODE_OUT_OF_MEMORY; + } + } + return TSDB_CODE_SUCCESS; +} + +static int32_t putTableVgroupToCache(const SArray* pTableVgroupReq, const SArray* pTableVgroupData, + SHashObj* pTableVgroup) { + int32_t ntables = taosArrayGetSize(pTableVgroupReq); + for (int32_t i = 0; i < ntables; ++i) { + char fullName[TSDB_TABLE_FNAME_LEN]; + tNameExtractFullName(taosArrayGet(pTableVgroupReq, i), fullName); + SVgroupInfo* pInfo = taosArrayGet(pTableVgroupData, i); + if (TSDB_CODE_SUCCESS != taosHashPut(pTableVgroup, fullName, strlen(fullName), &pInfo, POINTER_BYTES)) { + return TSDB_CODE_OUT_OF_MEMORY; + } + } + return TSDB_CODE_SUCCESS; +} + +static int32_t putDbCfgToCache(const SArray* pDbCfgReq, const SArray* pDbCfgData, SHashObj* pDbCfg) { + int32_t nvgs = taosArrayGetSize(pDbCfgReq); + for (int32_t i = 0; i < nvgs; ++i) { + char* pDbFName = taosArrayGet(pDbCfgReq, i); + SDbCfgInfo* pInfo = taosArrayGet(pDbCfgData, i); + if (TSDB_CODE_SUCCESS != taosHashPut(pDbCfg, pDbFName, strlen(pDbFName), &pInfo, POINTER_BYTES)) { + return TSDB_CODE_OUT_OF_MEMORY; + } + } + return TSDB_CODE_SUCCESS; +} + +static int32_t putUserAuthToCache(const SArray* pUserAuthReq, const SArray* pUserAuthData, SHashObj* pUserAuth) { + int32_t nvgs = taosArrayGetSize(pUserAuthReq); + for (int32_t i = 0; i < nvgs; ++i) { + SUserAuthInfo* pUser = taosArrayGet(pUserAuthReq, i); + char key[USER_AUTH_KEY_MAX_LEN] = {0}; + int32_t len = userAuthToStringExt(pUser->user, pUser->dbFName, pUser->type, key); + if (TSDB_CODE_SUCCESS != taosHashPut(pUserAuth, key, len, taosArrayGet(pUserAuthData, i), sizeof(bool))) { + return TSDB_CODE_OUT_OF_MEMORY; + } + } + return TSDB_CODE_SUCCESS; +} + +static int32_t putUdfToCache(const SArray* pUdfReq, const SArray* pUdfData, SHashObj* pUdf) { + int32_t num = taosArrayGetSize(pUdfReq); + for (int32_t i = 0; i < num; ++i) { + char* pFunc = taosArrayGet(pUdfReq, i); + SFuncInfo* pInfo = taosArrayGet(pUdfData, i); + if (TSDB_CODE_SUCCESS != taosHashPut(pUdf, pFunc, strlen(pFunc), &pInfo, POINTER_BYTES)) { + return TSDB_CODE_OUT_OF_MEMORY; + } + } + return TSDB_CODE_SUCCESS; +} + +int32_t putMetaDataToCache(const SCatalogReq* pCatalogReq, const SMetaData* pMetaData, SParseMetaCache* pMetaCache) { + int32_t code = putTableMetaToCache(pCatalogReq->pTableMeta, pMetaData->pTableMeta, pMetaCache->pTableMeta); + if (TSDB_CODE_SUCCESS == code) { + code = putDbVgroupToCache(pCatalogReq->pDbVgroup, pMetaData->pDbVgroup, pMetaCache->pDbVgroup); + } + if (TSDB_CODE_SUCCESS == code) { + code = putTableVgroupToCache(pCatalogReq->pTableHash, pMetaData->pTableHash, pMetaCache->pTableVgroup); + } + if (TSDB_CODE_SUCCESS == code) { + code = putDbCfgToCache(pCatalogReq->pDbCfg, pMetaData->pDbCfg, pMetaCache->pDbCfg); + } + if (TSDB_CODE_SUCCESS == code) { + code = putUserAuthToCache(pCatalogReq->pUser, pMetaData->pUser, pMetaCache->pUserAuth); + } + if (TSDB_CODE_SUCCESS == code) { + code = putUdfToCache(pCatalogReq->pUdf, pMetaData->pUdfList, pMetaCache->pUdf); + } + return code; +} + +static int32_t reserveTableReqInCacheImpl(const char* pTbFName, int32_t len, SHashObj** pTables) { + if (NULL == *pTables) { + *pTables = taosHashInit(4, 
taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK); + if (NULL == *pTables) { + return TSDB_CODE_OUT_OF_MEMORY; + } + } + return taosHashPut(*pTables, pTbFName, len, &pTables, POINTER_BYTES); +} + +static int32_t reserveTableReqInCache(int32_t acctId, const char* pDb, const char* pTable, SHashObj** pTables) { + char fullName[TSDB_TABLE_FNAME_LEN]; + int32_t len = snprintf(fullName, sizeof(fullName), "%d.%s.%s", acctId, pDb, pTable); + return reserveTableReqInCacheImpl(fullName, len, pTables); +} + +int32_t reserveTableMetaInCache(int32_t acctId, const char* pDb, const char* pTable, SParseMetaCache* pMetaCache) { + return reserveTableReqInCache(acctId, pDb, pTable, &pMetaCache->pTableMeta); +} + +int32_t reserveTableMetaInCacheExt(const SName* pName, SParseMetaCache* pMetaCache) { + char fullName[TSDB_TABLE_FNAME_LEN]; + tNameExtractFullName(pName, fullName); + return reserveTableReqInCacheImpl(fullName, strlen(fullName), &pMetaCache->pTableMeta); +} + +int32_t getTableMetaFromCache(SParseMetaCache* pMetaCache, const SName* pName, STableMeta** pMeta) { + char fullName[TSDB_TABLE_FNAME_LEN]; + tNameExtractFullName(pName, fullName); + STableMeta** pRes = taosHashGet(pMetaCache->pTableMeta, fullName, strlen(fullName)); + if (NULL == pRes || NULL == *pRes) { + return TSDB_CODE_PAR_INTERNAL_ERROR; + } + *pMeta = tableMetaDup(*pRes); + if (NULL == *pMeta) { + return TSDB_CODE_OUT_OF_MEMORY; + } + return TSDB_CODE_SUCCESS; +} + +static int32_t reserveDbReqInCache(int32_t acctId, const char* pDb, SHashObj** pDbs) { + if (NULL == *pDbs) { + *pDbs = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK); + if (NULL == *pDbs) { + return TSDB_CODE_OUT_OF_MEMORY; + } + } + char fullName[TSDB_TABLE_FNAME_LEN]; + int32_t len = snprintf(fullName, sizeof(fullName), "%d.%s", acctId, pDb); + return taosHashPut(*pDbs, fullName, len, &pDbs, POINTER_BYTES); +} + +int32_t reserveDbVgInfoInCache(int32_t acctId, const char* pDb, SParseMetaCache* pMetaCache) { + return reserveDbReqInCache(acctId, pDb, &pMetaCache->pDbVgroup); +} + +int32_t getDbVgInfoFromCache(SParseMetaCache* pMetaCache, const char* pDbFName, SArray** pVgInfo) { + SArray** pRes = taosHashGet(pMetaCache->pDbVgroup, pDbFName, strlen(pDbFName)); + if (NULL == pRes) { + return TSDB_CODE_PAR_INTERNAL_ERROR; + } + // *pRes is null, which is a legal value, indicating that the user DB has not been created + if (NULL != *pRes) { + *pVgInfo = taosArrayDup(*pRes); + if (NULL == *pVgInfo) { + return TSDB_CODE_OUT_OF_MEMORY; + } + } + return TSDB_CODE_SUCCESS; +} + +int32_t reserveTableVgroupInCache(int32_t acctId, const char* pDb, const char* pTable, SParseMetaCache* pMetaCache) { + return reserveTableReqInCache(acctId, pDb, pTable, &pMetaCache->pTableVgroup); +} + +int32_t reserveTableVgroupInCacheExt(const SName* pName, SParseMetaCache* pMetaCache) { + char fullName[TSDB_TABLE_FNAME_LEN]; + tNameExtractFullName(pName, fullName); + return reserveTableReqInCacheImpl(fullName, strlen(fullName), &pMetaCache->pTableVgroup); +} + +int32_t getTableVgroupFromCache(SParseMetaCache* pMetaCache, const SName* pName, SVgroupInfo* pVgroup) { + char fullName[TSDB_TABLE_FNAME_LEN]; + tNameExtractFullName(pName, fullName); + SVgroupInfo** pRes = taosHashGet(pMetaCache->pTableVgroup, fullName, strlen(fullName)); + if (NULL == pRes || NULL == *pRes) { + return TSDB_CODE_PAR_INTERNAL_ERROR; + } + memcpy(pVgroup, *pRes, sizeof(SVgroupInfo)); + return TSDB_CODE_SUCCESS; +} + +int32_t reserveDbVgVersionInCache(int32_t acctId, 
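// Sketch (illustrative, not part of the patch): the meta cache is driven in two passes. While meta
// keys are collected, the parser merely reserves names (the stored hash value is a placeholder);
// after the catalog reply is installed, the getters hand out copies so the cache stays reusable.
// End to end for a database's vgroup list, assuming acctId 1, a database named "test", and a
// populated SParseMetaCache* pMetaCache:
int32_t code = reserveDbVgInfoInCache(1, "test", pMetaCache);  // pass 1: records the key "1.test"
// ... buildCatalogReq() / catalog fetch / putMetaDataToCache() run in between ...
SArray* pVgList = NULL;
if (TSDB_CODE_SUCCESS == code) {
  code = getDbVgInfoFromCache(pMetaCache, "1.test", &pVgList);  // pass 2: returns a taosArrayDup copy
}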
const char* pDb, SParseMetaCache* pMetaCache) { + return reserveDbReqInCache(acctId, pDb, &pMetaCache->pDbCfg); +} + +int32_t getDbVgVersionFromCache(SParseMetaCache* pMetaCache, const char* pDbFName, int32_t* pVersion, int64_t* pDbId, + int32_t* pTableNum) { + SDbInfo** pRes = taosHashGet(pMetaCache->pDbCfg, pDbFName, strlen(pDbFName)); + if (NULL == pRes || NULL == *pRes) { + return TSDB_CODE_PAR_INTERNAL_ERROR; + } + *pVersion = (*pRes)->vgVer; + *pDbId = (*pRes)->dbId; + *pTableNum = (*pRes)->tbNum; + return TSDB_CODE_SUCCESS; +} + +int32_t reserveDbCfgInCache(int32_t acctId, const char* pDb, SParseMetaCache* pMetaCache) { + return reserveDbReqInCache(acctId, pDb, &pMetaCache->pDbCfg); +} + +int32_t getDbCfgFromCache(SParseMetaCache* pMetaCache, const char* pDbFName, SDbCfgInfo* pInfo) { + SDbCfgInfo** pRes = taosHashGet(pMetaCache->pDbCfg, pDbFName, strlen(pDbFName)); + if (NULL == pRes || NULL == *pRes) { + return TSDB_CODE_PAR_INTERNAL_ERROR; + } + memcpy(pInfo, *pRes, sizeof(SDbCfgInfo)); + return TSDB_CODE_SUCCESS; +} + +static int32_t reserveUserAuthInCacheImpl(const char* pKey, int32_t len, SParseMetaCache* pMetaCache) { + if (NULL == pMetaCache->pUserAuth) { + pMetaCache->pUserAuth = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK); + if (NULL == pMetaCache->pUserAuth) { + return TSDB_CODE_OUT_OF_MEMORY; + } + } + bool pass = false; + return taosHashPut(pMetaCache->pUserAuth, pKey, len, &pass, sizeof(pass)); +} + +int32_t reserveUserAuthInCache(int32_t acctId, const char* pUser, const char* pDb, AUTH_TYPE type, + SParseMetaCache* pMetaCache) { + char key[USER_AUTH_KEY_MAX_LEN] = {0}; + int32_t len = userAuthToString(acctId, pUser, pDb, type, key); + return reserveUserAuthInCacheImpl(key, len, pMetaCache); +} + +int32_t reserveUserAuthInCacheExt(const char* pUser, const SName* pName, AUTH_TYPE type, SParseMetaCache* pMetaCache) { + char dbFName[TSDB_DB_FNAME_LEN] = {0}; + tNameGetFullDbName(pName, dbFName); + char key[USER_AUTH_KEY_MAX_LEN] = {0}; + int32_t len = userAuthToStringExt(pUser, dbFName, type, key); + return reserveUserAuthInCacheImpl(key, len, pMetaCache); +} + +int32_t getUserAuthFromCache(SParseMetaCache* pMetaCache, const char* pUser, const char* pDbFName, AUTH_TYPE type, + bool* pPass) { + char key[USER_AUTH_KEY_MAX_LEN] = {0}; + int32_t len = userAuthToStringExt(pUser, pDbFName, type, key); + bool* pRes = taosHashGet(pMetaCache->pUserAuth, key, len); + if (NULL == pRes) { + return TSDB_CODE_PAR_INTERNAL_ERROR; + } + *pPass = *pRes; + return TSDB_CODE_SUCCESS; +} + +int32_t reserveUdfInCache(const char* pFunc, SParseMetaCache* pMetaCache) { + if (NULL == pMetaCache->pUdf) { + pMetaCache->pUdf = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK); + if (NULL == pMetaCache->pUdf) { + return TSDB_CODE_OUT_OF_MEMORY; + } + } + return taosHashPut(pMetaCache->pUdf, pFunc, strlen(pFunc), &pMetaCache, POINTER_BYTES); +} + +int32_t getUdfInfoFromCache(SParseMetaCache* pMetaCache, const char* pFunc, SFuncInfo* pInfo) { + SFuncInfo** pRes = taosHashGet(pMetaCache->pUdf, pFunc, strlen(pFunc)); + if (NULL == pRes || NULL == *pRes) { + return TSDB_CODE_PAR_INTERNAL_ERROR; + } + memcpy(pInfo, *pRes, sizeof(SFuncInfo)); + return TSDB_CODE_SUCCESS; +} diff --git a/source/libs/parser/src/parser.c b/source/libs/parser/src/parser.c index 6dac1e1250df2de8d41df4c9b342f783e54f2b22..c2e1eba4727281a54b778d64afc25ea159a11880 100644 --- a/source/libs/parser/src/parser.c +++ b/source/libs/parser/src/parser.c @@ -34,22 
+34,35 @@ bool qIsInsertSql(const char* pStr, size_t length) { } while (1); } -static int32_t parseSqlIntoAst(SParseContext* pCxt, SQuery** pQuery) { - int32_t code = parse(pCxt, pQuery); - if (TSDB_CODE_SUCCESS == code) { - code = authenticate(pCxt, *pQuery); - } +static int32_t analyseSemantic(SParseContext* pCxt, SQuery* pQuery) { + int32_t code = authenticate(pCxt, pQuery); - if (TSDB_CODE_SUCCESS == code && (*pQuery)->placeholderNum > 0) { - TSWAP((*pQuery)->pPrepareRoot, (*pQuery)->pRoot); + if (TSDB_CODE_SUCCESS == code && pQuery->placeholderNum > 0) { + TSWAP(pQuery->pPrepareRoot, pQuery->pRoot); return TSDB_CODE_SUCCESS; } if (TSDB_CODE_SUCCESS == code) { - code = translate(pCxt, *pQuery); + code = translate(pCxt, pQuery); + } + if (TSDB_CODE_SUCCESS == code) { + code = calculateConstant(pCxt, pQuery); + } + return code; +} + +static int32_t parseSqlIntoAst(SParseContext* pCxt, SQuery** pQuery) { + int32_t code = parse(pCxt, pQuery); + if (TSDB_CODE_SUCCESS == code) { + code = analyseSemantic(pCxt, *pQuery); } + return code; +} + +static int32_t parseSqlSyntax(SParseContext* pCxt, SQuery** pQuery) { + int32_t code = parse(pCxt, pQuery); if (TSDB_CODE_SUCCESS == code) { - code = calculateConstant(pCxt, *pQuery); + code = collectMetaKey(pCxt, *pQuery); } return code; } @@ -63,28 +76,8 @@ static int32_t setValueByBindParam(SValueNode* pVal, TAOS_MULTI_BIND* pParam) { int32_t inputSize = (NULL != pParam->length ? *(pParam->length) : tDataTypes[pParam->buffer_type].bytes); pVal->node.resType.type = pParam->buffer_type; pVal->node.resType.bytes = inputSize; + switch (pParam->buffer_type) { - case TSDB_DATA_TYPE_BOOL: - pVal->datum.b = *((bool*)pParam->buffer); - break; - case TSDB_DATA_TYPE_TINYINT: - pVal->datum.i = *((int8_t*)pParam->buffer); - break; - case TSDB_DATA_TYPE_SMALLINT: - pVal->datum.i = *((int16_t*)pParam->buffer); - break; - case TSDB_DATA_TYPE_INT: - pVal->datum.i = *((int32_t*)pParam->buffer); - break; - case TSDB_DATA_TYPE_BIGINT: - pVal->datum.i = *((int64_t*)pParam->buffer); - break; - case TSDB_DATA_TYPE_FLOAT: - pVal->datum.d = *((float*)pParam->buffer); - break; - case TSDB_DATA_TYPE_DOUBLE: - pVal->datum.d = *((double*)pParam->buffer); - break; case TSDB_DATA_TYPE_VARCHAR: case TSDB_DATA_TYPE_VARBINARY: pVal->datum.p = taosMemoryCalloc(1, pVal->node.resType.bytes + VARSTR_HEADER_SIZE + 1); @@ -93,6 +86,7 @@ static int32_t setValueByBindParam(SValueNode* pVal, TAOS_MULTI_BIND* pParam) { } varDataSetLen(pVal->datum.p, pVal->node.resType.bytes); strncpy(varDataVal(pVal->datum.p), (const char*)pParam->buffer, pVal->node.resType.bytes); + pVal->node.resType.bytes += VARSTR_HEADER_SIZE; break; case TSDB_DATA_TYPE_NCHAR: { pVal->node.resType.bytes *= TSDB_NCHAR_SIZE; @@ -107,31 +101,16 @@ static int32_t setValueByBindParam(SValueNode* pVal, TAOS_MULTI_BIND* pParam) { return errno; } varDataSetLen(pVal->datum.p, output); - pVal->node.resType.bytes = output; + pVal->node.resType.bytes = output + VARSTR_HEADER_SIZE; break; } - case TSDB_DATA_TYPE_TIMESTAMP: - pVal->datum.i = *((int64_t*)pParam->buffer); - break; - case TSDB_DATA_TYPE_UTINYINT: - pVal->datum.u = *((uint8_t*)pParam->buffer); - break; - case TSDB_DATA_TYPE_USMALLINT: - pVal->datum.u = *((uint16_t*)pParam->buffer); - break; - case TSDB_DATA_TYPE_UINT: - pVal->datum.u = *((uint32_t*)pParam->buffer); - break; - case TSDB_DATA_TYPE_UBIGINT: - pVal->datum.u = *((uint64_t*)pParam->buffer); - break; - case TSDB_DATA_TYPE_JSON: - case TSDB_DATA_TYPE_DECIMAL: - case TSDB_DATA_TYPE_BLOB: - case 
TSDB_DATA_TYPE_MEDIUMBLOB: - // todo - default: + default: { + int32_t code = nodesSetValueNodeValue(pVal, pParam->buffer); + if (code) { + return code; + } break; + } } pVal->translate = true; return TSDB_CODE_SUCCESS; @@ -178,6 +157,36 @@ int32_t qParseSql(SParseContext* pCxt, SQuery** pQuery) { return code; } +// Phase 1 of the new two-phase parse: analyse the syntax only and collect, in +// pCatalogReq, the metadata keys the statement needs from the catalog. +int32_t qParseSqlSyntax(SParseContext* pCxt, SQuery** pQuery, struct SCatalogReq* pCatalogReq) { + int32_t code = TSDB_CODE_SUCCESS; + if (qIsInsertSql(pCxt->pSql, pCxt->sqlLen)) { + code = parseInsertSyntax(pCxt, pQuery); + } else { + code = parseSqlSyntax(pCxt, pQuery); + } + if (TSDB_CODE_SUCCESS == code) { + code = buildCatalogReq((*pQuery)->pMetaCache, pCatalogReq); + } + terrno = code; + return code; +} + +// Phase 2: once the caller has fetched the requested metadata, load it into the +// per-query cache, then run authentication, translation and constant folding. +int32_t qAnalyseSqlSemantic(SParseContext* pCxt, const struct SCatalogReq* pCatalogReq, + const struct SMetaData* pMetaData, SQuery* pQuery) { + int32_t code = putMetaDataToCache(pCatalogReq, pMetaData, pQuery->pMetaCache); + if (TSDB_CODE_SUCCESS != code) { + return code; + } + if (NULL == pQuery->pRoot) { + return parseInsertSql(pCxt, &pQuery); + } + return analyseSemantic(pCxt, pQuery); +} + void qDestroyQuery(SQuery* pQueryNode) { nodesDestroyNode(pQueryNode); } int32_t qExtractResultSchema(const SNode* pRoot, int32_t* numOfCols, SSchema** pSchema) { diff --git a/source/libs/parser/src/sql.c b/source/libs/parser/src/sql.c index 262abac54bbd1c1ea9847c05507bb13fdedb0462..ff4fe4032e9be6ab95696bf41d6e1f398983e7b1 100644 --- a/source/libs/parser/src/sql.c +++ b/source/libs/parser/src/sql.c @@ -32,11 +32,15 @@ #include <stdio.h> #include <assert.h> +#define ALLOW_FORBID_FUNC + #include "functionMgt.h" #include "nodes.h" #include "parToken.h" #include "ttokendef.h" #include "parAst.h" + +#define YYSTACKDEPTH 0 /**************** End of %include directives **********************************/ /* These constants specify the various numeric values for terminal symbols ** in a format understandable to "makeheaders". 
This section is blank unless @@ -100,25 +104,25 @@ #endif /************* Begin control #defines *****************************************/ #define YYCODETYPE unsigned short int -#define YYNOCODE 361 +#define YYNOCODE 357 #define YYACTIONTYPE unsigned short int #define ParseTOKENTYPE SToken typedef union { int yyinit; ParseTOKENTYPE yy0; - EFillMode yy18; - SAlterOption yy25; - SToken yy53; - EOperatorType yy136; - int32_t yy158; - ENullOrder yy185; - SNodeList* yy236; - EJoinType yy342; - EOrder yy430; - int64_t yy435; - SDataType yy450; - bool yy603; - SNode* yy636; + SAlterOption yy53; + ENullOrder yy109; + SToken yy113; + EJoinType yy120; + int64_t yy123; + bool yy131; + EOrder yy428; + SDataType yy490; + EFillMode yy522; + int32_t yy550; + EOperatorType yy632; + SNodeList* yy670; + SNode* yy686; } YYMINORTYPE; #ifndef YYSTACKDEPTH #define YYSTACKDEPTH 100 @@ -134,17 +138,18 @@ typedef union { #define ParseCTX_FETCH #define ParseCTX_STORE #define YYFALLBACK 1 -#define YYNSTATE 611 -#define YYNRULE 455 -#define YYNTOKEN 240 -#define YY_MAX_SHIFT 610 -#define YY_MIN_SHIFTREDUCE 901 -#define YY_MAX_SHIFTREDUCE 1355 -#define YY_ERROR_ACTION 1356 -#define YY_ACCEPT_ACTION 1357 -#define YY_NO_ACTION 1358 -#define YY_MIN_REDUCE 1359 -#define YY_MAX_REDUCE 1813 +#define YYNSTATE 612 +#define YYNRULE 451 +#define YYNRULE_WITH_ACTION 451 +#define YYNTOKEN 237 +#define YY_MAX_SHIFT 611 +#define YY_MIN_SHIFTREDUCE 898 +#define YY_MAX_SHIFTREDUCE 1348 +#define YY_ERROR_ACTION 1349 +#define YY_ACCEPT_ACTION 1350 +#define YY_NO_ACTION 1351 +#define YY_MIN_REDUCE 1352 +#define YY_MAX_REDUCE 1802 /************* End control #defines *******************************************/ #define YY_NLOOKAHEAD ((int)(sizeof(yy_lookahead)/sizeof(yy_lookahead[0]))) @@ -211,604 +216,622 @@ typedef union { ** yy_default[] Default action for each state. 
** *********** Begin parsing tables **********************************************/ -#define YY_ACTTAB_COUNT (2153) +#define YY_ACTTAB_COUNT (2125) static const YYACTIONTYPE yy_action[] = { - /* 0 */ 386, 1647, 387, 1391, 295, 394, 524, 387, 1391, 28, - /* 10 */ 226, 931, 35, 33, 130, 1676, 1371, 1660, 104, 1791, - /* 20 */ 304, 1644, 1169, 477, 523, 424, 36, 34, 32, 31, - /* 30 */ 30, 385, 1790, 62, 389, 1490, 1788, 1640, 1646, 36, - /* 40 */ 34, 32, 31, 30, 1535, 1676, 108, 1167, 527, 935, - /* 50 */ 936, 294, 1000, 508, 524, 1485, 1533, 154, 14, 476, - /* 60 */ 35, 33, 1296, 507, 1175, 24, 350, 1630, 304, 1002, - /* 70 */ 1169, 1418, 277, 488, 523, 36, 34, 32, 31, 30, - /* 80 */ 56, 1, 60, 1490, 1689, 59, 524, 80, 1661, 510, - /* 90 */ 1663, 1664, 506, 1359, 527, 1167, 1207, 1729, 104, 603, - /* 100 */ 602, 276, 1725, 607, 1258, 429, 14, 36, 34, 32, - /* 110 */ 31, 30, 1175, 1791, 1168, 1490, 140, 103, 102, 101, - /* 120 */ 100, 99, 98, 97, 96, 95, 147, 376, 1529, 2, - /* 130 */ 1788, 583, 582, 581, 319, 39, 580, 579, 578, 114, - /* 140 */ 573, 572, 571, 570, 569, 568, 567, 566, 121, 562, - /* 150 */ 511, 607, 1568, 307, 1259, 55, 1580, 55, 1170, 156, - /* 160 */ 94, 1791, 1168, 93, 92, 91, 90, 89, 88, 87, - /* 170 */ 86, 85, 158, 157, 146, 352, 1264, 1352, 1788, 393, - /* 180 */ 1173, 1174, 389, 1220, 1221, 1223, 1224, 1225, 1226, 1227, - /* 190 */ 503, 525, 1235, 1236, 1237, 1238, 1239, 1240, 1468, 36, - /* 200 */ 34, 32, 31, 30, 64, 292, 1170, 131, 191, 274, - /* 210 */ 148, 1447, 27, 302, 1253, 1254, 1255, 1256, 1257, 1261, - /* 220 */ 1262, 1263, 1421, 36, 34, 32, 31, 30, 1173, 1174, - /* 230 */ 484, 1220, 1221, 1223, 1224, 1225, 1226, 1227, 503, 525, - /* 240 */ 1235, 1236, 1237, 1238, 1239, 1240, 35, 33, 1467, 948, - /* 250 */ 70, 947, 438, 437, 304, 403, 1169, 436, 1351, 112, - /* 260 */ 109, 433, 308, 1791, 432, 431, 430, 35, 33, 1310, - /* 270 */ 128, 1483, 1660, 403, 523, 304, 1789, 1169, 949, 1492, - /* 280 */ 1788, 1167, 438, 437, 148, 1193, 148, 436, 62, 972, - /* 290 */ 109, 433, 14, 1207, 432, 431, 430, 110, 1175, 1360, - /* 300 */ 1676, 1303, 1167, 1382, 1660, 524, 973, 1193, 508, 524, - /* 310 */ 1486, 486, 142, 1736, 1737, 2, 1741, 351, 507, 1175, - /* 320 */ 94, 361, 1630, 93, 92, 91, 90, 89, 88, 87, - /* 330 */ 86, 85, 1676, 1381, 1490, 38, 8, 607, 1490, 1689, - /* 340 */ 487, 559, 82, 1661, 510, 1663, 1664, 506, 1168, 527, - /* 350 */ 507, 1191, 1729, 1630, 1630, 1535, 1728, 1725, 607, 128, - /* 360 */ 558, 557, 309, 556, 555, 554, 1380, 1533, 1493, 1168, - /* 370 */ 565, 1689, 1462, 1535, 81, 1661, 510, 1663, 1664, 506, - /* 380 */ 315, 527, 524, 1630, 1729, 1533, 1743, 26, 297, 1725, - /* 390 */ 141, 478, 1170, 54, 362, 435, 434, 36, 34, 32, - /* 400 */ 31, 30, 218, 36, 34, 32, 31, 30, 466, 1756, - /* 410 */ 1740, 1490, 55, 1170, 1173, 1174, 1630, 1220, 1221, 1223, - /* 420 */ 1224, 1225, 1226, 1227, 503, 525, 1235, 1236, 1237, 1238, - /* 430 */ 1239, 1240, 460, 577, 575, 1173, 1174, 1379, 1220, 1221, - /* 440 */ 1223, 1224, 1225, 1226, 1227, 503, 525, 1235, 1236, 1237, - /* 450 */ 1238, 1239, 1240, 35, 33, 1241, 1378, 443, 1195, 610, - /* 460 */ 316, 304, 1377, 1169, 148, 148, 249, 1571, 1573, 1520, - /* 470 */ 1246, 1222, 451, 243, 35, 33, 1193, 1481, 1376, 1660, - /* 480 */ 1647, 461, 304, 312, 1169, 105, 190, 1630, 1167, 524, - /* 490 */ 473, 599, 595, 591, 587, 242, 391, 1644, 446, 1357, - /* 500 */ 1644, 402, 1191, 440, 561, 1175, 1630, 1676, 189, 1167, - /* 510 */ 337, 484, 1630, 1640, 1646, 508, 1640, 1646, 1490, 484, - /* 520 */ 78, 1791, 9, 237, 527, 
507, 1175, 527, 1630, 1630, - /* 530 */ 339, 335, 564, 51, 145, 488, 50, 127, 1788, 511, - /* 540 */ 112, 148, 576, 9, 607, 1581, 1689, 1194, 112, 80, - /* 550 */ 1661, 510, 1663, 1664, 506, 1168, 527, 520, 320, 1729, - /* 560 */ 1375, 479, 474, 276, 1725, 607, 36, 34, 32, 31, - /* 570 */ 30, 1648, 1130, 314, 1479, 1791, 1168, 428, 110, 553, - /* 580 */ 1132, 128, 465, 340, 217, 198, 110, 1465, 145, 55, - /* 590 */ 1492, 1644, 1788, 143, 1736, 1737, 77, 1741, 1791, 1170, - /* 600 */ 427, 144, 1736, 1737, 1146, 1741, 193, 1640, 1646, 113, - /* 610 */ 1630, 145, 277, 1572, 1573, 1788, 490, 1482, 527, 1295, - /* 620 */ 1170, 1173, 1174, 1374, 1220, 1221, 1223, 1224, 1225, 1226, - /* 630 */ 1227, 503, 525, 1235, 1236, 1237, 1238, 1239, 1240, 286, - /* 640 */ 1222, 1131, 1173, 1174, 1258, 1220, 1221, 1223, 1224, 1225, - /* 650 */ 1226, 1227, 503, 525, 1235, 1236, 1237, 1238, 1239, 1240, - /* 660 */ 35, 33, 273, 559, 1191, 345, 1320, 344, 304, 524, - /* 670 */ 1169, 369, 524, 1630, 381, 32, 31, 30, 1748, 1291, - /* 680 */ 559, 1487, 558, 557, 1610, 556, 555, 554, 287, 7, - /* 690 */ 285, 284, 382, 426, 1259, 1167, 947, 428, 1490, 558, - /* 700 */ 557, 1490, 556, 555, 554, 470, 1318, 1319, 1321, 1322, - /* 710 */ 1535, 317, 1175, 11, 10, 1373, 1264, 1743, 148, 128, - /* 720 */ 427, 422, 1534, 935, 936, 1743, 1154, 1155, 1492, 2, - /* 730 */ 1038, 550, 549, 548, 1042, 547, 1044, 1045, 546, 1047, - /* 740 */ 543, 1739, 1053, 540, 1055, 1056, 537, 534, 346, 1738, - /* 750 */ 1370, 607, 27, 302, 1253, 1254, 1255, 1256, 1257, 1261, - /* 760 */ 1262, 1263, 1168, 380, 1466, 1630, 375, 374, 373, 372, - /* 770 */ 371, 368, 367, 366, 365, 364, 360, 359, 358, 357, - /* 780 */ 356, 355, 354, 353, 524, 524, 129, 524, 1791, 1196, - /* 790 */ 492, 255, 1192, 1193, 1272, 1260, 521, 522, 1475, 239, - /* 800 */ 1630, 145, 1369, 253, 53, 1788, 1170, 52, 1368, 1367, - /* 810 */ 1366, 452, 1365, 1490, 1490, 1364, 1490, 1265, 524, 1363, - /* 820 */ 1660, 561, 1362, 47, 159, 275, 1294, 1477, 1173, 1174, - /* 830 */ 318, 1220, 1221, 1223, 1224, 1225, 1226, 1227, 503, 525, - /* 840 */ 1235, 1236, 1237, 1238, 1239, 1240, 495, 1490, 1676, 55, - /* 850 */ 1473, 1791, 1630, 25, 1619, 194, 487, 1408, 1630, 1630, - /* 860 */ 1630, 1291, 1630, 1403, 145, 1630, 507, 1401, 1788, 1630, - /* 870 */ 1630, 182, 1630, 184, 180, 186, 183, 188, 185, 439, - /* 880 */ 187, 1660, 500, 450, 502, 441, 79, 1689, 76, 444, - /* 890 */ 81, 1661, 510, 1663, 1664, 506, 448, 527, 72, 327, - /* 900 */ 1729, 11, 10, 552, 297, 1725, 141, 1372, 459, 1676, - /* 910 */ 1354, 1355, 1650, 1448, 1660, 202, 1178, 508, 58, 57, - /* 920 */ 349, 118, 46, 153, 471, 1757, 1177, 507, 343, 205, - /* 930 */ 221, 1630, 37, 37, 37, 453, 212, 1677, 1392, 228, - /* 940 */ 272, 421, 1676, 333, 1530, 329, 325, 150, 1689, 1652, - /* 950 */ 508, 81, 1661, 510, 1663, 1664, 506, 1222, 527, 1759, - /* 960 */ 507, 1729, 462, 1317, 1630, 297, 1725, 1804, 1191, 116, - /* 970 */ 207, 117, 485, 1266, 1228, 1124, 1763, 493, 148, 1660, - /* 980 */ 230, 1689, 220, 1181, 81, 1661, 510, 1663, 1664, 506, - /* 990 */ 223, 527, 118, 1180, 1729, 1660, 46, 532, 297, 1725, - /* 1000 */ 1804, 322, 117, 225, 1250, 3, 118, 1676, 326, 1786, - /* 1010 */ 516, 282, 236, 1000, 283, 508, 119, 117, 244, 155, - /* 1020 */ 1138, 363, 370, 1676, 1570, 507, 378, 1660, 377, 1630, - /* 1030 */ 379, 508, 383, 1031, 1197, 496, 384, 248, 1059, 392, - /* 1040 */ 1200, 507, 395, 1063, 162, 1630, 1689, 1070, 396, 82, - /* 1050 */ 1661, 510, 1663, 1664, 506, 1676, 527, 1068, 120, 1729, - /* 1060 */ 1199, 164, 
1689, 508, 1726, 81, 1661, 510, 1663, 1664, - /* 1070 */ 506, 1201, 527, 507, 397, 1729, 398, 1630, 1660, 297, - /* 1080 */ 1725, 1804, 167, 488, 399, 169, 1198, 400, 401, 172, - /* 1090 */ 1747, 61, 404, 1660, 1689, 175, 423, 262, 1661, 510, - /* 1100 */ 1663, 1664, 506, 425, 527, 84, 1676, 1175, 1480, 179, - /* 1110 */ 1476, 291, 181, 1614, 508, 122, 123, 1478, 1474, 124, - /* 1120 */ 125, 1676, 245, 1791, 507, 192, 455, 195, 1630, 508, - /* 1130 */ 246, 197, 454, 464, 488, 463, 147, 200, 1196, 507, - /* 1140 */ 1788, 458, 472, 1630, 1660, 1689, 1770, 203, 262, 1661, - /* 1150 */ 510, 1663, 1664, 506, 514, 527, 6, 1750, 469, 1769, - /* 1160 */ 1689, 211, 481, 82, 1661, 510, 1663, 1664, 506, 206, - /* 1170 */ 527, 1760, 1676, 1729, 1791, 296, 475, 499, 1725, 1195, - /* 1180 */ 505, 468, 5, 1291, 111, 40, 497, 145, 1744, 1807, - /* 1190 */ 507, 1788, 298, 18, 1630, 512, 1660, 513, 494, 306, - /* 1200 */ 311, 310, 1579, 135, 1578, 1660, 214, 517, 518, 519, - /* 1210 */ 1183, 1689, 213, 1787, 270, 1661, 510, 1663, 1664, 506, - /* 1220 */ 504, 527, 501, 1701, 1676, 219, 232, 71, 491, 1710, - /* 1230 */ 234, 247, 508, 1676, 69, 1176, 250, 1491, 241, 222, - /* 1240 */ 606, 508, 507, 1463, 498, 48, 1630, 530, 224, 256, - /* 1250 */ 134, 507, 1175, 1660, 263, 1630, 257, 293, 467, 252, - /* 1260 */ 254, 1624, 1623, 1689, 321, 1620, 132, 1661, 510, 1663, - /* 1270 */ 1664, 506, 1689, 527, 323, 271, 1661, 510, 1663, 1664, - /* 1280 */ 506, 1676, 527, 324, 1163, 1660, 1164, 151, 1618, 508, - /* 1290 */ 328, 528, 330, 331, 1617, 332, 334, 1616, 336, 507, - /* 1300 */ 1615, 338, 1179, 1630, 1600, 152, 341, 1141, 342, 1140, - /* 1310 */ 489, 1805, 1594, 1676, 1593, 347, 348, 1660, 1592, 1591, - /* 1320 */ 1689, 508, 1107, 266, 1661, 510, 1663, 1664, 506, 1563, - /* 1330 */ 527, 507, 1562, 1561, 1560, 1630, 1559, 1558, 1557, 1556, - /* 1340 */ 1555, 1554, 1553, 1552, 1551, 1676, 1184, 1550, 1549, 1548, - /* 1350 */ 1547, 1546, 1689, 508, 1545, 132, 1661, 510, 1663, 1664, - /* 1360 */ 506, 480, 527, 507, 115, 1660, 1544, 1630, 1187, 1543, - /* 1370 */ 301, 1542, 1541, 1540, 1109, 1539, 1538, 1537, 1660, 525, - /* 1380 */ 1235, 1236, 1536, 1420, 1689, 1388, 160, 271, 1661, 510, - /* 1390 */ 1663, 1664, 506, 1676, 527, 938, 106, 138, 937, 388, - /* 1400 */ 1806, 505, 1387, 161, 390, 107, 1676, 1608, 1602, 1586, - /* 1410 */ 1585, 507, 1576, 1469, 508, 1630, 166, 171, 1660, 1419, - /* 1420 */ 966, 1417, 1415, 407, 507, 405, 1413, 411, 1630, 415, - /* 1430 */ 1411, 303, 1689, 419, 406, 270, 1661, 510, 1663, 1664, - /* 1440 */ 506, 409, 527, 410, 1702, 1689, 1676, 413, 271, 1661, - /* 1450 */ 510, 1663, 1664, 506, 508, 527, 414, 1400, 177, 1399, - /* 1460 */ 418, 417, 1386, 1471, 507, 1074, 1660, 1470, 1630, 1073, - /* 1470 */ 139, 305, 574, 576, 999, 1169, 420, 416, 412, 408, - /* 1480 */ 176, 45, 998, 178, 997, 1689, 996, 993, 271, 1661, - /* 1490 */ 510, 1663, 1664, 506, 1676, 527, 992, 991, 1409, 288, - /* 1500 */ 1167, 1404, 508, 289, 442, 63, 1402, 290, 174, 1385, - /* 1510 */ 447, 445, 507, 1384, 449, 83, 1630, 1175, 1607, 1148, - /* 1520 */ 49, 1601, 456, 1660, 1584, 126, 1583, 1575, 199, 65, - /* 1530 */ 196, 4, 133, 1689, 201, 37, 258, 1661, 510, 1663, - /* 1540 */ 1664, 506, 204, 527, 15, 457, 43, 1316, 1309, 208, - /* 1550 */ 22, 1676, 209, 23, 210, 66, 607, 1288, 1650, 508, - /* 1560 */ 1287, 216, 1345, 42, 136, 41, 173, 1168, 165, 507, - /* 1570 */ 170, 1660, 168, 1630, 17, 1340, 1339, 16, 13, 1334, - /* 1580 */ 10, 299, 1344, 1343, 300, 1251, 19, 137, 149, 1230, - /* 1590 */ 1689, 163, 1215, 
265, 1661, 510, 1663, 1664, 506, 1676, - /* 1600 */ 527, 1660, 509, 1574, 29, 515, 12, 508, 1649, 233, - /* 1610 */ 72, 1170, 1229, 20, 235, 1185, 531, 507, 238, 21, - /* 1620 */ 229, 1630, 227, 529, 1314, 964, 313, 231, 67, 1676, - /* 1630 */ 68, 1660, 1692, 1173, 1174, 1232, 526, 508, 1689, 44, - /* 1640 */ 533, 267, 1661, 510, 1663, 1664, 506, 507, 527, 1060, - /* 1650 */ 1057, 1630, 535, 538, 536, 541, 544, 1054, 539, 1676, - /* 1660 */ 1037, 1052, 1048, 542, 1069, 1046, 545, 508, 1689, 551, - /* 1670 */ 1051, 259, 1661, 510, 1663, 1664, 506, 507, 527, 1660, - /* 1680 */ 73, 1630, 74, 75, 1066, 1065, 1050, 560, 1660, 1049, - /* 1690 */ 988, 1006, 563, 240, 986, 985, 984, 983, 1689, 981, - /* 1700 */ 1067, 268, 1661, 510, 1663, 1664, 506, 1676, 527, 982, - /* 1710 */ 1003, 980, 979, 1001, 976, 508, 1676, 975, 974, 971, - /* 1720 */ 970, 969, 1416, 584, 508, 507, 585, 586, 1414, 1630, - /* 1730 */ 588, 589, 590, 1412, 507, 592, 1660, 593, 1630, 594, - /* 1740 */ 1410, 596, 597, 598, 1398, 600, 1689, 601, 1397, 260, - /* 1750 */ 1661, 510, 1663, 1664, 506, 1689, 527, 1383, 269, 1661, - /* 1760 */ 510, 1663, 1664, 506, 1676, 527, 609, 604, 605, 1358, - /* 1770 */ 1358, 1171, 508, 251, 608, 1358, 1358, 1358, 1358, 1358, - /* 1780 */ 1358, 1358, 507, 1358, 1660, 1358, 1630, 1358, 1358, 1358, - /* 1790 */ 1358, 1358, 1358, 1358, 1358, 1660, 1358, 1358, 1358, 1358, - /* 1800 */ 1358, 1358, 1358, 1689, 1358, 1358, 261, 1661, 510, 1663, - /* 1810 */ 1664, 506, 1676, 527, 1660, 1358, 1358, 1358, 1358, 1358, - /* 1820 */ 508, 1358, 1358, 1676, 1358, 1358, 1358, 1358, 1358, 1358, - /* 1830 */ 507, 508, 1358, 1358, 1630, 1358, 1358, 1358, 1358, 1358, - /* 1840 */ 1358, 507, 1676, 1358, 1358, 1630, 1358, 1358, 1358, 1358, - /* 1850 */ 508, 1689, 1358, 1358, 1672, 1661, 510, 1663, 1664, 506, - /* 1860 */ 507, 527, 1689, 1358, 1630, 1671, 1661, 510, 1663, 1664, - /* 1870 */ 506, 1358, 527, 1660, 1358, 1358, 1358, 1358, 1358, 1358, - /* 1880 */ 1358, 1689, 1660, 1358, 1670, 1661, 510, 1663, 1664, 506, - /* 1890 */ 1358, 527, 1358, 1358, 1358, 1358, 1358, 1358, 1358, 1358, - /* 1900 */ 1358, 1676, 1358, 1358, 1358, 1358, 1358, 1358, 1358, 508, - /* 1910 */ 1676, 1358, 1358, 1358, 1358, 1358, 1358, 1358, 508, 507, - /* 1920 */ 1358, 1358, 1358, 1630, 1358, 1358, 1358, 1358, 507, 1358, - /* 1930 */ 1358, 1358, 1630, 1358, 1358, 1358, 1358, 1358, 1358, 1358, - /* 1940 */ 1689, 1358, 1660, 280, 1661, 510, 1663, 1664, 506, 1689, - /* 1950 */ 527, 1660, 279, 1661, 510, 1663, 1664, 506, 1358, 527, - /* 1960 */ 1358, 1358, 1358, 1358, 1358, 1358, 1358, 1358, 1358, 1358, - /* 1970 */ 1676, 1358, 1358, 1358, 1358, 1358, 1358, 1358, 508, 1676, - /* 1980 */ 1358, 1358, 1358, 1358, 1358, 1358, 1358, 508, 507, 1358, - /* 1990 */ 1358, 1358, 1630, 1358, 1358, 1358, 1358, 507, 1358, 1660, - /* 2000 */ 1358, 1630, 1358, 1358, 1358, 1358, 1358, 1358, 1358, 1689, - /* 2010 */ 484, 1358, 281, 1661, 510, 1663, 1664, 506, 1689, 527, - /* 2020 */ 1358, 278, 1661, 510, 1663, 1664, 506, 1676, 527, 1358, - /* 2030 */ 1358, 1358, 1358, 1358, 1358, 508, 1358, 1358, 1358, 112, - /* 2040 */ 1358, 1358, 1358, 1358, 1358, 507, 484, 1358, 1358, 1630, - /* 2050 */ 1358, 1358, 1358, 1358, 1358, 1358, 1358, 1358, 1358, 488, - /* 2060 */ 1358, 1358, 1358, 1358, 1358, 1358, 1689, 1358, 1358, 264, - /* 2070 */ 1661, 510, 1663, 1664, 506, 112, 527, 110, 1358, 1358, - /* 2080 */ 1358, 1358, 1358, 1358, 1358, 1358, 1358, 1358, 1358, 1358, - /* 2090 */ 1358, 1358, 215, 1736, 483, 488, 482, 1358, 1358, 1791, - /* 2100 */ 1358, 1358, 1358, 1358, 
1358, 1358, 1358, 1358, 1358, 1358, - /* 2110 */ 1358, 1358, 147, 110, 1358, 1358, 1788, 1358, 1358, 1358, - /* 2120 */ 1358, 1358, 1358, 1358, 1358, 1358, 1358, 1358, 215, 1736, - /* 2130 */ 483, 1358, 482, 1358, 1358, 1791, 1358, 1358, 1358, 1358, - /* 2140 */ 1358, 1358, 1358, 1358, 1358, 1358, 1358, 1358, 145, 1358, - /* 2150 */ 1358, 1358, 1788, + /* 0 */ 132, 1780, 345, 1636, 1440, 1636, 294, 385, 311, 386, + /* 10 */ 1384, 78, 35, 33, 1779, 1472, 24, 1649, 1777, 131, + /* 20 */ 303, 1364, 1162, 1633, 114, 1633, 36, 34, 32, 31, + /* 30 */ 30, 1780, 1475, 36, 34, 32, 31, 30, 1629, 1635, + /* 40 */ 1629, 1635, 1780, 525, 147, 1665, 928, 1160, 1777, 529, + /* 50 */ 525, 529, 1350, 489, 393, 146, 386, 1384, 14, 1777, + /* 60 */ 35, 33, 1289, 509, 1168, 56, 384, 1619, 303, 388, + /* 70 */ 1162, 36, 34, 32, 31, 30, 36, 34, 32, 31, + /* 80 */ 30, 1, 77, 1678, 932, 933, 82, 1650, 512, 1652, + /* 90 */ 1653, 508, 73, 529, 1375, 1160, 1718, 1414, 1780, 1296, + /* 100 */ 296, 1714, 142, 608, 39, 1186, 14, 1353, 35, 33, + /* 110 */ 319, 1778, 1168, 1161, 220, 1777, 303, 277, 1162, 462, + /* 120 */ 468, 1745, 36, 34, 32, 31, 30, 71, 95, 2, + /* 130 */ 1374, 94, 93, 92, 91, 90, 89, 88, 87, 86, + /* 140 */ 525, 1200, 55, 1160, 1619, 315, 1303, 307, 1476, 1251, + /* 150 */ 1780, 608, 1563, 1565, 14, 129, 1163, 438, 437, 1780, + /* 160 */ 1168, 1161, 436, 146, 1485, 110, 433, 1777, 277, 432, + /* 170 */ 431, 430, 146, 945, 497, 944, 1777, 2, 1166, 1167, + /* 180 */ 1619, 1213, 1214, 1216, 1217, 1218, 1219, 1220, 505, 527, + /* 190 */ 1228, 1229, 1230, 1231, 1232, 1233, 286, 1239, 1252, 608, + /* 200 */ 1251, 38, 946, 1186, 1163, 55, 62, 95, 149, 1161, + /* 210 */ 94, 93, 92, 91, 90, 89, 88, 87, 86, 1257, + /* 220 */ 1732, 36, 34, 32, 31, 30, 1166, 1167, 1479, 1213, + /* 230 */ 1214, 1216, 1217, 1218, 1219, 1220, 505, 527, 1228, 1229, + /* 240 */ 1230, 1231, 1232, 1233, 1729, 287, 1373, 285, 284, 1252, + /* 250 */ 426, 403, 1163, 1372, 428, 27, 301, 1246, 1247, 1248, + /* 260 */ 1249, 1250, 1254, 1255, 1256, 513, 1188, 1215, 306, 149, + /* 270 */ 1257, 1572, 28, 228, 1166, 1167, 427, 1213, 1214, 1216, + /* 280 */ 1217, 1218, 1219, 1220, 505, 527, 1228, 1229, 1230, 1231, + /* 290 */ 1232, 1233, 35, 33, 1352, 1313, 1619, 64, 292, 1461, + /* 300 */ 303, 192, 1162, 1619, 526, 351, 27, 301, 1246, 1247, + /* 310 */ 1248, 1249, 1250, 1254, 1255, 1256, 349, 1189, 104, 103, + /* 320 */ 102, 101, 100, 99, 98, 97, 96, 1160, 149, 452, + /* 330 */ 560, 1649, 149, 1483, 472, 1311, 1312, 1314, 1315, 275, + /* 340 */ 35, 33, 1234, 1162, 1168, 486, 313, 1665, 303, 559, + /* 350 */ 1162, 558, 557, 556, 129, 479, 403, 1345, 526, 1665, + /* 360 */ 498, 8, 1371, 1485, 1560, 1215, 526, 489, 1160, 1780, + /* 370 */ 350, 157, 1527, 392, 113, 1160, 388, 509, 360, 293, + /* 380 */ 1186, 1619, 146, 608, 1525, 1168, 1777, 1483, 35, 33, + /* 390 */ 478, 219, 1168, 1161, 1370, 1483, 303, 1678, 1162, 1459, + /* 400 */ 82, 1650, 512, 1652, 1653, 508, 55, 529, 26, 9, + /* 410 */ 1718, 111, 1619, 1288, 296, 1714, 142, 141, 36, 34, + /* 420 */ 32, 31, 30, 1160, 608, 488, 143, 1725, 1726, 1521, + /* 430 */ 1730, 608, 62, 1369, 1161, 1746, 1163, 1344, 438, 437, + /* 440 */ 1168, 1161, 204, 436, 1619, 109, 110, 433, 11, 10, + /* 450 */ 432, 431, 430, 480, 1478, 1368, 562, 9, 1166, 1167, + /* 460 */ 475, 1213, 1214, 1216, 1217, 1218, 1219, 1220, 505, 527, + /* 470 */ 1228, 1229, 1230, 1231, 1232, 1233, 1187, 1163, 316, 608, + /* 480 */ 344, 336, 343, 1619, 1163, 1460, 129, 997, 149, 1161, + /* 490 */ 36, 34, 32, 31, 30, 1485, 1527, 604, 
603, 1166, + /* 500 */ 1167, 338, 334, 308, 999, 1619, 1166, 1167, 1525, 1213, + /* 510 */ 1214, 1216, 1217, 1218, 1219, 1220, 505, 527, 1228, 1229, + /* 520 */ 1230, 1231, 1232, 1233, 36, 34, 32, 31, 30, 1265, + /* 530 */ 481, 476, 1163, 149, 7, 1035, 552, 551, 550, 1039, + /* 540 */ 549, 1041, 1042, 548, 1044, 545, 1367, 1050, 542, 1052, + /* 550 */ 1053, 539, 536, 1366, 1166, 1167, 1649, 1213, 1214, 1216, + /* 560 */ 1217, 1218, 1219, 1220, 505, 527, 1228, 1229, 1230, 1231, + /* 570 */ 1232, 1233, 35, 33, 274, 375, 1184, 1411, 560, 526, + /* 580 */ 303, 526, 1162, 368, 1665, 1732, 380, 250, 155, 390, + /* 590 */ 1513, 105, 507, 361, 1200, 1184, 1619, 559, 424, 558, + /* 600 */ 557, 556, 509, 1619, 381, 1527, 1619, 1160, 1483, 1728, + /* 610 */ 1483, 1363, 314, 60, 1253, 513, 59, 1525, 1287, 159, + /* 620 */ 158, 1573, 1678, 128, 1168, 270, 1650, 512, 1652, 1653, + /* 630 */ 508, 506, 529, 503, 1690, 1258, 486, 584, 583, 582, + /* 640 */ 318, 2, 581, 580, 579, 115, 574, 573, 572, 571, + /* 650 */ 570, 569, 568, 567, 122, 563, 1362, 32, 31, 30, + /* 660 */ 1186, 1619, 1361, 608, 1360, 113, 435, 434, 562, 578, + /* 670 */ 576, 25, 1359, 1161, 379, 1458, 1358, 374, 373, 372, + /* 680 */ 371, 370, 367, 366, 365, 364, 363, 359, 358, 357, + /* 690 */ 356, 355, 354, 353, 352, 486, 1564, 1565, 526, 932, + /* 700 */ 933, 1732, 111, 1527, 1284, 198, 1619, 54, 1357, 1356, + /* 710 */ 402, 526, 1619, 1355, 1619, 1526, 1163, 144, 1725, 1726, + /* 720 */ 1185, 1730, 1619, 105, 113, 1727, 1619, 1483, 129, 1649, + /* 730 */ 429, 55, 566, 65, 1455, 1365, 1474, 1486, 1166, 1167, + /* 740 */ 1483, 1213, 1214, 1216, 1217, 1218, 1219, 1220, 505, 527, + /* 750 */ 1228, 1229, 1230, 1231, 1232, 1233, 1633, 1665, 1619, 1619, + /* 760 */ 1649, 111, 1608, 1619, 428, 510, 969, 944, 560, 1737, + /* 770 */ 1284, 1629, 1635, 1147, 1148, 509, 145, 1725, 1726, 1619, + /* 780 */ 1730, 555, 529, 970, 490, 526, 427, 559, 1665, 558, + /* 790 */ 557, 556, 422, 502, 577, 1678, 510, 1480, 81, 1650, + /* 800 */ 512, 1652, 1653, 508, 494, 529, 509, 326, 1718, 1468, + /* 810 */ 1619, 526, 276, 1714, 1483, 490, 183, 185, 1637, 181, + /* 820 */ 184, 1649, 1215, 1599, 1780, 187, 1678, 1470, 186, 81, + /* 830 */ 1650, 512, 1652, 1653, 508, 339, 529, 148, 1633, 1718, + /* 840 */ 1483, 1777, 1639, 276, 1714, 130, 310, 309, 526, 1665, + /* 850 */ 256, 565, 450, 1629, 1635, 1780, 1176, 510, 149, 1466, + /* 860 */ 460, 195, 254, 53, 529, 448, 52, 509, 146, 504, + /* 870 */ 526, 1619, 1777, 443, 526, 189, 119, 1483, 188, 1641, + /* 880 */ 46, 1169, 523, 160, 1649, 207, 524, 1678, 451, 526, + /* 890 */ 82, 1650, 512, 1652, 1653, 508, 1171, 529, 1168, 1483, + /* 900 */ 1718, 241, 191, 1483, 296, 1714, 1793, 1401, 55, 1396, + /* 910 */ 1394, 526, 1665, 1243, 446, 1752, 554, 464, 1483, 440, + /* 920 */ 510, 1310, 1441, 317, 190, 37, 209, 492, 1170, 439, + /* 930 */ 509, 441, 444, 37, 1619, 1347, 1348, 530, 46, 223, + /* 940 */ 1483, 37, 11, 10, 80, 230, 117, 1172, 459, 51, + /* 950 */ 1678, 473, 50, 82, 1650, 512, 1652, 1653, 508, 453, + /* 960 */ 529, 214, 1174, 1718, 1666, 421, 1259, 296, 1714, 1793, + /* 970 */ 1649, 1385, 118, 119, 1221, 58, 57, 348, 1775, 249, + /* 980 */ 154, 1522, 1120, 222, 1748, 342, 232, 518, 495, 534, + /* 990 */ 1177, 118, 119, 487, 1173, 225, 1184, 273, 1665, 3, + /* 1000 */ 332, 1649, 328, 324, 151, 321, 510, 227, 325, 120, + /* 1010 */ 282, 997, 1180, 238, 1028, 1131, 509, 246, 118, 283, + /* 1020 */ 1619, 362, 1562, 527, 1228, 1229, 156, 369, 377, 1665, + /* 1030 */ 1056, 376, 1060, 1066, 378, 149, 1678, 510, 
382, 82, + /* 1040 */ 1650, 512, 1652, 1653, 508, 1190, 529, 509, 383, 1718, + /* 1050 */ 1064, 1619, 391, 296, 1714, 1793, 490, 1193, 486, 121, + /* 1060 */ 394, 163, 1649, 395, 1736, 165, 1192, 1678, 1194, 397, + /* 1070 */ 261, 1650, 512, 1652, 1653, 508, 396, 529, 168, 399, + /* 1080 */ 170, 400, 1191, 401, 173, 61, 425, 113, 404, 176, + /* 1090 */ 1665, 1473, 423, 180, 1168, 291, 1780, 1469, 510, 85, + /* 1100 */ 247, 454, 1603, 455, 182, 193, 490, 458, 509, 148, + /* 1110 */ 123, 124, 1619, 1777, 1471, 1467, 125, 490, 196, 126, + /* 1120 */ 461, 199, 202, 1189, 111, 1649, 1759, 466, 1678, 474, + /* 1130 */ 516, 261, 1650, 512, 1652, 1653, 508, 1758, 529, 217, + /* 1140 */ 1725, 485, 465, 484, 6, 483, 1780, 471, 463, 205, + /* 1150 */ 208, 470, 295, 1665, 477, 213, 1649, 1780, 1739, 148, + /* 1160 */ 1284, 510, 5, 1777, 1749, 1188, 112, 1733, 40, 136, + /* 1170 */ 146, 509, 499, 496, 1777, 1619, 215, 18, 1571, 1570, + /* 1180 */ 1796, 514, 519, 297, 1665, 515, 305, 520, 234, 216, + /* 1190 */ 521, 1678, 510, 236, 83, 1650, 512, 1652, 1653, 508, + /* 1200 */ 1699, 529, 509, 248, 1718, 70, 1619, 72, 1717, 1714, + /* 1210 */ 1649, 1484, 251, 607, 532, 1456, 1776, 221, 47, 1649, + /* 1220 */ 135, 493, 1678, 243, 224, 83, 1650, 512, 1652, 1653, + /* 1230 */ 508, 500, 529, 226, 262, 1718, 272, 263, 1665, 501, + /* 1240 */ 1714, 253, 255, 1613, 1612, 320, 510, 1665, 1609, 322, + /* 1250 */ 323, 1156, 1157, 152, 327, 510, 509, 1607, 329, 330, + /* 1260 */ 1619, 331, 1606, 333, 1605, 509, 335, 1604, 337, 1619, + /* 1270 */ 1589, 153, 340, 341, 1134, 1133, 1678, 346, 347, 133, + /* 1280 */ 1650, 512, 1652, 1653, 508, 1678, 529, 1583, 83, 1650, + /* 1290 */ 512, 1652, 1653, 508, 1582, 529, 611, 1649, 1718, 1103, + /* 1300 */ 1555, 1554, 1553, 1715, 1581, 1580, 1552, 1551, 1649, 1550, + /* 1310 */ 245, 1549, 1548, 1547, 1546, 1545, 1544, 1543, 1542, 1541, + /* 1320 */ 1540, 1539, 106, 491, 1794, 1665, 1538, 1537, 600, 596, + /* 1330 */ 592, 588, 244, 510, 1536, 116, 1665, 1535, 1534, 1533, + /* 1340 */ 1532, 1531, 1530, 509, 510, 1105, 1529, 1619, 161, 935, + /* 1350 */ 469, 1528, 1413, 1381, 509, 1380, 1597, 79, 1619, 107, + /* 1360 */ 239, 934, 108, 1678, 1591, 139, 271, 1650, 512, 1652, + /* 1370 */ 1653, 508, 387, 529, 1678, 389, 1649, 266, 1650, 512, + /* 1380 */ 1652, 1653, 508, 162, 529, 1579, 167, 169, 1578, 1568, + /* 1390 */ 1462, 172, 963, 522, 1412, 1410, 407, 405, 1408, 411, + /* 1400 */ 1406, 1404, 406, 415, 1665, 409, 410, 413, 414, 419, + /* 1410 */ 418, 1393, 510, 1392, 417, 482, 1070, 179, 467, 1379, + /* 1420 */ 1464, 200, 509, 1463, 1069, 1649, 1619, 996, 995, 994, + /* 1430 */ 993, 990, 575, 45, 577, 1402, 1649, 288, 1397, 1139, + /* 1440 */ 289, 194, 1678, 989, 988, 133, 1650, 512, 1652, 1653, + /* 1450 */ 508, 442, 529, 1665, 1395, 290, 445, 1378, 447, 1377, + /* 1460 */ 1596, 510, 449, 84, 1665, 201, 456, 1577, 1141, 1590, + /* 1470 */ 1576, 509, 507, 1575, 1567, 1619, 212, 49, 300, 41, + /* 1480 */ 66, 457, 509, 4, 15, 134, 1619, 1649, 37, 48, + /* 1490 */ 1795, 1678, 206, 43, 271, 1650, 512, 1652, 1653, 508, + /* 1500 */ 1639, 529, 1678, 211, 1309, 270, 1650, 512, 1652, 1653, + /* 1510 */ 508, 210, 529, 197, 1691, 1665, 203, 10, 22, 23, + /* 1520 */ 42, 1302, 67, 510, 178, 218, 1649, 1281, 1280, 127, + /* 1530 */ 137, 1338, 1327, 509, 17, 1333, 140, 1619, 19, 1649, + /* 1540 */ 302, 1332, 420, 416, 412, 408, 177, 298, 1337, 1336, + /* 1550 */ 299, 1244, 29, 1678, 1665, 138, 271, 1650, 512, 1652, + /* 1560 */ 1653, 508, 510, 529, 1223, 1222, 12, 1665, 20, 1208, + /* 1570 
*/ 150, 63, 509, 21, 175, 510, 1619, 229, 1307, 304, + /* 1580 */ 231, 1566, 16, 235, 1178, 509, 13, 511, 1649, 1619, + /* 1590 */ 233, 517, 1678, 68, 69, 271, 1650, 512, 1652, 1653, + /* 1600 */ 508, 237, 529, 1638, 240, 1678, 1225, 73, 257, 1650, + /* 1610 */ 512, 1652, 1653, 508, 1681, 529, 1665, 1649, 528, 44, + /* 1620 */ 531, 1057, 533, 312, 510, 535, 537, 1054, 1049, 538, + /* 1630 */ 540, 174, 1051, 166, 509, 171, 541, 398, 1619, 543, + /* 1640 */ 1045, 546, 544, 1034, 547, 1665, 1043, 1048, 1047, 553, + /* 1650 */ 74, 75, 1065, 510, 1678, 164, 1649, 265, 1650, 512, + /* 1660 */ 1652, 1653, 508, 509, 529, 76, 1063, 1619, 1062, 961, + /* 1670 */ 1046, 561, 985, 1003, 564, 242, 983, 982, 981, 980, + /* 1680 */ 979, 978, 977, 1678, 1665, 976, 267, 1650, 512, 1652, + /* 1690 */ 1653, 508, 510, 529, 998, 973, 972, 971, 968, 967, + /* 1700 */ 966, 1000, 509, 1409, 585, 1649, 1619, 586, 587, 1407, + /* 1710 */ 589, 590, 591, 1405, 593, 594, 1649, 595, 1403, 597, + /* 1720 */ 599, 598, 1678, 1391, 601, 258, 1650, 512, 1652, 1653, + /* 1730 */ 508, 1390, 529, 1665, 602, 1376, 605, 606, 1351, 1351, + /* 1740 */ 609, 510, 1164, 252, 1665, 610, 1351, 1351, 1351, 1351, + /* 1750 */ 1351, 509, 510, 1351, 1351, 1619, 1351, 1351, 1351, 1351, + /* 1760 */ 1351, 1351, 509, 1351, 1351, 1649, 1619, 1351, 1351, 1351, + /* 1770 */ 1351, 1678, 1351, 1351, 268, 1650, 512, 1652, 1653, 508, + /* 1780 */ 1649, 529, 1678, 1351, 1351, 259, 1650, 512, 1652, 1653, + /* 1790 */ 508, 1351, 529, 1665, 1351, 1351, 1351, 1649, 1351, 1351, + /* 1800 */ 1351, 510, 1351, 1351, 1351, 1351, 1351, 1351, 1665, 1351, + /* 1810 */ 1351, 509, 1649, 1351, 1351, 1619, 510, 1351, 1351, 1351, + /* 1820 */ 1351, 1351, 1351, 1351, 1351, 1665, 509, 1351, 1351, 1649, + /* 1830 */ 1619, 1678, 1351, 510, 269, 1650, 512, 1652, 1653, 508, + /* 1840 */ 1665, 529, 1351, 509, 1351, 1351, 1678, 1619, 510, 260, + /* 1850 */ 1650, 512, 1652, 1653, 508, 1351, 529, 1665, 509, 1351, + /* 1860 */ 1351, 1351, 1619, 1678, 1351, 510, 1661, 1650, 512, 1652, + /* 1870 */ 1653, 508, 1351, 529, 1351, 509, 1649, 1351, 1678, 1619, + /* 1880 */ 1351, 1660, 1650, 512, 1652, 1653, 508, 1351, 529, 1351, + /* 1890 */ 1351, 1351, 1351, 1351, 1351, 1678, 1351, 1351, 1659, 1650, + /* 1900 */ 512, 1652, 1653, 508, 1665, 529, 1351, 1351, 1649, 1351, + /* 1910 */ 1351, 1351, 510, 1351, 1351, 1351, 1351, 1351, 1351, 1351, + /* 1920 */ 1351, 1351, 509, 1351, 1351, 1649, 1619, 1351, 1351, 1351, + /* 1930 */ 1351, 1351, 1351, 1351, 1351, 1351, 1665, 1351, 1351, 1351, + /* 1940 */ 1351, 1351, 1678, 1351, 510, 280, 1650, 512, 1652, 1653, + /* 1950 */ 508, 1351, 529, 1665, 509, 1351, 1351, 1649, 1619, 1351, + /* 1960 */ 1351, 510, 1351, 1351, 1351, 1351, 1351, 1351, 1351, 1351, + /* 1970 */ 1351, 509, 1351, 1351, 1678, 1619, 1351, 279, 1650, 512, + /* 1980 */ 1652, 1653, 508, 1351, 529, 1665, 1351, 1351, 1351, 1351, + /* 1990 */ 1351, 1678, 1351, 510, 281, 1650, 512, 1652, 1653, 508, + /* 2000 */ 1351, 529, 1351, 509, 1351, 1351, 1649, 1619, 1351, 1351, + /* 2010 */ 1351, 1351, 1351, 1351, 1351, 1351, 1351, 1351, 1351, 486, + /* 2020 */ 1351, 1351, 1351, 1678, 1351, 1351, 278, 1650, 512, 1652, + /* 2030 */ 1653, 508, 1351, 529, 1665, 1351, 1351, 1351, 1351, 1351, + /* 2040 */ 1351, 1351, 510, 1351, 1351, 1351, 1351, 1351, 113, 1351, + /* 2050 */ 1351, 1351, 509, 1351, 1351, 1351, 1619, 1351, 1351, 1351, + /* 2060 */ 1351, 1351, 1351, 1351, 1351, 1351, 1351, 490, 1351, 1351, + /* 2070 */ 1351, 1351, 1678, 1351, 1351, 264, 1650, 512, 1652, 1653, + /* 2080 */ 508, 1351, 529, 
1351, 1351, 111, 1351, 1351, 1351, 1351, + /* 2090 */ 1351, 1351, 1351, 1351, 1351, 1351, 1351, 1351, 1351, 1351, + /* 2100 */ 217, 1725, 485, 1351, 484, 1351, 1351, 1780, 1351, 1351, + /* 2110 */ 1351, 1351, 1351, 1351, 1351, 1351, 1351, 1351, 1351, 1351, + /* 2120 */ 146, 1351, 1351, 1351, 1777, }; static const YYCODETYPE yy_lookahead[] = { - /* 0 */ 246, 273, 248, 249, 276, 246, 250, 248, 249, 324, - /* 10 */ 325, 4, 12, 13, 242, 271, 244, 243, 262, 339, - /* 20 */ 20, 293, 22, 279, 20, 269, 12, 13, 14, 15, - /* 30 */ 16, 247, 352, 255, 250, 279, 356, 309, 310, 12, - /* 40 */ 13, 14, 15, 16, 271, 271, 268, 47, 320, 42, - /* 50 */ 43, 278, 47, 279, 250, 277, 283, 55, 58, 315, - /* 60 */ 12, 13, 14, 289, 64, 2, 262, 293, 20, 64, - /* 70 */ 22, 0, 58, 299, 20, 12, 13, 14, 15, 16, - /* 80 */ 4, 81, 80, 279, 310, 83, 250, 313, 314, 315, - /* 90 */ 316, 317, 318, 0, 320, 47, 82, 323, 262, 251, - /* 100 */ 252, 327, 328, 103, 90, 269, 58, 12, 13, 14, - /* 110 */ 15, 16, 64, 339, 114, 279, 270, 24, 25, 26, - /* 120 */ 27, 28, 29, 30, 31, 32, 352, 75, 282, 81, - /* 130 */ 356, 60, 61, 62, 63, 81, 65, 66, 67, 68, - /* 140 */ 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, - /* 150 */ 289, 103, 279, 292, 140, 81, 295, 81, 158, 286, - /* 160 */ 21, 339, 114, 24, 25, 26, 27, 28, 29, 30, - /* 170 */ 31, 32, 120, 121, 352, 250, 162, 150, 356, 247, - /* 180 */ 180, 181, 250, 183, 184, 185, 186, 187, 188, 189, - /* 190 */ 190, 191, 192, 193, 194, 195, 196, 197, 0, 12, - /* 200 */ 13, 14, 15, 16, 167, 168, 158, 256, 171, 284, - /* 210 */ 210, 260, 198, 199, 200, 201, 202, 203, 204, 205, - /* 220 */ 206, 207, 0, 12, 13, 14, 15, 16, 180, 181, - /* 230 */ 250, 183, 184, 185, 186, 187, 188, 189, 190, 191, - /* 240 */ 192, 193, 194, 195, 196, 197, 12, 13, 0, 20, - /* 250 */ 253, 22, 60, 61, 20, 57, 22, 65, 231, 279, - /* 260 */ 68, 69, 263, 339, 72, 73, 74, 12, 13, 82, - /* 270 */ 271, 274, 243, 57, 20, 20, 352, 22, 49, 280, - /* 280 */ 356, 47, 60, 61, 210, 20, 210, 65, 255, 47, - /* 290 */ 68, 69, 58, 82, 72, 73, 74, 317, 64, 0, - /* 300 */ 271, 14, 47, 243, 243, 250, 64, 20, 279, 250, - /* 310 */ 277, 331, 332, 333, 334, 81, 336, 262, 289, 64, - /* 320 */ 21, 262, 293, 24, 25, 26, 27, 28, 29, 30, - /* 330 */ 31, 32, 271, 243, 279, 81, 81, 103, 279, 310, - /* 340 */ 279, 93, 313, 314, 315, 316, 317, 318, 114, 320, - /* 350 */ 289, 20, 323, 293, 293, 271, 327, 328, 103, 271, - /* 360 */ 112, 113, 278, 115, 116, 117, 243, 283, 280, 114, - /* 370 */ 259, 310, 261, 271, 313, 314, 315, 316, 317, 318, - /* 380 */ 278, 320, 250, 293, 323, 283, 311, 2, 327, 328, - /* 390 */ 329, 20, 158, 3, 262, 257, 258, 12, 13, 14, - /* 400 */ 15, 16, 341, 12, 13, 14, 15, 16, 347, 348, - /* 410 */ 335, 279, 81, 158, 180, 181, 293, 183, 184, 185, - /* 420 */ 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, - /* 430 */ 196, 197, 250, 257, 258, 180, 181, 243, 183, 184, - /* 440 */ 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, - /* 450 */ 195, 196, 197, 12, 13, 14, 243, 4, 20, 19, - /* 460 */ 281, 20, 243, 22, 210, 210, 264, 288, 289, 267, - /* 470 */ 14, 184, 19, 33, 12, 13, 20, 273, 243, 243, - /* 480 */ 273, 299, 20, 276, 22, 45, 33, 293, 47, 250, - /* 490 */ 144, 51, 52, 53, 54, 55, 14, 293, 45, 240, - /* 500 */ 293, 262, 20, 50, 57, 64, 293, 271, 55, 47, - /* 510 */ 153, 250, 293, 309, 310, 279, 309, 310, 279, 250, - /* 520 */ 80, 339, 81, 83, 320, 289, 64, 320, 293, 293, - /* 530 */ 173, 174, 64, 80, 352, 299, 83, 146, 356, 289, - /* 540 */ 279, 210, 41, 81, 103, 295, 310, 20, 279, 313, - /* 550 */ 314, 315, 316, 317, 318, 114, 
320, 117, 299, 323, - /* 560 */ 243, 215, 216, 327, 328, 103, 12, 13, 14, 15, - /* 570 */ 16, 273, 80, 263, 272, 339, 114, 93, 317, 92, - /* 580 */ 88, 271, 142, 82, 146, 145, 317, 0, 352, 81, - /* 590 */ 280, 293, 356, 332, 333, 334, 253, 336, 339, 158, - /* 600 */ 116, 332, 333, 334, 164, 336, 166, 309, 310, 266, - /* 610 */ 293, 352, 58, 288, 289, 356, 226, 274, 320, 4, - /* 620 */ 158, 180, 181, 243, 183, 184, 185, 186, 187, 188, - /* 630 */ 189, 190, 191, 192, 193, 194, 195, 196, 197, 35, - /* 640 */ 184, 149, 180, 181, 90, 183, 184, 185, 186, 187, - /* 650 */ 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, - /* 660 */ 12, 13, 18, 93, 20, 157, 180, 159, 20, 250, - /* 670 */ 22, 27, 250, 293, 30, 14, 15, 16, 208, 209, - /* 680 */ 93, 262, 112, 113, 262, 115, 116, 117, 84, 37, - /* 690 */ 86, 87, 48, 89, 140, 47, 22, 93, 279, 112, - /* 700 */ 113, 279, 115, 116, 117, 219, 220, 221, 222, 223, - /* 710 */ 271, 263, 64, 1, 2, 243, 162, 311, 210, 271, - /* 720 */ 116, 47, 283, 42, 43, 311, 169, 170, 280, 81, - /* 730 */ 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, - /* 740 */ 104, 335, 106, 107, 108, 109, 110, 111, 299, 335, - /* 750 */ 243, 103, 198, 199, 200, 201, 202, 203, 204, 205, - /* 760 */ 206, 207, 114, 119, 0, 293, 122, 123, 124, 125, - /* 770 */ 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, - /* 780 */ 136, 137, 138, 139, 250, 250, 18, 250, 339, 20, - /* 790 */ 41, 23, 20, 20, 82, 140, 262, 262, 272, 262, - /* 800 */ 293, 352, 243, 35, 36, 356, 158, 39, 243, 243, - /* 810 */ 243, 299, 243, 279, 279, 243, 279, 162, 250, 243, - /* 820 */ 243, 57, 243, 146, 56, 148, 211, 272, 180, 181, - /* 830 */ 262, 183, 184, 185, 186, 187, 188, 189, 190, 191, - /* 840 */ 192, 193, 194, 195, 196, 197, 41, 279, 271, 81, - /* 850 */ 272, 339, 293, 198, 0, 272, 279, 0, 293, 293, - /* 860 */ 293, 209, 293, 0, 352, 293, 289, 0, 356, 293, - /* 870 */ 293, 85, 293, 85, 88, 85, 88, 85, 88, 22, - /* 880 */ 88, 243, 58, 21, 272, 22, 118, 310, 81, 22, - /* 890 */ 313, 314, 315, 316, 317, 318, 34, 320, 91, 45, - /* 900 */ 323, 1, 2, 272, 327, 328, 329, 244, 303, 271, - /* 910 */ 195, 196, 44, 260, 243, 146, 47, 279, 150, 151, - /* 920 */ 152, 41, 41, 155, 350, 348, 47, 289, 160, 41, - /* 930 */ 359, 293, 41, 41, 41, 307, 344, 271, 249, 41, - /* 940 */ 172, 251, 271, 175, 282, 177, 178, 179, 310, 81, - /* 950 */ 279, 313, 314, 315, 316, 317, 318, 184, 320, 312, - /* 960 */ 289, 323, 82, 82, 293, 327, 328, 329, 20, 41, - /* 970 */ 82, 41, 337, 82, 82, 82, 338, 228, 210, 243, - /* 980 */ 82, 310, 353, 114, 313, 314, 315, 316, 317, 318, - /* 990 */ 353, 320, 41, 114, 323, 243, 41, 41, 327, 328, - /* 1000 */ 329, 250, 41, 353, 180, 340, 41, 271, 45, 338, - /* 1010 */ 82, 308, 82, 47, 257, 279, 41, 41, 301, 40, - /* 1020 */ 156, 250, 287, 271, 250, 289, 140, 243, 285, 293, - /* 1030 */ 285, 279, 250, 82, 20, 230, 245, 82, 82, 245, - /* 1040 */ 20, 289, 305, 82, 255, 293, 310, 82, 289, 313, - /* 1050 */ 314, 315, 316, 317, 318, 271, 320, 82, 82, 323, - /* 1060 */ 20, 255, 310, 279, 328, 313, 314, 315, 316, 317, - /* 1070 */ 318, 20, 320, 289, 297, 323, 300, 293, 243, 327, - /* 1080 */ 328, 329, 255, 299, 297, 255, 20, 279, 290, 255, - /* 1090 */ 338, 255, 250, 243, 310, 255, 245, 313, 314, 315, - /* 1100 */ 316, 317, 318, 271, 320, 250, 271, 64, 271, 271, - /* 1110 */ 271, 245, 271, 293, 279, 271, 271, 271, 271, 271, - /* 1120 */ 271, 271, 305, 339, 289, 253, 304, 253, 293, 279, - /* 1130 */ 297, 253, 165, 290, 299, 279, 352, 253, 20, 289, - /* 1140 */ 356, 289, 218, 293, 243, 310, 349, 294, 313, 314, - /* 
1150 */ 315, 316, 317, 318, 217, 320, 225, 346, 293, 349, - /* 1160 */ 310, 345, 224, 313, 314, 315, 316, 317, 318, 294, - /* 1170 */ 320, 312, 271, 323, 339, 293, 293, 327, 328, 20, - /* 1180 */ 279, 213, 212, 209, 279, 40, 229, 352, 311, 360, - /* 1190 */ 289, 356, 232, 81, 293, 293, 243, 293, 227, 293, - /* 1200 */ 12, 13, 294, 343, 294, 243, 330, 143, 291, 290, - /* 1210 */ 22, 310, 342, 355, 313, 314, 315, 316, 317, 318, - /* 1220 */ 319, 320, 321, 322, 271, 354, 279, 81, 355, 326, - /* 1230 */ 253, 267, 279, 271, 253, 47, 250, 279, 253, 354, - /* 1240 */ 245, 279, 289, 261, 355, 302, 293, 275, 354, 265, - /* 1250 */ 306, 289, 64, 243, 265, 293, 265, 298, 296, 254, - /* 1260 */ 241, 0, 0, 310, 72, 0, 313, 314, 315, 316, - /* 1270 */ 317, 318, 310, 320, 47, 313, 314, 315, 316, 317, - /* 1280 */ 318, 271, 320, 176, 47, 243, 47, 47, 0, 279, - /* 1290 */ 176, 103, 47, 47, 0, 176, 47, 0, 47, 289, - /* 1300 */ 0, 47, 114, 293, 0, 81, 162, 114, 161, 158, - /* 1310 */ 357, 358, 0, 271, 0, 154, 153, 243, 0, 0, - /* 1320 */ 310, 279, 44, 313, 314, 315, 316, 317, 318, 0, - /* 1330 */ 320, 289, 0, 0, 0, 293, 0, 0, 0, 0, - /* 1340 */ 0, 0, 0, 0, 0, 271, 158, 0, 0, 0, - /* 1350 */ 0, 0, 310, 279, 0, 313, 314, 315, 316, 317, - /* 1360 */ 318, 351, 320, 289, 40, 243, 0, 293, 180, 0, - /* 1370 */ 296, 0, 0, 0, 22, 0, 0, 0, 243, 191, - /* 1380 */ 192, 193, 0, 0, 310, 0, 40, 313, 314, 315, - /* 1390 */ 316, 317, 318, 271, 320, 14, 37, 41, 14, 44, - /* 1400 */ 358, 279, 0, 38, 44, 37, 271, 0, 0, 0, - /* 1410 */ 0, 289, 0, 0, 279, 293, 37, 37, 243, 0, - /* 1420 */ 59, 0, 0, 37, 289, 47, 0, 37, 293, 37, - /* 1430 */ 0, 296, 310, 37, 45, 313, 314, 315, 316, 317, - /* 1440 */ 318, 47, 320, 45, 322, 310, 271, 47, 313, 314, - /* 1450 */ 315, 316, 317, 318, 279, 320, 45, 0, 33, 0, - /* 1460 */ 45, 47, 0, 0, 289, 47, 243, 0, 293, 22, - /* 1470 */ 45, 296, 41, 41, 47, 22, 51, 52, 53, 54, - /* 1480 */ 55, 90, 47, 88, 47, 310, 47, 47, 313, 314, - /* 1490 */ 315, 316, 317, 318, 271, 320, 47, 47, 0, 22, - /* 1500 */ 47, 0, 279, 22, 48, 80, 0, 22, 83, 0, - /* 1510 */ 22, 47, 289, 0, 22, 20, 293, 64, 0, 47, - /* 1520 */ 146, 0, 22, 243, 0, 163, 0, 0, 37, 81, - /* 1530 */ 143, 41, 81, 310, 141, 41, 313, 314, 315, 316, - /* 1540 */ 317, 318, 82, 320, 214, 146, 41, 82, 82, 81, - /* 1550 */ 81, 271, 41, 41, 44, 81, 103, 82, 44, 279, - /* 1560 */ 82, 44, 82, 41, 44, 208, 141, 114, 143, 289, - /* 1570 */ 145, 243, 147, 293, 41, 47, 47, 214, 214, 82, - /* 1580 */ 2, 47, 47, 47, 47, 180, 41, 44, 44, 82, - /* 1590 */ 310, 166, 22, 313, 314, 315, 316, 317, 318, 271, - /* 1600 */ 320, 243, 182, 0, 81, 144, 81, 279, 44, 37, - /* 1610 */ 91, 158, 82, 81, 141, 22, 47, 289, 44, 81, - /* 1620 */ 81, 293, 82, 92, 82, 59, 47, 81, 81, 271, - /* 1630 */ 81, 243, 81, 180, 181, 82, 81, 279, 310, 81, - /* 1640 */ 81, 313, 314, 315, 316, 317, 318, 289, 320, 82, - /* 1650 */ 82, 293, 47, 47, 81, 47, 47, 82, 81, 271, - /* 1660 */ 22, 105, 82, 81, 47, 82, 81, 279, 310, 93, - /* 1670 */ 105, 313, 314, 315, 316, 317, 318, 289, 320, 243, - /* 1680 */ 81, 293, 81, 81, 47, 22, 105, 58, 243, 105, - /* 1690 */ 47, 64, 79, 41, 47, 47, 47, 47, 310, 22, - /* 1700 */ 114, 313, 314, 315, 316, 317, 318, 271, 320, 47, - /* 1710 */ 64, 47, 47, 47, 47, 279, 271, 47, 47, 47, - /* 1720 */ 47, 47, 0, 47, 279, 289, 45, 37, 0, 293, - /* 1730 */ 47, 45, 37, 0, 289, 47, 243, 45, 293, 37, - /* 1740 */ 0, 47, 45, 37, 0, 47, 310, 46, 0, 313, - /* 1750 */ 314, 315, 316, 317, 318, 310, 320, 0, 313, 314, - /* 1760 */ 315, 316, 317, 318, 271, 320, 20, 22, 21, 361, - /* 1770 
*/ 361, 22, 279, 22, 21, 361, 361, 361, 361, 361, - /* 1780 */ 361, 361, 289, 361, 243, 361, 293, 361, 361, 361, - /* 1790 */ 361, 361, 361, 361, 361, 243, 361, 361, 361, 361, - /* 1800 */ 361, 361, 361, 310, 361, 361, 313, 314, 315, 316, - /* 1810 */ 317, 318, 271, 320, 243, 361, 361, 361, 361, 361, - /* 1820 */ 279, 361, 361, 271, 361, 361, 361, 361, 361, 361, - /* 1830 */ 289, 279, 361, 361, 293, 361, 361, 361, 361, 361, - /* 1840 */ 361, 289, 271, 361, 361, 293, 361, 361, 361, 361, - /* 1850 */ 279, 310, 361, 361, 313, 314, 315, 316, 317, 318, - /* 1860 */ 289, 320, 310, 361, 293, 313, 314, 315, 316, 317, - /* 1870 */ 318, 361, 320, 243, 361, 361, 361, 361, 361, 361, - /* 1880 */ 361, 310, 243, 361, 313, 314, 315, 316, 317, 318, - /* 1890 */ 361, 320, 361, 361, 361, 361, 361, 361, 361, 361, - /* 1900 */ 361, 271, 361, 361, 361, 361, 361, 361, 361, 279, - /* 1910 */ 271, 361, 361, 361, 361, 361, 361, 361, 279, 289, - /* 1920 */ 361, 361, 361, 293, 361, 361, 361, 361, 289, 361, - /* 1930 */ 361, 361, 293, 361, 361, 361, 361, 361, 361, 361, - /* 1940 */ 310, 361, 243, 313, 314, 315, 316, 317, 318, 310, - /* 1950 */ 320, 243, 313, 314, 315, 316, 317, 318, 361, 320, - /* 1960 */ 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, - /* 1970 */ 271, 361, 361, 361, 361, 361, 361, 361, 279, 271, - /* 1980 */ 361, 361, 361, 361, 361, 361, 361, 279, 289, 361, - /* 1990 */ 361, 361, 293, 361, 361, 361, 361, 289, 361, 243, - /* 2000 */ 361, 293, 361, 361, 361, 361, 361, 361, 361, 310, - /* 2010 */ 250, 361, 313, 314, 315, 316, 317, 318, 310, 320, - /* 2020 */ 361, 313, 314, 315, 316, 317, 318, 271, 320, 361, - /* 2030 */ 361, 361, 361, 361, 361, 279, 361, 361, 361, 279, - /* 2040 */ 361, 361, 361, 361, 361, 289, 250, 361, 361, 293, - /* 2050 */ 361, 361, 361, 361, 361, 361, 361, 361, 361, 299, - /* 2060 */ 361, 361, 361, 361, 361, 361, 310, 361, 361, 313, - /* 2070 */ 314, 315, 316, 317, 318, 279, 320, 317, 361, 361, - /* 2080 */ 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, - /* 2090 */ 361, 361, 332, 333, 334, 299, 336, 361, 361, 339, - /* 2100 */ 361, 361, 361, 361, 361, 361, 361, 361, 361, 361, - /* 2110 */ 361, 361, 352, 317, 361, 361, 356, 361, 361, 361, - /* 2120 */ 361, 361, 361, 361, 361, 361, 361, 361, 332, 333, - /* 2130 */ 334, 361, 336, 361, 361, 339, 361, 361, 361, 361, - /* 2140 */ 361, 361, 361, 361, 361, 361, 361, 361, 352, 361, - /* 2150 */ 361, 361, 356, + /* 0 */ 253, 335, 295, 270, 257, 270, 273, 243, 273, 245, + /* 10 */ 246, 250, 12, 13, 348, 269, 2, 240, 352, 239, + /* 20 */ 20, 241, 22, 290, 263, 290, 12, 13, 14, 15, + /* 30 */ 16, 335, 271, 12, 13, 14, 15, 16, 305, 306, + /* 40 */ 305, 306, 335, 20, 348, 268, 4, 47, 352, 316, + /* 50 */ 20, 316, 237, 276, 243, 348, 245, 246, 58, 352, + /* 60 */ 12, 13, 14, 286, 64, 4, 244, 290, 20, 247, + /* 70 */ 22, 12, 13, 14, 15, 16, 12, 13, 14, 15, + /* 80 */ 16, 81, 81, 306, 42, 43, 309, 310, 311, 312, + /* 90 */ 313, 314, 91, 316, 240, 47, 319, 0, 335, 14, + /* 100 */ 323, 324, 325, 103, 81, 20, 58, 0, 12, 13, + /* 110 */ 295, 348, 64, 113, 337, 352, 20, 58, 22, 295, + /* 120 */ 343, 344, 12, 13, 14, 15, 16, 250, 21, 81, + /* 130 */ 240, 24, 25, 26, 27, 28, 29, 30, 31, 32, + /* 140 */ 20, 82, 81, 47, 290, 278, 82, 260, 271, 90, + /* 150 */ 335, 103, 285, 286, 58, 268, 156, 60, 61, 335, + /* 160 */ 64, 113, 65, 348, 277, 68, 69, 352, 58, 72, + /* 170 */ 73, 74, 348, 20, 41, 22, 352, 81, 178, 179, + /* 180 */ 290, 181, 182, 183, 184, 185, 186, 187, 188, 189, + /* 190 */ 190, 191, 192, 193, 194, 195, 35, 14, 139, 103, + /* 200 */ 90, 
81, 49, 20, 156, 81, 252, 21, 208, 113, + /* 210 */ 24, 25, 26, 27, 28, 29, 30, 31, 32, 160, + /* 220 */ 307, 12, 13, 14, 15, 16, 178, 179, 274, 181, + /* 230 */ 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, + /* 240 */ 192, 193, 194, 195, 331, 84, 240, 86, 87, 139, + /* 250 */ 89, 57, 156, 240, 93, 196, 197, 198, 199, 200, + /* 260 */ 201, 202, 203, 204, 205, 286, 20, 182, 289, 208, + /* 270 */ 160, 292, 320, 321, 178, 179, 115, 181, 182, 183, + /* 280 */ 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, + /* 290 */ 194, 195, 12, 13, 0, 178, 290, 165, 166, 0, + /* 300 */ 20, 169, 22, 290, 247, 247, 196, 197, 198, 199, + /* 310 */ 200, 201, 202, 203, 204, 205, 259, 20, 24, 25, + /* 320 */ 26, 27, 28, 29, 30, 31, 32, 47, 208, 295, + /* 330 */ 93, 240, 208, 276, 217, 218, 219, 220, 221, 281, + /* 340 */ 12, 13, 14, 22, 64, 247, 260, 268, 20, 112, + /* 350 */ 22, 114, 115, 116, 268, 276, 57, 148, 247, 268, + /* 360 */ 227, 81, 240, 277, 276, 182, 247, 276, 47, 335, + /* 370 */ 259, 283, 268, 244, 276, 47, 247, 286, 259, 275, + /* 380 */ 20, 290, 348, 103, 280, 64, 352, 276, 12, 13, + /* 390 */ 311, 145, 64, 113, 240, 276, 20, 306, 22, 0, + /* 400 */ 309, 310, 311, 312, 313, 314, 81, 316, 2, 81, + /* 410 */ 319, 313, 290, 4, 323, 324, 325, 267, 12, 13, + /* 420 */ 14, 15, 16, 47, 103, 327, 328, 329, 330, 279, + /* 430 */ 332, 103, 252, 240, 113, 344, 156, 228, 60, 61, + /* 440 */ 64, 113, 145, 65, 290, 265, 68, 69, 1, 2, + /* 450 */ 72, 73, 74, 20, 274, 240, 57, 81, 178, 179, + /* 460 */ 143, 181, 182, 183, 184, 185, 186, 187, 188, 189, + /* 470 */ 190, 191, 192, 193, 194, 195, 20, 156, 260, 103, + /* 480 */ 155, 151, 157, 290, 156, 0, 268, 47, 208, 113, + /* 490 */ 12, 13, 14, 15, 16, 277, 268, 248, 249, 178, + /* 500 */ 179, 171, 172, 275, 64, 290, 178, 179, 280, 181, + /* 510 */ 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, + /* 520 */ 192, 193, 194, 195, 12, 13, 14, 15, 16, 82, + /* 530 */ 213, 214, 156, 208, 37, 94, 95, 96, 97, 98, + /* 540 */ 99, 100, 101, 102, 103, 104, 240, 106, 107, 108, + /* 550 */ 109, 110, 111, 240, 178, 179, 240, 181, 182, 183, + /* 560 */ 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, + /* 570 */ 194, 195, 12, 13, 18, 75, 20, 0, 93, 247, + /* 580 */ 20, 247, 22, 27, 268, 307, 30, 261, 55, 14, + /* 590 */ 264, 259, 276, 259, 82, 20, 290, 112, 266, 114, + /* 600 */ 115, 116, 286, 290, 48, 268, 290, 47, 276, 331, + /* 610 */ 276, 240, 275, 80, 139, 286, 83, 280, 209, 119, + /* 620 */ 120, 292, 306, 145, 64, 309, 310, 311, 312, 313, + /* 630 */ 314, 315, 316, 317, 318, 160, 247, 60, 61, 62, + /* 640 */ 63, 81, 65, 66, 67, 68, 69, 70, 71, 72, + /* 650 */ 73, 74, 75, 76, 77, 78, 240, 14, 15, 16, + /* 660 */ 20, 290, 240, 103, 240, 276, 254, 255, 57, 254, + /* 670 */ 255, 196, 240, 113, 118, 0, 240, 121, 122, 123, + /* 680 */ 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, + /* 690 */ 134, 135, 136, 137, 138, 247, 285, 286, 247, 42, + /* 700 */ 43, 307, 313, 268, 207, 55, 290, 3, 240, 240, + /* 710 */ 259, 247, 290, 240, 290, 280, 156, 328, 329, 330, + /* 720 */ 20, 332, 290, 259, 276, 331, 290, 276, 268, 240, + /* 730 */ 266, 81, 256, 83, 258, 241, 270, 277, 178, 179, + /* 740 */ 276, 181, 182, 183, 184, 185, 186, 187, 188, 189, + /* 750 */ 190, 191, 192, 193, 194, 195, 290, 268, 290, 290, + /* 760 */ 240, 313, 0, 290, 93, 276, 47, 22, 93, 206, + /* 770 */ 207, 305, 306, 167, 168, 286, 328, 329, 330, 290, + /* 780 */ 332, 92, 316, 64, 295, 247, 115, 112, 268, 114, + /* 790 */ 115, 116, 47, 58, 41, 306, 276, 259, 309, 310, + /* 800 */ 311, 312, 313, 314, 
41, 316, 286, 45, 319, 269, + /* 810 */ 290, 247, 323, 324, 276, 295, 85, 85, 270, 88, + /* 820 */ 88, 240, 182, 259, 335, 85, 306, 269, 88, 309, + /* 830 */ 310, 311, 312, 313, 314, 82, 316, 348, 290, 319, + /* 840 */ 276, 352, 44, 323, 324, 18, 12, 13, 247, 268, + /* 850 */ 23, 64, 21, 305, 306, 335, 22, 276, 208, 269, + /* 860 */ 259, 269, 35, 36, 316, 34, 39, 286, 348, 269, + /* 870 */ 247, 290, 352, 4, 247, 85, 41, 276, 88, 81, + /* 880 */ 41, 47, 259, 56, 240, 41, 259, 306, 19, 247, + /* 890 */ 309, 310, 311, 312, 313, 314, 47, 316, 64, 276, + /* 900 */ 319, 259, 33, 276, 323, 324, 325, 0, 81, 0, + /* 910 */ 0, 247, 268, 178, 45, 334, 269, 82, 276, 50, + /* 920 */ 276, 82, 257, 259, 55, 41, 82, 223, 47, 22, + /* 930 */ 286, 22, 22, 41, 290, 193, 194, 103, 41, 355, + /* 940 */ 276, 41, 1, 2, 117, 41, 41, 113, 299, 80, + /* 950 */ 306, 346, 83, 309, 310, 311, 312, 313, 314, 303, + /* 960 */ 316, 340, 113, 319, 268, 248, 82, 323, 324, 325, + /* 970 */ 240, 246, 41, 41, 82, 148, 149, 150, 334, 82, + /* 980 */ 153, 279, 82, 349, 308, 158, 82, 82, 225, 41, + /* 990 */ 156, 41, 41, 333, 113, 349, 20, 170, 268, 336, + /* 1000 */ 173, 240, 175, 176, 177, 247, 276, 349, 45, 41, + /* 1010 */ 304, 47, 178, 82, 82, 154, 286, 297, 41, 254, + /* 1020 */ 290, 247, 247, 189, 190, 191, 40, 284, 139, 268, + /* 1030 */ 82, 282, 82, 82, 282, 208, 306, 276, 247, 309, + /* 1040 */ 310, 311, 312, 313, 314, 20, 316, 286, 242, 319, + /* 1050 */ 82, 290, 242, 323, 324, 325, 295, 20, 247, 82, + /* 1060 */ 301, 252, 240, 286, 334, 252, 20, 306, 20, 296, + /* 1070 */ 309, 310, 311, 312, 313, 314, 294, 316, 252, 294, + /* 1080 */ 252, 276, 20, 287, 252, 252, 268, 276, 247, 252, + /* 1090 */ 268, 268, 242, 268, 64, 242, 335, 268, 276, 247, + /* 1100 */ 301, 163, 290, 300, 268, 250, 295, 286, 286, 348, + /* 1110 */ 268, 268, 290, 352, 268, 268, 268, 295, 250, 268, + /* 1120 */ 247, 250, 250, 20, 313, 240, 345, 287, 306, 216, + /* 1130 */ 215, 309, 310, 311, 312, 313, 314, 345, 316, 328, + /* 1140 */ 329, 330, 276, 332, 222, 147, 335, 290, 294, 291, + /* 1150 */ 291, 211, 290, 268, 290, 341, 240, 335, 342, 348, + /* 1160 */ 207, 276, 210, 352, 308, 20, 276, 307, 40, 339, + /* 1170 */ 348, 286, 226, 224, 352, 290, 338, 81, 291, 291, + /* 1180 */ 356, 290, 142, 229, 268, 290, 290, 288, 276, 326, + /* 1190 */ 287, 306, 276, 250, 309, 310, 311, 312, 313, 314, + /* 1200 */ 322, 316, 286, 264, 319, 250, 290, 81, 323, 324, + /* 1210 */ 240, 276, 247, 242, 272, 258, 351, 350, 298, 240, + /* 1220 */ 302, 351, 306, 250, 350, 309, 310, 311, 312, 313, + /* 1230 */ 314, 351, 316, 350, 262, 319, 262, 262, 268, 323, + /* 1240 */ 324, 251, 238, 0, 0, 72, 276, 268, 0, 47, + /* 1250 */ 174, 47, 47, 47, 174, 276, 286, 0, 47, 47, + /* 1260 */ 290, 174, 0, 47, 0, 286, 47, 0, 47, 290, + /* 1270 */ 0, 81, 160, 159, 113, 156, 306, 152, 151, 309, + /* 1280 */ 310, 311, 312, 313, 314, 306, 316, 0, 309, 310, + /* 1290 */ 311, 312, 313, 314, 0, 316, 19, 240, 319, 44, + /* 1300 */ 0, 0, 0, 324, 0, 0, 0, 0, 240, 0, + /* 1310 */ 33, 0, 0, 0, 0, 0, 0, 0, 0, 0, + /* 1320 */ 0, 0, 45, 353, 354, 268, 0, 0, 51, 52, + /* 1330 */ 53, 54, 55, 276, 0, 40, 268, 0, 0, 0, + /* 1340 */ 0, 0, 0, 286, 276, 22, 0, 290, 40, 14, + /* 1350 */ 293, 0, 0, 0, 286, 0, 0, 80, 290, 37, + /* 1360 */ 83, 14, 37, 306, 0, 41, 309, 310, 311, 312, + /* 1370 */ 313, 314, 44, 316, 306, 44, 240, 309, 310, 311, + /* 1380 */ 312, 313, 314, 38, 316, 0, 37, 147, 0, 0, + /* 1390 */ 0, 37, 59, 116, 0, 0, 37, 47, 0, 37, + /* 1400 */ 0, 0, 45, 37, 268, 47, 45, 47, 45, 37, + /* 1410 */ 45, 
0, 276, 0, 47, 347, 47, 88, 141, 0, + /* 1420 */ 0, 144, 286, 0, 22, 240, 290, 47, 47, 47, + /* 1430 */ 47, 47, 41, 90, 41, 0, 240, 22, 0, 162, + /* 1440 */ 22, 164, 306, 47, 47, 309, 310, 311, 312, 313, + /* 1450 */ 314, 48, 316, 268, 0, 22, 47, 0, 22, 0, + /* 1460 */ 0, 276, 22, 20, 268, 37, 22, 0, 47, 0, + /* 1470 */ 0, 286, 276, 0, 0, 290, 44, 145, 293, 206, + /* 1480 */ 81, 145, 286, 41, 212, 81, 290, 240, 41, 145, + /* 1490 */ 354, 306, 82, 41, 309, 310, 311, 312, 313, 314, + /* 1500 */ 44, 316, 306, 41, 82, 309, 310, 311, 312, 313, + /* 1510 */ 314, 81, 316, 142, 318, 268, 140, 2, 81, 41, + /* 1520 */ 41, 82, 81, 276, 33, 44, 240, 82, 82, 161, + /* 1530 */ 44, 82, 82, 286, 41, 47, 45, 290, 41, 240, + /* 1540 */ 293, 47, 51, 52, 53, 54, 55, 47, 47, 47, + /* 1550 */ 47, 178, 81, 306, 268, 44, 309, 310, 311, 312, + /* 1560 */ 313, 314, 276, 316, 82, 82, 81, 268, 81, 22, + /* 1570 */ 44, 80, 286, 81, 83, 276, 290, 82, 82, 293, + /* 1580 */ 81, 0, 212, 37, 22, 286, 212, 180, 240, 290, + /* 1590 */ 81, 143, 306, 81, 81, 309, 310, 311, 312, 313, + /* 1600 */ 314, 140, 316, 44, 44, 306, 82, 91, 309, 310, + /* 1610 */ 311, 312, 313, 314, 81, 316, 268, 240, 81, 81, + /* 1620 */ 92, 82, 47, 47, 276, 81, 47, 82, 105, 81, + /* 1630 */ 47, 140, 82, 142, 286, 144, 81, 146, 290, 47, + /* 1640 */ 82, 47, 81, 22, 81, 268, 82, 105, 105, 93, + /* 1650 */ 81, 81, 47, 276, 306, 164, 240, 309, 310, 311, + /* 1660 */ 312, 313, 314, 286, 316, 81, 113, 290, 22, 59, + /* 1670 */ 105, 58, 47, 64, 79, 41, 47, 47, 47, 47, + /* 1680 */ 47, 22, 47, 306, 268, 47, 309, 310, 311, 312, + /* 1690 */ 313, 314, 276, 316, 47, 47, 47, 47, 47, 47, + /* 1700 */ 47, 64, 286, 0, 47, 240, 290, 45, 37, 0, + /* 1710 */ 47, 45, 37, 0, 47, 45, 240, 37, 0, 47, + /* 1720 */ 37, 45, 306, 0, 47, 309, 310, 311, 312, 313, + /* 1730 */ 314, 0, 316, 268, 46, 0, 22, 21, 357, 357, + /* 1740 */ 21, 276, 22, 22, 268, 20, 357, 357, 357, 357, + /* 1750 */ 357, 286, 276, 357, 357, 290, 357, 357, 357, 357, + /* 1760 */ 357, 357, 286, 357, 357, 240, 290, 357, 357, 357, + /* 1770 */ 357, 306, 357, 357, 309, 310, 311, 312, 313, 314, + /* 1780 */ 240, 316, 306, 357, 357, 309, 310, 311, 312, 313, + /* 1790 */ 314, 357, 316, 268, 357, 357, 357, 240, 357, 357, + /* 1800 */ 357, 276, 357, 357, 357, 357, 357, 357, 268, 357, + /* 1810 */ 357, 286, 240, 357, 357, 290, 276, 357, 357, 357, + /* 1820 */ 357, 357, 357, 357, 357, 268, 286, 357, 357, 240, + /* 1830 */ 290, 306, 357, 276, 309, 310, 311, 312, 313, 314, + /* 1840 */ 268, 316, 357, 286, 357, 357, 306, 290, 276, 309, + /* 1850 */ 310, 311, 312, 313, 314, 357, 316, 268, 286, 357, + /* 1860 */ 357, 357, 290, 306, 357, 276, 309, 310, 311, 312, + /* 1870 */ 313, 314, 357, 316, 357, 286, 240, 357, 306, 290, + /* 1880 */ 357, 309, 310, 311, 312, 313, 314, 357, 316, 357, + /* 1890 */ 357, 357, 357, 357, 357, 306, 357, 357, 309, 310, + /* 1900 */ 311, 312, 313, 314, 268, 316, 357, 357, 240, 357, + /* 1910 */ 357, 357, 276, 357, 357, 357, 357, 357, 357, 357, + /* 1920 */ 357, 357, 286, 357, 357, 240, 290, 357, 357, 357, + /* 1930 */ 357, 357, 357, 357, 357, 357, 268, 357, 357, 357, + /* 1940 */ 357, 357, 306, 357, 276, 309, 310, 311, 312, 313, + /* 1950 */ 314, 357, 316, 268, 286, 357, 357, 240, 290, 357, + /* 1960 */ 357, 276, 357, 357, 357, 357, 357, 357, 357, 357, + /* 1970 */ 357, 286, 357, 357, 306, 290, 357, 309, 310, 311, + /* 1980 */ 312, 313, 314, 357, 316, 268, 357, 357, 357, 357, + /* 1990 */ 357, 306, 357, 276, 309, 310, 311, 312, 313, 314, + /* 2000 */ 357, 316, 357, 286, 357, 357, 240, 290, 357, 
357, + /* 2010 */ 357, 357, 357, 357, 357, 357, 357, 357, 357, 247, + /* 2020 */ 357, 357, 357, 306, 357, 357, 309, 310, 311, 312, + /* 2030 */ 313, 314, 357, 316, 268, 357, 357, 357, 357, 357, + /* 2040 */ 357, 357, 276, 357, 357, 357, 357, 357, 276, 357, + /* 2050 */ 357, 357, 286, 357, 357, 357, 290, 357, 357, 357, + /* 2060 */ 357, 357, 357, 357, 357, 357, 357, 295, 357, 357, + /* 2070 */ 357, 357, 306, 357, 357, 309, 310, 311, 312, 313, + /* 2080 */ 314, 357, 316, 357, 357, 313, 357, 357, 357, 357, + /* 2090 */ 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, + /* 2100 */ 328, 329, 330, 357, 332, 357, 357, 335, 357, 357, + /* 2110 */ 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, + /* 2120 */ 348, 357, 357, 357, 352, 237, 237, 237, 237, 237, + /* 2130 */ 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, + /* 2140 */ 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, + /* 2150 */ 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, + /* 2160 */ 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, + /* 2170 */ 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, + /* 2180 */ 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, + /* 2190 */ 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, + /* 2200 */ 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, + /* 2210 */ 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, + /* 2220 */ 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, + /* 2230 */ 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, + /* 2240 */ 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, + /* 2250 */ 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, + /* 2260 */ 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, + /* 2270 */ 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, + /* 2280 */ 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, + /* 2290 */ 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, + /* 2300 */ 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, + /* 2310 */ 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, + /* 2320 */ 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, + /* 2330 */ 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, + /* 2340 */ 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, + /* 2350 */ 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, + /* 2360 */ 237, 237, }; -#define YY_SHIFT_COUNT (610) +#define YY_SHIFT_COUNT (611) #define YY_SHIFT_MIN (0) -#define YY_SHIFT_MAX (1757) +#define YY_SHIFT_MAX (1735) static const unsigned short int yy_shift_ofst[] = { - /* 0 */ 768, 0, 0, 48, 234, 234, 234, 234, 255, 255, - /* 10 */ 234, 234, 441, 462, 648, 462, 462, 462, 462, 462, - /* 20 */ 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, - /* 30 */ 462, 462, 462, 462, 462, 462, 462, 462, 254, 254, - /* 40 */ 54, 54, 54, 1188, 1188, 1188, 1188, 331, 508, 74, - /* 50 */ 4, 4, 7, 7, 76, 74, 74, 4, 4, 4, - /* 60 */ 4, 4, 4, 216, 4, 265, 371, 527, 265, 4, - /* 70 */ 4, 265, 4, 265, 265, 527, 265, 4, 447, 644, - /* 80 */ 14, 554, 554, 139, 192, 1453, 1453, 1453, 1453, 1453, - /* 90 */ 1453, 1453, 1453, 1453, 1453, 1453, 1453, 1453, 1453, 1453, - /* 100 */ 1453, 1453, 1453, 1453, 604, 229, 482, 482, 198, 5, - /* 110 */ 438, 438, 438, 764, 5, 772, 527, 265, 265, 527, - /* 120 */ 487, 468, 636, 636, 636, 636, 636, 636, 636, 440, - /* 130 */ 299, 222, 27, 486, 37, 346, 287, 456, 681, 674, - /* 140 */ 484, 769, 470, 652, 470, 390, 390, 390, 615, 773, - /* 150 */ 948, 963, 966, 864, 948, 948, 979, 886, 886, 948, - /* 160 */ 1014, 1014, 1020, 216, 527, 216, 1040, 1051, 216, 1040, - /* 170 */ 216, 772, 1066, 216, 216, 948, 216, 1014, 265, 265, - /* 180 */ 265, 265, 265, 265, 265, 
265, 265, 265, 265, 948, - /* 190 */ 1014, 1043, 1020, 447, 967, 527, 447, 1040, 447, 772, - /* 200 */ 1066, 447, 1118, 924, 937, 1043, 924, 937, 1043, 1043, - /* 210 */ 265, 931, 938, 968, 970, 974, 772, 1159, 1145, 957, - /* 220 */ 971, 960, 957, 971, 957, 971, 1112, 937, 1043, 1043, - /* 230 */ 937, 1043, 1064, 772, 1066, 447, 487, 447, 772, 1146, - /* 240 */ 468, 948, 447, 1014, 2153, 2153, 2153, 2153, 2153, 2153, - /* 250 */ 2153, 2153, 71, 1425, 93, 453, 248, 587, 187, 63, - /* 260 */ 385, 391, 211, 570, 95, 95, 95, 95, 95, 95, - /* 270 */ 95, 95, 357, 2, 52, 492, 712, 655, 661, 661, - /* 280 */ 661, 661, 854, 501, 786, 788, 790, 792, 857, 863, - /* 290 */ 867, 862, 557, 677, 880, 881, 888, 900, 715, 749, - /* 300 */ 805, 891, 824, 892, 868, 893, 898, 928, 930, 951, - /* 310 */ 869, 879, 955, 956, 961, 965, 975, 976, 807, 242, - /* 320 */ 1261, 1262, 1192, 1265, 1227, 1107, 1237, 1239, 1240, 1114, - /* 330 */ 1288, 1245, 1246, 1119, 1294, 1249, 1297, 1251, 1300, 1254, - /* 340 */ 1304, 1224, 1144, 1147, 1193, 1151, 1312, 1314, 1161, 1163, - /* 350 */ 1318, 1319, 1278, 1329, 1332, 1333, 1334, 1336, 1337, 1338, - /* 360 */ 1339, 1340, 1341, 1342, 1343, 1344, 1347, 1348, 1349, 1350, - /* 370 */ 1351, 1324, 1354, 1366, 1369, 1371, 1372, 1373, 1352, 1375, - /* 380 */ 1376, 1377, 1382, 1383, 1385, 1346, 1359, 1356, 1381, 1355, - /* 390 */ 1384, 1360, 1402, 1365, 1368, 1407, 1408, 1409, 1379, 1410, - /* 400 */ 1412, 1380, 1413, 1361, 1419, 1421, 1378, 1389, 1386, 1422, - /* 410 */ 1394, 1398, 1390, 1426, 1400, 1411, 1392, 1430, 1414, 1415, - /* 420 */ 1396, 1457, 1459, 1462, 1463, 1391, 1395, 1418, 1447, 1467, - /* 430 */ 1427, 1435, 1437, 1439, 1431, 1432, 1440, 1449, 1450, 1498, - /* 440 */ 1477, 1501, 1481, 1456, 1506, 1485, 1464, 1509, 1488, 1513, - /* 450 */ 1492, 1495, 1518, 1374, 1472, 1521, 1362, 1500, 1399, 1387, - /* 460 */ 1524, 1526, 1527, 1448, 1491, 1393, 1490, 1494, 1330, 1460, - /* 470 */ 1505, 1465, 1451, 1468, 1469, 1466, 1511, 1510, 1514, 1474, - /* 480 */ 1512, 1363, 1475, 1478, 1517, 1357, 1522, 1520, 1480, 1533, - /* 490 */ 1364, 1497, 1528, 1529, 1534, 1535, 1536, 1537, 1497, 1578, - /* 500 */ 1405, 1545, 1507, 1523, 1530, 1543, 1525, 1532, 1544, 1570, - /* 510 */ 1420, 1538, 1540, 1542, 1539, 1546, 1461, 1547, 1603, 1572, - /* 520 */ 1473, 1549, 1519, 1564, 1574, 1551, 1553, 1555, 1593, 1558, - /* 530 */ 1531, 1567, 1569, 1579, 1559, 1568, 1605, 1573, 1575, 1606, - /* 540 */ 1577, 1580, 1608, 1582, 1583, 1609, 1585, 1556, 1565, 1581, - /* 550 */ 1584, 1638, 1576, 1599, 1601, 1617, 1602, 1586, 1637, 1663, - /* 560 */ 1566, 1629, 1643, 1627, 1613, 1652, 1647, 1648, 1649, 1650, - /* 570 */ 1662, 1677, 1664, 1665, 1646, 1431, 1666, 1432, 1667, 1670, - /* 580 */ 1671, 1672, 1673, 1674, 1722, 1676, 1681, 1690, 1728, 1683, - /* 590 */ 1686, 1695, 1733, 1688, 1692, 1702, 1740, 1694, 1697, 1706, - /* 600 */ 1744, 1698, 1701, 1748, 1757, 1745, 1747, 1749, 1751, 1753, - /* 610 */ 1746, + /* 0 */ 827, 0, 0, 48, 96, 96, 96, 96, 280, 280, + /* 10 */ 96, 96, 328, 376, 560, 376, 376, 376, 376, 376, + /* 20 */ 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, + /* 30 */ 376, 376, 376, 376, 376, 376, 376, 376, 120, 120, + /* 40 */ 23, 23, 23, 834, 834, 834, 834, 325, 650, 124, + /* 50 */ 30, 30, 42, 42, 61, 124, 124, 30, 30, 30, + /* 60 */ 30, 30, 30, 194, 30, 30, 360, 433, 456, 360, + /* 70 */ 30, 30, 360, 30, 360, 360, 456, 360, 30, 611, + /* 80 */ 556, 59, 110, 110, 186, 378, 321, 321, 321, 321, + /* 90 */ 321, 321, 321, 321, 321, 321, 321, 321, 321, 321, + /* 100 */ 321, 321, 321, 
321, 321, 161, 153, 575, 575, 299, + /* 110 */ 440, 246, 246, 246, 399, 440, 700, 456, 360, 360, + /* 120 */ 456, 689, 787, 441, 441, 441, 441, 441, 441, 441, + /* 130 */ 1277, 107, 97, 209, 117, 132, 317, 85, 183, 657, + /* 140 */ 745, 671, 297, 563, 497, 563, 704, 704, 704, 409, + /* 150 */ 640, 976, 963, 964, 861, 976, 976, 986, 889, 889, + /* 160 */ 976, 1025, 1025, 1037, 194, 456, 194, 1046, 1048, 194, + /* 170 */ 1046, 194, 700, 1062, 194, 194, 976, 194, 1025, 360, + /* 180 */ 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, + /* 190 */ 976, 1025, 1030, 1037, 611, 938, 456, 611, 976, 1046, + /* 200 */ 611, 700, 1062, 611, 1103, 913, 915, 1030, 913, 915, + /* 210 */ 1030, 1030, 360, 922, 998, 940, 952, 953, 700, 1145, + /* 220 */ 1128, 946, 949, 954, 946, 949, 946, 949, 1096, 915, + /* 230 */ 1030, 1030, 915, 1030, 1040, 700, 1062, 611, 689, 611, + /* 240 */ 700, 1126, 787, 976, 611, 1025, 2125, 2125, 2125, 2125, + /* 250 */ 2125, 2125, 2125, 577, 1491, 294, 869, 64, 14, 406, + /* 260 */ 478, 512, 485, 675, 21, 21, 21, 21, 21, 21, + /* 270 */ 21, 21, 237, 330, 533, 500, 447, 475, 643, 643, + /* 280 */ 643, 643, 762, 753, 731, 732, 740, 790, 907, 909, + /* 290 */ 910, 831, 606, 835, 839, 844, 941, 742, 763, 133, + /* 300 */ 884, 735, 892, 798, 900, 904, 905, 931, 932, 849, + /* 310 */ 881, 897, 948, 950, 951, 968, 977, 1, 719, 1243, + /* 320 */ 1244, 1173, 1248, 1202, 1076, 1204, 1205, 1206, 1080, 1257, + /* 330 */ 1211, 1212, 1087, 1262, 1216, 1264, 1219, 1267, 1221, 1270, + /* 340 */ 1190, 1112, 1114, 1161, 1119, 1287, 1294, 1125, 1127, 1304, + /* 350 */ 1305, 1255, 1300, 1301, 1302, 1306, 1307, 1309, 1311, 1312, + /* 360 */ 1313, 1314, 1315, 1316, 1317, 1318, 1319, 1320, 1321, 1326, + /* 370 */ 1295, 1327, 1334, 1337, 1338, 1339, 1340, 1323, 1341, 1342, + /* 380 */ 1346, 1351, 1352, 1353, 1308, 1322, 1324, 1335, 1328, 1347, + /* 390 */ 1331, 1355, 1345, 1325, 1356, 1364, 1385, 1349, 1240, 1388, + /* 400 */ 1389, 1354, 1390, 1333, 1394, 1395, 1350, 1357, 1359, 1398, + /* 410 */ 1358, 1361, 1362, 1400, 1360, 1363, 1366, 1401, 1367, 1365, + /* 420 */ 1372, 1411, 1413, 1419, 1420, 1343, 1329, 1369, 1402, 1423, + /* 430 */ 1380, 1381, 1382, 1383, 1391, 1393, 1384, 1396, 1397, 1435, + /* 440 */ 1415, 1438, 1418, 1403, 1454, 1433, 1409, 1457, 1436, 1459, + /* 450 */ 1440, 1443, 1460, 1332, 1421, 1469, 1368, 1444, 1336, 1371, + /* 460 */ 1467, 1470, 1473, 1344, 1474, 1399, 1428, 1376, 1442, 1447, + /* 470 */ 1272, 1410, 1452, 1422, 1404, 1430, 1437, 1439, 1462, 1432, + /* 480 */ 1456, 1441, 1478, 1370, 1445, 1446, 1481, 1273, 1479, 1486, + /* 490 */ 1449, 1493, 1374, 1450, 1488, 1494, 1500, 1501, 1502, 1503, + /* 500 */ 1450, 1515, 1373, 1497, 1482, 1471, 1483, 1511, 1485, 1487, + /* 510 */ 1526, 1547, 1407, 1492, 1495, 1496, 1499, 1509, 1448, 1512, + /* 520 */ 1581, 1546, 1461, 1513, 1516, 1559, 1560, 1533, 1524, 1537, + /* 530 */ 1562, 1538, 1528, 1539, 1575, 1576, 1544, 1545, 1579, 1548, + /* 540 */ 1550, 1583, 1555, 1558, 1592, 1561, 1564, 1594, 1563, 1523, + /* 550 */ 1542, 1543, 1565, 1621, 1556, 1569, 1570, 1605, 1584, 1553, + /* 560 */ 1646, 1610, 1613, 1625, 1609, 1595, 1634, 1629, 1630, 1631, + /* 570 */ 1632, 1633, 1659, 1635, 1638, 1637, 1391, 1647, 1393, 1648, + /* 580 */ 1649, 1650, 1651, 1652, 1653, 1703, 1657, 1662, 1671, 1709, + /* 590 */ 1663, 1666, 1675, 1713, 1667, 1670, 1680, 1718, 1672, 1676, + /* 600 */ 1683, 1723, 1677, 1688, 1731, 1735, 1714, 1716, 1720, 1721, + /* 610 */ 1719, 1725, }; -#define YY_REDUCE_COUNT (251) -#define YY_REDUCE_MIN (-320) -#define 
YY_REDUCE_MAX (1796) +#define YY_REDUCE_COUNT (252) +#define YY_REDUCE_MIN (-334) +#define YY_REDUCE_MAX (1772) static const short yy_reduce_ofst[] = { - /* 0 */ 259, -226, 236, 61, 577, 638, 671, 752, 784, 835, - /* 10 */ 29, 850, 901, 953, 736, 962, 1010, 1042, 1074, 1122, - /* 20 */ 1135, 1175, 1223, 1280, 1328, 1358, 1388, 1436, 1445, 1493, - /* 30 */ 1541, 1552, 1571, 1630, 1639, 1699, 1708, 1756, 1760, 1796, - /* 40 */ -20, 261, 269, -272, 207, 204, 298, 182, 449, 512, - /* 50 */ -244, -164, -246, -241, -320, -178, -76, -196, 55, 59, - /* 60 */ 132, 239, 419, -222, 422, -227, -256, -139, -1, 534, - /* 70 */ 535, 84, 537, 310, 102, 179, 448, 568, 343, -75, - /* 80 */ -315, -315, -315, -228, -49, 60, 90, 123, 194, 213, - /* 90 */ 219, 235, 317, 380, 472, 507, 559, 565, 566, 567, - /* 100 */ 569, 572, 576, 579, -154, -152, -216, -68, 33, 138, - /* 110 */ 75, 406, 414, -3, 176, -127, 250, 88, 439, 325, - /* 120 */ 202, 111, 302, 526, 555, 578, 583, 612, 631, 605, - /* 130 */ 663, 653, 571, 574, 628, 592, 666, 666, 689, 690, - /* 140 */ 662, 647, 635, 635, 635, 629, 637, 650, 665, 666, - /* 150 */ 751, 703, 757, 717, 771, 774, 735, 743, 745, 782, - /* 160 */ 791, 794, 737, 789, 759, 806, 777, 776, 827, 787, - /* 170 */ 830, 808, 798, 834, 836, 842, 840, 851, 832, 837, - /* 180 */ 838, 839, 841, 844, 845, 846, 847, 848, 849, 855, - /* 190 */ 866, 820, 817, 872, 822, 852, 874, 833, 878, 856, - /* 200 */ 843, 884, 859, 797, 853, 865, 810, 875, 882, 883, - /* 210 */ 666, 811, 816, 860, 870, 635, 905, 877, 876, 858, - /* 220 */ 871, 829, 873, 885, 889, 894, 903, 908, 902, 904, - /* 230 */ 910, 906, 917, 947, 919, 977, 964, 981, 958, 972, - /* 240 */ 982, 986, 985, 995, 943, 944, 959, 984, 989, 991, - /* 250 */ 1005, 1019, + /* 0 */ -185, 489, 520, -223, 91, 581, 644, 730, 761, 822, + /* 10 */ 885, 916, 316, 970, 979, 1057, 1068, 1136, 1185, 1196, + /* 20 */ 1247, 1286, 1299, 1348, 1377, 1416, 1465, 1476, 1525, 1540, + /* 30 */ 1557, 1572, 1589, 1636, 1668, 1685, 1717, 1766, 811, 1772, + /* 40 */ 98, 389, 448, -267, -265, 466, 548, -293, -176, 34, + /* 50 */ 332, 464, -236, -189, -334, -304, -237, 57, 111, 119, + /* 60 */ 334, 451, 538, 180, 564, 601, 104, 79, -21, -113, + /* 70 */ 623, 627, 228, 642, 86, 337, -133, 218, 664, -239, + /* 80 */ 58, -48, -48, -48, -220, -253, -146, -110, 6, 13, + /* 90 */ 122, 154, 193, 215, 306, 313, 371, 416, 422, 424, + /* 100 */ 432, 436, 468, 469, 473, 150, 249, -178, 129, -46, + /* 110 */ 412, -87, 278, 394, -123, 415, 88, 329, 460, 435, + /* 120 */ 411, 326, 476, -254, 540, 558, 590, 592, 600, 647, + /* 130 */ 649, 494, 665, 584, 605, 656, 621, 696, 696, 725, + /* 140 */ 717, 702, 676, 660, 660, 660, 634, 646, 658, 663, + /* 150 */ 696, 758, 706, 765, 720, 774, 775, 743, 749, 752, + /* 160 */ 791, 806, 810, 759, 809, 777, 813, 782, 773, 826, + /* 170 */ 785, 828, 805, 796, 832, 833, 841, 837, 850, 818, + /* 180 */ 823, 825, 829, 836, 842, 843, 846, 847, 848, 851, + /* 190 */ 852, 853, 812, 799, 855, 803, 821, 868, 873, 854, + /* 200 */ 871, 866, 840, 872, 856, 781, 858, 857, 792, 859, + /* 210 */ 862, 864, 696, 816, 814, 830, 838, 660, 890, 860, + /* 220 */ 863, 865, 867, 824, 870, 874, 880, 883, 878, 887, + /* 230 */ 891, 895, 888, 896, 899, 912, 903, 943, 939, 955, + /* 240 */ 935, 942, 957, 965, 973, 971, 920, 918, 972, 974, + /* 250 */ 975, 990, 1004, }; static const YYACTIONTYPE yy_default[] = { - /* 0 */ 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, - /* 10 */ 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, - /* 20 */ 1356, 
1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, - /* 30 */ 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, - /* 40 */ 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, - /* 50 */ 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, - /* 60 */ 1356, 1356, 1356, 1425, 1356, 1356, 1356, 1356, 1356, 1356, - /* 70 */ 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1423, 1564, - /* 80 */ 1356, 1731, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, - /* 90 */ 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, - /* 100 */ 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1425, 1356, - /* 110 */ 1742, 1742, 1742, 1423, 1356, 1356, 1356, 1356, 1356, 1356, - /* 120 */ 1519, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1603, - /* 130 */ 1356, 1356, 1808, 1356, 1609, 1766, 1356, 1356, 1356, 1356, - /* 140 */ 1472, 1758, 1734, 1748, 1735, 1793, 1793, 1793, 1751, 1356, - /* 150 */ 1356, 1356, 1356, 1595, 1356, 1356, 1569, 1566, 1566, 1356, - /* 160 */ 1356, 1356, 1356, 1425, 1356, 1425, 1356, 1356, 1425, 1356, - /* 170 */ 1425, 1356, 1356, 1425, 1425, 1356, 1425, 1356, 1356, 1356, - /* 180 */ 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, - /* 190 */ 1356, 1356, 1356, 1423, 1605, 1356, 1423, 1356, 1423, 1356, - /* 200 */ 1356, 1423, 1356, 1773, 1771, 1356, 1773, 1771, 1356, 1356, - /* 210 */ 1356, 1785, 1781, 1764, 1762, 1748, 1356, 1356, 1356, 1799, - /* 220 */ 1795, 1811, 1799, 1795, 1799, 1795, 1356, 1771, 1356, 1356, - /* 230 */ 1771, 1356, 1577, 1356, 1356, 1423, 1356, 1423, 1356, 1488, - /* 240 */ 1356, 1356, 1423, 1356, 1597, 1611, 1587, 1522, 1522, 1522, - /* 250 */ 1426, 1361, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, - /* 260 */ 1356, 1356, 1356, 1484, 1675, 1784, 1783, 1707, 1706, 1705, - /* 270 */ 1703, 1674, 1356, 1356, 1356, 1356, 1356, 1356, 1668, 1669, - /* 280 */ 1667, 1666, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, - /* 290 */ 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1732, 1356, 1796, - /* 300 */ 1800, 1356, 1356, 1356, 1651, 1356, 1356, 1356, 1356, 1356, - /* 310 */ 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, - /* 320 */ 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, - /* 330 */ 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, - /* 340 */ 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, - /* 350 */ 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, - /* 360 */ 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, - /* 370 */ 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, - /* 380 */ 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1390, 1356, 1356, - /* 390 */ 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, - /* 400 */ 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, - /* 410 */ 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, - /* 420 */ 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, - /* 430 */ 1356, 1356, 1356, 1356, 1453, 1452, 1356, 1356, 1356, 1356, - /* 440 */ 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, - /* 450 */ 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, - /* 460 */ 1356, 1356, 1356, 1356, 1356, 1356, 1755, 1765, 1356, 1356, - /* 470 */ 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1651, 1356, - /* 480 */ 1782, 1356, 1741, 1737, 1356, 1356, 1733, 1356, 1356, 1794, - /* 490 */ 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1727, - /* 500 */ 1356, 1700, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, - /* 510 */ 1662, 1356, 1356, 1356, 1356, 1356, 
1356, 1356, 1356, 1356, - /* 520 */ 1356, 1356, 1356, 1650, 1356, 1691, 1356, 1356, 1356, 1356, - /* 530 */ 1356, 1356, 1356, 1356, 1516, 1356, 1356, 1356, 1356, 1356, - /* 540 */ 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1501, 1499, 1498, - /* 550 */ 1497, 1356, 1494, 1356, 1356, 1356, 1356, 1356, 1356, 1356, - /* 560 */ 1356, 1356, 1356, 1356, 1356, 1445, 1356, 1356, 1356, 1356, - /* 570 */ 1356, 1356, 1356, 1356, 1356, 1436, 1356, 1435, 1356, 1356, - /* 580 */ 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, - /* 590 */ 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, - /* 600 */ 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, - /* 610 */ 1356, + /* 0 */ 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, + /* 10 */ 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, + /* 20 */ 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, + /* 30 */ 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, + /* 40 */ 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, + /* 50 */ 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, + /* 60 */ 1349, 1349, 1349, 1418, 1349, 1349, 1349, 1349, 1349, 1349, + /* 70 */ 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1416, + /* 80 */ 1556, 1349, 1720, 1349, 1349, 1349, 1349, 1349, 1349, 1349, + /* 90 */ 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, + /* 100 */ 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1418, + /* 110 */ 1349, 1731, 1731, 1731, 1416, 1349, 1349, 1349, 1349, 1349, + /* 120 */ 1349, 1512, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, + /* 130 */ 1592, 1349, 1349, 1797, 1349, 1598, 1755, 1349, 1349, 1349, + /* 140 */ 1349, 1465, 1747, 1723, 1737, 1724, 1782, 1782, 1782, 1740, + /* 150 */ 1349, 1349, 1349, 1349, 1584, 1349, 1349, 1561, 1558, 1558, + /* 160 */ 1349, 1349, 1349, 1349, 1418, 1349, 1418, 1349, 1349, 1418, + /* 170 */ 1349, 1418, 1349, 1349, 1418, 1418, 1349, 1418, 1349, 1349, + /* 180 */ 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, + /* 190 */ 1349, 1349, 1349, 1349, 1416, 1594, 1349, 1416, 1349, 1349, + /* 200 */ 1416, 1349, 1349, 1416, 1349, 1762, 1760, 1349, 1762, 1760, + /* 210 */ 1349, 1349, 1349, 1774, 1770, 1753, 1751, 1737, 1349, 1349, + /* 220 */ 1349, 1788, 1784, 1800, 1788, 1784, 1788, 1784, 1349, 1760, + /* 230 */ 1349, 1349, 1760, 1349, 1569, 1349, 1349, 1416, 1349, 1416, + /* 240 */ 1349, 1481, 1349, 1349, 1416, 1349, 1586, 1600, 1515, 1515, + /* 250 */ 1515, 1419, 1354, 1349, 1349, 1349, 1349, 1349, 1349, 1349, + /* 260 */ 1349, 1349, 1349, 1349, 1664, 1773, 1772, 1696, 1695, 1694, + /* 270 */ 1692, 1663, 1477, 1349, 1349, 1349, 1349, 1349, 1657, 1658, + /* 280 */ 1656, 1655, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, + /* 290 */ 1349, 1349, 1349, 1349, 1349, 1349, 1721, 1349, 1785, 1789, + /* 300 */ 1349, 1349, 1349, 1640, 1349, 1349, 1349, 1349, 1349, 1349, + /* 310 */ 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, + /* 320 */ 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, + /* 330 */ 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, + /* 340 */ 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, + /* 350 */ 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, + /* 360 */ 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, + /* 370 */ 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, + /* 380 */ 1349, 1349, 1349, 1349, 1349, 1349, 1383, 1349, 1349, 1349, + /* 390 */ 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 
1349, + /* 400 */ 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, + /* 410 */ 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, + /* 420 */ 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, + /* 430 */ 1349, 1349, 1349, 1349, 1446, 1445, 1349, 1349, 1349, 1349, + /* 440 */ 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, + /* 450 */ 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, + /* 460 */ 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1744, 1754, + /* 470 */ 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, + /* 480 */ 1640, 1349, 1771, 1349, 1730, 1726, 1349, 1349, 1722, 1349, + /* 490 */ 1349, 1783, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, + /* 500 */ 1349, 1716, 1349, 1689, 1349, 1349, 1349, 1349, 1349, 1349, + /* 510 */ 1349, 1349, 1651, 1349, 1349, 1349, 1349, 1349, 1349, 1349, + /* 520 */ 1349, 1349, 1349, 1349, 1349, 1639, 1349, 1680, 1349, 1349, + /* 530 */ 1349, 1349, 1349, 1349, 1349, 1349, 1509, 1349, 1349, 1349, + /* 540 */ 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1494, + /* 550 */ 1492, 1491, 1490, 1349, 1487, 1349, 1349, 1349, 1349, 1349, + /* 560 */ 1349, 1349, 1349, 1349, 1349, 1349, 1438, 1349, 1349, 1349, + /* 570 */ 1349, 1349, 1349, 1349, 1349, 1349, 1429, 1349, 1428, 1349, + /* 580 */ 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, + /* 590 */ 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, + /* 600 */ 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, + /* 610 */ 1349, 1349, }; /********** End of lemon-generated parsing tables *****************************/ @@ -940,7 +963,6 @@ static const YYCODETYPE yyFallback[] = { 0, /* BLOB => nothing */ 0, /* VARBINARY => nothing */ 0, /* DECIMAL => nothing */ - 0, /* DELAY => nothing */ 0, /* FILE_FACTOR => nothing */ 0, /* NK_FLOAT => nothing */ 0, /* ROLLUP => nothing */ @@ -975,9 +997,8 @@ static const YYCODETYPE yyFallback[] = { 0, /* INTERVAL => nothing */ 0, /* TOPIC => nothing */ 0, /* AS => nothing */ - 0, /* CGROUP => nothing */ - 0, /* WITH => nothing */ - 0, /* SCHEMA => nothing */ + 0, /* CONSUMER => nothing */ + 0, /* GROUP => nothing */ 0, /* DESC => nothing */ 0, /* DESCRIBE => nothing */ 0, /* RESET => nothing */ @@ -1052,7 +1073,6 @@ static const YYCODETYPE yyFallback[] = { 0, /* PREV => nothing */ 0, /* LINEAR => nothing */ 0, /* NEXT => nothing */ - 0, /* GROUP => nothing */ 0, /* HAVING => nothing */ 0, /* ORDER => nothing */ 0, /* SLIMIT => nothing */ @@ -1062,12 +1082,12 @@ static const YYCODETYPE yyFallback[] = { 0, /* ASC => nothing */ 0, /* NULLS => nothing */ 0, /* ID => nothing */ - 233, /* NK_BITNOT => ID */ - 233, /* INSERT => ID */ - 233, /* VALUES => ID */ - 233, /* IMPORT => ID */ - 233, /* NK_SEMI => ID */ - 233, /* FILE => ID */ + 230, /* NK_BITNOT => ID */ + 230, /* INSERT => ID */ + 230, /* VALUES => ID */ + 230, /* IMPORT => ID */ + 230, /* NK_SEMI => ID */ + 230, /* FILE => ID */ }; #endif /* YYFALLBACK */ @@ -1267,255 +1287,251 @@ static const char *const yyTokenName[] = { /* 109 */ "BLOB", /* 110 */ "VARBINARY", /* 111 */ "DECIMAL", - /* 112 */ "DELAY", - /* 113 */ "FILE_FACTOR", - /* 114 */ "NK_FLOAT", - /* 115 */ "ROLLUP", - /* 116 */ "TTL", - /* 117 */ "SMA", - /* 118 */ "SHOW", - /* 119 */ "DATABASES", - /* 120 */ "TABLES", - /* 121 */ "STABLES", - /* 122 */ "MNODES", - /* 123 */ "MODULES", - /* 124 */ "QNODES", - /* 125 */ "FUNCTIONS", - /* 126 */ "INDEXES", - /* 127 */ "ACCOUNTS", - /* 128 */ "APPS", - /* 129 */ "CONNECTIONS", - /* 130 */ "LICENCE", - /* 131 */ 
"GRANTS", - /* 132 */ "QUERIES", - /* 133 */ "SCORES", - /* 134 */ "TOPICS", - /* 135 */ "VARIABLES", - /* 136 */ "BNODES", - /* 137 */ "SNODES", - /* 138 */ "CLUSTER", - /* 139 */ "TRANSACTIONS", - /* 140 */ "LIKE", - /* 141 */ "INDEX", - /* 142 */ "FULLTEXT", - /* 143 */ "FUNCTION", - /* 144 */ "INTERVAL", - /* 145 */ "TOPIC", - /* 146 */ "AS", - /* 147 */ "CGROUP", - /* 148 */ "WITH", - /* 149 */ "SCHEMA", - /* 150 */ "DESC", - /* 151 */ "DESCRIBE", - /* 152 */ "RESET", - /* 153 */ "QUERY", - /* 154 */ "CACHE", - /* 155 */ "EXPLAIN", - /* 156 */ "ANALYZE", - /* 157 */ "VERBOSE", - /* 158 */ "NK_BOOL", - /* 159 */ "RATIO", - /* 160 */ "COMPACT", - /* 161 */ "VNODES", - /* 162 */ "IN", - /* 163 */ "OUTPUTTYPE", - /* 164 */ "AGGREGATE", - /* 165 */ "BUFSIZE", - /* 166 */ "STREAM", - /* 167 */ "INTO", - /* 168 */ "TRIGGER", - /* 169 */ "AT_ONCE", - /* 170 */ "WINDOW_CLOSE", - /* 171 */ "WATERMARK", - /* 172 */ "KILL", - /* 173 */ "CONNECTION", - /* 174 */ "TRANSACTION", - /* 175 */ "MERGE", - /* 176 */ "VGROUP", - /* 177 */ "REDISTRIBUTE", - /* 178 */ "SPLIT", - /* 179 */ "SYNCDB", - /* 180 */ "NULL", - /* 181 */ "NK_QUESTION", - /* 182 */ "NK_ARROW", - /* 183 */ "ROWTS", - /* 184 */ "TBNAME", - /* 185 */ "QSTARTTS", - /* 186 */ "QENDTS", - /* 187 */ "WSTARTTS", - /* 188 */ "WENDTS", - /* 189 */ "WDURATION", - /* 190 */ "CAST", - /* 191 */ "NOW", - /* 192 */ "TODAY", - /* 193 */ "TIMEZONE", - /* 194 */ "COUNT", - /* 195 */ "FIRST", - /* 196 */ "LAST", - /* 197 */ "LAST_ROW", - /* 198 */ "BETWEEN", - /* 199 */ "IS", - /* 200 */ "NK_LT", - /* 201 */ "NK_GT", - /* 202 */ "NK_LE", - /* 203 */ "NK_GE", - /* 204 */ "NK_NE", - /* 205 */ "MATCH", - /* 206 */ "NMATCH", - /* 207 */ "CONTAINS", - /* 208 */ "JOIN", - /* 209 */ "INNER", - /* 210 */ "SELECT", - /* 211 */ "DISTINCT", - /* 212 */ "WHERE", - /* 213 */ "PARTITION", - /* 214 */ "BY", - /* 215 */ "SESSION", - /* 216 */ "STATE_WINDOW", - /* 217 */ "SLIDING", - /* 218 */ "FILL", - /* 219 */ "VALUE", - /* 220 */ "NONE", - /* 221 */ "PREV", - /* 222 */ "LINEAR", - /* 223 */ "NEXT", - /* 224 */ "GROUP", - /* 225 */ "HAVING", - /* 226 */ "ORDER", - /* 227 */ "SLIMIT", - /* 228 */ "SOFFSET", - /* 229 */ "LIMIT", - /* 230 */ "OFFSET", - /* 231 */ "ASC", - /* 232 */ "NULLS", - /* 233 */ "ID", - /* 234 */ "NK_BITNOT", - /* 235 */ "INSERT", - /* 236 */ "VALUES", - /* 237 */ "IMPORT", - /* 238 */ "NK_SEMI", - /* 239 */ "FILE", - /* 240 */ "cmd", - /* 241 */ "account_options", - /* 242 */ "alter_account_options", - /* 243 */ "literal", - /* 244 */ "alter_account_option", - /* 245 */ "user_name", - /* 246 */ "privileges", - /* 247 */ "priv_level", - /* 248 */ "priv_type_list", - /* 249 */ "priv_type", - /* 250 */ "db_name", - /* 251 */ "dnode_endpoint", - /* 252 */ "dnode_host_name", - /* 253 */ "not_exists_opt", - /* 254 */ "db_options", - /* 255 */ "exists_opt", - /* 256 */ "alter_db_options", - /* 257 */ "integer_list", - /* 258 */ "variable_list", - /* 259 */ "retention_list", - /* 260 */ "alter_db_option", - /* 261 */ "retention", - /* 262 */ "full_table_name", - /* 263 */ "column_def_list", - /* 264 */ "tags_def_opt", - /* 265 */ "table_options", - /* 266 */ "multi_create_clause", - /* 267 */ "tags_def", - /* 268 */ "multi_drop_clause", - /* 269 */ "alter_table_clause", - /* 270 */ "alter_table_options", - /* 271 */ "column_name", - /* 272 */ "type_name", - /* 273 */ "signed_literal", - /* 274 */ "create_subtable_clause", - /* 275 */ "specific_tags_opt", - /* 276 */ "literal_list", - /* 277 */ "drop_table_clause", - /* 278 */ "col_name_list", - /* 279 
*/ "table_name", - /* 280 */ "column_def", - /* 281 */ "func_name_list", - /* 282 */ "alter_table_option", - /* 283 */ "col_name", - /* 284 */ "db_name_cond_opt", - /* 285 */ "like_pattern_opt", - /* 286 */ "table_name_cond", - /* 287 */ "from_db_opt", - /* 288 */ "func_name", - /* 289 */ "function_name", - /* 290 */ "index_name", - /* 291 */ "index_options", - /* 292 */ "func_list", - /* 293 */ "duration_literal", - /* 294 */ "sliding_opt", - /* 295 */ "func", - /* 296 */ "expression_list", - /* 297 */ "topic_name", - /* 298 */ "topic_options", - /* 299 */ "query_expression", - /* 300 */ "cgroup_name", - /* 301 */ "analyze_opt", - /* 302 */ "explain_options", - /* 303 */ "agg_func_opt", - /* 304 */ "bufsize_opt", - /* 305 */ "stream_name", - /* 306 */ "stream_options", - /* 307 */ "into_opt", - /* 308 */ "dnode_list", - /* 309 */ "signed", - /* 310 */ "literal_func", - /* 311 */ "table_alias", - /* 312 */ "column_alias", - /* 313 */ "expression", - /* 314 */ "pseudo_column", - /* 315 */ "column_reference", - /* 316 */ "function_expression", - /* 317 */ "subquery", - /* 318 */ "star_func", - /* 319 */ "star_func_para_list", - /* 320 */ "noarg_func", - /* 321 */ "other_para_list", - /* 322 */ "star_func_para", - /* 323 */ "predicate", - /* 324 */ "compare_op", - /* 325 */ "in_op", - /* 326 */ "in_predicate_value", - /* 327 */ "boolean_value_expression", - /* 328 */ "boolean_primary", - /* 329 */ "common_expression", - /* 330 */ "from_clause", - /* 331 */ "table_reference_list", - /* 332 */ "table_reference", - /* 333 */ "table_primary", - /* 334 */ "joined_table", - /* 335 */ "alias_opt", - /* 336 */ "parenthesized_joined_table", - /* 337 */ "join_type", - /* 338 */ "search_condition", - /* 339 */ "query_specification", - /* 340 */ "set_quantifier_opt", - /* 341 */ "select_list", - /* 342 */ "where_clause_opt", - /* 343 */ "partition_by_clause_opt", - /* 344 */ "twindow_clause_opt", - /* 345 */ "group_by_clause_opt", - /* 346 */ "having_clause_opt", - /* 347 */ "select_sublist", - /* 348 */ "select_item", - /* 349 */ "fill_opt", - /* 350 */ "fill_mode", - /* 351 */ "group_by_list", - /* 352 */ "query_expression_body", - /* 353 */ "order_by_clause_opt", - /* 354 */ "slimit_clause_opt", - /* 355 */ "limit_clause_opt", - /* 356 */ "query_primary", - /* 357 */ "sort_specification_list", - /* 358 */ "sort_specification", - /* 359 */ "ordering_specification_opt", - /* 360 */ "null_ordering_opt", + /* 112 */ "FILE_FACTOR", + /* 113 */ "NK_FLOAT", + /* 114 */ "ROLLUP", + /* 115 */ "TTL", + /* 116 */ "SMA", + /* 117 */ "SHOW", + /* 118 */ "DATABASES", + /* 119 */ "TABLES", + /* 120 */ "STABLES", + /* 121 */ "MNODES", + /* 122 */ "MODULES", + /* 123 */ "QNODES", + /* 124 */ "FUNCTIONS", + /* 125 */ "INDEXES", + /* 126 */ "ACCOUNTS", + /* 127 */ "APPS", + /* 128 */ "CONNECTIONS", + /* 129 */ "LICENCE", + /* 130 */ "GRANTS", + /* 131 */ "QUERIES", + /* 132 */ "SCORES", + /* 133 */ "TOPICS", + /* 134 */ "VARIABLES", + /* 135 */ "BNODES", + /* 136 */ "SNODES", + /* 137 */ "CLUSTER", + /* 138 */ "TRANSACTIONS", + /* 139 */ "LIKE", + /* 140 */ "INDEX", + /* 141 */ "FULLTEXT", + /* 142 */ "FUNCTION", + /* 143 */ "INTERVAL", + /* 144 */ "TOPIC", + /* 145 */ "AS", + /* 146 */ "CONSUMER", + /* 147 */ "GROUP", + /* 148 */ "DESC", + /* 149 */ "DESCRIBE", + /* 150 */ "RESET", + /* 151 */ "QUERY", + /* 152 */ "CACHE", + /* 153 */ "EXPLAIN", + /* 154 */ "ANALYZE", + /* 155 */ "VERBOSE", + /* 156 */ "NK_BOOL", + /* 157 */ "RATIO", + /* 158 */ "COMPACT", + /* 159 */ "VNODES", + /* 160 */ "IN", + /* 161 */ 
"OUTPUTTYPE", + /* 162 */ "AGGREGATE", + /* 163 */ "BUFSIZE", + /* 164 */ "STREAM", + /* 165 */ "INTO", + /* 166 */ "TRIGGER", + /* 167 */ "AT_ONCE", + /* 168 */ "WINDOW_CLOSE", + /* 169 */ "WATERMARK", + /* 170 */ "KILL", + /* 171 */ "CONNECTION", + /* 172 */ "TRANSACTION", + /* 173 */ "MERGE", + /* 174 */ "VGROUP", + /* 175 */ "REDISTRIBUTE", + /* 176 */ "SPLIT", + /* 177 */ "SYNCDB", + /* 178 */ "NULL", + /* 179 */ "NK_QUESTION", + /* 180 */ "NK_ARROW", + /* 181 */ "ROWTS", + /* 182 */ "TBNAME", + /* 183 */ "QSTARTTS", + /* 184 */ "QENDTS", + /* 185 */ "WSTARTTS", + /* 186 */ "WENDTS", + /* 187 */ "WDURATION", + /* 188 */ "CAST", + /* 189 */ "NOW", + /* 190 */ "TODAY", + /* 191 */ "TIMEZONE", + /* 192 */ "COUNT", + /* 193 */ "FIRST", + /* 194 */ "LAST", + /* 195 */ "LAST_ROW", + /* 196 */ "BETWEEN", + /* 197 */ "IS", + /* 198 */ "NK_LT", + /* 199 */ "NK_GT", + /* 200 */ "NK_LE", + /* 201 */ "NK_GE", + /* 202 */ "NK_NE", + /* 203 */ "MATCH", + /* 204 */ "NMATCH", + /* 205 */ "CONTAINS", + /* 206 */ "JOIN", + /* 207 */ "INNER", + /* 208 */ "SELECT", + /* 209 */ "DISTINCT", + /* 210 */ "WHERE", + /* 211 */ "PARTITION", + /* 212 */ "BY", + /* 213 */ "SESSION", + /* 214 */ "STATE_WINDOW", + /* 215 */ "SLIDING", + /* 216 */ "FILL", + /* 217 */ "VALUE", + /* 218 */ "NONE", + /* 219 */ "PREV", + /* 220 */ "LINEAR", + /* 221 */ "NEXT", + /* 222 */ "HAVING", + /* 223 */ "ORDER", + /* 224 */ "SLIMIT", + /* 225 */ "SOFFSET", + /* 226 */ "LIMIT", + /* 227 */ "OFFSET", + /* 228 */ "ASC", + /* 229 */ "NULLS", + /* 230 */ "ID", + /* 231 */ "NK_BITNOT", + /* 232 */ "INSERT", + /* 233 */ "VALUES", + /* 234 */ "IMPORT", + /* 235 */ "NK_SEMI", + /* 236 */ "FILE", + /* 237 */ "cmd", + /* 238 */ "account_options", + /* 239 */ "alter_account_options", + /* 240 */ "literal", + /* 241 */ "alter_account_option", + /* 242 */ "user_name", + /* 243 */ "privileges", + /* 244 */ "priv_level", + /* 245 */ "priv_type_list", + /* 246 */ "priv_type", + /* 247 */ "db_name", + /* 248 */ "dnode_endpoint", + /* 249 */ "dnode_host_name", + /* 250 */ "not_exists_opt", + /* 251 */ "db_options", + /* 252 */ "exists_opt", + /* 253 */ "alter_db_options", + /* 254 */ "integer_list", + /* 255 */ "variable_list", + /* 256 */ "retention_list", + /* 257 */ "alter_db_option", + /* 258 */ "retention", + /* 259 */ "full_table_name", + /* 260 */ "column_def_list", + /* 261 */ "tags_def_opt", + /* 262 */ "table_options", + /* 263 */ "multi_create_clause", + /* 264 */ "tags_def", + /* 265 */ "multi_drop_clause", + /* 266 */ "alter_table_clause", + /* 267 */ "alter_table_options", + /* 268 */ "column_name", + /* 269 */ "type_name", + /* 270 */ "signed_literal", + /* 271 */ "create_subtable_clause", + /* 272 */ "specific_tags_opt", + /* 273 */ "literal_list", + /* 274 */ "drop_table_clause", + /* 275 */ "col_name_list", + /* 276 */ "table_name", + /* 277 */ "column_def", + /* 278 */ "func_name_list", + /* 279 */ "alter_table_option", + /* 280 */ "col_name", + /* 281 */ "db_name_cond_opt", + /* 282 */ "like_pattern_opt", + /* 283 */ "table_name_cond", + /* 284 */ "from_db_opt", + /* 285 */ "func_name", + /* 286 */ "function_name", + /* 287 */ "index_name", + /* 288 */ "index_options", + /* 289 */ "func_list", + /* 290 */ "duration_literal", + /* 291 */ "sliding_opt", + /* 292 */ "func", + /* 293 */ "expression_list", + /* 294 */ "topic_name", + /* 295 */ "query_expression", + /* 296 */ "cgroup_name", + /* 297 */ "analyze_opt", + /* 298 */ "explain_options", + /* 299 */ "agg_func_opt", + /* 300 */ "bufsize_opt", + /* 301 */ "stream_name", + /* 
302 */ "stream_options", + /* 303 */ "into_opt", + /* 304 */ "dnode_list", + /* 305 */ "signed", + /* 306 */ "literal_func", + /* 307 */ "table_alias", + /* 308 */ "column_alias", + /* 309 */ "expression", + /* 310 */ "pseudo_column", + /* 311 */ "column_reference", + /* 312 */ "function_expression", + /* 313 */ "subquery", + /* 314 */ "star_func", + /* 315 */ "star_func_para_list", + /* 316 */ "noarg_func", + /* 317 */ "other_para_list", + /* 318 */ "star_func_para", + /* 319 */ "predicate", + /* 320 */ "compare_op", + /* 321 */ "in_op", + /* 322 */ "in_predicate_value", + /* 323 */ "boolean_value_expression", + /* 324 */ "boolean_primary", + /* 325 */ "common_expression", + /* 326 */ "from_clause", + /* 327 */ "table_reference_list", + /* 328 */ "table_reference", + /* 329 */ "table_primary", + /* 330 */ "joined_table", + /* 331 */ "alias_opt", + /* 332 */ "parenthesized_joined_table", + /* 333 */ "join_type", + /* 334 */ "search_condition", + /* 335 */ "query_specification", + /* 336 */ "set_quantifier_opt", + /* 337 */ "select_list", + /* 338 */ "where_clause_opt", + /* 339 */ "partition_by_clause_opt", + /* 340 */ "twindow_clause_opt", + /* 341 */ "group_by_clause_opt", + /* 342 */ "having_clause_opt", + /* 343 */ "select_sublist", + /* 344 */ "select_item", + /* 345 */ "fill_opt", + /* 346 */ "fill_mode", + /* 347 */ "group_by_list", + /* 348 */ "query_expression_body", + /* 349 */ "order_by_clause_opt", + /* 350 */ "slimit_clause_opt", + /* 351 */ "limit_clause_opt", + /* 352 */ "query_primary", + /* 353 */ "sort_specification_list", + /* 354 */ "sort_specification", + /* 355 */ "ordering_specification_opt", + /* 356 */ "null_ordering_opt", }; #endif /* defined(YYCOVERAGE) || !defined(NDEBUG) */ @@ -1688,296 +1704,292 @@ static const char *const yyRuleName[] = { /* 162 */ "tags_def ::= TAGS NK_LP column_def_list NK_RP", /* 163 */ "table_options ::=", /* 164 */ "table_options ::= table_options COMMENT NK_STRING", - /* 165 */ "table_options ::= table_options DELAY NK_INTEGER", - /* 166 */ "table_options ::= table_options FILE_FACTOR NK_FLOAT", - /* 167 */ "table_options ::= table_options ROLLUP NK_LP func_name_list NK_RP", - /* 168 */ "table_options ::= table_options TTL NK_INTEGER", - /* 169 */ "table_options ::= table_options SMA NK_LP col_name_list NK_RP", - /* 170 */ "alter_table_options ::= alter_table_option", - /* 171 */ "alter_table_options ::= alter_table_options alter_table_option", - /* 172 */ "alter_table_option ::= COMMENT NK_STRING", - /* 173 */ "alter_table_option ::= TTL NK_INTEGER", - /* 174 */ "col_name_list ::= col_name", - /* 175 */ "col_name_list ::= col_name_list NK_COMMA col_name", - /* 176 */ "col_name ::= column_name", - /* 177 */ "cmd ::= SHOW DNODES", - /* 178 */ "cmd ::= SHOW USERS", - /* 179 */ "cmd ::= SHOW DATABASES", - /* 180 */ "cmd ::= SHOW db_name_cond_opt TABLES like_pattern_opt", - /* 181 */ "cmd ::= SHOW db_name_cond_opt STABLES like_pattern_opt", - /* 182 */ "cmd ::= SHOW db_name_cond_opt VGROUPS", - /* 183 */ "cmd ::= SHOW MNODES", - /* 184 */ "cmd ::= SHOW MODULES", - /* 185 */ "cmd ::= SHOW QNODES", - /* 186 */ "cmd ::= SHOW FUNCTIONS", - /* 187 */ "cmd ::= SHOW INDEXES FROM table_name_cond from_db_opt", - /* 188 */ "cmd ::= SHOW STREAMS", - /* 189 */ "cmd ::= SHOW ACCOUNTS", - /* 190 */ "cmd ::= SHOW APPS", - /* 191 */ "cmd ::= SHOW CONNECTIONS", - /* 192 */ "cmd ::= SHOW LICENCE", - /* 193 */ "cmd ::= SHOW GRANTS", - /* 194 */ "cmd ::= SHOW CREATE DATABASE db_name", - /* 195 */ "cmd ::= SHOW CREATE TABLE full_table_name", - /* 196 */ "cmd 
::= SHOW CREATE STABLE full_table_name", - /* 197 */ "cmd ::= SHOW QUERIES", - /* 198 */ "cmd ::= SHOW SCORES", - /* 199 */ "cmd ::= SHOW TOPICS", - /* 200 */ "cmd ::= SHOW VARIABLES", - /* 201 */ "cmd ::= SHOW BNODES", - /* 202 */ "cmd ::= SHOW SNODES", - /* 203 */ "cmd ::= SHOW CLUSTER", - /* 204 */ "cmd ::= SHOW TRANSACTIONS", - /* 205 */ "db_name_cond_opt ::=", - /* 206 */ "db_name_cond_opt ::= db_name NK_DOT", - /* 207 */ "like_pattern_opt ::=", - /* 208 */ "like_pattern_opt ::= LIKE NK_STRING", - /* 209 */ "table_name_cond ::= table_name", - /* 210 */ "from_db_opt ::=", - /* 211 */ "from_db_opt ::= FROM db_name", - /* 212 */ "func_name_list ::= func_name", - /* 213 */ "func_name_list ::= func_name_list NK_COMMA func_name", - /* 214 */ "func_name ::= function_name", - /* 215 */ "cmd ::= CREATE SMA INDEX not_exists_opt index_name ON table_name index_options", - /* 216 */ "cmd ::= CREATE FULLTEXT INDEX not_exists_opt index_name ON table_name NK_LP col_name_list NK_RP", - /* 217 */ "cmd ::= DROP INDEX exists_opt index_name ON table_name", - /* 218 */ "index_options ::=", - /* 219 */ "index_options ::= FUNCTION NK_LP func_list NK_RP INTERVAL NK_LP duration_literal NK_RP sliding_opt", - /* 220 */ "index_options ::= FUNCTION NK_LP func_list NK_RP INTERVAL NK_LP duration_literal NK_COMMA duration_literal NK_RP sliding_opt", - /* 221 */ "func_list ::= func", - /* 222 */ "func_list ::= func_list NK_COMMA func", - /* 223 */ "func ::= function_name NK_LP expression_list NK_RP", - /* 224 */ "cmd ::= CREATE TOPIC not_exists_opt topic_name topic_options AS query_expression", - /* 225 */ "cmd ::= CREATE TOPIC not_exists_opt topic_name topic_options AS db_name", + /* 165 */ "table_options ::= table_options FILE_FACTOR NK_FLOAT", + /* 166 */ "table_options ::= table_options ROLLUP NK_LP func_name_list NK_RP", + /* 167 */ "table_options ::= table_options TTL NK_INTEGER", + /* 168 */ "table_options ::= table_options SMA NK_LP col_name_list NK_RP", + /* 169 */ "alter_table_options ::= alter_table_option", + /* 170 */ "alter_table_options ::= alter_table_options alter_table_option", + /* 171 */ "alter_table_option ::= COMMENT NK_STRING", + /* 172 */ "alter_table_option ::= TTL NK_INTEGER", + /* 173 */ "col_name_list ::= col_name", + /* 174 */ "col_name_list ::= col_name_list NK_COMMA col_name", + /* 175 */ "col_name ::= column_name", + /* 176 */ "cmd ::= SHOW DNODES", + /* 177 */ "cmd ::= SHOW USERS", + /* 178 */ "cmd ::= SHOW DATABASES", + /* 179 */ "cmd ::= SHOW db_name_cond_opt TABLES like_pattern_opt", + /* 180 */ "cmd ::= SHOW db_name_cond_opt STABLES like_pattern_opt", + /* 181 */ "cmd ::= SHOW db_name_cond_opt VGROUPS", + /* 182 */ "cmd ::= SHOW MNODES", + /* 183 */ "cmd ::= SHOW MODULES", + /* 184 */ "cmd ::= SHOW QNODES", + /* 185 */ "cmd ::= SHOW FUNCTIONS", + /* 186 */ "cmd ::= SHOW INDEXES FROM table_name_cond from_db_opt", + /* 187 */ "cmd ::= SHOW STREAMS", + /* 188 */ "cmd ::= SHOW ACCOUNTS", + /* 189 */ "cmd ::= SHOW APPS", + /* 190 */ "cmd ::= SHOW CONNECTIONS", + /* 191 */ "cmd ::= SHOW LICENCE", + /* 192 */ "cmd ::= SHOW GRANTS", + /* 193 */ "cmd ::= SHOW CREATE DATABASE db_name", + /* 194 */ "cmd ::= SHOW CREATE TABLE full_table_name", + /* 195 */ "cmd ::= SHOW CREATE STABLE full_table_name", + /* 196 */ "cmd ::= SHOW QUERIES", + /* 197 */ "cmd ::= SHOW SCORES", + /* 198 */ "cmd ::= SHOW TOPICS", + /* 199 */ "cmd ::= SHOW VARIABLES", + /* 200 */ "cmd ::= SHOW BNODES", + /* 201 */ "cmd ::= SHOW SNODES", + /* 202 */ "cmd ::= SHOW CLUSTER", + /* 203 */ "cmd ::= SHOW TRANSACTIONS", + /* 
204 */ "db_name_cond_opt ::=", + /* 205 */ "db_name_cond_opt ::= db_name NK_DOT", + /* 206 */ "like_pattern_opt ::=", + /* 207 */ "like_pattern_opt ::= LIKE NK_STRING", + /* 208 */ "table_name_cond ::= table_name", + /* 209 */ "from_db_opt ::=", + /* 210 */ "from_db_opt ::= FROM db_name", + /* 211 */ "func_name_list ::= func_name", + /* 212 */ "func_name_list ::= func_name_list NK_COMMA func_name", + /* 213 */ "func_name ::= function_name", + /* 214 */ "cmd ::= CREATE SMA INDEX not_exists_opt index_name ON table_name index_options", + /* 215 */ "cmd ::= CREATE FULLTEXT INDEX not_exists_opt index_name ON table_name NK_LP col_name_list NK_RP", + /* 216 */ "cmd ::= DROP INDEX exists_opt index_name ON table_name", + /* 217 */ "index_options ::=", + /* 218 */ "index_options ::= FUNCTION NK_LP func_list NK_RP INTERVAL NK_LP duration_literal NK_RP sliding_opt", + /* 219 */ "index_options ::= FUNCTION NK_LP func_list NK_RP INTERVAL NK_LP duration_literal NK_COMMA duration_literal NK_RP sliding_opt", + /* 220 */ "func_list ::= func", + /* 221 */ "func_list ::= func_list NK_COMMA func", + /* 222 */ "func ::= function_name NK_LP expression_list NK_RP", + /* 223 */ "cmd ::= CREATE TOPIC not_exists_opt topic_name AS query_expression", + /* 224 */ "cmd ::= CREATE TOPIC not_exists_opt topic_name AS DATABASE db_name", + /* 225 */ "cmd ::= CREATE TOPIC not_exists_opt topic_name AS STABLE full_table_name", /* 226 */ "cmd ::= DROP TOPIC exists_opt topic_name", - /* 227 */ "cmd ::= DROP CGROUP exists_opt cgroup_name ON topic_name", - /* 228 */ "topic_options ::=", - /* 229 */ "topic_options ::= topic_options WITH TABLE", - /* 230 */ "topic_options ::= topic_options WITH SCHEMA", - /* 231 */ "topic_options ::= topic_options WITH TAG", - /* 232 */ "cmd ::= DESC full_table_name", - /* 233 */ "cmd ::= DESCRIBE full_table_name", - /* 234 */ "cmd ::= RESET QUERY CACHE", - /* 235 */ "cmd ::= EXPLAIN analyze_opt explain_options query_expression", - /* 236 */ "analyze_opt ::=", - /* 237 */ "analyze_opt ::= ANALYZE", - /* 238 */ "explain_options ::=", - /* 239 */ "explain_options ::= explain_options VERBOSE NK_BOOL", - /* 240 */ "explain_options ::= explain_options RATIO NK_FLOAT", - /* 241 */ "cmd ::= COMPACT VNODES IN NK_LP integer_list NK_RP", - /* 242 */ "cmd ::= CREATE agg_func_opt FUNCTION not_exists_opt function_name AS NK_STRING OUTPUTTYPE type_name bufsize_opt", - /* 243 */ "cmd ::= DROP FUNCTION exists_opt function_name", - /* 244 */ "agg_func_opt ::=", - /* 245 */ "agg_func_opt ::= AGGREGATE", - /* 246 */ "bufsize_opt ::=", - /* 247 */ "bufsize_opt ::= BUFSIZE NK_INTEGER", - /* 248 */ "cmd ::= CREATE STREAM not_exists_opt stream_name stream_options into_opt AS query_expression", - /* 249 */ "cmd ::= DROP STREAM exists_opt stream_name", - /* 250 */ "into_opt ::=", - /* 251 */ "into_opt ::= INTO full_table_name", - /* 252 */ "stream_options ::=", - /* 253 */ "stream_options ::= stream_options TRIGGER AT_ONCE", - /* 254 */ "stream_options ::= stream_options TRIGGER WINDOW_CLOSE", - /* 255 */ "stream_options ::= stream_options WATERMARK duration_literal", - /* 256 */ "cmd ::= KILL CONNECTION NK_INTEGER", - /* 257 */ "cmd ::= KILL QUERY NK_INTEGER", - /* 258 */ "cmd ::= KILL TRANSACTION NK_INTEGER", - /* 259 */ "cmd ::= MERGE VGROUP NK_INTEGER NK_INTEGER", - /* 260 */ "cmd ::= REDISTRIBUTE VGROUP NK_INTEGER dnode_list", - /* 261 */ "cmd ::= SPLIT VGROUP NK_INTEGER", - /* 262 */ "dnode_list ::= DNODE NK_INTEGER", - /* 263 */ "dnode_list ::= dnode_list DNODE NK_INTEGER", - /* 264 */ "cmd ::= SYNCDB db_name 
REPLICA", - /* 265 */ "cmd ::= query_expression", - /* 266 */ "literal ::= NK_INTEGER", - /* 267 */ "literal ::= NK_FLOAT", - /* 268 */ "literal ::= NK_STRING", - /* 269 */ "literal ::= NK_BOOL", - /* 270 */ "literal ::= TIMESTAMP NK_STRING", - /* 271 */ "literal ::= duration_literal", - /* 272 */ "literal ::= NULL", - /* 273 */ "literal ::= NK_QUESTION", - /* 274 */ "duration_literal ::= NK_VARIABLE", - /* 275 */ "signed ::= NK_INTEGER", - /* 276 */ "signed ::= NK_PLUS NK_INTEGER", - /* 277 */ "signed ::= NK_MINUS NK_INTEGER", - /* 278 */ "signed ::= NK_FLOAT", - /* 279 */ "signed ::= NK_PLUS NK_FLOAT", - /* 280 */ "signed ::= NK_MINUS NK_FLOAT", - /* 281 */ "signed_literal ::= signed", - /* 282 */ "signed_literal ::= NK_STRING", - /* 283 */ "signed_literal ::= NK_BOOL", - /* 284 */ "signed_literal ::= TIMESTAMP NK_STRING", - /* 285 */ "signed_literal ::= duration_literal", - /* 286 */ "signed_literal ::= NULL", - /* 287 */ "signed_literal ::= literal_func", - /* 288 */ "literal_list ::= signed_literal", - /* 289 */ "literal_list ::= literal_list NK_COMMA signed_literal", - /* 290 */ "db_name ::= NK_ID", - /* 291 */ "table_name ::= NK_ID", - /* 292 */ "column_name ::= NK_ID", - /* 293 */ "function_name ::= NK_ID", - /* 294 */ "table_alias ::= NK_ID", - /* 295 */ "column_alias ::= NK_ID", - /* 296 */ "user_name ::= NK_ID", - /* 297 */ "index_name ::= NK_ID", - /* 298 */ "topic_name ::= NK_ID", - /* 299 */ "stream_name ::= NK_ID", - /* 300 */ "cgroup_name ::= NK_ID", - /* 301 */ "expression ::= literal", - /* 302 */ "expression ::= pseudo_column", - /* 303 */ "expression ::= column_reference", - /* 304 */ "expression ::= function_expression", - /* 305 */ "expression ::= subquery", - /* 306 */ "expression ::= NK_LP expression NK_RP", - /* 307 */ "expression ::= NK_PLUS expression", - /* 308 */ "expression ::= NK_MINUS expression", - /* 309 */ "expression ::= expression NK_PLUS expression", - /* 310 */ "expression ::= expression NK_MINUS expression", - /* 311 */ "expression ::= expression NK_STAR expression", - /* 312 */ "expression ::= expression NK_SLASH expression", - /* 313 */ "expression ::= expression NK_REM expression", - /* 314 */ "expression ::= column_reference NK_ARROW NK_STRING", - /* 315 */ "expression_list ::= expression", - /* 316 */ "expression_list ::= expression_list NK_COMMA expression", - /* 317 */ "column_reference ::= column_name", - /* 318 */ "column_reference ::= table_name NK_DOT column_name", - /* 319 */ "pseudo_column ::= ROWTS", - /* 320 */ "pseudo_column ::= TBNAME", - /* 321 */ "pseudo_column ::= table_name NK_DOT TBNAME", - /* 322 */ "pseudo_column ::= QSTARTTS", - /* 323 */ "pseudo_column ::= QENDTS", - /* 324 */ "pseudo_column ::= WSTARTTS", - /* 325 */ "pseudo_column ::= WENDTS", - /* 326 */ "pseudo_column ::= WDURATION", - /* 327 */ "function_expression ::= function_name NK_LP expression_list NK_RP", - /* 328 */ "function_expression ::= star_func NK_LP star_func_para_list NK_RP", - /* 329 */ "function_expression ::= CAST NK_LP expression AS type_name NK_RP", - /* 330 */ "function_expression ::= literal_func", - /* 331 */ "literal_func ::= noarg_func NK_LP NK_RP", - /* 332 */ "literal_func ::= NOW", - /* 333 */ "noarg_func ::= NOW", - /* 334 */ "noarg_func ::= TODAY", - /* 335 */ "noarg_func ::= TIMEZONE", - /* 336 */ "star_func ::= COUNT", - /* 337 */ "star_func ::= FIRST", - /* 338 */ "star_func ::= LAST", - /* 339 */ "star_func ::= LAST_ROW", - /* 340 */ "star_func_para_list ::= NK_STAR", - /* 341 */ "star_func_para_list ::= other_para_list", - /* 342 */ 
"other_para_list ::= star_func_para", - /* 343 */ "other_para_list ::= other_para_list NK_COMMA star_func_para", - /* 344 */ "star_func_para ::= expression", - /* 345 */ "star_func_para ::= table_name NK_DOT NK_STAR", - /* 346 */ "predicate ::= expression compare_op expression", - /* 347 */ "predicate ::= expression BETWEEN expression AND expression", - /* 348 */ "predicate ::= expression NOT BETWEEN expression AND expression", - /* 349 */ "predicate ::= expression IS NULL", - /* 350 */ "predicate ::= expression IS NOT NULL", - /* 351 */ "predicate ::= expression in_op in_predicate_value", - /* 352 */ "compare_op ::= NK_LT", - /* 353 */ "compare_op ::= NK_GT", - /* 354 */ "compare_op ::= NK_LE", - /* 355 */ "compare_op ::= NK_GE", - /* 356 */ "compare_op ::= NK_NE", - /* 357 */ "compare_op ::= NK_EQ", - /* 358 */ "compare_op ::= LIKE", - /* 359 */ "compare_op ::= NOT LIKE", - /* 360 */ "compare_op ::= MATCH", - /* 361 */ "compare_op ::= NMATCH", - /* 362 */ "compare_op ::= CONTAINS", - /* 363 */ "in_op ::= IN", - /* 364 */ "in_op ::= NOT IN", - /* 365 */ "in_predicate_value ::= NK_LP expression_list NK_RP", - /* 366 */ "boolean_value_expression ::= boolean_primary", - /* 367 */ "boolean_value_expression ::= NOT boolean_primary", - /* 368 */ "boolean_value_expression ::= boolean_value_expression OR boolean_value_expression", - /* 369 */ "boolean_value_expression ::= boolean_value_expression AND boolean_value_expression", - /* 370 */ "boolean_primary ::= predicate", - /* 371 */ "boolean_primary ::= NK_LP boolean_value_expression NK_RP", - /* 372 */ "common_expression ::= expression", - /* 373 */ "common_expression ::= boolean_value_expression", - /* 374 */ "from_clause ::= FROM table_reference_list", - /* 375 */ "table_reference_list ::= table_reference", - /* 376 */ "table_reference_list ::= table_reference_list NK_COMMA table_reference", - /* 377 */ "table_reference ::= table_primary", - /* 378 */ "table_reference ::= joined_table", - /* 379 */ "table_primary ::= table_name alias_opt", - /* 380 */ "table_primary ::= db_name NK_DOT table_name alias_opt", - /* 381 */ "table_primary ::= subquery alias_opt", - /* 382 */ "table_primary ::= parenthesized_joined_table", - /* 383 */ "alias_opt ::=", - /* 384 */ "alias_opt ::= table_alias", - /* 385 */ "alias_opt ::= AS table_alias", - /* 386 */ "parenthesized_joined_table ::= NK_LP joined_table NK_RP", - /* 387 */ "parenthesized_joined_table ::= NK_LP parenthesized_joined_table NK_RP", - /* 388 */ "joined_table ::= table_reference join_type JOIN table_reference ON search_condition", - /* 389 */ "join_type ::=", - /* 390 */ "join_type ::= INNER", - /* 391 */ "query_specification ::= SELECT set_quantifier_opt select_list from_clause where_clause_opt partition_by_clause_opt twindow_clause_opt group_by_clause_opt having_clause_opt", - /* 392 */ "set_quantifier_opt ::=", - /* 393 */ "set_quantifier_opt ::= DISTINCT", - /* 394 */ "set_quantifier_opt ::= ALL", - /* 395 */ "select_list ::= NK_STAR", - /* 396 */ "select_list ::= select_sublist", - /* 397 */ "select_sublist ::= select_item", - /* 398 */ "select_sublist ::= select_sublist NK_COMMA select_item", - /* 399 */ "select_item ::= common_expression", - /* 400 */ "select_item ::= common_expression column_alias", - /* 401 */ "select_item ::= common_expression AS column_alias", - /* 402 */ "select_item ::= table_name NK_DOT NK_STAR", - /* 403 */ "where_clause_opt ::=", - /* 404 */ "where_clause_opt ::= WHERE search_condition", - /* 405 */ "partition_by_clause_opt ::=", - /* 406 */ 
"partition_by_clause_opt ::= PARTITION BY expression_list", - /* 407 */ "twindow_clause_opt ::=", - /* 408 */ "twindow_clause_opt ::= SESSION NK_LP column_reference NK_COMMA duration_literal NK_RP", - /* 409 */ "twindow_clause_opt ::= STATE_WINDOW NK_LP expression NK_RP", - /* 410 */ "twindow_clause_opt ::= INTERVAL NK_LP duration_literal NK_RP sliding_opt fill_opt", - /* 411 */ "twindow_clause_opt ::= INTERVAL NK_LP duration_literal NK_COMMA duration_literal NK_RP sliding_opt fill_opt", - /* 412 */ "sliding_opt ::=", - /* 413 */ "sliding_opt ::= SLIDING NK_LP duration_literal NK_RP", - /* 414 */ "fill_opt ::=", - /* 415 */ "fill_opt ::= FILL NK_LP fill_mode NK_RP", - /* 416 */ "fill_opt ::= FILL NK_LP VALUE NK_COMMA literal_list NK_RP", - /* 417 */ "fill_mode ::= NONE", - /* 418 */ "fill_mode ::= PREV", - /* 419 */ "fill_mode ::= NULL", - /* 420 */ "fill_mode ::= LINEAR", - /* 421 */ "fill_mode ::= NEXT", - /* 422 */ "group_by_clause_opt ::=", - /* 423 */ "group_by_clause_opt ::= GROUP BY group_by_list", - /* 424 */ "group_by_list ::= expression", - /* 425 */ "group_by_list ::= group_by_list NK_COMMA expression", - /* 426 */ "having_clause_opt ::=", - /* 427 */ "having_clause_opt ::= HAVING search_condition", - /* 428 */ "query_expression ::= query_expression_body order_by_clause_opt slimit_clause_opt limit_clause_opt", - /* 429 */ "query_expression_body ::= query_primary", - /* 430 */ "query_expression_body ::= query_expression_body UNION ALL query_expression_body", - /* 431 */ "query_expression_body ::= query_expression_body UNION query_expression_body", - /* 432 */ "query_primary ::= query_specification", - /* 433 */ "query_primary ::= NK_LP query_expression_body order_by_clause_opt slimit_clause_opt limit_clause_opt NK_RP", - /* 434 */ "order_by_clause_opt ::=", - /* 435 */ "order_by_clause_opt ::= ORDER BY sort_specification_list", - /* 436 */ "slimit_clause_opt ::=", - /* 437 */ "slimit_clause_opt ::= SLIMIT NK_INTEGER", - /* 438 */ "slimit_clause_opt ::= SLIMIT NK_INTEGER SOFFSET NK_INTEGER", - /* 439 */ "slimit_clause_opt ::= SLIMIT NK_INTEGER NK_COMMA NK_INTEGER", - /* 440 */ "limit_clause_opt ::=", - /* 441 */ "limit_clause_opt ::= LIMIT NK_INTEGER", - /* 442 */ "limit_clause_opt ::= LIMIT NK_INTEGER OFFSET NK_INTEGER", - /* 443 */ "limit_clause_opt ::= LIMIT NK_INTEGER NK_COMMA NK_INTEGER", - /* 444 */ "subquery ::= NK_LP query_expression NK_RP", - /* 445 */ "search_condition ::= common_expression", - /* 446 */ "sort_specification_list ::= sort_specification", - /* 447 */ "sort_specification_list ::= sort_specification_list NK_COMMA sort_specification", - /* 448 */ "sort_specification ::= expression ordering_specification_opt null_ordering_opt", - /* 449 */ "ordering_specification_opt ::=", - /* 450 */ "ordering_specification_opt ::= ASC", - /* 451 */ "ordering_specification_opt ::= DESC", - /* 452 */ "null_ordering_opt ::=", - /* 453 */ "null_ordering_opt ::= NULLS FIRST", - /* 454 */ "null_ordering_opt ::= NULLS LAST", + /* 227 */ "cmd ::= DROP CONSUMER GROUP exists_opt cgroup_name ON topic_name", + /* 228 */ "cmd ::= DESC full_table_name", + /* 229 */ "cmd ::= DESCRIBE full_table_name", + /* 230 */ "cmd ::= RESET QUERY CACHE", + /* 231 */ "cmd ::= EXPLAIN analyze_opt explain_options query_expression", + /* 232 */ "analyze_opt ::=", + /* 233 */ "analyze_opt ::= ANALYZE", + /* 234 */ "explain_options ::=", + /* 235 */ "explain_options ::= explain_options VERBOSE NK_BOOL", + /* 236 */ "explain_options ::= explain_options RATIO NK_FLOAT", + /* 237 */ "cmd ::= COMPACT VNODES IN 
NK_LP integer_list NK_RP", + /* 238 */ "cmd ::= CREATE agg_func_opt FUNCTION not_exists_opt function_name AS NK_STRING OUTPUTTYPE type_name bufsize_opt", + /* 239 */ "cmd ::= DROP FUNCTION exists_opt function_name", + /* 240 */ "agg_func_opt ::=", + /* 241 */ "agg_func_opt ::= AGGREGATE", + /* 242 */ "bufsize_opt ::=", + /* 243 */ "bufsize_opt ::= BUFSIZE NK_INTEGER", + /* 244 */ "cmd ::= CREATE STREAM not_exists_opt stream_name stream_options into_opt AS query_expression", + /* 245 */ "cmd ::= DROP STREAM exists_opt stream_name", + /* 246 */ "into_opt ::=", + /* 247 */ "into_opt ::= INTO full_table_name", + /* 248 */ "stream_options ::=", + /* 249 */ "stream_options ::= stream_options TRIGGER AT_ONCE", + /* 250 */ "stream_options ::= stream_options TRIGGER WINDOW_CLOSE", + /* 251 */ "stream_options ::= stream_options WATERMARK duration_literal", + /* 252 */ "cmd ::= KILL CONNECTION NK_INTEGER", + /* 253 */ "cmd ::= KILL QUERY NK_INTEGER", + /* 254 */ "cmd ::= KILL TRANSACTION NK_INTEGER", + /* 255 */ "cmd ::= MERGE VGROUP NK_INTEGER NK_INTEGER", + /* 256 */ "cmd ::= REDISTRIBUTE VGROUP NK_INTEGER dnode_list", + /* 257 */ "cmd ::= SPLIT VGROUP NK_INTEGER", + /* 258 */ "dnode_list ::= DNODE NK_INTEGER", + /* 259 */ "dnode_list ::= dnode_list DNODE NK_INTEGER", + /* 260 */ "cmd ::= SYNCDB db_name REPLICA", + /* 261 */ "cmd ::= query_expression", + /* 262 */ "literal ::= NK_INTEGER", + /* 263 */ "literal ::= NK_FLOAT", + /* 264 */ "literal ::= NK_STRING", + /* 265 */ "literal ::= NK_BOOL", + /* 266 */ "literal ::= TIMESTAMP NK_STRING", + /* 267 */ "literal ::= duration_literal", + /* 268 */ "literal ::= NULL", + /* 269 */ "literal ::= NK_QUESTION", + /* 270 */ "duration_literal ::= NK_VARIABLE", + /* 271 */ "signed ::= NK_INTEGER", + /* 272 */ "signed ::= NK_PLUS NK_INTEGER", + /* 273 */ "signed ::= NK_MINUS NK_INTEGER", + /* 274 */ "signed ::= NK_FLOAT", + /* 275 */ "signed ::= NK_PLUS NK_FLOAT", + /* 276 */ "signed ::= NK_MINUS NK_FLOAT", + /* 277 */ "signed_literal ::= signed", + /* 278 */ "signed_literal ::= NK_STRING", + /* 279 */ "signed_literal ::= NK_BOOL", + /* 280 */ "signed_literal ::= TIMESTAMP NK_STRING", + /* 281 */ "signed_literal ::= duration_literal", + /* 282 */ "signed_literal ::= NULL", + /* 283 */ "signed_literal ::= literal_func", + /* 284 */ "literal_list ::= signed_literal", + /* 285 */ "literal_list ::= literal_list NK_COMMA signed_literal", + /* 286 */ "db_name ::= NK_ID", + /* 287 */ "table_name ::= NK_ID", + /* 288 */ "column_name ::= NK_ID", + /* 289 */ "function_name ::= NK_ID", + /* 290 */ "table_alias ::= NK_ID", + /* 291 */ "column_alias ::= NK_ID", + /* 292 */ "user_name ::= NK_ID", + /* 293 */ "index_name ::= NK_ID", + /* 294 */ "topic_name ::= NK_ID", + /* 295 */ "stream_name ::= NK_ID", + /* 296 */ "cgroup_name ::= NK_ID", + /* 297 */ "expression ::= literal", + /* 298 */ "expression ::= pseudo_column", + /* 299 */ "expression ::= column_reference", + /* 300 */ "expression ::= function_expression", + /* 301 */ "expression ::= subquery", + /* 302 */ "expression ::= NK_LP expression NK_RP", + /* 303 */ "expression ::= NK_PLUS expression", + /* 304 */ "expression ::= NK_MINUS expression", + /* 305 */ "expression ::= expression NK_PLUS expression", + /* 306 */ "expression ::= expression NK_MINUS expression", + /* 307 */ "expression ::= expression NK_STAR expression", + /* 308 */ "expression ::= expression NK_SLASH expression", + /* 309 */ "expression ::= expression NK_REM expression", + /* 310 */ "expression ::= column_reference NK_ARROW NK_STRING", + /* 311 
*/ "expression_list ::= expression", + /* 312 */ "expression_list ::= expression_list NK_COMMA expression", + /* 313 */ "column_reference ::= column_name", + /* 314 */ "column_reference ::= table_name NK_DOT column_name", + /* 315 */ "pseudo_column ::= ROWTS", + /* 316 */ "pseudo_column ::= TBNAME", + /* 317 */ "pseudo_column ::= table_name NK_DOT TBNAME", + /* 318 */ "pseudo_column ::= QSTARTTS", + /* 319 */ "pseudo_column ::= QENDTS", + /* 320 */ "pseudo_column ::= WSTARTTS", + /* 321 */ "pseudo_column ::= WENDTS", + /* 322 */ "pseudo_column ::= WDURATION", + /* 323 */ "function_expression ::= function_name NK_LP expression_list NK_RP", + /* 324 */ "function_expression ::= star_func NK_LP star_func_para_list NK_RP", + /* 325 */ "function_expression ::= CAST NK_LP expression AS type_name NK_RP", + /* 326 */ "function_expression ::= literal_func", + /* 327 */ "literal_func ::= noarg_func NK_LP NK_RP", + /* 328 */ "literal_func ::= NOW", + /* 329 */ "noarg_func ::= NOW", + /* 330 */ "noarg_func ::= TODAY", + /* 331 */ "noarg_func ::= TIMEZONE", + /* 332 */ "star_func ::= COUNT", + /* 333 */ "star_func ::= FIRST", + /* 334 */ "star_func ::= LAST", + /* 335 */ "star_func ::= LAST_ROW", + /* 336 */ "star_func_para_list ::= NK_STAR", + /* 337 */ "star_func_para_list ::= other_para_list", + /* 338 */ "other_para_list ::= star_func_para", + /* 339 */ "other_para_list ::= other_para_list NK_COMMA star_func_para", + /* 340 */ "star_func_para ::= expression", + /* 341 */ "star_func_para ::= table_name NK_DOT NK_STAR", + /* 342 */ "predicate ::= expression compare_op expression", + /* 343 */ "predicate ::= expression BETWEEN expression AND expression", + /* 344 */ "predicate ::= expression NOT BETWEEN expression AND expression", + /* 345 */ "predicate ::= expression IS NULL", + /* 346 */ "predicate ::= expression IS NOT NULL", + /* 347 */ "predicate ::= expression in_op in_predicate_value", + /* 348 */ "compare_op ::= NK_LT", + /* 349 */ "compare_op ::= NK_GT", + /* 350 */ "compare_op ::= NK_LE", + /* 351 */ "compare_op ::= NK_GE", + /* 352 */ "compare_op ::= NK_NE", + /* 353 */ "compare_op ::= NK_EQ", + /* 354 */ "compare_op ::= LIKE", + /* 355 */ "compare_op ::= NOT LIKE", + /* 356 */ "compare_op ::= MATCH", + /* 357 */ "compare_op ::= NMATCH", + /* 358 */ "compare_op ::= CONTAINS", + /* 359 */ "in_op ::= IN", + /* 360 */ "in_op ::= NOT IN", + /* 361 */ "in_predicate_value ::= NK_LP expression_list NK_RP", + /* 362 */ "boolean_value_expression ::= boolean_primary", + /* 363 */ "boolean_value_expression ::= NOT boolean_primary", + /* 364 */ "boolean_value_expression ::= boolean_value_expression OR boolean_value_expression", + /* 365 */ "boolean_value_expression ::= boolean_value_expression AND boolean_value_expression", + /* 366 */ "boolean_primary ::= predicate", + /* 367 */ "boolean_primary ::= NK_LP boolean_value_expression NK_RP", + /* 368 */ "common_expression ::= expression", + /* 369 */ "common_expression ::= boolean_value_expression", + /* 370 */ "from_clause ::= FROM table_reference_list", + /* 371 */ "table_reference_list ::= table_reference", + /* 372 */ "table_reference_list ::= table_reference_list NK_COMMA table_reference", + /* 373 */ "table_reference ::= table_primary", + /* 374 */ "table_reference ::= joined_table", + /* 375 */ "table_primary ::= table_name alias_opt", + /* 376 */ "table_primary ::= db_name NK_DOT table_name alias_opt", + /* 377 */ "table_primary ::= subquery alias_opt", + /* 378 */ "table_primary ::= parenthesized_joined_table", + /* 379 */ "alias_opt ::=", + /* 
380 */ "alias_opt ::= table_alias", + /* 381 */ "alias_opt ::= AS table_alias", + /* 382 */ "parenthesized_joined_table ::= NK_LP joined_table NK_RP", + /* 383 */ "parenthesized_joined_table ::= NK_LP parenthesized_joined_table NK_RP", + /* 384 */ "joined_table ::= table_reference join_type JOIN table_reference ON search_condition", + /* 385 */ "join_type ::=", + /* 386 */ "join_type ::= INNER", + /* 387 */ "query_specification ::= SELECT set_quantifier_opt select_list from_clause where_clause_opt partition_by_clause_opt twindow_clause_opt group_by_clause_opt having_clause_opt", + /* 388 */ "set_quantifier_opt ::=", + /* 389 */ "set_quantifier_opt ::= DISTINCT", + /* 390 */ "set_quantifier_opt ::= ALL", + /* 391 */ "select_list ::= NK_STAR", + /* 392 */ "select_list ::= select_sublist", + /* 393 */ "select_sublist ::= select_item", + /* 394 */ "select_sublist ::= select_sublist NK_COMMA select_item", + /* 395 */ "select_item ::= common_expression", + /* 396 */ "select_item ::= common_expression column_alias", + /* 397 */ "select_item ::= common_expression AS column_alias", + /* 398 */ "select_item ::= table_name NK_DOT NK_STAR", + /* 399 */ "where_clause_opt ::=", + /* 400 */ "where_clause_opt ::= WHERE search_condition", + /* 401 */ "partition_by_clause_opt ::=", + /* 402 */ "partition_by_clause_opt ::= PARTITION BY expression_list", + /* 403 */ "twindow_clause_opt ::=", + /* 404 */ "twindow_clause_opt ::= SESSION NK_LP column_reference NK_COMMA duration_literal NK_RP", + /* 405 */ "twindow_clause_opt ::= STATE_WINDOW NK_LP expression NK_RP", + /* 406 */ "twindow_clause_opt ::= INTERVAL NK_LP duration_literal NK_RP sliding_opt fill_opt", + /* 407 */ "twindow_clause_opt ::= INTERVAL NK_LP duration_literal NK_COMMA duration_literal NK_RP sliding_opt fill_opt", + /* 408 */ "sliding_opt ::=", + /* 409 */ "sliding_opt ::= SLIDING NK_LP duration_literal NK_RP", + /* 410 */ "fill_opt ::=", + /* 411 */ "fill_opt ::= FILL NK_LP fill_mode NK_RP", + /* 412 */ "fill_opt ::= FILL NK_LP VALUE NK_COMMA literal_list NK_RP", + /* 413 */ "fill_mode ::= NONE", + /* 414 */ "fill_mode ::= PREV", + /* 415 */ "fill_mode ::= NULL", + /* 416 */ "fill_mode ::= LINEAR", + /* 417 */ "fill_mode ::= NEXT", + /* 418 */ "group_by_clause_opt ::=", + /* 419 */ "group_by_clause_opt ::= GROUP BY group_by_list", + /* 420 */ "group_by_list ::= expression", + /* 421 */ "group_by_list ::= group_by_list NK_COMMA expression", + /* 422 */ "having_clause_opt ::=", + /* 423 */ "having_clause_opt ::= HAVING search_condition", + /* 424 */ "query_expression ::= query_expression_body order_by_clause_opt slimit_clause_opt limit_clause_opt", + /* 425 */ "query_expression_body ::= query_primary", + /* 426 */ "query_expression_body ::= query_expression_body UNION ALL query_expression_body", + /* 427 */ "query_expression_body ::= query_expression_body UNION query_expression_body", + /* 428 */ "query_primary ::= query_specification", + /* 429 */ "query_primary ::= NK_LP query_expression_body order_by_clause_opt slimit_clause_opt limit_clause_opt NK_RP", + /* 430 */ "order_by_clause_opt ::=", + /* 431 */ "order_by_clause_opt ::= ORDER BY sort_specification_list", + /* 432 */ "slimit_clause_opt ::=", + /* 433 */ "slimit_clause_opt ::= SLIMIT NK_INTEGER", + /* 434 */ "slimit_clause_opt ::= SLIMIT NK_INTEGER SOFFSET NK_INTEGER", + /* 435 */ "slimit_clause_opt ::= SLIMIT NK_INTEGER NK_COMMA NK_INTEGER", + /* 436 */ "limit_clause_opt ::=", + /* 437 */ "limit_clause_opt ::= LIMIT NK_INTEGER", + /* 438 */ "limit_clause_opt ::= LIMIT NK_INTEGER OFFSET 
NK_INTEGER", + /* 439 */ "limit_clause_opt ::= LIMIT NK_INTEGER NK_COMMA NK_INTEGER", + /* 440 */ "subquery ::= NK_LP query_expression NK_RP", + /* 441 */ "search_condition ::= common_expression", + /* 442 */ "sort_specification_list ::= sort_specification", + /* 443 */ "sort_specification_list ::= sort_specification_list NK_COMMA sort_specification", + /* 444 */ "sort_specification ::= expression ordering_specification_opt null_ordering_opt", + /* 445 */ "ordering_specification_opt ::=", + /* 446 */ "ordering_specification_opt ::= ASC", + /* 447 */ "ordering_specification_opt ::= DESC", + /* 448 */ "null_ordering_opt ::=", + /* 449 */ "null_ordering_opt ::= NULLS FIRST", + /* 450 */ "null_ordering_opt ::= NULLS LAST", }; #endif /* NDEBUG */ @@ -2104,175 +2116,174 @@ static void yy_destructor( */ /********* Begin destructor definitions ***************************************/ /* Default NON-TERMINAL Destructor */ - case 240: /* cmd */ - case 243: /* literal */ - case 254: /* db_options */ - case 256: /* alter_db_options */ - case 261: /* retention */ - case 262: /* full_table_name */ - case 265: /* table_options */ - case 269: /* alter_table_clause */ - case 270: /* alter_table_options */ - case 273: /* signed_literal */ - case 274: /* create_subtable_clause */ - case 277: /* drop_table_clause */ - case 280: /* column_def */ - case 283: /* col_name */ - case 284: /* db_name_cond_opt */ - case 285: /* like_pattern_opt */ - case 286: /* table_name_cond */ - case 287: /* from_db_opt */ - case 288: /* func_name */ - case 291: /* index_options */ - case 293: /* duration_literal */ - case 294: /* sliding_opt */ - case 295: /* func */ - case 298: /* topic_options */ - case 299: /* query_expression */ - case 302: /* explain_options */ - case 306: /* stream_options */ - case 307: /* into_opt */ - case 309: /* signed */ - case 310: /* literal_func */ - case 313: /* expression */ - case 314: /* pseudo_column */ - case 315: /* column_reference */ - case 316: /* function_expression */ - case 317: /* subquery */ - case 322: /* star_func_para */ - case 323: /* predicate */ - case 326: /* in_predicate_value */ - case 327: /* boolean_value_expression */ - case 328: /* boolean_primary */ - case 329: /* common_expression */ - case 330: /* from_clause */ - case 331: /* table_reference_list */ - case 332: /* table_reference */ - case 333: /* table_primary */ - case 334: /* joined_table */ - case 336: /* parenthesized_joined_table */ - case 338: /* search_condition */ - case 339: /* query_specification */ - case 342: /* where_clause_opt */ - case 344: /* twindow_clause_opt */ - case 346: /* having_clause_opt */ - case 348: /* select_item */ - case 349: /* fill_opt */ - case 352: /* query_expression_body */ - case 354: /* slimit_clause_opt */ - case 355: /* limit_clause_opt */ - case 356: /* query_primary */ - case 358: /* sort_specification */ + case 237: /* cmd */ + case 240: /* literal */ + case 251: /* db_options */ + case 253: /* alter_db_options */ + case 258: /* retention */ + case 259: /* full_table_name */ + case 262: /* table_options */ + case 266: /* alter_table_clause */ + case 267: /* alter_table_options */ + case 270: /* signed_literal */ + case 271: /* create_subtable_clause */ + case 274: /* drop_table_clause */ + case 277: /* column_def */ + case 280: /* col_name */ + case 281: /* db_name_cond_opt */ + case 282: /* like_pattern_opt */ + case 283: /* table_name_cond */ + case 284: /* from_db_opt */ + case 285: /* func_name */ + case 288: /* index_options */ + case 290: /* duration_literal */ + 
case 291: /* sliding_opt */ + case 292: /* func */ + case 295: /* query_expression */ + case 298: /* explain_options */ + case 302: /* stream_options */ + case 303: /* into_opt */ + case 305: /* signed */ + case 306: /* literal_func */ + case 309: /* expression */ + case 310: /* pseudo_column */ + case 311: /* column_reference */ + case 312: /* function_expression */ + case 313: /* subquery */ + case 318: /* star_func_para */ + case 319: /* predicate */ + case 322: /* in_predicate_value */ + case 323: /* boolean_value_expression */ + case 324: /* boolean_primary */ + case 325: /* common_expression */ + case 326: /* from_clause */ + case 327: /* table_reference_list */ + case 328: /* table_reference */ + case 329: /* table_primary */ + case 330: /* joined_table */ + case 332: /* parenthesized_joined_table */ + case 334: /* search_condition */ + case 335: /* query_specification */ + case 338: /* where_clause_opt */ + case 340: /* twindow_clause_opt */ + case 342: /* having_clause_opt */ + case 344: /* select_item */ + case 345: /* fill_opt */ + case 348: /* query_expression_body */ + case 350: /* slimit_clause_opt */ + case 351: /* limit_clause_opt */ + case 352: /* query_primary */ + case 354: /* sort_specification */ { - nodesDestroyNode((yypminor->yy636)); + nodesDestroyNode((yypminor->yy686)); } break; - case 241: /* account_options */ - case 242: /* alter_account_options */ - case 244: /* alter_account_option */ - case 304: /* bufsize_opt */ + case 238: /* account_options */ + case 239: /* alter_account_options */ + case 241: /* alter_account_option */ + case 300: /* bufsize_opt */ { } break; - case 245: /* user_name */ - case 247: /* priv_level */ - case 250: /* db_name */ - case 251: /* dnode_endpoint */ - case 252: /* dnode_host_name */ - case 271: /* column_name */ - case 279: /* table_name */ - case 289: /* function_name */ - case 290: /* index_name */ - case 297: /* topic_name */ - case 300: /* cgroup_name */ - case 305: /* stream_name */ - case 311: /* table_alias */ - case 312: /* column_alias */ - case 318: /* star_func */ - case 320: /* noarg_func */ - case 335: /* alias_opt */ + case 242: /* user_name */ + case 244: /* priv_level */ + case 247: /* db_name */ + case 248: /* dnode_endpoint */ + case 249: /* dnode_host_name */ + case 268: /* column_name */ + case 276: /* table_name */ + case 286: /* function_name */ + case 287: /* index_name */ + case 294: /* topic_name */ + case 296: /* cgroup_name */ + case 301: /* stream_name */ + case 307: /* table_alias */ + case 308: /* column_alias */ + case 314: /* star_func */ + case 316: /* noarg_func */ + case 331: /* alias_opt */ { } break; - case 246: /* privileges */ - case 248: /* priv_type_list */ - case 249: /* priv_type */ + case 243: /* privileges */ + case 245: /* priv_type_list */ + case 246: /* priv_type */ { } break; - case 253: /* not_exists_opt */ - case 255: /* exists_opt */ - case 301: /* analyze_opt */ - case 303: /* agg_func_opt */ - case 340: /* set_quantifier_opt */ + case 250: /* not_exists_opt */ + case 252: /* exists_opt */ + case 297: /* analyze_opt */ + case 299: /* agg_func_opt */ + case 336: /* set_quantifier_opt */ { } break; - case 257: /* integer_list */ - case 258: /* variable_list */ - case 259: /* retention_list */ - case 263: /* column_def_list */ - case 264: /* tags_def_opt */ - case 266: /* multi_create_clause */ - case 267: /* tags_def */ - case 268: /* multi_drop_clause */ - case 275: /* specific_tags_opt */ - case 276: /* literal_list */ - case 278: /* col_name_list */ - case 281: /* 
func_name_list */ - case 292: /* func_list */ - case 296: /* expression_list */ - case 308: /* dnode_list */ - case 319: /* star_func_para_list */ - case 321: /* other_para_list */ - case 341: /* select_list */ - case 343: /* partition_by_clause_opt */ - case 345: /* group_by_clause_opt */ - case 347: /* select_sublist */ - case 351: /* group_by_list */ - case 353: /* order_by_clause_opt */ - case 357: /* sort_specification_list */ + case 254: /* integer_list */ + case 255: /* variable_list */ + case 256: /* retention_list */ + case 260: /* column_def_list */ + case 261: /* tags_def_opt */ + case 263: /* multi_create_clause */ + case 264: /* tags_def */ + case 265: /* multi_drop_clause */ + case 272: /* specific_tags_opt */ + case 273: /* literal_list */ + case 275: /* col_name_list */ + case 278: /* func_name_list */ + case 289: /* func_list */ + case 293: /* expression_list */ + case 304: /* dnode_list */ + case 315: /* star_func_para_list */ + case 317: /* other_para_list */ + case 337: /* select_list */ + case 339: /* partition_by_clause_opt */ + case 341: /* group_by_clause_opt */ + case 343: /* select_sublist */ + case 347: /* group_by_list */ + case 349: /* order_by_clause_opt */ + case 353: /* sort_specification_list */ { - nodesDestroyList((yypminor->yy236)); + nodesDestroyList((yypminor->yy670)); } break; - case 260: /* alter_db_option */ - case 282: /* alter_table_option */ + case 257: /* alter_db_option */ + case 279: /* alter_table_option */ { } break; - case 272: /* type_name */ + case 269: /* type_name */ { } break; - case 324: /* compare_op */ - case 325: /* in_op */ + case 320: /* compare_op */ + case 321: /* in_op */ { } break; - case 337: /* join_type */ + case 333: /* join_type */ { } break; - case 350: /* fill_mode */ + case 346: /* fill_mode */ { } break; - case 359: /* ordering_specification_opt */ + case 355: /* ordering_specification_opt */ { } break; - case 360: /* null_ordering_opt */ + case 356: /* null_ordering_opt */ { } @@ -2400,15 +2411,18 @@ static YYACTIONTYPE yy_find_shift_action( do{ i = yy_shift_ofst[stateno]; assert( i>=0 ); - /* assert( i+YYNTOKEN<=(int)YY_NLOOKAHEAD ); */ + assert( i<=YY_ACTTAB_COUNT ); + assert( i+YYNTOKEN<=(int)YY_NLOOKAHEAD ); assert( iLookAhead!=YYNOCODE ); assert( iLookAhead < YYNTOKEN ); i += iLookAhead; - if( i>=YY_NLOOKAHEAD || yy_lookahead[i]!=iLookAhead ){ + assert( i<(int)YY_NLOOKAHEAD ); + if( yy_lookahead[i]!=iLookAhead ){ #ifdef YYFALLBACK YYCODETYPE iFallback; /* Fallback token */ - if( iLookAhead %s\n", @@ -2423,16 +2437,8 @@ static YYACTIONTYPE yy_find_shift_action( #ifdef YYWILDCARD { int j = i - iLookAhead + YYWILDCARD; - if( -#if YY_SHIFT_MIN+YYWILDCARD<0 - j>=0 && -#endif -#if YY_SHIFT_MAX+YYWILDCARD>=YY_ACTTAB_COUNT - j0 - ){ + assert( j<(int)(sizeof(yy_lookahead)/sizeof(yy_lookahead[0])) ); + if( yy_lookahead[j]==YYWILDCARD && iLookAhead>0 ){ #ifndef NDEBUG if( yyTraceFILE ){ fprintf(yyTraceFILE, "%sWILDCARD %s => %s\n", @@ -2446,6 +2452,7 @@ static YYACTIONTYPE yy_find_shift_action( #endif /* YYWILDCARD */ return yy_default[stateno]; }else{ + assert( i>=0 && iyytos; #ifndef NDEBUG if( yyTraceFILE && yyruleno<(int)(sizeof(yyRuleName)/sizeof(yyRuleName[0])) ){ - yysize = yyRuleInfo[yyruleno].nrhs; + yysize = yyRuleInfoNRhs[yyruleno]; if( yysize ){ - fprintf(yyTraceFILE, "%sReduce %d [%s], go to state %d.\n", + fprintf(yyTraceFILE, "%sReduce %d [%s]%s, pop back to state %d.\n", yyTracePrompt, - yyruleno, yyRuleName[yyruleno], yymsp[yysize].stateno); + yyruleno, yyRuleName[yyruleno], + yyrulenoyytos - 
yypParser->yystack)>yypParser->yyhwm ){ yypParser->yyhwm++; @@ -3114,11 +3572,11 @@ static YYACTIONTYPE yy_reduce( YYMINORTYPE yylhsminor; case 0: /* cmd ::= CREATE ACCOUNT NK_ID PASS NK_STRING account_options */ { pCxt->errCode = generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_EXPRIE_STATEMENT); } - yy_destructor(yypParser,241,&yymsp[0].minor); + yy_destructor(yypParser,238,&yymsp[0].minor); break; case 1: /* cmd ::= ALTER ACCOUNT NK_ID alter_account_options */ { pCxt->errCode = generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_EXPRIE_STATEMENT); } - yy_destructor(yypParser,242,&yymsp[0].minor); + yy_destructor(yypParser,239,&yymsp[0].minor); break; case 2: /* account_options ::= */ { } @@ -3132,20 +3590,20 @@ static YYACTIONTYPE yy_reduce( case 9: /* account_options ::= account_options USERS literal */ yytestcase(yyruleno==9); case 10: /* account_options ::= account_options CONNS literal */ yytestcase(yyruleno==10); case 11: /* account_options ::= account_options STATE literal */ yytestcase(yyruleno==11); -{ yy_destructor(yypParser,241,&yymsp[-2].minor); +{ yy_destructor(yypParser,238,&yymsp[-2].minor); { } - yy_destructor(yypParser,243,&yymsp[0].minor); + yy_destructor(yypParser,240,&yymsp[0].minor); } break; case 12: /* alter_account_options ::= alter_account_option */ -{ yy_destructor(yypParser,244,&yymsp[0].minor); +{ yy_destructor(yypParser,241,&yymsp[0].minor); { } } break; case 13: /* alter_account_options ::= alter_account_options alter_account_option */ -{ yy_destructor(yypParser,242,&yymsp[-1].minor); +{ yy_destructor(yypParser,239,&yymsp[-1].minor); { } - yy_destructor(yypParser,244,&yymsp[0].minor); + yy_destructor(yypParser,241,&yymsp[0].minor); } break; case 14: /* alter_account_option ::= PASS literal */ @@ -3159,63 +3617,63 @@ static YYACTIONTYPE yy_reduce( case 22: /* alter_account_option ::= CONNS literal */ yytestcase(yyruleno==22); case 23: /* alter_account_option ::= STATE literal */ yytestcase(yyruleno==23); { } - yy_destructor(yypParser,243,&yymsp[0].minor); + yy_destructor(yypParser,240,&yymsp[0].minor); break; case 24: /* cmd ::= CREATE USER user_name PASS NK_STRING */ -{ pCxt->pRootNode = createCreateUserStmt(pCxt, &yymsp[-2].minor.yy53, &yymsp[0].minor.yy0); } +{ pCxt->pRootNode = createCreateUserStmt(pCxt, &yymsp[-2].minor.yy113, &yymsp[0].minor.yy0); } break; case 25: /* cmd ::= ALTER USER user_name PASS NK_STRING */ -{ pCxt->pRootNode = createAlterUserStmt(pCxt, &yymsp[-2].minor.yy53, TSDB_ALTER_USER_PASSWD, &yymsp[0].minor.yy0); } +{ pCxt->pRootNode = createAlterUserStmt(pCxt, &yymsp[-2].minor.yy113, TSDB_ALTER_USER_PASSWD, &yymsp[0].minor.yy0); } break; case 26: /* cmd ::= ALTER USER user_name PRIVILEGE NK_STRING */ -{ pCxt->pRootNode = createAlterUserStmt(pCxt, &yymsp[-2].minor.yy53, TSDB_ALTER_USER_PRIVILEGES, &yymsp[0].minor.yy0); } +{ pCxt->pRootNode = createAlterUserStmt(pCxt, &yymsp[-2].minor.yy113, TSDB_ALTER_USER_PRIVILEGES, &yymsp[0].minor.yy0); } break; case 27: /* cmd ::= DROP USER user_name */ -{ pCxt->pRootNode = createDropUserStmt(pCxt, &yymsp[0].minor.yy53); } +{ pCxt->pRootNode = createDropUserStmt(pCxt, &yymsp[0].minor.yy113); } break; case 28: /* cmd ::= GRANT privileges ON priv_level TO user_name */ -{ pCxt->pRootNode = createGrantStmt(pCxt, yymsp[-4].minor.yy435, &yymsp[-2].minor.yy53, &yymsp[0].minor.yy53); } +{ pCxt->pRootNode = createGrantStmt(pCxt, yymsp[-4].minor.yy123, &yymsp[-2].minor.yy113, &yymsp[0].minor.yy113); } break; case 29: /* cmd ::= REVOKE privileges ON priv_level FROM user_name */ -{ pCxt->pRootNode = 
createRevokeStmt(pCxt, yymsp[-4].minor.yy435, &yymsp[-2].minor.yy53, &yymsp[0].minor.yy53); } +{ pCxt->pRootNode = createRevokeStmt(pCxt, yymsp[-4].minor.yy123, &yymsp[-2].minor.yy113, &yymsp[0].minor.yy113); } break; case 30: /* privileges ::= ALL */ -{ yymsp[0].minor.yy435 = PRIVILEGE_TYPE_ALL; } +{ yymsp[0].minor.yy123 = PRIVILEGE_TYPE_ALL; } break; case 31: /* privileges ::= priv_type_list */ case 32: /* priv_type_list ::= priv_type */ yytestcase(yyruleno==32); -{ yylhsminor.yy435 = yymsp[0].minor.yy435; } - yymsp[0].minor.yy435 = yylhsminor.yy435; +{ yylhsminor.yy123 = yymsp[0].minor.yy123; } + yymsp[0].minor.yy123 = yylhsminor.yy123; break; case 33: /* priv_type_list ::= priv_type_list NK_COMMA priv_type */ -{ yylhsminor.yy435 = yymsp[-2].minor.yy435 | yymsp[0].minor.yy435; } - yymsp[-2].minor.yy435 = yylhsminor.yy435; +{ yylhsminor.yy123 = yymsp[-2].minor.yy123 | yymsp[0].minor.yy123; } + yymsp[-2].minor.yy123 = yylhsminor.yy123; break; case 34: /* priv_type ::= READ */ -{ yymsp[0].minor.yy435 = PRIVILEGE_TYPE_READ; } +{ yymsp[0].minor.yy123 = PRIVILEGE_TYPE_READ; } break; case 35: /* priv_type ::= WRITE */ -{ yymsp[0].minor.yy435 = PRIVILEGE_TYPE_WRITE; } +{ yymsp[0].minor.yy123 = PRIVILEGE_TYPE_WRITE; } break; case 36: /* priv_level ::= NK_STAR NK_DOT NK_STAR */ -{ yylhsminor.yy53 = yymsp[-2].minor.yy0; } - yymsp[-2].minor.yy53 = yylhsminor.yy53; +{ yylhsminor.yy113 = yymsp[-2].minor.yy0; } + yymsp[-2].minor.yy113 = yylhsminor.yy113; break; case 37: /* priv_level ::= db_name NK_DOT NK_STAR */ -{ yylhsminor.yy53 = yymsp[-2].minor.yy53; } - yymsp[-2].minor.yy53 = yylhsminor.yy53; +{ yylhsminor.yy113 = yymsp[-2].minor.yy113; } + yymsp[-2].minor.yy113 = yylhsminor.yy113; break; case 38: /* cmd ::= CREATE DNODE dnode_endpoint */ -{ pCxt->pRootNode = createCreateDnodeStmt(pCxt, &yymsp[0].minor.yy53, NULL); } +{ pCxt->pRootNode = createCreateDnodeStmt(pCxt, &yymsp[0].minor.yy113, NULL); } break; case 39: /* cmd ::= CREATE DNODE dnode_host_name PORT NK_INTEGER */ -{ pCxt->pRootNode = createCreateDnodeStmt(pCxt, &yymsp[-2].minor.yy53, &yymsp[0].minor.yy0); } +{ pCxt->pRootNode = createCreateDnodeStmt(pCxt, &yymsp[-2].minor.yy113, &yymsp[0].minor.yy0); } break; case 40: /* cmd ::= DROP DNODE NK_INTEGER */ { pCxt->pRootNode = createDropDnodeStmt(pCxt, &yymsp[0].minor.yy0); } break; case 41: /* cmd ::= DROP DNODE dnode_endpoint */ -{ pCxt->pRootNode = createDropDnodeStmt(pCxt, &yymsp[0].minor.yy53); } +{ pCxt->pRootNode = createDropDnodeStmt(pCxt, &yymsp[0].minor.yy113); } break; case 42: /* cmd ::= ALTER DNODE NK_INTEGER NK_STRING */ { pCxt->pRootNode = createAlterDnodeStmt(pCxt, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy0, NULL); } @@ -3232,26 +3690,26 @@ static YYACTIONTYPE yy_reduce( case 46: /* dnode_endpoint ::= NK_STRING */ case 47: /* dnode_host_name ::= NK_ID */ yytestcase(yyruleno==47); case 48: /* dnode_host_name ::= NK_IPTOKEN */ yytestcase(yyruleno==48); - case 290: /* db_name ::= NK_ID */ yytestcase(yyruleno==290); - case 291: /* table_name ::= NK_ID */ yytestcase(yyruleno==291); - case 292: /* column_name ::= NK_ID */ yytestcase(yyruleno==292); - case 293: /* function_name ::= NK_ID */ yytestcase(yyruleno==293); - case 294: /* table_alias ::= NK_ID */ yytestcase(yyruleno==294); - case 295: /* column_alias ::= NK_ID */ yytestcase(yyruleno==295); - case 296: /* user_name ::= NK_ID */ yytestcase(yyruleno==296); - case 297: /* index_name ::= NK_ID */ yytestcase(yyruleno==297); - case 298: /* topic_name ::= NK_ID */ yytestcase(yyruleno==298); - case 299: /* stream_name ::= NK_ID */ 
yytestcase(yyruleno==299); - case 300: /* cgroup_name ::= NK_ID */ yytestcase(yyruleno==300); - case 333: /* noarg_func ::= NOW */ yytestcase(yyruleno==333); - case 334: /* noarg_func ::= TODAY */ yytestcase(yyruleno==334); - case 335: /* noarg_func ::= TIMEZONE */ yytestcase(yyruleno==335); - case 336: /* star_func ::= COUNT */ yytestcase(yyruleno==336); - case 337: /* star_func ::= FIRST */ yytestcase(yyruleno==337); - case 338: /* star_func ::= LAST */ yytestcase(yyruleno==338); - case 339: /* star_func ::= LAST_ROW */ yytestcase(yyruleno==339); -{ yylhsminor.yy53 = yymsp[0].minor.yy0; } - yymsp[0].minor.yy53 = yylhsminor.yy53; + case 286: /* db_name ::= NK_ID */ yytestcase(yyruleno==286); + case 287: /* table_name ::= NK_ID */ yytestcase(yyruleno==287); + case 288: /* column_name ::= NK_ID */ yytestcase(yyruleno==288); + case 289: /* function_name ::= NK_ID */ yytestcase(yyruleno==289); + case 290: /* table_alias ::= NK_ID */ yytestcase(yyruleno==290); + case 291: /* column_alias ::= NK_ID */ yytestcase(yyruleno==291); + case 292: /* user_name ::= NK_ID */ yytestcase(yyruleno==292); + case 293: /* index_name ::= NK_ID */ yytestcase(yyruleno==293); + case 294: /* topic_name ::= NK_ID */ yytestcase(yyruleno==294); + case 295: /* stream_name ::= NK_ID */ yytestcase(yyruleno==295); + case 296: /* cgroup_name ::= NK_ID */ yytestcase(yyruleno==296); + case 329: /* noarg_func ::= NOW */ yytestcase(yyruleno==329); + case 330: /* noarg_func ::= TODAY */ yytestcase(yyruleno==330); + case 331: /* noarg_func ::= TIMEZONE */ yytestcase(yyruleno==331); + case 332: /* star_func ::= COUNT */ yytestcase(yyruleno==332); + case 333: /* star_func ::= FIRST */ yytestcase(yyruleno==333); + case 334: /* star_func ::= LAST */ yytestcase(yyruleno==334); + case 335: /* star_func ::= LAST_ROW */ yytestcase(yyruleno==335); +{ yylhsminor.yy113 = yymsp[0].minor.yy0; } + yymsp[0].minor.yy113 = yylhsminor.yy113; break; case 49: /* cmd ::= ALTER LOCAL NK_STRING */ { pCxt->pRootNode = createAlterLocalStmt(pCxt, &yymsp[0].minor.yy0, NULL); } @@ -3284,1169 +3742,1153 @@ static YYACTIONTYPE yy_reduce( { pCxt->pRootNode = createDropComponentNodeStmt(pCxt, QUERY_NODE_DROP_MNODE_STMT, &yymsp[0].minor.yy0); } break; case 59: /* cmd ::= CREATE DATABASE not_exists_opt db_name db_options */ -{ pCxt->pRootNode = createCreateDatabaseStmt(pCxt, yymsp[-2].minor.yy603, &yymsp[-1].minor.yy53, yymsp[0].minor.yy636); } +{ pCxt->pRootNode = createCreateDatabaseStmt(pCxt, yymsp[-2].minor.yy131, &yymsp[-1].minor.yy113, yymsp[0].minor.yy686); } break; case 60: /* cmd ::= DROP DATABASE exists_opt db_name */ -{ pCxt->pRootNode = createDropDatabaseStmt(pCxt, yymsp[-1].minor.yy603, &yymsp[0].minor.yy53); } +{ pCxt->pRootNode = createDropDatabaseStmt(pCxt, yymsp[-1].minor.yy131, &yymsp[0].minor.yy113); } break; case 61: /* cmd ::= USE db_name */ -{ pCxt->pRootNode = createUseDatabaseStmt(pCxt, &yymsp[0].minor.yy53); } +{ pCxt->pRootNode = createUseDatabaseStmt(pCxt, &yymsp[0].minor.yy113); } break; case 62: /* cmd ::= ALTER DATABASE db_name alter_db_options */ -{ pCxt->pRootNode = createAlterDatabaseStmt(pCxt, &yymsp[-1].minor.yy53, yymsp[0].minor.yy636); } +{ pCxt->pRootNode = createAlterDatabaseStmt(pCxt, &yymsp[-1].minor.yy113, yymsp[0].minor.yy686); } break; case 63: /* not_exists_opt ::= IF NOT EXISTS */ -{ yymsp[-2].minor.yy603 = true; } +{ yymsp[-2].minor.yy131 = true; } break; case 64: /* not_exists_opt ::= */ case 66: /* exists_opt ::= */ yytestcase(yyruleno==66); - case 236: /* analyze_opt ::= */ yytestcase(yyruleno==236); - case 244: 
/* agg_func_opt ::= */ yytestcase(yyruleno==244); - case 392: /* set_quantifier_opt ::= */ yytestcase(yyruleno==392); -{ yymsp[1].minor.yy603 = false; } + case 232: /* analyze_opt ::= */ yytestcase(yyruleno==232); + case 240: /* agg_func_opt ::= */ yytestcase(yyruleno==240); + case 388: /* set_quantifier_opt ::= */ yytestcase(yyruleno==388); +{ yymsp[1].minor.yy131 = false; } break; case 65: /* exists_opt ::= IF EXISTS */ -{ yymsp[-1].minor.yy603 = true; } +{ yymsp[-1].minor.yy131 = true; } break; case 67: /* db_options ::= */ -{ yymsp[1].minor.yy636 = createDefaultDatabaseOptions(pCxt); } +{ yymsp[1].minor.yy686 = createDefaultDatabaseOptions(pCxt); } break; case 68: /* db_options ::= db_options BUFFER NK_INTEGER */ -{ yylhsminor.yy636 = setDatabaseOption(pCxt, yymsp[-2].minor.yy636, DB_OPTION_BUFFER, &yymsp[0].minor.yy0); } - yymsp[-2].minor.yy636 = yylhsminor.yy636; +{ yylhsminor.yy686 = setDatabaseOption(pCxt, yymsp[-2].minor.yy686, DB_OPTION_BUFFER, &yymsp[0].minor.yy0); } + yymsp[-2].minor.yy686 = yylhsminor.yy686; break; case 69: /* db_options ::= db_options CACHELAST NK_INTEGER */ -{ yylhsminor.yy636 = setDatabaseOption(pCxt, yymsp[-2].minor.yy636, DB_OPTION_CACHELAST, &yymsp[0].minor.yy0); } - yymsp[-2].minor.yy636 = yylhsminor.yy636; +{ yylhsminor.yy686 = setDatabaseOption(pCxt, yymsp[-2].minor.yy686, DB_OPTION_CACHELAST, &yymsp[0].minor.yy0); } + yymsp[-2].minor.yy686 = yylhsminor.yy686; break; case 70: /* db_options ::= db_options COMP NK_INTEGER */ -{ yylhsminor.yy636 = setDatabaseOption(pCxt, yymsp[-2].minor.yy636, DB_OPTION_COMP, &yymsp[0].minor.yy0); } - yymsp[-2].minor.yy636 = yylhsminor.yy636; +{ yylhsminor.yy686 = setDatabaseOption(pCxt, yymsp[-2].minor.yy686, DB_OPTION_COMP, &yymsp[0].minor.yy0); } + yymsp[-2].minor.yy686 = yylhsminor.yy686; break; case 71: /* db_options ::= db_options DAYS NK_INTEGER */ case 72: /* db_options ::= db_options DAYS NK_VARIABLE */ yytestcase(yyruleno==72); -{ yylhsminor.yy636 = setDatabaseOption(pCxt, yymsp[-2].minor.yy636, DB_OPTION_DAYS, &yymsp[0].minor.yy0); } - yymsp[-2].minor.yy636 = yylhsminor.yy636; +{ yylhsminor.yy686 = setDatabaseOption(pCxt, yymsp[-2].minor.yy686, DB_OPTION_DAYS, &yymsp[0].minor.yy0); } + yymsp[-2].minor.yy686 = yylhsminor.yy686; break; case 73: /* db_options ::= db_options FSYNC NK_INTEGER */ -{ yylhsminor.yy636 = setDatabaseOption(pCxt, yymsp[-2].minor.yy636, DB_OPTION_FSYNC, &yymsp[0].minor.yy0); } - yymsp[-2].minor.yy636 = yylhsminor.yy636; +{ yylhsminor.yy686 = setDatabaseOption(pCxt, yymsp[-2].minor.yy686, DB_OPTION_FSYNC, &yymsp[0].minor.yy0); } + yymsp[-2].minor.yy686 = yylhsminor.yy686; break; case 74: /* db_options ::= db_options MAXROWS NK_INTEGER */ -{ yylhsminor.yy636 = setDatabaseOption(pCxt, yymsp[-2].minor.yy636, DB_OPTION_MAXROWS, &yymsp[0].minor.yy0); } - yymsp[-2].minor.yy636 = yylhsminor.yy636; +{ yylhsminor.yy686 = setDatabaseOption(pCxt, yymsp[-2].minor.yy686, DB_OPTION_MAXROWS, &yymsp[0].minor.yy0); } + yymsp[-2].minor.yy686 = yylhsminor.yy686; break; case 75: /* db_options ::= db_options MINROWS NK_INTEGER */ -{ yylhsminor.yy636 = setDatabaseOption(pCxt, yymsp[-2].minor.yy636, DB_OPTION_MINROWS, &yymsp[0].minor.yy0); } - yymsp[-2].minor.yy636 = yylhsminor.yy636; +{ yylhsminor.yy686 = setDatabaseOption(pCxt, yymsp[-2].minor.yy686, DB_OPTION_MINROWS, &yymsp[0].minor.yy0); } + yymsp[-2].minor.yy686 = yylhsminor.yy686; break; case 76: /* db_options ::= db_options KEEP integer_list */ case 77: /* db_options ::= db_options KEEP variable_list */ yytestcase(yyruleno==77); -{ yylhsminor.yy636 = 
setDatabaseOption(pCxt, yymsp[-2].minor.yy636, DB_OPTION_KEEP, yymsp[0].minor.yy236); } - yymsp[-2].minor.yy636 = yylhsminor.yy636; +{ yylhsminor.yy686 = setDatabaseOption(pCxt, yymsp[-2].minor.yy686, DB_OPTION_KEEP, yymsp[0].minor.yy670); } + yymsp[-2].minor.yy686 = yylhsminor.yy686; break; case 78: /* db_options ::= db_options PAGES NK_INTEGER */ -{ yylhsminor.yy636 = setDatabaseOption(pCxt, yymsp[-2].minor.yy636, DB_OPTION_PAGES, &yymsp[0].minor.yy0); } - yymsp[-2].minor.yy636 = yylhsminor.yy636; +{ yylhsminor.yy686 = setDatabaseOption(pCxt, yymsp[-2].minor.yy686, DB_OPTION_PAGES, &yymsp[0].minor.yy0); } + yymsp[-2].minor.yy686 = yylhsminor.yy686; break; case 79: /* db_options ::= db_options PAGESIZE NK_INTEGER */ -{ yylhsminor.yy636 = setDatabaseOption(pCxt, yymsp[-2].minor.yy636, DB_OPTION_PAGESIZE, &yymsp[0].minor.yy0); } - yymsp[-2].minor.yy636 = yylhsminor.yy636; +{ yylhsminor.yy686 = setDatabaseOption(pCxt, yymsp[-2].minor.yy686, DB_OPTION_PAGESIZE, &yymsp[0].minor.yy0); } + yymsp[-2].minor.yy686 = yylhsminor.yy686; break; case 80: /* db_options ::= db_options PRECISION NK_STRING */ -{ yylhsminor.yy636 = setDatabaseOption(pCxt, yymsp[-2].minor.yy636, DB_OPTION_PRECISION, &yymsp[0].minor.yy0); } - yymsp[-2].minor.yy636 = yylhsminor.yy636; +{ yylhsminor.yy686 = setDatabaseOption(pCxt, yymsp[-2].minor.yy686, DB_OPTION_PRECISION, &yymsp[0].minor.yy0); } + yymsp[-2].minor.yy686 = yylhsminor.yy686; break; case 81: /* db_options ::= db_options REPLICA NK_INTEGER */ -{ yylhsminor.yy636 = setDatabaseOption(pCxt, yymsp[-2].minor.yy636, DB_OPTION_REPLICA, &yymsp[0].minor.yy0); } - yymsp[-2].minor.yy636 = yylhsminor.yy636; +{ yylhsminor.yy686 = setDatabaseOption(pCxt, yymsp[-2].minor.yy686, DB_OPTION_REPLICA, &yymsp[0].minor.yy0); } + yymsp[-2].minor.yy686 = yylhsminor.yy686; break; case 82: /* db_options ::= db_options STRICT NK_INTEGER */ -{ yylhsminor.yy636 = setDatabaseOption(pCxt, yymsp[-2].minor.yy636, DB_OPTION_STRICT, &yymsp[0].minor.yy0); } - yymsp[-2].minor.yy636 = yylhsminor.yy636; +{ yylhsminor.yy686 = setDatabaseOption(pCxt, yymsp[-2].minor.yy686, DB_OPTION_STRICT, &yymsp[0].minor.yy0); } + yymsp[-2].minor.yy686 = yylhsminor.yy686; break; case 83: /* db_options ::= db_options WAL NK_INTEGER */ -{ yylhsminor.yy636 = setDatabaseOption(pCxt, yymsp[-2].minor.yy636, DB_OPTION_WAL, &yymsp[0].minor.yy0); } - yymsp[-2].minor.yy636 = yylhsminor.yy636; +{ yylhsminor.yy686 = setDatabaseOption(pCxt, yymsp[-2].minor.yy686, DB_OPTION_WAL, &yymsp[0].minor.yy0); } + yymsp[-2].minor.yy686 = yylhsminor.yy686; break; case 84: /* db_options ::= db_options VGROUPS NK_INTEGER */ -{ yylhsminor.yy636 = setDatabaseOption(pCxt, yymsp[-2].minor.yy636, DB_OPTION_VGROUPS, &yymsp[0].minor.yy0); } - yymsp[-2].minor.yy636 = yylhsminor.yy636; +{ yylhsminor.yy686 = setDatabaseOption(pCxt, yymsp[-2].minor.yy686, DB_OPTION_VGROUPS, &yymsp[0].minor.yy0); } + yymsp[-2].minor.yy686 = yylhsminor.yy686; break; case 85: /* db_options ::= db_options SINGLE_STABLE NK_INTEGER */ -{ yylhsminor.yy636 = setDatabaseOption(pCxt, yymsp[-2].minor.yy636, DB_OPTION_SINGLE_STABLE, &yymsp[0].minor.yy0); } - yymsp[-2].minor.yy636 = yylhsminor.yy636; +{ yylhsminor.yy686 = setDatabaseOption(pCxt, yymsp[-2].minor.yy686, DB_OPTION_SINGLE_STABLE, &yymsp[0].minor.yy0); } + yymsp[-2].minor.yy686 = yylhsminor.yy686; break; case 86: /* db_options ::= db_options RETENTIONS retention_list */ -{ yylhsminor.yy636 = setDatabaseOption(pCxt, yymsp[-2].minor.yy636, DB_OPTION_RETENTIONS, yymsp[0].minor.yy236); } - yymsp[-2].minor.yy636 = 
yylhsminor.yy636; +{ yylhsminor.yy686 = setDatabaseOption(pCxt, yymsp[-2].minor.yy686, DB_OPTION_RETENTIONS, yymsp[0].minor.yy670); } + yymsp[-2].minor.yy686 = yylhsminor.yy686; break; case 87: /* db_options ::= db_options SCHEMALESS NK_INTEGER */ -{ yylhsminor.yy636 = setDatabaseOption(pCxt, yymsp[-2].minor.yy636, DB_OPTION_SCHEMALESS, &yymsp[0].minor.yy0); } - yymsp[-2].minor.yy636 = yylhsminor.yy636; +{ yylhsminor.yy686 = setDatabaseOption(pCxt, yymsp[-2].minor.yy686, DB_OPTION_SCHEMALESS, &yymsp[0].minor.yy0); } + yymsp[-2].minor.yy686 = yylhsminor.yy686; break; case 88: /* alter_db_options ::= alter_db_option */ -{ yylhsminor.yy636 = createAlterDatabaseOptions(pCxt); yylhsminor.yy636 = setAlterDatabaseOption(pCxt, yylhsminor.yy636, &yymsp[0].minor.yy25); } - yymsp[0].minor.yy636 = yylhsminor.yy636; +{ yylhsminor.yy686 = createAlterDatabaseOptions(pCxt); yylhsminor.yy686 = setAlterDatabaseOption(pCxt, yylhsminor.yy686, &yymsp[0].minor.yy53); } + yymsp[0].minor.yy686 = yylhsminor.yy686; break; case 89: /* alter_db_options ::= alter_db_options alter_db_option */ -{ yylhsminor.yy636 = setAlterDatabaseOption(pCxt, yymsp[-1].minor.yy636, &yymsp[0].minor.yy25); } - yymsp[-1].minor.yy636 = yylhsminor.yy636; +{ yylhsminor.yy686 = setAlterDatabaseOption(pCxt, yymsp[-1].minor.yy686, &yymsp[0].minor.yy53); } + yymsp[-1].minor.yy686 = yylhsminor.yy686; break; case 90: /* alter_db_option ::= BUFFER NK_INTEGER */ -{ yymsp[-1].minor.yy25.type = DB_OPTION_BUFFER; yymsp[-1].minor.yy25.val = yymsp[0].minor.yy0; } +{ yymsp[-1].minor.yy53.type = DB_OPTION_BUFFER; yymsp[-1].minor.yy53.val = yymsp[0].minor.yy0; } break; case 91: /* alter_db_option ::= CACHELAST NK_INTEGER */ -{ yymsp[-1].minor.yy25.type = DB_OPTION_CACHELAST; yymsp[-1].minor.yy25.val = yymsp[0].minor.yy0; } +{ yymsp[-1].minor.yy53.type = DB_OPTION_CACHELAST; yymsp[-1].minor.yy53.val = yymsp[0].minor.yy0; } break; case 92: /* alter_db_option ::= FSYNC NK_INTEGER */ -{ yymsp[-1].minor.yy25.type = DB_OPTION_FSYNC; yymsp[-1].minor.yy25.val = yymsp[0].minor.yy0; } +{ yymsp[-1].minor.yy53.type = DB_OPTION_FSYNC; yymsp[-1].minor.yy53.val = yymsp[0].minor.yy0; } break; case 93: /* alter_db_option ::= KEEP integer_list */ case 94: /* alter_db_option ::= KEEP variable_list */ yytestcase(yyruleno==94); -{ yymsp[-1].minor.yy25.type = DB_OPTION_KEEP; yymsp[-1].minor.yy25.pList = yymsp[0].minor.yy236; } +{ yymsp[-1].minor.yy53.type = DB_OPTION_KEEP; yymsp[-1].minor.yy53.pList = yymsp[0].minor.yy670; } break; case 95: /* alter_db_option ::= PAGES NK_INTEGER */ -{ yymsp[-1].minor.yy25.type = DB_OPTION_PAGES; yymsp[-1].minor.yy25.val = yymsp[0].minor.yy0; } +{ yymsp[-1].minor.yy53.type = DB_OPTION_PAGES; yymsp[-1].minor.yy53.val = yymsp[0].minor.yy0; } break; case 96: /* alter_db_option ::= REPLICA NK_INTEGER */ -{ yymsp[-1].minor.yy25.type = DB_OPTION_REPLICA; yymsp[-1].minor.yy25.val = yymsp[0].minor.yy0; } +{ yymsp[-1].minor.yy53.type = DB_OPTION_REPLICA; yymsp[-1].minor.yy53.val = yymsp[0].minor.yy0; } break; case 97: /* alter_db_option ::= STRICT NK_INTEGER */ -{ yymsp[-1].minor.yy25.type = DB_OPTION_STRICT; yymsp[-1].minor.yy25.val = yymsp[0].minor.yy0; } +{ yymsp[-1].minor.yy53.type = DB_OPTION_STRICT; yymsp[-1].minor.yy53.val = yymsp[0].minor.yy0; } break; case 98: /* alter_db_option ::= WAL NK_INTEGER */ -{ yymsp[-1].minor.yy25.type = DB_OPTION_WAL; yymsp[-1].minor.yy25.val = yymsp[0].minor.yy0; } +{ yymsp[-1].minor.yy53.type = DB_OPTION_WAL; yymsp[-1].minor.yy53.val = yymsp[0].minor.yy0; } break; case 99: /* integer_list ::= NK_INTEGER */ -{ 
yylhsminor.yy236 = createNodeList(pCxt, createValueNode(pCxt, TSDB_DATA_TYPE_BIGINT, &yymsp[0].minor.yy0)); } - yymsp[0].minor.yy236 = yylhsminor.yy236; +{ yylhsminor.yy670 = createNodeList(pCxt, createValueNode(pCxt, TSDB_DATA_TYPE_BIGINT, &yymsp[0].minor.yy0)); } + yymsp[0].minor.yy670 = yylhsminor.yy670; break; case 100: /* integer_list ::= integer_list NK_COMMA NK_INTEGER */ - case 263: /* dnode_list ::= dnode_list DNODE NK_INTEGER */ yytestcase(yyruleno==263); -{ yylhsminor.yy236 = addNodeToList(pCxt, yymsp[-2].minor.yy236, createValueNode(pCxt, TSDB_DATA_TYPE_BIGINT, &yymsp[0].minor.yy0)); } - yymsp[-2].minor.yy236 = yylhsminor.yy236; + case 259: /* dnode_list ::= dnode_list DNODE NK_INTEGER */ yytestcase(yyruleno==259); +{ yylhsminor.yy670 = addNodeToList(pCxt, yymsp[-2].minor.yy670, createValueNode(pCxt, TSDB_DATA_TYPE_BIGINT, &yymsp[0].minor.yy0)); } + yymsp[-2].minor.yy670 = yylhsminor.yy670; break; case 101: /* variable_list ::= NK_VARIABLE */ -{ yylhsminor.yy236 = createNodeList(pCxt, createDurationValueNode(pCxt, &yymsp[0].minor.yy0)); } - yymsp[0].minor.yy236 = yylhsminor.yy236; +{ yylhsminor.yy670 = createNodeList(pCxt, createDurationValueNode(pCxt, &yymsp[0].minor.yy0)); } + yymsp[0].minor.yy670 = yylhsminor.yy670; break; case 102: /* variable_list ::= variable_list NK_COMMA NK_VARIABLE */ -{ yylhsminor.yy236 = addNodeToList(pCxt, yymsp[-2].minor.yy236, createDurationValueNode(pCxt, &yymsp[0].minor.yy0)); } - yymsp[-2].minor.yy236 = yylhsminor.yy236; +{ yylhsminor.yy670 = addNodeToList(pCxt, yymsp[-2].minor.yy670, createDurationValueNode(pCxt, &yymsp[0].minor.yy0)); } + yymsp[-2].minor.yy670 = yylhsminor.yy670; break; case 103: /* retention_list ::= retention */ case 123: /* multi_create_clause ::= create_subtable_clause */ yytestcase(yyruleno==123); case 126: /* multi_drop_clause ::= drop_table_clause */ yytestcase(yyruleno==126); case 133: /* column_def_list ::= column_def */ yytestcase(yyruleno==133); - case 174: /* col_name_list ::= col_name */ yytestcase(yyruleno==174); - case 212: /* func_name_list ::= func_name */ yytestcase(yyruleno==212); - case 221: /* func_list ::= func */ yytestcase(yyruleno==221); - case 288: /* literal_list ::= signed_literal */ yytestcase(yyruleno==288); - case 342: /* other_para_list ::= star_func_para */ yytestcase(yyruleno==342); - case 397: /* select_sublist ::= select_item */ yytestcase(yyruleno==397); - case 446: /* sort_specification_list ::= sort_specification */ yytestcase(yyruleno==446); -{ yylhsminor.yy236 = createNodeList(pCxt, yymsp[0].minor.yy636); } - yymsp[0].minor.yy236 = yylhsminor.yy236; + case 173: /* col_name_list ::= col_name */ yytestcase(yyruleno==173); + case 211: /* func_name_list ::= func_name */ yytestcase(yyruleno==211); + case 220: /* func_list ::= func */ yytestcase(yyruleno==220); + case 284: /* literal_list ::= signed_literal */ yytestcase(yyruleno==284); + case 338: /* other_para_list ::= star_func_para */ yytestcase(yyruleno==338); + case 393: /* select_sublist ::= select_item */ yytestcase(yyruleno==393); + case 442: /* sort_specification_list ::= sort_specification */ yytestcase(yyruleno==442); +{ yylhsminor.yy670 = createNodeList(pCxt, yymsp[0].minor.yy686); } + yymsp[0].minor.yy670 = yylhsminor.yy670; break; case 104: /* retention_list ::= retention_list NK_COMMA retention */ case 134: /* column_def_list ::= column_def_list NK_COMMA column_def */ yytestcase(yyruleno==134); - case 175: /* col_name_list ::= col_name_list NK_COMMA col_name */ yytestcase(yyruleno==175); - case 213: /* func_name_list ::= 
func_name_list NK_COMMA func_name */ yytestcase(yyruleno==213); - case 222: /* func_list ::= func_list NK_COMMA func */ yytestcase(yyruleno==222); - case 289: /* literal_list ::= literal_list NK_COMMA signed_literal */ yytestcase(yyruleno==289); - case 343: /* other_para_list ::= other_para_list NK_COMMA star_func_para */ yytestcase(yyruleno==343); - case 398: /* select_sublist ::= select_sublist NK_COMMA select_item */ yytestcase(yyruleno==398); - case 447: /* sort_specification_list ::= sort_specification_list NK_COMMA sort_specification */ yytestcase(yyruleno==447); -{ yylhsminor.yy236 = addNodeToList(pCxt, yymsp[-2].minor.yy236, yymsp[0].minor.yy636); } - yymsp[-2].minor.yy236 = yylhsminor.yy236; + case 174: /* col_name_list ::= col_name_list NK_COMMA col_name */ yytestcase(yyruleno==174); + case 212: /* func_name_list ::= func_name_list NK_COMMA func_name */ yytestcase(yyruleno==212); + case 221: /* func_list ::= func_list NK_COMMA func */ yytestcase(yyruleno==221); + case 285: /* literal_list ::= literal_list NK_COMMA signed_literal */ yytestcase(yyruleno==285); + case 339: /* other_para_list ::= other_para_list NK_COMMA star_func_para */ yytestcase(yyruleno==339); + case 394: /* select_sublist ::= select_sublist NK_COMMA select_item */ yytestcase(yyruleno==394); + case 443: /* sort_specification_list ::= sort_specification_list NK_COMMA sort_specification */ yytestcase(yyruleno==443); +{ yylhsminor.yy670 = addNodeToList(pCxt, yymsp[-2].minor.yy670, yymsp[0].minor.yy686); } + yymsp[-2].minor.yy670 = yylhsminor.yy670; break; case 105: /* retention ::= NK_VARIABLE NK_COLON NK_VARIABLE */ -{ yylhsminor.yy636 = createNodeListNodeEx(pCxt, createDurationValueNode(pCxt, &yymsp[-2].minor.yy0), createDurationValueNode(pCxt, &yymsp[0].minor.yy0)); } - yymsp[-2].minor.yy636 = yylhsminor.yy636; +{ yylhsminor.yy686 = createNodeListNodeEx(pCxt, createDurationValueNode(pCxt, &yymsp[-2].minor.yy0), createDurationValueNode(pCxt, &yymsp[0].minor.yy0)); } + yymsp[-2].minor.yy686 = yylhsminor.yy686; break; case 106: /* cmd ::= CREATE TABLE not_exists_opt full_table_name NK_LP column_def_list NK_RP tags_def_opt table_options */ case 108: /* cmd ::= CREATE STABLE not_exists_opt full_table_name NK_LP column_def_list NK_RP tags_def table_options */ yytestcase(yyruleno==108); -{ pCxt->pRootNode = createCreateTableStmt(pCxt, yymsp[-6].minor.yy603, yymsp[-5].minor.yy636, yymsp[-3].minor.yy236, yymsp[-1].minor.yy236, yymsp[0].minor.yy636); } +{ pCxt->pRootNode = createCreateTableStmt(pCxt, yymsp[-6].minor.yy131, yymsp[-5].minor.yy686, yymsp[-3].minor.yy670, yymsp[-1].minor.yy670, yymsp[0].minor.yy686); } break; case 107: /* cmd ::= CREATE TABLE multi_create_clause */ -{ pCxt->pRootNode = createCreateMultiTableStmt(pCxt, yymsp[0].minor.yy236); } +{ pCxt->pRootNode = createCreateMultiTableStmt(pCxt, yymsp[0].minor.yy670); } break; case 109: /* cmd ::= DROP TABLE multi_drop_clause */ -{ pCxt->pRootNode = createDropTableStmt(pCxt, yymsp[0].minor.yy236); } +{ pCxt->pRootNode = createDropTableStmt(pCxt, yymsp[0].minor.yy670); } break; case 110: /* cmd ::= DROP STABLE exists_opt full_table_name */ -{ pCxt->pRootNode = createDropSuperTableStmt(pCxt, yymsp[-1].minor.yy603, yymsp[0].minor.yy636); } +{ pCxt->pRootNode = createDropSuperTableStmt(pCxt, yymsp[-1].minor.yy131, yymsp[0].minor.yy686); } break; case 111: /* cmd ::= ALTER TABLE alter_table_clause */ case 112: /* cmd ::= ALTER STABLE alter_table_clause */ yytestcase(yyruleno==112); - case 265: /* cmd ::= query_expression */ yytestcase(yyruleno==265); -{ 
pCxt->pRootNode = yymsp[0].minor.yy636; } + case 261: /* cmd ::= query_expression */ yytestcase(yyruleno==261); +{ pCxt->pRootNode = yymsp[0].minor.yy686; } break; case 113: /* alter_table_clause ::= full_table_name alter_table_options */ -{ yylhsminor.yy636 = createAlterTableModifyOptions(pCxt, yymsp[-1].minor.yy636, yymsp[0].minor.yy636); } - yymsp[-1].minor.yy636 = yylhsminor.yy636; +{ yylhsminor.yy686 = createAlterTableModifyOptions(pCxt, yymsp[-1].minor.yy686, yymsp[0].minor.yy686); } + yymsp[-1].minor.yy686 = yylhsminor.yy686; break; case 114: /* alter_table_clause ::= full_table_name ADD COLUMN column_name type_name */ -{ yylhsminor.yy636 = createAlterTableAddModifyCol(pCxt, yymsp[-4].minor.yy636, TSDB_ALTER_TABLE_ADD_COLUMN, &yymsp[-1].minor.yy53, yymsp[0].minor.yy450); } - yymsp[-4].minor.yy636 = yylhsminor.yy636; +{ yylhsminor.yy686 = createAlterTableAddModifyCol(pCxt, yymsp[-4].minor.yy686, TSDB_ALTER_TABLE_ADD_COLUMN, &yymsp[-1].minor.yy113, yymsp[0].minor.yy490); } + yymsp[-4].minor.yy686 = yylhsminor.yy686; break; case 115: /* alter_table_clause ::= full_table_name DROP COLUMN column_name */ -{ yylhsminor.yy636 = createAlterTableDropCol(pCxt, yymsp[-3].minor.yy636, TSDB_ALTER_TABLE_DROP_COLUMN, &yymsp[0].minor.yy53); } - yymsp[-3].minor.yy636 = yylhsminor.yy636; +{ yylhsminor.yy686 = createAlterTableDropCol(pCxt, yymsp[-3].minor.yy686, TSDB_ALTER_TABLE_DROP_COLUMN, &yymsp[0].minor.yy113); } + yymsp[-3].minor.yy686 = yylhsminor.yy686; break; case 116: /* alter_table_clause ::= full_table_name MODIFY COLUMN column_name type_name */ -{ yylhsminor.yy636 = createAlterTableAddModifyCol(pCxt, yymsp[-4].minor.yy636, TSDB_ALTER_TABLE_UPDATE_COLUMN_BYTES, &yymsp[-1].minor.yy53, yymsp[0].minor.yy450); } - yymsp[-4].minor.yy636 = yylhsminor.yy636; +{ yylhsminor.yy686 = createAlterTableAddModifyCol(pCxt, yymsp[-4].minor.yy686, TSDB_ALTER_TABLE_UPDATE_COLUMN_BYTES, &yymsp[-1].minor.yy113, yymsp[0].minor.yy490); } + yymsp[-4].minor.yy686 = yylhsminor.yy686; break; case 117: /* alter_table_clause ::= full_table_name RENAME COLUMN column_name column_name */ -{ yylhsminor.yy636 = createAlterTableRenameCol(pCxt, yymsp[-4].minor.yy636, TSDB_ALTER_TABLE_UPDATE_COLUMN_NAME, &yymsp[-1].minor.yy53, &yymsp[0].minor.yy53); } - yymsp[-4].minor.yy636 = yylhsminor.yy636; +{ yylhsminor.yy686 = createAlterTableRenameCol(pCxt, yymsp[-4].minor.yy686, TSDB_ALTER_TABLE_UPDATE_COLUMN_NAME, &yymsp[-1].minor.yy113, &yymsp[0].minor.yy113); } + yymsp[-4].minor.yy686 = yylhsminor.yy686; break; case 118: /* alter_table_clause ::= full_table_name ADD TAG column_name type_name */ -{ yylhsminor.yy636 = createAlterTableAddModifyCol(pCxt, yymsp[-4].minor.yy636, TSDB_ALTER_TABLE_ADD_TAG, &yymsp[-1].minor.yy53, yymsp[0].minor.yy450); } - yymsp[-4].minor.yy636 = yylhsminor.yy636; +{ yylhsminor.yy686 = createAlterTableAddModifyCol(pCxt, yymsp[-4].minor.yy686, TSDB_ALTER_TABLE_ADD_TAG, &yymsp[-1].minor.yy113, yymsp[0].minor.yy490); } + yymsp[-4].minor.yy686 = yylhsminor.yy686; break; case 119: /* alter_table_clause ::= full_table_name DROP TAG column_name */ -{ yylhsminor.yy636 = createAlterTableDropCol(pCxt, yymsp[-3].minor.yy636, TSDB_ALTER_TABLE_DROP_TAG, &yymsp[0].minor.yy53); } - yymsp[-3].minor.yy636 = yylhsminor.yy636; +{ yylhsminor.yy686 = createAlterTableDropCol(pCxt, yymsp[-3].minor.yy686, TSDB_ALTER_TABLE_DROP_TAG, &yymsp[0].minor.yy113); } + yymsp[-3].minor.yy686 = yylhsminor.yy686; break; case 120: /* alter_table_clause ::= full_table_name MODIFY TAG column_name type_name */ -{ yylhsminor.yy636 = 
createAlterTableAddModifyCol(pCxt, yymsp[-4].minor.yy636, TSDB_ALTER_TABLE_UPDATE_TAG_BYTES, &yymsp[-1].minor.yy53, yymsp[0].minor.yy450); }
-  yymsp[-4].minor.yy636 = yylhsminor.yy636;
+{ yylhsminor.yy686 = createAlterTableAddModifyCol(pCxt, yymsp[-4].minor.yy686, TSDB_ALTER_TABLE_UPDATE_TAG_BYTES, &yymsp[-1].minor.yy113, yymsp[0].minor.yy490); }
+  yymsp[-4].minor.yy686 = yylhsminor.yy686;
         break;
       case 121: /* alter_table_clause ::= full_table_name RENAME TAG column_name column_name */
-{ yylhsminor.yy636 = createAlterTableRenameCol(pCxt, yymsp[-4].minor.yy636, TSDB_ALTER_TABLE_UPDATE_TAG_NAME, &yymsp[-1].minor.yy53, &yymsp[0].minor.yy53); }
-  yymsp[-4].minor.yy636 = yylhsminor.yy636;
+{ yylhsminor.yy686 = createAlterTableRenameCol(pCxt, yymsp[-4].minor.yy686, TSDB_ALTER_TABLE_UPDATE_TAG_NAME, &yymsp[-1].minor.yy113, &yymsp[0].minor.yy113); }
+  yymsp[-4].minor.yy686 = yylhsminor.yy686;
         break;
       case 122: /* alter_table_clause ::= full_table_name SET TAG column_name NK_EQ signed_literal */
-{ yylhsminor.yy636 = createAlterTableSetTag(pCxt, yymsp[-5].minor.yy636, &yymsp[-2].minor.yy53, yymsp[0].minor.yy636); }
-  yymsp[-5].minor.yy636 = yylhsminor.yy636;
+{ yylhsminor.yy686 = createAlterTableSetTag(pCxt, yymsp[-5].minor.yy686, &yymsp[-2].minor.yy113, yymsp[0].minor.yy686); }
+  yymsp[-5].minor.yy686 = yylhsminor.yy686;
         break;
       case 124: /* multi_create_clause ::= multi_create_clause create_subtable_clause */
       case 127: /* multi_drop_clause ::= multi_drop_clause drop_table_clause */ yytestcase(yyruleno==127);
-{ yylhsminor.yy236 = addNodeToList(pCxt, yymsp[-1].minor.yy236, yymsp[0].minor.yy636); }
-  yymsp[-1].minor.yy236 = yylhsminor.yy236;
+{ yylhsminor.yy670 = addNodeToList(pCxt, yymsp[-1].minor.yy670, yymsp[0].minor.yy686); }
+  yymsp[-1].minor.yy670 = yylhsminor.yy670;
         break;
       case 125: /* create_subtable_clause ::= not_exists_opt full_table_name USING full_table_name specific_tags_opt TAGS NK_LP literal_list NK_RP table_options */
-{ yylhsminor.yy636 = createCreateSubTableClause(pCxt, yymsp[-9].minor.yy603, yymsp[-8].minor.yy636, yymsp[-6].minor.yy636, yymsp[-5].minor.yy236, yymsp[-2].minor.yy236, yymsp[0].minor.yy636); }
-  yymsp[-9].minor.yy636 = yylhsminor.yy636;
+{ yylhsminor.yy686 = createCreateSubTableClause(pCxt, yymsp[-9].minor.yy131, yymsp[-8].minor.yy686, yymsp[-6].minor.yy686, yymsp[-5].minor.yy670, yymsp[-2].minor.yy670, yymsp[0].minor.yy686); }
+  yymsp[-9].minor.yy686 = yylhsminor.yy686;
         break;
       case 128: /* drop_table_clause ::= exists_opt full_table_name */
-{ yylhsminor.yy636 = createDropTableClause(pCxt, yymsp[-1].minor.yy603, yymsp[0].minor.yy636); }
-  yymsp[-1].minor.yy636 = yylhsminor.yy636;
+{ yylhsminor.yy686 = createDropTableClause(pCxt, yymsp[-1].minor.yy131, yymsp[0].minor.yy686); }
+  yymsp[-1].minor.yy686 = yylhsminor.yy686;
         break;
       case 129: /* specific_tags_opt ::= */
       case 160: /* tags_def_opt ::= */ yytestcase(yyruleno==160);
-      case 405: /* partition_by_clause_opt ::= */ yytestcase(yyruleno==405);
-      case 422: /* group_by_clause_opt ::= */ yytestcase(yyruleno==422);
-      case 434: /* order_by_clause_opt ::= */ yytestcase(yyruleno==434);
-{ yymsp[1].minor.yy236 = NULL; }
+      case 401: /* partition_by_clause_opt ::= */ yytestcase(yyruleno==401);
+      case 418: /* group_by_clause_opt ::= */ yytestcase(yyruleno==418);
+      case 430: /* order_by_clause_opt ::= */ yytestcase(yyruleno==430);
+{ yymsp[1].minor.yy670 = NULL; }
         break;
       case 130: /* specific_tags_opt ::= NK_LP col_name_list NK_RP */
-{ yymsp[-2].minor.yy236 = yymsp[-1].minor.yy236; }
+{ yymsp[-2].minor.yy670 = yymsp[-1].minor.yy670; }
         break;
       case 131: /* full_table_name ::= table_name */
-{ yylhsminor.yy636 = createRealTableNode(pCxt, NULL, &yymsp[0].minor.yy53, NULL); }
-  yymsp[0].minor.yy636 = yylhsminor.yy636;
+{ yylhsminor.yy686 = createRealTableNode(pCxt, NULL, &yymsp[0].minor.yy113, NULL); }
+  yymsp[0].minor.yy686 = yylhsminor.yy686;
         break;
       case 132: /* full_table_name ::= db_name NK_DOT table_name */
-{ yylhsminor.yy636 = createRealTableNode(pCxt, &yymsp[-2].minor.yy53, &yymsp[0].minor.yy53, NULL); }
-  yymsp[-2].minor.yy636 = yylhsminor.yy636;
+{ yylhsminor.yy686 = createRealTableNode(pCxt, &yymsp[-2].minor.yy113, &yymsp[0].minor.yy113, NULL); }
+  yymsp[-2].minor.yy686 = yylhsminor.yy686;
         break;
       case 135: /* column_def ::= column_name type_name */
-{ yylhsminor.yy636 = createColumnDefNode(pCxt, &yymsp[-1].minor.yy53, yymsp[0].minor.yy450, NULL); }
-  yymsp[-1].minor.yy636 = yylhsminor.yy636;
+{ yylhsminor.yy686 = createColumnDefNode(pCxt, &yymsp[-1].minor.yy113, yymsp[0].minor.yy490, NULL); }
+  yymsp[-1].minor.yy686 = yylhsminor.yy686;
         break;
       case 136: /* column_def ::= column_name type_name COMMENT NK_STRING */
-{ yylhsminor.yy636 = createColumnDefNode(pCxt, &yymsp[-3].minor.yy53, yymsp[-2].minor.yy450, &yymsp[0].minor.yy0); }
-  yymsp[-3].minor.yy636 = yylhsminor.yy636;
+{ yylhsminor.yy686 = createColumnDefNode(pCxt, &yymsp[-3].minor.yy113, yymsp[-2].minor.yy490, &yymsp[0].minor.yy0); }
+  yymsp[-3].minor.yy686 = yylhsminor.yy686;
         break;
       case 137: /* type_name ::= BOOL */
-{ yymsp[0].minor.yy450 = createDataType(TSDB_DATA_TYPE_BOOL); }
+{ yymsp[0].minor.yy490 = createDataType(TSDB_DATA_TYPE_BOOL); }
         break;
       case 138: /* type_name ::= TINYINT */
-{ yymsp[0].minor.yy450 = createDataType(TSDB_DATA_TYPE_TINYINT); }
+{ yymsp[0].minor.yy490 = createDataType(TSDB_DATA_TYPE_TINYINT); }
         break;
       case 139: /* type_name ::= SMALLINT */
-{ yymsp[0].minor.yy450 = createDataType(TSDB_DATA_TYPE_SMALLINT); }
+{ yymsp[0].minor.yy490 = createDataType(TSDB_DATA_TYPE_SMALLINT); }
         break;
       case 140: /* type_name ::= INT */
       case 141: /* type_name ::= INTEGER */ yytestcase(yyruleno==141);
-{ yymsp[0].minor.yy450 = createDataType(TSDB_DATA_TYPE_INT); }
+{ yymsp[0].minor.yy490 = createDataType(TSDB_DATA_TYPE_INT); }
         break;
       case 142: /* type_name ::= BIGINT */
-{ yymsp[0].minor.yy450 = createDataType(TSDB_DATA_TYPE_BIGINT); }
+{ yymsp[0].minor.yy490 = createDataType(TSDB_DATA_TYPE_BIGINT); }
         break;
       case 143: /* type_name ::= FLOAT */
-{ yymsp[0].minor.yy450 = createDataType(TSDB_DATA_TYPE_FLOAT); }
+{ yymsp[0].minor.yy490 = createDataType(TSDB_DATA_TYPE_FLOAT); }
         break;
       case 144: /* type_name ::= DOUBLE */
-{ yymsp[0].minor.yy450 = createDataType(TSDB_DATA_TYPE_DOUBLE); }
+{ yymsp[0].minor.yy490 = createDataType(TSDB_DATA_TYPE_DOUBLE); }
         break;
       case 145: /* type_name ::= BINARY NK_LP NK_INTEGER NK_RP */
-{ yymsp[-3].minor.yy450 = createVarLenDataType(TSDB_DATA_TYPE_BINARY, &yymsp[-1].minor.yy0); }
+{ yymsp[-3].minor.yy490 = createVarLenDataType(TSDB_DATA_TYPE_BINARY, &yymsp[-1].minor.yy0); }
         break;
       case 146: /* type_name ::= TIMESTAMP */
-{ yymsp[0].minor.yy450 = createDataType(TSDB_DATA_TYPE_TIMESTAMP); }
+{ yymsp[0].minor.yy490 = createDataType(TSDB_DATA_TYPE_TIMESTAMP); }
         break;
       case 147: /* type_name ::= NCHAR NK_LP NK_INTEGER NK_RP */
-{ yymsp[-3].minor.yy450 = createVarLenDataType(TSDB_DATA_TYPE_NCHAR, &yymsp[-1].minor.yy0); }
+{ yymsp[-3].minor.yy490 = createVarLenDataType(TSDB_DATA_TYPE_NCHAR, &yymsp[-1].minor.yy0); }
         break;
       case 148: /* type_name ::= TINYINT UNSIGNED */
-{ yymsp[-1].minor.yy450 = createDataType(TSDB_DATA_TYPE_UTINYINT); }
+{ yymsp[-1].minor.yy490 = createDataType(TSDB_DATA_TYPE_UTINYINT); }
         break;
       case 149: /* type_name ::= SMALLINT UNSIGNED */
-{ yymsp[-1].minor.yy450 = createDataType(TSDB_DATA_TYPE_USMALLINT); }
+{ yymsp[-1].minor.yy490 = createDataType(TSDB_DATA_TYPE_USMALLINT); }
         break;
       case 150: /* type_name ::= INT UNSIGNED */
-{ yymsp[-1].minor.yy450 = createDataType(TSDB_DATA_TYPE_UINT); }
+{ yymsp[-1].minor.yy490 = createDataType(TSDB_DATA_TYPE_UINT); }
         break;
       case 151: /* type_name ::= BIGINT UNSIGNED */
-{ yymsp[-1].minor.yy450 = createDataType(TSDB_DATA_TYPE_UBIGINT); }
+{ yymsp[-1].minor.yy490 = createDataType(TSDB_DATA_TYPE_UBIGINT); }
         break;
       case 152: /* type_name ::= JSON */
-{ yymsp[0].minor.yy450 = createDataType(TSDB_DATA_TYPE_JSON); }
+{ yymsp[0].minor.yy490 = createDataType(TSDB_DATA_TYPE_JSON); }
         break;
       case 153: /* type_name ::= VARCHAR NK_LP NK_INTEGER NK_RP */
-{ yymsp[-3].minor.yy450 = createVarLenDataType(TSDB_DATA_TYPE_VARCHAR, &yymsp[-1].minor.yy0); }
+{ yymsp[-3].minor.yy490 = createVarLenDataType(TSDB_DATA_TYPE_VARCHAR, &yymsp[-1].minor.yy0); }
         break;
       case 154: /* type_name ::= MEDIUMBLOB */
-{ yymsp[0].minor.yy450 = createDataType(TSDB_DATA_TYPE_MEDIUMBLOB); }
+{ yymsp[0].minor.yy490 = createDataType(TSDB_DATA_TYPE_MEDIUMBLOB); }
         break;
       case 155: /* type_name ::= BLOB */
-{ yymsp[0].minor.yy450 = createDataType(TSDB_DATA_TYPE_BLOB); }
+{ yymsp[0].minor.yy490 = createDataType(TSDB_DATA_TYPE_BLOB); }
         break;
       case 156: /* type_name ::= VARBINARY NK_LP NK_INTEGER NK_RP */
-{ yymsp[-3].minor.yy450 = createVarLenDataType(TSDB_DATA_TYPE_VARBINARY, &yymsp[-1].minor.yy0); }
+{ yymsp[-3].minor.yy490 = createVarLenDataType(TSDB_DATA_TYPE_VARBINARY, &yymsp[-1].minor.yy0); }
         break;
       case 157: /* type_name ::= DECIMAL */
-{ yymsp[0].minor.yy450 = createDataType(TSDB_DATA_TYPE_DECIMAL); }
+{ yymsp[0].minor.yy490 = createDataType(TSDB_DATA_TYPE_DECIMAL); }
         break;
       case 158: /* type_name ::= DECIMAL NK_LP NK_INTEGER NK_RP */
-{ yymsp[-3].minor.yy450 = createDataType(TSDB_DATA_TYPE_DECIMAL); }
+{ yymsp[-3].minor.yy490 = createDataType(TSDB_DATA_TYPE_DECIMAL); }
         break;
       case 159: /* type_name ::= DECIMAL NK_LP NK_INTEGER NK_COMMA NK_INTEGER NK_RP */
-{ yymsp[-5].minor.yy450 = createDataType(TSDB_DATA_TYPE_DECIMAL); }
+{ yymsp[-5].minor.yy490 = createDataType(TSDB_DATA_TYPE_DECIMAL); }
         break;
       case 161: /* tags_def_opt ::= tags_def */
-      case 341: /* star_func_para_list ::= other_para_list */ yytestcase(yyruleno==341);
-      case 396: /* select_list ::= select_sublist */ yytestcase(yyruleno==396);
-{ yylhsminor.yy236 = yymsp[0].minor.yy236; }
-  yymsp[0].minor.yy236 = yylhsminor.yy236;
+      case 337: /* star_func_para_list ::= other_para_list */ yytestcase(yyruleno==337);
+      case 392: /* select_list ::= select_sublist */ yytestcase(yyruleno==392);
+{ yylhsminor.yy670 = yymsp[0].minor.yy670; }
+  yymsp[0].minor.yy670 = yylhsminor.yy670;
         break;
       case 162: /* tags_def ::= TAGS NK_LP column_def_list NK_RP */
-{ yymsp[-3].minor.yy236 = yymsp[-1].minor.yy236; }
+{ yymsp[-3].minor.yy670 = yymsp[-1].minor.yy670; }
         break;
       case 163: /* table_options ::= */
-{ yymsp[1].minor.yy636 = createDefaultTableOptions(pCxt); }
+{ yymsp[1].minor.yy686 = createDefaultTableOptions(pCxt); }
         break;
       case 164: /* table_options ::= table_options COMMENT NK_STRING */
-{ yylhsminor.yy636 = setTableOption(pCxt, yymsp[-2].minor.yy636, TABLE_OPTION_COMMENT, &yymsp[0].minor.yy0); }
-  yymsp[-2].minor.yy636 = yylhsminor.yy636;
-        break;
-      case 165: /* table_options ::= table_options DELAY NK_INTEGER */
-{ yylhsminor.yy636 = setTableOption(pCxt, yymsp[-2].minor.yy636, TABLE_OPTION_DELAY, &yymsp[0].minor.yy0); }
-  yymsp[-2].minor.yy636 = yylhsminor.yy636;
+{ yylhsminor.yy686 = setTableOption(pCxt, yymsp[-2].minor.yy686, TABLE_OPTION_COMMENT, &yymsp[0].minor.yy0); }
+  yymsp[-2].minor.yy686 = yylhsminor.yy686;
         break;
-      case 166: /* table_options ::= table_options FILE_FACTOR NK_FLOAT */
-{ yylhsminor.yy636 = setTableOption(pCxt, yymsp[-2].minor.yy636, TABLE_OPTION_FILE_FACTOR, &yymsp[0].minor.yy0); }
-  yymsp[-2].minor.yy636 = yylhsminor.yy636;
+      case 165: /* table_options ::= table_options FILE_FACTOR NK_FLOAT */
+{ yylhsminor.yy686 = setTableOption(pCxt, yymsp[-2].minor.yy686, TABLE_OPTION_FILE_FACTOR, &yymsp[0].minor.yy0); }
+  yymsp[-2].minor.yy686 = yylhsminor.yy686;
         break;
-      case 167: /* table_options ::= table_options ROLLUP NK_LP func_name_list NK_RP */
-{ yylhsminor.yy636 = setTableOption(pCxt, yymsp[-4].minor.yy636, TABLE_OPTION_ROLLUP, yymsp[-1].minor.yy236); }
-  yymsp[-4].minor.yy636 = yylhsminor.yy636;
+      case 166: /* table_options ::= table_options ROLLUP NK_LP func_name_list NK_RP */
+{ yylhsminor.yy686 = setTableOption(pCxt, yymsp[-4].minor.yy686, TABLE_OPTION_ROLLUP, yymsp[-1].minor.yy670); }
+  yymsp[-4].minor.yy686 = yylhsminor.yy686;
         break;
-      case 168: /* table_options ::= table_options TTL NK_INTEGER */
-{ yylhsminor.yy636 = setTableOption(pCxt, yymsp[-2].minor.yy636, TABLE_OPTION_TTL, &yymsp[0].minor.yy0); }
-  yymsp[-2].minor.yy636 = yylhsminor.yy636;
+      case 167: /* table_options ::= table_options TTL NK_INTEGER */
+{ yylhsminor.yy686 = setTableOption(pCxt, yymsp[-2].minor.yy686, TABLE_OPTION_TTL, &yymsp[0].minor.yy0); }
+  yymsp[-2].minor.yy686 = yylhsminor.yy686;
         break;
-      case 169: /* table_options ::= table_options SMA NK_LP col_name_list NK_RP */
-{ yylhsminor.yy636 = setTableOption(pCxt, yymsp[-4].minor.yy636, TABLE_OPTION_SMA, yymsp[-1].minor.yy236); }
-  yymsp[-4].minor.yy636 = yylhsminor.yy636;
+      case 168: /* table_options ::= table_options SMA NK_LP col_name_list NK_RP */
+{ yylhsminor.yy686 = setTableOption(pCxt, yymsp[-4].minor.yy686, TABLE_OPTION_SMA, yymsp[-1].minor.yy670); }
+  yymsp[-4].minor.yy686 = yylhsminor.yy686;
         break;
-      case 170: /* alter_table_options ::= alter_table_option */
-{ yylhsminor.yy636 = createAlterTableOptions(pCxt); yylhsminor.yy636 = setTableOption(pCxt, yylhsminor.yy636, yymsp[0].minor.yy25.type, &yymsp[0].minor.yy25.val); }
-  yymsp[0].minor.yy636 = yylhsminor.yy636;
+      case 169: /* alter_table_options ::= alter_table_option */
+{ yylhsminor.yy686 = createAlterTableOptions(pCxt); yylhsminor.yy686 = setTableOption(pCxt, yylhsminor.yy686, yymsp[0].minor.yy53.type, &yymsp[0].minor.yy53.val); }
+  yymsp[0].minor.yy686 = yylhsminor.yy686;
         break;
-      case 171: /* alter_table_options ::= alter_table_options alter_table_option */
-{ yylhsminor.yy636 = setTableOption(pCxt, yymsp[-1].minor.yy636, yymsp[0].minor.yy25.type, &yymsp[0].minor.yy25.val); }
-  yymsp[-1].minor.yy636 = yylhsminor.yy636;
+      case 170: /* alter_table_options ::= alter_table_options alter_table_option */
+{ yylhsminor.yy686 = setTableOption(pCxt, yymsp[-1].minor.yy686, yymsp[0].minor.yy53.type, &yymsp[0].minor.yy53.val); }
+  yymsp[-1].minor.yy686 = yylhsminor.yy686;
         break;
-      case 172: /* alter_table_option ::= COMMENT NK_STRING */
-{ yymsp[-1].minor.yy25.type = TABLE_OPTION_COMMENT; yymsp[-1].minor.yy25.val = yymsp[0].minor.yy0; }
+      case 171: /* alter_table_option ::= COMMENT NK_STRING */
+{ yymsp[-1].minor.yy53.type = TABLE_OPTION_COMMENT; yymsp[-1].minor.yy53.val = yymsp[0].minor.yy0; }
         break;
-      case 173: /* alter_table_option ::= TTL NK_INTEGER */
-{ yymsp[-1].minor.yy25.type = TABLE_OPTION_TTL; yymsp[-1].minor.yy25.val = yymsp[0].minor.yy0; }
+      case 172: /* alter_table_option ::= TTL NK_INTEGER */
+{ yymsp[-1].minor.yy53.type = TABLE_OPTION_TTL; yymsp[-1].minor.yy53.val = yymsp[0].minor.yy0; }
         break;
-      case 176: /* col_name ::= column_name */
-{ yylhsminor.yy636 = createColumnNode(pCxt, NULL, &yymsp[0].minor.yy53); }
-  yymsp[0].minor.yy636 = yylhsminor.yy636;
+      case 175: /* col_name ::= column_name */
+{ yylhsminor.yy686 = createColumnNode(pCxt, NULL, &yymsp[0].minor.yy113); }
+  yymsp[0].minor.yy686 = yylhsminor.yy686;
         break;
-      case 177: /* cmd ::= SHOW DNODES */
+      case 176: /* cmd ::= SHOW DNODES */
 { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_DNODES_STMT, NULL, NULL); }
         break;
-      case 178: /* cmd ::= SHOW USERS */
+      case 177: /* cmd ::= SHOW USERS */
 { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_USERS_STMT, NULL, NULL); }
         break;
-      case 179: /* cmd ::= SHOW DATABASES */
+      case 178: /* cmd ::= SHOW DATABASES */
 { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_DATABASES_STMT, NULL, NULL); }
         break;
-      case 180: /* cmd ::= SHOW db_name_cond_opt TABLES like_pattern_opt */
-{ pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_TABLES_STMT, yymsp[-2].minor.yy636, yymsp[0].minor.yy636); }
+      case 179: /* cmd ::= SHOW db_name_cond_opt TABLES like_pattern_opt */
+{ pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_TABLES_STMT, yymsp[-2].minor.yy686, yymsp[0].minor.yy686); }
         break;
-      case 181: /* cmd ::= SHOW db_name_cond_opt STABLES like_pattern_opt */
-{ pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_STABLES_STMT, yymsp[-2].minor.yy636, yymsp[0].minor.yy636); }
+      case 180: /* cmd ::= SHOW db_name_cond_opt STABLES like_pattern_opt */
+{ pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_STABLES_STMT, yymsp[-2].minor.yy686, yymsp[0].minor.yy686); }
         break;
-      case 182: /* cmd ::= SHOW db_name_cond_opt VGROUPS */
-{ pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_VGROUPS_STMT, yymsp[-1].minor.yy636, NULL); }
+      case 181: /* cmd ::= SHOW db_name_cond_opt VGROUPS */
+{ pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_VGROUPS_STMT, yymsp[-1].minor.yy686, NULL); }
         break;
-      case 183: /* cmd ::= SHOW MNODES */
+      case 182: /* cmd ::= SHOW MNODES */
 { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_MNODES_STMT, NULL, NULL); }
         break;
-      case 184: /* cmd ::= SHOW MODULES */
+      case 183: /* cmd ::= SHOW MODULES */
 { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_MODULES_STMT, NULL, NULL); }
         break;
-      case 185: /* cmd ::= SHOW QNODES */
+      case 184: /* cmd ::= SHOW QNODES */
 { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_QNODES_STMT, NULL, NULL); }
         break;
-      case 186: /* cmd ::= SHOW FUNCTIONS */
+      case 185: /* cmd ::= SHOW FUNCTIONS */
 { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_FUNCTIONS_STMT, NULL, NULL); }
         break;
-      case 187: /* cmd ::= SHOW INDEXES FROM table_name_cond from_db_opt */
-{ pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_INDEXES_STMT, yymsp[-1].minor.yy636, yymsp[0].minor.yy636); }
+      case 186: /* cmd ::= SHOW INDEXES FROM table_name_cond from_db_opt */
+{ pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_INDEXES_STMT, yymsp[-1].minor.yy686, yymsp[0].minor.yy686); }
         break;
-      case 188: /* cmd ::= SHOW STREAMS */
+      case 187: /* cmd ::= SHOW STREAMS */
 { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_STREAMS_STMT, NULL, NULL); }
         break;
-      case 189: /* cmd ::= SHOW ACCOUNTS */
+      case 188: /* cmd ::= SHOW ACCOUNTS */
 { pCxt->errCode = generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_EXPRIE_STATEMENT); }
         break;
-      case 190: /* cmd ::= SHOW APPS */
+      case 189: /* cmd ::= SHOW APPS */
 { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_APPS_STMT, NULL, NULL); }
         break;
-      case 191: /* cmd ::= SHOW CONNECTIONS */
+      case 190: /* cmd ::= SHOW CONNECTIONS */
 { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_CONNECTIONS_STMT, NULL, NULL); }
         break;
-      case 192: /* cmd ::= SHOW LICENCE */
-      case 193: /* cmd ::= SHOW GRANTS */ yytestcase(yyruleno==193);
+      case 191: /* cmd ::= SHOW LICENCE */
+      case 192: /* cmd ::= SHOW GRANTS */ yytestcase(yyruleno==192);
 { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_LICENCE_STMT, NULL, NULL); }
         break;
-      case 194: /* cmd ::= SHOW CREATE DATABASE db_name */
-{ pCxt->pRootNode = createShowCreateDatabaseStmt(pCxt, &yymsp[0].minor.yy53); }
+      case 193: /* cmd ::= SHOW CREATE DATABASE db_name */
+{ pCxt->pRootNode = createShowCreateDatabaseStmt(pCxt, &yymsp[0].minor.yy113); }
         break;
-      case 195: /* cmd ::= SHOW CREATE TABLE full_table_name */
-{ pCxt->pRootNode = createShowCreateTableStmt(pCxt, QUERY_NODE_SHOW_CREATE_TABLE_STMT, yymsp[0].minor.yy636); }
+      case 194: /* cmd ::= SHOW CREATE TABLE full_table_name */
+{ pCxt->pRootNode = createShowCreateTableStmt(pCxt, QUERY_NODE_SHOW_CREATE_TABLE_STMT, yymsp[0].minor.yy686); }
         break;
-      case 196: /* cmd ::= SHOW CREATE STABLE full_table_name */
-{ pCxt->pRootNode = createShowCreateTableStmt(pCxt, QUERY_NODE_SHOW_CREATE_STABLE_STMT, yymsp[0].minor.yy636); }
+      case 195: /* cmd ::= SHOW CREATE STABLE full_table_name */
+{ pCxt->pRootNode = createShowCreateTableStmt(pCxt, QUERY_NODE_SHOW_CREATE_STABLE_STMT, yymsp[0].minor.yy686); }
         break;
-      case 197: /* cmd ::= SHOW QUERIES */
+      case 196: /* cmd ::= SHOW QUERIES */
 { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_QUERIES_STMT, NULL, NULL); }
         break;
-      case 198: /* cmd ::= SHOW SCORES */
+      case 197: /* cmd ::= SHOW SCORES */
 { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_SCORES_STMT, NULL, NULL); }
         break;
-      case 199: /* cmd ::= SHOW TOPICS */
+      case 198: /* cmd ::= SHOW TOPICS */
 { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_TOPICS_STMT, NULL, NULL); }
         break;
-      case 200: /* cmd ::= SHOW VARIABLES */
+      case 199: /* cmd ::= SHOW VARIABLES */
 { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_VARIABLE_STMT, NULL, NULL); }
         break;
-      case 201: /* cmd ::= SHOW BNODES */
+      case 200: /* cmd ::= SHOW BNODES */
 { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_BNODES_STMT, NULL, NULL); }
         break;
-      case 202: /* cmd ::= SHOW SNODES */
+      case 201: /* cmd ::= SHOW SNODES */
 { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_SNODES_STMT, NULL, NULL); }
         break;
-      case 203: /* cmd ::= SHOW CLUSTER */
+      case 202: /* cmd ::= SHOW CLUSTER */
 { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_CLUSTER_STMT, NULL, NULL); }
         break;
-      case 204: /* cmd ::= SHOW TRANSACTIONS */
+      case 203: /* cmd ::= SHOW TRANSACTIONS */
 { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_TRANSACTIONS_STMT, NULL, NULL); }
         break;
-      case 205: /* db_name_cond_opt ::= */
-      case 210: /* from_db_opt ::= */ yytestcase(yyruleno==210);
-{ yymsp[1].minor.yy636 = createDefaultDatabaseCondValue(pCxt); }
-        break;
-      case 206: /* db_name_cond_opt ::= db_name NK_DOT */
-{ yylhsminor.yy636 = createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[-1].minor.yy53); }
-  yymsp[-1].minor.yy636 = yylhsminor.yy636;
-        break;
-      case 207: /* like_pattern_opt ::= */
-      case 218: /* index_options ::= */ yytestcase(yyruleno==218);
-      case 250: /* into_opt ::= */ yytestcase(yyruleno==250);
-      case 403: /* where_clause_opt ::= */ yytestcase(yyruleno==403);
-      case 407: /* twindow_clause_opt ::= */ yytestcase(yyruleno==407);
-      case 412: /* sliding_opt ::= */ yytestcase(yyruleno==412);
-      case 414: /* fill_opt ::= */ yytestcase(yyruleno==414);
-      case 426: /* having_clause_opt ::= */ yytestcase(yyruleno==426);
-      case 436: /* slimit_clause_opt ::= */ yytestcase(yyruleno==436);
-      case 440: /* limit_clause_opt ::= */ yytestcase(yyruleno==440);
-{ yymsp[1].minor.yy636 = NULL; }
-        break;
-      case 208: /* like_pattern_opt ::= LIKE NK_STRING */
-{ yymsp[-1].minor.yy636 = createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[0].minor.yy0); }
+      case 204: /* db_name_cond_opt ::= */
+      case 209: /* from_db_opt ::= */ yytestcase(yyruleno==209);
+{ yymsp[1].minor.yy686 = createDefaultDatabaseCondValue(pCxt); }
         break;
-      case 209: /* table_name_cond ::= table_name */
-{ yylhsminor.yy636 = createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[0].minor.yy53); }
-  yymsp[0].minor.yy636 = yylhsminor.yy636;
+      case 205: /* db_name_cond_opt ::= db_name NK_DOT */
+{ yylhsminor.yy686 = createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[-1].minor.yy113); }
+  yymsp[-1].minor.yy686 = yylhsminor.yy686;
         break;
-      case 211: /* from_db_opt ::= FROM db_name */
-{ yymsp[-1].minor.yy636 = createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[0].minor.yy53); }
+      case 206: /* like_pattern_opt ::= */
+      case 217: /* index_options ::= */ yytestcase(yyruleno==217);
+      case 246: /* into_opt ::= */ yytestcase(yyruleno==246);
+      case 399: /* where_clause_opt ::= */ yytestcase(yyruleno==399);
+      case 403: /* twindow_clause_opt ::= */ yytestcase(yyruleno==403);
+      case 408: /* sliding_opt ::= */ yytestcase(yyruleno==408);
+      case 410: /* fill_opt ::= */ yytestcase(yyruleno==410);
+      case 422: /* having_clause_opt ::= */ yytestcase(yyruleno==422);
+      case 432: /* slimit_clause_opt ::= */ yytestcase(yyruleno==432);
+      case 436: /* limit_clause_opt ::= */ yytestcase(yyruleno==436);
+{ yymsp[1].minor.yy686 = NULL; }
         break;
-      case 214: /* func_name ::= function_name */
-{ yylhsminor.yy636 = createFunctionNode(pCxt, &yymsp[0].minor.yy53, NULL); }
-  yymsp[0].minor.yy636 = yylhsminor.yy636;
+      case 207: /* like_pattern_opt ::= LIKE NK_STRING */
+{ yymsp[-1].minor.yy686 = createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[0].minor.yy0); }
         break;
-      case 215: /* cmd ::= CREATE SMA INDEX not_exists_opt index_name ON table_name index_options */
-{ pCxt->pRootNode = createCreateIndexStmt(pCxt, INDEX_TYPE_SMA, yymsp[-4].minor.yy603, &yymsp[-3].minor.yy53, &yymsp[-1].minor.yy53, NULL, yymsp[0].minor.yy636); }
+      case 208: /* table_name_cond ::= table_name */
+{ yylhsminor.yy686 = createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[0].minor.yy113); }
+  yymsp[0].minor.yy686 = yylhsminor.yy686;
         break;
-      case 216: /* cmd ::= CREATE FULLTEXT INDEX not_exists_opt index_name ON table_name NK_LP col_name_list NK_RP */
-{ pCxt->pRootNode = createCreateIndexStmt(pCxt, INDEX_TYPE_FULLTEXT, yymsp[-6].minor.yy603, &yymsp[-5].minor.yy53, &yymsp[-3].minor.yy53, yymsp[-1].minor.yy236, NULL); }
+      case 210: /* from_db_opt ::= FROM db_name */
+{ yymsp[-1].minor.yy686 = createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[0].minor.yy113); }
         break;
-      case 217: /* cmd ::= DROP INDEX exists_opt index_name ON table_name */
-{ pCxt->pRootNode = createDropIndexStmt(pCxt, yymsp[-3].minor.yy603, &yymsp[-2].minor.yy53, &yymsp[0].minor.yy53); }
+      case 213: /* func_name ::= function_name */
+{ yylhsminor.yy686 = createFunctionNode(pCxt, &yymsp[0].minor.yy113, NULL); }
+  yymsp[0].minor.yy686 = yylhsminor.yy686;
         break;
-      case 219: /* index_options ::= FUNCTION NK_LP func_list NK_RP INTERVAL NK_LP duration_literal NK_RP sliding_opt */
-{ yymsp[-8].minor.yy636 = createIndexOption(pCxt, yymsp[-6].minor.yy236, releaseRawExprNode(pCxt, yymsp[-2].minor.yy636), NULL, yymsp[0].minor.yy636); }
+      case 214: /* cmd ::= CREATE SMA INDEX not_exists_opt index_name ON table_name index_options */
+{ pCxt->pRootNode = createCreateIndexStmt(pCxt, INDEX_TYPE_SMA, yymsp[-4].minor.yy131, &yymsp[-3].minor.yy113, &yymsp[-1].minor.yy113, NULL, yymsp[0].minor.yy686); }
         break;
-      case 220: /* index_options ::= FUNCTION NK_LP func_list NK_RP INTERVAL NK_LP duration_literal NK_COMMA duration_literal NK_RP sliding_opt */
-{ yymsp[-10].minor.yy636 = createIndexOption(pCxt, yymsp[-8].minor.yy236, releaseRawExprNode(pCxt, yymsp[-4].minor.yy636), releaseRawExprNode(pCxt, yymsp[-2].minor.yy636), yymsp[0].minor.yy636); }
+      case 215: /* cmd ::= CREATE FULLTEXT INDEX not_exists_opt index_name ON table_name NK_LP col_name_list NK_RP */
+{ pCxt->pRootNode = createCreateIndexStmt(pCxt, INDEX_TYPE_FULLTEXT, yymsp[-6].minor.yy131, &yymsp[-5].minor.yy113, &yymsp[-3].minor.yy113, yymsp[-1].minor.yy670, NULL); }
         break;
-      case 223: /* func ::= function_name NK_LP expression_list NK_RP */
-{ yylhsminor.yy636 = createFunctionNode(pCxt, &yymsp[-3].minor.yy53, yymsp[-1].minor.yy236); }
-  yymsp[-3].minor.yy636 = yylhsminor.yy636;
+      case 216: /* cmd ::= DROP INDEX exists_opt index_name ON table_name */
+{ pCxt->pRootNode = createDropIndexStmt(pCxt, yymsp[-3].minor.yy131, &yymsp[-2].minor.yy113, &yymsp[0].minor.yy113); }
         break;
-      case 224: /* cmd ::= CREATE TOPIC not_exists_opt topic_name topic_options AS query_expression */
-{ pCxt->pRootNode = createCreateTopicStmt(pCxt, yymsp[-4].minor.yy603, &yymsp[-3].minor.yy53, yymsp[0].minor.yy636, NULL, yymsp[-2].minor.yy636); }
+      case 218: /* index_options ::= FUNCTION NK_LP func_list NK_RP INTERVAL NK_LP duration_literal NK_RP sliding_opt */
+{ yymsp[-8].minor.yy686 = createIndexOption(pCxt, yymsp[-6].minor.yy670, releaseRawExprNode(pCxt, yymsp[-2].minor.yy686), NULL, yymsp[0].minor.yy686); }
         break;
-      case 225: /* cmd ::= CREATE TOPIC not_exists_opt topic_name topic_options AS db_name */
-{ pCxt->pRootNode = createCreateTopicStmt(pCxt, yymsp[-4].minor.yy603, &yymsp[-3].minor.yy53, NULL, &yymsp[0].minor.yy53, yymsp[-2].minor.yy636); }
+      case 219: /* index_options ::= FUNCTION NK_LP func_list NK_RP INTERVAL NK_LP duration_literal NK_COMMA duration_literal NK_RP sliding_opt */
+{ yymsp[-10].minor.yy686 = createIndexOption(pCxt, yymsp[-8].minor.yy670, releaseRawExprNode(pCxt, yymsp[-4].minor.yy686), releaseRawExprNode(pCxt, yymsp[-2].minor.yy686), yymsp[0].minor.yy686); }
         break;
-      case 226: /* cmd ::= DROP TOPIC exists_opt topic_name */
-{ pCxt->pRootNode = createDropTopicStmt(pCxt, yymsp[-1].minor.yy603, &yymsp[0].minor.yy53); }
+      case 222: /* func ::= function_name NK_LP expression_list NK_RP */
+{ yylhsminor.yy686 = createFunctionNode(pCxt, &yymsp[-3].minor.yy113, yymsp[-1].minor.yy670); }
+  yymsp[-3].minor.yy686 = yylhsminor.yy686;
         break;
-      case 227: /* cmd ::= DROP CGROUP exists_opt cgroup_name ON topic_name */
-{ pCxt->pRootNode = createDropCGroupStmt(pCxt, yymsp[-3].minor.yy603, &yymsp[-2].minor.yy53, &yymsp[0].minor.yy53); }
+      case 223: /* cmd ::= CREATE TOPIC not_exists_opt topic_name AS query_expression */
+{ pCxt->pRootNode = createCreateTopicStmt(pCxt, yymsp[-3].minor.yy131, &yymsp[-2].minor.yy113, yymsp[0].minor.yy686, NULL, NULL); }
         break;
-      case 228: /* topic_options ::= */
-{ yymsp[1].minor.yy636 = createTopicOptions(pCxt); }
+      case 224: /* cmd ::= CREATE TOPIC not_exists_opt topic_name AS DATABASE db_name */
+{ pCxt->pRootNode = createCreateTopicStmt(pCxt, yymsp[-4].minor.yy131, &yymsp[-3].minor.yy113, NULL, &yymsp[0].minor.yy113, NULL); }
         break;
-      case 229: /* topic_options ::= topic_options WITH TABLE */
-{ ((STopicOptions*)yymsp[-2].minor.yy636)->withTable = true; yylhsminor.yy636 = yymsp[-2].minor.yy636; }
-  yymsp[-2].minor.yy636 = yylhsminor.yy636;
+      case 225: /* cmd ::= CREATE TOPIC not_exists_opt topic_name AS STABLE full_table_name */
+{ pCxt->pRootNode = createCreateTopicStmt(pCxt, yymsp[-4].minor.yy131, &yymsp[-3].minor.yy113, NULL, NULL, yymsp[0].minor.yy686); }
         break;
-      case 230: /* topic_options ::= topic_options WITH SCHEMA */
-{ ((STopicOptions*)yymsp[-2].minor.yy636)->withSchema = true; yylhsminor.yy636 = yymsp[-2].minor.yy636; }
-  yymsp[-2].minor.yy636 = yylhsminor.yy636;
+      case 226: /* cmd ::= DROP TOPIC exists_opt topic_name */
+{ pCxt->pRootNode = createDropTopicStmt(pCxt, yymsp[-1].minor.yy131, &yymsp[0].minor.yy113); }
         break;
-      case 231: /* topic_options ::= topic_options WITH TAG */
-{ ((STopicOptions*)yymsp[-2].minor.yy636)->withTag = true; yylhsminor.yy636 = yymsp[-2].minor.yy636; }
-  yymsp[-2].minor.yy636 = yylhsminor.yy636;
+      case 227: /* cmd ::= DROP CONSUMER GROUP exists_opt cgroup_name ON topic_name */
+{ pCxt->pRootNode = createDropCGroupStmt(pCxt, yymsp[-3].minor.yy131, &yymsp[-2].minor.yy113, &yymsp[0].minor.yy113); }
         break;
-      case 232: /* cmd ::= DESC full_table_name */
-      case 233: /* cmd ::= DESCRIBE full_table_name */ yytestcase(yyruleno==233);
-{ pCxt->pRootNode = createDescribeStmt(pCxt, yymsp[0].minor.yy636); }
+      case 228: /* cmd ::= DESC full_table_name */
+      case 229: /* cmd ::= DESCRIBE full_table_name */ yytestcase(yyruleno==229);
+{ pCxt->pRootNode = createDescribeStmt(pCxt, yymsp[0].minor.yy686); }
         break;
-      case 234: /* cmd ::= RESET QUERY CACHE */
+      case 230: /* cmd ::= RESET QUERY CACHE */
 { pCxt->pRootNode = createResetQueryCacheStmt(pCxt); }
         break;
-      case 235: /* cmd ::= EXPLAIN analyze_opt explain_options query_expression */
-{ pCxt->pRootNode = createExplainStmt(pCxt, yymsp[-2].minor.yy603, yymsp[-1].minor.yy636, yymsp[0].minor.yy636); }
+      case 231: /* cmd ::= EXPLAIN analyze_opt explain_options query_expression */
+{ pCxt->pRootNode = createExplainStmt(pCxt, yymsp[-2].minor.yy131, yymsp[-1].minor.yy686, yymsp[0].minor.yy686); }
         break;
-      case 237: /* analyze_opt ::= ANALYZE */
-      case 245: /* agg_func_opt ::= AGGREGATE */ yytestcase(yyruleno==245);
-      case 393: /* set_quantifier_opt ::= DISTINCT */ yytestcase(yyruleno==393);
-{ yymsp[0].minor.yy603 = true; }
+      case 233: /* analyze_opt ::= ANALYZE */
+      case 241: /* agg_func_opt ::= AGGREGATE */ yytestcase(yyruleno==241);
+      case 389: /* set_quantifier_opt ::= DISTINCT */ yytestcase(yyruleno==389);
+{ yymsp[0].minor.yy131 = true; }
         break;
-      case 238: /* explain_options ::= */
-{ yymsp[1].minor.yy636 = createDefaultExplainOptions(pCxt); }
+      case 234: /* explain_options ::= */
+{ yymsp[1].minor.yy686 = createDefaultExplainOptions(pCxt); }
         break;
-      case 239: /* explain_options ::= explain_options VERBOSE NK_BOOL */
-{ yylhsminor.yy636 = setExplainVerbose(pCxt, yymsp[-2].minor.yy636, &yymsp[0].minor.yy0); }
-  yymsp[-2].minor.yy636 = yylhsminor.yy636;
+      case 235: /* explain_options ::= explain_options VERBOSE NK_BOOL */
+{ yylhsminor.yy686 = setExplainVerbose(pCxt, yymsp[-2].minor.yy686, &yymsp[0].minor.yy0); }
+  yymsp[-2].minor.yy686 = yylhsminor.yy686;
         break;
-      case 240: /* explain_options ::= explain_options RATIO NK_FLOAT */
-{ yylhsminor.yy636 = setExplainRatio(pCxt, yymsp[-2].minor.yy636, &yymsp[0].minor.yy0); }
-  yymsp[-2].minor.yy636 = yylhsminor.yy636;
+      case 236: /* explain_options ::= explain_options RATIO NK_FLOAT */
+{ yylhsminor.yy686 = setExplainRatio(pCxt, yymsp[-2].minor.yy686, &yymsp[0].minor.yy0); }
+  yymsp[-2].minor.yy686 = yylhsminor.yy686;
         break;
-      case 241: /* cmd ::= COMPACT VNODES IN NK_LP integer_list NK_RP */
-{ pCxt->pRootNode = createCompactStmt(pCxt, yymsp[-1].minor.yy236); }
+      case 237: /* cmd ::= COMPACT VNODES IN NK_LP integer_list NK_RP */
+{ pCxt->pRootNode = createCompactStmt(pCxt, yymsp[-1].minor.yy670); }
         break;
-      case 242: /* cmd ::= CREATE agg_func_opt FUNCTION not_exists_opt function_name AS NK_STRING OUTPUTTYPE type_name bufsize_opt */
-{ pCxt->pRootNode = createCreateFunctionStmt(pCxt, yymsp[-6].minor.yy603, yymsp[-8].minor.yy603, &yymsp[-5].minor.yy53, &yymsp[-3].minor.yy0, yymsp[-1].minor.yy450, yymsp[0].minor.yy158); }
+      case 238: /* cmd ::= CREATE agg_func_opt FUNCTION not_exists_opt function_name AS NK_STRING OUTPUTTYPE type_name bufsize_opt */
+{ pCxt->pRootNode = createCreateFunctionStmt(pCxt, yymsp[-6].minor.yy131, yymsp[-8].minor.yy131, &yymsp[-5].minor.yy113, &yymsp[-3].minor.yy0, yymsp[-1].minor.yy490, yymsp[0].minor.yy550); }
         break;
-      case 243: /* cmd ::= DROP FUNCTION exists_opt function_name */
-{ pCxt->pRootNode = createDropFunctionStmt(pCxt, yymsp[-1].minor.yy603, &yymsp[0].minor.yy53); }
+      case 239: /* cmd ::= DROP FUNCTION exists_opt function_name */
+{ pCxt->pRootNode = createDropFunctionStmt(pCxt, yymsp[-1].minor.yy131, &yymsp[0].minor.yy113); }
         break;
-      case 246: /* bufsize_opt ::= */
-{ yymsp[1].minor.yy158 = 0; }
+      case 242: /* bufsize_opt ::= */
+{ yymsp[1].minor.yy550 = 0; }
         break;
-      case 247: /* bufsize_opt ::= BUFSIZE NK_INTEGER */
-{ yymsp[-1].minor.yy158 = taosStr2Int32(yymsp[0].minor.yy0.z, NULL, 10); }
+      case 243: /* bufsize_opt ::= BUFSIZE NK_INTEGER */
+{ yymsp[-1].minor.yy550 = taosStr2Int32(yymsp[0].minor.yy0.z, NULL, 10); }
         break;
-      case 248: /* cmd ::= CREATE STREAM not_exists_opt stream_name stream_options into_opt AS query_expression */
-{ pCxt->pRootNode = createCreateStreamStmt(pCxt, yymsp[-5].minor.yy603, &yymsp[-4].minor.yy53, yymsp[-2].minor.yy636, yymsp[-3].minor.yy636, yymsp[0].minor.yy636); }
+      case 244: /* cmd ::= CREATE STREAM not_exists_opt stream_name stream_options into_opt AS query_expression */
+{ pCxt->pRootNode = createCreateStreamStmt(pCxt, yymsp[-5].minor.yy131, &yymsp[-4].minor.yy113, yymsp[-2].minor.yy686, yymsp[-3].minor.yy686, yymsp[0].minor.yy686); }
         break;
-      case 249: /* cmd ::= DROP STREAM exists_opt stream_name */
-{ pCxt->pRootNode = createDropStreamStmt(pCxt, yymsp[-1].minor.yy603, &yymsp[0].minor.yy53); }
+      case 245: /* cmd ::= DROP STREAM exists_opt stream_name */
+{ pCxt->pRootNode = createDropStreamStmt(pCxt, yymsp[-1].minor.yy131, &yymsp[0].minor.yy113); }
         break;
-      case 251: /* into_opt ::= INTO full_table_name */
-      case 374: /* from_clause ::= FROM table_reference_list */ yytestcase(yyruleno==374);
-      case 404: /* where_clause_opt ::= WHERE search_condition */ yytestcase(yyruleno==404);
-      case 427: /* having_clause_opt ::= HAVING search_condition */ yytestcase(yyruleno==427);
-{ yymsp[-1].minor.yy636 = yymsp[0].minor.yy636; }
+      case 247: /* into_opt ::= INTO full_table_name */
+      case 370: /* from_clause ::= FROM table_reference_list */ yytestcase(yyruleno==370);
+      case 400: /* where_clause_opt ::= WHERE search_condition */ yytestcase(yyruleno==400);
+      case 423: /* having_clause_opt ::= HAVING search_condition */ yytestcase(yyruleno==423);
+{ yymsp[-1].minor.yy686 = yymsp[0].minor.yy686; }
         break;
-      case 252: /* stream_options ::= */
-{ yymsp[1].minor.yy636 = createStreamOptions(pCxt); }
+      case 248: /* stream_options ::= */
+{ yymsp[1].minor.yy686 = createStreamOptions(pCxt); }
         break;
-      case 253: /* stream_options ::= stream_options TRIGGER AT_ONCE */
-{ ((SStreamOptions*)yymsp[-2].minor.yy636)->triggerType = STREAM_TRIGGER_AT_ONCE; yylhsminor.yy636 = yymsp[-2].minor.yy636; }
-  yymsp[-2].minor.yy636 = yylhsminor.yy636;
+      case 249: /* stream_options ::= stream_options TRIGGER AT_ONCE */
+{ ((SStreamOptions*)yymsp[-2].minor.yy686)->triggerType = STREAM_TRIGGER_AT_ONCE; yylhsminor.yy686 = yymsp[-2].minor.yy686; }
+  yymsp[-2].minor.yy686 = yylhsminor.yy686;
         break;
-      case 254: /* stream_options ::= stream_options TRIGGER WINDOW_CLOSE */
-{ ((SStreamOptions*)yymsp[-2].minor.yy636)->triggerType = STREAM_TRIGGER_WINDOW_CLOSE; yylhsminor.yy636 = yymsp[-2].minor.yy636; }
-  yymsp[-2].minor.yy636 = yylhsminor.yy636;
+      case 250: /* stream_options ::= stream_options TRIGGER WINDOW_CLOSE */
+{ ((SStreamOptions*)yymsp[-2].minor.yy686)->triggerType = STREAM_TRIGGER_WINDOW_CLOSE; yylhsminor.yy686 = yymsp[-2].minor.yy686; }
+  yymsp[-2].minor.yy686 = yylhsminor.yy686;
         break;
-      case 255: /* stream_options ::= stream_options WATERMARK duration_literal */
-{ ((SStreamOptions*)yymsp[-2].minor.yy636)->pWatermark = releaseRawExprNode(pCxt, yymsp[0].minor.yy636); yylhsminor.yy636 = yymsp[-2].minor.yy636; }
-  yymsp[-2].minor.yy636 = yylhsminor.yy636;
+      case 251: /* stream_options ::= stream_options WATERMARK duration_literal */
+{ ((SStreamOptions*)yymsp[-2].minor.yy686)->pWatermark = releaseRawExprNode(pCxt, yymsp[0].minor.yy686); yylhsminor.yy686 = yymsp[-2].minor.yy686; }
+  yymsp[-2].minor.yy686 = yylhsminor.yy686;
         break;
-      case 256: /* cmd ::= KILL CONNECTION NK_INTEGER */
+      case 252: /* cmd ::= KILL CONNECTION NK_INTEGER */
 { pCxt->pRootNode = createKillStmt(pCxt, QUERY_NODE_KILL_CONNECTION_STMT, &yymsp[0].minor.yy0); }
         break;
-      case 257: /* cmd ::= KILL QUERY NK_INTEGER */
+      case 253: /* cmd ::= KILL QUERY NK_INTEGER */
 { pCxt->pRootNode = createKillStmt(pCxt, QUERY_NODE_KILL_QUERY_STMT, &yymsp[0].minor.yy0); }
         break;
-      case 258: /* cmd ::= KILL TRANSACTION NK_INTEGER */
+      case 254: /* cmd ::= KILL TRANSACTION NK_INTEGER */
 { pCxt->pRootNode = createKillStmt(pCxt, QUERY_NODE_KILL_TRANSACTION_STMT, &yymsp[0].minor.yy0); }
         break;
-      case 259: /* cmd ::= MERGE VGROUP NK_INTEGER NK_INTEGER */
+      case 255: /* cmd ::= MERGE VGROUP NK_INTEGER NK_INTEGER */
 { pCxt->pRootNode = createMergeVgroupStmt(pCxt, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy0); }
         break;
-      case 260: /* cmd ::= REDISTRIBUTE VGROUP NK_INTEGER dnode_list */
-{ pCxt->pRootNode = createRedistributeVgroupStmt(pCxt, &yymsp[-1].minor.yy0, yymsp[0].minor.yy236); }
+      case 256: /* cmd ::= REDISTRIBUTE VGROUP NK_INTEGER dnode_list */
+{ pCxt->pRootNode = createRedistributeVgroupStmt(pCxt, &yymsp[-1].minor.yy0, yymsp[0].minor.yy670); }
         break;
-      case 261: /* cmd ::= SPLIT VGROUP NK_INTEGER */
+      case 257: /* cmd ::= SPLIT VGROUP NK_INTEGER */
 { pCxt->pRootNode = createSplitVgroupStmt(pCxt, &yymsp[0].minor.yy0); }
         break;
-      case 262: /* dnode_list ::= DNODE NK_INTEGER */
-{ yymsp[-1].minor.yy236 = createNodeList(pCxt, createValueNode(pCxt, TSDB_DATA_TYPE_BIGINT, &yymsp[0].minor.yy0)); }
-        break;
-      case 264: /* cmd ::= SYNCDB db_name REPLICA */
-{ pCxt->pRootNode = createSyncdbStmt(pCxt, &yymsp[-1].minor.yy53); }
-        break;
-      case 266: /* literal ::= NK_INTEGER */
-{ yylhsminor.yy636 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createValueNode(pCxt, TSDB_DATA_TYPE_BIGINT, &yymsp[0].minor.yy0)); }
-  yymsp[0].minor.yy636 = yylhsminor.yy636;
-        break;
-      case 267: /* literal ::= NK_FLOAT */
-{ yylhsminor.yy636 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createValueNode(pCxt, TSDB_DATA_TYPE_DOUBLE, &yymsp[0].minor.yy0)); }
-  yymsp[0].minor.yy636 = yylhsminor.yy636;
-        break;
-      case 268: /* literal ::= NK_STRING */
-{ yylhsminor.yy636 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[0].minor.yy0)); }
-  yymsp[0].minor.yy636 = yylhsminor.yy636;
-        break;
-      case 269: /* literal ::= NK_BOOL */
-{ yylhsminor.yy636 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createValueNode(pCxt, TSDB_DATA_TYPE_BOOL, &yymsp[0].minor.yy0)); }
-  yymsp[0].minor.yy636 = yylhsminor.yy636;
-        break;
-      case 270: /* literal ::= TIMESTAMP NK_STRING */
-{ yylhsminor.yy636 = createRawExprNodeExt(pCxt, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy0, createValueNode(pCxt, TSDB_DATA_TYPE_TIMESTAMP, &yymsp[0].minor.yy0)); }
-  yymsp[-1].minor.yy636 = yylhsminor.yy636;
-        break;
-      case 271: /* literal ::= duration_literal */
-      case 281: /* signed_literal ::= signed */ yytestcase(yyruleno==281);
-      case 301: /* expression ::= literal */ yytestcase(yyruleno==301);
-      case 302: /* expression ::= pseudo_column */ yytestcase(yyruleno==302);
-      case 303: /* expression ::= column_reference */ yytestcase(yyruleno==303);
-      case 304: /* expression ::= function_expression */ yytestcase(yyruleno==304);
-      case 305: /* expression ::= subquery */ yytestcase(yyruleno==305);
-      case 330: /* function_expression ::= literal_func */ yytestcase(yyruleno==330);
-      case 366: /* boolean_value_expression ::= boolean_primary */ yytestcase(yyruleno==366);
-      case 370: /* boolean_primary ::= predicate */ yytestcase(yyruleno==370);
-      case 372: /* common_expression ::= expression */ yytestcase(yyruleno==372);
-      case 373: /* common_expression ::= boolean_value_expression */ yytestcase(yyruleno==373);
-      case 375: /* table_reference_list ::= table_reference */ yytestcase(yyruleno==375);
-      case 377: /* table_reference ::= table_primary */ yytestcase(yyruleno==377);
-      case 378: /* table_reference ::= joined_table */ yytestcase(yyruleno==378);
-      case 382: /* table_primary ::= parenthesized_joined_table */ yytestcase(yyruleno==382);
-      case 429: /* query_expression_body ::= query_primary */ yytestcase(yyruleno==429);
-      case 432: /* query_primary ::= query_specification */ yytestcase(yyruleno==432);
-{ yylhsminor.yy636 = yymsp[0].minor.yy636; }
-  yymsp[0].minor.yy636 = yylhsminor.yy636;
-        break;
-      case 272: /* literal ::= NULL */
-{ yylhsminor.yy636 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createValueNode(pCxt, TSDB_DATA_TYPE_NULL, &yymsp[0].minor.yy0)); }
-  yymsp[0].minor.yy636 = yylhsminor.yy636;
-        break;
-      case 273: /* literal ::= NK_QUESTION */
-{ yylhsminor.yy636 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createPlaceholderValueNode(pCxt, &yymsp[0].minor.yy0)); }
-  yymsp[0].minor.yy636 = yylhsminor.yy636;
-        break;
-      case 274: /* duration_literal ::= NK_VARIABLE */
-{ yylhsminor.yy636 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createDurationValueNode(pCxt, &yymsp[0].minor.yy0)); }
-  yymsp[0].minor.yy636 = yylhsminor.yy636;
-        break;
-      case 275: /* signed ::= NK_INTEGER */
-{ yylhsminor.yy636 = createValueNode(pCxt, TSDB_DATA_TYPE_BIGINT, &yymsp[0].minor.yy0); }
-  yymsp[0].minor.yy636 = yylhsminor.yy636;
-        break;
-      case 276: /* signed ::= NK_PLUS NK_INTEGER */
-{ yymsp[-1].minor.yy636 = createValueNode(pCxt, TSDB_DATA_TYPE_BIGINT, &yymsp[0].minor.yy0); }
-        break;
-      case 277: /* signed ::= NK_MINUS NK_INTEGER */
+      case 258: /* dnode_list ::= DNODE NK_INTEGER */
+{ yymsp[-1].minor.yy670 = createNodeList(pCxt, createValueNode(pCxt, TSDB_DATA_TYPE_BIGINT, &yymsp[0].minor.yy0)); }
+        break;
+      case 260: /* cmd ::= SYNCDB db_name REPLICA */
+{ pCxt->pRootNode = createSyncdbStmt(pCxt, &yymsp[-1].minor.yy113); }
+        break;
+      case 262: /* literal ::= NK_INTEGER */
+{ yylhsminor.yy686 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createValueNode(pCxt, TSDB_DATA_TYPE_BIGINT, &yymsp[0].minor.yy0)); }
+  yymsp[0].minor.yy686 = yylhsminor.yy686;
+        break;
+      case 263: /* literal ::= NK_FLOAT */
+{ yylhsminor.yy686 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createValueNode(pCxt, TSDB_DATA_TYPE_DOUBLE, &yymsp[0].minor.yy0)); }
+  yymsp[0].minor.yy686 = yylhsminor.yy686;
+        break;
+      case 264: /* literal ::= NK_STRING */
+{ yylhsminor.yy686 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[0].minor.yy0)); }
+  yymsp[0].minor.yy686 = yylhsminor.yy686;
+        break;
+      case 265: /* literal ::= NK_BOOL */
+{ yylhsminor.yy686 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createValueNode(pCxt, TSDB_DATA_TYPE_BOOL, &yymsp[0].minor.yy0)); }
+  yymsp[0].minor.yy686 = yylhsminor.yy686;
+        break;
+      case 266: /* literal ::= TIMESTAMP NK_STRING */
+{ yylhsminor.yy686 = createRawExprNodeExt(pCxt, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy0, createValueNode(pCxt, TSDB_DATA_TYPE_TIMESTAMP, &yymsp[0].minor.yy0)); }
+  yymsp[-1].minor.yy686 = yylhsminor.yy686;
+        break;
+      case 267: /* literal ::= duration_literal */
+      case 277: /* signed_literal ::= signed */ yytestcase(yyruleno==277);
+      case 297: /* expression ::= literal */ yytestcase(yyruleno==297);
+      case 298: /* expression ::= pseudo_column */ yytestcase(yyruleno==298);
+      case 299: /* expression ::= column_reference */ yytestcase(yyruleno==299);
+      case 300: /* expression ::= function_expression */ yytestcase(yyruleno==300);
+      case 301: /* expression ::= subquery */ yytestcase(yyruleno==301);
+      case 326: /* function_expression ::= literal_func */ yytestcase(yyruleno==326);
+      case 362: /* boolean_value_expression ::= boolean_primary */ yytestcase(yyruleno==362);
+      case 366: /* boolean_primary ::= predicate */ yytestcase(yyruleno==366);
+      case 368: /* common_expression ::= expression */ yytestcase(yyruleno==368);
+      case 369: /* common_expression ::= boolean_value_expression */ yytestcase(yyruleno==369);
+      case 371: /* table_reference_list ::= table_reference */ yytestcase(yyruleno==371);
+      case 373: /* table_reference ::= table_primary */ yytestcase(yyruleno==373);
+      case 374: /* table_reference ::= joined_table */ yytestcase(yyruleno==374);
+      case 378: /* table_primary ::= parenthesized_joined_table */ yytestcase(yyruleno==378);
+      case 425: /* query_expression_body ::= query_primary */ yytestcase(yyruleno==425);
+      case 428: /* query_primary ::= query_specification */ yytestcase(yyruleno==428);
+{ yylhsminor.yy686 = yymsp[0].minor.yy686; }
+  yymsp[0].minor.yy686 = yylhsminor.yy686;
+        break;
+      case 268: /* literal ::= NULL */
+{ yylhsminor.yy686 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createValueNode(pCxt, TSDB_DATA_TYPE_NULL, &yymsp[0].minor.yy0)); }
+  yymsp[0].minor.yy686 = yylhsminor.yy686;
+        break;
+      case 269: /* literal ::= NK_QUESTION */
+{ yylhsminor.yy686 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createPlaceholderValueNode(pCxt, &yymsp[0].minor.yy0)); }
+  yymsp[0].minor.yy686 = yylhsminor.yy686;
+        break;
+      case 270: /* duration_literal ::= NK_VARIABLE */
+{ yylhsminor.yy686 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createDurationValueNode(pCxt, &yymsp[0].minor.yy0)); }
+  yymsp[0].minor.yy686 = yylhsminor.yy686;
+        break;
+      case 271: /* signed ::= NK_INTEGER */
+{ yylhsminor.yy686 = createValueNode(pCxt, TSDB_DATA_TYPE_BIGINT, &yymsp[0].minor.yy0); }
+  yymsp[0].minor.yy686 = yylhsminor.yy686;
+        break;
+      case 272: /* signed ::= NK_PLUS NK_INTEGER */
+{ yymsp[-1].minor.yy686 = createValueNode(pCxt, TSDB_DATA_TYPE_BIGINT, &yymsp[0].minor.yy0); }
+        break;
+      case 273: /* signed ::= NK_MINUS NK_INTEGER */
 {
     SToken t = yymsp[-1].minor.yy0;
     t.n = (yymsp[0].minor.yy0.z + yymsp[0].minor.yy0.n) - yymsp[-1].minor.yy0.z;
-    yylhsminor.yy636 = createValueNode(pCxt, TSDB_DATA_TYPE_BIGINT, &t);
+    yylhsminor.yy686 = createValueNode(pCxt, TSDB_DATA_TYPE_BIGINT, &t);
 }
-  yymsp[-1].minor.yy636 = yylhsminor.yy636;
+  yymsp[-1].minor.yy686 = yylhsminor.yy686;
         break;
-      case 278: /* signed ::= NK_FLOAT */
-{ yylhsminor.yy636 = createValueNode(pCxt, TSDB_DATA_TYPE_DOUBLE, &yymsp[0].minor.yy0); }
-  yymsp[0].minor.yy636 = yylhsminor.yy636;
+      case 274: /* signed ::= NK_FLOAT */
+{ yylhsminor.yy686 = createValueNode(pCxt, TSDB_DATA_TYPE_DOUBLE, &yymsp[0].minor.yy0); }
+  yymsp[0].minor.yy686 = yylhsminor.yy686;
         break;
-      case 279: /* signed ::= NK_PLUS NK_FLOAT */
-{ yymsp[-1].minor.yy636 = createValueNode(pCxt, TSDB_DATA_TYPE_DOUBLE, &yymsp[0].minor.yy0); }
+      case 275: /* signed ::= NK_PLUS NK_FLOAT */
+{ yymsp[-1].minor.yy686 = createValueNode(pCxt, TSDB_DATA_TYPE_DOUBLE, &yymsp[0].minor.yy0); }
         break;
-      case 280: /* signed ::= NK_MINUS NK_FLOAT */
+      case 276: /* signed ::= NK_MINUS NK_FLOAT */
 {
     SToken t = yymsp[-1].minor.yy0;
     t.n = (yymsp[0].minor.yy0.z + yymsp[0].minor.yy0.n) - yymsp[-1].minor.yy0.z;
-    yylhsminor.yy636 = createValueNode(pCxt, TSDB_DATA_TYPE_DOUBLE, &t);
+    yylhsminor.yy686 = createValueNode(pCxt, TSDB_DATA_TYPE_DOUBLE, &t);
 }
-  yymsp[-1].minor.yy636 = yylhsminor.yy636;
+  yymsp[-1].minor.yy686 = yylhsminor.yy686;
         break;
-      case 282: /* signed_literal ::= NK_STRING */
-{ yylhsminor.yy636 = createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[0].minor.yy0); }
-  yymsp[0].minor.yy636 = yylhsminor.yy636;
+      case 278: /* signed_literal ::= NK_STRING */
+{ yylhsminor.yy686 = createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[0].minor.yy0); }
+  yymsp[0].minor.yy686 = yylhsminor.yy686;
         break;
-      case 283: /* signed_literal ::= NK_BOOL */
-{ yylhsminor.yy636 = createValueNode(pCxt, TSDB_DATA_TYPE_BOOL, &yymsp[0].minor.yy0); }
-  yymsp[0].minor.yy636 = yylhsminor.yy636;
+      case 279: /* signed_literal ::= NK_BOOL */
+{ yylhsminor.yy686 = createValueNode(pCxt, TSDB_DATA_TYPE_BOOL, &yymsp[0].minor.yy0); }
+  yymsp[0].minor.yy686 = yylhsminor.yy686;
         break;
-      case 284: /* signed_literal ::= TIMESTAMP NK_STRING */
-{ yymsp[-1].minor.yy636 = createValueNode(pCxt, TSDB_DATA_TYPE_TIMESTAMP, &yymsp[0].minor.yy0); }
+      case 280: /* signed_literal ::= TIMESTAMP NK_STRING */
+{ yymsp[-1].minor.yy686 = createValueNode(pCxt, TSDB_DATA_TYPE_TIMESTAMP, &yymsp[0].minor.yy0); }
         break;
-      case 285: /* signed_literal ::= duration_literal */
-      case 287: /* signed_literal ::= literal_func */ yytestcase(yyruleno==287);
-      case 344: /* star_func_para ::= expression */ yytestcase(yyruleno==344);
-      case 399: /* select_item ::= common_expression */ yytestcase(yyruleno==399);
-      case 445: /* search_condition ::= common_expression */ yytestcase(yyruleno==445);
-{ yylhsminor.yy636 = releaseRawExprNode(pCxt, yymsp[0].minor.yy636); }
-  yymsp[0].minor.yy636 = yylhsminor.yy636;
+      case 281: /* signed_literal ::= duration_literal */
+      case 283: /* signed_literal ::= literal_func */ yytestcase(yyruleno==283);
+      case 340: /* star_func_para ::= expression */ yytestcase(yyruleno==340);
+      case 395: /* select_item ::= common_expression */ yytestcase(yyruleno==395);
+      case 441: /* search_condition ::= common_expression */ yytestcase(yyruleno==441);
+{ yylhsminor.yy686 = releaseRawExprNode(pCxt, yymsp[0].minor.yy686); }
+  yymsp[0].minor.yy686 = yylhsminor.yy686;
         break;
-      case 286: /* signed_literal ::= NULL */
-{ yylhsminor.yy636 = createValueNode(pCxt, TSDB_DATA_TYPE_NULL, &yymsp[0].minor.yy0); }
-  yymsp[0].minor.yy636 = yylhsminor.yy636;
+      case 282: /* signed_literal ::= NULL */
+{ yylhsminor.yy686 = createValueNode(pCxt, TSDB_DATA_TYPE_NULL, &yymsp[0].minor.yy0); }
+  yymsp[0].minor.yy686 = yylhsminor.yy686;
         break;
-      case 306: /* expression ::= NK_LP expression NK_RP */
-      case 371: /* boolean_primary ::= NK_LP boolean_value_expression NK_RP */ yytestcase(yyruleno==371);
-{ yylhsminor.yy636 = createRawExprNodeExt(pCxt, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0, releaseRawExprNode(pCxt, yymsp[-1].minor.yy636)); }
-  yymsp[-2].minor.yy636 = yylhsminor.yy636;
+      case 302: /* expression ::= NK_LP expression NK_RP */
+      case 367: /* boolean_primary ::= NK_LP boolean_value_expression NK_RP */ yytestcase(yyruleno==367);
+{ yylhsminor.yy686 = createRawExprNodeExt(pCxt, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0, releaseRawExprNode(pCxt, yymsp[-1].minor.yy686)); }
+  yymsp[-2].minor.yy686 = yylhsminor.yy686;
         break;
-      case 307: /* expression ::= NK_PLUS expression */
+      case 303: /* expression ::= NK_PLUS expression */
 {
-    SToken t = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy636);
-    yylhsminor.yy636 = createRawExprNodeExt(pCxt, &yymsp[-1].minor.yy0, &t, releaseRawExprNode(pCxt, yymsp[0].minor.yy636));
+    SToken t = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy686);
+    yylhsminor.yy686 = createRawExprNodeExt(pCxt, &yymsp[-1].minor.yy0, &t, releaseRawExprNode(pCxt, yymsp[0].minor.yy686));
 }
-  yymsp[-1].minor.yy636 = yylhsminor.yy636;
+  yymsp[-1].minor.yy686 = yylhsminor.yy686;
         break;
-      case 308: /* expression ::= NK_MINUS expression */
+      case 304: /* expression ::= NK_MINUS expression */
 {
-    SToken t = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy636);
-    yylhsminor.yy636 = createRawExprNodeExt(pCxt, &yymsp[-1].minor.yy0, &t, createOperatorNode(pCxt, OP_TYPE_MINUS, releaseRawExprNode(pCxt, yymsp[0].minor.yy636), NULL));
+    SToken t = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy686);
+    yylhsminor.yy686 = createRawExprNodeExt(pCxt, &yymsp[-1].minor.yy0, &t, createOperatorNode(pCxt, OP_TYPE_MINUS, releaseRawExprNode(pCxt, yymsp[0].minor.yy686), NULL));
 }
-  yymsp[-1].minor.yy636 = yylhsminor.yy636;
+  yymsp[-1].minor.yy686 = yylhsminor.yy686;
         break;
-      case 309: /* expression ::= expression NK_PLUS expression */
+      case 305: /* expression ::= expression NK_PLUS expression */
 {
-    SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy636);
-    SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy636);
-    yylhsminor.yy636 = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, OP_TYPE_ADD, releaseRawExprNode(pCxt, yymsp[-2].minor.yy636), releaseRawExprNode(pCxt, yymsp[0].minor.yy636)));
+    SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy686);
+    SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy686);
+    yylhsminor.yy686 = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, OP_TYPE_ADD, releaseRawExprNode(pCxt, yymsp[-2].minor.yy686), releaseRawExprNode(pCxt, yymsp[0].minor.yy686)));
 }
-  yymsp[-2].minor.yy636 = yylhsminor.yy636;
+  yymsp[-2].minor.yy686 = yylhsminor.yy686;
         break;
-      case 310: /* expression ::= expression NK_MINUS expression */
+      case 306: /* expression ::= expression NK_MINUS expression */
 {
-    SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy636);
-    SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy636);
-    yylhsminor.yy636 = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, OP_TYPE_SUB, releaseRawExprNode(pCxt, yymsp[-2].minor.yy636), releaseRawExprNode(pCxt, yymsp[0].minor.yy636)));
+    SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy686);
+    SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy686);
+    yylhsminor.yy686 = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, OP_TYPE_SUB, releaseRawExprNode(pCxt, yymsp[-2].minor.yy686), releaseRawExprNode(pCxt, yymsp[0].minor.yy686)));
 }
-  yymsp[-2].minor.yy636 = yylhsminor.yy636;
+  yymsp[-2].minor.yy686 = yylhsminor.yy686;
         break;
-      case 311: /* expression ::= expression NK_STAR expression */
+      case 307: /* expression ::= expression NK_STAR expression */
 {
-    SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy636);
-    SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy636);
-    yylhsminor.yy636 = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, OP_TYPE_MULTI, releaseRawExprNode(pCxt, yymsp[-2].minor.yy636), releaseRawExprNode(pCxt, yymsp[0].minor.yy636)));
+    SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy686);
+    SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy686);
+    yylhsminor.yy686 = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, OP_TYPE_MULTI, releaseRawExprNode(pCxt, yymsp[-2].minor.yy686), releaseRawExprNode(pCxt, yymsp[0].minor.yy686)));
 }
-  yymsp[-2].minor.yy636 = yylhsminor.yy636;
+  yymsp[-2].minor.yy686 = yylhsminor.yy686;
         break;
-      case 312: /* expression ::= expression NK_SLASH expression */
+      case 308: /* expression ::= expression NK_SLASH expression */
 {
-    SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy636);
-    SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy636);
-    yylhsminor.yy636 = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, OP_TYPE_DIV, releaseRawExprNode(pCxt, yymsp[-2].minor.yy636), releaseRawExprNode(pCxt, yymsp[0].minor.yy636)));
+    SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy686);
+    SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy686);
+    yylhsminor.yy686 = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, OP_TYPE_DIV, releaseRawExprNode(pCxt, yymsp[-2].minor.yy686), releaseRawExprNode(pCxt, yymsp[0].minor.yy686)));
 }
-  yymsp[-2].minor.yy636 = yylhsminor.yy636;
+  yymsp[-2].minor.yy686 = yylhsminor.yy686;
         break;
-      case 313: /* expression ::= expression NK_REM expression */
+      case 309: /* expression ::= expression NK_REM expression */
 {
-    SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy636);
-    SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy636);
-    yylhsminor.yy636 = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, OP_TYPE_MOD, releaseRawExprNode(pCxt, yymsp[-2].minor.yy636), releaseRawExprNode(pCxt, yymsp[0].minor.yy636)));
+    SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy686);
+    SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy686);
+    yylhsminor.yy686 = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, OP_TYPE_MOD, releaseRawExprNode(pCxt, yymsp[-2].minor.yy686), releaseRawExprNode(pCxt, yymsp[0].minor.yy686)));
 }
-  yymsp[-2].minor.yy636 = yylhsminor.yy636;
+  yymsp[-2].minor.yy686 = yylhsminor.yy686;
         break;
-      case 314: /* expression ::= column_reference NK_ARROW NK_STRING */
+      case 310: /* expression ::= column_reference NK_ARROW NK_STRING */
 {
-    SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy636);
-    yylhsminor.yy636 = createRawExprNodeExt(pCxt, &s, &yymsp[0].minor.yy0, createOperatorNode(pCxt, OP_TYPE_JSON_GET_VALUE, releaseRawExprNode(pCxt, yymsp[-2].minor.yy636), createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[0].minor.yy0)));
+    SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy686);
+    yylhsminor.yy686 = createRawExprNodeExt(pCxt, &s, &yymsp[0].minor.yy0, createOperatorNode(pCxt, OP_TYPE_JSON_GET_VALUE, releaseRawExprNode(pCxt, yymsp[-2].minor.yy686), createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[0].minor.yy0)));
 }
-  yymsp[-2].minor.yy636 = yylhsminor.yy636;
-        break;
-      case 315: /* expression_list ::= expression */
-{ yylhsminor.yy236 = createNodeList(pCxt, releaseRawExprNode(pCxt, yymsp[0].minor.yy636)); }
-  yymsp[0].minor.yy236 = yylhsminor.yy236;
-        break;
-      case 316: /* expression_list ::= expression_list NK_COMMA expression */
-{ yylhsminor.yy236 = addNodeToList(pCxt, yymsp[-2].minor.yy236, releaseRawExprNode(pCxt, yymsp[0].minor.yy636)); }
-  yymsp[-2].minor.yy236 = yylhsminor.yy236;
-        break;
-      case 317: /* column_reference ::= column_name */
-{ yylhsminor.yy636 = createRawExprNode(pCxt, &yymsp[0].minor.yy53, createColumnNode(pCxt, NULL, &yymsp[0].minor.yy53)); }
-  yymsp[0].minor.yy636 = yylhsminor.yy636;
-        break;
-      case 318: /* column_reference ::= table_name NK_DOT column_name */
-{ yylhsminor.yy636 = createRawExprNodeExt(pCxt, &yymsp[-2].minor.yy53, &yymsp[0].minor.yy53, createColumnNode(pCxt, &yymsp[-2].minor.yy53, &yymsp[0].minor.yy53)); }
-  yymsp[-2].minor.yy636 = yylhsminor.yy636;
-        break;
-      case 319: /* pseudo_column ::= ROWTS */
-      case 320: /* pseudo_column ::= TBNAME */ yytestcase(yyruleno==320);
-      case 322: /* pseudo_column ::= QSTARTTS */ yytestcase(yyruleno==322);
-      case 323: /* pseudo_column ::= QENDTS */ yytestcase(yyruleno==323);
-      case 324: /* pseudo_column ::= WSTARTTS */ yytestcase(yyruleno==324);
-      case 325: /* pseudo_column ::= WENDTS */ yytestcase(yyruleno==325);
-      case 326: /* pseudo_column ::= WDURATION */ yytestcase(yyruleno==326);
-      case 332: /* literal_func ::= NOW */ yytestcase(yyruleno==332);
-{ yylhsminor.yy636 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createFunctionNode(pCxt, &yymsp[0].minor.yy0, NULL)); }
-  yymsp[0].minor.yy636 = yylhsminor.yy636;
-        break;
-      case 321: /* pseudo_column ::= table_name NK_DOT TBNAME */
-{ yylhsminor.yy636 = createRawExprNodeExt(pCxt, &yymsp[-2].minor.yy53, &yymsp[0].minor.yy0, createFunctionNode(pCxt, &yymsp[0].minor.yy0, createNodeList(pCxt, createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[-2].minor.yy53)))); }
-  yymsp[-2].minor.yy636 = yylhsminor.yy636;
-        break;
-      case 327: /* function_expression ::= function_name NK_LP expression_list NK_RP */
-      case 328: /* function_expression ::= star_func NK_LP star_func_para_list NK_RP */ yytestcase(yyruleno==328);
-{ yylhsminor.yy636 = createRawExprNodeExt(pCxt, &yymsp[-3].minor.yy53, &yymsp[0].minor.yy0, createFunctionNode(pCxt, &yymsp[-3].minor.yy53, yymsp[-1].minor.yy236)); }
-  yymsp[-3].minor.yy636 = yylhsminor.yy636;
-        break;
-      case 329: /* function_expression ::= CAST NK_LP expression AS type_name NK_RP */
-{ yylhsminor.yy636 = createRawExprNodeExt(pCxt, &yymsp[-5].minor.yy0, &yymsp[0].minor.yy0, createCastFunctionNode(pCxt, releaseRawExprNode(pCxt, yymsp[-3].minor.yy636), yymsp[-1].minor.yy450)); }
-  yymsp[-5].minor.yy636 = yylhsminor.yy636;
-        break;
-      case 331: /* literal_func ::= noarg_func NK_LP NK_RP */
-{ yylhsminor.yy636 = createRawExprNodeExt(pCxt, &yymsp[-2].minor.yy53, &yymsp[0].minor.yy0, createFunctionNode(pCxt, &yymsp[-2].minor.yy53, NULL)); }
-  yymsp[-2].minor.yy636 = yylhsminor.yy636;
-        break;
-      case 340: /* star_func_para_list ::= NK_STAR */
-{ yylhsminor.yy236 = createNodeList(pCxt, createColumnNode(pCxt, NULL, &yymsp[0].minor.yy0)); }
-  yymsp[0].minor.yy236 = yylhsminor.yy236;
-        break;
-      case 345: /* star_func_para ::= table_name NK_DOT NK_STAR */
-      case 402: /* select_item ::= table_name NK_DOT NK_STAR */ yytestcase(yyruleno==402);
-{ yylhsminor.yy636 = createColumnNode(pCxt, &yymsp[-2].minor.yy53, &yymsp[0].minor.yy0); }
-  yymsp[-2].minor.yy636 = yylhsminor.yy636;
-        break;
-      case 346: /* predicate ::= expression compare_op expression */
-      case 351: /* predicate ::= expression in_op in_predicate_value */ yytestcase(yyruleno==351);
+  yymsp[-2].minor.yy686 = yylhsminor.yy686;
+        break;
+      case 311: /* expression_list ::= expression */
+{ yylhsminor.yy670 = createNodeList(pCxt, releaseRawExprNode(pCxt, yymsp[0].minor.yy686)); }
+  yymsp[0].minor.yy670 = yylhsminor.yy670;
+        break;
+      case 312: /* expression_list ::= expression_list NK_COMMA expression */
+{ yylhsminor.yy670 = addNodeToList(pCxt, yymsp[-2].minor.yy670, releaseRawExprNode(pCxt, yymsp[0].minor.yy686)); }
+  yymsp[-2].minor.yy670 = yylhsminor.yy670;
+        break;
+      case 313: /* column_reference ::= column_name */
+{ yylhsminor.yy686 = createRawExprNode(pCxt, &yymsp[0].minor.yy113, createColumnNode(pCxt, NULL, &yymsp[0].minor.yy113)); }
+  yymsp[0].minor.yy686 = yylhsminor.yy686;
+        break;
+      case 314: /* column_reference ::= table_name NK_DOT column_name */
+{ yylhsminor.yy686 = createRawExprNodeExt(pCxt, &yymsp[-2].minor.yy113, &yymsp[0].minor.yy113, createColumnNode(pCxt, &yymsp[-2].minor.yy113, &yymsp[0].minor.yy113)); }
+  yymsp[-2].minor.yy686 = yylhsminor.yy686;
+        break;
+      case 315: /* pseudo_column ::= ROWTS */
+      case 316: /* pseudo_column ::= TBNAME */ yytestcase(yyruleno==316);
+      case 318: /* pseudo_column ::= QSTARTTS */ yytestcase(yyruleno==318);
+      case 319: /* pseudo_column ::= QENDTS */ yytestcase(yyruleno==319);
+      case 320: /* pseudo_column ::= WSTARTTS */ yytestcase(yyruleno==320);
+      case 321: /* pseudo_column ::= WENDTS */ yytestcase(yyruleno==321);
+      case 322: /* pseudo_column ::= WDURATION */ yytestcase(yyruleno==322);
+      case 328: /* literal_func ::= NOW */ yytestcase(yyruleno==328);
+{ yylhsminor.yy686 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createFunctionNode(pCxt, &yymsp[0].minor.yy0, NULL)); }
+  yymsp[0].minor.yy686 = yylhsminor.yy686;
+        break;
+      case 317: /* pseudo_column ::= table_name NK_DOT TBNAME */
+{ yylhsminor.yy686 = createRawExprNodeExt(pCxt, &yymsp[-2].minor.yy113, &yymsp[0].minor.yy0, createFunctionNode(pCxt, &yymsp[0].minor.yy0, createNodeList(pCxt, createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[-2].minor.yy113)))); }
+  yymsp[-2].minor.yy686 = yylhsminor.yy686;
+        break;
+      case 323: /* function_expression ::= function_name NK_LP expression_list NK_RP */
+      case 324: /* function_expression ::= star_func NK_LP star_func_para_list NK_RP */ yytestcase(yyruleno==324);
+{ yylhsminor.yy686 = createRawExprNodeExt(pCxt, &yymsp[-3].minor.yy113, &yymsp[0].minor.yy0, createFunctionNode(pCxt, &yymsp[-3].minor.yy113, yymsp[-1].minor.yy670)); }
+  yymsp[-3].minor.yy686 = yylhsminor.yy686;
+        break;
+      case 325: /* function_expression ::= CAST NK_LP expression AS type_name NK_RP */
+{ yylhsminor.yy686 = createRawExprNodeExt(pCxt, &yymsp[-5].minor.yy0, &yymsp[0].minor.yy0, createCastFunctionNode(pCxt, releaseRawExprNode(pCxt, yymsp[-3].minor.yy686), yymsp[-1].minor.yy490)); }
+  yymsp[-5].minor.yy686 = yylhsminor.yy686;
+        break;
+      case 327: /* literal_func ::= noarg_func NK_LP NK_RP */
+{ yylhsminor.yy686 = createRawExprNodeExt(pCxt, &yymsp[-2].minor.yy113, &yymsp[0].minor.yy0, createFunctionNode(pCxt, &yymsp[-2].minor.yy113, NULL)); }
+  yymsp[-2].minor.yy686 = yylhsminor.yy686;
+        break;
+      case 336: /* star_func_para_list ::= NK_STAR */
+{ yylhsminor.yy670 = createNodeList(pCxt, createColumnNode(pCxt, NULL, &yymsp[0].minor.yy0)); }
+  yymsp[0].minor.yy670 = yylhsminor.yy670;
+        break;
+      case 341: /* star_func_para ::= table_name NK_DOT NK_STAR */
+      case 398: /* select_item ::= table_name NK_DOT NK_STAR */ yytestcase(yyruleno==398);
+{ yylhsminor.yy686 = createColumnNode(pCxt, &yymsp[-2].minor.yy113, &yymsp[0].minor.yy0); }
+  yymsp[-2].minor.yy686 = yylhsminor.yy686;
+        break;
+      case 342: /* predicate ::= expression compare_op expression */
+      case 347: /* predicate ::= expression in_op in_predicate_value */ yytestcase(yyruleno==347);
 {
-    SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy636);
-    SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy636);
-    yylhsminor.yy636 = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, yymsp[-1].minor.yy136, releaseRawExprNode(pCxt, yymsp[-2].minor.yy636), releaseRawExprNode(pCxt, yymsp[0].minor.yy636)));
+    SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy686);
+    SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy686);
+    yylhsminor.yy686 = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, yymsp[-1].minor.yy632, releaseRawExprNode(pCxt, yymsp[-2].minor.yy686), releaseRawExprNode(pCxt, yymsp[0].minor.yy686)));
 }
-  yymsp[-2].minor.yy636 = yylhsminor.yy636;
+  yymsp[-2].minor.yy686 = yylhsminor.yy686;
         break;
-      case 347: /* predicate ::= expression BETWEEN expression AND expression */
+      case 343: /* predicate ::= expression BETWEEN expression AND expression */
 {
-    SToken s = getTokenFromRawExprNode(pCxt, yymsp[-4].minor.yy636);
-    SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy636);
-    yylhsminor.yy636 = createRawExprNodeExt(pCxt, &s, &e, createBetweenAnd(pCxt, releaseRawExprNode(pCxt, yymsp[-4].minor.yy636), releaseRawExprNode(pCxt, yymsp[-2].minor.yy636), releaseRawExprNode(pCxt, yymsp[0].minor.yy636)));
+    SToken s = getTokenFromRawExprNode(pCxt, yymsp[-4].minor.yy686);
+    SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy686);
+    yylhsminor.yy686 = createRawExprNodeExt(pCxt, &s, &e, createBetweenAnd(pCxt, releaseRawExprNode(pCxt, yymsp[-4].minor.yy686), releaseRawExprNode(pCxt, yymsp[-2].minor.yy686), releaseRawExprNode(pCxt, yymsp[0].minor.yy686)));
 }
-  yymsp[-4].minor.yy636 = yylhsminor.yy636;
+  yymsp[-4].minor.yy686 = yylhsminor.yy686;
         break;
-      case 348: /* predicate ::= expression NOT BETWEEN expression AND expression */
+      case 344: /* predicate ::= expression NOT BETWEEN expression AND expression */
 {
-    SToken s = getTokenFromRawExprNode(pCxt, yymsp[-5].minor.yy636);
-    SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy636);
-    yylhsminor.yy636 = createRawExprNodeExt(pCxt, &s, &e, createNotBetweenAnd(pCxt, releaseRawExprNode(pCxt, yymsp[-5].minor.yy636), releaseRawExprNode(pCxt, yymsp[-2].minor.yy636), releaseRawExprNode(pCxt, yymsp[0].minor.yy636)));
+    SToken s = getTokenFromRawExprNode(pCxt, yymsp[-5].minor.yy686);
+    SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy686);
+    yylhsminor.yy686 = createRawExprNodeExt(pCxt, &s, &e, createNotBetweenAnd(pCxt, releaseRawExprNode(pCxt, yymsp[-5].minor.yy686), releaseRawExprNode(pCxt, yymsp[-2].minor.yy686), releaseRawExprNode(pCxt, yymsp[0].minor.yy686)));
 }
-  yymsp[-5].minor.yy636 = yylhsminor.yy636;
+  yymsp[-5].minor.yy686 = yylhsminor.yy686;
         break;
-      case 349: /* predicate ::= expression IS NULL */
+      case 345: /* predicate ::= expression IS NULL */
 {
-    SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy636);
-    yylhsminor.yy636 = createRawExprNodeExt(pCxt, &s, &yymsp[0].minor.yy0, createOperatorNode(pCxt, OP_TYPE_IS_NULL, releaseRawExprNode(pCxt, yymsp[-2].minor.yy636), NULL));
+    SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy686);
+    yylhsminor.yy686 = createRawExprNodeExt(pCxt, &s, &yymsp[0].minor.yy0, createOperatorNode(pCxt, OP_TYPE_IS_NULL, releaseRawExprNode(pCxt, yymsp[-2].minor.yy686), NULL));
 }
-  yymsp[-2].minor.yy636 = yylhsminor.yy636;
+  yymsp[-2].minor.yy686 = yylhsminor.yy686;
         break;
-      case 350: /* predicate ::= expression IS NOT NULL */
+      case 346: /* predicate ::= expression IS NOT NULL */
 {
-    SToken s = getTokenFromRawExprNode(pCxt, yymsp[-3].minor.yy636);
-    yylhsminor.yy636 = createRawExprNodeExt(pCxt, &s, &yymsp[0].minor.yy0, createOperatorNode(pCxt, OP_TYPE_IS_NOT_NULL, releaseRawExprNode(pCxt, yymsp[-3].minor.yy636), NULL));
+    SToken s = getTokenFromRawExprNode(pCxt, yymsp[-3].minor.yy686);
+    yylhsminor.yy686 = createRawExprNodeExt(pCxt, &s, &yymsp[0].minor.yy0, createOperatorNode(pCxt, OP_TYPE_IS_NOT_NULL, releaseRawExprNode(pCxt, yymsp[-3].minor.yy686), NULL));
 }
-  yymsp[-3].minor.yy636 = yylhsminor.yy636;
+  yymsp[-3].minor.yy686 = yylhsminor.yy686;
         break;
-      case 352: /* compare_op ::= NK_LT */
-{ yymsp[0].minor.yy136 = OP_TYPE_LOWER_THAN; }
+      case 348: /* compare_op ::= NK_LT */
+{ yymsp[0].minor.yy632 = OP_TYPE_LOWER_THAN; }
         break;
-      case 353: /* compare_op ::= NK_GT */
-{ yymsp[0].minor.yy136 = OP_TYPE_GREATER_THAN; }
+      case 349: /* compare_op ::= NK_GT */
+{ yymsp[0].minor.yy632 = OP_TYPE_GREATER_THAN; }
         break;
-      case 354: /* compare_op ::= NK_LE */
-{ yymsp[0].minor.yy136 = OP_TYPE_LOWER_EQUAL; }
+      case 350: /* compare_op ::= NK_LE */
+{ yymsp[0].minor.yy632 = OP_TYPE_LOWER_EQUAL; }
         break;
-      case 355: /* compare_op ::= NK_GE */
-{ yymsp[0].minor.yy136 = OP_TYPE_GREATER_EQUAL; }
+      case 351: /* compare_op ::= NK_GE */
+{ yymsp[0].minor.yy632 = OP_TYPE_GREATER_EQUAL; }
         break;
-      case 356: /* compare_op ::= NK_NE */
-{ yymsp[0].minor.yy136 = OP_TYPE_NOT_EQUAL; }
+      case 352: /* compare_op ::= NK_NE */
+{ yymsp[0].minor.yy632 = OP_TYPE_NOT_EQUAL; }
         break;
-      case 357: /* compare_op ::= NK_EQ */
-{ yymsp[0].minor.yy136 = OP_TYPE_EQUAL; }
+      case 353: /* compare_op ::= NK_EQ */
+{ yymsp[0].minor.yy632 = OP_TYPE_EQUAL; }
         break;
-      case 358: /* compare_op ::= LIKE */
-{ yymsp[0].minor.yy136 = OP_TYPE_LIKE; }
+      case 354: /* compare_op ::= LIKE */
+{ yymsp[0].minor.yy632 = OP_TYPE_LIKE; }
         break;
-      case 359: /* compare_op ::= NOT LIKE */
-{ yymsp[-1].minor.yy136 = OP_TYPE_NOT_LIKE; }
+      case 355: /* compare_op ::= NOT LIKE */
+{ yymsp[-1].minor.yy632 = OP_TYPE_NOT_LIKE; }
         break;
-      case 360: /* compare_op ::=
MATCH */ -{ yymsp[0].minor.yy136 = OP_TYPE_MATCH; } + case 356: /* compare_op ::= MATCH */ +{ yymsp[0].minor.yy632 = OP_TYPE_MATCH; } break; - case 361: /* compare_op ::= NMATCH */ -{ yymsp[0].minor.yy136 = OP_TYPE_NMATCH; } + case 357: /* compare_op ::= NMATCH */ +{ yymsp[0].minor.yy632 = OP_TYPE_NMATCH; } break; - case 362: /* compare_op ::= CONTAINS */ -{ yymsp[0].minor.yy136 = OP_TYPE_JSON_CONTAINS; } + case 358: /* compare_op ::= CONTAINS */ +{ yymsp[0].minor.yy632 = OP_TYPE_JSON_CONTAINS; } break; - case 363: /* in_op ::= IN */ -{ yymsp[0].minor.yy136 = OP_TYPE_IN; } + case 359: /* in_op ::= IN */ +{ yymsp[0].minor.yy632 = OP_TYPE_IN; } break; - case 364: /* in_op ::= NOT IN */ -{ yymsp[-1].minor.yy136 = OP_TYPE_NOT_IN; } + case 360: /* in_op ::= NOT IN */ +{ yymsp[-1].minor.yy632 = OP_TYPE_NOT_IN; } break; - case 365: /* in_predicate_value ::= NK_LP expression_list NK_RP */ -{ yylhsminor.yy636 = createRawExprNodeExt(pCxt, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0, createNodeListNode(pCxt, yymsp[-1].minor.yy236)); } - yymsp[-2].minor.yy636 = yylhsminor.yy636; + case 361: /* in_predicate_value ::= NK_LP expression_list NK_RP */ +{ yylhsminor.yy686 = createRawExprNodeExt(pCxt, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0, createNodeListNode(pCxt, yymsp[-1].minor.yy670)); } + yymsp[-2].minor.yy686 = yylhsminor.yy686; break; - case 367: /* boolean_value_expression ::= NOT boolean_primary */ + case 363: /* boolean_value_expression ::= NOT boolean_primary */ { - SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy636); - yylhsminor.yy636 = createRawExprNodeExt(pCxt, &yymsp[-1].minor.yy0, &e, createLogicConditionNode(pCxt, LOGIC_COND_TYPE_NOT, releaseRawExprNode(pCxt, yymsp[0].minor.yy636), NULL)); + SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy686); + yylhsminor.yy686 = createRawExprNodeExt(pCxt, &yymsp[-1].minor.yy0, &e, createLogicConditionNode(pCxt, LOGIC_COND_TYPE_NOT, releaseRawExprNode(pCxt, yymsp[0].minor.yy686), NULL)); } - yymsp[-1].minor.yy636 = yylhsminor.yy636; + yymsp[-1].minor.yy686 = yylhsminor.yy686; break; - case 368: /* boolean_value_expression ::= boolean_value_expression OR boolean_value_expression */ + case 364: /* boolean_value_expression ::= boolean_value_expression OR boolean_value_expression */ { - SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy636); - SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy636); - yylhsminor.yy636 = createRawExprNodeExt(pCxt, &s, &e, createLogicConditionNode(pCxt, LOGIC_COND_TYPE_OR, releaseRawExprNode(pCxt, yymsp[-2].minor.yy636), releaseRawExprNode(pCxt, yymsp[0].minor.yy636))); + SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy686); + SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy686); + yylhsminor.yy686 = createRawExprNodeExt(pCxt, &s, &e, createLogicConditionNode(pCxt, LOGIC_COND_TYPE_OR, releaseRawExprNode(pCxt, yymsp[-2].minor.yy686), releaseRawExprNode(pCxt, yymsp[0].minor.yy686))); } - yymsp[-2].minor.yy636 = yylhsminor.yy636; + yymsp[-2].minor.yy686 = yylhsminor.yy686; break; - case 369: /* boolean_value_expression ::= boolean_value_expression AND boolean_value_expression */ + case 365: /* boolean_value_expression ::= boolean_value_expression AND boolean_value_expression */ { - SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy636); - SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy636); - yylhsminor.yy636 = createRawExprNodeExt(pCxt, &s, &e, createLogicConditionNode(pCxt, LOGIC_COND_TYPE_AND, releaseRawExprNode(pCxt, yymsp[-2].minor.yy636), 
releaseRawExprNode(pCxt, yymsp[0].minor.yy636))); + SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy686); + SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy686); + yylhsminor.yy686 = createRawExprNodeExt(pCxt, &s, &e, createLogicConditionNode(pCxt, LOGIC_COND_TYPE_AND, releaseRawExprNode(pCxt, yymsp[-2].minor.yy686), releaseRawExprNode(pCxt, yymsp[0].minor.yy686))); } - yymsp[-2].minor.yy636 = yylhsminor.yy636; + yymsp[-2].minor.yy686 = yylhsminor.yy686; break; - case 376: /* table_reference_list ::= table_reference_list NK_COMMA table_reference */ -{ yylhsminor.yy636 = createJoinTableNode(pCxt, JOIN_TYPE_INNER, yymsp[-2].minor.yy636, yymsp[0].minor.yy636, NULL); } - yymsp[-2].minor.yy636 = yylhsminor.yy636; + case 372: /* table_reference_list ::= table_reference_list NK_COMMA table_reference */ +{ yylhsminor.yy686 = createJoinTableNode(pCxt, JOIN_TYPE_INNER, yymsp[-2].minor.yy686, yymsp[0].minor.yy686, NULL); } + yymsp[-2].minor.yy686 = yylhsminor.yy686; break; - case 379: /* table_primary ::= table_name alias_opt */ -{ yylhsminor.yy636 = createRealTableNode(pCxt, NULL, &yymsp[-1].minor.yy53, &yymsp[0].minor.yy53); } - yymsp[-1].minor.yy636 = yylhsminor.yy636; + case 375: /* table_primary ::= table_name alias_opt */ +{ yylhsminor.yy686 = createRealTableNode(pCxt, NULL, &yymsp[-1].minor.yy113, &yymsp[0].minor.yy113); } + yymsp[-1].minor.yy686 = yylhsminor.yy686; break; - case 380: /* table_primary ::= db_name NK_DOT table_name alias_opt */ -{ yylhsminor.yy636 = createRealTableNode(pCxt, &yymsp[-3].minor.yy53, &yymsp[-1].minor.yy53, &yymsp[0].minor.yy53); } - yymsp[-3].minor.yy636 = yylhsminor.yy636; + case 376: /* table_primary ::= db_name NK_DOT table_name alias_opt */ +{ yylhsminor.yy686 = createRealTableNode(pCxt, &yymsp[-3].minor.yy113, &yymsp[-1].minor.yy113, &yymsp[0].minor.yy113); } + yymsp[-3].minor.yy686 = yylhsminor.yy686; break; - case 381: /* table_primary ::= subquery alias_opt */ -{ yylhsminor.yy636 = createTempTableNode(pCxt, releaseRawExprNode(pCxt, yymsp[-1].minor.yy636), &yymsp[0].minor.yy53); } - yymsp[-1].minor.yy636 = yylhsminor.yy636; + case 377: /* table_primary ::= subquery alias_opt */ +{ yylhsminor.yy686 = createTempTableNode(pCxt, releaseRawExprNode(pCxt, yymsp[-1].minor.yy686), &yymsp[0].minor.yy113); } + yymsp[-1].minor.yy686 = yylhsminor.yy686; break; - case 383: /* alias_opt ::= */ -{ yymsp[1].minor.yy53 = nil_token; } + case 379: /* alias_opt ::= */ +{ yymsp[1].minor.yy113 = nil_token; } break; - case 384: /* alias_opt ::= table_alias */ -{ yylhsminor.yy53 = yymsp[0].minor.yy53; } - yymsp[0].minor.yy53 = yylhsminor.yy53; + case 380: /* alias_opt ::= table_alias */ +{ yylhsminor.yy113 = yymsp[0].minor.yy113; } + yymsp[0].minor.yy113 = yylhsminor.yy113; break; - case 385: /* alias_opt ::= AS table_alias */ -{ yymsp[-1].minor.yy53 = yymsp[0].minor.yy53; } + case 381: /* alias_opt ::= AS table_alias */ +{ yymsp[-1].minor.yy113 = yymsp[0].minor.yy113; } break; - case 386: /* parenthesized_joined_table ::= NK_LP joined_table NK_RP */ - case 387: /* parenthesized_joined_table ::= NK_LP parenthesized_joined_table NK_RP */ yytestcase(yyruleno==387); -{ yymsp[-2].minor.yy636 = yymsp[-1].minor.yy636; } + case 382: /* parenthesized_joined_table ::= NK_LP joined_table NK_RP */ + case 383: /* parenthesized_joined_table ::= NK_LP parenthesized_joined_table NK_RP */ yytestcase(yyruleno==383); +{ yymsp[-2].minor.yy686 = yymsp[-1].minor.yy686; } break; - case 388: /* joined_table ::= table_reference join_type JOIN table_reference ON search_condition */ -{ 
yylhsminor.yy636 = createJoinTableNode(pCxt, yymsp[-4].minor.yy342, yymsp[-5].minor.yy636, yymsp[-2].minor.yy636, yymsp[0].minor.yy636); } - yymsp[-5].minor.yy636 = yylhsminor.yy636; + case 384: /* joined_table ::= table_reference join_type JOIN table_reference ON search_condition */ +{ yylhsminor.yy686 = createJoinTableNode(pCxt, yymsp[-4].minor.yy120, yymsp[-5].minor.yy686, yymsp[-2].minor.yy686, yymsp[0].minor.yy686); } + yymsp[-5].minor.yy686 = yylhsminor.yy686; break; - case 389: /* join_type ::= */ -{ yymsp[1].minor.yy342 = JOIN_TYPE_INNER; } + case 385: /* join_type ::= */ +{ yymsp[1].minor.yy120 = JOIN_TYPE_INNER; } break; - case 390: /* join_type ::= INNER */ -{ yymsp[0].minor.yy342 = JOIN_TYPE_INNER; } + case 386: /* join_type ::= INNER */ +{ yymsp[0].minor.yy120 = JOIN_TYPE_INNER; } break; - case 391: /* query_specification ::= SELECT set_quantifier_opt select_list from_clause where_clause_opt partition_by_clause_opt twindow_clause_opt group_by_clause_opt having_clause_opt */ + case 387: /* query_specification ::= SELECT set_quantifier_opt select_list from_clause where_clause_opt partition_by_clause_opt twindow_clause_opt group_by_clause_opt having_clause_opt */ { - yymsp[-8].minor.yy636 = createSelectStmt(pCxt, yymsp[-7].minor.yy603, yymsp[-6].minor.yy236, yymsp[-5].minor.yy636); - yymsp[-8].minor.yy636 = addWhereClause(pCxt, yymsp[-8].minor.yy636, yymsp[-4].minor.yy636); - yymsp[-8].minor.yy636 = addPartitionByClause(pCxt, yymsp[-8].minor.yy636, yymsp[-3].minor.yy236); - yymsp[-8].minor.yy636 = addWindowClauseClause(pCxt, yymsp[-8].minor.yy636, yymsp[-2].minor.yy636); - yymsp[-8].minor.yy636 = addGroupByClause(pCxt, yymsp[-8].minor.yy636, yymsp[-1].minor.yy236); - yymsp[-8].minor.yy636 = addHavingClause(pCxt, yymsp[-8].minor.yy636, yymsp[0].minor.yy636); + yymsp[-8].minor.yy686 = createSelectStmt(pCxt, yymsp[-7].minor.yy131, yymsp[-6].minor.yy670, yymsp[-5].minor.yy686); + yymsp[-8].minor.yy686 = addWhereClause(pCxt, yymsp[-8].minor.yy686, yymsp[-4].minor.yy686); + yymsp[-8].minor.yy686 = addPartitionByClause(pCxt, yymsp[-8].minor.yy686, yymsp[-3].minor.yy670); + yymsp[-8].minor.yy686 = addWindowClauseClause(pCxt, yymsp[-8].minor.yy686, yymsp[-2].minor.yy686); + yymsp[-8].minor.yy686 = addGroupByClause(pCxt, yymsp[-8].minor.yy686, yymsp[-1].minor.yy670); + yymsp[-8].minor.yy686 = addHavingClause(pCxt, yymsp[-8].minor.yy686, yymsp[0].minor.yy686); } break; - case 394: /* set_quantifier_opt ::= ALL */ -{ yymsp[0].minor.yy603 = false; } + case 390: /* set_quantifier_opt ::= ALL */ +{ yymsp[0].minor.yy131 = false; } break; - case 395: /* select_list ::= NK_STAR */ -{ yymsp[0].minor.yy236 = NULL; } + case 391: /* select_list ::= NK_STAR */ +{ yymsp[0].minor.yy670 = NULL; } break; - case 400: /* select_item ::= common_expression column_alias */ -{ yylhsminor.yy636 = setProjectionAlias(pCxt, releaseRawExprNode(pCxt, yymsp[-1].minor.yy636), &yymsp[0].minor.yy53); } - yymsp[-1].minor.yy636 = yylhsminor.yy636; + case 396: /* select_item ::= common_expression column_alias */ +{ yylhsminor.yy686 = setProjectionAlias(pCxt, releaseRawExprNode(pCxt, yymsp[-1].minor.yy686), &yymsp[0].minor.yy113); } + yymsp[-1].minor.yy686 = yylhsminor.yy686; break; - case 401: /* select_item ::= common_expression AS column_alias */ -{ yylhsminor.yy636 = setProjectionAlias(pCxt, releaseRawExprNode(pCxt, yymsp[-2].minor.yy636), &yymsp[0].minor.yy53); } - yymsp[-2].minor.yy636 = yylhsminor.yy636; + case 397: /* select_item ::= common_expression AS column_alias */ +{ yylhsminor.yy686 = setProjectionAlias(pCxt, 
releaseRawExprNode(pCxt, yymsp[-2].minor.yy686), &yymsp[0].minor.yy113); } + yymsp[-2].minor.yy686 = yylhsminor.yy686; break; - case 406: /* partition_by_clause_opt ::= PARTITION BY expression_list */ - case 423: /* group_by_clause_opt ::= GROUP BY group_by_list */ yytestcase(yyruleno==423); - case 435: /* order_by_clause_opt ::= ORDER BY sort_specification_list */ yytestcase(yyruleno==435); -{ yymsp[-2].minor.yy236 = yymsp[0].minor.yy236; } + case 402: /* partition_by_clause_opt ::= PARTITION BY expression_list */ + case 419: /* group_by_clause_opt ::= GROUP BY group_by_list */ yytestcase(yyruleno==419); + case 431: /* order_by_clause_opt ::= ORDER BY sort_specification_list */ yytestcase(yyruleno==431); +{ yymsp[-2].minor.yy670 = yymsp[0].minor.yy670; } break; - case 408: /* twindow_clause_opt ::= SESSION NK_LP column_reference NK_COMMA duration_literal NK_RP */ -{ yymsp[-5].minor.yy636 = createSessionWindowNode(pCxt, releaseRawExprNode(pCxt, yymsp[-3].minor.yy636), releaseRawExprNode(pCxt, yymsp[-1].minor.yy636)); } + case 404: /* twindow_clause_opt ::= SESSION NK_LP column_reference NK_COMMA duration_literal NK_RP */ +{ yymsp[-5].minor.yy686 = createSessionWindowNode(pCxt, releaseRawExprNode(pCxt, yymsp[-3].minor.yy686), releaseRawExprNode(pCxt, yymsp[-1].minor.yy686)); } break; - case 409: /* twindow_clause_opt ::= STATE_WINDOW NK_LP expression NK_RP */ -{ yymsp[-3].minor.yy636 = createStateWindowNode(pCxt, releaseRawExprNode(pCxt, yymsp[-1].minor.yy636)); } + case 405: /* twindow_clause_opt ::= STATE_WINDOW NK_LP expression NK_RP */ +{ yymsp[-3].minor.yy686 = createStateWindowNode(pCxt, releaseRawExprNode(pCxt, yymsp[-1].minor.yy686)); } break; - case 410: /* twindow_clause_opt ::= INTERVAL NK_LP duration_literal NK_RP sliding_opt fill_opt */ -{ yymsp[-5].minor.yy636 = createIntervalWindowNode(pCxt, releaseRawExprNode(pCxt, yymsp[-3].minor.yy636), NULL, yymsp[-1].minor.yy636, yymsp[0].minor.yy636); } + case 406: /* twindow_clause_opt ::= INTERVAL NK_LP duration_literal NK_RP sliding_opt fill_opt */ +{ yymsp[-5].minor.yy686 = createIntervalWindowNode(pCxt, releaseRawExprNode(pCxt, yymsp[-3].minor.yy686), NULL, yymsp[-1].minor.yy686, yymsp[0].minor.yy686); } break; - case 411: /* twindow_clause_opt ::= INTERVAL NK_LP duration_literal NK_COMMA duration_literal NK_RP sliding_opt fill_opt */ -{ yymsp[-7].minor.yy636 = createIntervalWindowNode(pCxt, releaseRawExprNode(pCxt, yymsp[-5].minor.yy636), releaseRawExprNode(pCxt, yymsp[-3].minor.yy636), yymsp[-1].minor.yy636, yymsp[0].minor.yy636); } + case 407: /* twindow_clause_opt ::= INTERVAL NK_LP duration_literal NK_COMMA duration_literal NK_RP sliding_opt fill_opt */ +{ yymsp[-7].minor.yy686 = createIntervalWindowNode(pCxt, releaseRawExprNode(pCxt, yymsp[-5].minor.yy686), releaseRawExprNode(pCxt, yymsp[-3].minor.yy686), yymsp[-1].minor.yy686, yymsp[0].minor.yy686); } break; - case 413: /* sliding_opt ::= SLIDING NK_LP duration_literal NK_RP */ -{ yymsp[-3].minor.yy636 = releaseRawExprNode(pCxt, yymsp[-1].minor.yy636); } + case 409: /* sliding_opt ::= SLIDING NK_LP duration_literal NK_RP */ +{ yymsp[-3].minor.yy686 = releaseRawExprNode(pCxt, yymsp[-1].minor.yy686); } break; - case 415: /* fill_opt ::= FILL NK_LP fill_mode NK_RP */ -{ yymsp[-3].minor.yy636 = createFillNode(pCxt, yymsp[-1].minor.yy18, NULL); } + case 411: /* fill_opt ::= FILL NK_LP fill_mode NK_RP */ +{ yymsp[-3].minor.yy686 = createFillNode(pCxt, yymsp[-1].minor.yy522, NULL); } break; - case 416: /* fill_opt ::= FILL NK_LP VALUE NK_COMMA literal_list NK_RP */ -{ 
yymsp[-5].minor.yy636 = createFillNode(pCxt, FILL_MODE_VALUE, createNodeListNode(pCxt, yymsp[-1].minor.yy236)); } + case 412: /* fill_opt ::= FILL NK_LP VALUE NK_COMMA literal_list NK_RP */ +{ yymsp[-5].minor.yy686 = createFillNode(pCxt, FILL_MODE_VALUE, createNodeListNode(pCxt, yymsp[-1].minor.yy670)); } break; - case 417: /* fill_mode ::= NONE */ -{ yymsp[0].minor.yy18 = FILL_MODE_NONE; } + case 413: /* fill_mode ::= NONE */ +{ yymsp[0].minor.yy522 = FILL_MODE_NONE; } break; - case 418: /* fill_mode ::= PREV */ -{ yymsp[0].minor.yy18 = FILL_MODE_PREV; } + case 414: /* fill_mode ::= PREV */ +{ yymsp[0].minor.yy522 = FILL_MODE_PREV; } break; - case 419: /* fill_mode ::= NULL */ -{ yymsp[0].minor.yy18 = FILL_MODE_NULL; } + case 415: /* fill_mode ::= NULL */ +{ yymsp[0].minor.yy522 = FILL_MODE_NULL; } break; - case 420: /* fill_mode ::= LINEAR */ -{ yymsp[0].minor.yy18 = FILL_MODE_LINEAR; } + case 416: /* fill_mode ::= LINEAR */ +{ yymsp[0].minor.yy522 = FILL_MODE_LINEAR; } break; - case 421: /* fill_mode ::= NEXT */ -{ yymsp[0].minor.yy18 = FILL_MODE_NEXT; } + case 417: /* fill_mode ::= NEXT */ +{ yymsp[0].minor.yy522 = FILL_MODE_NEXT; } break; - case 424: /* group_by_list ::= expression */ -{ yylhsminor.yy236 = createNodeList(pCxt, createGroupingSetNode(pCxt, releaseRawExprNode(pCxt, yymsp[0].minor.yy636))); } - yymsp[0].minor.yy236 = yylhsminor.yy236; + case 420: /* group_by_list ::= expression */ +{ yylhsminor.yy670 = createNodeList(pCxt, createGroupingSetNode(pCxt, releaseRawExprNode(pCxt, yymsp[0].minor.yy686))); } + yymsp[0].minor.yy670 = yylhsminor.yy670; break; - case 425: /* group_by_list ::= group_by_list NK_COMMA expression */ -{ yylhsminor.yy236 = addNodeToList(pCxt, yymsp[-2].minor.yy236, createGroupingSetNode(pCxt, releaseRawExprNode(pCxt, yymsp[0].minor.yy636))); } - yymsp[-2].minor.yy236 = yylhsminor.yy236; + case 421: /* group_by_list ::= group_by_list NK_COMMA expression */ +{ yylhsminor.yy670 = addNodeToList(pCxt, yymsp[-2].minor.yy670, createGroupingSetNode(pCxt, releaseRawExprNode(pCxt, yymsp[0].minor.yy686))); } + yymsp[-2].minor.yy670 = yylhsminor.yy670; break; - case 428: /* query_expression ::= query_expression_body order_by_clause_opt slimit_clause_opt limit_clause_opt */ + case 424: /* query_expression ::= query_expression_body order_by_clause_opt slimit_clause_opt limit_clause_opt */ { - yylhsminor.yy636 = addOrderByClause(pCxt, yymsp[-3].minor.yy636, yymsp[-2].minor.yy236); - yylhsminor.yy636 = addSlimitClause(pCxt, yylhsminor.yy636, yymsp[-1].minor.yy636); - yylhsminor.yy636 = addLimitClause(pCxt, yylhsminor.yy636, yymsp[0].minor.yy636); + yylhsminor.yy686 = addOrderByClause(pCxt, yymsp[-3].minor.yy686, yymsp[-2].minor.yy670); + yylhsminor.yy686 = addSlimitClause(pCxt, yylhsminor.yy686, yymsp[-1].minor.yy686); + yylhsminor.yy686 = addLimitClause(pCxt, yylhsminor.yy686, yymsp[0].minor.yy686); } - yymsp[-3].minor.yy636 = yylhsminor.yy636; + yymsp[-3].minor.yy686 = yylhsminor.yy686; break; - case 430: /* query_expression_body ::= query_expression_body UNION ALL query_expression_body */ -{ yylhsminor.yy636 = createSetOperator(pCxt, SET_OP_TYPE_UNION_ALL, yymsp[-3].minor.yy636, yymsp[0].minor.yy636); } - yymsp[-3].minor.yy636 = yylhsminor.yy636; + case 426: /* query_expression_body ::= query_expression_body UNION ALL query_expression_body */ +{ yylhsminor.yy686 = createSetOperator(pCxt, SET_OP_TYPE_UNION_ALL, yymsp[-3].minor.yy686, yymsp[0].minor.yy686); } + yymsp[-3].minor.yy686 = yylhsminor.yy686; break; - case 431: /* query_expression_body ::= 
query_expression_body UNION query_expression_body */ -{ yylhsminor.yy636 = createSetOperator(pCxt, SET_OP_TYPE_UNION, yymsp[-2].minor.yy636, yymsp[0].minor.yy636); } - yymsp[-2].minor.yy636 = yylhsminor.yy636; + case 427: /* query_expression_body ::= query_expression_body UNION query_expression_body */ +{ yylhsminor.yy686 = createSetOperator(pCxt, SET_OP_TYPE_UNION, yymsp[-2].minor.yy686, yymsp[0].minor.yy686); } + yymsp[-2].minor.yy686 = yylhsminor.yy686; break; - case 433: /* query_primary ::= NK_LP query_expression_body order_by_clause_opt slimit_clause_opt limit_clause_opt NK_RP */ -{ yymsp[-5].minor.yy636 = yymsp[-4].minor.yy636; } - yy_destructor(yypParser,353,&yymsp[-3].minor); - yy_destructor(yypParser,354,&yymsp[-2].minor); - yy_destructor(yypParser,355,&yymsp[-1].minor); + case 429: /* query_primary ::= NK_LP query_expression_body order_by_clause_opt slimit_clause_opt limit_clause_opt NK_RP */ +{ yymsp[-5].minor.yy686 = yymsp[-4].minor.yy686; } + yy_destructor(yypParser,349,&yymsp[-3].minor); + yy_destructor(yypParser,350,&yymsp[-2].minor); + yy_destructor(yypParser,351,&yymsp[-1].minor); break; - case 437: /* slimit_clause_opt ::= SLIMIT NK_INTEGER */ - case 441: /* limit_clause_opt ::= LIMIT NK_INTEGER */ yytestcase(yyruleno==441); -{ yymsp[-1].minor.yy636 = createLimitNode(pCxt, &yymsp[0].minor.yy0, NULL); } + case 433: /* slimit_clause_opt ::= SLIMIT NK_INTEGER */ + case 437: /* limit_clause_opt ::= LIMIT NK_INTEGER */ yytestcase(yyruleno==437); +{ yymsp[-1].minor.yy686 = createLimitNode(pCxt, &yymsp[0].minor.yy0, NULL); } break; - case 438: /* slimit_clause_opt ::= SLIMIT NK_INTEGER SOFFSET NK_INTEGER */ - case 442: /* limit_clause_opt ::= LIMIT NK_INTEGER OFFSET NK_INTEGER */ yytestcase(yyruleno==442); -{ yymsp[-3].minor.yy636 = createLimitNode(pCxt, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0); } + case 434: /* slimit_clause_opt ::= SLIMIT NK_INTEGER SOFFSET NK_INTEGER */ + case 438: /* limit_clause_opt ::= LIMIT NK_INTEGER OFFSET NK_INTEGER */ yytestcase(yyruleno==438); +{ yymsp[-3].minor.yy686 = createLimitNode(pCxt, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0); } break; - case 439: /* slimit_clause_opt ::= SLIMIT NK_INTEGER NK_COMMA NK_INTEGER */ - case 443: /* limit_clause_opt ::= LIMIT NK_INTEGER NK_COMMA NK_INTEGER */ yytestcase(yyruleno==443); -{ yymsp[-3].minor.yy636 = createLimitNode(pCxt, &yymsp[0].minor.yy0, &yymsp[-2].minor.yy0); } + case 435: /* slimit_clause_opt ::= SLIMIT NK_INTEGER NK_COMMA NK_INTEGER */ + case 439: /* limit_clause_opt ::= LIMIT NK_INTEGER NK_COMMA NK_INTEGER */ yytestcase(yyruleno==439); +{ yymsp[-3].minor.yy686 = createLimitNode(pCxt, &yymsp[0].minor.yy0, &yymsp[-2].minor.yy0); } break; - case 444: /* subquery ::= NK_LP query_expression NK_RP */ -{ yylhsminor.yy636 = createRawExprNodeExt(pCxt, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0, yymsp[-1].minor.yy636); } - yymsp[-2].minor.yy636 = yylhsminor.yy636; + case 440: /* subquery ::= NK_LP query_expression NK_RP */ +{ yylhsminor.yy686 = createRawExprNodeExt(pCxt, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0, yymsp[-1].minor.yy686); } + yymsp[-2].minor.yy686 = yylhsminor.yy686; break; - case 448: /* sort_specification ::= expression ordering_specification_opt null_ordering_opt */ -{ yylhsminor.yy636 = createOrderByExprNode(pCxt, releaseRawExprNode(pCxt, yymsp[-2].minor.yy636), yymsp[-1].minor.yy430, yymsp[0].minor.yy185); } - yymsp[-2].minor.yy636 = yylhsminor.yy636; + case 444: /* sort_specification ::= expression ordering_specification_opt null_ordering_opt */ +{ yylhsminor.yy686 = 
createOrderByExprNode(pCxt, releaseRawExprNode(pCxt, yymsp[-2].minor.yy686), yymsp[-1].minor.yy428, yymsp[0].minor.yy109); } + yymsp[-2].minor.yy686 = yylhsminor.yy686; break; - case 449: /* ordering_specification_opt ::= */ -{ yymsp[1].minor.yy430 = ORDER_ASC; } + case 445: /* ordering_specification_opt ::= */ +{ yymsp[1].minor.yy428 = ORDER_ASC; } break; - case 450: /* ordering_specification_opt ::= ASC */ -{ yymsp[0].minor.yy430 = ORDER_ASC; } + case 446: /* ordering_specification_opt ::= ASC */ +{ yymsp[0].minor.yy428 = ORDER_ASC; } break; - case 451: /* ordering_specification_opt ::= DESC */ -{ yymsp[0].minor.yy430 = ORDER_DESC; } + case 447: /* ordering_specification_opt ::= DESC */ +{ yymsp[0].minor.yy428 = ORDER_DESC; } break; - case 452: /* null_ordering_opt ::= */ -{ yymsp[1].minor.yy185 = NULL_ORDER_DEFAULT; } + case 448: /* null_ordering_opt ::= */ +{ yymsp[1].minor.yy109 = NULL_ORDER_DEFAULT; } break; - case 453: /* null_ordering_opt ::= NULLS FIRST */ -{ yymsp[-1].minor.yy185 = NULL_ORDER_FIRST; } + case 449: /* null_ordering_opt ::= NULLS FIRST */ +{ yymsp[-1].minor.yy109 = NULL_ORDER_FIRST; } break; - case 454: /* null_ordering_opt ::= NULLS LAST */ -{ yymsp[-1].minor.yy185 = NULL_ORDER_LAST; } + case 450: /* null_ordering_opt ::= NULLS LAST */ +{ yymsp[-1].minor.yy109 = NULL_ORDER_LAST; } break; default: break; /********** End reduce actions ************************************************/ }; - assert( yyruleno<sizeof(yyRuleInfoLhs)/sizeof(yyRuleInfoLhs[0]) ); diff --git a/source/libs/parser/test/mockCatalog.cpp b/source/libs/parser/test/mockCatalog.cpp @@ ... @@ ITableBuilder& builder = mcs->createTableBuilder("performance_schema", "streams", TSDB_SYSTEM_TABLE, 1) - .addColumn("stream_name", TSDB_DATA_TYPE_BINARY, TSDB_TABLE_NAME_LEN); + .addColumn("stream_name", TSDB_DATA_TYPE_BINARY, TSDB_TABLE_NAME_LEN); builder.done(); } } @@ -157,23 +157,29 @@ void generateTestST1(MockCatalogService* mcs) { mcs->createSubTable("test", "st1", "st1s3", 1); } +void generateFunctions(MockCatalogService* mcs) { + mcs->createFunction("udf1", TSDB_FUNC_TYPE_SCALAR, TSDB_DATA_TYPE_INT, tDataTypes[TSDB_DATA_TYPE_INT].bytes, 0); + mcs->createFunction("udf2", TSDB_FUNC_TYPE_AGGREGATE, TSDB_DATA_TYPE_DOUBLE, tDataTypes[TSDB_DATA_TYPE_DOUBLE].bytes, + 8); +} + } // namespace int32_t __catalogGetHandle(const char* clusterId, struct SCatalog** catalogHandle) { return 0; } int32_t __catalogGetTableMeta(struct SCatalog* pCatalog, void* pRpc, const SEpSet* pMgmtEps, const SName* pTableName, STableMeta** pTableMeta) { - return mockCatalogService->catalogGetTableMeta(pTableName, pTableMeta); + return g_mockCatalogService->catalogGetTableMeta(pTableName, pTableMeta); } int32_t __catalogGetTableHashVgroup(struct SCatalog* pCatalog, void* pRpc, const SEpSet* pMgmtEps, const SName* pTableName, SVgroupInfo* vgInfo) { - return mockCatalogService->catalogGetTableHashVgroup(pTableName, vgInfo); + return g_mockCatalogService->catalogGetTableHashVgroup(pTableName, vgInfo); } int32_t __catalogGetTableDistVgInfo(SCatalog* pCtg, void* pRpc, const SEpSet* pMgmtEps, const SName* pTableName, SArray** pVgList) { - return mockCatalogService->catalogGetTableDistVgInfo(pTableName, pVgList); + return g_mockCatalogService->catalogGetTableDistVgInfo(pTableName, pVgList); } int32_t __catalogGetDBVgVersion(SCatalog* pCtg, const char* dbFName, int32_t* version, int64_t* dbId, @@ -182,8 +188,8 @@ int32_t __catalogGetDBVgVersion(SCatalog* pCtg, const char* dbFName, int32_t* ve } int32_t __catalogGetDBVgInfo(SCatalog* pCtg, void* pRpc, const SEpSet* pMgmtEps, const char* dbFName, - SArray** vgroupList) { - return 0; + SArray** pVgList) { + return g_mockCatalogService->catalogGetDBVgInfo(dbFName, pVgList); }
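The __catalog* wrappers above are the seam between the parser under test and the mock: initMetaDataEnv() (below) uses the Stub helper to rebind each real catalog entry point so calls land in g_mockCatalogService instead of a running cluster. A minimal, self-contained sketch of the same idea follows; it uses a plain function pointer where the real harness patches the symbol at runtime, and every name in it (MockCatalog, g_mock, getTableMeta) is illustrative, not the real API:

    #include <cstdint>
    #include <iostream>
    #include <memory>
    #include <string>

    // Stand-in for MockCatalogService; 0 plays the role of TSDB_CODE_SUCCESS.
    struct MockCatalog {
      int32_t getTableMeta(const std::string& tb) const {
        std::cout << "resolved from mock: " << tb << "\n";
        return 0;
      }
    };

    std::unique_ptr<MockCatalog> g_mock;  // counterpart of g_mockCatalogService

    int32_t realGetTableMeta(const std::string&) { return -1; }  // "production" entry point
    int32_t stubGetTableMeta(const std::string& tb) { return g_mock->getTableMeta(tb); }

    // Rebindable seam: callers always go through this pointer.
    int32_t (*getTableMeta)(const std::string&) = realGetTableMeta;

    int main() {
      g_mock.reset(new MockCatalog());
      getTableMeta = stubGetTableMeta;  // analogous to stub.set(catalogGetTableMeta, __catalogGetTableMeta)
      return getTableMeta("test.t1");   // now served entirely in-process
    }

The payoff of the seam is that parser tests need no taosd instance: every table or function the parser asks about is answered from the in-memory catalog built by generateMetaData().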
int32_t __catalogGetDBCfg(SCatalog* pCtg, void* pRpc, const SEpSet* pMgmtEps, const char* dbFName, SDbCfgInfo* pDbCfg) { @@ -196,8 +202,13 @@ int32_t __catalogChkAuth(SCatalog* pCtg, void* pRpc, const SEpSet* pMgmtEps, con return 0; } +int32_t __catalogGetUdfInfo(SCatalog* pCtg, void* pTrans, const SEpSet* pMgmtEps, const char* funcName, + SFuncInfo* pInfo) { + return g_mockCatalogService->catalogGetUdfInfo(funcName, pInfo); +} + void initMetaDataEnv() { - mockCatalogService.reset(new MockCatalogService()); + g_mockCatalogService.reset(new MockCatalogService()); static Stub stub; stub.set(catalogGetHandle, __catalogGetHandle); @@ -209,6 +220,7 @@ void initMetaDataEnv() { stub.set(catalogGetDBVgInfo, __catalogGetDBVgInfo); stub.set(catalogGetDBCfg, __catalogGetDBCfg); stub.set(catalogChkAuth, __catalogChkAuth); + stub.set(catalogGetUdfInfo, __catalogGetUdfInfo); // { // AddrAny any("libcatalog.so"); // std::map result; @@ -252,11 +264,12 @@ } void generateMetaData() { - generateInformationSchema(mockCatalogService.get()); - generatePerformanceSchema(mockCatalogService.get()); - generateTestT1(mockCatalogService.get()); - generateTestST1(mockCatalogService.get()); - mockCatalogService->showTables(); + generateInformationSchema(g_mockCatalogService.get()); + generatePerformanceSchema(g_mockCatalogService.get()); + generateTestT1(g_mockCatalogService.get()); + generateTestST1(g_mockCatalogService.get()); + generateFunctions(g_mockCatalogService.get()); + g_mockCatalogService->showTables(); } -void destroyMetaDataEnv() { mockCatalogService.reset(); } +void destroyMetaDataEnv() { g_mockCatalogService.reset(); } diff --git a/source/libs/parser/test/mockCatalogService.cpp b/source/libs/parser/test/mockCatalogService.cpp index f86cecb9e3399bf6b5b55c59adcc6b99e1950468..4834d2d37711d537d09d0e1b12e2bd8dc9697827 100644 --- a/source/libs/parser/test/mockCatalogService.cpp +++ b/source/libs/parser/test/mockCatalogService.cpp @@ -18,12 +18,13 @@ #include <iomanip> #include <iostream> #include <map> +#include <set> #include "tdatablock.h" #include "tname.h" #include "ttypes.h" -std::unique_ptr<MockCatalogService> mockCatalogService; +std::unique_ptr<MockCatalogService> g_mockCatalogService; class TableBuilder : public ITableBuilder { public: @@ -120,6 +121,57 @@ class MockCatalogServiceImpl { return copyTableVgroup(db, tNameGetTableName(pTableName), vgList); } + int32_t catalogGetDBVgInfo(const char* pDbFName, SArray** pVgList) const { + std::string dbFName(pDbFName); + DbMetaCache::const_iterator it = meta_.find(dbFName.substr(std::string(pDbFName).find_last_of('.') + 1)); + if (meta_.end() == it) { + return TSDB_CODE_FAILED; + } + std::set<int32_t> vgSet; + *pVgList = taosArrayInit(it->second.size(), sizeof(SVgroupInfo)); + for (const auto& vgs : it->second) { + for (const auto& vg : vgs.second->vgs) { + if (0 == vgSet.count(vg.vgId)) { + taosArrayPush(*pVgList, &vg); + vgSet.insert(vg.vgId); + } + } + } + return TSDB_CODE_SUCCESS; + } + + int32_t catalogGetUdfInfo(const std::string& funcName, SFuncInfo* pInfo) const { + auto it = udf_.find(funcName); + if (udf_.end() == it) { + return TSDB_CODE_FAILED; + } + memcpy(pInfo, it->second.get(), sizeof(SFuncInfo)); + return TSDB_CODE_SUCCESS; + } + + int32_t catalogGetAllMeta(const SCatalogReq* pCatalogReq, SMetaData* pMetaData) const { + int32_t code = getAllTableMeta(pCatalogReq->pTableMeta, &pMetaData->pTableMeta); + if (TSDB_CODE_SUCCESS == code) { + code = getAllTableVgroup(pCatalogReq->pTableHash, &pMetaData->pTableHash); + } + if (TSDB_CODE_SUCCESS == code) { + code = getAllDbVgroup(pCatalogReq->pDbVgroup, 
&pMetaData->pDbVgroup); } + if (TSDB_CODE_SUCCESS == code) { + code = getAllDbCfg(pCatalogReq->pDbCfg, &pMetaData->pDbCfg); + } + if (TSDB_CODE_SUCCESS == code) { + code = getAllDbInfo(pCatalogReq->pDbInfo, &pMetaData->pDbInfo); + } + if (TSDB_CODE_SUCCESS == code) { + code = getAllUserAuth(pCatalogReq->pUser, &pMetaData->pUser); + } + if (TSDB_CODE_SUCCESS == code) { + code = getAllUdf(pCatalogReq->pUdf, &pMetaData->pUdfList); + } + return code; + } + TableBuilder& createTableBuilder(const std::string& db, const std::string& tbname, int8_t tableType, int32_t numOfColumns, int32_t numOfTags) { builder_ = TableBuilder::createTableBuilder(tableType, numOfColumns, numOfTags); @@ -155,9 +207,9 @@ // number of backward fills #define NOB(n) ((n) % 2 ? (n) / 2 + 1 : (n) / 2) // center aligned -#define CA(n, s) \ - std::setw(NOF((n) - (s).length())) << "" << (s) << std::setw(NOB((n) - (s).length())) << "" \ - << "|" +#define CA(n, s) \ + std::setw(NOF((n) - int((s).length()))) << "" << (s) << std::setw(NOB((n) - int((s).length()))) << "" \ + << "|" // string field length #define SFL 20 // string field header @@ -203,21 +255,23 @@ } } - std::shared_ptr<STableMeta> getTableMeta(const std::string& db, const std::string& tbname) const { - DbMetaCache::const_iterator it = meta_.find(db); - if (meta_.end() == it) { - return std::shared_ptr<STableMeta>(); - } - TableMetaCache::const_iterator tit = it->second.find(tbname); - if (it->second.end() == tit) { - return std::shared_ptr<STableMeta>(); - } - return tit->second; + void createFunction(const std::string& func, int8_t funcType, int8_t outputType, int32_t outputLen, int32_t bufSize) { + std::shared_ptr<SFuncInfo> info(new SFuncInfo); + strcpy(info->name, func.c_str()); + info->funcType = funcType; + info->scriptType = TSDB_FUNC_SCRIPT_BIN_LIB; + info->outputType = outputType; + info->outputLen = outputLen; + info->bufSize = bufSize; + info->pCode = nullptr; + info->pComment = nullptr; + udf_.insert(std::make_pair(func, info)); } private: typedef std::map<std::string, std::shared_ptr<STableMeta>> TableMetaCache; typedef std::map<std::string, TableMetaCache> DbMetaCache; + typedef std::map<std::string, std::shared_ptr<SFuncInfo>> UdfMetaCache; std::string toDbname(const std::string& dbFullName) const { std::string::size_type n = dbFullName.find("."); @@ -300,9 +354,128 @@ return TSDB_CODE_SUCCESS; } + std::shared_ptr<STableMeta> getTableMeta(const std::string& db, const std::string& tbname) const { + DbMetaCache::const_iterator it = meta_.find(db); + if (meta_.end() == it) { + return std::shared_ptr<STableMeta>(); + } + TableMetaCache::const_iterator tit = it->second.find(tbname); + if (it->second.end() == tit) { + return std::shared_ptr<STableMeta>(); + } + return tit->second; + } + + int32_t getAllTableMeta(SArray* pTableMetaReq, SArray** pTableMetaData) const { + int32_t code = TSDB_CODE_SUCCESS; + if (NULL != pTableMetaReq) { + int32_t ntables = taosArrayGetSize(pTableMetaReq); + *pTableMetaData = taosArrayInit(ntables, POINTER_BYTES); + for (int32_t i = 0; i < ntables; ++i) { + STableMeta* pMeta = NULL; + code = catalogGetTableMeta((const SName*)taosArrayGet(pTableMetaReq, i), &pMeta); + if (TSDB_CODE_SUCCESS == code) { + taosArrayPush(*pTableMetaData, &pMeta); + } else { + break; + } + } + } + return code; + } + + int32_t getAllTableVgroup(SArray* pTableVgroupReq, SArray** pTableVgroupData) const { + int32_t code = TSDB_CODE_SUCCESS; + if (NULL != pTableVgroupReq) { + int32_t ntables = taosArrayGetSize(pTableVgroupReq); + *pTableVgroupData = taosArrayInit(ntables, sizeof(SVgroupInfo)); + for (int32_t i = 0; i < ntables; ++i) { + SVgroupInfo 
vgInfo = {0}; + code = catalogGetTableHashVgroup((const SName*)taosArrayGet(pTableVgroupReq, i), &vgInfo); + if (TSDB_CODE_SUCCESS == code) { + taosArrayPush(*pTableVgroupData, &vgInfo); + } else { + break; + } + } + } + return code; + } + + int32_t getAllDbVgroup(SArray* pDbVgroupReq, SArray** pDbVgroupData) const { + int32_t code = TSDB_CODE_SUCCESS; + if (NULL != pDbVgroupReq) { + int32_t ndbs = taosArrayGetSize(pDbVgroupReq); + *pDbVgroupData = taosArrayInit(ndbs, POINTER_BYTES); + for (int32_t i = 0; i < ndbs; ++i) { + int64_t zeroVg = 0; + taosArrayPush(*pDbVgroupData, &zeroVg); + } + } + return code; + } + + int32_t getAllDbCfg(SArray* pDbCfgReq, SArray** pDbCfgData) const { + int32_t code = TSDB_CODE_SUCCESS; + if (NULL != pDbCfgReq) { + int32_t ndbs = taosArrayGetSize(pDbCfgReq); + *pDbCfgData = taosArrayInit(ndbs, sizeof(SDbCfgInfo)); + for (int32_t i = 0; i < ndbs; ++i) { + SDbCfgInfo dbCfg = {0}; + taosArrayPush(*pDbCfgData, &dbCfg); + } + } + return code; + } + + int32_t getAllDbInfo(SArray* pDbInfoReq, SArray** pDbInfoData) const { + int32_t code = TSDB_CODE_SUCCESS; + if (NULL != pDbInfoReq) { + int32_t ndbs = taosArrayGetSize(pDbInfoReq); + *pDbInfoData = taosArrayInit(ndbs, sizeof(SDbInfo)); + for (int32_t i = 0; i < ndbs; ++i) { + SDbInfo dbInfo = {0}; + taosArrayPush(*pDbInfoData, &dbInfo); + } + } + return code; + } + + int32_t getAllUserAuth(SArray* pUserAuthReq, SArray** pUserAuthData) const { + int32_t code = TSDB_CODE_SUCCESS; + if (NULL != pUserAuthReq) { + int32_t num = taosArrayGetSize(pUserAuthReq); + *pUserAuthData = taosArrayInit(num, sizeof(bool)); + for (int32_t i = 0; i < num; ++i) { + bool pass = true; + taosArrayPush(*pUserAuthData, &pass); + } + } + return code; + } + + int32_t getAllUdf(SArray* pUdfReq, SArray** pUdfData) const { + int32_t code = TSDB_CODE_SUCCESS; + if (NULL != pUdfReq) { + int32_t num = taosArrayGetSize(pUdfReq); + *pUdfData = taosArrayInit(num, sizeof(SFuncInfo)); + for (int32_t i = 0; i < num; ++i) { + SFuncInfo info = {0}; + code = catalogGetUdfInfo((char*)taosArrayGet(pUdfReq, i), &info); + if (TSDB_CODE_SUCCESS == code) { + taosArrayPush(*pUdfData, &info); + } else { + break; + } + } + } + return code; + } + uint64_t id_; std::unique_ptr<TableBuilder> builder_; DbMetaCache meta_; + UdfMetaCache udf_; }; MockCatalogService::MockCatalogService() : impl_(new MockCatalogServiceImpl()) {} @@ -321,9 +494,9 @@ void MockCatalogService::createSubTable(const std::string& db, const std::string void MockCatalogService::showTables() const { impl_->showTables(); } -std::shared_ptr<STableMeta> MockCatalogService::getTableMeta(const std::string& db, - const std::string& tbname) const { - return impl_->getTableMeta(db, tbname); +void MockCatalogService::createFunction(const std::string& func, int8_t funcType, int8_t outputType, int32_t outputLen, + int32_t bufSize) { + impl_->createFunction(func, funcType, outputType, outputLen, bufSize); } int32_t MockCatalogService::catalogGetTableMeta(const SName* pTableName, STableMeta** pTableMeta) const { @@ -337,3 +510,15 @@ int32_t MockCatalogService::catalogGetTableHashVgroup(const SName* pTableName, S int32_t MockCatalogService::catalogGetTableDistVgInfo(const SName* pTableName, SArray** pVgList) const { return impl_->catalogGetTableDistVgInfo(pTableName, pVgList); } + +int32_t MockCatalogService::catalogGetDBVgInfo(const char* pDbFName, SArray** pVgList) const { + return impl_->catalogGetDBVgInfo(pDbFName, pVgList); +} +
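The createFunction()/catalogGetUdfInfo() pair in this mock forms a tiny UDF registry: setup stores one SFuncInfo record per function name, and the parser's lookup is answered with a copy of that record. A hedged stand-alone sketch of the registry, with FuncInfo as a simplified stand-in for SFuncInfo and plain 0/-1 for the TSDB result codes:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>
    #include <map>
    #include <memory>
    #include <string>

    struct FuncInfo {      // simplified stand-in for SFuncInfo
      char    name[64];
      int8_t  funcType;    // scalar vs. aggregate
      int8_t  outputType;  // type of the result column
      int32_t outputLen;
      int32_t bufSize;     // intermediate buffer size, aggregates only
    };

    class UdfRegistry {
     public:
      // Mirrors MockCatalogServiceImpl::createFunction(): register once by name.
      void create(const std::string& func, int8_t funcType, int8_t outputType, int32_t outputLen, int32_t bufSize) {
        auto info = std::make_shared<FuncInfo>();
        std::snprintf(info->name, sizeof(info->name), "%s", func.c_str());
        info->funcType = funcType;
        info->outputType = outputType;
        info->outputLen = outputLen;
        info->bufSize = bufSize;
        udf_.insert({func, info});
      }
      // Mirrors catalogGetUdfInfo(): copy the record out, fail if unregistered.
      int32_t get(const std::string& func, FuncInfo* pInfo) const {
        auto it = udf_.find(func);
        if (udf_.end() == it) return -1;  // TSDB_CODE_FAILED
        std::memcpy(pInfo, it->second.get(), sizeof(FuncInfo));
        return 0;                         // TSDB_CODE_SUCCESS
      }

     private:
      std::map<std::string, std::shared_ptr<FuncInfo>> udf_;
    };

    int main() {
      UdfRegistry reg;
      reg.create("udf1", /*funcType=*/1, /*outputType=*/4, /*outputLen=*/4, /*bufSize=*/0);
      FuncInfo info;
      return reg.get("udf1", &info);  // 0: found
    }

Registering udf1 and udf2 once in generateFunctions() is what lets queries such as SELECT udf1(c1) FROM t1 resolve during translation without a server.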
+int32_t MockCatalogService::catalogGetUdfInfo(const std::string& funcName, SFuncInfo* pInfo) const { + return impl_->catalogGetUdfInfo(funcName, pInfo); +} + +int32_t MockCatalogService::catalogGetAllMeta(const SCatalogReq* pCatalogReq, SMetaData* pMetaData) const { + return impl_->catalogGetAllMeta(pCatalogReq, pMetaData); +} diff --git a/source/libs/parser/test/mockCatalogService.h b/source/libs/parser/test/mockCatalogService.h index edfc40dbc2114611707276d34bbc491714152b26..133a355c591f80f130fd8fe47d444780b88cc660 100644 --- a/source/libs/parser/test/mockCatalogService.h +++ b/source/libs/parser/test/mockCatalogService.h @@ -56,16 +56,19 @@ class MockCatalogService { int32_t numOfColumns, int32_t numOfTags = 0); void createSubTable(const std::string& db, const std::string& stbname, const std::string& tbname, int16_t vgid); void showTables() const; - std::shared_ptr<STableMeta> getTableMeta(const std::string& db, const std::string& tbname) const; + void createFunction(const std::string& func, int8_t funcType, int8_t outputType, int32_t outputLen, int32_t bufSize); int32_t catalogGetTableMeta(const SName* pTableName, STableMeta** pTableMeta) const; int32_t catalogGetTableHashVgroup(const SName* pTableName, SVgroupInfo* vgInfo) const; int32_t catalogGetTableDistVgInfo(const SName* pTableName, SArray** pVgList) const; + int32_t catalogGetDBVgInfo(const char* pDbFName, SArray** pVgList) const; + int32_t catalogGetUdfInfo(const std::string& funcName, SFuncInfo* pInfo) const; + int32_t catalogGetAllMeta(const SCatalogReq* pCatalogReq, SMetaData* pMetaData) const; private: std::unique_ptr<MockCatalogServiceImpl> impl_; }; -extern std::unique_ptr<MockCatalogService> mockCatalogService; +extern std::unique_ptr<MockCatalogService> g_mockCatalogService; #endif // MOCK_CATALOG_SERVICE_H diff --git a/source/libs/parser/test/parInitialCTest.cpp b/source/libs/parser/test/parInitialCTest.cpp index a5e7ef51a797a01ff404dc01275ded61534fde33..65d5194936811a856ef7e36de2f249e0e8bda63b 100644 --- a/source/libs/parser/test/parInitialCTest.cpp +++ b/source/libs/parser/test/parInitialCTest.cpp @@ -228,7 +228,44 @@ TEST_F(ParserInitialCTest, createDnode) { run("CREATE DNODE 1.1.1.1 PORT 9000"); } -// todo CREATE FUNCTION +// CREATE [AGGREGATE] FUNCTION [IF NOT EXISTS] func_name AS library_path OUTPUTTYPE type_name [BUFSIZE value] +TEST_F(ParserInitialCTest, createFunction) { + useDb("root", "test"); + + SCreateFuncReq expect = {0}; + + auto setCreateFuncReqFunc = [&](const char* pUdfName, int8_t outputType, int32_t outputBytes = 0, + int8_t funcType = TSDB_FUNC_TYPE_SCALAR, int8_t igExists = 0, int32_t bufSize = 0) { + memset(&expect, 0, sizeof(SCreateFuncReq)); + strcpy(expect.name, pUdfName); + expect.igExists = igExists; + expect.funcType = funcType; + expect.scriptType = TSDB_FUNC_SCRIPT_BIN_LIB; + expect.outputType = outputType; + expect.outputLen = outputBytes > 0 ? 
outputBytes : tDataTypes[outputType].bytes; + expect.bufSize = bufSize; + }; + + setCheckDdlFunc([&](const SQuery* pQuery, ParserStage stage) { + ASSERT_EQ(nodeType(pQuery->pRoot), QUERY_NODE_CREATE_FUNCTION_STMT); + SCreateFuncReq req = {0}; + ASSERT_TRUE(TSDB_CODE_SUCCESS == tDeserializeSCreateFuncReq(pQuery->pCmdMsg->pMsg, pQuery->pCmdMsg->msgLen, &req)); + + ASSERT_EQ(std::string(req.name), std::string(expect.name)); + ASSERT_EQ(req.igExists, expect.igExists); + ASSERT_EQ(req.funcType, expect.funcType); + ASSERT_EQ(req.scriptType, expect.scriptType); + ASSERT_EQ(req.outputType, expect.outputType); + ASSERT_EQ(req.outputLen, expect.outputLen); + ASSERT_EQ(req.bufSize, expect.bufSize); + }); + + setCreateFuncReqFunc("udf1", TSDB_DATA_TYPE_INT); + // run("CREATE FUNCTION udf1 AS './build/lib/libudf1.so' OUTPUTTYPE INT"); + + setCreateFuncReqFunc("udf2", TSDB_DATA_TYPE_DOUBLE, 0, TSDB_FUNC_TYPE_AGGREGATE, 1, 8); + // run("CREATE AGGREGATE FUNCTION IF NOT EXISTS udf2 AS './build/lib/libudf2.so' OUTPUTTYPE DOUBLE BUFSIZE 8"); +} TEST_F(ParserInitialCTest, createIndexSma) { useDb("root", "test"); @@ -261,14 +298,12 @@ TEST_F(ParserInitialCTest, createStable) { auto setCreateStbReqFunc = [&](const char* pTbname, int8_t igExists = 0, float xFilesFactor = TSDB_DEFAULT_ROLLUP_FILE_FACTOR, - int32_t delay = TSDB_DEFAULT_ROLLUP_DELAY, int32_t ttl = TSDB_DEFAULT_TABLE_TTL, - const char* pComment = nullptr) { + int32_t ttl = TSDB_DEFAULT_TABLE_TTL, const char* pComment = nullptr) { memset(&expect, 0, sizeof(SMCreateStbReq)); int32_t len = snprintf(expect.name, sizeof(expect.name), "0.test.%s", pTbname); expect.name[len] = '\0'; expect.igExists = igExists; expect.xFilesFactor = xFilesFactor; - expect.delay = delay; expect.ttl = ttl; if (nullptr != pComment) { expect.comment = strdup(pComment); @@ -356,7 +391,7 @@ TEST_F(ParserInitialCTest, createStable) { addFieldToCreateStbReqFunc(false, "id", TSDB_DATA_TYPE_INT); run("CREATE STABLE t1(ts TIMESTAMP, c1 INT) TAGS(id INT)"); - setCreateStbReqFunc("t1", 1, 0.1, 2, 100, "test create table"); + setCreateStbReqFunc("t1", 1, 0.1, 100, "test create table"); addFieldToCreateStbReqFunc(true, "ts", TSDB_DATA_TYPE_TIMESTAMP, 0, 0); addFieldToCreateStbReqFunc(true, "c1", TSDB_DATA_TYPE_INT); addFieldToCreateStbReqFunc(true, "c2", TSDB_DATA_TYPE_UINT); @@ -394,7 +429,7 @@ TEST_F(ParserInitialCTest, createStable) { "TAGS (a1 TIMESTAMP, a2 INT, a3 INT UNSIGNED, a4 BIGINT, a5 BIGINT UNSIGNED, a6 FLOAT, a7 DOUBLE, " "a8 BINARY(20), a9 SMALLINT, a10 SMALLINT UNSIGNED COMMENT 'test column comment', a11 TINYINT, " "a12 TINYINT UNSIGNED, a13 BOOL, a14 NCHAR(30), a15 VARCHAR(50)) " - "TTL 100 COMMENT 'test create table' SMA(c1, c2, c3) ROLLUP (MIN) FILE_FACTOR 0.1 DELAY 2"); + "TTL 100 COMMENT 'test create table' SMA(c1, c2, c3) ROLLUP (MIN) FILE_FACTOR 0.1"); } TEST_F(ParserInitialCTest, createStream) { @@ -427,7 +462,7 @@ TEST_F(ParserInitialCTest, createTable) { "TAGS (a1 TIMESTAMP, a2 INT, a3 INT UNSIGNED, a4 BIGINT, a5 BIGINT UNSIGNED, a6 FLOAT, a7 DOUBLE, a8 BINARY(20), " "a9 SMALLINT, a10 SMALLINT UNSIGNED COMMENT 'test column comment', a11 TINYINT, a12 TINYINT UNSIGNED, a13 BOOL, " "a14 NCHAR(30), a15 VARCHAR(50)) " - "TTL 100 COMMENT 'test create table' SMA(c1, c2, c3) ROLLUP (MIN) FILE_FACTOR 0.1 DELAY 2"); + "TTL 100 COMMENT 'test create table' SMA(c1, c2, c3) ROLLUP (MIN) FILE_FACTOR 0.1"); run("CREATE TABLE IF NOT EXISTS t1 USING st1 TAGS(1, 'wxy')"); @@ -440,13 +475,62 @@ TEST_F(ParserInitialCTest, createTable) { TEST_F(ParserInitialCTest, createTopic) { 
useDb("root", "test"); + SCMCreateTopicReq expect = {0}; + + auto setCreateTopicReqFunc = [&](const char* pTopicName, int8_t igExists, const char* pSql, const char* pAst, + const char* pDbName = nullptr, const char* pTbname = nullptr) { + memset(&expect, 0, sizeof(SMCreateStbReq)); + snprintf(expect.name, sizeof(expect.name), "0.%s", pTopicName); + expect.igExists = igExists; + expect.sql = (char*)pSql; + if (nullptr != pTbname) { + expect.subType = TOPIC_SUB_TYPE__TABLE; + snprintf(expect.subStbName, sizeof(expect.subStbName), "0.%s.%s", pDbName, pTbname); + } else if (nullptr != pAst) { + expect.subType = TOPIC_SUB_TYPE__COLUMN; + expect.ast = (char*)pAst; + } else { + expect.subType = TOPIC_SUB_TYPE__DB; + snprintf(expect.subDbName, sizeof(expect.subDbName), "0.%s", pDbName); + } + }; + + setCheckDdlFunc([&](const SQuery* pQuery, ParserStage stage) { + ASSERT_EQ(nodeType(pQuery->pRoot), QUERY_NODE_CREATE_TOPIC_STMT); + SCMCreateTopicReq req = {0}; + ASSERT_TRUE(TSDB_CODE_SUCCESS == + tDeserializeSCMCreateTopicReq(pQuery->pCmdMsg->pMsg, pQuery->pCmdMsg->msgLen, &req)); + + ASSERT_EQ(std::string(req.name), std::string(expect.name)); + ASSERT_EQ(req.igExists, expect.igExists); + ASSERT_EQ(req.subType, expect.subType); + ASSERT_EQ(std::string(req.sql), std::string(expect.sql)); + switch (expect.subType) { + case TOPIC_SUB_TYPE__DB: + ASSERT_EQ(std::string(req.subDbName), std::string(expect.subDbName)); + break; + case TOPIC_SUB_TYPE__TABLE: + ASSERT_EQ(std::string(req.subStbName), std::string(expect.subStbName)); + break; + case TOPIC_SUB_TYPE__COLUMN: + ASSERT_NE(req.ast, nullptr); + break; + default: + ASSERT_TRUE(false); + } + }); + + setCreateTopicReqFunc("tp1", 0, "create topic tp1 as select * from t1", "ast"); run("CREATE TOPIC tp1 AS SELECT * FROM t1"); - run("CREATE TOPIC IF NOT EXISTS tp1 AS SELECT * FROM t1"); + setCreateTopicReqFunc("tp1", 1, "create topic if not exists tp1 as select ts, ceil(c1) from t1", "ast"); + run("CREATE TOPIC IF NOT EXISTS tp1 AS SELECT ts, CEIL(c1) FROM t1"); - run("CREATE TOPIC tp1 AS test"); + setCreateTopicReqFunc("tp1", 0, "create topic tp1 as database test", nullptr, "test"); + run("CREATE TOPIC tp1 AS DATABASE test"); - run("CREATE TOPIC IF NOT EXISTS tp1 AS test"); + setCreateTopicReqFunc("tp1", 1, "create topic if not exists tp1 as stable st1", nullptr, "test", "st1"); + run("CREATE TOPIC IF NOT EXISTS tp1 AS STABLE st1"); } TEST_F(ParserInitialCTest, createUser) { diff --git a/source/libs/parser/test/parInitialDTest.cpp b/source/libs/parser/test/parInitialDTest.cpp index 7cf3337fea3c29afcd0eaac8d6bd160c5ec9aacd..5ad427d964ad1dc47a4fed64b51f89257ae53da6 100644 --- a/source/libs/parser/test/parInitialDTest.cpp +++ b/source/libs/parser/test/parInitialDTest.cpp @@ -32,7 +32,7 @@ TEST_F(ParserInitialDTest, dropBnode) { run("DROP BNODE ON DNODE 1"); } -// DROP CGROUP [ IF EXISTS ] cgroup_name ON topic_name +// DROP CONSUMER GROUP [ IF EXISTS ] cgroup_name ON topic_name TEST_F(ParserInitialDTest, dropCGroup) { useDb("root", "test"); @@ -56,10 +56,10 @@ TEST_F(ParserInitialDTest, dropCGroup) { }); setDropCgroupReqFunc("tp1", "cg1"); - run("DROP CGROUP cg1 ON tp1"); + run("DROP CONSUMER GROUP cg1 ON tp1"); setDropCgroupReqFunc("tp1", "cg1", 1); - run("DROP CGROUP IF EXISTS cg1 ON tp1"); + run("DROP CONSUMER GROUP IF EXISTS cg1 ON tp1"); } // todo drop database @@ -103,6 +103,7 @@ TEST_F(ParserInitialDTest, dropTopic) { } TEST_F(ParserInitialDTest, dropUser) { + login("root"); useDb("root", "test"); run("drop user wxy"); diff --git 
diff --git a/source/libs/parser/test/parInsertTest.cpp b/source/libs/parser/test/parInsertTest.cpp index 7fafec88824111ef8b170ba25f3b092fd7ba1f1a..4d313fca766e8ab8f8d6ba404f7faf2fe833e9e6 100644 --- a/source/libs/parser/test/parInsertTest.cpp +++ b/source/libs/parser/test/parInsertTest.cpp @@ -15,6 +15,7 @@ #include <gtest/gtest.h> +#include "mockCatalogService.h" #include "os.h" #include "parInt.h" @@ -57,6 +58,38 @@ class InsertTest : public Test { return code_; } + int32_t runAsync() { + code_ = parseInsertSyntax(&cxt_, &res_); + if (code_ != TSDB_CODE_SUCCESS) { + cout << "parseInsertSyntax code:" << toString(code_) << ", msg:" << errMagBuf_ << endl; + return code_; + } + + SCatalogReq catalogReq = {0}; + code_ = buildCatalogReq(res_->pMetaCache, &catalogReq); + if (code_ != TSDB_CODE_SUCCESS) { + cout << "buildCatalogReq code:" << toString(code_) << ", msg:" << errMagBuf_ << endl; + return code_; + } + + SMetaData metaData = {0}; + g_mockCatalogService->catalogGetAllMeta(&catalogReq, &metaData); + + code_ = putMetaDataToCache(&catalogReq, &metaData, res_->pMetaCache); + if (code_ != TSDB_CODE_SUCCESS) { + cout << "putMetaDataToCache code:" << toString(code_) << ", msg:" << errMagBuf_ << endl; + return code_; + } + + code_ = parseInsertSql(&cxt_, &res_); + if (code_ != TSDB_CODE_SUCCESS) { + cout << "parseInsertSql code:" << toString(code_) << ", msg:" << errMagBuf_ << endl; + return code_; + } + + return code_; + } + void dumpReslut() { SVnodeModifOpStmt* pStmt = getVnodeModifStmt(res_); size_t num = taosArrayGetSize(pStmt->pDataBlocks); @@ -125,7 +158,7 @@ class InsertTest : public Test { SQuery* res_; }; -// INSERT INTO tb_name VALUES (field1_value, ...) +// INSERT INTO tb_name [(field1_name, ...)] VALUES (field1_value, ...) TEST_F(InsertTest, singleTableSingleRowTest) { setDatabase("root", "test"); @@ -133,6 +166,17 @@ ASSERT_EQ(run(), TSDB_CODE_SUCCESS); dumpReslut(); checkReslut(1, 1); + + bind("insert into t1 (ts, c1, c2, c3, c4, c5) values (now, 1, 'beijing', 3, 4, 5)"); + ASSERT_EQ(run(), TSDB_CODE_SUCCESS); + + bind("insert into t1 values (now, 1, 'beijing', 3, 4, 5)"); + ASSERT_EQ(runAsync(), TSDB_CODE_SUCCESS); + dumpReslut(); + checkReslut(1, 1); + + bind("insert into t1 (ts, c1, c2, c3, c4, c5) values (now, 1, 'beijing', 3, 4, 5)"); + ASSERT_EQ(runAsync(), TSDB_CODE_SUCCESS); } // INSERT INTO tb_name VALUES (field1_value, ...)(field1_value, ...) @@ -140,11 +184,16 @@ TEST_F(InsertTest, singleTableMultiRowTest) { setDatabase("root", "test"); bind( - "insert into t1 values (now, 1, 'beijing', 3, 4, 5)(now+1s, 2, 'shanghai', 6, 7, 8)(now+2s, 3, 'guangzhou', 9, " - "10, 11)"); + "insert into t1 values (now, 1, 'beijing', 3, 4, 5)(now+1s, 2, 'shanghai', 6, 7, 8)" + "(now+2s, 3, 'guangzhou', 9, 10, 11)"); ASSERT_EQ(run(), TSDB_CODE_SUCCESS); dumpReslut(); checkReslut(1, 3); + + bind( + "insert into t1 values (now, 1, 'beijing', 3, 4, 5)(now+1s, 2, 'shanghai', 6, 7, 8)" + "(now+2s, 3, 'guangzhou', 9, 10, 11)"); + ASSERT_EQ(runAsync(), TSDB_CODE_SUCCESS); } // INSERT INTO tb1_name VALUES (field1_value, ...) tb2_name VALUES (field1_value, ...) @@ -155,6 +204,9 @@ TEST_F(InsertTest, multiTableSingleRowTest) { setDatabase("root", "test"); ASSERT_EQ(run(), TSDB_CODE_SUCCESS); dumpReslut(); checkReslut(2, 1); + + bind("insert into st1s1 values (now, 1, \"beijing\") st1s2 values (now, 10, \"131028\")"); + ASSERT_EQ(runAsync(), TSDB_CODE_SUCCESS); } // INSERT INTO tb1_name VALUES (field1_value, ...) tb2_name VALUES (field1_value, ...) 
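runAsync() above walks the four-step asynchronous variant of insert parsing: a syntax-only pass records which metadata the statement needs, buildCatalogReq() turns that into one batched catalog request, putMetaDataToCache() puts the batched answers back into the statement's meta cache, and only then does the full parse run. A compact sketch of that control flow under simplified stand-in types (CatalogReq/MetaData instead of SCatalogReq/SMetaData; all names here are illustrative):

    #include <iostream>
    #include <string>
    #include <vector>

    struct CatalogReq { std::vector<std::string> tables; };    // stand-in for SCatalogReq
    struct MetaData   { std::vector<std::string> tableMeta; }; // stand-in for SMetaData

    // Phase 1: a syntax-only parse records which tables the statement touches.
    CatalogReq parseInsertSyntax(const std::string& sql) {
      (void)sql;
      return CatalogReq{{"t1"}};  // "insert into t1 ..." needs meta for t1
    }

    // The (mock) catalog resolves every requested key in one batch.
    MetaData catalogGetAllMeta(const CatalogReq& req) {
      MetaData md;
      for (const auto& tb : req.tables) md.tableMeta.push_back("meta(" + tb + ")");
      return md;
    }

    // Phase 2: the real parse runs with every lookup answered from the cache.
    int parseInsertSql(const std::string& sql, const MetaData& cache) {
      std::cout << sql << " parsed with " << cache.tableMeta.size() << " cached entries\n";
      return 0;  // TSDB_CODE_SUCCESS
    }

    int main() {
      const std::string sql = "insert into t1 values (now, 1, 'beijing', 3, 4, 5)";
      CatalogReq req = parseInsertSyntax(sql);  // collect metadata keys
      MetaData cache = catalogGetAllMeta(req);  // one round trip to the catalog
      return parseInsertSql(sql, cache);        // completes without further lookups
    }

The point of the split is that the second phase never blocks on the catalog; in production the batched fetch happens asynchronously, while the mock supplies it synchronously here.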
@@ -167,6 +219,11 @@ TEST_F(InsertTest, multiTableMultiRowTest) { ASSERT_EQ(run(), TSDB_CODE_SUCCESS); dumpReslut(); checkReslut(2, 3, 2); + + bind( + "insert into st1s1 values (now, 1, \"beijing\")(now+1s, 2, \"shanghai\")(now+2s, 3, \"guangzhou\")" + " st1s2 values (now, 10, \"131028\")(now+1s, 20, \"132028\")"); + ASSERT_EQ(runAsync(), TSDB_CODE_SUCCESS); } // INSERT INTO @@ -181,6 +238,21 @@ TEST_F(InsertTest, autoCreateTableTest) { ASSERT_EQ(run(), TSDB_CODE_SUCCESS); dumpReslut(); checkReslut(1, 3); + + bind( + "insert into st1s1 using st1 (tag1, tag2) tags(1, 'wxy') values (now, 1, \"beijing\")" + "(now+1s, 2, \"shanghai\")(now+2s, 3, \"guangzhou\")"); + ASSERT_EQ(run(), TSDB_CODE_SUCCESS); + + bind( + "insert into st1s1 using st1 tags(1, 'wxy') values (now, 1, \"beijing\")(now+1s, 2, \"shanghai\")(now+2s, 3, " + "\"guangzhou\")"); + ASSERT_EQ(runAsync(), TSDB_CODE_SUCCESS); + + bind( + "insert into st1s1 using st1 (tag1, tag2) tags(1, 'wxy') values (now, 1, \"beijing\")" + "(now+1s, 2, \"shanghai\")(now+2s, 3, \"guangzhou\")"); + ASSERT_EQ(runAsync(), TSDB_CODE_SUCCESS); } TEST_F(InsertTest, toleranceTest) { @@ -190,4 +262,9 @@ TEST_F(InsertTest, toleranceTest) { ASSERT_NE(run(), TSDB_CODE_SUCCESS); bind("insert into t"); ASSERT_NE(run(), TSDB_CODE_SUCCESS); + + bind("insert into"); + ASSERT_NE(runAsync(), TSDB_CODE_SUCCESS); + bind("insert into t"); + ASSERT_NE(runAsync(), TSDB_CODE_SUCCESS); } diff --git a/source/libs/parser/test/parSelectTest.cpp b/source/libs/parser/test/parSelectTest.cpp index f00500faa4963f4efef561bce103658585a029a6..a5192595f0be83afa459429748dab3d8e9b65c4e 100644 --- a/source/libs/parser/test/parSelectTest.cpp +++ b/source/libs/parser/test/parSelectTest.cpp @@ -44,6 +44,8 @@ TEST_F(ParserSelectTest, constant) { "timestamp '2022-02-09 17:30:20', true, false, 15s FROM t1"); run("SELECT 123 + 45 FROM t1 WHERE 2 - 1"); + + run("SELECT * FROM t1 WHERE -2"); } TEST_F(ParserSelectTest, expression) { @@ -76,6 +78,12 @@ TEST_F(ParserSelectTest, pseudoColumnSemanticCheck) { run("SELECT TBNAME FROM (SELECT * FROM st1s1)", TSDB_CODE_PAR_INVALID_TBNAME, PARSER_STAGE_TRANSLATE); } +TEST_F(ParserSelectTest, aggFunc) { + useDb("root", "test"); + + run("SELECT LEASTSQUARES(c1, -1, 1) FROM t1"); +} + TEST_F(ParserSelectTest, multiResFunc) { useDb("root", "test"); @@ -141,6 +149,14 @@ TEST_F(ParserSelectTest, IndefiniteRowsFuncSemanticCheck) { // run("SELECT DIFF(c1) FROM t1 INTERVAL(10s)"); } +TEST_F(ParserSelectTest, useDefinedFunc) { + useDb("root", "test"); + + run("SELECT udf1(c1) FROM t1"); + + run("SELECT udf2(c1) FROM t1 GROUP BY c2"); +} + TEST_F(ParserSelectTest, groupBy) { useDb("root", "test"); @@ -236,6 +252,8 @@ TEST_F(ParserSelectTest, semanticError) { // TSDB_CODE_PAR_AMBIGUOUS_COLUMN run("SELECT c2 FROM t1 tt1, t1 tt2 WHERE tt1.c1 = tt2.c1", TSDB_CODE_PAR_AMBIGUOUS_COLUMN, PARSER_STAGE_TRANSLATE); + run("SELECT c2 FROM (SELECT c1 c2, c2 FROM t1)", TSDB_CODE_PAR_AMBIGUOUS_COLUMN, PARSER_STAGE_TRANSLATE); + // TSDB_CODE_PAR_WRONG_VALUE_TYPE run("SELECT timestamp '2010a' FROM t1", TSDB_CODE_PAR_WRONG_VALUE_TYPE, PARSER_STAGE_TRANSLATE); diff --git a/source/libs/parser/test/parTestMain.cpp b/source/libs/parser/test/parTestMain.cpp index ebc83fb21981e56666b82ec6a5a08a63cd7f0c87..820b8cca3cdc02633982a3ea797aa605db1e3fd3 100644 --- a/source/libs/parser/test/parTestMain.cpp +++ b/source/libs/parser/test/parTestMain.cpp @@ -37,6 +37,7 @@ class ParserEnv : public testing::Environment { virtual void SetUp() { initMetaDataEnv(); generateMetaData(); + 
initLog(TD_TMP_DIR_PATH "td"); } virtual void TearDown() { @@ -47,16 +48,55 @@ ParserEnv() {} virtual ~ParserEnv() {} + + private: + void initLog(const char* path) { + int32_t logLevel = getLogLevel(); + dDebugFlag = logLevel; + vDebugFlag = logLevel; + mDebugFlag = logLevel; + cDebugFlag = logLevel; + jniDebugFlag = logLevel; + tmrDebugFlag = logLevel; + uDebugFlag = logLevel; + rpcDebugFlag = logLevel; + qDebugFlag = logLevel; + wDebugFlag = logLevel; + sDebugFlag = logLevel; + tsdbDebugFlag = logLevel; + tsLogEmbedded = 1; + tsAsyncLog = 0; + + taosRemoveDir(path); + taosMkDir(path); + tstrncpy(tsLogDir, path, PATH_MAX); + if (taosInitLog("taoslog", 1) != 0) { + std::cout << "failed to init log file" << std::endl; + } + } }; static void parseArg(int argc, char* argv[]) { - int opt = 0; - const char* optstring = ""; - static struct option long_options[] = {{"dump", no_argument, NULL, 'd'}, {0, 0, 0, 0}}; + int opt = 0; + const char* optstring = ""; + // clang-format off + static struct option long_options[] = { + {"dump", no_argument, NULL, 'd'}, + {"async", required_argument, NULL, 'a'}, + {"skipSql", required_argument, NULL, 's'}, + {0, 0, 0, 0} + }; + // clang-format on while ((opt = getopt_long(argc, argv, optstring, long_options, NULL)) != -1) { switch (opt) { case 'd': - g_isDump = true; + g_dump = true; break; + case 'a': + setAsyncFlag(optarg); + break; + case 's': + setSkipSqlNum(optarg); break; default: break; diff --git a/source/libs/parser/test/parTestUtil.cpp b/source/libs/parser/test/parTestUtil.cpp index 250ac1c52885f10d45a4ef96321d410f115b9255..fab7ed35b1cb408a5cdd6f455994da07a26596fd 100644 --- a/source/libs/parser/test/parTestUtil.cpp +++ b/source/libs/parser/test/parTestUtil.cpp @@ -17,7 +17,10 @@ #include <algorithm> #include <array> +#include +#include "catalog.h" +#include "mockCatalogService.h" #include "parInt.h" using namespace std; @@ -41,22 +44,40 @@ namespace ParserTest { } \ } while (0); -bool g_isDump = false; +bool g_dump = false; +bool g_testAsyncApis = true; +int32_t g_logLevel = 131; +int32_t g_skipSql = 0; + +void setAsyncFlag(const char* pFlag) { g_testAsyncApis = stoi(pFlag) > 0 ? 
true : false; } +void setSkipSqlNum(const char* pNum) { g_skipSql = stoi(pNum); } struct TerminateFlag : public exception { const char* what() const throw() { return "success and terminate"; } }; +void setLogLevel(const char* pLogLevel) { g_logLevel = stoi(pLogLevel); } + +int32_t getLogLevel() { return g_logLevel; } + class ParserTestBaseImpl { public: ParserTestBaseImpl(ParserTestBase* pBase) : pBase_(pBase) {} + void login(const std::string& user) { caseEnv_.user_ = user; } + void useDb(const string& acctId, const string& db) { caseEnv_.acctId_ = acctId; caseEnv_.db_ = db; + caseEnv_.nsql_ = g_skipSql; } void run(const string& sql, int32_t expect, ParserStage checkStage) { + if (caseEnv_.nsql_ > 0) { + --(caseEnv_.nsql_); + return; + } + reset(expect, checkStage); try { SParseContext cxt = {0}; @@ -65,11 +86,13 @@ class ParserTestBaseImpl { SQuery* pQuery = nullptr; doParse(&cxt, &pQuery); + doAuthenticate(&cxt, pQuery); + doTranslate(&cxt, pQuery); doCalculateConstant(&cxt, pQuery); - if (g_isDump) { + if (g_dump) { dump(); } } catch (const TerminateFlag& e) { @@ -79,12 +102,20 @@ class ParserTestBaseImpl { dump(); throw; } + + if (g_testAsyncApis) { + runAsync(sql, expect, checkStage); + } } private: struct caseEnv { - string acctId_; - string db_; + string acctId_; + string user_; + string db_; + int32_t nsql_; + + caseEnv() : user_("wangxiaoyu"), nsql_(0) {} }; struct stmtEnv { @@ -144,16 +175,19 @@ class ParserTestBaseImpl { cout << res_.calcConstAst_ << endl; } - void setParseContext(const string& sql, SParseContext* pCxt) { + void setParseContext(const string& sql, SParseContext* pCxt, bool async = false) { stmtEnv_.sql_ = sql; transform(stmtEnv_.sql_.begin(), stmtEnv_.sql_.end(), stmtEnv_.sql_.begin(), ::tolower); pCxt->acctId = atoi(caseEnv_.acctId_.c_str()); pCxt->db = caseEnv_.db_.c_str(); + pCxt->pUser = caseEnv_.user_.c_str(); + pCxt->isSuperUser = caseEnv_.user_ == "root"; pCxt->pSql = stmtEnv_.sql_.c_str(); pCxt->sqlLen = stmtEnv_.sql_.length(); pCxt->pMsg = stmtEnv_.msgBuf_.data(); pCxt->msgLen = stmtEnv_.msgBuf_.max_size(); + pCxt->async = async; } void doParse(SParseContext* pCxt, SQuery** pQuery) { @@ -162,6 +196,25 @@ class ParserTestBaseImpl { res_.parsedAst_ = toString((*pQuery)->pRoot); } + void doCollectMetaKey(SParseContext* pCxt, SQuery* pQuery) { + DO_WITH_THROW(collectMetaKey, pCxt, pQuery); + ASSERT_NE(pQuery->pMetaCache, nullptr); + } + + void doBuildCatalogReq(const SParseMetaCache* pMetaCache, SCatalogReq* pCatalogReq) { + DO_WITH_THROW(buildCatalogReq, pMetaCache, pCatalogReq); + } + + void doGetAllMeta(const SCatalogReq* pCatalogReq, SMetaData* pMetaData) { + DO_WITH_THROW(g_mockCatalogService->catalogGetAllMeta, pCatalogReq, pMetaData); + } + + void doPutMetaDataToCache(const SCatalogReq* pCatalogReq, const SMetaData* pMetaData, SParseMetaCache* pMetaCache) { + DO_WITH_THROW(putMetaDataToCache, pCatalogReq, pMetaData, pMetaCache); + } + + void doAuthenticate(SParseContext* pCxt, SQuery* pQuery) { DO_WITH_THROW(authenticate, pCxt, pQuery); } + void doTranslate(SParseContext* pCxt, SQuery* pQuery) { DO_WITH_THROW(translate, pCxt, pQuery); checkQuery(pQuery, PARSER_STAGE_TRANSLATE); @@ -184,6 +237,59 @@ class ParserTestBaseImpl { void checkQuery(const SQuery* pQuery, ParserStage stage) { pBase_->checkDdl(pQuery, stage); } + void runAsync(const string& sql, int32_t expect, ParserStage checkStage) { + reset(expect, checkStage); + try { + SParseContext cxt = {0}; + setParseContext(sql, &cxt, true); + + SQuery* pQuery = nullptr; + doParse(&cxt, &pQuery); + + 
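+ // Two-phase async flow: doCollectMetaKey first records every object the
+ // statement references, doBuildCatalogReq batches those keys into a single
+ // catalog request, and the worker thread below resolves it against the mock
+ // catalog and seeds the parse-time cache before authenticate, translate and
+ // constant folding run. While g_testAsyncApis is set (the default), this
+ // pass also runs after every synchronous case; it can be toggled per run
+ // via the new command-line flags, e.g. `--async 0 --skipSql 10`.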
doCollectMetaKey(&cxt, pQuery); + + SCatalogReq catalogReq = {0}; + doBuildCatalogReq(pQuery->pMetaCache, &catalogReq); + + string err; + thread t1([&]() { + try { + SMetaData metaData = {0}; + doGetAllMeta(&catalogReq, &metaData); + + doPutMetaDataToCache(&catalogReq, &metaData, pQuery->pMetaCache); + + doAuthenticate(&cxt, pQuery); + + doTranslate(&cxt, pQuery); + + doCalculateConstant(&cxt, pQuery); + } catch (const TerminateFlag& e) { + // success and terminate + } catch (const runtime_error& e) { + err = e.what(); + } catch (...) { + err = "unknown error"; + } + }); + + t1.join(); + if (!err.empty()) { + throw runtime_error(err); + } + + if (g_dump) { + dump(); + } + } catch (const TerminateFlag& e) { + // success and terminate + return; + } catch (...) { + dump(); + throw; + } + } + caseEnv caseEnv_; stmtEnv stmtEnv_; stmtRes res_; @@ -194,6 +300,8 @@ ParserTestBase::ParserTestBase() : impl_(new ParserTestBaseImpl(this)) {} ParserTestBase::~ParserTestBase() {} +void ParserTestBase::login(const std::string& user) { return impl_->login(user); } + void ParserTestBase::useDb(const std::string& acctId, const std::string& db) { impl_->useDb(acctId, db); } void ParserTestBase::run(const std::string& sql, int32_t expect, ParserStage checkStage) { diff --git a/source/libs/parser/test/parTestUtil.h b/source/libs/parser/test/parTestUtil.h index c7d7ead8dbc8a5d6b7a45cde0552e9e979ea07ec..44be7a24746ecde078f69555c88e4d85344b8313 100644 --- a/source/libs/parser/test/parTestUtil.h +++ b/source/libs/parser/test/parTestUtil.h @@ -34,6 +34,7 @@ class ParserTestBase : public testing::Test { ParserTestBase(); virtual ~ParserTestBase(); + void login(const std::string& user); void useDb(const std::string& acctId, const std::string& db); void run(const std::string& sql, int32_t expect = TSDB_CODE_SUCCESS, ParserStage checkStage = PARSER_STAGE_ALL); @@ -63,7 +64,12 @@ class ParserDdlTest : public ParserTestBase { std::function checkDdl_; }; -extern bool g_isDump; +extern bool g_dump; + +extern void setAsyncFlag(const char* pFlag); +extern void setLogLevel(const char* pLogLevel); +extern int32_t getLogLevel(); +extern void setSkipSqlNum(const char* pNum); } // namespace ParserTest diff --git a/source/libs/planner/inc/planInt.h b/source/libs/planner/inc/planInt.h index 6a18a267e2e3909fa57afc3af99105c0663b5caa..1a8c7657df4abc1661e42ea6275281981ee79086 100644 --- a/source/libs/planner/inc/planInt.h +++ b/source/libs/planner/inc/planInt.h @@ -36,6 +36,7 @@ extern "C" { #define planTrace(param, ...) 
qTrace("PLAN: " param, __VA_ARGS__) int32_t generateUsageErrMsg(char* pBuf, int32_t len, int32_t errCode, ...); +int32_t createColumnByRewriteExps(SNodeList* pExprs, SNodeList** pList); int32_t createLogicPlan(SPlanContext* pCxt, SLogicNode** pLogicNode); int32_t optimizeLogicPlan(SPlanContext* pCxt, SLogicNode* pLogicNode); diff --git a/source/libs/planner/src/planLogicCreater.c b/source/libs/planner/src/planLogicCreater.c index 467b26b7c4af61a8f0cca3d706f34c0133995fe3..1cf7ae22f9eb5e64220ae443d5353df062a148a4 100644 --- a/source/libs/planner/src/planLogicCreater.c +++ b/source/libs/planner/src/planLogicCreater.c @@ -124,6 +124,7 @@ static int32_t createChildLogicNode(SLogicPlanContext* pCxt, SSelectStmt* pSelec SLogicNode* pNode = NULL; int32_t code = func(pCxt, pSelect, &pNode); if (TSDB_CODE_SUCCESS == code && NULL != pNode) { + pNode->precision = pSelect->precision; code = pushLogicNode(pCxt, pRoot, pNode); } if (TSDB_CODE_SUCCESS != code) { @@ -132,56 +133,56 @@ static int32_t createChildLogicNode(SLogicPlanContext* pCxt, SSelectStmt* pSelec return code; } -typedef struct SCreateColumnCxt { - int32_t errCode; - SNodeList* pList; -} SCreateColumnCxt; - -static EDealRes doCreateColumn(SNode* pNode, void* pContext) { - SCreateColumnCxt* pCxt = (SCreateColumnCxt*)pContext; - switch (nodeType(pNode)) { - case QUERY_NODE_COLUMN: { - SNode* pCol = nodesCloneNode(pNode); - if (NULL == pCol) { - return DEAL_RES_ERROR; - } - return (TSDB_CODE_SUCCESS == nodesListAppend(pCxt->pList, pCol) ? DEAL_RES_IGNORE_CHILD : DEAL_RES_ERROR); - } - case QUERY_NODE_OPERATOR: - case QUERY_NODE_LOGIC_CONDITION: - case QUERY_NODE_FUNCTION: { - SExprNode* pExpr = (SExprNode*)pNode; - SColumnNode* pCol = (SColumnNode*)nodesMakeNode(QUERY_NODE_COLUMN); - if (NULL == pCol) { - return DEAL_RES_ERROR; - } - pCol->node.resType = pExpr->resType; - strcpy(pCol->colName, pExpr->aliasName); - return (TSDB_CODE_SUCCESS == nodesListAppend(pCxt->pList, pCol) ? DEAL_RES_IGNORE_CHILD : DEAL_RES_ERROR); - } - default: - break; - } - - return DEAL_RES_CONTINUE; -} - -static int32_t createColumnByRewriteExps(SLogicPlanContext* pCxt, SNodeList* pExprs, SNodeList** pList) { - SCreateColumnCxt cxt = {.errCode = TSDB_CODE_SUCCESS, .pList = (NULL == *pList ? nodesMakeList() : *pList)}; - if (NULL == cxt.pList) { - return TSDB_CODE_OUT_OF_MEMORY; - } - - nodesWalkExprs(pExprs, doCreateColumn, &cxt); - if (TSDB_CODE_SUCCESS != cxt.errCode) { - nodesDestroyList(cxt.pList); - return cxt.errCode; - } - if (NULL == *pList) { - *pList = cxt.pList; - } - return cxt.errCode; -} +// typedef struct SCreateColumnCxt { +// int32_t errCode; +// SNodeList* pList; +// } SCreateColumnCxt; + +// static EDealRes doCreateColumn(SNode* pNode, void* pContext) { +// SCreateColumnCxt* pCxt = (SCreateColumnCxt*)pContext; +// switch (nodeType(pNode)) { +// case QUERY_NODE_COLUMN: { +// SNode* pCol = nodesCloneNode(pNode); +// if (NULL == pCol) { +// return DEAL_RES_ERROR; +// } +// return (TSDB_CODE_SUCCESS == nodesListAppend(pCxt->pList, pCol) ? DEAL_RES_IGNORE_CHILD : DEAL_RES_ERROR); +// } +// case QUERY_NODE_OPERATOR: +// case QUERY_NODE_LOGIC_CONDITION: +// case QUERY_NODE_FUNCTION: { +// SExprNode* pExpr = (SExprNode*)pNode; +// SColumnNode* pCol = (SColumnNode*)nodesMakeNode(QUERY_NODE_COLUMN); +// if (NULL == pCol) { +// return DEAL_RES_ERROR; +// } +// pCol->node.resType = pExpr->resType; +// strcpy(pCol->colName, pExpr->aliasName); +// return (TSDB_CODE_SUCCESS == nodesListAppend(pCxt->pList, pCol) ? 
DEAL_RES_IGNORE_CHILD : DEAL_RES_ERROR); +// } +// default: +// break; +// } + +// return DEAL_RES_CONTINUE; +// } + +// static int32_t createColumnByRewriteExps(SNodeList* pExprs, SNodeList** pList) { +// SCreateColumnCxt cxt = {.errCode = TSDB_CODE_SUCCESS, .pList = (NULL == *pList ? nodesMakeList() : *pList)}; +// if (NULL == cxt.pList) { +// return TSDB_CODE_OUT_OF_MEMORY; +// } + +// nodesWalkExprs(pExprs, doCreateColumn, &cxt); +// if (TSDB_CODE_SUCCESS != cxt.errCode) { +// nodesDestroyList(cxt.pList); +// return cxt.errCode; +// } +// if (NULL == *pList) { +// *pList = cxt.pList; +// } +// return cxt.errCode; +// } static EScanType getScanType(SLogicPlanContext* pCxt, SNodeList* pScanPseudoCols, SNodeList* pScanCols, STableMeta* pMeta) { @@ -293,10 +294,10 @@ static int32_t createScanLogicNode(SLogicPlanContext* pCxt, SSelectStmt* pSelect // set output if (TSDB_CODE_SUCCESS == code) { - code = createColumnByRewriteExps(pCxt, pScan->pScanCols, &pScan->node.pTargets); + code = createColumnByRewriteExps(pScan->pScanCols, &pScan->node.pTargets); } if (TSDB_CODE_SUCCESS == code) { - code = createColumnByRewriteExps(pCxt, pScan->pScanPseudoCols, &pScan->node.pTargets); + code = createColumnByRewriteExps(pScan->pScanPseudoCols, &pScan->node.pTargets); } if (TSDB_CODE_SUCCESS == code) { @@ -400,6 +401,7 @@ static int32_t createLogicNodeByTable(SLogicPlanContext* pCxt, SSelectStmt* pSel nodesDestroyNode(pNode); return TSDB_CODE_OUT_OF_MEMORY; } + pNode->precision = pSelect->precision; *pLogicNode = pNode; } return code; @@ -461,10 +463,10 @@ static int32_t createAggLogicNode(SLogicPlanContext* pCxt, SSelectStmt* pSelect, // set the output if (TSDB_CODE_SUCCESS == code && NULL != pAgg->pGroupKeys) { - code = createColumnByRewriteExps(pCxt, pAgg->pGroupKeys, &pAgg->node.pTargets); + code = createColumnByRewriteExps(pAgg->pGroupKeys, &pAgg->node.pTargets); } if (TSDB_CODE_SUCCESS == code && NULL != pAgg->pAggFuncs) { - code = createColumnByRewriteExps(pCxt, pAgg->pAggFuncs, &pAgg->node.pTargets); + code = createColumnByRewriteExps(pAgg->pAggFuncs, &pAgg->node.pTargets); } if (TSDB_CODE_SUCCESS == code) { @@ -485,12 +487,16 @@ static int32_t createWindowLogicNodeFinalize(SLogicPlanContext* pCxt, SSelectStm pWindow->watermark = pCxt->pPlanCxt->watermark; } + if (pCxt->pPlanCxt->rSmaQuery) { + pWindow->filesFactor = pCxt->pPlanCxt->filesFactor; + } + if (TSDB_CODE_SUCCESS == code) { code = rewriteExprForSelect(pWindow->pFuncs, pSelect, SQL_CLAUSE_WINDOW); } if (TSDB_CODE_SUCCESS == code) { - code = createColumnByRewriteExps(pCxt, pWindow->pFuncs, &pWindow->node.pTargets); + code = createColumnByRewriteExps(pWindow->pFuncs, &pWindow->node.pTargets); } pSelect->hasAggFuncs = false; @@ -556,6 +562,7 @@ static int32_t createWindowLogicNodeByInterval(SLogicPlanContext* pCxt, SInterva pWindow->sliding = (NULL != pInterval->pSliding ? ((SValueNode*)pInterval->pSliding)->datum.i : pWindow->interval); pWindow->slidingUnit = (NULL != pInterval->pSliding ? 
((SValueNode*)pInterval->pSliding)->unit : pWindow->intervalUnit); + pWindow->stmInterAlgo = STREAM_INTERVAL_ALGO_SINGLE; pWindow->pTspk = nodesCloneNode(pInterval->pCol); if (NULL == pWindow->pTspk) { @@ -760,7 +767,7 @@ static int32_t createDistinctLogicNode(SLogicPlanContext* pCxt, SSelectStmt* pSe // set the output if (TSDB_CODE_SUCCESS == code) { - code = createColumnByRewriteExps(pCxt, pAgg->pGroupKeys, &pAgg->node.pTargets); + code = createColumnByRewriteExps(pAgg->pGroupKeys, &pAgg->node.pTargets); } if (TSDB_CODE_SUCCESS == code) { @@ -907,7 +914,7 @@ static int32_t createSetOpAggLogicNode(SLogicPlanContext* pCxt, SSetOperator* pS // set the output if (TSDB_CODE_SUCCESS == code) { - code = createColumnByRewriteExps(pCxt, pAgg->pGroupKeys, &pAgg->node.pTargets); + code = createColumnByRewriteExps(pAgg->pGroupKeys, &pAgg->node.pTargets); } if (TSDB_CODE_SUCCESS == code) { diff --git a/source/libs/planner/src/planOptimizer.c b/source/libs/planner/src/planOptimizer.c index 4d489f68e7c4ff042e6f0d0c82bbd98a6dbbfb2b..5f88fc40e54c5e000a6e4506b30a2063acfbc8f1 100644 --- a/source/libs/planner/src/planOptimizer.c +++ b/source/libs/planner/src/planOptimizer.c @@ -99,7 +99,8 @@ static bool osdMayBeOptimized(SLogicNode* pNode) { return false; } // todo: release after function splitting - if (TSDB_SUPER_TABLE == ((SScanLogicNode*)pNode)->pMeta->tableType) { + if (TSDB_SUPER_TABLE == ((SScanLogicNode*)pNode)->pMeta->tableType && + SCAN_TYPE_STREAM != ((SScanLogicNode*)pNode)->scanType) { return false; } if (NULL == pNode->pParent || (QUERY_NODE_LOGIC_PLAN_WINDOW != nodeType(pNode->pParent) && @@ -223,6 +224,10 @@ static void setScanWindowInfo(SScanLogicNode* pScan) { pScan->sliding = ((SWindowLogicNode*)pScan->node.pParent)->sliding; pScan->intervalUnit = ((SWindowLogicNode*)pScan->node.pParent)->intervalUnit; pScan->slidingUnit = ((SWindowLogicNode*)pScan->node.pParent)->slidingUnit; + pScan->triggerType = ((SWindowLogicNode*)pScan->node.pParent)->triggerType; + pScan->watermark = ((SWindowLogicNode*)pScan->node.pParent)->watermark; + pScan->tsColId = ((SColumnNode*)((SWindowLogicNode*)pScan->node.pParent)->pTspk)->colId; + pScan->filesFactor = ((SWindowLogicNode*)pScan->node.pParent)->filesFactor; } } diff --git a/source/libs/planner/src/planPhysiCreater.c b/source/libs/planner/src/planPhysiCreater.c index 0f88a54e913c57c1fdc848317d7b8a85a4ac0e88..737c0fc1d557b939162e63ec3c5d4e07ea0ebb57 100644 --- a/source/libs/planner/src/planPhysiCreater.c +++ b/source/libs/planner/src/planPhysiCreater.c @@ -468,6 +468,7 @@ static int32_t createTagScanPhysiNode(SPhysiPlanContext* pCxt, SSubplan* pSubpla return TSDB_CODE_OUT_OF_MEMORY; } vgroupInfoToNodeAddr(pScanLogicNode->pVgroupList->vgroups, &pSubplan->execNode); + SQueryNodeLoad node = {.addr = pSubplan->execNode, .load = 0}; - taosArrayPush(pCxt->pExecNodeList, &pSubplan->execNode); + taosArrayPush(pCxt->pExecNodeList, &node); return createScanPhysiNodeFinalize(pCxt, pSubplan, pScanLogicNode, (SScanPhysiNode*)pTagScan, pPhyNode); }
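Across the scan and modify paths in this file, the entries pushed onto pCxt->pExecNodeList change from bare SQueryNodeAddr values to SQueryNodeLoad wrappers pairing the address with a load figure (zero at plan time), presumably so the scheduler can later weigh candidate nodes by load. A minimal sketch of the pattern each call site converges on (names as in this patch):

    SQueryNodeLoad node = {.addr = pSubplan->execNode, .load = 0};
    taosArrayPush(pCxt->pExecNodeList, &node);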
@@ -489,7 +490,8 @@ static int32_t createTableScanPhysiNode(SPhysiPlanContext* pCxt, SSubplan* pSubp pSubplan->execNodeStat.tableNum = pScanLogicNode->pVgroupList->vgroups[0].numOfTable; } if (pCxt->pExecNodeList) { - taosArrayPush(pCxt->pExecNodeList, &pSubplan->execNode); + SQueryNodeLoad node = {.addr = pSubplan->execNode, .load = 0}; + taosArrayPush(pCxt->pExecNodeList, &node); } tNameGetFullDbName(&pScanLogicNode->tableName, pSubplan->dbFName); pTableScan->dataRequired = pScanLogicNode->dataRequired; @@ -503,6 +505,10 @@ static int32_t createTableScanPhysiNode(SPhysiPlanContext* pCxt, SSubplan* pSubp pTableScan->sliding = pScanLogicNode->sliding; pTableScan->intervalUnit = pScanLogicNode->intervalUnit; pTableScan->slidingUnit = pScanLogicNode->slidingUnit; + pTableScan->triggerType = pScanLogicNode->triggerType; + pTableScan->watermark = pScanLogicNode->watermark; + pTableScan->tsColId = pScanLogicNode->tsColId; + pTableScan->filesFactor = pScanLogicNode->filesFactor; return createScanPhysiNodeFinalize(pCxt, pSubplan, pScanLogicNode, (SScanPhysiNode*)pTableScan, pPhyNode); } @@ -520,10 +526,11 @@ static int32_t createSystemTableScanPhysiNode(SPhysiPlanContext* pCxt, SSubplan* pScan->accountId = pCxt->pPlanCxt->acctId; if (0 == strcmp(pScanLogicNode->tableName.tname, TSDB_INS_TABLE_USER_TABLES)) { vgroupInfoToNodeAddr(pScanLogicNode->pVgroupList->vgroups, &pSubplan->execNode); + SQueryNodeLoad node = {.addr = pSubplan->execNode, .load = 0}; - taosArrayPush(pCxt->pExecNodeList, &pSubplan->execNode); + taosArrayPush(pCxt->pExecNodeList, &node); } else { - SQueryNodeAddr addr = {.nodeId = MNODE_HANDLE, .epSet = pCxt->pPlanCxt->mgmtEpSet}; - taosArrayPush(pCxt->pExecNodeList, &addr); + SQueryNodeLoad node = {.addr = {.nodeId = MNODE_HANDLE, .epSet = pCxt->pPlanCxt->mgmtEpSet}, .load = 0}; + taosArrayPush(pCxt->pExecNodeList, &node); } pScan->mgmtEpSet = pCxt->pPlanCxt->mgmtEpSet; tNameGetFullDbName(&pScanLogicNode->tableName, pSubplan->dbFName); @@ -832,7 +839,7 @@ static int32_t createProjectPhysiNode(SPhysiPlanContext* pCxt, SNodeList* pChild static int32_t doCreateExchangePhysiNode(SPhysiPlanContext* pCxt, SExchangeLogicNode* pExchangeLogicNode, SPhysiNode** pPhyNode) { SExchangePhysiNode* pExchange = (SExchangePhysiNode*)makePhysiNode( - pCxt, pExchangeLogicNode->precision, (SLogicNode*)pExchangeLogicNode, QUERY_NODE_PHYSICAL_PLAN_EXCHANGE); + pCxt, pExchangeLogicNode->node.precision, (SLogicNode*)pExchangeLogicNode, QUERY_NODE_PHYSICAL_PLAN_EXCHANGE); if (NULL == pExchange) { return TSDB_CODE_OUT_OF_MEMORY; } @@ -842,10 +849,11 @@ static int32_t doCreateExchangePhysiNode(SPhysiPlanContext* pCxt, SExchangeLogic return TSDB_CODE_SUCCESS; } + static int32_t createStreamScanPhysiNodeByExchange(SPhysiPlanContext* pCxt, SExchangeLogicNode* pExchangeLogicNode, SPhysiNode** pPhyNode) { SScanPhysiNode* pScan = (SScanPhysiNode*)makePhysiNode( - pCxt, pExchangeLogicNode->precision, (SLogicNode*)pExchangeLogicNode, QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN); + pCxt, pExchangeLogicNode->node.precision, (SLogicNode*)pExchangeLogicNode, QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN); if (NULL == pScan) { return TSDB_CODE_OUT_OF_MEMORY; } @@ -914,6 +922,7 @@ static int32_t createWindowPhysiNodeFinalize(SPhysiPlanContext* pCxt, SNodeList* pWindow->triggerType = pWindowLogicNode->triggerType; pWindow->watermark = pWindowLogicNode->watermark; + pWindow->filesFactor = pWindowLogicNode->filesFactor; if (TSDB_CODE_SUCCESS == code) { *pPhyNode = (SPhysiNode*)pWindow; @@ -924,11 +933,22 @@ static int32_t createWindowPhysiNodeFinalize(SPhysiPlanContext* pCxt, SNodeList* return code; } +static ENodeType getIntervalOperatorType(bool streamQuery, EStreamIntervalAlgorithm stmAlgo) { + if (streamQuery) { + return STREAM_INTERVAL_ALGO_FINAL == stmAlgo + ? QUERY_NODE_PHYSICAL_PLAN_STREAM_FINAL_INTERVAL + : (STREAM_INTERVAL_ALGO_SEMI == stmAlgo ? 
QUERY_NODE_PHYSICAL_PLAN_STREAM_SEMI_INTERVAL + : QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERVAL); + } else { + return QUERY_NODE_PHYSICAL_PLAN_INTERVAL; + } +} + static int32_t createIntervalPhysiNode(SPhysiPlanContext* pCxt, SNodeList* pChildren, SWindowLogicNode* pWindowLogicNode, SPhysiNode** pPhyNode) { SIntervalPhysiNode* pInterval = (SIntervalPhysiNode*)makePhysiNode( pCxt, getPrecision(pChildren), (SLogicNode*)pWindowLogicNode, - (pCxt->pPlanCxt->streamQuery ? QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERVAL : QUERY_NODE_PHYSICAL_PLAN_INTERVAL)); + getIntervalOperatorType(pCxt->pPlanCxt->streamQuery, pWindowLogicNode->stmInterAlgo)); if (NULL == pInterval) { return TSDB_CODE_OUT_OF_MEMORY; } @@ -946,7 +966,8 @@ static int32_t createSessionWindowPhysiNode(SPhysiPlanContext* pCxt, SNodeList* SWindowLogicNode* pWindowLogicNode, SPhysiNode** pPhyNode) { SSessionWinodwPhysiNode* pSession = (SSessionWinodwPhysiNode*)makePhysiNode( pCxt, getPrecision(pChildren), (SLogicNode*)pWindowLogicNode, - (pCxt->pPlanCxt->streamQuery ? QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION_WINDOW : QUERY_NODE_PHYSICAL_PLAN_SESSION_WINDOW)); + (pCxt->pPlanCxt->streamQuery ? QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION_WINDOW + : QUERY_NODE_PHYSICAL_PLAN_SESSION_WINDOW)); if (NULL == pSession) { return TSDB_CODE_OUT_OF_MEMORY; } @@ -1129,6 +1150,54 @@ static int32_t createFillPhysiNode(SPhysiPlanContext* pCxt, SNodeList* pChildren return code; } +static int32_t createExchangePhysiNodeByMerge(SMergePhysiNode* pMerge) { + SExchangePhysiNode* pExchange = nodesMakeNode(QUERY_NODE_PHYSICAL_PLAN_EXCHANGE); + if (NULL == pExchange) { + return TSDB_CODE_OUT_OF_MEMORY; + } + pExchange->srcGroupId = pMerge->srcGroupId; + pExchange->node.pParent = (SPhysiNode*)pMerge; + pExchange->node.pOutputDataBlockDesc = nodesCloneNode(pMerge->node.pOutputDataBlockDesc); + if (NULL == pExchange->node.pOutputDataBlockDesc) { + nodesDestroyNode(pExchange); + return TSDB_CODE_OUT_OF_MEMORY; + } + return nodesListMakeStrictAppend(&pMerge->node.pChildren, pExchange); +} + +static int32_t createMergePhysiNode(SPhysiPlanContext* pCxt, SMergeLogicNode* pMergeLogicNode, SPhysiNode** pPhyNode) { + SMergePhysiNode* pMerge = (SMergePhysiNode*)makePhysiNode( + pCxt, pMergeLogicNode->node.precision, (SLogicNode*)pMergeLogicNode, QUERY_NODE_PHYSICAL_PLAN_MERGE); + if (NULL == pMerge) { + return TSDB_CODE_OUT_OF_MEMORY; + } + + pMerge->numOfChannels = pMergeLogicNode->numOfChannels; + pMerge->srcGroupId = pMergeLogicNode->srcGroupId; + + int32_t code = TSDB_CODE_SUCCESS; + + for (int32_t i = 0; i < pMerge->numOfChannels; ++i) { + code = createExchangePhysiNodeByMerge(pMerge); + if (TSDB_CODE_SUCCESS != code) { + break; + } + } + + if (TSDB_CODE_SUCCESS == code) { + code = setListSlotId(pCxt, pMerge->node.pOutputDataBlockDesc->dataBlockId, -1, pMergeLogicNode->pMergeKeys, + &pMerge->pMergeKeys); + } + + if (TSDB_CODE_SUCCESS == code) { + *pPhyNode = (SPhysiNode*)pMerge; + } else { + nodesDestroyNode(pMerge); + } + + return code; +} + static int32_t doCreatePhysiNode(SPhysiPlanContext* pCxt, SLogicNode* pLogicNode, SSubplan* pSubplan, SNodeList* pChildren, SPhysiNode** pPhyNode) { switch (nodeType(pLogicNode)) { @@ -1150,6 +1219,8 @@ static int32_t doCreatePhysiNode(SPhysiPlanContext* pCxt, SLogicNode* pLogicNode return createPartitionPhysiNode(pCxt, pChildren, (SPartitionLogicNode*)pLogicNode, pPhyNode); case QUERY_NODE_LOGIC_PLAN_FILL: return createFillPhysiNode(pCxt, pChildren, (SFillLogicNode*)pLogicNode, pPhyNode); + case QUERY_NODE_LOGIC_PLAN_MERGE: + return 
createMergePhysiNode(pCxt, (SMergeLogicNode*)pLogicNode, pPhyNode); default: break; } @@ -1180,9 +1251,13 @@ static int32_t createPhysiNode(SPhysiPlanContext* pCxt, SLogicNode* pLogicNode, } if (TSDB_CODE_SUCCESS == code) { - (*pPhyNode)->pChildren = pChildren; - SNode* pChild; - FOREACH(pChild, (*pPhyNode)->pChildren) { ((SPhysiNode*)pChild)->pParent = (*pPhyNode); } + if (LIST_LENGTH(pChildren) > 0) { + (*pPhyNode)->pChildren = pChildren; + SNode* pChild; + FOREACH(pChild, (*pPhyNode)->pChildren) { ((SPhysiNode*)pChild)->pParent = (*pPhyNode); } + } else { + nodesDestroyList(pChildren); + } } else { nodesDestroyList(pChildren); } @@ -1243,7 +1318,8 @@ static int32_t createPhysiSubplan(SPhysiPlanContext* pCxt, SLogicSubplan* pLogic SVnodeModifLogicNode* pModif = (SVnodeModifLogicNode*)pLogicSubplan->pNode; pSubplan->msgType = pModif->msgType; pSubplan->execNode.epSet = pModif->pVgDataBlocks->vg.epSet; - taosArrayPush(pCxt->pExecNodeList, &pSubplan->execNode); + SQueryNodeLoad node = {.addr = pSubplan->execNode, .load = 0}; + taosArrayPush(pCxt->pExecNodeList, &node); code = createDataInserter(pCxt, pModif->pVgDataBlocks, &pSubplan->pDataSink); } else { pSubplan->msgType = TDMT_VND_QUERY; diff --git a/source/libs/planner/src/planSpliter.c b/source/libs/planner/src/planSpliter.c index ea149f8363955233fc45eb60a7d71378c8198d17..e3c8b82e3988bd7943815a645332920f9d31d08b 100644 --- a/source/libs/planner/src/planSpliter.c +++ b/source/libs/planner/src/planSpliter.c @@ -13,19 +13,21 @@ * along with this program. If not, see . */ +#include "functionMgt.h" #include "planInt.h" #define SPLIT_FLAG_MASK(n) (1 << n) -#define SPLIT_FLAG_STS SPLIT_FLAG_MASK(0) +#define SPLIT_FLAG_STABLE_SPLIT SPLIT_FLAG_MASK(0) #define SPLIT_FLAG_SET_MASK(val, mask) (val) |= (mask) #define SPLIT_FLAG_TEST_MASK(val, mask) (((val) & (mask)) != 0) typedef struct SSplitContext { - uint64_t queryId; - int32_t groupId; - bool split; + SPlanContext* pPlanCxt; + uint64_t queryId; + int32_t groupId; + bool split; } SSplitContext; typedef int32_t (*FSplit)(SSplitContext* pCxt, SLogicSubplan* pSubplan); @@ -35,30 +37,19 @@ typedef struct SSplitRule { FSplit splitFunc; } SSplitRule; -typedef struct SStsInfo { - SScanLogicNode* pScan; - SLogicSubplan* pSubplan; -} SStsInfo; - -typedef struct SCtjInfo { - SJoinLogicNode* pJoin; - SLogicNode* pSplitNode; - SLogicSubplan* pSubplan; -} SCtjInfo; +typedef bool (*FSplFindSplitNode)(SSplitContext* pCxt, SLogicSubplan* pSubplan, void* pInfo); -typedef struct SUaInfo { - SProjectLogicNode* pProject; - SLogicSubplan* pSubplan; -} SUaInfo; - -typedef struct SUnInfo { - SAggLogicNode* pAgg; - SLogicSubplan* pSubplan; -} SUnInfo; - -typedef bool (*FSplFindSplitNode)(SLogicSubplan* pSubplan, void* pInfo); +static void splSetSubplanVgroups(SLogicSubplan* pSubplan, SLogicNode* pNode) { + if (QUERY_NODE_LOGIC_PLAN_SCAN == nodeType(pNode)) { + TSWAP(pSubplan->pVgroupList, ((SScanLogicNode*)pNode)->pVgroupList); + } else { + if (1 == LIST_LENGTH(pNode->pChildren)) { + splSetSubplanVgroups(pSubplan, (SLogicNode*)nodesListGetNode(pNode->pChildren, 0)); + } + } +} -static SLogicSubplan* splCreateSubplan(SSplitContext* pCxt, SLogicNode* pNode, int32_t flag) { +static SLogicSubplan* splCreateScanSubplan(SSplitContext* pCxt, SLogicNode* pNode, int32_t flag) { SLogicSubplan* pSubplan = nodesMakeNode(QUERY_NODE_LOGIC_SUBPLAN); if (NULL == pSubplan) { return NULL; @@ -66,27 +57,36 @@ static SLogicSubplan* splCreateSubplan(SSplitContext* pCxt, SLogicNode* pNode, i pSubplan->id.queryId = pCxt->queryId; 
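// Note the ownership change a few lines below: the subplan now adopts pNode
// directly where the old splCreateSubplan cloned it, and splSetSubplanVgroups
// (defined above) walks single-child chains so the vgroup list is moved off
// the scan node even when the split point sits above the scan.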
pSubplan->id.groupId = pCxt->groupId; pSubplan->subplanType = SUBPLAN_TYPE_SCAN; - pSubplan->pNode = (SLogicNode*)nodesCloneNode(pNode); - if (QUERY_NODE_LOGIC_PLAN_SCAN == nodeType(pNode)) { - TSWAP(pSubplan->pVgroupList, ((SScanLogicNode*)pSubplan->pNode)->pVgroupList); - } + pSubplan->pNode = pNode; + pSubplan->pNode->pParent = NULL; + splSetSubplanVgroups(pSubplan, pNode); SPLIT_FLAG_SET_MASK(pSubplan->splitFlag, flag); return pSubplan; } -static int32_t splCreateExchangeNode(SSplitContext* pCxt, SLogicSubplan* pSubplan, SLogicNode* pSplitNode, - ESubplanType subplanType) { +static int32_t splCreateExchangeNode(SSplitContext* pCxt, SLogicNode* pChild, SExchangeLogicNode** pOutput) { SExchangeLogicNode* pExchange = nodesMakeNode(QUERY_NODE_LOGIC_PLAN_EXCHANGE); if (NULL == pExchange) { return TSDB_CODE_OUT_OF_MEMORY; } pExchange->srcGroupId = pCxt->groupId; - pExchange->precision = pSplitNode->precision; - pExchange->node.pTargets = nodesCloneList(pSplitNode->pTargets); + pExchange->node.precision = pChild->precision; + pExchange->node.pTargets = nodesCloneList(pChild->pTargets); if (NULL == pExchange->node.pTargets) { return TSDB_CODE_OUT_OF_MEMORY; } + *pOutput = pExchange; + return TSDB_CODE_SUCCESS; +} + +static int32_t splCreateExchangeNodeForSubplan(SSplitContext* pCxt, SLogicSubplan* pSubplan, SLogicNode* pSplitNode, + ESubplanType subplanType) { + SExchangeLogicNode* pExchange = NULL; + if (TSDB_CODE_SUCCESS != splCreateExchangeNode(pCxt, pSplitNode, &pExchange)) { + return TSDB_CODE_OUT_OF_MEMORY; + } + pSubplan->subplanType = subplanType; if (NULL == pSplitNode->pParent) { @@ -98,7 +98,7 @@ static int32_t splCreateExchangeNode(SSplitContext* pCxt, SLogicSubplan* pSubpla FOREACH(pNode, pSplitNode->pParent->pChildren) { if (nodesEqualNode(pNode, pSplitNode)) { REPLACE_NODE(pExchange); - nodesDestroyNode(pNode); + pExchange->node.pParent = pSplitNode->pParent; return TSDB_CODE_SUCCESS; } } @@ -108,7 +108,7 @@ static int32_t splCreateExchangeNode(SSplitContext* pCxt, SLogicSubplan* pSubpla static bool splMatch(SSplitContext* pCxt, SLogicSubplan* pSubplan, int32_t flag, FSplFindSplitNode func, void* pInfo) { if (!SPLIT_FLAG_TEST_MASK(pSubplan->splitFlag, flag)) { - if (func(pSubplan, pInfo)) { + if (func(pCxt, pSubplan, pInfo)) { return true; } } @@ -121,14 +121,62 @@ static bool splMatch(SSplitContext* pCxt, SLogicSubplan* pSubplan, int32_t flag, return false; } -static SLogicNode* stsMatchByNode(SLogicNode* pNode) { - if (QUERY_NODE_LOGIC_PLAN_SCAN == nodeType(pNode) && NULL != ((SScanLogicNode*)pNode)->pVgroupList && - ((SScanLogicNode*)pNode)->pVgroupList->numOfVgroups > 1) { +typedef struct SStableSplitInfo { + SLogicNode* pSplitNode; + SLogicSubplan* pSubplan; +} SStableSplitInfo; + +static bool stbSplHasGatherExecFunc(const SNodeList* pFuncs) { + SNode* pFunc = NULL; + FOREACH(pFunc, pFuncs) { + if (!fmIsDistExecFunc(((SFunctionNode*)pFunc)->funcId)) { + return true; + } + } + return false; +} + +static bool stbSplIsMultiTbScan(bool streamQuery, SScanLogicNode* pScan) { + return (NULL != pScan->pVgroupList && pScan->pVgroupList->numOfVgroups > 1) || + (streamQuery && TSDB_SUPER_TABLE == pScan->pMeta->tableType); +} + +static bool stbSplHasMultiTbScan(bool streamQuery, SLogicNode* pNode) { + if (1 != LIST_LENGTH(pNode->pChildren)) { + return false; + } + SNode* pChild = nodesListGetNode(pNode->pChildren, 0); + return (QUERY_NODE_LOGIC_PLAN_SCAN == nodeType(pChild) && stbSplIsMultiTbScan(streamQuery, (SScanLogicNode*)pChild)); +} + +static bool stbSplNeedSplit(bool 
streamQuery, SLogicNode* pNode) { + switch (nodeType(pNode)) { + // case QUERY_NODE_LOGIC_PLAN_AGG: + // return !stbSplHasGatherExecFunc(((SAggLogicNode*)pNode)->pAggFuncs) && stbSplHasMultiTbScan(pNode); + case QUERY_NODE_LOGIC_PLAN_WINDOW: { + SWindowLogicNode* pWindow = (SWindowLogicNode*)pNode; + if (WINDOW_TYPE_INTERVAL != pWindow->winType) { + return false; + } + return !stbSplHasGatherExecFunc(pWindow->pFuncs) && stbSplHasMultiTbScan(streamQuery, pNode); + } + // case QUERY_NODE_LOGIC_PLAN_SORT: + // return stbSplHasMultiTbScan(pNode); + case QUERY_NODE_LOGIC_PLAN_SCAN: + return stbSplIsMultiTbScan(streamQuery, (SScanLogicNode*)pNode); + default: + break; + } + return false; +} + +static SLogicNode* stbSplMatchByNode(bool streamQuery, SLogicNode* pNode) { + if (stbSplNeedSplit(streamQuery, pNode)) { return pNode; } SNode* pChild; FOREACH(pChild, pNode->pChildren) { - SLogicNode* pSplitNode = stsMatchByNode((SLogicNode*)pChild); + SLogicNode* pSplitNode = stbSplMatchByNode(streamQuery, (SLogicNode*)pChild); if (NULL != pSplitNode) { return pSplitNode; } @@ -136,31 +184,230 @@ static SLogicNode* stsMatchByNode(SLogicNode* pNode) { return NULL; } -static bool stsFindSplitNode(SLogicSubplan* pSubplan, SStsInfo* pInfo) { - SLogicNode* pSplitNode = stsMatchByNode(pSubplan->pNode); +static bool stbSplFindSplitNode(SSplitContext* pCxt, SLogicSubplan* pSubplan, SStableSplitInfo* pInfo) { + SLogicNode* pSplitNode = stbSplMatchByNode(pCxt->pPlanCxt->streamQuery, pSubplan->pNode); if (NULL != pSplitNode) { - pInfo->pScan = (SScanLogicNode*)pSplitNode; + pInfo->pSplitNode = pSplitNode; pInfo->pSubplan = pSubplan; } return NULL != pSplitNode; } -static int32_t stsSplit(SSplitContext* pCxt, SLogicSubplan* pSubplan) { - SStsInfo info = {0}; - if (!splMatch(pCxt, pSubplan, SPLIT_FLAG_STS, (FSplFindSplitNode)stsFindSplitNode, &info)) { - return TSDB_CODE_SUCCESS; +static int32_t stbSplRewriteFuns(const SNodeList* pFuncs, SNodeList** pPartialFuncs, SNodeList** pMergeFuncs) { + SNode* pNode = NULL; + FOREACH(pNode, pFuncs) { + SFunctionNode* pFunc = (SFunctionNode*)pNode; + SFunctionNode* pPartFunc = NULL; + SFunctionNode* pMergeFunc = NULL; + int32_t code = TSDB_CODE_SUCCESS; + if (fmIsWindowPseudoColumnFunc(pFunc->funcId)) { + pPartFunc = nodesCloneNode(pFunc); + pMergeFunc = nodesCloneNode(pFunc); + if (NULL == pPartFunc || NULL == pMergeFunc) { + nodesDestroyNode(pPartFunc); + nodesDestroyNode(pMergeFunc); + code = TSDB_CODE_OUT_OF_MEMORY; + } + } else { + code = fmGetDistMethod(pFunc, &pPartFunc, &pMergeFunc); + } + if (TSDB_CODE_SUCCESS == code) { + code = nodesListMakeStrictAppend(pPartialFuncs, pPartFunc); + } + if (TSDB_CODE_SUCCESS == code) { + code = nodesListMakeStrictAppend(pMergeFuncs, pMergeFunc); + } + if (TSDB_CODE_SUCCESS != code) { + nodesDestroyList(*pPartialFuncs); + nodesDestroyList(*pMergeFuncs); + return code; + } + } + return TSDB_CODE_SUCCESS; +} + +static int32_t stbSplAppendWStart(SNodeList* pFuncs, int32_t* pIndex) { + int32_t index = 0; + SNode* pFunc = NULL; + FOREACH(pFunc, pFuncs) { + if (FUNCTION_TYPE_WSTARTTS == ((SFunctionNode*)pFunc)->funcType) { + *pIndex = index; + return TSDB_CODE_SUCCESS; + } + ++index; + } + + SFunctionNode* pWStart = nodesMakeNode(QUERY_NODE_FUNCTION); + if (NULL == pWStart) { + return TSDB_CODE_OUT_OF_MEMORY; + } + strcpy(pWStart->functionName, "_wstartts"); + snprintf(pWStart->node.aliasName, sizeof(pWStart->node.aliasName), "%s.%p", pWStart->functionName, pWStart); + int32_t code = fmGetFuncInfo(pWStart, NULL, 0); + if (TSDB_CODE_SUCCESS == 
code) { + code = nodesListStrictAppend(pFuncs, pWStart); + } + *pIndex = index; + return code; +} + +static int32_t stbSplCreatePartWindowNode(SWindowLogicNode* pMergeWindow, SLogicNode** pPartWindow) { + SNodeList* pFunc = pMergeWindow->pFuncs; + pMergeWindow->pFuncs = NULL; + SNodeList* pTargets = pMergeWindow->node.pTargets; + pMergeWindow->node.pTargets = NULL; + SNodeList* pChildren = pMergeWindow->node.pChildren; + pMergeWindow->node.pChildren = NULL; + + int32_t code = TSDB_CODE_SUCCESS; + SWindowLogicNode* pPartWin = nodesCloneNode(pMergeWindow); + if (NULL == pPartWin) { + code = TSDB_CODE_OUT_OF_MEMORY; + } + + if (TSDB_CODE_SUCCESS == code) { + pMergeWindow->node.pTargets = pTargets; + pPartWin->node.pChildren = pChildren; + code = stbSplRewriteFuns(pFunc, &pPartWin->pFuncs, &pMergeWindow->pFuncs); } - int32_t code = nodesListMakeStrictAppend(&info.pSubplan->pChildren, - splCreateSubplan(pCxt, (SLogicNode*)info.pScan, SPLIT_FLAG_STS)); + int32_t index = 0; if (TSDB_CODE_SUCCESS == code) { - code = splCreateExchangeNode(pCxt, info.pSubplan, (SLogicNode*)info.pScan, SUBPLAN_TYPE_MERGE); + code = stbSplAppendWStart(pPartWin->pFuncs, &index); } + if (TSDB_CODE_SUCCESS == code) { + code = createColumnByRewriteExps(pPartWin->pFuncs, &pPartWin->node.pTargets); + } + if (TSDB_CODE_SUCCESS == code) { + nodesDestroyNode(pMergeWindow->pTspk); + pMergeWindow->pTspk = nodesCloneNode(nodesListGetNode(pPartWin->node.pTargets, index)); + if (NULL == pMergeWindow->pTspk) { + code = TSDB_CODE_OUT_OF_MEMORY; + } + } + + nodesDestroyList(pFunc); + if (TSDB_CODE_SUCCESS == code) { + *pPartWindow = (SLogicNode*)pPartWin; + } else { + nodesDestroyNode(pPartWin); + } + + return code; +} + +static int32_t stbSplCreateMergeNode(SSplitContext* pCxt, SLogicNode* pParent, SLogicNode* pPartChild) { + SMergeLogicNode* pMerge = nodesMakeNode(QUERY_NODE_LOGIC_PLAN_MERGE); + if (NULL == pMerge) { + return TSDB_CODE_OUT_OF_MEMORY; + } + pMerge->numOfChannels = ((SScanLogicNode*)nodesListGetNode(pPartChild->pChildren, 0))->pVgroupList->numOfVgroups; + pMerge->srcGroupId = pCxt->groupId; + pMerge->node.pParent = pParent; + pMerge->node.precision = pPartChild->precision; + int32_t code = nodesListMakeStrictAppend(&pMerge->pMergeKeys, nodesCloneNode(((SWindowLogicNode*)pParent)->pTspk)); + if (TSDB_CODE_SUCCESS == code) { + pMerge->node.pTargets = nodesCloneList(pPartChild->pTargets); + if (NULL == pMerge->node.pTargets) { + code = TSDB_CODE_OUT_OF_MEMORY; + } + } + if (TSDB_CODE_SUCCESS == code) { + code = nodesListMakeAppend(&pParent->pChildren, pMerge); + } + + return code; +} + +static int32_t stbSplSplitWindowNodeForBatch(SSplitContext* pCxt, SStableSplitInfo* pInfo) { + SLogicNode* pPartWindow = NULL; + int32_t code = stbSplCreatePartWindowNode((SWindowLogicNode*)pInfo->pSplitNode, &pPartWindow); + if (TSDB_CODE_SUCCESS == code) { + code = stbSplCreateMergeNode(pCxt, pInfo->pSplitNode, pPartWindow); + } + if (TSDB_CODE_SUCCESS == code) { + code = nodesListMakeStrictAppend(&pInfo->pSubplan->pChildren, + splCreateScanSubplan(pCxt, pPartWindow, SPLIT_FLAG_STABLE_SPLIT)); + } + pInfo->pSubplan->subplanType = SUBPLAN_TYPE_MERGE; + return code; +} + +static int32_t stbSplCreateExchangeNode(SSplitContext* pCxt, SLogicNode* pParent, SLogicNode* pPartChild) { + SExchangeLogicNode* pExchange = NULL; + int32_t code = splCreateExchangeNode(pCxt, pPartChild, &pExchange); + if (TSDB_CODE_SUCCESS == code) { + code = nodesListMakeAppend(&pParent->pChildren, pExchange); + } + return code; +} + +static int32_t 
stbSplSplitWindowNodeForStream(SSplitContext* pCxt, SStableSplitInfo* pInfo) { + SLogicNode* pPartWindow = NULL; + int32_t code = stbSplCreatePartWindowNode((SWindowLogicNode*)pInfo->pSplitNode, &pPartWindow); + if (TSDB_CODE_SUCCESS == code) { + ((SWindowLogicNode*)pPartWindow)->stmInterAlgo = STREAM_INTERVAL_ALGO_SEMI; + ((SWindowLogicNode*)pInfo->pSplitNode)->stmInterAlgo = STREAM_INTERVAL_ALGO_FINAL; + code = stbSplCreateExchangeNode(pCxt, pInfo->pSplitNode, pPartWindow); + } + if (TSDB_CODE_SUCCESS == code) { + code = nodesListMakeStrictAppend(&pInfo->pSubplan->pChildren, + splCreateScanSubplan(pCxt, pPartWindow, SPLIT_FLAG_STABLE_SPLIT)); + } + pInfo->pSubplan->subplanType = SUBPLAN_TYPE_MERGE; + return code; +} + +static int32_t stbSplSplitWindowNode(SSplitContext* pCxt, SStableSplitInfo* pInfo) { + if (pCxt->pPlanCxt->streamQuery) { + return stbSplSplitWindowNodeForStream(pCxt, pInfo); + } else { + return stbSplSplitWindowNodeForBatch(pCxt, pInfo); + } +} + +static int32_t stbSplSplitScanNode(SSplitContext* pCxt, SStableSplitInfo* pInfo) { + int32_t code = splCreateExchangeNodeForSubplan(pCxt, pInfo->pSubplan, pInfo->pSplitNode, SUBPLAN_TYPE_MERGE); + if (TSDB_CODE_SUCCESS == code) { + code = nodesListMakeStrictAppend(&pInfo->pSubplan->pChildren, + splCreateScanSubplan(pCxt, pInfo->pSplitNode, SPLIT_FLAG_STABLE_SPLIT)); + } + return code; +} + +static int32_t stableSplit(SSplitContext* pCxt, SLogicSubplan* pSubplan) { + if (pCxt->pPlanCxt->rSmaQuery) { + return TSDB_CODE_SUCCESS; + } + + SStableSplitInfo info = {0}; + if (!splMatch(pCxt, pSubplan, SPLIT_FLAG_STABLE_SPLIT, (FSplFindSplitNode)stbSplFindSplitNode, &info)) { + return TSDB_CODE_SUCCESS; + } + + int32_t code = TSDB_CODE_SUCCESS; + switch (nodeType(info.pSplitNode)) { + case QUERY_NODE_LOGIC_PLAN_WINDOW: + code = stbSplSplitWindowNode(pCxt, &info); + break; + case QUERY_NODE_LOGIC_PLAN_SCAN: + code = stbSplSplitScanNode(pCxt, &info); + break; + default: + break; + } + ++(pCxt->groupId); pCxt->split = true; return code; } -static bool needSplit(SJoinLogicNode* pJoin) { +typedef struct SSigTbJoinSplitInfo { + SJoinLogicNode* pJoin; + SLogicNode* pSplitNode; + SLogicSubplan* pSubplan; +} SSigTbJoinSplitInfo; + +static bool sigTbJoinSplNeedSplit(SJoinLogicNode* pJoin) { if (!pJoin->isSingleTableJoin) { return false; } @@ -168,13 +415,13 @@ static bool needSplit(SJoinLogicNode* pJoin) { QUERY_NODE_LOGIC_PLAN_EXCHANGE != nodeType(nodesListGetNode(pJoin->node.pChildren, 1)); } -static SJoinLogicNode* ctjMatchByNode(SLogicNode* pNode) { - if (QUERY_NODE_LOGIC_PLAN_JOIN == nodeType(pNode) && needSplit((SJoinLogicNode*)pNode)) { +static SJoinLogicNode* sigTbJoinSplMatchByNode(SLogicNode* pNode) { + if (QUERY_NODE_LOGIC_PLAN_JOIN == nodeType(pNode) && sigTbJoinSplNeedSplit((SJoinLogicNode*)pNode)) { return (SJoinLogicNode*)pNode; } SNode* pChild; FOREACH(pChild, pNode->pChildren) { - SJoinLogicNode* pSplitNode = ctjMatchByNode((SLogicNode*)pChild); + SJoinLogicNode* pSplitNode = sigTbJoinSplMatchByNode((SLogicNode*)pChild); if (NULL != pSplitNode) { return pSplitNode; } @@ -182,8 +429,8 @@ static SJoinLogicNode* ctjMatchByNode(SLogicNode* pNode) { return NULL; } -static bool ctjFindSplitNode(SLogicSubplan* pSubplan, SCtjInfo* pInfo) { - SJoinLogicNode* pJoin = ctjMatchByNode(pSubplan->pNode); +static bool sigTbJoinSplFindSplitNode(SSplitContext* pCxt, SLogicSubplan* pSubplan, SSigTbJoinSplitInfo* pInfo) { + SJoinLogicNode* pJoin = sigTbJoinSplMatchByNode(pSubplan->pNode); if (NULL != pJoin) { pInfo->pJoin = pJoin; pInfo->pSplitNode 
= nodesListGetNode(pJoin->node.pChildren, 1); @@ -192,14 +439,14 @@ static bool ctjFindSplitNode(SLogicSubplan* pSubplan, SCtjInfo* pInfo) { return NULL != pJoin; } -static int32_t ctjSplit(SSplitContext* pCxt, SLogicSubplan* pSubplan) { - SCtjInfo info = {0}; - if (!splMatch(pCxt, pSubplan, 0, (FSplFindSplitNode)ctjFindSplitNode, &info)) { +static int32_t singleTableJoinSplit(SSplitContext* pCxt, SLogicSubplan* pSubplan) { + SSigTbJoinSplitInfo info = {0}; + if (!splMatch(pCxt, pSubplan, 0, (FSplFindSplitNode)sigTbJoinSplFindSplitNode, &info)) { return TSDB_CODE_SUCCESS; } - int32_t code = nodesListMakeStrictAppend(&info.pSubplan->pChildren, splCreateSubplan(pCxt, info.pSplitNode, 0)); + int32_t code = splCreateExchangeNodeForSubplan(pCxt, info.pSubplan, info.pSplitNode, info.pSubplan->subplanType); if (TSDB_CODE_SUCCESS == code) { - code = splCreateExchangeNode(pCxt, info.pSubplan, info.pSplitNode, info.pSubplan->subplanType); + code = nodesListMakeStrictAppend(&info.pSubplan->pChildren, splCreateScanSubplan(pCxt, info.pSplitNode, 0)); } ++(pCxt->groupId); pCxt->split = true; @@ -277,13 +524,18 @@ static int32_t unionSplitSubplan(SSplitContext* pCxt, SLogicSubplan* pUnionSubpl return code; } -static SLogicNode* uaMatchByNode(SLogicNode* pNode) { +typedef struct SUnionAllSplitInfo { + SProjectLogicNode* pProject; + SLogicSubplan* pSubplan; +} SUnionAllSplitInfo; + +static SLogicNode* unAllSplMatchByNode(SLogicNode* pNode) { if (QUERY_NODE_LOGIC_PLAN_PROJECT == nodeType(pNode) && LIST_LENGTH(pNode->pChildren) > 1) { return pNode; } SNode* pChild; FOREACH(pChild, pNode->pChildren) { - SLogicNode* pSplitNode = uaMatchByNode((SLogicNode*)pChild); + SLogicNode* pSplitNode = unAllSplMatchByNode((SLogicNode*)pChild); if (NULL != pSplitNode) { return pSplitNode; } @@ -291,8 +543,8 @@ static SLogicNode* uaMatchByNode(SLogicNode* pNode) { return NULL; } -static bool uaFindSplitNode(SLogicSubplan* pSubplan, SUaInfo* pInfo) { - SLogicNode* pSplitNode = uaMatchByNode(pSubplan->pNode); +static bool unAllSplFindSplitNode(SSplitContext* pCxt, SLogicSubplan* pSubplan, SUnionAllSplitInfo* pInfo) { + SLogicNode* pSplitNode = unAllSplMatchByNode(pSubplan->pNode); if (NULL != pSplitNode) { pInfo->pProject = (SProjectLogicNode*)pSplitNode; pInfo->pSubplan = pSubplan; @@ -300,13 +552,13 @@ static bool uaFindSplitNode(SLogicSubplan* pSubplan, SUaInfo* pInfo) { return NULL != pSplitNode; } -static int32_t uaCreateExchangeNode(SSplitContext* pCxt, SLogicSubplan* pSubplan, SProjectLogicNode* pProject) { +static int32_t unAllSplCreateExchangeNode(SSplitContext* pCxt, SLogicSubplan* pSubplan, SProjectLogicNode* pProject) { SExchangeLogicNode* pExchange = nodesMakeNode(QUERY_NODE_LOGIC_PLAN_EXCHANGE); if (NULL == pExchange) { return TSDB_CODE_OUT_OF_MEMORY; } pExchange->srcGroupId = pCxt->groupId; - // pExchange->precision = pScan->pMeta->tableInfo.precision; + pExchange->node.precision = pProject->node.precision; pExchange->node.pTargets = nodesCloneList(pProject->node.pTargets); if (NULL == pExchange->node.pTargets) { return TSDB_CODE_OUT_OF_MEMORY; @@ -332,28 +584,33 @@ static int32_t uaCreateExchangeNode(SSplitContext* pCxt, SLogicSubplan* pSubplan return TSDB_CODE_FAILED; } -static int32_t uaSplit(SSplitContext* pCxt, SLogicSubplan* pSubplan) { - SUaInfo info = {0}; - if (!splMatch(pCxt, pSubplan, 0, (FSplFindSplitNode)uaFindSplitNode, &info)) { +static int32_t unionAllSplit(SSplitContext* pCxt, SLogicSubplan* pSubplan) { + SUnionAllSplitInfo info = {0}; + if (!splMatch(pCxt, pSubplan, 0, 
(FSplFindSplitNode)unAllSplFindSplitNode, &info)) { return TSDB_CODE_SUCCESS; } int32_t code = unionSplitSubplan(pCxt, info.pSubplan, (SLogicNode*)info.pProject); if (TSDB_CODE_SUCCESS == code) { - code = uaCreateExchangeNode(pCxt, info.pSubplan, info.pProject); + code = unAllSplCreateExchangeNode(pCxt, info.pSubplan, info.pProject); } ++(pCxt->groupId); pCxt->split = true; return code; } -static SLogicNode* unMatchByNode(SLogicNode* pNode) { +typedef struct SUnionDistinctSplitInfo { + SAggLogicNode* pAgg; + SLogicSubplan* pSubplan; +} SUnionDistinctSplitInfo; + +static SLogicNode* unDistSplMatchByNode(SLogicNode* pNode) { if (QUERY_NODE_LOGIC_PLAN_AGG == nodeType(pNode) && LIST_LENGTH(pNode->pChildren) > 1) { return pNode; } SNode* pChild; FOREACH(pChild, pNode->pChildren) { - SLogicNode* pSplitNode = unMatchByNode((SLogicNode*)pChild); + SLogicNode* pSplitNode = unDistSplMatchByNode((SLogicNode*)pChild); if (NULL != pSplitNode) { return pSplitNode; } @@ -361,13 +618,13 @@ static SLogicNode* unMatchByNode(SLogicNode* pNode) { return NULL; } -static int32_t unCreateExchangeNode(SSplitContext* pCxt, SLogicSubplan* pSubplan, SAggLogicNode* pAgg) { +static int32_t unDistSplCreateExchangeNode(SSplitContext* pCxt, SLogicSubplan* pSubplan, SAggLogicNode* pAgg) { SExchangeLogicNode* pExchange = nodesMakeNode(QUERY_NODE_LOGIC_PLAN_EXCHANGE); if (NULL == pExchange) { return TSDB_CODE_OUT_OF_MEMORY; } pExchange->srcGroupId = pCxt->groupId; - // pExchange->precision = pScan->pMeta->tableInfo.precision; + pExchange->node.precision = pAgg->node.precision; pExchange->node.pTargets = nodesCloneList(pAgg->pGroupKeys); if (NULL == pExchange->node.pTargets) { return TSDB_CODE_OUT_OF_MEMORY; @@ -378,8 +635,8 @@ static int32_t unCreateExchangeNode(SSplitContext* pCxt, SLogicSubplan* pSubplan return nodesListMakeAppend(&pAgg->node.pChildren, pExchange); } -static bool unFindSplitNode(SLogicSubplan* pSubplan, SUnInfo* pInfo) { - SLogicNode* pSplitNode = unMatchByNode(pSubplan->pNode); +static bool unDistSplFindSplitNode(SSplitContext* pCxt, SLogicSubplan* pSubplan, SUnionDistinctSplitInfo* pInfo) { + SLogicNode* pSplitNode = unDistSplMatchByNode(pSubplan->pNode); if (NULL != pSplitNode) { pInfo->pAgg = (SAggLogicNode*)pSplitNode; pInfo->pSubplan = pSubplan; @@ -387,25 +644,29 @@ static bool unFindSplitNode(SLogicSubplan* pSubplan, SUnInfo* pInfo) { return NULL != pSplitNode; } -static int32_t unSplit(SSplitContext* pCxt, SLogicSubplan* pSubplan) { - SUnInfo info = {0}; - if (!splMatch(pCxt, pSubplan, 0, (FSplFindSplitNode)unFindSplitNode, &info)) { +static int32_t unionDistinctSplit(SSplitContext* pCxt, SLogicSubplan* pSubplan) { + SUnionDistinctSplitInfo info = {0}; + if (!splMatch(pCxt, pSubplan, 0, (FSplFindSplitNode)unDistSplFindSplitNode, &info)) { return TSDB_CODE_SUCCESS; } int32_t code = unionSplitSubplan(pCxt, info.pSubplan, (SLogicNode*)info.pAgg); if (TSDB_CODE_SUCCESS == code) { - code = unCreateExchangeNode(pCxt, info.pSubplan, info.pAgg); + code = unDistSplCreateExchangeNode(pCxt, info.pSubplan, info.pAgg); } ++(pCxt->groupId); pCxt->split = true; return code; } -static const SSplitRule splitRuleSet[] = {{.pName = "SuperTableScan", .splitFunc = stsSplit}, - {.pName = "ChildTableJoin", .splitFunc = ctjSplit}, - {.pName = "UnionAll", .splitFunc = uaSplit}, - {.pName = "Union", .splitFunc = unSplit}}; +// clang-format off +static const SSplitRule splitRuleSet[] = { + {.pName = "SuperTableSplit", .splitFunc = stableSplit}, + {.pName = "SingleTableJoinSplit", .splitFunc = singleTableJoinSplit}, + 
{.pName = "UnionAllSplit", .splitFunc = unionAllSplit}, + {.pName = "UnionDistinctSplit", .splitFunc = unionDistinctSplit} +}; +// clang-format on static const int32_t splitRuleNum = (sizeof(splitRuleSet) / sizeof(SSplitRule)); @@ -416,9 +677,10 @@ static void dumpLogicSubplan(const char* pRuleName, SLogicSubplan* pSubplan) { taosMemoryFree(pStr); } -static int32_t applySplitRule(SLogicSubplan* pSubplan) { - SSplitContext cxt = {.queryId = pSubplan->id.queryId, .groupId = pSubplan->id.groupId + 1, .split = false}; - bool split = false; +static int32_t applySplitRule(SPlanContext* pCxt, SLogicSubplan* pSubplan) { + SSplitContext cxt = { + .pPlanCxt = pCxt, .queryId = pSubplan->id.queryId, .groupId = pSubplan->id.groupId + 1, .split = false}; + bool split = false; do { split = false; for (int32_t i = 0; i < splitRuleNum; ++i) { @@ -465,7 +727,7 @@ int32_t splitLogicPlan(SPlanContext* pCxt, SLogicNode* pLogicNode, SLogicSubplan pSubplan->id.groupId = 1; setLogicNodeParent(pSubplan->pNode); - int32_t code = applySplitRule(pSubplan); + int32_t code = applySplitRule(pCxt, pSubplan); if (TSDB_CODE_SUCCESS == code) { *pLogicSubplan = pSubplan; } else { diff --git a/source/libs/planner/src/planUtil.c b/source/libs/planner/src/planUtil.c index 3c83d9f53a8669535eda1dc883af2951e9470d54..63d31912f0cccdf177b87681687e0faf8168642a 100644 --- a/source/libs/planner/src/planUtil.c +++ b/source/libs/planner/src/planUtil.c @@ -34,3 +34,54 @@ int32_t generateUsageErrMsg(char* pBuf, int32_t len, int32_t errCode, ...) { va_end(vArgList); return errCode; } + +typedef struct SCreateColumnCxt { + int32_t errCode; + SNodeList* pList; +} SCreateColumnCxt; + +static EDealRes doCreateColumn(SNode* pNode, void* pContext) { + SCreateColumnCxt* pCxt = (SCreateColumnCxt*)pContext; + switch (nodeType(pNode)) { + case QUERY_NODE_COLUMN: { + SNode* pCol = nodesCloneNode(pNode); + if (NULL == pCol) { + return DEAL_RES_ERROR; + } + return (TSDB_CODE_SUCCESS == nodesListAppend(pCxt->pList, pCol) ? DEAL_RES_IGNORE_CHILD : DEAL_RES_ERROR); + } + case QUERY_NODE_OPERATOR: + case QUERY_NODE_LOGIC_CONDITION: + case QUERY_NODE_FUNCTION: { + SExprNode* pExpr = (SExprNode*)pNode; + SColumnNode* pCol = (SColumnNode*)nodesMakeNode(QUERY_NODE_COLUMN); + if (NULL == pCol) { + return DEAL_RES_ERROR; + } + pCol->node.resType = pExpr->resType; + strcpy(pCol->colName, pExpr->aliasName); + return (TSDB_CODE_SUCCESS == nodesListAppend(pCxt->pList, pCol) ? DEAL_RES_IGNORE_CHILD : DEAL_RES_ERROR); + } + default: + break; + } + + return DEAL_RES_CONTINUE; +} + +int32_t createColumnByRewriteExps(SNodeList* pExprs, SNodeList** pList) { + SCreateColumnCxt cxt = {.errCode = TSDB_CODE_SUCCESS, .pList = (NULL == *pList ? 
nodesMakeList() : *pList)}; + if (NULL == cxt.pList) { + return TSDB_CODE_OUT_OF_MEMORY; + } + + nodesWalkExprs(pExprs, doCreateColumn, &cxt); + if (TSDB_CODE_SUCCESS != cxt.errCode) { + nodesDestroyList(cxt.pList); + return cxt.errCode; + } + if (NULL == *pList) { + *pList = cxt.pList; + } + return cxt.errCode; +} diff --git a/source/libs/planner/src/planner.c b/source/libs/planner/src/planner.c index af62c52a89baa90aaf857fa6606267a437275f87..f8d240c7b2d2162800cbc32ee7af2eeb62645d89 100644 --- a/source/libs/planner/src/planner.c +++ b/source/libs/planner/src/planner.c @@ -58,16 +58,19 @@ static int32_t setSubplanExecutionNode(SPhysiNode* pNode, int32_t groupId, SDown if (QUERY_NODE_PHYSICAL_PLAN_EXCHANGE == nodeType(pNode)) { SExchangePhysiNode* pExchange = (SExchangePhysiNode*)pNode; if (pExchange->srcGroupId == groupId) { - if (NULL == pExchange->pSrcEndPoints) { - pExchange->pSrcEndPoints = nodesMakeList(); - if (NULL == pExchange->pSrcEndPoints) { - return TSDB_CODE_OUT_OF_MEMORY; - } - } - if (TSDB_CODE_SUCCESS != nodesListStrictAppend(pExchange->pSrcEndPoints, nodesCloneNode(pSource))) { - return TSDB_CODE_OUT_OF_MEMORY; + return nodesListMakeStrictAppend(&pExchange->pSrcEndPoints, nodesCloneNode(pSource)); + } + } else if (QUERY_NODE_PHYSICAL_PLAN_MERGE == nodeType(pNode)) { + SMergePhysiNode* pMerge = (SMergePhysiNode*)pNode; + if (pMerge->srcGroupId == groupId) { + SExchangePhysiNode* pExchange = + (SExchangePhysiNode*)nodesListGetNode(pMerge->node.pChildren, pMerge->numOfChannels - 1); + if (1 == pMerge->numOfChannels) { + pMerge->numOfChannels = LIST_LENGTH(pMerge->node.pChildren); + } else { + --(pMerge->numOfChannels); } - return TSDB_CODE_SUCCESS; + return nodesListMakeStrictAppend(&pExchange->pSrcEndPoints, nodesCloneNode(pSource)); } } diff --git a/source/libs/planner/test/CMakeLists.txt b/source/libs/planner/test/CMakeLists.txt index a21b36fef6b3eecc51bdbe4abbb7fff3dc065098..abea60b0c798a055617abf3693be25f365fbc867 100644 --- a/source/libs/planner/test/CMakeLists.txt +++ b/source/libs/planner/test/CMakeLists.txt @@ -32,7 +32,9 @@ if(${BUILD_WINGETOPT}) target_link_libraries(plannerTest PUBLIC wingetopt) endif() -add_test( - NAME plannerTest - COMMAND plannerTest -) +if(NOT TD_WINDOWS) + add_test( + NAME plannerTest + COMMAND plannerTest + ) +endif(NOT TD_WINDOWS) diff --git a/source/libs/planner/test/planIntervalTest.cpp b/source/libs/planner/test/planIntervalTest.cpp index c9bae46ca9438977f4078ceac82e6c7c4b3c680e..a04f47741e50f4b0b02bc86e6713636b9b4fff97 100644 --- a/source/libs/planner/test/planIntervalTest.cpp +++ b/source/libs/planner/test/planIntervalTest.cpp @@ -50,4 +50,10 @@ TEST_F(PlanIntervalTest, selectFunc) { run("SELECT MAX(c1), MIN(c1) FROM t1 INTERVAL(10s)"); // select function along with the columns of select row, and with INTERVAL clause run("SELECT MAX(c1), c2 FROM t1 INTERVAL(10s)"); -} \ No newline at end of file +} + +TEST_F(PlanIntervalTest, stable) { + useDb("root", "test"); + + run("SELECT COUNT(*) FROM st1 INTERVAL(10s)"); +} diff --git a/source/libs/planner/test/planOtherTest.cpp b/source/libs/planner/test/planOtherTest.cpp index 67c09d706e34ea44ab0c4070d9bbb665a15dded1..f153604e6b6b43ca601bfe662f7a21b2f36327ff 100644 --- a/source/libs/planner/test/planOtherTest.cpp +++ b/source/libs/planner/test/planOtherTest.cpp @@ -33,6 +33,12 @@ TEST_F(PlanOtherTest, createStream) { "interval(10s)"); } +TEST_F(PlanOtherTest, createStreamUseSTable) { + useDb("root", "test"); + + run("create stream if not exists s1 as select count(*) from st1 interval(10s)"); 
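+  // With stable split now applying to stream queries, this statement should
+  // plan as a two-stage interval: per-vgroup STREAM_SEMI_INTERVAL operators
+  // feeding one STREAM_FINAL_INTERVAL (see stbSplSplitWindowNodeForStream).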
+} + TEST_F(PlanOtherTest, createSmaIndex) { useDb("root", "test"); diff --git a/source/libs/planner/test/planSTableTest.cpp b/source/libs/planner/test/planSTableTest.cpp index ed75b75e514aede02f41bf29ea044ccf833aef83..d1608cbad1155baf1bda19cf7c06a5121b0d581a 100644 --- a/source/libs/planner/test/planSTableTest.cpp +++ b/source/libs/planner/test/planSTableTest.cpp @@ -27,6 +27,14 @@ TEST_F(PlanSuperTableTest, pseudoCol) { run("SELECT TBNAME, tag1, tag2 FROM st1"); } +TEST_F(PlanSuperTableTest, pseudoColOnChildTable) { + useDb("root", "test"); + + run("SELECT TBNAME FROM st1s1"); + + run("SELECT TBNAME, tag1, tag2 FROM st1s1"); +} + TEST_F(PlanSuperTableTest, orderBy) { useDb("root", "test"); diff --git a/source/libs/planner/test/planTestUtil.cpp b/source/libs/planner/test/planTestUtil.cpp index 084762088823edee627b4ea3bad2286208d570ac..e2082d49364727719bc72f3445bcb038d5584976 100644 --- a/source/libs/planner/test/planTestUtil.cpp +++ b/source/libs/planner/test/planTestUtil.cpp @@ -73,7 +73,7 @@ void setDumpModule(const char* pModule) { } } -void setSkipSqlNum(const char* pNum) { g_skipSql = stoi(optarg); } +void setSkipSqlNum(const char* pNum) { g_skipSql = stoi(pNum); } void setLogLevel(const char* pLogLevel) { g_logLevel = stoi(pLogLevel); } diff --git a/source/libs/qcom/src/queryUtil.c b/source/libs/qcom/src/queryUtil.c index f4ba2fca8146b37a3e25bc7173488dd85c8e48d0..a5a499aaf5bc3b38998528d1550bd9b16c1d7671 100644 --- a/source/libs/qcom/src/queryUtil.c +++ b/source/libs/qcom/src/queryUtil.c @@ -199,3 +199,30 @@ SSchema createSchema(int8_t type, int32_t bytes, col_id_t colId, const char* nam tstrncpy(s.name, name, tListLen(s.name)); return s; } + +void destroyQueryExecRes(SQueryExecRes* pRes) { + if (NULL == pRes || NULL == pRes->res) { + return; + } + + switch (pRes->msgType) { + case TDMT_VND_ALTER_TABLE: + case TDMT_MND_ALTER_STB: { + tFreeSTableMetaRsp((STableMetaRsp *)pRes->res); + taosMemoryFreeClear(pRes->res); + break; + } + case TDMT_VND_SUBMIT: { + tFreeSSubmitRsp((SSubmitRsp*)pRes->res); + break; + } + case TDMT_VND_QUERY: { + taosArrayDestroy((SArray*)pRes->res); + break; + } + default: + qError("invalid exec result for request type %d", pRes->msgType); + } +} + + diff --git a/source/libs/qcom/src/querymsg.c b/source/libs/qcom/src/querymsg.c index 636b2b50a83cc300b59ef97fb7f09c09808fb717..e77f2b0ca42e58744ea14de5286ae24d1c4ceb14 100644 --- a/source/libs/qcom/src/querymsg.c +++ b/source/libs/qcom/src/querymsg.c @@ -273,7 +273,7 @@ static int32_t queryConvertTableMetaMsg(STableMetaRsp *pMetaMsg) { return TSDB_CODE_SUCCESS; } -int32_t queryCreateTableMetaFromMsg(STableMetaRsp *msg, bool isSuperTable, STableMeta **pMeta) { +int32_t queryCreateTableMetaFromMsg(STableMetaRsp *msg, bool isStb, STableMeta **pMeta) { int32_t total = msg->numOfColumns + msg->numOfTags; int32_t metaSize = sizeof(STableMeta) + sizeof(SSchema) * total; @@ -283,14 +283,14 @@ int32_t queryCreateTableMetaFromMsg(STableMetaRsp *msg, bool isSuperTable, STabl return TSDB_CODE_TSC_OUT_OF_MEMORY; } - pTableMeta->vgId = isSuperTable ? 0 : msg->vgId; - pTableMeta->tableType = isSuperTable ? TSDB_SUPER_TABLE : msg->tableType; - pTableMeta->uid = isSuperTable ? msg->suid : msg->tuid; + pTableMeta->vgId = isStb ? 0 : msg->vgId; + pTableMeta->tableType = isStb ? TSDB_SUPER_TABLE : msg->tableType; + pTableMeta->uid = isStb ? 
msg->suid : msg->tuid; pTableMeta->suid = msg->suid; pTableMeta->sversion = msg->sversion; pTableMeta->tversion = msg->tversion; - if (isSuperTable) { + if (isStb) { qDebug("stable %s meta returned, suid:%" PRIx64, msg->stbName, pTableMeta->suid); } @@ -373,7 +373,7 @@ int32_t queryProcessQnodeListRsp(void *output, char *msg, int32_t msgSize) { return code; } - out.addrsList = (SArray *)output; + out.qnodeList = (SArray *)output; if (tDeserializeSQnodeListRsp(msg, msgSize, &out) != 0) { qError("invalid qnode list rsp msg, msgSize:%d", msgSize); code = TSDB_CODE_INVALID_MSG; diff --git a/source/libs/qworker/inc/qworkerInt.h b/source/libs/qworker/inc/qwInt.h similarity index 83% rename from source/libs/qworker/inc/qworkerInt.h rename to source/libs/qworker/inc/qwInt.h index 48ad737334445daf4351841d38fc0871ddc8d212..4fe3c1839310be9e264f7241fbc0cce48837a05c 100644 --- a/source/libs/qworker/inc/qworkerInt.h +++ b/source/libs/qworker/inc/qwInt.h @@ -26,7 +26,7 @@ extern "C" { #include "ttimer.h" #include "tref.h" #include "plannodes.h" - +#include "executor.h" #include "trpc.h" #define QW_DEFAULT_SCHEDULER_NUMBER 10000 @@ -76,6 +76,8 @@ typedef struct SQWDebug { bool dumpEnable; } SQWDebug; +extern SQWDebug gQWDebug; + typedef struct SQWMsg { void *node; int32_t code; @@ -143,6 +145,32 @@ typedef struct SQWSchStatus { SHashObj *tasksHash; // key:queryId+taskId, value: SQWTaskStatus } SQWSchStatus; +typedef struct SQWTimeInQ { + uint64_t num; + uint64_t total; +} SQWTimeInQ; + +typedef struct SQWMsgStat { + SQWTimeInQ waitTime[2]; + uint64_t queryProcessed; + uint64_t cqueryProcessed; + uint64_t fetchProcessed; + uint64_t fetchRspProcessed; + uint64_t cancelProcessed; + uint64_t dropProcessed; + uint64_t hbProcessed; +} SQWMsgStat; + +typedef struct SQWRTStat { + uint64_t startTaskNum; + uint64_t stopTaskNum; +} SQWRTStat; + +typedef struct SQWStat { + SQWMsgStat msgStat; + SQWRTStat rtStat; +} SQWStat; + // Qnode/Vnode level task management typedef struct SQWorker { int64_t refId; @@ -153,9 +181,10 @@ typedef struct SQWorker { tmr_h hbTimer; SRWLatch schLock; // SRWLatch ctxLock; - SHashObj *schHash; // key: schedulerId, value: SQWSchStatus - SHashObj *ctxHash; // key: queryId+taskId, value: SQWTaskCtx - SMsgCb msgCb; + SHashObj *schHash; // key: schedulerId, value: SQWSchStatus + SHashObj *ctxHash; // key: queryId+taskId, value: SQWTaskCtx + SMsgCb msgCb; + SQWStat stat; } SQWorker; typedef struct SQWorkerMgmt { @@ -170,10 +199,13 @@ typedef struct SQWorkerMgmt { #define QW_IDS() sId, qId, tId, rId #define QW_FPARAMS() mgmt, QW_IDS() -#define QW_GET_EVENT_VALUE(ctx, event) atomic_load_8(&(ctx)->events[event]) +#define QW_STAT_INC(_item, _n) atomic_add_fetch_64(&(_item), _n) +#define QW_STAT_DEC(_item, _n) atomic_sub_fetch_64(&(_item), _n) +#define QW_STAT_GET(_item) atomic_load_64(&(_item)) -#define QW_IS_EVENT_RECEIVED(ctx, event) (atomic_load_8(&(ctx)->events[event]) == QW_EVENT_RECEIVED) -#define QW_IS_EVENT_PROCESSED(ctx, event) (atomic_load_8(&(ctx)->events[event]) == QW_EVENT_PROCESSED) +#define QW_GET_EVENT(ctx, event) atomic_load_8(&(ctx)->events[event]) +#define QW_IS_EVENT_RECEIVED(ctx, event) (QW_GET_EVENT(ctx, event) == QW_EVENT_RECEIVED) +#define QW_IS_EVENT_PROCESSED(ctx, event) (QW_GET_EVENT(ctx, event) == QW_EVENT_PROCESSED) #define QW_SET_EVENT_RECEIVED(ctx, event) atomic_store_8(&(ctx)->events[event], QW_EVENT_RECEIVED) #define QW_SET_EVENT_PROCESSED(ctx, event) atomic_store_8(&(ctx)->events[event], QW_EVENT_PROCESSED) @@ -303,9 +335,29 @@ typedef struct SQWorkerMgmt { extern 
SQWorkerMgmt gQwMgmt; static FORCE_INLINE SQWorker *qwAcquire(int64_t refId) { return (SQWorker *)taosAcquireRef(atomic_load_32(&gQwMgmt.qwRef), refId); } - static FORCE_INLINE int32_t qwRelease(int64_t refId) { return taosReleaseRef(gQwMgmt.qwRef, refId); } +char *qwPhaseStr(int32_t phase); +char *qwBufStatusStr(int32_t bufStatus); +int32_t qwAcquireAddScheduler(SQWorker *mgmt, uint64_t sId, int32_t rwType, SQWSchStatus **sch); +void qwReleaseScheduler(int32_t rwType, SQWorker *mgmt); +int32_t qwAddTaskStatus(QW_FPARAMS_DEF, int32_t status); +int32_t qwAcquireTaskCtx(QW_FPARAMS_DEF, SQWTaskCtx **ctx); +int32_t qwGetTaskCtx(QW_FPARAMS_DEF, SQWTaskCtx **ctx); +int32_t qwAddAcquireTaskCtx(QW_FPARAMS_DEF, SQWTaskCtx **ctx); +void qwReleaseTaskCtx(SQWorker *mgmt, void *ctx); +int32_t qwKillTaskHandle(QW_FPARAMS_DEF, SQWTaskCtx *ctx); +int32_t qwUpdateTaskStatus(QW_FPARAMS_DEF, int8_t status); +int32_t qwDropTask(QW_FPARAMS_DEF); +void qwSaveTbVersionInfo(qTaskInfo_t pTaskInfo, SQWTaskCtx *ctx); +int32_t qwOpenRef(void); +void qwSetHbParam(int64_t refId, SQWHbParam **pParam); +int32_t qwUpdateTimeInQueue(SQWorker *mgmt, int64_t ts, EQueueType type); +int64_t qwGetTimeInQueue(SQWorker *mgmt, EQueueType type); + +void qwDbgDumpMgmtInfo(SQWorker *mgmt); +int32_t qwDbgValidateStatus(QW_FPARAMS_DEF, int8_t oriStatus, int8_t newStatus, bool *ignore); + #ifdef __cplusplus } diff --git a/source/libs/qworker/inc/qworkerMsg.h b/source/libs/qworker/inc/qwMsg.h similarity index 92% rename from source/libs/qworker/inc/qworkerMsg.h rename to source/libs/qworker/inc/qwMsg.h index 6453cff70095b246f0ede7034da07536b1075f2f..ede085b6f912842c85dce8597374613856d80f1f 100644 --- a/source/libs/qworker/inc/qworkerMsg.h +++ b/source/libs/qworker/inc/qwMsg.h @@ -20,7 +20,7 @@ extern "C" { #endif -#include "qworkerInt.h" +#include "qwInt.h" #include "dataSinkMgt.h" int32_t qwProcessQuery(QW_FPARAMS_DEF, SQWMsg *qwMsg, int8_t taskType, int8_t explain); @@ -36,12 +36,10 @@ int32_t qwBuildAndSendFetchRsp(SRpcHandleInfo *pConn, SRetrieveTableRsp *pRsp, i int32_t code); void qwBuildFetchRsp(void *msg, SOutputData *input, int32_t len, bool qComplete); int32_t qwBuildAndSendCQueryMsg(QW_FPARAMS_DEF, SRpcHandleInfo *pConn); -int32_t qwBuildAndSendReadyRsp(SRpcHandleInfo *pConn, int32_t code, STbVerInfo* tbInfo); -int32_t qwBuildAndSendQueryRsp(SRpcHandleInfo *pConn, int32_t code); +int32_t qwBuildAndSendQueryRsp(SRpcHandleInfo *pConn, int32_t code, STbVerInfo* tbInfo); int32_t qwBuildAndSendExplainRsp(SRpcHandleInfo *pConn, SExplainExecInfo *execInfo, int32_t num); void qwFreeFetchRsp(void *msg); int32_t qwMallocFetchRsp(int32_t length, SRetrieveTableRsp **rsp); -int32_t qwGetSchTasksStatus(SQWorker *mgmt, uint64_t sId, SSchedulerStatusRsp **rsp); int32_t qwBuildAndSendHbRsp(SRpcHandleInfo *pConn, SSchedulerHbRsp *rsp, int32_t code); int32_t qwRegisterQueryBrokenLinkArg(QW_FPARAMS_DEF, SRpcHandleInfo *pConn); int32_t qwRegisterHbBrokenLinkArg(SQWorker *mgmt, uint64_t sId, SRpcHandleInfo *pConn); diff --git a/source/libs/qworker/src/qwDbg.c b/source/libs/qworker/src/qwDbg.c new file mode 100644 index 0000000000000000000000000000000000000000..27fe22295d3706eb21a237f8d662e34b4dce9b36 --- /dev/null +++ b/source/libs/qworker/src/qwDbg.c @@ -0,0 +1,128 @@ +#include "qworker.h" +#include "dataSinkMgt.h" +#include "executor.h" +#include "planner.h" +#include "query.h" +#include "qwInt.h" +#include "qwMsg.h" +#include "tcommon.h" +#include "tmsg.h" +#include "tname.h" + +SQWDebug gQWDebug = {.statusEnable = true, .dumpEnable = true}; + 
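/*
 * gQWDebug gates the debug-only logic in this file: statusEnable turns on the
 * task-status transition validation in qwDbgValidateStatus below, and
 * dumpEnable turns on qwDbgDumpMgmtInfo. As an illustrative sketch (not a
 * call made anywhere in this patch), both checks could be disabled at startup
 * by overriding the global:
 *
 *   gQWDebug = (SQWDebug){.statusEnable = false, .dumpEnable = false};
 */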
+int32_t qwDbgValidateStatus(QW_FPARAMS_DEF, int8_t oriStatus, int8_t newStatus, bool *ignore) {
+  if (!gQWDebug.statusEnable) {
+    return TSDB_CODE_SUCCESS;
+  }
+
+  int32_t code = 0;
+
+  if (oriStatus == newStatus) {
+    if (newStatus == JOB_TASK_STATUS_EXECUTING || newStatus == JOB_TASK_STATUS_FAILED) {
+      *ignore = true;
+      return TSDB_CODE_SUCCESS;
+    }
+
+    QW_ERR_JRET(TSDB_CODE_QRY_APP_ERROR);
+  }
+
+  switch (oriStatus) {
+    case JOB_TASK_STATUS_NULL:
+      if (newStatus != JOB_TASK_STATUS_EXECUTING && newStatus != JOB_TASK_STATUS_FAILED &&
+          newStatus != JOB_TASK_STATUS_NOT_START) {
+        QW_ERR_JRET(TSDB_CODE_QRY_APP_ERROR);
+      }
+
+      break;
+    case JOB_TASK_STATUS_NOT_START:
+      if (newStatus != JOB_TASK_STATUS_CANCELLED) {
+        QW_ERR_JRET(TSDB_CODE_QRY_APP_ERROR);
+      }
+
+      break;
+    case JOB_TASK_STATUS_EXECUTING:
+      if (newStatus != JOB_TASK_STATUS_PARTIAL_SUCCEED && newStatus != JOB_TASK_STATUS_SUCCEED &&
+          newStatus != JOB_TASK_STATUS_FAILED && newStatus != JOB_TASK_STATUS_CANCELLING &&
+          newStatus != JOB_TASK_STATUS_CANCELLED && newStatus != JOB_TASK_STATUS_DROPPING) {
+        QW_ERR_JRET(TSDB_CODE_QRY_APP_ERROR);
+      }
+
+      break;
+    case JOB_TASK_STATUS_PARTIAL_SUCCEED:
+      if (newStatus != JOB_TASK_STATUS_EXECUTING && newStatus != JOB_TASK_STATUS_SUCCEED &&
+          newStatus != JOB_TASK_STATUS_CANCELLED && newStatus != JOB_TASK_STATUS_FAILED &&
+          newStatus != JOB_TASK_STATUS_DROPPING) {
+        QW_ERR_JRET(TSDB_CODE_QRY_APP_ERROR);
+      }
+
+      break;
+    case JOB_TASK_STATUS_SUCCEED:
+      if (newStatus != JOB_TASK_STATUS_CANCELLED && newStatus != JOB_TASK_STATUS_DROPPING &&
+          newStatus != JOB_TASK_STATUS_FAILED) {
+        QW_ERR_JRET(TSDB_CODE_QRY_APP_ERROR);
+      }
+
+      break;
+    case JOB_TASK_STATUS_FAILED:
+      if (newStatus != JOB_TASK_STATUS_CANCELLED && newStatus != JOB_TASK_STATUS_DROPPING) {
+        QW_ERR_JRET(TSDB_CODE_QRY_APP_ERROR);
+      }
+      break;
+
+    case JOB_TASK_STATUS_CANCELLING:
+      if (newStatus != JOB_TASK_STATUS_CANCELLED) {
+        QW_ERR_JRET(TSDB_CODE_QRY_APP_ERROR);
+      }
+
+      break;
+    case JOB_TASK_STATUS_CANCELLED:
+    case JOB_TASK_STATUS_DROPPING:
+      if (newStatus != JOB_TASK_STATUS_FAILED && newStatus != JOB_TASK_STATUS_PARTIAL_SUCCEED) {
+        QW_ERR_JRET(TSDB_CODE_QRY_APP_ERROR);
+      }
+      break;
+
+    default:
+      QW_TASK_ELOG("invalid task origStatus:%s", jobTaskStatusStr(oriStatus));
+      return TSDB_CODE_QRY_APP_ERROR;
+  }
+
+  return TSDB_CODE_SUCCESS;
+
+_return:
+
+  QW_TASK_ELOG("invalid task status update from %s to %s", jobTaskStatusStr(oriStatus), jobTaskStatusStr(newStatus));
+  QW_RET(code);
+}
+
+void qwDbgDumpSchInfo(SQWSchStatus *sch, int32_t i) {}
+
+void qwDbgDumpMgmtInfo(SQWorker *mgmt) {
+  if (!gQWDebug.dumpEnable) {
+    return;
+  }
+
+  QW_LOCK(QW_READ, &mgmt->schLock);
+
+  /*QW_DUMP("total remain scheduler num:%d", taosHashGetSize(mgmt->schHash));*/
+
+  void *key = NULL;
+  size_t keyLen = 0;
+  int32_t i = 0;
+  SQWSchStatus *sch = NULL;
+
+  void *pIter = taosHashIterate(mgmt->schHash, NULL);
+  while (pIter) {
+    sch = (SQWSchStatus *)pIter;
+    qwDbgDumpSchInfo(sch, i);
+    ++i;
+    pIter = taosHashIterate(mgmt->schHash, pIter);
+  }
+
+  QW_UNLOCK(QW_READ, &mgmt->schLock);
+
+  /*QW_DUMP("total remain ctx num:%d", taosHashGetSize(mgmt->ctxHash));*/
+}
+
+
diff --git a/source/libs/qworker/src/qworkerMsg.c b/source/libs/qworker/src/qwMsg.c
similarity index 73%
rename from source/libs/qworker/src/qworkerMsg.c
rename to source/libs/qworker/src/qwMsg.c
index 0a192eb795b689285831f366aff30af4a3743b27..f8205a6bb4b2d004bc1c4f35b67eabc5635c5ca7 100644
--- a/source/libs/qworker/src/qworkerMsg.c
+++ b/source/libs/qworker/src/qwMsg.c
@@ -1,10 +1,10 @@
-#include "qworkerMsg.h" +#include "qwMsg.h" #include "dataSinkMgt.h" #include "executor.h" #include "planner.h" #include "query.h" #include "qworker.h" -#include "qworkerInt.h" +#include "qwInt.h" #include "tcommon.h" #include "tmsg.h" #include "tname.h" @@ -43,28 +43,8 @@ void qwFreeFetchRsp(void *msg) { } } -int32_t qwBuildAndSendQueryRsp(SRpcHandleInfo *pConn, int32_t code) { - SQueryTableRsp rsp = {.code = code}; - - int32_t contLen = tSerializeSQueryTableRsp(NULL, 0, &rsp); - void * msg = rpcMallocCont(contLen); - tSerializeSQueryTableRsp(msg, contLen, &rsp); - - SRpcMsg rpcRsp = { - .msgType = TDMT_VND_QUERY_RSP, - .pCont = msg, - .contLen = contLen, - .code = code, - .info = *pConn, - }; - - tmsgSendRsp(&rpcRsp); - - return TSDB_CODE_SUCCESS; -} - -int32_t qwBuildAndSendReadyRsp(SRpcHandleInfo *pConn, int32_t code, STbVerInfo* tbInfo) { - SResReadyRsp *pRsp = (SResReadyRsp *)rpcMallocCont(sizeof(SResReadyRsp)); +int32_t qwBuildAndSendQueryRsp(SRpcHandleInfo *pConn, int32_t code, STbVerInfo* tbInfo) { + SQueryTableRsp *pRsp = (SQueryTableRsp *)rpcMallocCont(sizeof(SQueryTableRsp)); pRsp->code = code; if (tbInfo) { strcpy(pRsp->tbFName, tbInfo->tbFName); @@ -73,13 +53,12 @@ int32_t qwBuildAndSendReadyRsp(SRpcHandleInfo *pConn, int32_t code, STbVerInfo* } SRpcMsg rpcRsp = { - .msgType = TDMT_VND_RES_READY_RSP, + .msgType = TDMT_VND_QUERY_RSP, .pCont = pRsp, .contLen = sizeof(*pRsp), .code = code, .info = *pConn, }; - rpcRsp.info.ahandle = NULL; tmsgSendRsp(&rpcRsp); @@ -177,76 +156,6 @@ int32_t qwBuildAndSendDropRsp(SRpcHandleInfo *pConn, int32_t code) { return TSDB_CODE_SUCCESS; } -int32_t qwBuildAndSendShowRsp(SRpcMsg *pMsg, int32_t code) { - int32_t numOfCols = 6; - SVShowTablesRsp showRsp = {0}; - - // showRsp.showId = 1; - showRsp.tableMeta.pSchemas = taosMemoryCalloc(numOfCols, sizeof(SSchema)); - if (showRsp.tableMeta.pSchemas == NULL) { - terrno = TSDB_CODE_OUT_OF_MEMORY; - return -1; - } - - col_id_t cols = 0; - SSchema *pSchema = showRsp.tableMeta.pSchemas; - - const SSchema *s = tGetTbnameColumnSchema(); - *pSchema = createSchema(s->type, s->bytes, ++cols, "name"); - pSchema++; - - int32_t type = TSDB_DATA_TYPE_TIMESTAMP; - *pSchema = createSchema(type, tDataTypes[type].bytes, ++cols, "created"); - pSchema++; - - type = TSDB_DATA_TYPE_SMALLINT; - *pSchema = createSchema(type, tDataTypes[type].bytes, ++cols, "columns"); - pSchema++; - - *pSchema = createSchema(s->type, s->bytes, ++cols, "stable"); - pSchema++; - - type = TSDB_DATA_TYPE_BIGINT; - *pSchema = createSchema(type, tDataTypes[type].bytes, ++cols, "uid"); - pSchema++; - - type = TSDB_DATA_TYPE_INT; - *pSchema = createSchema(type, tDataTypes[type].bytes, ++cols, "vgId"); - - assert(cols == numOfCols); - showRsp.tableMeta.numOfColumns = cols; - - int32_t bufLen = tSerializeSShowRsp(NULL, 0, &showRsp); - void * pBuf = rpcMallocCont(bufLen); - tSerializeSShowRsp(pBuf, bufLen, &showRsp); - - SRpcMsg rpcMsg = { - .info = pMsg->info, - .pCont = pBuf, - .contLen = bufLen, - .code = code, - }; - - tmsgSendRsp(&rpcMsg); - return TSDB_CODE_SUCCESS; -} - -int32_t qwBuildAndSendShowFetchRsp(SRpcMsg *pMsg, SVShowTablesFetchReq *pFetchReq) { - SVShowTablesFetchRsp *pRsp = (SVShowTablesFetchRsp *)rpcMallocCont(sizeof(SVShowTablesFetchRsp)); - int32_t handle = htonl(pFetchReq->id); - - pRsp->numOfRows = 0; - SRpcMsg rpcMsg = { - .info = pMsg->info, - .pCont = pRsp, - .contLen = sizeof(*pRsp), - .code = 0, - }; - - tmsgSendRsp(&rpcMsg); - return TSDB_CODE_SUCCESS; -} - int32_t qwBuildAndSendCQueryMsg(QW_FPARAMS_DEF, SRpcHandleInfo 
*pConn) { SQueryContinueReq *req = (SQueryContinueReq *)rpcMallocCont(sizeof(SQueryContinueReq)); if (NULL == req) { @@ -339,7 +248,7 @@ int32_t qwRegisterHbBrokenLinkArg(SQWorker *mgmt, uint64_t sId, SRpcHandleInfo * return TSDB_CODE_SUCCESS; } -int32_t qWorkerProcessQueryMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) { +int32_t qWorkerProcessQueryMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, int64_t ts) { if (NULL == node || NULL == qWorkerMgmt || NULL == pMsg) { QW_ERR_RET(TSDB_CODE_QRY_INVALID_INPUT); } @@ -348,6 +257,9 @@ int32_t qWorkerProcessQueryMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) { SSubQueryMsg *msg = pMsg->pCont; SQWorker * mgmt = (SQWorker *)qWorkerMgmt; + qwUpdateTimeInQueue(mgmt, ts, QUERY_QUEUE); + QW_STAT_INC(mgmt->stat.msgStat.queryProcessed, 1); + if (NULL == msg || pMsg->contLen <= sizeof(*msg)) { QW_ELOG("invalid query msg, msg:%p, msgLen:%d", msg, pMsg->contLen); QW_ERR_RET(TSDB_CODE_QRY_INVALID_INPUT); @@ -377,7 +289,7 @@ int32_t qWorkerProcessQueryMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) { return TSDB_CODE_SUCCESS; } -int32_t qWorkerProcessCQueryMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) { +int32_t qWorkerProcessCQueryMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, int64_t ts) { int32_t code = 0; int8_t status = 0; bool queryDone = false; @@ -386,6 +298,9 @@ int32_t qWorkerProcessCQueryMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) { SQWTaskCtx * handles = NULL; SQWorker * mgmt = (SQWorker *)qWorkerMgmt; + qwUpdateTimeInQueue(mgmt, ts, QUERY_QUEUE); + QW_STAT_INC(mgmt->stat.msgStat.cqueryProcessed, 1); + if (NULL == msg || pMsg->contLen < sizeof(*msg)) { QW_ELOG("invalid cquery msg, msg:%p, msgLen:%d", msg, pMsg->contLen); QW_ERR_RET(TSDB_CODE_QRY_INVALID_INPUT); @@ -407,66 +322,7 @@ int32_t qWorkerProcessCQueryMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) { return TSDB_CODE_SUCCESS; } -int32_t qWorkerProcessReadyMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) { - if (NULL == node || NULL == qWorkerMgmt || NULL == pMsg) { - return TSDB_CODE_QRY_INVALID_INPUT; - } - - SQWorker * mgmt = (SQWorker *)qWorkerMgmt; - SResReadyReq *msg = pMsg->pCont; - if (NULL == msg || pMsg->contLen < sizeof(*msg)) { - QW_ELOG("invalid task ready msg, msg:%p, msgLen:%d", msg, pMsg->contLen); - QW_ERR_RET(TSDB_CODE_QRY_INVALID_INPUT); - } - - msg->sId = be64toh(msg->sId); - msg->queryId = be64toh(msg->queryId); - msg->taskId = be64toh(msg->taskId); - - uint64_t sId = msg->sId; - uint64_t qId = msg->queryId; - uint64_t tId = msg->taskId; - int64_t rId = 0; - - SQWMsg qwMsg = {.node = node, .msg = NULL, .msgLen = 0, .connInfo = pMsg->info}; - - QW_SCH_TASK_DLOG("processReady start, node:%p, handle:%p", node, pMsg->info.handle); - - QW_ERR_RET(qwProcessReady(QW_FPARAMS(), &qwMsg)); - - QW_SCH_TASK_DLOG("processReady end, node:%p", node); - - return TSDB_CODE_SUCCESS; -} - -int32_t qWorkerProcessStatusMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) { - if (NULL == node || NULL == qWorkerMgmt || NULL == pMsg) { - return TSDB_CODE_QRY_INVALID_INPUT; - } - - int32_t code = 0; - SSchTasksStatusReq *msg = pMsg->pCont; - if (NULL == msg || pMsg->contLen < sizeof(*msg)) { - qError("invalid task status msg"); - QW_ERR_RET(TSDB_CODE_QRY_INVALID_INPUT); - } - - SQWorker *mgmt = (SQWorker *)qWorkerMgmt; - msg->sId = htobe64(msg->sId); - uint64_t sId = msg->sId; - - SSchedulerStatusRsp *sStatus = NULL; - - // QW_ERR_JRET(qwGetSchTasksStatus(qWorkerMgmt, msg->sId, &sStatus)); - -_return: - - // QW_ERR_RET(qwBuildAndSendStatusRsp(pMsg, sStatus)); - - return 
TSDB_CODE_SUCCESS; -} - -int32_t qWorkerProcessFetchMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) { +int32_t qWorkerProcessFetchMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, int64_t ts) { if (NULL == node || NULL == qWorkerMgmt || NULL == pMsg) { return TSDB_CODE_QRY_INVALID_INPUT; } @@ -474,6 +330,9 @@ int32_t qWorkerProcessFetchMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) { SResFetchReq *msg = pMsg->pCont; SQWorker * mgmt = (SQWorker *)qWorkerMgmt; + qwUpdateTimeInQueue(mgmt, ts, FETCH_QUEUE); + QW_STAT_INC(mgmt->stat.msgStat.fetchProcessed, 1); + if (NULL == msg || pMsg->contLen < sizeof(*msg)) { QW_ELOG("invalid fetch msg, msg:%p, msgLen:%d", msg, pMsg->contLen); QW_ERR_RET(TSDB_CODE_QRY_INVALID_INPUT); @@ -499,13 +358,19 @@ int32_t qWorkerProcessFetchMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) { return TSDB_CODE_SUCCESS; } -int32_t qWorkerProcessFetchRsp(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) { +int32_t qWorkerProcessFetchRsp(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, int64_t ts) { + SQWorker * mgmt = (SQWorker *)qWorkerMgmt; + if (mgmt) { + qwUpdateTimeInQueue(mgmt, ts, FETCH_QUEUE); + QW_STAT_INC(mgmt->stat.msgStat.fetchRspProcessed, 1); + } + qProcessFetchRsp(NULL, pMsg, NULL); pMsg->pCont = NULL; return TSDB_CODE_SUCCESS; } -int32_t qWorkerProcessCancelMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) { +int32_t qWorkerProcessCancelMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, int64_t ts) { if (NULL == node || NULL == qWorkerMgmt || NULL == pMsg) { return TSDB_CODE_QRY_INVALID_INPUT; } @@ -513,6 +378,10 @@ int32_t qWorkerProcessCancelMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) { SQWorker * mgmt = (SQWorker *)qWorkerMgmt; int32_t code = 0; STaskCancelReq *msg = pMsg->pCont; + + qwUpdateTimeInQueue(mgmt, ts, FETCH_QUEUE); + QW_STAT_INC(mgmt->stat.msgStat.cancelProcessed, 1); + if (NULL == msg || pMsg->contLen < sizeof(*msg)) { qError("invalid task cancel msg"); QW_ERR_RET(TSDB_CODE_QRY_INVALID_INPUT); @@ -540,7 +409,7 @@ _return: return TSDB_CODE_SUCCESS; } -int32_t qWorkerProcessDropMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) { +int32_t qWorkerProcessDropMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, int64_t ts) { if (NULL == node || NULL == qWorkerMgmt || NULL == pMsg) { return TSDB_CODE_QRY_INVALID_INPUT; } @@ -549,6 +418,9 @@ int32_t qWorkerProcessDropMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) { STaskDropReq *msg = pMsg->pCont; SQWorker * mgmt = (SQWorker *)qWorkerMgmt; + qwUpdateTimeInQueue(mgmt, ts, FETCH_QUEUE); + QW_STAT_INC(mgmt->stat.msgStat.dropProcessed, 1); + if (NULL == msg || pMsg->contLen < sizeof(*msg)) { QW_ELOG("invalid task drop msg, msg:%p, msgLen:%d", msg, pMsg->contLen); QW_ERR_RET(TSDB_CODE_QRY_INVALID_INPUT); @@ -579,7 +451,7 @@ int32_t qWorkerProcessDropMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) { return TSDB_CODE_SUCCESS; } -int32_t qWorkerProcessHbMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) { +int32_t qWorkerProcessHbMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, int64_t ts) { if (NULL == node || NULL == qWorkerMgmt || NULL == pMsg) { return TSDB_CODE_QRY_INVALID_INPUT; } @@ -588,6 +460,9 @@ int32_t qWorkerProcessHbMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) { SSchedulerHbReq req = {0}; SQWorker * mgmt = (SQWorker *)qWorkerMgmt; + qwUpdateTimeInQueue(mgmt, ts, FETCH_QUEUE); + QW_STAT_INC(mgmt->stat.msgStat.hbProcessed, 1); + if (NULL == pMsg->pCont) { QW_ELOG("invalid hb msg, msg:%p, msgLen:%d", pMsg->pCont, pMsg->contLen); QW_ERR_RET(TSDB_CODE_QRY_INVALID_INPUT); @@ -613,22 
+488,3 @@ int32_t qWorkerProcessHbMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) { return TSDB_CODE_SUCCESS; } - -int32_t qWorkerProcessShowMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) { - if (NULL == node || NULL == qWorkerMgmt || NULL == pMsg) { - return TSDB_CODE_QRY_INVALID_INPUT; - } - - int32_t code = 0; - SVShowTablesReq *pReq = pMsg->pCont; - QW_RET(qwBuildAndSendShowRsp(pMsg, code)); -} - -int32_t qWorkerProcessShowFetchMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) { - if (NULL == node || NULL == qWorkerMgmt || NULL == pMsg) { - return TSDB_CODE_QRY_INVALID_INPUT; - } - - SVShowTablesFetchReq *pFetchReq = pMsg->pCont; - QW_RET(qwBuildAndSendShowFetchRsp(pMsg, pFetchReq)); -} diff --git a/source/libs/qworker/src/qwUtil.c b/source/libs/qworker/src/qwUtil.c new file mode 100644 index 0000000000000000000000000000000000000000..3d0204e355bd228836a2729cc9e52e74981c4e0f --- /dev/null +++ b/source/libs/qworker/src/qwUtil.c @@ -0,0 +1,537 @@ +#include "dataSinkMgt.h" +#include "executor.h" +#include "planner.h" +#include "query.h" +#include "qwInt.h" +#include "qwMsg.h" +#include "qworker.h" +#include "tcommon.h" +#include "tmsg.h" +#include "tname.h" + +char *qwPhaseStr(int32_t phase) { + switch (phase) { + case QW_PHASE_PRE_QUERY: + return "PRE_QUERY"; + case QW_PHASE_POST_QUERY: + return "POST_QUERY"; + case QW_PHASE_PRE_FETCH: + return "PRE_FETCH"; + case QW_PHASE_POST_FETCH: + return "POST_FETCH"; + case QW_PHASE_PRE_CQUERY: + return "PRE_CQUERY"; + case QW_PHASE_POST_CQUERY: + return "POST_CQUERY"; + default: + break; + } + + return "UNKNOWN"; +} + +char *qwBufStatusStr(int32_t bufStatus) { + switch (bufStatus) { + case DS_BUF_LOW: + return "LOW"; + case DS_BUF_FULL: + return "FULL"; + case DS_BUF_EMPTY: + return "EMPTY"; + default: + break; + } + + return "UNKNOWN"; +} + +int32_t qwSetTaskStatus(QW_FPARAMS_DEF, SQWTaskStatus *task, int8_t status) { + int32_t code = 0; + int8_t origStatus = 0; + bool ignore = false; + + while (true) { + origStatus = atomic_load_8(&task->status); + + QW_ERR_RET(qwDbgValidateStatus(QW_FPARAMS(), origStatus, status, &ignore)); + if (ignore) { + break; + } + + if (origStatus != atomic_val_compare_exchange_8(&task->status, origStatus, status)) { + continue; + } + + QW_TASK_DLOG("task status updated from %s to %s", jobTaskStatusStr(origStatus), jobTaskStatusStr(status)); + + break; + } + + return TSDB_CODE_SUCCESS; +} + +int32_t qwAddSchedulerImpl(SQWorker *mgmt, uint64_t sId, int32_t rwType) { + SQWSchStatus newSch = {0}; + newSch.tasksHash = + taosHashInit(mgmt->cfg.maxSchTaskNum, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK); + if (NULL == newSch.tasksHash) { + QW_SCH_ELOG("taosHashInit %d failed", mgmt->cfg.maxSchTaskNum); + QW_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY); + } + + QW_LOCK(QW_WRITE, &mgmt->schLock); + int32_t code = taosHashPut(mgmt->schHash, &sId, sizeof(sId), &newSch, sizeof(newSch)); + if (0 != code) { + if (!HASH_NODE_EXIST(code)) { + QW_UNLOCK(QW_WRITE, &mgmt->schLock); + + QW_SCH_ELOG("taosHashPut new sch to scheduleHash failed, errno:%d", errno); + taosHashCleanup(newSch.tasksHash); + QW_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY); + } + + taosHashCleanup(newSch.tasksHash); + } + QW_UNLOCK(QW_WRITE, &mgmt->schLock); + + return TSDB_CODE_SUCCESS; +} + +int32_t qwAcquireSchedulerImpl(SQWorker *mgmt, uint64_t sId, int32_t rwType, SQWSchStatus **sch, int32_t nOpt) { + while (true) { + QW_LOCK(rwType, &mgmt->schLock); + *sch = taosHashGet(mgmt->schHash, &sId, sizeof(sId)); + if (NULL == (*sch)) { + 
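      /* Scheduler entry not found: release the lock first, then either
       * register the scheduler and retry the lookup (QW_NOT_EXIST_ADD) or
       * report it as missing (QW_NOT_EXIST_RET_ERR), depending on nOpt. */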
QW_UNLOCK(rwType, &mgmt->schLock); + + if (QW_NOT_EXIST_ADD == nOpt) { + QW_ERR_RET(qwAddSchedulerImpl(mgmt, sId, rwType)); + + nOpt = QW_NOT_EXIST_RET_ERR; + + continue; + } else if (QW_NOT_EXIST_RET_ERR == nOpt) { + QW_RET(TSDB_CODE_QRY_SCH_NOT_EXIST); + } else { + QW_SCH_ELOG("unknown notExistOpt:%d", nOpt); + QW_ERR_RET(TSDB_CODE_QRY_APP_ERROR); + } + } + + break; + } + + return TSDB_CODE_SUCCESS; +} + +int32_t qwAcquireAddScheduler(SQWorker *mgmt, uint64_t sId, int32_t rwType, SQWSchStatus **sch) { + return qwAcquireSchedulerImpl(mgmt, sId, rwType, sch, QW_NOT_EXIST_ADD); +} + +int32_t qwAcquireScheduler(SQWorker *mgmt, uint64_t sId, int32_t rwType, SQWSchStatus **sch) { + return qwAcquireSchedulerImpl(mgmt, sId, rwType, sch, QW_NOT_EXIST_RET_ERR); +} + +void qwReleaseScheduler(int32_t rwType, SQWorker *mgmt) { QW_UNLOCK(rwType, &mgmt->schLock); } + +int32_t qwAcquireTaskStatus(QW_FPARAMS_DEF, int32_t rwType, SQWSchStatus *sch, SQWTaskStatus **task) { + char id[sizeof(qId) + sizeof(tId)] = {0}; + QW_SET_QTID(id, qId, tId); + + QW_LOCK(rwType, &sch->tasksLock); + *task = taosHashGet(sch->tasksHash, id, sizeof(id)); + if (NULL == (*task)) { + QW_UNLOCK(rwType, &sch->tasksLock); + QW_ERR_RET(TSDB_CODE_QRY_TASK_NOT_EXIST); + } + + return TSDB_CODE_SUCCESS; +} + +int32_t qwAddTaskStatusImpl(QW_FPARAMS_DEF, SQWSchStatus *sch, int32_t rwType, int32_t status, SQWTaskStatus **task) { + int32_t code = 0; + + char id[sizeof(qId) + sizeof(tId)] = {0}; + QW_SET_QTID(id, qId, tId); + + SQWTaskStatus ntask = {0}; + ntask.status = status; + ntask.refId = rId; + + QW_LOCK(QW_WRITE, &sch->tasksLock); + code = taosHashPut(sch->tasksHash, id, sizeof(id), &ntask, sizeof(ntask)); + if (0 != code) { + QW_UNLOCK(QW_WRITE, &sch->tasksLock); + if (HASH_NODE_EXIST(code)) { + if (rwType && task) { + QW_RET(qwAcquireTaskStatus(QW_FPARAMS(), rwType, sch, task)); + } else { + QW_TASK_ELOG("task status already exist, newStatus:%s", jobTaskStatusStr(status)); + QW_ERR_RET(TSDB_CODE_QRY_TASK_ALREADY_EXIST); + } + } else { + QW_TASK_ELOG("taosHashPut to tasksHash failed, error:%x - %s", code, tstrerror(code)); + QW_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY); + } + } + QW_UNLOCK(QW_WRITE, &sch->tasksLock); + + QW_TASK_DLOG("task status added, newStatus:%s", jobTaskStatusStr(status)); + + if (rwType && task) { + QW_ERR_RET(qwAcquireTaskStatus(QW_FPARAMS(), rwType, sch, task)); + } + + return TSDB_CODE_SUCCESS; +} + +int32_t qwAddTaskStatus(QW_FPARAMS_DEF, int32_t status) { + SQWSchStatus *tsch = NULL; + int32_t code = 0; + QW_ERR_RET(qwAcquireAddScheduler(mgmt, sId, QW_READ, &tsch)); + + QW_ERR_JRET(qwAddTaskStatusImpl(QW_FPARAMS(), tsch, 0, status, NULL)); + +_return: + + qwReleaseScheduler(QW_READ, mgmt); + + QW_RET(code); +} + +int32_t qwAddAcquireTaskStatus(QW_FPARAMS_DEF, int32_t rwType, SQWSchStatus *sch, int32_t status, + SQWTaskStatus **task) { + return qwAddTaskStatusImpl(QW_FPARAMS(), sch, rwType, status, task); +} + +void qwReleaseTaskStatus(int32_t rwType, SQWSchStatus *sch) { QW_UNLOCK(rwType, &sch->tasksLock); } + +int32_t qwAcquireTaskCtx(QW_FPARAMS_DEF, SQWTaskCtx **ctx) { + char id[sizeof(qId) + sizeof(tId)] = {0}; + QW_SET_QTID(id, qId, tId); + + *ctx = taosHashAcquire(mgmt->ctxHash, id, sizeof(id)); + if (NULL == (*ctx)) { + QW_TASK_DLOG_E("task ctx not exist, may be dropped"); + QW_ERR_RET(TSDB_CODE_QRY_TASK_CTX_NOT_EXIST); + } + + return TSDB_CODE_SUCCESS; +} + +int32_t qwGetTaskCtx(QW_FPARAMS_DEF, SQWTaskCtx **ctx) { + char id[sizeof(qId) + sizeof(tId)] = {0}; + QW_SET_QTID(id, qId, tId); + + *ctx = 
taosHashGet(mgmt->ctxHash, id, sizeof(id)); + if (NULL == (*ctx)) { + QW_TASK_DLOG_E("task ctx not exist, may be dropped"); + QW_ERR_RET(TSDB_CODE_QRY_TASK_CTX_NOT_EXIST); + } + + return TSDB_CODE_SUCCESS; +} + +int32_t qwAddTaskCtxImpl(QW_FPARAMS_DEF, bool acquire, SQWTaskCtx **ctx) { + char id[sizeof(qId) + sizeof(tId)] = {0}; + QW_SET_QTID(id, qId, tId); + + SQWTaskCtx nctx = {0}; + + int32_t code = taosHashPut(mgmt->ctxHash, id, sizeof(id), &nctx, sizeof(SQWTaskCtx)); + if (0 != code) { + if (HASH_NODE_EXIST(code)) { + if (acquire && ctx) { + QW_RET(qwAcquireTaskCtx(QW_FPARAMS(), ctx)); + } else if (ctx) { + QW_RET(qwGetTaskCtx(QW_FPARAMS(), ctx)); + } else { + QW_TASK_ELOG_E("task ctx already exist"); + QW_ERR_RET(TSDB_CODE_QRY_TASK_ALREADY_EXIST); + } + } else { + QW_TASK_ELOG("taosHashPut to ctxHash failed, error:%x", code); + QW_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY); + } + } + + if (acquire && ctx) { + QW_RET(qwAcquireTaskCtx(QW_FPARAMS(), ctx)); + } else if (ctx) { + QW_RET(qwGetTaskCtx(QW_FPARAMS(), ctx)); + } + + return TSDB_CODE_SUCCESS; +} + +int32_t qwAddTaskCtx(QW_FPARAMS_DEF) { QW_RET(qwAddTaskCtxImpl(QW_FPARAMS(), false, NULL)); } + +int32_t qwAddAcquireTaskCtx(QW_FPARAMS_DEF, SQWTaskCtx **ctx) { return qwAddTaskCtxImpl(QW_FPARAMS(), true, ctx); } + +void qwReleaseTaskCtx(SQWorker *mgmt, void *ctx) { taosHashRelease(mgmt->ctxHash, ctx); } + +void qwFreeTaskHandle(QW_FPARAMS_DEF, qTaskInfo_t *taskHandle) { + // Note: free/kill may in RC + qTaskInfo_t otaskHandle = atomic_load_ptr(taskHandle); + if (otaskHandle && atomic_val_compare_exchange_ptr(taskHandle, otaskHandle, NULL)) { + qDestroyTask(otaskHandle); + } +} + +int32_t qwKillTaskHandle(QW_FPARAMS_DEF, SQWTaskCtx *ctx) { + int32_t code = 0; + // Note: free/kill may in RC + qTaskInfo_t taskHandle = atomic_load_ptr(&ctx->taskHandle); + if (taskHandle && atomic_val_compare_exchange_ptr(&ctx->taskHandle, taskHandle, NULL)) { + code = qAsyncKillTask(taskHandle); + atomic_store_ptr(&ctx->taskHandle, taskHandle); + } + + QW_RET(code); +} + +void qwFreeTask(QW_FPARAMS_DEF, SQWTaskCtx *ctx) { + tmsgReleaseHandle(&ctx->ctrlConnInfo, TAOS_CONN_SERVER); + ctx->ctrlConnInfo.handle = NULL; + ctx->ctrlConnInfo.refId = -1; + + // NO need to release dataConnInfo + + qwFreeTaskHandle(QW_FPARAMS(), &ctx->taskHandle); + + if (ctx->sinkHandle) { + dsDestroyDataSinker(ctx->sinkHandle); + ctx->sinkHandle = NULL; + } + + if (ctx->plan) { + nodesDestroyNode(ctx->plan); + ctx->plan = NULL; + } +} + +int32_t qwDropTaskCtx(QW_FPARAMS_DEF) { + char id[sizeof(qId) + sizeof(tId)] = {0}; + QW_SET_QTID(id, qId, tId); + SQWTaskCtx octx; + + SQWTaskCtx *ctx = taosHashGet(mgmt->ctxHash, id, sizeof(id)); + if (NULL == ctx) { + QW_ERR_RET(TSDB_CODE_QRY_TASK_CTX_NOT_EXIST); + } + + octx = *ctx; + + atomic_store_ptr(&ctx->taskHandle, NULL); + atomic_store_ptr(&ctx->sinkHandle, NULL); + atomic_store_ptr(&ctx->plan, NULL); + + QW_SET_EVENT_PROCESSED(ctx, QW_EVENT_DROP); + + if (taosHashRemove(mgmt->ctxHash, id, sizeof(id))) { + QW_TASK_ELOG_E("taosHashRemove from ctx hash failed"); + QW_ERR_RET(TSDB_CODE_QRY_TASK_CTX_NOT_EXIST); + } + + qwFreeTask(QW_FPARAMS(), &octx); + + QW_TASK_DLOG_E("task ctx dropped"); + + return TSDB_CODE_SUCCESS; +} + +int32_t qwDropTaskStatus(QW_FPARAMS_DEF) { + SQWSchStatus *sch = NULL; + SQWTaskStatus *task = NULL; + int32_t code = 0; + + char id[sizeof(qId) + sizeof(tId)] = {0}; + QW_SET_QTID(id, qId, tId); + + if (qwAcquireScheduler(mgmt, sId, QW_WRITE, &sch)) { + QW_TASK_WLOG_E("scheduler does not exist"); + return 
TSDB_CODE_SUCCESS; + } + + if (qwAcquireTaskStatus(QW_FPARAMS(), QW_WRITE, sch, &task)) { + qwReleaseScheduler(QW_WRITE, mgmt); + + QW_TASK_WLOG_E("task does not exist"); + return TSDB_CODE_SUCCESS; + } + + if (taosHashRemove(sch->tasksHash, id, sizeof(id))) { + QW_TASK_ELOG_E("taosHashRemove task from hash failed"); + QW_ERR_JRET(TSDB_CODE_QRY_APP_ERROR); + } + + QW_TASK_DLOG_E("task status dropped"); + +_return: + + if (task) { + qwReleaseTaskStatus(QW_WRITE, sch); + } + qwReleaseScheduler(QW_WRITE, mgmt); + + QW_RET(code); +} + +int32_t qwUpdateTaskStatus(QW_FPARAMS_DEF, int8_t status) { + SQWSchStatus *sch = NULL; + SQWTaskStatus *task = NULL; + int32_t code = 0; + + QW_ERR_RET(qwAcquireScheduler(mgmt, sId, QW_READ, &sch)); + QW_ERR_JRET(qwAcquireTaskStatus(QW_FPARAMS(), QW_READ, sch, &task)); + + QW_ERR_JRET(qwSetTaskStatus(QW_FPARAMS(), task, status)); + +_return: + + if (task) { + qwReleaseTaskStatus(QW_READ, sch); + } + qwReleaseScheduler(QW_READ, mgmt); + + QW_RET(code); +} + +int32_t qwDropTask(QW_FPARAMS_DEF) { + QW_ERR_RET(qwDropTaskStatus(QW_FPARAMS())); + QW_ERR_RET(qwDropTaskCtx(QW_FPARAMS())); + + QW_TASK_DLOG_E("task is dropped"); + + return TSDB_CODE_SUCCESS; +} + +void qwSetHbParam(int64_t refId, SQWHbParam **pParam) { + int32_t paramIdx = 0; + int32_t newParamIdx = 0; + + while (true) { + paramIdx = atomic_load_32(&gQwMgmt.paramIdx); + if (paramIdx == tListLen(gQwMgmt.param)) { + newParamIdx = 0; + } else { + newParamIdx = paramIdx + 1; + } + + if (paramIdx == atomic_val_compare_exchange_32(&gQwMgmt.paramIdx, paramIdx, newParamIdx)) { + break; + } + } + + gQwMgmt.param[paramIdx].qwrId = gQwMgmt.qwRef; + gQwMgmt.param[paramIdx].refId = refId; + + *pParam = &gQwMgmt.param[paramIdx]; +} + +void qwSaveTbVersionInfo(qTaskInfo_t pTaskInfo, SQWTaskCtx *ctx) { + char dbFName[TSDB_DB_FNAME_LEN]; + char tbName[TSDB_TABLE_NAME_LEN]; + + qGetQueriedTableSchemaVersion(pTaskInfo, dbFName, tbName, &ctx->tbInfo.sversion, &ctx->tbInfo.tversion); + + if (dbFName[0] && tbName[0]) { + sprintf(ctx->tbInfo.tbFName, "%s.%s", dbFName, tbName); + } else { + ctx->tbInfo.tbFName[0] = 0; + } +} + +void qwCloseRef(void) { + taosWLockLatch(&gQwMgmt.lock); + if (atomic_load_32(&gQwMgmt.qwNum) <= 0 && gQwMgmt.qwRef >= 0) { + taosCloseRef(gQwMgmt.qwRef); + gQwMgmt.qwRef = -1; + } + taosWUnLockLatch(&gQwMgmt.lock); +} + +void qwDestroySchStatus(SQWSchStatus *pStatus) { taosHashCleanup(pStatus->tasksHash); } + +void qwDestroyImpl(void *pMgmt) { + SQWorker *mgmt = (SQWorker *)pMgmt; + + taosTmrStop(mgmt->hbTimer); + mgmt->hbTimer = NULL; + taosTmrCleanUp(mgmt->timer); + + // TODO STOP ALL QUERY + + // TODO FREE ALL + + taosHashCleanup(mgmt->ctxHash); + + void *pIter = taosHashIterate(mgmt->schHash, NULL); + while (pIter) { + SQWSchStatus *sch = (SQWSchStatus *)pIter; + qwDestroySchStatus(sch); + pIter = taosHashIterate(mgmt->schHash, pIter); + } + taosHashCleanup(mgmt->schHash); + + taosMemoryFree(mgmt); + + atomic_sub_fetch_32(&gQwMgmt.qwNum, 1); + + qwCloseRef(); +} + +int32_t qwOpenRef(void) { + taosWLockLatch(&gQwMgmt.lock); + if (gQwMgmt.qwRef < 0) { + gQwMgmt.qwRef = taosOpenRef(100, qwDestroyImpl); + if (gQwMgmt.qwRef < 0) { + taosWUnLockLatch(&gQwMgmt.lock); + qError("init qworker ref failed"); + QW_RET(TSDB_CODE_QRY_OUT_OF_MEMORY); + } + } + taosWUnLockLatch(&gQwMgmt.lock); + + return TSDB_CODE_SUCCESS; +} + +int32_t qwUpdateTimeInQueue(SQWorker *mgmt, int64_t ts, EQueueType type) { + if (ts <= 0) { + return TSDB_CODE_SUCCESS; + } + + int64_t duration = taosGetTimestampUs() - ts; + switch (type) 
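  /* Per-queue wait accounting: waitTime[0] tracks the query queue and
   * waitTime[1] the fetch queue. Each entry accumulates a message count and
   * the total queued time in microseconds; qwGetTimeInQueue below reports
   * total/num as the average wait. */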
{ + case QUERY_QUEUE: + ++mgmt->stat.msgStat.waitTime[0].num; + mgmt->stat.msgStat.waitTime[0].total += duration; + break; + case FETCH_QUEUE: + ++mgmt->stat.msgStat.waitTime[1].num; + mgmt->stat.msgStat.waitTime[1].total += duration; + break; + default: + qError("unsupported queue type %d", type); + return TSDB_CODE_APP_ERROR; + } + + return TSDB_CODE_SUCCESS; +} + +int64_t qwGetTimeInQueue(SQWorker *mgmt, EQueueType type) { + SQWTimeInQ *pStat = NULL; + switch (type) { + case QUERY_QUEUE: + pStat = &mgmt->stat.msgStat.waitTime[0]; + return pStat->num ? (pStat->total / pStat->num) : 0; + case FETCH_QUEUE: + pStat = &mgmt->stat.msgStat.waitTime[1]; + return pStat->num ? (pStat->total / pStat->num) : 0; + default: + qError("unsupported queue type %d", type); + } + + return -1; +} + diff --git a/source/libs/qworker/src/qworker.c b/source/libs/qworker/src/qworker.c index 5cf9f62fd8d55ac88813602ad37ae30a7c12473d..fd16fa53b7a6d9b4c5e460719c3fc09134ef034b 100644 --- a/source/libs/qworker/src/qworker.c +++ b/source/libs/qworker/src/qworker.c @@ -1,530 +1,45 @@ -#include "qworker.h" #include "dataSinkMgt.h" #include "executor.h" #include "planner.h" #include "query.h" -#include "qworkerInt.h" -#include "qworkerMsg.h" +#include "qwInt.h" +#include "qwMsg.h" #include "tcommon.h" #include "tmsg.h" #include "tname.h" +#include "qworker.h" -SQWDebug gQWDebug = {.statusEnable = true, .dumpEnable = true}; SQWorkerMgmt gQwMgmt = { .lock = 0, .qwRef = -1, .qwNum = 0, }; -int32_t qwDbgValidateStatus(QW_FPARAMS_DEF, int8_t oriStatus, int8_t newStatus, bool *ignore) { - if (!gQWDebug.statusEnable) { - return TSDB_CODE_SUCCESS; - } - - int32_t code = 0; - - if (oriStatus == newStatus) { - if (newStatus == JOB_TASK_STATUS_EXECUTING || newStatus == JOB_TASK_STATUS_FAILED) { - *ignore = true; - return TSDB_CODE_SUCCESS; - } - - QW_ERR_JRET(TSDB_CODE_QRY_APP_ERROR); - } - - switch (oriStatus) { - case JOB_TASK_STATUS_NULL: - if (newStatus != JOB_TASK_STATUS_EXECUTING && newStatus != JOB_TASK_STATUS_FAILED && - newStatus != JOB_TASK_STATUS_NOT_START) { - QW_ERR_JRET(TSDB_CODE_QRY_APP_ERROR); - } - - break; - case JOB_TASK_STATUS_NOT_START: - if (newStatus != JOB_TASK_STATUS_CANCELLED) { - QW_ERR_JRET(TSDB_CODE_QRY_APP_ERROR); - } - - break; - case JOB_TASK_STATUS_EXECUTING: - if (newStatus != JOB_TASK_STATUS_PARTIAL_SUCCEED && newStatus != JOB_TASK_STATUS_SUCCEED && - newStatus != JOB_TASK_STATUS_FAILED && newStatus != JOB_TASK_STATUS_CANCELLING && - newStatus != JOB_TASK_STATUS_CANCELLED && newStatus != JOB_TASK_STATUS_DROPPING) { - QW_ERR_JRET(TSDB_CODE_QRY_APP_ERROR); - } - - break; - case JOB_TASK_STATUS_PARTIAL_SUCCEED: - if (newStatus != JOB_TASK_STATUS_EXECUTING && newStatus != JOB_TASK_STATUS_SUCCEED && - newStatus != JOB_TASK_STATUS_CANCELLED && newStatus != JOB_TASK_STATUS_FAILED && - newStatus != JOB_TASK_STATUS_DROPPING) { - QW_ERR_JRET(TSDB_CODE_QRY_APP_ERROR); - } - - break; - case JOB_TASK_STATUS_SUCCEED: - if (newStatus != JOB_TASK_STATUS_CANCELLED && newStatus != JOB_TASK_STATUS_DROPPING && - newStatus != JOB_TASK_STATUS_FAILED) { - QW_ERR_JRET(TSDB_CODE_QRY_APP_ERROR); - } - - break; - case JOB_TASK_STATUS_FAILED: - if (newStatus != JOB_TASK_STATUS_CANCELLED && newStatus != JOB_TASK_STATUS_DROPPING) { - QW_ERR_JRET(TSDB_CODE_QRY_APP_ERROR); - } - break; - - case JOB_TASK_STATUS_CANCELLING: - if (newStatus != JOB_TASK_STATUS_CANCELLED) { - QW_ERR_JRET(TSDB_CODE_QRY_APP_ERROR); - } - - break; - case JOB_TASK_STATUS_CANCELLED: - case JOB_TASK_STATUS_DROPPING: - if (newStatus != JOB_TASK_STATUS_FAILED 
&& newStatus != JOB_TASK_STATUS_PARTIAL_SUCCEED) { - QW_ERR_JRET(TSDB_CODE_QRY_APP_ERROR); - } - break; - - default: - QW_TASK_ELOG("invalid task origStatus:%s", jobTaskStatusStr(oriStatus)); - return TSDB_CODE_QRY_APP_ERROR; - } - - return TSDB_CODE_SUCCESS; - -_return: - - QW_TASK_ELOG("invalid task status update from %s to %s", jobTaskStatusStr(oriStatus), jobTaskStatusStr(newStatus)); - QW_RET(code); -} - -void qwDbgDumpSchInfo(SQWSchStatus *sch, int32_t i) {} - -void qwDbgDumpMgmtInfo(SQWorker *mgmt) { - if (!gQWDebug.dumpEnable) { - return; - } - - QW_LOCK(QW_READ, &mgmt->schLock); - - /*QW_DUMP("total remain schduler num:%d", taosHashGetSize(mgmt->schHash));*/ - - void *key = NULL; - size_t keyLen = 0; - int32_t i = 0; - SQWSchStatus *sch = NULL; - - void *pIter = taosHashIterate(mgmt->schHash, NULL); - while (pIter) { - sch = (SQWSchStatus *)pIter; - qwDbgDumpSchInfo(sch, i); - ++i; - pIter = taosHashIterate(mgmt->schHash, pIter); - } - - QW_UNLOCK(QW_READ, &mgmt->schLock); - - /*QW_DUMP("total remain ctx num:%d", taosHashGetSize(mgmt->ctxHash));*/ -} - -char *qwPhaseStr(int32_t phase) { - switch (phase) { - case QW_PHASE_PRE_QUERY: - return "PRE_QUERY"; - case QW_PHASE_POST_QUERY: - return "POST_QUERY"; - case QW_PHASE_PRE_FETCH: - return "PRE_FETCH"; - case QW_PHASE_POST_FETCH: - return "POST_FETCH"; - case QW_PHASE_PRE_CQUERY: - return "PRE_CQUERY"; - case QW_PHASE_POST_CQUERY: - return "POST_CQUERY"; - default: - break; - } - - return "UNKNOWN"; -} - -char *qwBufStatusStr(int32_t bufStatus) { - switch (bufStatus) { - case DS_BUF_LOW: - return "LOW"; - case DS_BUF_FULL: - return "FULL"; - case DS_BUF_EMPTY: - return "EMPTY"; - default: - break; - } - - return "UNKNOWN"; -} - -int32_t qwSetTaskStatus(QW_FPARAMS_DEF, SQWTaskStatus *task, int8_t status) { - int32_t code = 0; - int8_t origStatus = 0; - bool ignore = false; - - while (true) { - origStatus = atomic_load_8(&task->status); - - QW_ERR_RET(qwDbgValidateStatus(QW_FPARAMS(), origStatus, status, &ignore)); - if (ignore) { - break; - } - - if (origStatus != atomic_val_compare_exchange_8(&task->status, origStatus, status)) { - continue; - } - - QW_TASK_DLOG("task status updated from %s to %s", jobTaskStatusStr(origStatus), jobTaskStatusStr(status)); - - break; - } - - return TSDB_CODE_SUCCESS; -} - -int32_t qwAddSchedulerImpl(SQWorker *mgmt, uint64_t sId, int32_t rwType) { - SQWSchStatus newSch = {0}; - newSch.tasksHash = - taosHashInit(mgmt->cfg.maxSchTaskNum, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK); - if (NULL == newSch.tasksHash) { - QW_SCH_ELOG("taosHashInit %d failed", mgmt->cfg.maxSchTaskNum); - QW_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY); - } - - QW_LOCK(QW_WRITE, &mgmt->schLock); - int32_t code = taosHashPut(mgmt->schHash, &sId, sizeof(sId), &newSch, sizeof(newSch)); - if (0 != code) { - if (!HASH_NODE_EXIST(code)) { - QW_UNLOCK(QW_WRITE, &mgmt->schLock); - - QW_SCH_ELOG("taosHashPut new sch to scheduleHash failed, errno:%d", errno); - taosHashCleanup(newSch.tasksHash); - QW_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY); - } - - taosHashCleanup(newSch.tasksHash); - } - QW_UNLOCK(QW_WRITE, &mgmt->schLock); - - return TSDB_CODE_SUCCESS; -} - -int32_t qwAcquireSchedulerImpl(SQWorker *mgmt, uint64_t sId, int32_t rwType, SQWSchStatus **sch, int32_t nOpt) { - while (true) { - QW_LOCK(rwType, &mgmt->schLock); - *sch = taosHashGet(mgmt->schHash, &sId, sizeof(sId)); - if (NULL == (*sch)) { - QW_UNLOCK(rwType, &mgmt->schLock); - - if (QW_NOT_EXIST_ADD == nOpt) { - QW_ERR_RET(qwAddSchedulerImpl(mgmt, sId, 
rwType)); - - nOpt = QW_NOT_EXIST_RET_ERR; - - continue; - } else if (QW_NOT_EXIST_RET_ERR == nOpt) { - QW_RET(TSDB_CODE_QRY_SCH_NOT_EXIST); - } else { - QW_SCH_ELOG("unknown notExistOpt:%d", nOpt); - QW_ERR_RET(TSDB_CODE_QRY_APP_ERROR); - } - } - - break; - } - - return TSDB_CODE_SUCCESS; -} - -int32_t qwAcquireAddScheduler(SQWorker *mgmt, uint64_t sId, int32_t rwType, SQWSchStatus **sch) { - return qwAcquireSchedulerImpl(mgmt, sId, rwType, sch, QW_NOT_EXIST_ADD); -} - -int32_t qwAcquireScheduler(SQWorker *mgmt, uint64_t sId, int32_t rwType, SQWSchStatus **sch) { - return qwAcquireSchedulerImpl(mgmt, sId, rwType, sch, QW_NOT_EXIST_RET_ERR); -} - -void qwReleaseScheduler(int32_t rwType, SQWorker *mgmt) { QW_UNLOCK(rwType, &mgmt->schLock); } - -int32_t qwAcquireTaskStatus(QW_FPARAMS_DEF, int32_t rwType, SQWSchStatus *sch, SQWTaskStatus **task) { - char id[sizeof(qId) + sizeof(tId)] = {0}; - QW_SET_QTID(id, qId, tId); - - QW_LOCK(rwType, &sch->tasksLock); - *task = taosHashGet(sch->tasksHash, id, sizeof(id)); - if (NULL == (*task)) { - QW_UNLOCK(rwType, &sch->tasksLock); - QW_ERR_RET(TSDB_CODE_QRY_TASK_NOT_EXIST); - } - - return TSDB_CODE_SUCCESS; -} - -int32_t qwAddTaskStatusImpl(QW_FPARAMS_DEF, SQWSchStatus *sch, int32_t rwType, int32_t status, SQWTaskStatus **task) { - int32_t code = 0; - - char id[sizeof(qId) + sizeof(tId)] = {0}; - QW_SET_QTID(id, qId, tId); - - SQWTaskStatus ntask = {0}; - ntask.status = status; - ntask.refId = rId; - - QW_LOCK(QW_WRITE, &sch->tasksLock); - code = taosHashPut(sch->tasksHash, id, sizeof(id), &ntask, sizeof(ntask)); - if (0 != code) { - QW_UNLOCK(QW_WRITE, &sch->tasksLock); - if (HASH_NODE_EXIST(code)) { - if (rwType && task) { - QW_RET(qwAcquireTaskStatus(QW_FPARAMS(), rwType, sch, task)); - } else { - QW_TASK_ELOG("task status already exist, newStatus:%s", jobTaskStatusStr(status)); - QW_ERR_RET(TSDB_CODE_QRY_TASK_ALREADY_EXIST); - } - } else { - QW_TASK_ELOG("taosHashPut to tasksHash failed, error:%x - %s", code, tstrerror(code)); - QW_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY); - } - } - QW_UNLOCK(QW_WRITE, &sch->tasksLock); - - QW_TASK_DLOG("task status added, newStatus:%s", jobTaskStatusStr(status)); - - if (rwType && task) { - QW_ERR_RET(qwAcquireTaskStatus(QW_FPARAMS(), rwType, sch, task)); - } - - return TSDB_CODE_SUCCESS; -} - -int32_t qwAddTaskStatus(QW_FPARAMS_DEF, int32_t status) { - SQWSchStatus *tsch = NULL; - int32_t code = 0; - QW_ERR_RET(qwAcquireAddScheduler(mgmt, sId, QW_READ, &tsch)); - - QW_ERR_JRET(qwAddTaskStatusImpl(QW_FPARAMS(), tsch, 0, status, NULL)); - -_return: - - qwReleaseScheduler(QW_READ, mgmt); - - QW_RET(code); -} - -int32_t qwAddAcquireTaskStatus(QW_FPARAMS_DEF, int32_t rwType, SQWSchStatus *sch, int32_t status, - SQWTaskStatus **task) { - return qwAddTaskStatusImpl(QW_FPARAMS(), sch, rwType, status, task); -} - -void qwReleaseTaskStatus(int32_t rwType, SQWSchStatus *sch) { QW_UNLOCK(rwType, &sch->tasksLock); } - -int32_t qwAcquireTaskCtx(QW_FPARAMS_DEF, SQWTaskCtx **ctx) { - char id[sizeof(qId) + sizeof(tId)] = {0}; - QW_SET_QTID(id, qId, tId); - - *ctx = taosHashAcquire(mgmt->ctxHash, id, sizeof(id)); - if (NULL == (*ctx)) { - QW_TASK_DLOG_E("task ctx not exist, may be dropped"); - QW_ERR_RET(TSDB_CODE_QRY_TASK_CTX_NOT_EXIST); - } - - return TSDB_CODE_SUCCESS; -} - -int32_t qwGetTaskCtx(QW_FPARAMS_DEF, SQWTaskCtx **ctx) { - char id[sizeof(qId) + sizeof(tId)] = {0}; - QW_SET_QTID(id, qId, tId); - - *ctx = taosHashGet(mgmt->ctxHash, id, sizeof(id)); - if (NULL == (*ctx)) { - QW_TASK_DLOG_E("task ctx not exist, may be 
dropped"); - QW_ERR_RET(TSDB_CODE_QRY_TASK_CTX_NOT_EXIST); - } - - return TSDB_CODE_SUCCESS; -} - -int32_t qwAddTaskCtxImpl(QW_FPARAMS_DEF, bool acquire, SQWTaskCtx **ctx) { - char id[sizeof(qId) + sizeof(tId)] = {0}; - QW_SET_QTID(id, qId, tId); - - SQWTaskCtx nctx = {0}; - - int32_t code = taosHashPut(mgmt->ctxHash, id, sizeof(id), &nctx, sizeof(SQWTaskCtx)); - if (0 != code) { - if (HASH_NODE_EXIST(code)) { - if (acquire && ctx) { - QW_RET(qwAcquireTaskCtx(QW_FPARAMS(), ctx)); - } else if (ctx) { - QW_RET(qwGetTaskCtx(QW_FPARAMS(), ctx)); - } else { - QW_TASK_ELOG_E("task ctx already exist"); - QW_ERR_RET(TSDB_CODE_QRY_TASK_ALREADY_EXIST); - } - } else { - QW_TASK_ELOG("taosHashPut to ctxHash failed, error:%x", code); - QW_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY); - } - } - - if (acquire && ctx) { - QW_RET(qwAcquireTaskCtx(QW_FPARAMS(), ctx)); - } else if (ctx) { - QW_RET(qwGetTaskCtx(QW_FPARAMS(), ctx)); - } - - return TSDB_CODE_SUCCESS; -} - -int32_t qwAddTaskCtx(QW_FPARAMS_DEF) { QW_RET(qwAddTaskCtxImpl(QW_FPARAMS(), false, NULL)); } - -int32_t qwAddAcquireTaskCtx(QW_FPARAMS_DEF, SQWTaskCtx **ctx) { return qwAddTaskCtxImpl(QW_FPARAMS(), true, ctx); } - -void qwReleaseTaskCtx(SQWorker *mgmt, void *ctx) { taosHashRelease(mgmt->ctxHash, ctx); } - -void qwFreeTaskHandle(QW_FPARAMS_DEF, qTaskInfo_t *taskHandle) { - // Note: free/kill may in RC - qTaskInfo_t otaskHandle = atomic_load_ptr(taskHandle); - if (otaskHandle && atomic_val_compare_exchange_ptr(taskHandle, otaskHandle, NULL)) { - qDestroyTask(otaskHandle); - } -} - -int32_t qwKillTaskHandle(QW_FPARAMS_DEF, SQWTaskCtx *ctx) { - int32_t code = 0; - // Note: free/kill may in RC - qTaskInfo_t taskHandle = atomic_load_ptr(&ctx->taskHandle); - if (taskHandle && atomic_val_compare_exchange_ptr(&ctx->taskHandle, taskHandle, NULL)) { - code = qAsyncKillTask(taskHandle); - atomic_store_ptr(&ctx->taskHandle, taskHandle); - } - - QW_RET(code); -} - -void qwFreeTask(QW_FPARAMS_DEF, SQWTaskCtx *ctx) { - tmsgReleaseHandle(&ctx->ctrlConnInfo, TAOS_CONN_SERVER); - ctx->ctrlConnInfo.handle = NULL; - ctx->ctrlConnInfo.refId = -1; - - // NO need to release dataConnInfo - - qwFreeTaskHandle(QW_FPARAMS(), &ctx->taskHandle); - - if (ctx->sinkHandle) { - dsDestroyDataSinker(ctx->sinkHandle); - ctx->sinkHandle = NULL; - } - - if (ctx->plan) { - nodesDestroyNode(ctx->plan); - ctx->plan = NULL; - } -} - -int32_t qwDropTaskCtx(QW_FPARAMS_DEF) { - char id[sizeof(qId) + sizeof(tId)] = {0}; - QW_SET_QTID(id, qId, tId); - SQWTaskCtx octx; - - SQWTaskCtx *ctx = taosHashGet(mgmt->ctxHash, id, sizeof(id)); - if (NULL == ctx) { - QW_ERR_RET(TSDB_CODE_QRY_TASK_CTX_NOT_EXIST); - } - - octx = *ctx; - - atomic_store_ptr(&ctx->taskHandle, NULL); - atomic_store_ptr(&ctx->sinkHandle, NULL); - atomic_store_ptr(&ctx->plan, NULL); - - QW_SET_EVENT_PROCESSED(ctx, QW_EVENT_DROP); - - if (taosHashRemove(mgmt->ctxHash, id, sizeof(id))) { - QW_TASK_ELOG_E("taosHashRemove from ctx hash failed"); - QW_ERR_RET(TSDB_CODE_QRY_TASK_CTX_NOT_EXIST); - } - - qwFreeTask(QW_FPARAMS(), &octx); - - QW_TASK_DLOG_E("task ctx dropped"); - - return TSDB_CODE_SUCCESS; -} - -int32_t qwDropTaskStatus(QW_FPARAMS_DEF) { - SQWSchStatus *sch = NULL; - SQWTaskStatus *task = NULL; - int32_t code = 0; - - char id[sizeof(qId) + sizeof(tId)] = {0}; - QW_SET_QTID(id, qId, tId); - - if (qwAcquireScheduler(mgmt, sId, QW_WRITE, &sch)) { - QW_TASK_WLOG_E("scheduler does not exist"); - return TSDB_CODE_SUCCESS; - } - - if (qwAcquireTaskStatus(QW_FPARAMS(), QW_WRITE, sch, &task)) { - qwReleaseScheduler(QW_WRITE, mgmt); - 
QW_TASK_WLOG_E("task does not exist"); - return TSDB_CODE_SUCCESS; - } +int32_t qwProcessHbLinkBroken(SQWorker *mgmt, SQWMsg *qwMsg, SSchedulerHbReq *req) { + int32_t code = 0; + SSchedulerHbRsp rsp = {0}; + SQWSchStatus *sch = NULL; - if (taosHashRemove(sch->tasksHash, id, sizeof(id))) { - QW_TASK_ELOG_E("taosHashRemove task from hash failed"); - QW_ERR_JRET(TSDB_CODE_QRY_APP_ERROR); - } + QW_ERR_RET(qwAcquireAddScheduler(mgmt, req->sId, QW_READ, &sch)); - QW_TASK_DLOG_E("task status dropped"); + QW_LOCK(QW_WRITE, &sch->hbConnLock); -_return: + if (qwMsg->connInfo.handle == sch->hbConnInfo.handle) { + tmsgReleaseHandle(&sch->hbConnInfo, TAOS_CONN_SERVER); + sch->hbConnInfo.handle = NULL; + sch->hbConnInfo.ahandle = NULL; - if (task) { - qwReleaseTaskStatus(QW_WRITE, sch); + QW_DLOG("release hb handle due to connection broken, handle:%p", qwMsg->connInfo.handle); + } else { + QW_DLOG("ignore hb connection broken, handle:%p, currentHandle:%p", qwMsg->connInfo.handle, sch->hbConnInfo.handle); } - qwReleaseScheduler(QW_WRITE, mgmt); - - QW_RET(code); -} - -int32_t qwUpdateTaskStatus(QW_FPARAMS_DEF, int8_t status) { - SQWSchStatus *sch = NULL; - SQWTaskStatus *task = NULL; - int32_t code = 0; - - QW_ERR_RET(qwAcquireScheduler(mgmt, sId, QW_READ, &sch)); - QW_ERR_JRET(qwAcquireTaskStatus(QW_FPARAMS(), QW_READ, sch, &task)); - - QW_ERR_JRET(qwSetTaskStatus(QW_FPARAMS(), task, status)); -_return: + QW_UNLOCK(QW_WRITE, &sch->hbConnLock); - if (task) { - qwReleaseTaskStatus(QW_READ, sch); - } qwReleaseScheduler(QW_READ, mgmt); - QW_RET(code); -} - -int32_t qwDropTask(QW_FPARAMS_DEF) { - QW_ERR_RET(qwDropTaskStatus(QW_FPARAMS())); - QW_ERR_RET(qwDropTaskCtx(QW_FPARAMS())); - - QW_TASK_DLOG_E("task is dropped"); - - return TSDB_CODE_SUCCESS; + QW_RET(TSDB_CODE_SUCCESS); } int32_t qwHandleTaskComplete(QW_FPARAMS_DEF, SQWTaskCtx *ctx) { @@ -564,7 +79,11 @@ int32_t qwExecTask(QW_FPARAMS_DEF, SQWTaskCtx *ctx, bool *queryEnd) { if (taskHandle) { code = qExecTask(taskHandle, &pRes, &useconds); if (code) { - QW_TASK_ELOG("qExecTask failed, code:%x - %s", code, tstrerror(code)); + if (code != TSDB_CODE_OPS_NOT_SUPPORT) { + QW_TASK_ELOG("qExecTask failed, code:%x - %s", code, tstrerror(code)); + } else { + QW_TASK_DLOG("qExecTask failed, code:%x - %s", code, tstrerror(code)); + } QW_ERR_RET(code); } } @@ -722,23 +241,9 @@ int32_t qwGetResFromSink(QW_FPARAMS_DEF, SQWTaskCtx *ctx, int32_t *dataLen, void } -void qwSaveTbVersionInfo(qTaskInfo_t pTaskInfo, SQWTaskCtx *ctx) { - char dbFName[TSDB_DB_FNAME_LEN]; - char tbName[TSDB_TABLE_NAME_LEN]; - - qGetQueriedTableSchemaVersion(pTaskInfo, dbFName, tbName, &ctx->tbInfo.sversion, &ctx->tbInfo.tversion); - - if (dbFName[0] && tbName[0]) { - sprintf(ctx->tbInfo.tbFName, "%s.%s", dbFName, tbName); - } else { - ctx->tbInfo.tbFName[0] = 0; - } -} - int32_t qwHandlePrePhaseEvents(QW_FPARAMS_DEF, int8_t phase, SQWPhaseInput *input, SQWPhaseOutput *output) { int32_t code = 0; SQWTaskCtx *ctx = NULL; - SRpcHandleInfo *dropConnection = NULL; SRpcHandleInfo *cancelConnection = NULL; QW_TASK_DLOG("start to handle event at phase %s", qwPhaseStr(phase)); @@ -771,12 +276,10 @@ int32_t qwHandlePrePhaseEvents(QW_FPARAMS_DEF, int8_t phase, SQWPhaseInput *inpu } if (QW_IS_EVENT_RECEIVED(ctx, QW_EVENT_DROP)) { - dropConnection = &ctx->ctrlConnInfo; QW_ERR_JRET(qwDropTask(QW_FPARAMS())); - dropConnection = NULL; - qwBuildAndSendDropRsp(&ctx->ctrlConnInfo, code); - QW_TASK_DLOG("drop rsp send, handle:%p, code:%x - %s", ctx->ctrlConnInfo.handle, code, tstrerror(code)); + 
//qwBuildAndSendDropRsp(&ctx->ctrlConnInfo, code); + //QW_TASK_DLOG("drop rsp send, handle:%p, code:%x - %s", ctx->ctrlConnInfo.handle, code, tstrerror(code)); QW_ERR_JRET(TSDB_CODE_QRY_TASK_DROPPED); break; @@ -809,12 +312,10 @@ int32_t qwHandlePrePhaseEvents(QW_FPARAMS_DEF, int8_t phase, SQWPhaseInput *inpu } if (QW_IS_EVENT_RECEIVED(ctx, QW_EVENT_DROP)) { - dropConnection = &ctx->ctrlConnInfo; QW_ERR_JRET(qwDropTask(QW_FPARAMS())); - dropConnection = NULL; - qwBuildAndSendDropRsp(&ctx->ctrlConnInfo, code); - QW_TASK_DLOG("drop rsp send, handle:%p, code:%x - %s", ctx->ctrlConnInfo.handle, code, tstrerror(code)); + //qwBuildAndSendDropRsp(&ctx->ctrlConnInfo, code); + //QW_TASK_DLOG("drop rsp send, handle:%p, code:%x - %s", ctx->ctrlConnInfo.handle, code, tstrerror(code)); QW_ERR_JRET(TSDB_CODE_QRY_TASK_DROPPED); } @@ -839,11 +340,6 @@ _return: qwReleaseTaskCtx(mgmt, ctx); } - if (dropConnection) { - qwBuildAndSendDropRsp(dropConnection, code); - QW_TASK_DLOG("drop rsp send, handle:%p, code:%x - %s", dropConnection->handle, code, tstrerror(code)); - } - if (cancelConnection) { qwBuildAndSendCancelRsp(cancelConnection, code); QW_TASK_DLOG("cancel rsp send, handle:%p, code:%x - %s", cancelConnection->handle, code, tstrerror(code)); @@ -862,7 +358,7 @@ int32_t qwHandlePostPhaseEvents(QW_FPARAMS_DEF, int8_t phase, SQWPhaseInput *inp int32_t code = 0; SQWTaskCtx *ctx = NULL; SRpcHandleInfo connInfo = {0}; - SRpcHandleInfo *readyConnection = NULL; + SRpcHandleInfo *rspConnection = NULL; QW_TASK_DLOG("start to handle event at phase %s", qwPhaseStr(phase)); @@ -883,7 +379,7 @@ int32_t qwHandlePostPhaseEvents(QW_FPARAMS_DEF, int8_t phase, SQWPhaseInput *inp } #else connInfo = ctx->ctrlConnInfo; - readyConnection = &connInfo; + rspConnection = &connInfo; QW_SET_EVENT_PROCESSED(ctx, QW_EVENT_READY); #endif @@ -895,8 +391,8 @@ int32_t qwHandlePostPhaseEvents(QW_FPARAMS_DEF, int8_t phase, SQWPhaseInput *inp QW_ERR_JRET(TSDB_CODE_QRY_APP_ERROR); } - qwBuildAndSendDropRsp(&ctx->ctrlConnInfo, code); - QW_TASK_DLOG("drop rsp send, handle:%p, code:%x - %s", ctx->ctrlConnInfo.handle, code, tstrerror(code)); + //qwBuildAndSendDropRsp(&ctx->ctrlConnInfo, code); + //QW_TASK_DLOG("drop rsp send, handle:%p, code:%x - %s", ctx->ctrlConnInfo.handle, code, tstrerror(code)); QW_ERR_JRET(qwDropTask(QW_FPARAMS())); QW_ERR_JRET(TSDB_CODE_QRY_TASK_DROPPED); @@ -916,9 +412,9 @@ _return: qwUpdateTaskStatus(QW_FPARAMS(), JOB_TASK_STATUS_PARTIAL_SUCCEED); } - if (readyConnection) { - qwBuildAndSendReadyRsp(readyConnection, code, ctx ? &ctx->tbInfo : NULL); - QW_TASK_DLOG("ready msg rsped, handle:%p, code:%x - %s", readyConnection->handle, code, tstrerror(code)); + if (rspConnection) { + qwBuildAndSendQueryRsp(rspConnection, code, ctx ? 
&ctx->tbInfo : NULL); + QW_TASK_DLOG("ready msg rsped, handle:%p, code:%x - %s", rspConnection->handle, code, tstrerror(code)); } if (ctx) { @@ -1009,69 +505,6 @@ _return: QW_RET(TSDB_CODE_SUCCESS); } -int32_t qwProcessReady(QW_FPARAMS_DEF, SQWMsg *qwMsg) { - int32_t code = 0; - SQWTaskCtx *ctx = NULL; - int8_t phase = 0; - bool needRsp = true; - - QW_ERR_JRET(qwAcquireTaskCtx(QW_FPARAMS(), &ctx)); - - QW_LOCK(QW_WRITE, &ctx->lock); - - if (QW_IS_EVENT_PROCESSED(ctx, QW_EVENT_DROP) || QW_IS_EVENT_RECEIVED(ctx, QW_EVENT_DROP)) { - QW_TASK_WLOG_E("task is dropping or already dropped"); - QW_ERR_JRET(TSDB_CODE_QRY_TASK_DROPPED); - } - - if (ctx->phase == QW_PHASE_PRE_QUERY) { - ctx->ctrlConnInfo = qwMsg->connInfo; - QW_SET_EVENT_RECEIVED(ctx, QW_EVENT_READY); - needRsp = false; - QW_TASK_DLOG_E("ready msg will not rsp now"); - goto _return; - } - - QW_SET_EVENT_PROCESSED(ctx, QW_EVENT_READY); - - if (atomic_load_8((int8_t *)&ctx->queryEnd) || atomic_load_8((int8_t *)&ctx->queryFetched)) { - QW_TASK_ELOG("got ready msg at wrong status, queryEnd:%d, queryFetched:%d", atomic_load_8((int8_t *)&ctx->queryEnd), - atomic_load_8((int8_t *)&ctx->queryFetched)); - QW_ERR_JRET(TSDB_CODE_QW_MSG_ERROR); - } - - if (ctx->phase == QW_PHASE_POST_QUERY) { - code = ctx->rspCode; - goto _return; - } - - QW_TASK_ELOG("invalid phase when got ready msg, phase:%s", qwPhaseStr(ctx->phase)); - - QW_ERR_JRET(TSDB_CODE_QRY_TASK_STATUS_ERROR); - -_return: - - if (code && ctx) { - QW_UPDATE_RSP_CODE(ctx, code); - } - - if (code) { - qwUpdateTaskStatus(QW_FPARAMS(), JOB_TASK_STATUS_FAILED); - } - - if (ctx) { - QW_UNLOCK(QW_WRITE, &ctx->lock); - qwReleaseTaskCtx(mgmt, ctx); - } - - if (needRsp) { - qwBuildAndSendReadyRsp(&qwMsg->connInfo, code, NULL); - QW_TASK_DLOG("ready msg rsped, handle:%p, code:%x - %s", qwMsg->connInfo.handle, code, tstrerror(code)); - } - - QW_RET(TSDB_CODE_SUCCESS); -} - int32_t qwProcessCQuery(QW_FPARAMS_DEF, SQWMsg *qwMsg) { SQWTaskCtx *ctx = NULL; int32_t code = 0; @@ -1245,11 +678,6 @@ int32_t qwProcessDrop(QW_FPARAMS_DEF, SQWMsg *qwMsg) { QW_ERR_JRET(qwKillTaskHandle(QW_FPARAMS(), ctx)); qwUpdateTaskStatus(QW_FPARAMS(), JOB_TASK_STATUS_DROPPING); } else if (ctx->phase > 0) { - if (0 == qwMsg->code) { - qwBuildAndSendDropRsp(&qwMsg->connInfo, code); - QW_TASK_DLOG("drop rsp send, handle:%p, code:%x - %s", qwMsg->connInfo.handle, code, tstrerror(code)); - } - QW_ERR_JRET(qwDropTask(QW_FPARAMS())); rsped = true; } else { @@ -1280,37 +708,6 @@ _return: qwReleaseTaskCtx(mgmt, ctx); } - if ((TSDB_CODE_SUCCESS != code) && (0 == qwMsg->code)) { - qwBuildAndSendDropRsp(&qwMsg->connInfo, code); - QW_TASK_DLOG("drop rsp send, handle:%p, code:%x - %s", qwMsg->connInfo.handle, code, tstrerror(code)); - } - - QW_RET(TSDB_CODE_SUCCESS); -} - -int32_t qwProcessHbLinkBroken(SQWorker *mgmt, SQWMsg *qwMsg, SSchedulerHbReq *req) { - int32_t code = 0; - SSchedulerHbRsp rsp = {0}; - SQWSchStatus *sch = NULL; - - QW_ERR_RET(qwAcquireAddScheduler(mgmt, req->sId, QW_READ, &sch)); - - QW_LOCK(QW_WRITE, &sch->hbConnLock); - - if (qwMsg->connInfo.handle == sch->hbConnInfo.handle) { - tmsgReleaseHandle(&sch->hbConnInfo, TAOS_CONN_SERVER); - sch->hbConnInfo.handle = NULL; - sch->hbConnInfo.ahandle = NULL; - - QW_DLOG("release hb handle due to connection broken, handle:%p", qwMsg->connInfo.handle); - } else { - QW_DLOG("ignore hb connection broken, handle:%p, currentHandle:%p", qwMsg->connInfo.handle, sch->hbConnInfo.handle); - } - - QW_UNLOCK(QW_WRITE, &sch->hbConnLock); - - qwReleaseScheduler(QW_READ, mgmt); - 
QW_RET(TSDB_CODE_SUCCESS); } @@ -1441,81 +838,6 @@ _return: qwRelease(refId); } -void qwCloseRef(void) { - taosWLockLatch(&gQwMgmt.lock); - if (atomic_load_32(&gQwMgmt.qwNum) <= 0 && gQwMgmt.qwRef >= 0) { - taosCloseRef(gQwMgmt.qwRef); - gQwMgmt.qwRef = -1; - } - taosWUnLockLatch(&gQwMgmt.lock); -} - -void qwDestroySchStatus(SQWSchStatus *pStatus) { taosHashCleanup(pStatus->tasksHash); } - -void qwDestroyImpl(void *pMgmt) { - SQWorker *mgmt = (SQWorker *)pMgmt; - - taosTmrStopA(&mgmt->hbTimer); - taosTmrCleanUp(mgmt->timer); - - // TODO STOP ALL QUERY - - // TODO FREE ALL - - taosHashCleanup(mgmt->ctxHash); - - void *pIter = taosHashIterate(mgmt->schHash, NULL); - while (pIter) { - SQWSchStatus *sch = (SQWSchStatus *)pIter; - qwDestroySchStatus(sch); - pIter = taosHashIterate(mgmt->schHash, pIter); - } - taosHashCleanup(mgmt->schHash); - - taosMemoryFree(mgmt); - - atomic_sub_fetch_32(&gQwMgmt.qwNum, 1); - - qwCloseRef(); -} - -int32_t qwOpenRef(void) { - taosWLockLatch(&gQwMgmt.lock); - if (gQwMgmt.qwRef < 0) { - gQwMgmt.qwRef = taosOpenRef(100, qwDestroyImpl); - if (gQwMgmt.qwRef < 0) { - taosWUnLockLatch(&gQwMgmt.lock); - qError("init qworker ref failed"); - QW_RET(TSDB_CODE_QRY_OUT_OF_MEMORY); - } - } - taosWUnLockLatch(&gQwMgmt.lock); - - return TSDB_CODE_SUCCESS; -} - -void qwSetHbParam(int64_t refId, SQWHbParam **pParam) { - int32_t paramIdx = 0; - int32_t newParamIdx = 0; - - while (true) { - paramIdx = atomic_load_32(&gQwMgmt.paramIdx); - if (paramIdx == tListLen(gQwMgmt.param)) { - newParamIdx = 0; - } else { - newParamIdx = paramIdx + 1; - } - - if (paramIdx == atomic_val_compare_exchange_32(&gQwMgmt.paramIdx, paramIdx, newParamIdx)) { - break; - } - } - - gQwMgmt.param[paramIdx].qwrId = gQwMgmt.qwRef; - gQwMgmt.param[paramIdx].refId = refId; - - *pParam = &gQwMgmt.param[paramIdx]; -} int32_t qWorkerInit(int8_t nodeType, int32_t nodeId, SQWorkerCfg *cfg, void **qWorkerMgmt, const SMsgCb *pMsgCb) { if (NULL == qWorkerMgmt || pMsgCb->mgmt == NULL) { @@ -1632,146 +954,30 @@ void qWorkerDestroy(void **qWorkerMgmt) { } } -int32_t qwGetSchTasksStatus(SQWorker *mgmt, uint64_t sId, SSchedulerStatusRsp **rsp) { - /* - SQWSchStatus *sch = NULL; - int32_t taskNum = 0; - - QW_ERR_RET(qwAcquireScheduler(mgmt, sId, QW_READ, &sch)); - - sch->lastAccessTs = taosGetTimestampSec(); - - QW_LOCK(QW_READ, &sch->tasksLock); - - taskNum = taosHashGetSize(sch->tasksHash); - - int32_t size = sizeof(SSchedulerStatusRsp) + sizeof((*rsp)->status[0]) * taskNum; - *rsp = taosMemoryCalloc(1, size); - if (NULL == *rsp) { - QW_SCH_ELOG("calloc %d failed", size); - QW_UNLOCK(QW_READ, &sch->tasksLock); - qwReleaseScheduler(QW_READ, mgmt); - - return TSDB_CODE_QRY_OUT_OF_MEMORY; - } - - void *key = NULL; - size_t keyLen = 0; - int32_t i = 0; - - void *pIter = taosHashIterate(sch->tasksHash, NULL); - while (pIter) { - SQWTaskStatus *taskStatus = (SQWTaskStatus *)pIter; - taosHashGetKey(pIter, &key, &keyLen); - - QW_GET_QTID(key, (*rsp)->status[i].queryId, (*rsp)->status[i].taskId); - (*rsp)->status[i].status = taskStatus->status; - - ++i; - pIter = taosHashIterate(sch->tasksHash, pIter); - } - - QW_UNLOCK(QW_READ, &sch->tasksLock); - qwReleaseScheduler(QW_READ, mgmt); - - (*rsp)->num = taskNum; - */ - return TSDB_CODE_SUCCESS; -} - -int32_t qwUpdateSchLastAccess(SQWorker *mgmt, uint64_t sId, uint64_t qId, uint64_t tId) { - SQWSchStatus *sch = NULL; +int32_t qWorkerGetStat(SReadHandle *handle, void *qWorkerMgmt, SQWorkerStat *pStat) { + if (NULL == handle || NULL == qWorkerMgmt || NULL == pStat) { + 
QW_RET(TSDB_CODE_QRY_INVALID_INPUT); + } - /* - QW_ERR_RET(qwAcquireScheduler(QW_READ, mgmt, sId, &sch)); + SQWorker *mgmt = (SQWorker *)qWorkerMgmt; + SDataSinkStat sinkStat = {0}; + + dsDataSinkGetCacheSize(&sinkStat); + pStat->cacheDataSize = sinkStat.cachedSize; + + pStat->queryProcessed = QW_STAT_GET(mgmt->stat.msgStat.queryProcessed); + pStat->cqueryProcessed = QW_STAT_GET(mgmt->stat.msgStat.cqueryProcessed); + pStat->fetchProcessed = QW_STAT_GET(mgmt->stat.msgStat.fetchProcessed); + pStat->dropProcessed = QW_STAT_GET(mgmt->stat.msgStat.dropProcessed); + pStat->hbProcessed = QW_STAT_GET(mgmt->stat.msgStat.hbProcessed); - sch->lastAccessTs = taosGetTimestampSec(); + pStat->numOfQueryInQueue = handle->pMsgCb->qsizeFp(handle->pMsgCb->mgmt, mgmt->nodeId, QUERY_QUEUE); + pStat->numOfFetchInQueue = handle->pMsgCb->qsizeFp(handle->pMsgCb->mgmt, mgmt->nodeId, FETCH_QUEUE); + pStat->timeInQueryQueue = qwGetTimeInQueue((SQWorker *)qWorkerMgmt, QUERY_QUEUE); + pStat->timeInFetchQueue = qwGetTimeInQueue((SQWorker *)qWorkerMgmt, FETCH_QUEUE); - qwReleaseScheduler(QW_READ, mgmt); - */ return TSDB_CODE_SUCCESS; } -int32_t qwGetTaskStatus(SQWorker *mgmt, uint64_t sId, uint64_t qId, uint64_t tId, int8_t *taskStatus) { - SQWSchStatus *sch = NULL; - SQWTaskStatus *task = NULL; - int32_t code = 0; - /* - if (qwAcquireScheduler(QW_READ, mgmt, sId, &sch)) { - *taskStatus = JOB_TASK_STATUS_NULL; - return TSDB_CODE_SUCCESS; - } - - if (qwAcquireTask(mgmt, QW_READ, sch, queryId, taskId, &task)) { - qwReleaseScheduler(QW_READ, mgmt); - - *taskStatus = JOB_TASK_STATUS_NULL; - return TSDB_CODE_SUCCESS; - } - - *taskStatus = task->status; - - qwReleaseTask(QW_READ, sch); - qwReleaseScheduler(QW_READ, mgmt); - */ - - QW_RET(code); -} - -int32_t qwCancelTask(SQWorker *mgmt, uint64_t sId, uint64_t qId, uint64_t tId) { - SQWSchStatus *sch = NULL; - SQWTaskStatus *task = NULL; - int32_t code = 0; - /* - QW_ERR_RET(qwAcquireAddScheduler(QW_READ, mgmt, sId, &sch)); - - QW_ERR_JRET(qwAcquireAddTask(mgmt, QW_READ, sch, qId, tId, JOB_TASK_STATUS_NOT_START, &task)); - - - QW_LOCK(QW_WRITE, &task->lock); - - task->cancel = true; - - int8_t oriStatus = task->status; - int8_t newStatus = 0; - - if (task->status == JOB_TASK_STATUS_CANCELLED || task->status == JOB_TASK_STATUS_NOT_START || task->status == - JOB_TASK_STATUS_CANCELLING || task->status == JOB_TASK_STATUS_DROPPING) { QW_UNLOCK(QW_WRITE, &task->lock); - - qwReleaseTask(QW_READ, sch); - qwReleaseScheduler(QW_READ, mgmt); - - return TSDB_CODE_SUCCESS; - } else if (task->status == JOB_TASK_STATUS_FAILED || task->status == JOB_TASK_STATUS_SUCCEED || task->status == - JOB_TASK_STATUS_PARTIAL_SUCCEED) { QW_ERR_JRET(qwUpdateTaskStatus(QW_FPARAMS(), JOB_TASK_STATUS_CANCELLED)); } else { - QW_ERR_JRET(qwUpdateTaskStatus(QW_FPARAMS(), JOB_TASK_STATUS_CANCELLING)); - } - - QW_UNLOCK(QW_WRITE, &task->lock); - - qwReleaseTask(QW_READ, sch); - qwReleaseScheduler(QW_READ, mgmt); - - if (oriStatus == JOB_TASK_STATUS_EXECUTING) { - //TODO call executer to cancel subquery async - } - - return TSDB_CODE_SUCCESS; - - _return: - - if (task) { - QW_UNLOCK(QW_WRITE, &task->lock); - - qwReleaseTask(QW_READ, sch); - } - - if (sch) { - qwReleaseScheduler(QW_READ, mgmt); - } - */ - - QW_RET(code); -} diff --git a/source/libs/qworker/test/qworkerTests.cpp b/source/libs/qworker/test/qworkerTests.cpp index b573828e7694cc2f19ddd2e31fa9b34b590fc6ed..5bb6acee6014fb9352b9ac11304500df33e3ffcd 100644 --- a/source/libs/qworker/test/qworkerTests.cpp +++ b/source/libs/qworker/test/qworkerTests.cpp @@ 
-108,7 +108,7 @@ void qwtInitLogFile() { tsAsyncLog = 0; qDebugFlag = 159; - strcpy(tsLogDir, "/var/log/taos"); + strcpy(tsLogDir, TD_LOG_DIR_PATH); if (taosInitLog(defaultLogFileNamePrefix, maxLogFileNum) < 0) { printf("failed to open log file in directory:%s\n", tsLogDir); @@ -127,15 +127,6 @@ void qwtBuildQueryReqMsg(SRpcMsg *queryRpc) { queryRpc->contLen = sizeof(SSubQueryMsg) + 100; } -void qwtBuildReadyReqMsg(SResReadyReq *readyMsg, SRpcMsg *readyRpc) { - readyMsg->sId = htobe64(1); - readyMsg->queryId = htobe64(atomic_load_64(&qwtTestQueryId)); - readyMsg->taskId = htobe64(1); - readyRpc->msgType = TDMT_VND_RES_READY; - readyRpc->pCont = readyMsg; - readyRpc->contLen = sizeof(SResReadyReq); -} - void qwtBuildFetchReqMsg(SResFetchReq *fetchMsg, SRpcMsg *fetchRpc) { fetchMsg->sId = htobe64(1); fetchMsg->queryId = htobe64(atomic_load_64(&qwtTestQueryId)); @@ -154,13 +145,6 @@ void qwtBuildDropReqMsg(STaskDropReq *dropMsg, SRpcMsg *dropRpc) { dropRpc->contLen = sizeof(STaskDropReq); } -void qwtBuildStatusReqMsg(SSchTasksStatusReq *statusMsg, SRpcMsg *statusRpc) { - statusMsg->sId = htobe64(1); - statusRpc->pCont = statusMsg; - statusRpc->contLen = sizeof(SSchTasksStatusReq); - statusRpc->msgType = TDMT_VND_TASKS_STATUS; -} - int32_t qwtStringToPlan(const char* str, SSubplan** subplan) { *subplan = (SSubplan *)0x1; return 0; @@ -188,8 +172,7 @@ int32_t qwtPutReqToFetchQueue(void *node, struct SRpcMsg *pMsg) { return 0; } - -int32_t qwtPutReqToQueue(void *node, struct SRpcMsg *pMsg) { +int32_t qwtPutReqToQueue(void *node, EQueueType qtype, struct SRpcMsg *pMsg) { taosWLockLatch(&qwtTestQueryQueueLock); struct SRpcMsg *newMsg = (struct SRpcMsg *)taosMemoryCalloc(1, sizeof(struct SRpcMsg)); memcpy(newMsg, pMsg, sizeof(struct SRpcMsg)); @@ -197,7 +180,7 @@ int32_t qwtPutReqToQueue(void *node, struct SRpcMsg *pMsg) { if (qwtTestQueryQueueWIdx >= qwtTestQueryQueueSize) { qwtTestQueryQueueWIdx = 0; } - + qwtTestQueryQueueNum++; if (qwtTestQueryQueueWIdx == qwtTestQueryQueueRIdx) { @@ -222,10 +205,7 @@ void qwtRpcSendResponse(const SRpcMsg *pRsp) { case TDMT_VND_QUERY_RSP: { SQueryTableRsp *rsp = (SQueryTableRsp *)pRsp->pCont; - if (0 == pRsp->code) { - qwtBuildReadyReqMsg(&qwtreadyMsg, &qwtreadyRpc); - qwtPutReqToFetchQueue((void *)0x1, &qwtreadyRpc); - } else { + if (pRsp->code) { qwtBuildDropReqMsg(&qwtdropMsg, &qwtdropRpc); qwtPutReqToFetchQueue((void *)0x1, &qwtdropRpc); } @@ -233,19 +213,6 @@ void qwtRpcSendResponse(const SRpcMsg *pRsp) { rpcFreeCont(rsp); break; } - case TDMT_VND_RES_READY_RSP: { - SResReadyRsp *rsp = (SResReadyRsp *)pRsp->pCont; - - if (0 == pRsp->code) { - qwtBuildFetchReqMsg(&qwtfetchMsg, &qwtfetchRpc); - qwtPutReqToFetchQueue((void *)0x1, &qwtfetchRpc); - } else { - qwtBuildDropReqMsg(&qwtdropMsg, &qwtdropRpc); - qwtPutReqToFetchQueue((void *)0x1, &qwtdropRpc); - } - rpcFreeCont(rsp); - break; - } case TDMT_VND_FETCH_RSP: { SRetrieveTableRsp *rsp = (SRetrieveTableRsp *)pRsp->pCont; @@ -667,7 +634,7 @@ void *queryThread(void *param) { while (!qwtTestStop) { qwtBuildQueryReqMsg(&queryRpc); - qWorkerProcessQueryMsg(mockPointer, mgmt, &queryRpc); + qWorkerProcessQueryMsg(mockPointer, mgmt, &queryRpc, 0); if (qwtTestEnableSleep) { taosUsleep(taosRand()%5); } @@ -679,28 +646,6 @@ void *queryThread(void *param) { return NULL; } -void *readyThread(void *param) { - SRpcMsg readyRpc = {0}; - int32_t code = 0; - uint32_t n = 0; - void *mockPointer = (void *)0x1; - void *mgmt = param; - SResReadyReq readyMsg = {0}; - - while (!qwtTestStop) { - qwtBuildReadyReqMsg(&readyMsg, 
&readyRpc); - code = qWorkerProcessReadyMsg(mockPointer, mgmt, &readyRpc); - if (qwtTestEnableSleep) { - taosUsleep(taosRand()%5); - } - if (++n % qwtTestPrintNum == 0) { - printf("ready:%d\n", n); - } - } - - return NULL; -} - void *fetchThread(void *param) { SRpcMsg fetchRpc = {0}; int32_t code = 0; @@ -711,7 +656,7 @@ void *fetchThread(void *param) { while (!qwtTestStop) { qwtBuildFetchReqMsg(&fetchMsg, &fetchRpc); - code = qWorkerProcessFetchMsg(mockPointer, mgmt, &fetchRpc); + code = qWorkerProcessFetchMsg(mockPointer, mgmt, &fetchRpc, 0); if (qwtTestEnableSleep) { taosUsleep(taosRand()%5); } @@ -733,7 +678,7 @@ void *dropThread(void *param) { while (!qwtTestStop) { qwtBuildDropReqMsg(&dropMsg, &dropRpc); - code = qWorkerProcessDropMsg(mockPointer, mgmt, &dropRpc); + code = qWorkerProcessDropMsg(mockPointer, mgmt, &dropRpc, 0); if (qwtTestEnableSleep) { taosUsleep(taosRand()%5); } @@ -745,29 +690,6 @@ void *dropThread(void *param) { return NULL; } -void *statusThread(void *param) { - SRpcMsg statusRpc = {0}; - int32_t code = 0; - uint32_t n = 0; - void *mockPointer = (void *)0x1; - void *mgmt = param; - SSchTasksStatusReq statusMsg = {0}; - - while (!qwtTestStop) { - qwtBuildStatusReqMsg(&statusMsg, &statusRpc); - code = qWorkerProcessStatusMsg(mockPointer, mgmt, &statusRpc); - if (qwtTestEnableSleep) { - taosUsleep(taosRand()%5); - } - if (++n % qwtTestPrintNum == 0) { - printf("status:%d\n", n); - } - } - - return NULL; -} - - void *qwtclientThread(void *param) { int32_t code = 0; uint32_t n = 0; @@ -779,9 +701,9 @@ void *qwtclientThread(void *param) { while (!qwtTestStop) { qwtTestCaseFinished = false; - + qwtBuildQueryReqMsg(&queryRpc); - qwtPutReqToQueue((void *)0x1, &queryRpc); + qwtPutReqToQueue((void *)0x1, QUERY_QUEUE, &queryRpc); while (!qwtTestCaseFinished) { taosUsleep(1); @@ -835,9 +757,9 @@ void *queryQueueThread(void *param) { } if (TDMT_VND_QUERY == queryRpc->msgType) { - qWorkerProcessQueryMsg(mockPointer, mgmt, queryRpc); + qWorkerProcessQueryMsg(mockPointer, mgmt, queryRpc, 0); } else if (TDMT_VND_QUERY_CONTINUE == queryRpc->msgType) { - qWorkerProcessCQueryMsg(mockPointer, mgmt, queryRpc); + qWorkerProcessCQueryMsg(mockPointer, mgmt, queryRpc, 0); } else { printf("unknown msg in query queue, type:%d\n", queryRpc->msgType); assert(0); @@ -892,19 +814,13 @@ void *fetchQueueThread(void *param) { switch (fetchRpc->msgType) { case TDMT_VND_FETCH: - qWorkerProcessFetchMsg(mockPointer, mgmt, fetchRpc); - break; - case TDMT_VND_RES_READY: - qWorkerProcessReadyMsg(mockPointer, mgmt, fetchRpc); - break; - case TDMT_VND_TASKS_STATUS: - qWorkerProcessStatusMsg(mockPointer, mgmt, fetchRpc); + qWorkerProcessFetchMsg(mockPointer, mgmt, fetchRpc, 0); break; case TDMT_VND_CANCEL_TASK: - qWorkerProcessCancelMsg(mockPointer, mgmt, fetchRpc); + qWorkerProcessCancelMsg(mockPointer, mgmt, fetchRpc, 0); break; case TDMT_VND_DROP_TASK: - qWorkerProcessDropMsg(mockPointer, mgmt, fetchRpc); + qWorkerProcessDropMsg(mockPointer, mgmt, fetchRpc, 0); break; default: printf("unknown msg type:%d in fetch queue", fetchRpc->msgType); @@ -934,15 +850,12 @@ TEST(seqTest, normalCase) { int32_t code = 0; void *mockPointer = (void *)0x1; SRpcMsg queryRpc = {0}; - SRpcMsg readyRpc = {0}; SRpcMsg fetchRpc = {0}; SRpcMsg dropRpc = {0}; - SRpcMsg statusRpc = {0}; qwtInitLogFile(); qwtBuildQueryReqMsg(&queryRpc); - qwtBuildReadyReqMsg(&qwtreadyMsg, &readyRpc); qwtBuildFetchReqMsg(&qwtfetchMsg, &fetchRpc); qwtBuildDropReqMsg(&qwtdropMsg, &dropRpc); @@ -960,24 +873,20 @@ TEST(seqTest, normalCase) { SMsgCb 
msgCb = {0}; msgCb.mgmt = (void *)mockPointer; - msgCb.queueFps[QUERY_QUEUE] = (PutToQueueFp)qwtPutReqToQueue; + msgCb.putToQueueFp = (PutToQueueFp)qwtPutReqToQueue; code = qWorkerInit(NODE_TYPE_VNODE, 1, NULL, &mgmt, &msgCb); ASSERT_EQ(code, 0); - code = qWorkerProcessQueryMsg(mockPointer, mgmt, &queryRpc); + code = qWorkerProcessQueryMsg(mockPointer, mgmt, &queryRpc, 0); ASSERT_EQ(code, 0); //code = qWorkerProcessReadyMsg(mockPointer, mgmt, &readyRpc); //ASSERT_EQ(code, 0); - code = qWorkerProcessFetchMsg(mockPointer, mgmt, &fetchRpc); + code = qWorkerProcessFetchMsg(mockPointer, mgmt, &fetchRpc, 0); ASSERT_EQ(code, 0); - code = qWorkerProcessDropMsg(mockPointer, mgmt, &dropRpc); - ASSERT_EQ(code, 0); - - qwtBuildStatusReqMsg(&qwtstatusMsg, &statusRpc); - code = qWorkerProcessStatusMsg(mockPointer, mgmt, &statusRpc); + code = qWorkerProcessDropMsg(mockPointer, mgmt, &dropRpc, 0); ASSERT_EQ(code, 0); qWorkerDestroy(&mgmt); @@ -989,41 +898,27 @@ TEST(seqTest, cancelFirst) { void *mockPointer = (void *)0x1; SRpcMsg queryRpc = {0}; SRpcMsg dropRpc = {0}; - SRpcMsg statusRpc = {0}; qwtInitLogFile(); qwtBuildQueryReqMsg(&queryRpc); qwtBuildDropReqMsg(&qwtdropMsg, &dropRpc); - qwtBuildStatusReqMsg(&qwtstatusMsg, &statusRpc); stubSetStringToPlan(); stubSetRpcSendResponse(); - + SMsgCb msgCb = {0}; msgCb.mgmt = (void *)mockPointer; - msgCb.queueFps[QUERY_QUEUE] = (PutToQueueFp)qwtPutReqToQueue; + msgCb.putToQueueFp = (PutToQueueFp)qwtPutReqToQueue; code = qWorkerInit(NODE_TYPE_VNODE, 1, NULL, &mgmt, &msgCb); ASSERT_EQ(code, 0); - qwtBuildStatusReqMsg(&qwtstatusMsg, &statusRpc); - code = qWorkerProcessStatusMsg(mockPointer, mgmt, &statusRpc); + code = qWorkerProcessDropMsg(mockPointer, mgmt, &dropRpc, 0); ASSERT_EQ(code, 0); - code = qWorkerProcessDropMsg(mockPointer, mgmt, &dropRpc); - ASSERT_EQ(code, 0); - - qwtBuildStatusReqMsg(&qwtstatusMsg, &statusRpc); - code = qWorkerProcessStatusMsg(mockPointer, mgmt, &statusRpc); - ASSERT_EQ(code, 0); - - code = qWorkerProcessQueryMsg(mockPointer, mgmt, &queryRpc); + code = qWorkerProcessQueryMsg(mockPointer, mgmt, &queryRpc, 0); ASSERT_TRUE(0 != code); - qwtBuildStatusReqMsg(&qwtstatusMsg, &statusRpc); - code = qWorkerProcessStatusMsg(mockPointer, mgmt, &statusRpc); - ASSERT_EQ(code, 0); - qWorkerDestroy(&mgmt); } @@ -1048,10 +943,10 @@ TEST(seqTest, randCase) { stubSetCreateExecTask(); taosSeedRand(taosGetTimestampSec()); - + SMsgCb msgCb = {0}; msgCb.mgmt = (void *)mockPointer; - msgCb.queueFps[QUERY_QUEUE] = (PutToQueueFp)qwtPutReqToQueue; + msgCb.putToQueueFp = (PutToQueueFp)qwtPutReqToQueue; code = qWorkerInit(NODE_TYPE_VNODE, 1, NULL, &mgmt, &msgCb); ASSERT_EQ(code, 0); @@ -1063,7 +958,7 @@ TEST(seqTest, randCase) { if (r >= 0 && r < maxr/5) { printf("Query,%d\n", t++); qwtBuildQueryReqMsg(&queryRpc); - code = qWorkerProcessQueryMsg(mockPointer, mgmt, &queryRpc); + code = qWorkerProcessQueryMsg(mockPointer, mgmt, &queryRpc, 0); } else if (r >= maxr/5 && r < maxr * 2/5) { //printf("Ready,%d\n", t++); //qwtBuildReadyReqMsg(&readyMsg, &readyRpc); @@ -1074,22 +969,19 @@ TEST(seqTest, randCase) { } else if (r >= maxr * 2/5 && r < maxr* 3/5) { printf("Fetch,%d\n", t++); qwtBuildFetchReqMsg(&fetchMsg, &fetchRpc); - code = qWorkerProcessFetchMsg(mockPointer, mgmt, &fetchRpc); + code = qWorkerProcessFetchMsg(mockPointer, mgmt, &fetchRpc, 0); if (qwtTestEnableSleep) { taosUsleep(1); } } else if (r >= maxr * 3/5 && r < maxr * 4/5) { printf("Drop,%d\n", t++); qwtBuildDropReqMsg(&dropMsg, &dropRpc); - code = qWorkerProcessDropMsg(mockPointer, mgmt, &dropRpc); + 
code = qWorkerProcessDropMsg(mockPointer, mgmt, &dropRpc, 0); if (qwtTestEnableSleep) { taosUsleep(1); } } else if (r >= maxr * 4/5 && r < maxr-1) { printf("Status,%d\n", t++); - qwtBuildStatusReqMsg(&statusMsg, &statusRpc); - code = qWorkerProcessStatusMsg(mockPointer, mgmt, &statusRpc); - ASSERT_EQ(code, 0); if (qwtTestEnableSleep) { taosUsleep(1); } @@ -1122,10 +1014,10 @@ TEST(seqTest, multithreadRand) { stubSetGetDataBlock(); taosSeedRand(taosGetTimestampSec()); - + SMsgCb msgCb = {0}; msgCb.mgmt = (void *)mockPointer; - msgCb.queueFps[QUERY_QUEUE] = (PutToQueueFp)qwtPutReqToQueue; + msgCb.putToQueueFp = (PutToQueueFp)qwtPutReqToQueue; code = qWorkerInit(NODE_TYPE_VNODE, 1, NULL, &mgmt, &msgCb); ASSERT_EQ(code, 0); @@ -1137,7 +1029,6 @@ TEST(seqTest, multithreadRand) { //taosThreadCreate(&(t2), &thattr, readyThread, NULL); taosThreadCreate(&(t3), &thattr, fetchThread, NULL); taosThreadCreate(&(t4), &thattr, dropThread, NULL); - taosThreadCreate(&(t5), &thattr, statusThread, NULL); taosThreadCreate(&(t6), &thattr, fetchQueueThread, mgmt); while (true) { @@ -1189,7 +1080,7 @@ TEST(rcTest, shortExecshortDelay) { SMsgCb msgCb = {0}; msgCb.mgmt = (void *)mockPointer; - msgCb.queueFps[QUERY_QUEUE] = (PutToQueueFp)qwtPutReqToQueue; + msgCb.putToQueueFp = (PutToQueueFp)qwtPutReqToQueue; code = qWorkerInit(NODE_TYPE_VNODE, 1, NULL, &mgmt, &msgCb); ASSERT_EQ(code, 0); @@ -1273,7 +1164,7 @@ TEST(rcTest, longExecshortDelay) { SMsgCb msgCb = {0}; msgCb.mgmt = (void *)mockPointer; - msgCb.queueFps[QUERY_QUEUE] = (PutToQueueFp)qwtPutReqToQueue; + msgCb.putToQueueFp = (PutToQueueFp)qwtPutReqToQueue; code = qWorkerInit(NODE_TYPE_VNODE, 1, NULL, &mgmt, &msgCb); ASSERT_EQ(code, 0); @@ -1359,7 +1250,7 @@ TEST(rcTest, shortExeclongDelay) { SMsgCb msgCb = {0}; msgCb.mgmt = (void *)mockPointer; - msgCb.queueFps[QUERY_QUEUE] = (PutToQueueFp)qwtPutReqToQueue; + msgCb.putToQueueFp = (PutToQueueFp)qwtPutReqToQueue; code = qWorkerInit(NODE_TYPE_VNODE, 1, NULL, &mgmt, &msgCb); ASSERT_EQ(code, 0); @@ -1440,10 +1331,10 @@ TEST(rcTest, dropTest) { stubSetGetDataBlock(); taosSeedRand(taosGetTimestampSec()); - + SMsgCb msgCb = {0}; msgCb.mgmt = (void *)mockPointer; - msgCb.queueFps[QUERY_QUEUE] = (PutToQueueFp)qwtPutReqToQueue; + msgCb.putToQueueFp = (PutToQueueFp)qwtPutReqToQueue; code = qWorkerInit(NODE_TYPE_VNODE, 1, NULL, &mgmt, &msgCb); ASSERT_EQ(code, 0); diff --git a/source/libs/scalar/inc/sclInt.h b/source/libs/scalar/inc/sclInt.h index 9dbfeceb5940d4237ead01ff445529c2d7d447ac..1c2e4a358a2c256cf3ed577be568c2e93fe13cbe 100644 --- a/source/libs/scalar/inc/sclInt.h +++ b/source/libs/scalar/inc/sclInt.h @@ -51,7 +51,7 @@ typedef struct SScalarCtx { int32_t doConvertDataType(SValueNode* pValueNode, SScalarParam* out); SColumnInfoData* createColumnInfoData(SDataType* pType, int32_t numOfRows); -void sclConvertToTsValueNode(int8_t precision, SValueNode* valueNode); +int32_t sclConvertToTsValueNode(int8_t precision, SValueNode* valueNode); #define GET_PARAM_TYPE(_c) ((_c)->columnData->info.type) #define GET_PARAM_BYTES(_c) ((_c)->columnData->info.bytes) diff --git a/source/libs/scalar/src/filter.c b/source/libs/scalar/src/filter.c index 4317ad325e7e0d7b468dd7929c1f4a7c9ff7c169..195ec8a57791062cbca0e4c1a39ccce1866a5095 100644 --- a/source/libs/scalar/src/filter.c +++ b/source/libs/scalar/src/filter.c @@ -3553,7 +3553,11 @@ EDealRes fltReviseRewriter(SNode** pNode, void* pContext) { return DEAL_RES_CONTINUE; } - sclConvertToTsValueNode(stat->precision, valueNode); + int32_t code = 
sclConvertToTsValueNode(stat->precision, valueNode); + if (code) { + stat->code = code; + return DEAL_RES_ERROR; + } return DEAL_RES_CONTINUE; } @@ -3687,7 +3691,7 @@ int32_t fltReviseNodes(SFilterInfo *pInfo, SNode** pNode, SFltTreeStat *pStat) { for (int32_t i = 0; i < nodeNum; ++i) { SValueNode *valueNode = *(SValueNode **)taosArrayGet(pStat->nodeList, i); - sclConvertToTsValueNode(pStat->precision, valueNode); + FLT_ERR_JRET(sclConvertToTsValueNode(pStat->precision, valueNode)); } _return: diff --git a/source/libs/scalar/src/scalar.c b/source/libs/scalar/src/scalar.c index fb03eaefa4fe79034d731b74de6bd166fa0db83e..d2436b9948f2cf7bfa15d061cdc9bbfdfefd6f08 100644 --- a/source/libs/scalar/src/scalar.c +++ b/source/libs/scalar/src/scalar.c @@ -20,17 +20,19 @@ int32_t scalarGetOperatorParamNum(EOperatorType type) { return 2; } -void sclConvertToTsValueNode(int8_t precision, SValueNode* valueNode) { +int32_t sclConvertToTsValueNode(int8_t precision, SValueNode* valueNode) { char *timeStr = valueNode->datum.p; - if (convertStringToTimestamp(valueNode->node.resType.type, valueNode->datum.p, precision, &valueNode->datum.i) != - TSDB_CODE_SUCCESS) { - valueNode->datum.i = 0; + int32_t code = convertStringToTimestamp(valueNode->node.resType.type, valueNode->datum.p, precision, &valueNode->datum.i); + if (code != TSDB_CODE_SUCCESS) { + return code; } taosMemoryFree(timeStr); valueNode->typeData = valueNode->datum.i; valueNode->node.resType.type = TSDB_DATA_TYPE_TIMESTAMP; valueNode->node.resType.bytes = tDataTypes[TSDB_DATA_TYPE_TIMESTAMP].bytes; + + return TSDB_CODE_SUCCESS; } @@ -546,6 +548,7 @@ EDealRes sclRewriteBasedOnOptr(SNode** pNode, SScalarCtx *ctx, EOperatorType opT EDealRes sclRewriteNonConstOperator(SNode** pNode, SScalarCtx *ctx) { SOperatorNode *node = (SOperatorNode *)*pNode; + int32_t code = 0; if (node->pLeft && (QUERY_NODE_VALUE == nodeType(node->pLeft))) { SValueNode *valueNode = (SValueNode *)node->pLeft; @@ -555,7 +558,11 @@ EDealRes sclRewriteNonConstOperator(SNode** pNode, SScalarCtx *ctx) { if (IS_STR_DATA_TYPE(valueNode->node.resType.type) && node->pRight && nodesIsExprNode(node->pRight) && ((SExprNode*)node->pRight)->resType.type == TSDB_DATA_TYPE_TIMESTAMP) { - sclConvertToTsValueNode(((SExprNode*)node->pRight)->resType.precision, valueNode); + code = sclConvertToTsValueNode(((SExprNode*)node->pRight)->resType.precision, valueNode); + if (code) { + ctx->code = code; + return DEAL_RES_ERROR; + } } } @@ -567,7 +574,11 @@ EDealRes sclRewriteNonConstOperator(SNode** pNode, SScalarCtx *ctx) { if (IS_STR_DATA_TYPE(valueNode->node.resType.type) && node->pLeft && nodesIsExprNode(node->pLeft) && ((SExprNode*)node->pLeft)->resType.type == TSDB_DATA_TYPE_TIMESTAMP) { - sclConvertToTsValueNode(((SExprNode*)node->pLeft)->resType.precision, valueNode); + code = sclConvertToTsValueNode(((SExprNode*)node->pLeft)->resType.precision, valueNode); + if (code) { + ctx->code = code; + return DEAL_RES_ERROR; + } } } diff --git a/source/libs/scalar/src/sclfunc.c b/source/libs/scalar/src/sclfunc.c index 0d47595b3e25214482ab8b6442e521ff00ebdc05..587f9b55156d4c7cc24a6f74751dde13417f33c5 100644 --- a/source/libs/scalar/src/sclfunc.c +++ b/source/libs/scalar/src/sclfunc.c @@ -15,7 +15,11 @@ typedef void (*_trim_fn)(char *, char*, int32_t, int32_t); typedef int16_t (*_len_fn)(char *, int32_t); /** Math functions **/ -static double tlog(double v, double base) { +static double tlog(double v) { + return log(v); +} + +static double tlog2(double v, double base) { double a = log(v); double b = log(base); 
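/* Illustrative sketch (not part of the patch). tlog() is split above into a
 * natural-log helper and a base-b helper so that logFunction(), further down,
 * can dispatch on argument count; the patched tlog2() additionally guards
 * NaN/Inf operands. A minimal, self-contained version of that arity dispatch,
 * with hypothetical helper names: */
#include <math.h>

static double logNaturalSketch(double v) { return log(v); }

static double logBaseSketch(double v, double base) { return log(v) / log(base); }

static double logDispatchSketch(const double *arg, int argc) {
  /* one argument: natural log; two arguments: log of arg[0] in base arg[1] */
  return (argc == 1) ? logNaturalSketch(arg[0]) : logBaseSketch(arg[0], arg[1]);
}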
if (isnan(a) || isinf(a)) { @@ -444,7 +448,8 @@ int32_t concatFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOu for (int32_t k = 0; k < numOfRows; ++k) { bool hasNull = false; for (int32_t i = 0; i < inputNum; ++i) { - if (colDataIsNull_s(pInputData[i], k)) { + if (colDataIsNull_s(pInputData[i], k) || + IS_NULL_TYPE(GET_PARAM_TYPE(&pInput[i]))) { colDataAppendNULL(pOutputData, k); hasNull = true; break; @@ -520,7 +525,8 @@ int32_t concatWsFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *p char *output = outputBuf; for (int32_t k = 0; k < numOfRows; ++k) { - if (colDataIsNull_s(pInputData[0], k)) { + if (colDataIsNull_s(pInputData[0], k) || + IS_NULL_TYPE(GET_PARAM_TYPE(&pInput[0]))) { colDataAppendNULL(pOutputData, k); continue; } @@ -528,7 +534,8 @@ int32_t concatWsFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *p int16_t dataLen = 0; bool hasNull = false; for (int32_t i = 1; i < inputNum; ++i) { - if (colDataIsNull_s(pInputData[i], k)) { + if (colDataIsNull_s(pInputData[i], k) || + IS_NULL_TYPE(GET_PARAM_TYPE(&pInput[i]))) { hasNull = true; break; } @@ -633,7 +640,7 @@ static int32_t doTrimFunction(SScalarParam *pInput, int32_t inputNum, SScalarPar continue; } - char *input = colDataGetData(pInput[0].columnData, i); + char *input = colDataGetData(pInputData, i); int32_t len = varDataLen(input); int32_t charLen = (type == TSDB_DATA_TYPE_VARCHAR) ? len : len / TSDB_NCHAR_SIZE; trimFn(input, output, type, charLen); @@ -849,6 +856,11 @@ int32_t castFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutp int32_t toISO8601Function(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput) { int32_t type = GET_PARAM_TYPE(pInput); + char* tz; + int32_t tzLen; + tz = varDataVal(pInput[1].columnData->pData); + tzLen = varDataLen(pInput[1].columnData->pData); + for (int32_t i = 0; i < pInput[0].numOfRows; ++i) { if (colDataIsNull_s(pInput[0].columnData, i)) { colDataAppendNULL(pOutput->columnData, i); @@ -880,9 +892,13 @@ int32_t toISO8601Function(SScalarParam *pInput, int32_t inputNum, SScalarParam * } struct tm *tmInfo = taosLocalTime((const time_t *)&timeVal, NULL); - strftime(buf, sizeof(buf), "%Y-%m-%dT%H:%M:%S%z", tmInfo); + strftime(buf, sizeof(buf), "%Y-%m-%dT%H:%M:%S", tmInfo); int32_t len = (int32_t)strlen(buf); + //add timezone string + snprintf(buf + len, tzLen + 1, "%s", tz); + len += tzLen; + if (hasFraction) { int32_t fracLen = (int32_t)strlen(fraction) + 1; char *tzInfo = strchr(buf, '+'); @@ -893,7 +909,7 @@ int32_t toISO8601Function(SScalarParam *pInput, int32_t inputNum, SScalarParam * memmove(tzInfo + fracLen, tzInfo, strlen(tzInfo)); } - char tmp[32]; + char tmp[32] = {0}; sprintf(tmp, ".%s", fraction); memcpy(tzInfo, tmp, fracLen); len += fracLen; @@ -925,10 +941,9 @@ int32_t toUnixtimestampFunction(SScalarParam *pInput, int32_t inputNum, SScalarP int32_t ret = convertStringToTimestamp(type, input, timePrec, &timeVal); if (ret != TSDB_CODE_SUCCESS) { colDataAppendNULL(pOutput->columnData, i); - continue; + } else { + colDataAppend(pOutput->columnData, i, (char *)&timeVal, false); } - - colDataAppend(pOutput->columnData, i, (char *)&timeVal, false); } pOutput->numOfRows = pInput->numOfRows; @@ -1206,8 +1221,8 @@ int32_t timeDiffFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *p if (IS_VAR_DATA_TYPE(type)) { /* datetime format strings */ int32_t ret = convertStringToTimestamp(type, input[k], TSDB_TIME_PRECISION_NANO, &timeVal[k]); if (ret != TSDB_CODE_SUCCESS) { - colDataAppendNULL(pOutput->columnData, 
i); - continue; + hasNull = true; + break; } } else if (type == TSDB_DATA_TYPE_BIGINT || type == TSDB_DATA_TYPE_TIMESTAMP) { /* unix timestamp or ts column*/ GET_TYPED_DATA(timeVal[k], int64_t, type, input[k]); @@ -1232,8 +1247,8 @@ int32_t timeDiffFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *p } else if (tsDigits == TSDB_TIME_PRECISION_NANO_DIGITS) { timeVal[k] = timeVal[k]; } else { - colDataAppendNULL(pOutput->columnData, i); - continue; + hasNull = true; + break; } } } @@ -1366,7 +1381,11 @@ int32_t powFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutpu } int32_t logFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput) { - return doScalarFunctionUnique2(pInput, inputNum, pOutput, tlog); + if (inputNum == 1) { + return doScalarFunctionUnique(pInput, inputNum, pOutput, tlog); + } else { + return doScalarFunctionUnique2(pInput, inputNum, pOutput, tlog2); + } } int32_t sqrtFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput) { diff --git a/source/libs/scalar/src/sclvector.c b/source/libs/scalar/src/sclvector.c index 0fb3712c30bb349a406a92f14346370f80112ae6..e844b3cdb6b29612aab571acfb2739e47044770b 100644 --- a/source/libs/scalar/src/sclvector.c +++ b/source/libs/scalar/src/sclvector.c @@ -922,23 +922,13 @@ static void doReleaseVec(SColumnInfoData* pCol, int32_t type) { } } -char *getJsonValue(char *json, char *key){ //todo - json++; // jump type - int16_t cols = kvRowNCols(json); - for (int i = 0; i < cols; ++i) { - SColIdx *pColIdx = kvRowColIdxAt(json, i); - char *data = kvRowColVal(json, pColIdx); - if(i == 0){ - if(*data == TSDB_DATA_TYPE_NULL) { - return NULL; - } - continue; - } - if(memcmp(key, data, varDataTLen(data)) == 0){ - return data + varDataTLen(data); - } +STagVal getJsonValue(char *json, char *key, bool *isExist) { + STagVal val = {.pKey = key}; + bool find = tTagGet(((const STag *)json), &val); // json value is null and not exist is different + if(isExist){ + *isExist = find; } - return NULL; + return val; } void vectorJsonArrow(SScalarParam* pLeft, SScalarParam* pRight, SScalarParam *pOut, int32_t _ord) { @@ -950,6 +940,8 @@ void vectorJsonArrow(SScalarParam* pLeft, SScalarParam* pRight, SScalarParam *pO pOut->numOfRows = TMAX(pLeft->numOfRows, pRight->numOfRows); char *pRightData = colDataGetVarData(pRight->columnData, 0); + char *jsonKey = taosMemoryCalloc(1, varDataLen(pRightData) + 1); + memcpy(jsonKey, varDataVal(pRightData), varDataLen(pRightData)); for (; i >= 0 && i < pLeft->numOfRows; i += step) { if (colDataIsNull_var(pLeft->columnData, i)) { colDataSetNull_var(pOutputCol, i); @@ -957,14 +949,15 @@ void vectorJsonArrow(SScalarParam* pLeft, SScalarParam* pRight, SScalarParam *pO continue; } char *pLeftData = colDataGetVarData(pLeft->columnData, i); - char *value = getJsonValue(pLeftData, pRightData); - if (!value) { - colDataSetNull_var(pOutputCol, i); - pOutputCol->hasNull = true; - continue; + bool isExist = false; + STagVal value = getJsonValue(pLeftData, jsonKey, &isExist); + char *data = isExist ? 
tTagValToData(&value, true) : NULL; + colDataAppend(pOutputCol, i, data, data == NULL); + if(isExist && IS_VAR_DATA_TYPE(value.type) && data){ + taosMemoryFree(data); } - colDataAppend(pOutputCol, i, value, false); } + taosMemoryFree(jsonKey); } void vectorMathAdd(SScalarParam* pLeft, SScalarParam* pRight, SScalarParam *pOut, int32_t _ord) { diff --git a/source/libs/scalar/test/filter/filterTests.cpp b/source/libs/scalar/test/filter/filterTests.cpp index 59c3104e96c0320804ba4f17dd0a013146b27a2d..7fb1ffbd64aecb2fee9d7c862f295070dbea8e09 100644 --- a/source/libs/scalar/test/filter/filterTests.cpp +++ b/source/libs/scalar/test/filter/filterTests.cpp @@ -60,7 +60,7 @@ void flttInitLogFile() { tsAsyncLog = 0; qDebugFlag = 159; - strcpy(tsLogDir, "/var/log/taos"); + strcpy(tsLogDir, TD_LOG_DIR_PATH); if (taosInitLog(defaultLogFileNamePrefix, maxLogFileNum) < 0) { printf("failed to open log file in directory:%s\n", tsLogDir); diff --git a/source/libs/scalar/test/scalar/CMakeLists.txt b/source/libs/scalar/test/scalar/CMakeLists.txt index 15d1c2cb4424fded0b04d1c82504768d57b21807..86b936d93ae950e27069835cffcb0e8a99768ac9 100644 --- a/source/libs/scalar/test/scalar/CMakeLists.txt +++ b/source/libs/scalar/test/scalar/CMakeLists.txt @@ -8,7 +8,7 @@ AUX_SOURCE_DIRECTORY(${CMAKE_CURRENT_SOURCE_DIR} SOURCE_LIST) ADD_EXECUTABLE(scalarTest ${SOURCE_LIST}) TARGET_LINK_LIBRARIES( scalarTest - PUBLIC os util common gtest qcom function nodes scalar parser + PUBLIC os util common gtest qcom function nodes scalar parser catalog transport ) TARGET_INCLUDE_DIRECTORIES( @@ -18,6 +18,6 @@ TARGET_INCLUDE_DIRECTORIES( PRIVATE "${TD_SOURCE_DIR}/source/libs/scalar/inc" ) add_test( - NAME scalarTest - COMMAND scalarTest + NAME scalarTest + COMMAND scalarTest ) diff --git a/source/libs/scalar/test/scalar/scalarTests.cpp b/source/libs/scalar/test/scalar/scalarTests.cpp index 3fafc83b18365d490003a792748606c8d4fce804..8a29462a2bbde2b1d4bf57d5f9ed50c983aed6e6 100644 --- a/source/libs/scalar/test/scalar/scalarTests.cpp +++ b/source/libs/scalar/test/scalar/scalarTests.cpp @@ -74,7 +74,7 @@ void scltInitLogFile() { tsAsyncLog = 0; qDebugFlag = 159; - strcpy(tsLogDir, "/var/log/taos"); + strcpy(tsLogDir, TD_LOG_DIR_PATH); if (taosInitLog(defaultLogFileNamePrefix, maxLogFileNum) < 0) { printf("failed to open log file in directory:%s\n", tsLogDir); @@ -217,7 +217,7 @@ void scltMakeOpNode(SNode **pNode, EOperatorType opType, int32_t resType, SNode SOperatorNode *onode = (SOperatorNode *)node; onode->node.resType.type = resType; onode->node.resType.bytes = tDataTypes[resType].bytes; - + onode->opType = opType; onode->pLeft = pLeft; onode->pRight = pRight; @@ -1035,7 +1035,7 @@ void makeJsonArrow(SSDataBlock **src, SNode **opNode, void *json, char *key){ SNode *pLeft = NULL, *pRight = NULL; scltMakeValueNode(&pRight, TSDB_DATA_TYPE_BINARY, keyVar); - scltMakeColumnNode(&pLeft, src, TSDB_DATA_TYPE_JSON, kvRowLen(json), 1, json); + scltMakeColumnNode(&pLeft, src, TSDB_DATA_TYPE_JSON, ((STag*)json)->len, 1, json); scltMakeOpNode(opNode, OP_TYPE_JSON_GET_VALUE, TSDB_DATA_TYPE_JSON, pLeft, pRight); } @@ -1111,17 +1111,9 @@ TEST(columnTest, json_column_arith_op) { char rightv[256] = {0}; memcpy(rightv, rightvTmp, strlen(rightvTmp)); - SKVRowBuilder kvRowBuilder; - tdInitKVRowBuilder(&kvRowBuilder); - parseJsontoTagData(rightv, &kvRowBuilder, NULL, 0); - SKVRow row = tdGetKVRowFromBuilder(&kvRowBuilder); - char *tmp = (char *)taosMemoryRealloc(row, kvRowLen(row)+1); - if(tmp == NULL){ - ASSERT_TRUE(0); - } - memmove(tmp+1, tmp, 
kvRowLen(tmp)); - *tmp = TSDB_DATA_TYPE_JSON; - row = tmp; + SArray *tags = taosArrayInit(1, sizeof(STagVal)); + STag* row = NULL; + parseJsontoTagData(rightv, tags, &row, NULL); const int32_t len = 8; EOperatorType op[len] = {OP_TYPE_ADD, OP_TYPE_SUB, OP_TYPE_MULTI, OP_TYPE_DIV, @@ -1175,7 +1167,7 @@ TEST(columnTest, json_column_arith_op) { makeCalculate(row, key, TSDB_DATA_TYPE_INT, &input[i], eRes5[i], op[i]); } - tdDestroyKVRowBuilder(&kvRowBuilder); + taosArrayDestroy(tags); taosMemoryFree(row); } @@ -1195,17 +1187,9 @@ TEST(columnTest, json_column_logic_op) { char rightv[256] = {0}; memcpy(rightv, rightvTmp, strlen(rightvTmp)); - SKVRowBuilder kvRowBuilder; - tdInitKVRowBuilder(&kvRowBuilder); - parseJsontoTagData(rightv, &kvRowBuilder, NULL, 0); - SKVRow row = tdGetKVRowFromBuilder(&kvRowBuilder); - char *tmp = (char *)taosMemoryRealloc(row, kvRowLen(row)+1); - if(tmp == NULL){ - ASSERT_TRUE(0); - } - memmove(tmp+1, tmp, kvRowLen(tmp)); - *tmp = TSDB_DATA_TYPE_JSON; - row = tmp; + SArray *tags = taosArrayInit(1, sizeof(STagVal)); + STag* row = NULL; + parseJsontoTagData(rightv, tags, &row, NULL); const int32_t len = 9; const int32_t len1 = 4; @@ -1305,7 +1289,7 @@ TEST(columnTest, json_column_logic_op) { taosMemoryFree(rightData); } - tdDestroyKVRowBuilder(&kvRowBuilder); + taosArrayDestroy(tags); taosMemoryFree(row); } @@ -2498,7 +2482,7 @@ TEST(ScalarFunctionTest, tanFunction_column) { code = tanFunction(pInput, 1, pOutput); ASSERT_EQ(code, TSDB_CODE_SUCCESS); for (int32_t i = 0; i < rowNum; ++i) { - ASSERT_EQ(*((double *)colDataGetData(pOutput->columnData, i)), result[i]); + ASSERT_NEAR(*((double *)colDataGetData(pOutput->columnData, i)), result[i], 1e-15); PRINTF("tiny_int after TAN:%f\n", *((double *)colDataGetData(pOutput->columnData, i))); } scltDestroyDataBlock(pInput); @@ -2517,7 +2501,7 @@ TEST(ScalarFunctionTest, tanFunction_column) { code = tanFunction(pInput, 1, pOutput); ASSERT_EQ(code, TSDB_CODE_SUCCESS); for (int32_t i = 0; i < rowNum; ++i) { - ASSERT_EQ(*((double *)colDataGetData(pOutput->columnData, i)), result[i]); + ASSERT_NEAR(*((double *)colDataGetData(pOutput->columnData, i)), result[i], 1e-15); PRINTF("float after TAN:%f\n", *((double *)colDataGetData(pOutput->columnData, i))); } diff --git a/source/libs/scheduler/inc/schedulerInt.h b/source/libs/scheduler/inc/schedulerInt.h index 6599d00f58d530595435618e9409344970ee531a..44b3e6d396b8750a78ba9e414243f45686a121ec 100644 --- a/source/libs/scheduler/inc/schedulerInt.h +++ b/source/libs/scheduler/inc/schedulerInt.h @@ -204,7 +204,7 @@ typedef struct SSchJob { SSchTask *fetchTask; int32_t errCode; SRWLatch resLock; - void *queryRes; + SQueryExecRes execRes; void *resData; //TODO free it or not int32_t resNumOfRows; SSchResInfo userRes; @@ -297,10 +297,10 @@ void schFreeRpcCtx(SRpcCtx *pCtx); int32_t schGetCallbackFp(int32_t msgType, __async_send_cb_fn_t *fp); bool schJobNeedToStop(SSchJob *pJob, int8_t *pStatus); int32_t schProcessOnTaskSuccess(SSchJob *pJob, SSchTask *pTask); -int32_t schSaveJobQueryRes(SSchJob *pJob, SResReadyRsp *rsp); +int32_t schSaveJobQueryRes(SSchJob *pJob, SQueryTableRsp *rsp); int32_t schProcessOnExplainDone(SSchJob *pJob, SSchTask *pTask, SRetrieveTableRsp *pRsp); void schProcessOnDataFetched(SSchJob *job); -int32_t schGetTaskFromTaskList(SHashObj *pTaskList, uint64_t taskId, SSchTask **pTask); +int32_t schGetTaskInJob(SSchJob *pJob, uint64_t taskId, SSchTask **pTask); int32_t schUpdateTaskExecNodeHandle(SSchTask *pTask, void *handle, int32_t rspCode); void schFreeRpcCtxVal(const void 
*arg); int32_t schMakeBrokenLinkVal(SSchJob *pJob, SSchTask *pTask, SRpcBrokenlinkVal *brokenVal, bool isHb); @@ -314,12 +314,11 @@ int32_t schCancelJob(SSchJob *pJob); int32_t schProcessOnJobDropped(SSchJob *pJob, int32_t errCode); uint64_t schGenTaskId(void); void schCloseJobRef(void); -int32_t schExecJob(void *pTrans, SArray *pNodeList, SQueryPlan *pDag, int64_t *pJob, const char *sql, - int64_t startTs, SSchResInfo *pRes); -int32_t schAsyncExecJob(void *pTrans, SArray *pNodeList, SQueryPlan *pDag, int64_t *pJob, const char *sql, - int64_t startTs, SSchResInfo *pRes); +int32_t schExecJob(void *pTrans, SArray *pNodeList, SQueryPlan *pDag, int64_t *pJob, const char *sql, int64_t startTs, SSchResInfo *pRes); +int32_t schAsyncExecJob(void *pTrans, SArray *pNodeList, SQueryPlan *pDag, int64_t *pJob, const char *sql, int64_t startTs, SSchResInfo *pRes); int32_t schFetchRows(SSchJob *pJob); int32_t schAsyncFetchRows(SSchJob *pJob); +int32_t schUpdateTaskHandle(SSchJob *pJob, SSchTask *pTask, int32_t msgType, void *handle, int32_t rspCode); #ifdef __cplusplus diff --git a/source/libs/scheduler/src/schJob.c b/source/libs/scheduler/src/schJob.c index af249334b76f47604d4c38e76e40cc3cb582f1ed..dbad053c65ba3572a6d4740e4b991f3760e9631b 100644 --- a/source/libs/scheduler/src/schJob.c +++ b/source/libs/scheduler/src/schJob.c @@ -342,6 +342,36 @@ int32_t schRecordTaskExecNode(SSchJob *pJob, SSchTask *pTask, SQueryNodeAddr *ad return TSDB_CODE_SUCCESS; } +int32_t schDropTaskExecNode(SSchJob *pJob, SSchTask *pTask, void *handle) { + if (NULL == pTask->execNodes) { + return TSDB_CODE_SUCCESS; + } + + int32_t num = taosArrayGetSize(pTask->execNodes); + for (int32_t i = 0; i < num; ++i) { + SSchNodeInfo* pNode = taosArrayGet(pTask->execNodes, i); + if (pNode->handle == handle) { + taosArrayRemove(pTask->execNodes, i); + break; + } + } + + return TSDB_CODE_SUCCESS; +} + +int32_t schUpdateTaskHandle(SSchJob *pJob, SSchTask *pTask, int32_t msgType, void *handle, int32_t rspCode) { + SCH_SET_TASK_HANDLE(pTask, handle); + + schUpdateTaskExecNodeHandle(pTask, handle, rspCode); + + if (msgType == TDMT_SCH_LINK_BROKEN) { + schDropTaskExecNode(pJob, pTask, handle); + } + + return TSDB_CODE_SUCCESS; +} + + int32_t schRecordQueryDataSrc(SSchJob *pJob, SSchTask *pTask) { if (!SCH_IS_DATA_SRC_QRY_TASK(pTask)) { return TSDB_CODE_SUCCESS; @@ -469,6 +499,34 @@ _return: SCH_RET(code); } +int32_t schSetAddrsFromNodeList(SSchJob *pJob, SSchTask *pTask) { + int32_t addNum = 0; + int32_t nodeNum = 0; + + if (pJob->nodeList) { + nodeNum = taosArrayGetSize(pJob->nodeList); + + for (int32_t i = 0; i < nodeNum && addNum < SCH_MAX_CANDIDATE_EP_NUM; ++i) { + SQueryNodeAddr *naddr = taosArrayGet(pJob->nodeList, i); + + if (NULL == taosArrayPush(pTask->candidateAddrs, naddr)) { + SCH_TASK_ELOG("taosArrayPush execNode to candidate addrs failed, addNum:%d, errno:%d", addNum, errno); + SCH_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY); + } + + ++addNum; + } + } + + if (addNum <= 0) { + SCH_TASK_ELOG("no available execNode as candidates, nodeNum:%d", nodeNum); + SCH_ERR_RET(TSDB_CODE_QRY_INVALID_INPUT); + } + + return TSDB_CODE_SUCCESS; +} + + int32_t schSetTaskCandidateAddrs(SSchJob *pJob, SSchTask *pTask) { if (NULL != pTask->candidateAddrs) { return TSDB_CODE_SUCCESS; @@ -492,27 +550,7 @@ int32_t schSetTaskCandidateAddrs(SSchJob *pJob, SSchTask *pTask) { return TSDB_CODE_SUCCESS; } - int32_t addNum = 0; - int32_t nodeNum = 0; - if (pJob->nodeList) { - nodeNum = taosArrayGetSize(pJob->nodeList); - - for (int32_t i = 0; i < nodeNum && addNum < 
SCH_MAX_CANDIDATE_EP_NUM; ++i) { - SQueryNodeAddr *naddr = taosArrayGet(pJob->nodeList, i); - - if (NULL == taosArrayPush(pTask->candidateAddrs, naddr)) { - SCH_TASK_ELOG("taosArrayPush execNode to candidate addrs failed, addNum:%d, errno:%d", addNum, errno); - SCH_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY); - } - - ++addNum; - } - } - - if (addNum <= 0) { - SCH_TASK_ELOG("no available execNode as candidates, nodeNum:%d", nodeNum); - SCH_ERR_RET(TSDB_CODE_QRY_INVALID_INPUT); - } + SCH_ERR_RET(schSetAddrsFromNodeList(pJob, pTask)); /* for (int32_t i = 0; i < job->dataSrcEps.numOfEps && addNum < SCH_MAX_CANDIDATE_EP_NUM; ++i) { @@ -735,8 +773,8 @@ _return: int32_t schSetJobQueryRes(SSchJob* pJob, SQueryResult* pRes) { pRes->code = atomic_load_32(&pJob->errCode); pRes->numOfRows = pJob->resNumOfRows; - pRes->res = pJob->queryRes; - pJob->queryRes = NULL; + pRes->res = pJob->execRes; + pJob->execRes.res = NULL; return TSDB_CODE_SUCCESS; } @@ -1001,19 +1039,19 @@ int32_t schProcessOnTaskSuccess(SSchJob *pJob, SSchTask *pTask) { */ for (int32_t i = 0; i < parentNum; ++i) { - SSchTask *par = *(SSchTask **)taosArrayGet(pTask->parents, i); - int32_t readyNum = atomic_add_fetch_32(&par->childReady, 1); + SSchTask *parent = *(SSchTask **)taosArrayGet(pTask->parents, i); + int32_t readyNum = atomic_add_fetch_32(&parent->childReady, 1); - SCH_LOCK(SCH_WRITE, &par->lock); + SCH_LOCK(SCH_WRITE, &parent->lock); SDownstreamSourceNode source = {.type = QUERY_NODE_DOWNSTREAM_SOURCE, .taskId = pTask->taskId, .schedId = schMgmt.sId, .addr = pTask->succeedAddr}; - qSetSubplanExecutionNode(par->plan, pTask->plan->id.groupId, &source); - SCH_UNLOCK(SCH_WRITE, &par->lock); + qSetSubplanExecutionNode(parent->plan, pTask->plan->id.groupId, &source); + SCH_UNLOCK(SCH_WRITE, &parent->lock); - if (SCH_TASK_READY_FOR_LAUNCH(readyNum, par)) { - SCH_ERR_RET(schLaunchTask(pJob, par)); + if (SCH_TASK_READY_FOR_LAUNCH(readyNum, parent)) { + SCH_ERR_RET(schLaunchTask(pJob, parent)); } } @@ -1067,11 +1105,11 @@ int32_t schProcessOnExplainDone(SSchJob *pJob, SSchTask *pTask, SRetrieveTableRs return TSDB_CODE_SUCCESS; } -int32_t schSaveJobQueryRes(SSchJob *pJob, SResReadyRsp *rsp) { +int32_t schSaveJobQueryRes(SSchJob *pJob, SQueryTableRsp *rsp) { if (rsp->tbFName[0]) { - if (NULL == pJob->queryRes) { - pJob->queryRes = taosArrayInit(pJob->taskNum, sizeof(STbVerInfo)); - if (NULL == pJob->queryRes) { + if (NULL == pJob->execRes.res) { + pJob->execRes.res = taosArrayInit(pJob->taskNum, sizeof(STbVerInfo)); + if (NULL == pJob->execRes.res) { SCH_ERR_RET(TSDB_CODE_OUT_OF_MEMORY); } } @@ -1081,13 +1119,14 @@ int32_t schSaveJobQueryRes(SSchJob *pJob, SResReadyRsp *rsp) { tbInfo.sversion = rsp->sversion; tbInfo.tversion = rsp->tversion; - taosArrayPush((SArray *)pJob->queryRes, &tbInfo); + taosArrayPush((SArray *)pJob->execRes.res, &tbInfo); + pJob->execRes.msgType = TDMT_VND_QUERY; } return TSDB_CODE_SUCCESS; } -int32_t schGetTaskFromTaskList(SHashObj *pTaskList, uint64_t taskId, SSchTask **pTask) { +int32_t schGetTaskFromList(SHashObj *pTaskList, uint64_t taskId, SSchTask **pTask) { int32_t s = taosHashGetSize(pTaskList); if (s <= 0) { return TSDB_CODE_SUCCESS; @@ -1103,6 +1142,21 @@ int32_t schGetTaskFromTaskList(SHashObj *pTaskList, uint64_t taskId, SSchTask ** return TSDB_CODE_SUCCESS; } +int32_t schGetTaskInJob(SSchJob *pJob, uint64_t taskId, SSchTask **pTask) { + schGetTaskFromList(pJob->execTasks, taskId, pTask); + if (NULL == *pTask) { + schGetTaskFromList(pJob->succTasks, taskId, pTask); + + if (NULL == *pTask) { + 
SCH_JOB_ELOG("task not found in execList & succList, taskId:%" PRIx64, taskId); + SCH_ERR_RET(TSDB_CODE_SCH_INTERNAL_ERROR); + } + } + + return TSDB_CODE_SUCCESS; +} + + int32_t schUpdateTaskExecNodeHandle(SSchTask *pTask, void *handle, int32_t rspCode) { if (rspCode || NULL == pTask->execNodes || taosArrayGetSize(pTask->execNodes) > 1 || taosArrayGetSize(pTask->execNodes) <= 0) { @@ -1296,11 +1350,7 @@ void schFreeJobImpl(void *job) { qExplainFreeCtx(pJob->explainCtx); - if (SCH_IS_QUERY_JOB(pJob)) { - taosArrayDestroy((SArray *)pJob->queryRes); - } else { - tFreeSSubmitRsp((SSubmitRsp*)pJob->queryRes); - } + destroyQueryExecRes(&pJob->execRes); taosMemoryFreeClear(pJob->userRes.queryRes); taosMemoryFreeClear(pJob->resData); diff --git a/source/libs/scheduler/src/schRemote.c b/source/libs/scheduler/src/schRemote.c index 666c24cf01a833528b70a358cddee3b8f71b82f8..0ba91a1c85649463225bd5f6d82edad4b56714c9 100644 --- a/source/libs/scheduler/src/schRemote.c +++ b/source/libs/scheduler/src/schRemote.c @@ -31,7 +31,7 @@ int32_t schValidateReceivedMsgType(SSchJob *pJob, SSchTask *pTask, int32_t msgTy case TDMT_VND_EXPLAIN_RSP: return TSDB_CODE_SUCCESS; case TDMT_VND_QUERY_RSP: // query_rsp may be processed later than ready_rsp - if (lastMsgType != reqMsgType && -1 != lastMsgType && TDMT_VND_FETCH != lastMsgType) { + if (lastMsgType != reqMsgType && -1 != lastMsgType) { SCH_TASK_DLOG("rsp msg type mis-match, last sent msgType:%s, rspType:%s", TMSG_INFO(lastMsgType), TMSG_INFO(msgType)); } @@ -41,22 +41,6 @@ int32_t schValidateReceivedMsgType(SSchJob *pJob, SSchTask *pTask, int32_t msgTy TMSG_INFO(msgType)); } - SCH_SET_TASK_LASTMSG_TYPE(pTask, -1); - return TSDB_CODE_SUCCESS; - case TDMT_VND_RES_READY_RSP: - reqMsgType = TDMT_VND_QUERY; - if (lastMsgType != reqMsgType && -1 != lastMsgType) { - SCH_TASK_ELOG("rsp msg type mis-match, last sent msgType:%s, rspType:%s", - (lastMsgType > 0 ? 
TMSG_INFO(lastMsgType) : "null"), TMSG_INFO(msgType)); - SCH_ERR_RET(TSDB_CODE_SCH_STATUS_ERROR); - } - - if (taskStatus != JOB_TASK_STATUS_EXECUTING && taskStatus != JOB_TASK_STATUS_PARTIAL_SUCCEED) { - SCH_TASK_ELOG("rsp msg conflicted with task status, status:%s, rspType:%s", jobTaskStatusStr(taskStatus), - TMSG_INFO(msgType)); - SCH_ERR_RET(TSDB_CODE_SCH_STATUS_ERROR); - } - SCH_SET_TASK_LASTMSG_TYPE(pTask, -1); return TSDB_CODE_SUCCESS; case TDMT_VND_FETCH_RSP: @@ -110,6 +94,7 @@ int32_t schHandleResponseMsg(SSchJob *pJob, SSchTask *pTask, int32_t msgType, ch if (schJobNeedToStop(pJob, &status)) { SCH_TASK_ELOG("rsp not processed cause of job status, job status:%s, rspCode:0x%x", jobTaskStatusStr(status), rspCode); + taosMemoryFreeClear(msg); SCH_RET(atomic_load_32(&pJob->errCode)); } @@ -137,6 +122,8 @@ int32_t schHandleResponseMsg(SSchJob *pJob, SSchTask *pTask, int32_t msgType, ch } SCH_ERR_JRET(rspCode); + taosMemoryFreeClear(msg); + SCH_ERR_RET(schProcessOnTaskSuccess(pJob, pTask)); break; } @@ -161,6 +148,8 @@ int32_t schHandleResponseMsg(SSchJob *pJob, SSchTask *pTask, int32_t msgType, ch } SCH_ERR_JRET(rspCode); + taosMemoryFreeClear(msg); + SCH_ERR_RET(schProcessOnTaskSuccess(pJob, pTask)); break; } @@ -173,6 +162,9 @@ int32_t schHandleResponseMsg(SSchJob *pJob, SSchTask *pTask, int32_t msgType, ch tDecoderClear(&coder); SCH_ERR_JRET(code); SCH_ERR_JRET(rsp.code); + + pJob->execRes.res = rsp.pMeta; + pJob->execRes.msgType = TDMT_VND_ALTER_TABLE; } SCH_ERR_JRET(rspCode); @@ -180,6 +172,9 @@ int32_t schHandleResponseMsg(SSchJob *pJob, SSchTask *pTask, int32_t msgType, ch if (NULL == msg) { SCH_ERR_JRET(TSDB_CODE_QRY_INVALID_INPUT); } + + taosMemoryFreeClear(msg); + SCH_ERR_RET(schProcessOnTaskSuccess(pJob, pTask)); break; } @@ -212,8 +207,8 @@ int32_t schHandleResponseMsg(SSchJob *pJob, SSchTask *pTask, int32_t msgType, ch SCH_TASK_DLOG("submit succeed, affectedRows:%d", rsp->affectedRows); SCH_LOCK(SCH_WRITE, &pJob->resLock); - if (pJob->queryRes) { - SSubmitRsp *sum = pJob->queryRes; + if (pJob->execRes.res) { + SSubmitRsp *sum = pJob->execRes.res; sum->affectedRows += rsp->affectedRows; sum->nBlocks += rsp->nBlocks; sum->pBlocks = taosMemoryRealloc(sum->pBlocks, sum->nBlocks * sizeof(*sum->pBlocks)); @@ -221,34 +216,20 @@ int32_t schHandleResponseMsg(SSchJob *pJob, SSchTask *pTask, int32_t msgType, ch taosMemoryFree(rsp->pBlocks); taosMemoryFree(rsp); } else { - pJob->queryRes = rsp; + pJob->execRes.res = rsp; + pJob->execRes.msgType = TDMT_VND_SUBMIT; } SCH_UNLOCK(SCH_WRITE, &pJob->resLock); } + taosMemoryFreeClear(msg); + SCH_ERR_RET(schProcessOnTaskSuccess(pJob, pTask)); break; } case TDMT_VND_QUERY_RSP: { - SQueryTableRsp rsp = {0}; - if (msg) { - SCH_ERR_JRET(tDeserializeSQueryTableRsp(msg, msgSize, &rsp)); - SCH_ERR_JRET(rsp.code); - } - - SCH_ERR_JRET(rspCode); - - if (NULL == msg) { - SCH_ERR_JRET(TSDB_CODE_QRY_INVALID_INPUT); - } - - // SCH_ERR_JRET(schBuildAndSendMsg(pJob, pTask, NULL, TDMT_VND_RES_READY)); - - break; - } - case TDMT_VND_RES_READY_RSP: { - SResReadyRsp *rsp = (SResReadyRsp *)msg; + SQueryTableRsp *rsp = (SQueryTableRsp *)msg; SCH_ERR_JRET(rspCode); if (NULL == msg) { @@ -257,6 +238,8 @@ int32_t schHandleResponseMsg(SSchJob *pJob, SSchTask *pTask, int32_t msgType, ch SCH_ERR_JRET(rsp->code); SCH_ERR_JRET(schSaveJobQueryRes(pJob, rsp)); + + taosMemoryFreeClear(msg); SCH_ERR_RET(schProcessOnTaskSuccess(pJob, pTask)); @@ -308,6 +291,8 @@ int32_t schHandleResponseMsg(SSchJob *pJob, SSchTask *pTask, int32_t msgType, ch 
SCH_ERR_JRET(schProcessOnExplainDone(pJob, pTask, pRsp)); } + taosMemoryFreeClear(msg); + return TSDB_CODE_SUCCESS; } @@ -315,6 +300,8 @@ int32_t schHandleResponseMsg(SSchJob *pJob, SSchTask *pTask, int32_t msgType, ch SCH_ERR_JRET(schFetchFromRemote(pJob)); + taosMemoryFreeClear(msg); + return TSDB_CODE_SUCCESS; } @@ -333,6 +320,8 @@ int32_t schHandleResponseMsg(SSchJob *pJob, SSchTask *pTask, int32_t msgType, ch SCH_TASK_DLOG("got fetch rsp, rows:%d, complete:%d", htonl(rsp->numOfRows), rsp->completed); + msg = NULL; + schProcessOnDataFetched(pJob); break; } @@ -355,6 +344,8 @@ int32_t schHandleResponseMsg(SSchJob *pJob, SSchTask *pTask, int32_t msgType, ch _return: + taosMemoryFreeClear(msg); + SCH_RET(schProcessOnTaskFailure(pJob, pTask, code)); } @@ -371,27 +362,11 @@ int32_t schHandleCallback(void *param, const SDataBuf *pMsg, int32_t msgType, in SCH_ERR_JRET(TSDB_CODE_QRY_JOB_FREED); } - schGetTaskFromTaskList(pJob->execTasks, pParam->taskId, &pTask); - if (NULL == pTask) { - if (TDMT_VND_EXPLAIN_RSP == msgType) { - schGetTaskFromTaskList(pJob->succTasks, pParam->taskId, &pTask); - } else { - SCH_JOB_ELOG("task not found in execTask list, refId:%" PRIx64 ", taskId:%" PRIx64, pParam->refId, - pParam->taskId); - SCH_ERR_JRET(TSDB_CODE_SCH_INTERNAL_ERROR); - } - } - - if (NULL == pTask) { - SCH_JOB_ELOG("task not found in execList & succList, refId:%" PRIx64 ", taskId:%" PRIx64, pParam->refId, - pParam->taskId); - SCH_ERR_JRET(TSDB_CODE_SCH_INTERNAL_ERROR); - } + SCH_ERR_JRET(schGetTaskInJob(pJob, pParam->taskId, &pTask)); SCH_TASK_DLOG("rsp msg received, type:%s, handle:%p, code:%s", TMSG_INFO(msgType), pMsg->handle, tstrerror(rspCode)); - SCH_SET_TASK_HANDLE(pTask, pMsg->handle); - schUpdateTaskExecNodeHandle(pTask, pMsg->handle, rspCode); + SCH_ERR_JRET(schUpdateTaskHandle(pJob, pTask, msgType, pMsg->handle, rspCode)); SCH_ERR_JRET(schHandleResponseMsg(pJob, pTask, msgType, pMsg->pData, pMsg->len, rspCode)); @@ -429,10 +404,6 @@ int32_t schHandleFetchCallback(void *param, const SDataBuf *pMsg, int32_t code) return schHandleCallback(param, pMsg, TDMT_VND_FETCH_RSP, code); } -int32_t schHandleReadyCallback(void *param, const SDataBuf *pMsg, int32_t code) { - return schHandleCallback(param, pMsg, TDMT_VND_RES_READY_RSP, code); -} - int32_t schHandleExplainCallback(void *param, const SDataBuf *pMsg, int32_t code) { return schHandleCallback(param, pMsg, TDMT_VND_EXPLAIN_RSP, code); } @@ -518,9 +489,6 @@ int32_t schGetCallbackFp(int32_t msgType, __async_send_cb_fn_t *fp) { case TDMT_VND_QUERY: *fp = schHandleQueryCallback; break; - case TDMT_VND_RES_READY: - *fp = schHandleReadyCallback; - break; case TDMT_VND_EXPLAIN: *fp = schHandleExplainCallback; break; @@ -933,7 +901,6 @@ _return: int32_t schMakeQueryRpcCtx(SSchJob *pJob, SSchTask *pTask, SRpcCtx *pCtx) { int32_t code = 0; - SMsgSendInfo *pReadyMsgSendInfo = NULL; SMsgSendInfo *pExplainMsgSendInfo = NULL; pCtx->args = taosHashInit(1, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), false, HASH_ENTRY_LOCK); @@ -942,18 +909,10 @@ int32_t schMakeQueryRpcCtx(SSchJob *pJob, SSchTask *pTask, SRpcCtx *pCtx) { SCH_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY); } - SCH_ERR_JRET(schGenerateCallBackInfo(pJob, pTask, TDMT_VND_RES_READY, &pReadyMsgSendInfo)); SCH_ERR_JRET(schGenerateCallBackInfo(pJob, pTask, TDMT_VND_EXPLAIN, &pExplainMsgSendInfo)); - int32_t msgType = TDMT_VND_RES_READY_RSP; - SRpcCtxVal ctxVal = {.val = pReadyMsgSendInfo, .clone = schCloneSMsgSendInfo}; - if (taosHashPut(pCtx->args, &msgType, sizeof(msgType), &ctxVal, sizeof(ctxVal))) { 
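/* Illustrative sketch (not part of the patch). With the ready callback gone,
 * the rpc ctx built above keeps a single entry, the explain callback,
 * registered in a hash keyed by the response message type. A minimal
 * array-backed version of that keyed registration, with hypothetical types
 * rather than the scheduler's taosHash API: */
typedef int (*RspCbSketch)(void *param, const void *msg, int code);

typedef struct {
  int         msgType;
  RspCbSketch cb;
} CbSlotSketch;

typedef struct {
  CbSlotSketch slots[4];
  int          n;
} RpcCtxSketch;

static int registerCbSketch(RpcCtxSketch *ctx, int msgType, RspCbSketch cb) {
  if (ctx->n >= 4) return -1; /* mirrors the taosHashPut failure path */
  ctx->slots[ctx->n].msgType = msgType;
  ctx->slots[ctx->n].cb = cb;
  ctx->n++;
  return 0;
}

static RspCbSketch lookupCbSketch(const RpcCtxSketch *ctx, int msgType) {
  for (int i = 0; i < ctx->n; ++i) {
    if (ctx->slots[i].msgType == msgType) return ctx->slots[i].cb;
  }
  return (RspCbSketch)0; /* no callback registered for this response type */
}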
- SCH_TASK_ELOG("taosHashPut msg %d to rpcCtx failed", msgType); - SCH_ERR_JRET(TSDB_CODE_QRY_OUT_OF_MEMORY); - } - - msgType = TDMT_VND_EXPLAIN_RSP; - ctxVal.val = pExplainMsgSendInfo; + int32_t msgType = TDMT_VND_EXPLAIN_RSP; + SRpcCtxVal ctxVal = {.val = pExplainMsgSendInfo, .clone = schCloneSMsgSendInfo}; if (taosHashPut(pCtx->args, &msgType, sizeof(msgType), &ctxVal, sizeof(ctxVal))) { SCH_TASK_ELOG("taosHashPut msg %d to rpcCtx failed", msgType); SCH_ERR_JRET(TSDB_CODE_QRY_OUT_OF_MEMORY); @@ -968,11 +927,6 @@ _return: taosHashCleanup(pCtx->args); - if (pReadyMsgSendInfo) { - taosMemoryFreeClear(pReadyMsgSendInfo->param); - taosMemoryFreeClear(pReadyMsgSendInfo); - } - if (pExplainMsgSendInfo) { taosMemoryFreeClear(pExplainMsgSendInfo->param); taosMemoryFreeClear(pExplainMsgSendInfo); @@ -1128,24 +1082,6 @@ int32_t schBuildAndSendMsg(SSchJob *pJob, SSchTask *pTask, SQueryNodeAddr *addr, persistHandle = true; break; } - - case TDMT_VND_RES_READY: { - msgSize = sizeof(SResReadyReq); - msg = taosMemoryCalloc(1, msgSize); - if (NULL == msg) { - SCH_TASK_ELOG("calloc %d failed", msgSize); - SCH_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY); - } - - SResReadyReq *pMsg = msg; - - pMsg->header.vgId = htonl(addr->nodeId); - - pMsg->sId = htobe64(schMgmt.sId); - pMsg->queryId = htobe64(pJob->queryId); - pMsg->taskId = htobe64(pTask->taskId); - break; - } case TDMT_VND_FETCH: { msgSize = sizeof(SResFetchReq); msg = taosMemoryCalloc(1, msgSize); diff --git a/source/libs/scheduler/src/schUtil.c b/source/libs/scheduler/src/schUtil.c index cec754bdcdc76a83bf637448c4303037b8b74447..81c95ea976e0c685fa1585df6dbb42bed75fd0c8 100644 --- a/source/libs/scheduler/src/schUtil.c +++ b/source/libs/scheduler/src/schUtil.c @@ -41,7 +41,7 @@ uint64_t schGenUUID(void) { static int32_t requestSerialId = 0; if (hashId == 0) { - char uid[64]; + char uid[64] = {0}; int32_t code = taosGetSystemUUID(uid, tListLen(uid)); if (code != TSDB_CODE_SUCCESS) { qError("Failed to get the system uid, reason:%s", tstrerror(TAOS_SYSTEM_ERROR(errno))); diff --git a/source/libs/scheduler/src/scheduler.c b/source/libs/scheduler/src/scheduler.c index 3ecc4f4a301fa3a36b17a1d920bcf1c6352507b1..522bd8044d43709f9139092dd705ab123c7799ea 100644 --- a/source/libs/scheduler/src/scheduler.c +++ b/source/libs/scheduler/src/scheduler.c @@ -141,7 +141,7 @@ int32_t schedulerGetTasksStatus(int64_t job, SArray *pSub) { if (pJob->status < JOB_TASK_STATUS_NOT_START || pJob->levelNum <= 0 || NULL == pJob->levels) { qDebug("job not initialized or not executable job, refId:%" PRIx64, job); - SCH_ERR_RET(TSDB_CODE_SCH_STATUS_ERROR); + SCH_ERR_JRET(TSDB_CODE_SCH_STATUS_ERROR); } for (int32_t i = pJob->levelNum - 1; i >= 0; --i) { @@ -155,7 +155,11 @@ int32_t schedulerGetTasksStatus(int64_t job, SArray *pSub) { } } - return TSDB_CODE_SUCCESS; +_return: + + schReleaseJob(job); + + SCH_RET(code); } int32_t scheduleCancelJob(int64_t job) { diff --git a/source/libs/scheduler/test/schedulerTests.cpp b/source/libs/scheduler/test/schedulerTests.cpp index ec5d74372d2df681ce20c58a69dba22eaf7f8239..4bf114ad8febb30c4fac89a391f8e0bc3389a60c 100644 --- a/source/libs/scheduler/test/schedulerTests.cpp +++ b/source/libs/scheduler/test/schedulerTests.cpp @@ -79,7 +79,7 @@ void schtInitLogFile() { tsAsyncLog = 0; qDebugFlag = 159; - strcpy(tsLogDir, "/var/log/taos"); + strcpy(tsLogDir, TD_LOG_DIR_PATH); if (taosInitLog(defaultLogFileNamePrefix, maxLogFileNum) < 0) { printf("failed to open log file in directory:%s\n", tsLogDir); @@ -542,27 +542,6 @@ void* schtRunJobThread(void *aa) { 
pIter = taosHashIterate(execTasks, pIter); } - - param = (SSchTaskCallbackParam *)taosMemoryCalloc(1, sizeof(*param)); - param->refId = queryJobRefId; - param->queryId = pJob->queryId; - - pIter = taosHashIterate(execTasks, NULL); - while (pIter) { - SSchTask *task = (SSchTask *)pIter; - - param->taskId = task->taskId; - SResReadyRsp rsp = {0}; - dataBuf.pData = &rsp; - dataBuf.len = sizeof(rsp); - - code = schHandleCallback(param, &dataBuf, TDMT_VND_RES_READY_RSP, 0); - assert(code == 0 || code); - - pIter = taosHashIterate(execTasks, pIter); - } - - param = (SSchTaskCallbackParam *)taosMemoryCalloc(1, sizeof(*param)); param->refId = queryJobRefId; param->queryId = pJob->queryId; @@ -583,25 +562,6 @@ void* schtRunJobThread(void *aa) { } - param = (SSchTaskCallbackParam *)taosMemoryCalloc(1, sizeof(*param)); - param->refId = queryJobRefId; - param->queryId = pJob->queryId; - - pIter = taosHashIterate(execTasks, NULL); - while (pIter) { - SSchTask *task = (SSchTask *)pIter; - - param->taskId = task->taskId - 1; - SResReadyRsp rsp = {0}; - dataBuf.pData = &rsp; - dataBuf.len = sizeof(rsp); - - code = schHandleCallback(param, &dataBuf, TDMT_VND_RES_READY_RSP, 0); - assert(code == 0 || code); - - pIter = taosHashIterate(execTasks, pIter); - } - while (true) { if (queryDone) { break; @@ -701,17 +661,6 @@ TEST(queryTest, normalCase) { pIter = taosHashIterate(pJob->execTasks, pIter); } - pIter = taosHashIterate(pJob->execTasks, NULL); - while (pIter) { - SSchTask *task = *(SSchTask **)pIter; - - SResReadyRsp rsp = {0}; - code = schHandleResponseMsg(pJob, task, TDMT_VND_RES_READY_RSP, (char *)&rsp, sizeof(rsp), 0); - printf("code:%d", code); - ASSERT_EQ(code, 0); - pIter = taosHashIterate(pJob->execTasks, pIter); - } - pIter = taosHashIterate(pJob->execTasks, NULL); while (pIter) { SSchTask *task = *(SSchTask **)pIter; @@ -723,17 +672,6 @@ TEST(queryTest, normalCase) { pIter = taosHashIterate(pJob->execTasks, pIter); } - pIter = taosHashIterate(pJob->execTasks, NULL); - while (pIter) { - SSchTask *task = *(SSchTask **)pIter; - - SResReadyRsp rsp = {0}; - code = schHandleResponseMsg(pJob, task, TDMT_VND_RES_READY_RSP, (char *)&rsp, sizeof(rsp), 0); - ASSERT_EQ(code, 0); - - pIter = taosHashIterate(pJob->execTasks, pIter); - } - while (true) { if (queryDone) { break; @@ -804,19 +742,8 @@ TEST(queryTest, readyFirstCase) { SSchJob *pJob = schAcquireJob(job); - - void *pIter = taosHashIterate(pJob->execTasks, NULL); - while (pIter) { - SSchTask *task = *(SSchTask **)pIter; - - SResReadyRsp rsp = {0}; - code = schHandleResponseMsg(pJob, task, TDMT_VND_RES_READY_RSP, (char *)&rsp, sizeof(rsp), 0); - printf("code:%d", code); - ASSERT_EQ(code, 0); - pIter = taosHashIterate(pJob->execTasks, pIter); - } - pIter = taosHashIterate(pJob->execTasks, NULL); + void *pIter = taosHashIterate(pJob->execTasks, NULL); while (pIter) { SSchTask *task = *(SSchTask **)pIter; @@ -827,17 +754,6 @@ TEST(queryTest, readyFirstCase) { pIter = taosHashIterate(pJob->execTasks, pIter); } - pIter = taosHashIterate(pJob->execTasks, NULL); - while (pIter) { - SSchTask *task = *(SSchTask **)pIter; - - SResReadyRsp rsp = {0}; - code = schHandleResponseMsg(pJob, task, TDMT_VND_RES_READY_RSP, (char *)&rsp, sizeof(rsp), 0); - ASSERT_EQ(code, 0); - - pIter = taosHashIterate(pJob->execTasks, pIter); - } - pIter = taosHashIterate(pJob->execTasks, NULL); while (pIter) { SSchTask *task = *(SSchTask **)pIter; @@ -942,10 +858,6 @@ TEST(queryTest, flowCtrlCase) { SQueryTableRsp rsp = {0}; code = schHandleResponseMsg(pJob, task, TDMT_VND_QUERY_RSP, 
(char *)&rsp, sizeof(rsp), 0); - ASSERT_EQ(code, 0); - } else if (task->lastMsgType == TDMT_VND_RES_READY) { - SResReadyRsp rsp = {0}; - code = schHandleResponseMsg(pJob, task, TDMT_VND_RES_READY_RSP, (char *)&rsp, sizeof(rsp), 0); ASSERT_EQ(code, 0); } else { qDone = true; diff --git a/source/libs/stream/src/tstream.c b/source/libs/stream/src/tstream.c index dc0fbf2bbef00c5bb81bc03182e31edb1f729894..7d406a71441faa4c8c8b15ed1b27164da6ed6f4e 100644 --- a/source/libs/stream/src/tstream.c +++ b/source/libs/stream/src/tstream.c @@ -35,6 +35,24 @@ void* streamDataBlockDecode(const void* buf, SStreamDataBlock* pInput) { return (void*)buf; } +SStreamDataSubmit* streamSubmitRefClone(SStreamDataSubmit* pSubmit) { + SStreamDataSubmit* pSubmitClone = taosAllocateQitem(sizeof(SStreamDataSubmit), DEF_QITEM); + if (pSubmitClone == NULL) { + return NULL; + } + streamDataSubmitRefInc(pSubmit); + memcpy(pSubmitClone, pSubmit, sizeof(SStreamDataSubmit)); + return pSubmitClone; +} + +static int32_t streamBuildDispatchMsg(SStreamTask* pTask, SArray* data, SRpcMsg* pMsg, SEpSet** ppEpSet) { + SStreamDispatchReq req = { + .streamId = pTask->streamId, + .data = data, + }; + return 0; +} + static int32_t streamBuildExecMsg(SStreamTask* pTask, SArray* data, SRpcMsg* pMsg, SEpSet** ppEpSet) { SStreamTaskExecReq req = { .streamId = pTask->streamId, @@ -59,7 +77,7 @@ static int32_t streamBuildExecMsg(SStreamTask* pTask, SArray* data, SRpcMsg* pMs } else if (pTask->dispatchType == TASK_DISPATCH__SHUFFLE) { // TODO use general name rule of schemaless - char ctbName[TSDB_TABLE_FNAME_LEN + 22]; + char ctbName[TSDB_TABLE_FNAME_LEN + 22] = {0}; // all groupId must be the same in an array SSDataBlock* pBlock = taosArrayGet(data, 0); sprintf(ctbName, "%s:%ld", pTask->shuffleDispatcher.stbFullName, pBlock->info.groupId); @@ -141,13 +159,13 @@ static int32_t streamTaskExecImpl(SStreamTask* pTask, void* data, SArray* pRes) SStreamDataSubmit* pSubmit = (SStreamDataSubmit*)data; ASSERT(pSubmit->type == STREAM_INPUT__DATA_SUBMIT); - qSetStreamInput(exec, pSubmit->data, STREAM_DATA_TYPE_SUBMIT_BLOCK); + qSetStreamInput(exec, pSubmit->data, STREAM_DATA_TYPE_SUBMIT_BLOCK, false); } else if (pTask->inputType == STREAM_INPUT__DATA_BLOCK) { SStreamDataBlock* pBlock = (SStreamDataBlock*)data; ASSERT(pBlock->type == STREAM_INPUT__DATA_BLOCK); SArray* blocks = pBlock->blocks; - qSetMultiStreamInput(exec, blocks->pData, blocks->size, STREAM_DATA_TYPE_SSDATA_BLOCK); + qSetMultiStreamInput(exec, blocks->pData, blocks->size, STREAM_DATA_TYPE_SSDATA_BLOCK, false); } // exec @@ -199,7 +217,6 @@ int32_t streamExec(SStreamTask* pTask, SMsgCb* pMsgCb) { if (pRes == NULL) return -1; while (1) { int8_t execStatus = atomic_val_compare_exchange_8(&pTask->status, TASK_STATUS__IDLE, TASK_STATUS__EXECUTING); - void* exec = pTask->exec.executor; if (execStatus == TASK_STATUS__IDLE) { // first run, from qall, handle failure from last exec pRes = streamExecForQall(pTask, pRes); @@ -407,6 +424,26 @@ int32_t streamProcessRecoverRsp(SStreamTask* pTask, SStreamTaskRecoverRsp* pRsp) return 0; } +int32_t tEncodeStreamDispatchReq(SEncoder* pEncoder, const SStreamDispatchReq* pReq) { + if (tStartEncode(pEncoder) < 0) return -1; + if (tEncodeI64(pEncoder, pReq->streamId) < 0) return -1; + if (tEncodeI32(pEncoder, pReq->taskId) < 0) return -1; + if (tEncodeI32(pEncoder, pReq->sourceTaskId) < 0) return -1; + if (tEncodeI32(pEncoder, pReq->sourceVg) < 0) return -1; + tEndEncode(pEncoder); + return 0; +} + +int32_t tDecodeStreamDispatchReq(SDecoder* pDecoder, 
SStreamDispatchReq* pReq) { + if (tStartDecode(pDecoder) < 0) return -1; + if (tDecodeI64(pDecoder, &pReq->streamId) < 0) return -1; + if (tDecodeI32(pDecoder, &pReq->taskId) < 0) return -1; + if (tDecodeI32(pDecoder, &pReq->sourceTaskId) < 0) return -1; + if (tDecodeI32(pDecoder, &pReq->sourceVg) < 0) return -1; + tEndDecode(pDecoder); + return 0; +} + int32_t tEncodeSStreamTaskExecReq(void** buf, const SStreamTaskExecReq* pReq) { int32_t tlen = 0; tlen += taosEncodeFixedI64(buf, pReq->streamId); diff --git a/source/libs/stream/src/tstreamUpdate.c b/source/libs/stream/src/tstreamUpdate.c index 75319a2354f638d6dab9d871bdd402cfb15ee2c4..ada391b40a76af148e07789375a756a6590648b3 100644 --- a/source/libs/stream/src/tstreamUpdate.c +++ b/source/libs/stream/src/tstreamUpdate.c @@ -42,7 +42,7 @@ static void windowSBfAdd(SUpdateInfo *pInfo, uint64_t count) { } static void windowSBfDelete(SUpdateInfo *pInfo, uint64_t count) { - if (count < pInfo->numSBFs - 1) { + if (count < pInfo->numSBFs) { for (uint64_t i = 0; i < count; ++i) { SScalableBf *pTsSBFs = taosArrayGetP(pInfo->pTsSBFs, 0); tScalableBfDestroy(pTsSBFs); @@ -72,12 +72,14 @@ static int64_t adjustInterval(int64_t interval, int32_t precision) { return val; } -static int64_t adjustWatermark(int64_t interval, int64_t watermark) { - if (watermark <= 0 || watermark > MAX_NUM_SCALABLE_BF * interval) { - watermark = MAX_NUM_SCALABLE_BF * interval; - } else if (watermark < MIN_NUM_SCALABLE_BF * interval) { - watermark = MIN_NUM_SCALABLE_BF * interval; - } +static int64_t adjustWatermark(int64_t adjInterval, int64_t originInt, int64_t watermark) { + if (watermark <= adjInterval) { + watermark = TMAX(originInt/adjInterval, 1) * adjInterval; + } else if (watermark > MAX_NUM_SCALABLE_BF * adjInterval) { + watermark = MAX_NUM_SCALABLE_BF * adjInterval; + }/* else if (watermark < MIN_NUM_SCALABLE_BF * adjInterval) { + watermark = MIN_NUM_SCALABLE_BF * adjInterval; + }*/ // Todo(liuyao) save window info to tdb return watermark; } @@ -94,7 +96,7 @@ SUpdateInfo *updateInfoInit(int64_t interval, int32_t precision, int64_t waterma pInfo->pTsSBFs = NULL; pInfo->minTS = -1; pInfo->interval = adjustInterval(interval, precision); - pInfo->watermark = adjustWatermark(pInfo->interval, watermark); + pInfo->watermark = adjustWatermark(pInfo->interval, interval, watermark); uint64_t bfSize = (uint64_t)(pInfo->watermark / pInfo->interval); @@ -149,13 +151,18 @@ static SScalableBf *getSBf(SUpdateInfo *pInfo, TSKEY ts) { bool updateInfoIsUpdated(SUpdateInfo *pInfo, tb_uid_t tableId, TSKEY ts) { int32_t res = TSDB_CODE_FAILED; uint64_t index = ((uint64_t)tableId) % pInfo->numBuckets; + TSKEY maxTs = *(TSKEY *)taosArrayGet(pInfo->pTsBuckets, index); + if (ts < maxTs - pInfo->watermark) { + // this window has been closed. 
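// [editor annotation — worked example, not part of the patch] The test
// above is the watermark horizon check, now performed *before* the
// scalable bloom-filter lookup, so a late event is reported as an update
// to an already-closed window (return true, next line) without ever being
// inserted into the SBF. For instance, with watermark = 30s and this
// bucket's maxTs = 100s the horizon is 70s:
//   ts = 65  ->  65 < 100 - 30, window already closed, return true at once
//   ts = 75  ->  falls through: put ts into the SBF, then compare to maxTs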
+ return true; + } + SScalableBf *pSBf = getSBf(pInfo, ts); // pSBf may be a null pointer if (pSBf) { res = tScalableBfPut(pSBf, &ts, sizeof(TSKEY)); } - TSKEY maxTs = *(TSKEY *)taosArrayGet(pInfo->pTsBuckets, index); if (maxTs < ts) { taosArraySet(pInfo->pTsBuckets, index, &ts); return false; diff --git a/source/libs/sync/inc/syncInt.h b/source/libs/sync/inc/syncInt.h index 2e71745f61eff205143581b322b3ee1032524361..4100aa021672602bd55738067febe80db7790e11 100644 --- a/source/libs/sync/inc/syncInt.h +++ b/source/libs/sync/inc/syncInt.h @@ -55,6 +55,8 @@ typedef struct SVotesRespond SVotesRespond; typedef struct SSyncIndexMgr SSyncIndexMgr; typedef struct SRaftCfg SRaftCfg; typedef struct SSyncRespMgr SSyncRespMgr; +typedef struct SSyncSnapshotSender SSyncSnapshotSender; +typedef struct SSyncSnapshotReceiver SSyncSnapshotReceiver; typedef struct SSyncNode { // init by SSyncInfo @@ -148,9 +150,11 @@ typedef struct SSyncNode { SSyncRespMgr* pSyncRespMgr; // restore state - bool restoreFinish; // sem_t restoreSem; - SSnapshot* pSnapshot; + bool restoreFinish; + SSnapshot* pSnapshot; + SSyncSnapshotSender* pSender; + SSyncSnapshotReceiver* pReceiver; } SSyncNode; diff --git a/source/libs/sync/inc/syncRaftCfg.h b/source/libs/sync/inc/syncRaftCfg.h index f4c857bb06068eaec7e9a1d9324b47b505e51eba..1061e8bdc4b248511eb3a580b76056cbc830f02b 100644 --- a/source/libs/sync/inc/syncRaftCfg.h +++ b/source/libs/sync/inc/syncRaftCfg.h @@ -27,6 +27,8 @@ extern "C" { #include "syncInt.h" #include "taosdef.h" +#define CONFIG_FILE_LEN 1024 + typedef struct SRaftCfg { SSyncCfg cfg; TdFilePtr pFile; diff --git a/source/libs/sync/inc/syncRaftLog.h b/source/libs/sync/inc/syncRaftLog.h index 7db62e14d597608f04fd313e597251ec2503f933..df5cd3f36c4138e608e70bd22972d54baff48a50 100644 --- a/source/libs/sync/inc/syncRaftLog.h +++ b/source/libs/sync/inc/syncRaftLog.h @@ -32,20 +32,21 @@ typedef struct SSyncLogStoreData { SWal* pWal; } SSyncLogStoreData; -SSyncLogStore* logStoreCreate(SSyncNode* pSyncNode); -void logStoreDestory(SSyncLogStore* pLogStore); -int32_t logStoreAppendEntry(SSyncLogStore* pLogStore, SSyncRaftEntry* pEntry); -SSyncRaftEntry* logStoreGetEntry(SSyncLogStore* pLogStore, SyncIndex index); -int32_t logStoreTruncate(SSyncLogStore* pLogStore, SyncIndex fromIndex); -SyncIndex logStoreLastIndex(SSyncLogStore* pLogStore); -SyncTerm logStoreLastTerm(SSyncLogStore* pLogStore); -int32_t logStoreUpdateCommitIndex(SSyncLogStore* pLogStore, SyncIndex index); -SyncIndex logStoreGetCommitIndex(SSyncLogStore* pLogStore); -SSyncRaftEntry* logStoreGetLastEntry(SSyncLogStore* pLogStore); -cJSON* logStore2Json(SSyncLogStore* pLogStore); -char* logStore2Str(SSyncLogStore* pLogStore); -cJSON* logStoreSimple2Json(SSyncLogStore* pLogStore); -char* logStoreSimple2Str(SSyncLogStore* pLogStore); +SSyncLogStore* logStoreCreate(SSyncNode* pSyncNode); +void logStoreDestory(SSyncLogStore* pLogStore); +cJSON* logStore2Json(SSyncLogStore* pLogStore); +char* logStore2Str(SSyncLogStore* pLogStore); +cJSON* logStoreSimple2Json(SSyncLogStore* pLogStore); +char* logStoreSimple2Str(SSyncLogStore* pLogStore); + +// SSyncRaftEntry* logStoreGetLastEntry(SSyncLogStore* pLogStore); +// SyncIndex logStoreLastIndex(SSyncLogStore* pLogStore); +// SyncTerm logStoreLastTerm(SSyncLogStore* pLogStore); +// SSyncRaftEntry* logStoreGetEntry(SSyncLogStore* pLogStore, SyncIndex index); +// int32_t logStoreAppendEntry(SSyncLogStore* pLogStore, SSyncRaftEntry* pEntry); +// int32_t logStoreTruncate(SSyncLogStore* pLogStore, SyncIndex fromIndex); +// int32_t 
logStoreUpdateCommitIndex(SSyncLogStore* pLogStore, SyncIndex index); +// SyncIndex logStoreGetCommitIndex(SSyncLogStore* pLogStore); // for debug void logStorePrint(SSyncLogStore* pLogStore); diff --git a/source/libs/sync/inc/syncSnapshot.h b/source/libs/sync/inc/syncSnapshot.h index fd2119ce659b553124aa9a310c3790b29363628c..43d1c0c0c38bc9836fdb9e3210f141af44376700 100644 --- a/source/libs/sync/inc/syncSnapshot.h +++ b/source/libs/sync/inc/syncSnapshot.h @@ -23,11 +23,42 @@ extern "C" { #include #include #include +#include "cJSON.h" #include "syncInt.h" #include "taosdef.h" -int32_t takeSnapshot(SSyncFSM *pFsm, SSnapshot *pSnapshot); -int32_t restoreSnapshot(SSyncFSM *pFsm, SSnapshot *pSnapshot); +typedef struct SSyncSnapshotSender { + int32_t sending; + int32_t received; + bool finish; + void * pCurrentBlock; + int32_t blockLen; + int64_t sendingMS; + SSnapshot *pSnapshot; + SSyncNode *pSyncNode; +} SSyncSnapshotSender; + +SSyncSnapshotSender *snapshotSenderCreate(SSyncNode *pSyncNode); +void snapshotSenderDestroy(SSyncSnapshotSender *pSender); +int32_t snapshotSend(SSyncSnapshotSender *pSender); +cJSON * snapshotSender2Json(SSyncSnapshotSender *pSender); +char * snapshotSender2Str(SSyncSnapshotSender *pSender); + +typedef struct SSyncSnapshotReceiver { + bool start; + int32_t received; + int32_t progressIndex; + void * pCurrentBlock; + int32_t len; + SSnapshot *pSnapshot; + SSyncNode *pSyncNode; +} SSyncSnapshotReceiver; + +SSyncSnapshotReceiver *snapshotReceiverCreate(SSyncNode *pSyncNode); +void snapshotReceiverDestroy(SSyncSnapshotReceiver *pReceiver); +int32_t snapshotReceive(SSyncSnapshotReceiver *pReceiver); +cJSON * snapshotReceiver2Json(SSyncSnapshotReceiver *pReceiver); +char * snapshotReceiver2Str(SSyncSnapshotReceiver *pReceiver); #ifdef __cplusplus } diff --git a/source/libs/sync/src/syncAppendEntries.c b/source/libs/sync/src/syncAppendEntries.c index 008bc00dbc0590a98c6b5a77c0cb6977bbdcba8d..3afe7b15e213c0da3760c7a8ef1f313d145cd31f 100644 --- a/source/libs/sync/src/syncAppendEntries.c +++ b/source/libs/sync/src/syncAppendEntries.c @@ -89,7 +89,7 @@ int32_t syncNodeOnAppendEntriesCb(SSyncNode* ths, SyncAppendEntries* pMsg) { int32_t ret = 0; - char logBuf[128]; + char logBuf[128] = {0}; snprintf(logBuf, sizeof(logBuf), "==syncNodeOnAppendEntriesCb== term:%lu", ths->pRaftStore->currentTerm); syncAppendEntriesLog2(logBuf, pMsg); @@ -107,7 +107,7 @@ int32_t syncNodeOnAppendEntriesCb(SSyncNode* ths, SyncAppendEntries* pMsg) { SyncTerm localPreLogTerm = 0; if (pMsg->prevLogIndex >= SYNC_INDEX_BEGIN && pMsg->prevLogIndex <= ths->pLogStore->getLastIndex(ths->pLogStore)) { - SSyncRaftEntry* pEntry = logStoreGetEntry(ths->pLogStore, pMsg->prevLogIndex); + SSyncRaftEntry* pEntry = ths->pLogStore->getEntry(ths->pLogStore, pMsg->prevLogIndex); assert(pEntry != NULL); localPreLogTerm = pEntry->term; syncEntryDestory(pEntry); @@ -175,7 +175,7 @@ int32_t syncNodeOnAppendEntriesCb(SSyncNode* ths, SyncAppendEntries* pMsg) { bool conflict = false; SyncIndex extraIndex = pMsg->prevLogIndex + 1; - SSyncRaftEntry* pExtraEntry = logStoreGetEntry(ths->pLogStore, extraIndex); + SSyncRaftEntry* pExtraEntry = ths->pLogStore->getEntry(ths->pLogStore, extraIndex); assert(pExtraEntry != NULL); SSyncRaftEntry* pAppendEntry = syncEntryDeserialize(pMsg->data, pMsg->dataLen); @@ -197,7 +197,7 @@ int32_t syncNodeOnAppendEntriesCb(SSyncNode* ths, SyncAppendEntries* pMsg) { // notice! reverse roll back! 
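// [editor annotation — not part of the patch] The loop below walks the
// conflicting suffix newest-first (delEnd down to delBegin) so that the
// FSM's FpRollBackCb undoes entries in the inverse of the order they were
// applied. Note this hunk also replaces direct logStoreGetEntry() calls
// with the pLogStore->getEntry function-pointer interface; the concrete
// WAL-backed implementations are made static inside syncRaftLog.c further
// down, keeping callers decoupled from the storage layer.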
for (SyncIndex index = delEnd; index >= delBegin; --index) { if (ths->pFsm->FpRollBackCb != NULL) { - SSyncRaftEntry* pRollBackEntry = logStoreGetEntry(ths->pLogStore, index); + SSyncRaftEntry* pRollBackEntry = ths->pLogStore->getEntry(ths->pLogStore, index); assert(pRollBackEntry != NULL); // if (pRollBackEntry->msgType != TDMT_VND_SYNC_NOOP) { @@ -365,7 +365,7 @@ int32_t syncNodeOnAppendEntriesCb(SSyncNode* ths, SyncAppendEntries* pMsg) { } SReConfigCbMeta cbMeta = {0}; - bool isDrop; + bool isDrop; // I am in newConfig if (hit) { @@ -388,7 +388,7 @@ int32_t syncNodeOnAppendEntriesCb(SSyncNode* ths, SyncAppendEntries* pMsg) { } // always call FpReConfigCb - if (ths->pFsm->FpReConfigCb != NULL) { + if (ths->pFsm->FpReConfigCb != NULL) { cbMeta.code = 0; cbMeta.currentTerm = ths->pRaftStore->currentTerm; cbMeta.index = pEntry->index; diff --git a/source/libs/sync/src/syncAppendEntriesReply.c b/source/libs/sync/src/syncAppendEntriesReply.c index 77d85e29151205edd31deed1c40f5dbffca90849..4e6d870e194a223bd35d5671dc17532bd5e8626e 100644 --- a/source/libs/sync/src/syncAppendEntriesReply.c +++ b/source/libs/sync/src/syncAppendEntriesReply.c @@ -38,7 +38,7 @@ int32_t syncNodeOnAppendEntriesReplyCb(SSyncNode* ths, SyncAppendEntriesReply* pMsg) { int32_t ret = 0; - char logBuf[128]; + char logBuf[128] = {0}; snprintf(logBuf, sizeof(logBuf), "==syncNodeOnAppendEntriesReplyCb== term:%lu", ths->pRaftStore->currentTerm); syncAppendEntriesReplyLog2(logBuf, pMsg); @@ -57,7 +57,7 @@ int32_t syncNodeOnAppendEntriesReplyCb(SSyncNode* ths, SyncAppendEntriesReply* p // } if (pMsg->term > ths->pRaftStore->currentTerm) { - char logBuf[128]; + char logBuf[128] = {0}; snprintf(logBuf, sizeof(logBuf), "syncNodeOnAppendEntriesReplyCb error term, receive:%lu current:%lu", pMsg->term, ths->pRaftStore->currentTerm); syncNodeLog2(logBuf, ths); diff --git a/source/libs/sync/src/syncIO.c b/source/libs/sync/src/syncIO.c index 39760c32e83eddc060aeb9669fb252eaca816e54..e30a39e6342c4b7df77ee9cfdbe4f29333e36c16 100644 --- a/source/libs/sync/src/syncIO.c +++ b/source/libs/sync/src/syncIO.c @@ -74,7 +74,7 @@ int32_t syncIOSendMsg(const SEpSet *pEpSet, SRpcMsg *pMsg) { { syncUtilMsgNtoH(pMsg->pCont); - char logBuf[256]; + char logBuf[256] = {0}; snprintf(logBuf, sizeof(logBuf), "==syncIOSendMsg== %s:%d", pEpSet->eps[0].fqdn, pEpSet->eps[0].port); syncRpcMsgLog2(logBuf, pMsg); @@ -89,7 +89,7 @@ int32_t syncIOSendMsg(const SEpSet *pEpSet, SRpcMsg *pMsg) { int32_t syncIOEqMsg(const SMsgCb *msgcb, SRpcMsg *pMsg) { int32_t ret = 0; - char logBuf[128]; + char logBuf[128] = {0}; syncRpcMsgLog2((char *)"==syncIOEqMsg==", pMsg); SRpcMsg *pTemp; diff --git a/source/libs/sync/src/syncIndexMgr.c b/source/libs/sync/src/syncIndexMgr.c index 5809cedb9038758744d20b8e6ee2270bd0720e47..4d556d21dde7e56c2048cc314f86ad0a8949bc37 100644 --- a/source/libs/sync/src/syncIndexMgr.c +++ b/source/libs/sync/src/syncIndexMgr.c @@ -60,7 +60,9 @@ void syncIndexMgrSetIndex(SSyncIndexMgr *pSyncIndexMgr, const SRaftId *pRaftId, return; } } - assert(0); + + // maybe config change + // assert(0); } SyncIndex syncIndexMgrGetIndex(SSyncIndexMgr *pSyncIndexMgr, const SRaftId *pRaftId) { @@ -74,7 +76,7 @@ SyncIndex syncIndexMgrGetIndex(SSyncIndexMgr *pSyncIndexMgr, const SRaftId *pRaf } cJSON *syncIndexMgr2Json(SSyncIndexMgr *pSyncIndexMgr) { - char u64buf[128]; + char u64buf[128] = {0}; cJSON *pRoot = cJSON_CreateObject(); if (pSyncIndexMgr != NULL) { diff --git a/source/libs/sync/src/syncMain.c b/source/libs/sync/src/syncMain.c index 
99aac7991a47e18d72ee955d55af491f49b87cb8..66806dbd0c8e6b40ac9331884ff0447263f0eaaa 100644 --- a/source/libs/sync/src/syncMain.c +++ b/source/libs/sync/src/syncMain.c @@ -815,7 +815,7 @@ int32_t syncNodeSendMsgByInfo(const SNodeInfo* nodeInfo, SSyncNode* pSyncNode, S } cJSON* syncNode2Json(const SSyncNode* pSyncNode) { - char u64buf[128]; + char u64buf[128] = {0}; cJSON* pRoot = cJSON_CreateObject(); if (pSyncNode != NULL) { @@ -981,6 +981,7 @@ char* syncNode2SimpleStr(const SSyncNode* pSyncNode) { } void syncNodeUpdateConfig(SSyncNode* pSyncNode, SSyncCfg* newConfig, bool* isDrop) { + SSyncCfg oldConfig = pSyncNode->pRaftCfg->cfg; pSyncNode->pRaftCfg->cfg = *newConfig; int32_t ret = 0; @@ -1014,6 +1015,15 @@ void syncNodeUpdateConfig(SSyncNode* pSyncNode, SSyncCfg* newConfig, bool* isDro // isDrop *isDrop = true; + bool IamInOld, IamInNew; + for (int i = 0; i < oldConfig.replicaNum; ++i) { + if (strcmp((oldConfig.nodeInfo)[i].nodeFqdn, pSyncNode->myNodeInfo.nodeFqdn) == 0 && + (oldConfig.nodeInfo)[i].nodePort == pSyncNode->myNodeInfo.nodePort) { + *isDrop = false; + break; + } + } + for (int i = 0; i < newConfig->replicaNum; ++i) { if (strcmp((newConfig->nodeInfo)[i].nodeFqdn, pSyncNode->myNodeInfo.nodeFqdn) == 0 && (newConfig->nodeInfo)[i].nodePort == pSyncNode->myNodeInfo.nodePort) { @@ -1328,7 +1338,7 @@ static int32_t syncNodeAppendNoop(SSyncNode* ths) { // on message ---- int32_t syncNodeOnPingCb(SSyncNode* ths, SyncPing* pMsg) { // log state - char logBuf[1024]; + char logBuf[1024] = {0}; snprintf(logBuf, sizeof(logBuf), "==syncNodeOnPingCb== vgId:%d, state: %d, %s, term:%lu electTimerLogicClock:%lu, " "electTimerLogicClockUser:%lu, electTimerMS:%d", @@ -1440,12 +1450,12 @@ static void syncFreeNode(void* param) { const char* syncStr(ESyncState state) { switch (state) { case TAOS_SYNC_STATE_FOLLOWER: - return "FOLLOWER"; + return "follower"; case TAOS_SYNC_STATE_CANDIDATE: - return "CANDIDATE"; + return "candidate"; case TAOS_SYNC_STATE_LEADER: - return "LEADER"; + return "leader"; default: - return "ERROR"; + return "error"; } } diff --git a/source/libs/sync/src/syncMessage.c b/source/libs/sync/src/syncMessage.c index 57cbdaaf795b025af5f2aa36108b28845c91e1b7..fae069f2e6b13c0073c6309f889dc7f8f92c8c6e 100644 --- a/source/libs/sync/src/syncMessage.c +++ b/source/libs/sync/src/syncMessage.c @@ -215,7 +215,7 @@ SyncTimeout* syncTimeoutFromRpcMsg2(const SRpcMsg* pRpcMsg) { } cJSON* syncTimeout2Json(const SyncTimeout* pMsg) { - char u64buf[128]; + char u64buf[128] = {0}; cJSON* pRoot = cJSON_CreateObject(); if (pMsg != NULL) { @@ -442,7 +442,7 @@ SyncPing* syncPingFromRpcMsg2(const SRpcMsg* pRpcMsg) { } cJSON* syncPing2Json(const SyncPing* pMsg) { - char u64buf[128]; + char u64buf[128] = {0}; cJSON* pRoot = cJSON_CreateObject(); if (pMsg != NULL) { @@ -456,7 +456,7 @@ cJSON* syncPing2Json(const SyncPing* pMsg) { { uint64_t u64 = pMsg->srcId.addr; cJSON* pTmp = pSrcId; - char host[128]; + char host[128] = {0}; uint16_t port; syncUtilU642Addr(u64, host, sizeof(host), &port); cJSON_AddStringToObject(pTmp, "addr_host", host); @@ -471,7 +471,7 @@ cJSON* syncPing2Json(const SyncPing* pMsg) { { uint64_t u64 = pMsg->destId.addr; cJSON* pTmp = pDestId; - char host[128]; + char host[128] = {0}; uint16_t port; syncUtilU642Addr(u64, host, sizeof(host), &port); cJSON_AddStringToObject(pTmp, "addr_host", host); @@ -702,7 +702,7 @@ SyncPingReply* syncPingReplyFromRpcMsg2(const SRpcMsg* pRpcMsg) { } cJSON* syncPingReply2Json(const SyncPingReply* pMsg) { - char u64buf[128]; + char u64buf[128] = {0}; cJSON* 
pRoot = cJSON_CreateObject(); if (pMsg != NULL) { @@ -716,7 +716,7 @@ cJSON* syncPingReply2Json(const SyncPingReply* pMsg) { { uint64_t u64 = pMsg->srcId.addr; cJSON* pTmp = pSrcId; - char host[128]; + char host[128] = {0}; uint16_t port; syncUtilU642Addr(u64, host, sizeof(host), &port); cJSON_AddStringToObject(pTmp, "addr_host", host); @@ -731,7 +731,7 @@ cJSON* syncPingReply2Json(const SyncPingReply* pMsg) { { uint64_t u64 = pMsg->destId.addr; cJSON* pTmp = pDestId; - char host[128]; + char host[128] = {0}; uint16_t port; syncUtilU642Addr(u64, host, sizeof(host), &port); cJSON_AddStringToObject(pTmp, "addr_host", host); @@ -869,7 +869,7 @@ SyncClientRequest* syncClientRequestFromRpcMsg2(const SRpcMsg* pRpcMsg) { } cJSON* syncClientRequest2Json(const SyncClientRequest* pMsg) { - char u64buf[128]; + char u64buf[128] = {0}; cJSON* pRoot = cJSON_CreateObject(); if (pMsg != NULL) { @@ -995,7 +995,7 @@ SyncRequestVote* syncRequestVoteFromRpcMsg2(const SRpcMsg* pRpcMsg) { } cJSON* syncRequestVote2Json(const SyncRequestVote* pMsg) { - char u64buf[128]; + char u64buf[128] = {0}; cJSON* pRoot = cJSON_CreateObject(); if (pMsg != NULL) { @@ -1009,7 +1009,7 @@ cJSON* syncRequestVote2Json(const SyncRequestVote* pMsg) { { uint64_t u64 = pMsg->srcId.addr; cJSON* pTmp = pSrcId; - char host[128]; + char host[128] = {0}; uint16_t port; syncUtilU642Addr(u64, host, sizeof(host), &port); cJSON_AddStringToObject(pTmp, "addr_host", host); @@ -1023,7 +1023,7 @@ cJSON* syncRequestVote2Json(const SyncRequestVote* pMsg) { { uint64_t u64 = pMsg->destId.addr; cJSON* pTmp = pDestId; - char host[128]; + char host[128] = {0}; uint16_t port; syncUtilU642Addr(u64, host, sizeof(host), &port); cJSON_AddStringToObject(pTmp, "addr_host", host); @@ -1144,7 +1144,7 @@ SyncRequestVoteReply* syncRequestVoteReplyFromRpcMsg2(const SRpcMsg* pRpcMsg) { } cJSON* syncRequestVoteReply2Json(const SyncRequestVoteReply* pMsg) { - char u64buf[128]; + char u64buf[128] = {0}; cJSON* pRoot = cJSON_CreateObject(); if (pMsg != NULL) { @@ -1158,7 +1158,7 @@ cJSON* syncRequestVoteReply2Json(const SyncRequestVoteReply* pMsg) { { uint64_t u64 = pMsg->srcId.addr; cJSON* pTmp = pSrcId; - char host[128]; + char host[128] = {0}; uint16_t port; syncUtilU642Addr(u64, host, sizeof(host), &port); cJSON_AddStringToObject(pTmp, "addr_host", host); @@ -1172,7 +1172,7 @@ cJSON* syncRequestVoteReply2Json(const SyncRequestVoteReply* pMsg) { { uint64_t u64 = pMsg->destId.addr; cJSON* pTmp = pDestId; - char host[128]; + char host[128] = {0}; uint16_t port; syncUtilU642Addr(u64, host, sizeof(host), &port); cJSON_AddStringToObject(pTmp, "addr_host", host); @@ -1292,7 +1292,7 @@ SyncAppendEntries* syncAppendEntriesFromRpcMsg2(const SRpcMsg* pRpcMsg) { } cJSON* syncAppendEntries2Json(const SyncAppendEntries* pMsg) { - char u64buf[128]; + char u64buf[128] = {0}; cJSON* pRoot = cJSON_CreateObject(); if (pMsg != NULL) { @@ -1306,7 +1306,7 @@ cJSON* syncAppendEntries2Json(const SyncAppendEntries* pMsg) { { uint64_t u64 = pMsg->srcId.addr; cJSON* pTmp = pSrcId; - char host[128]; + char host[128] = {0}; uint16_t port; syncUtilU642Addr(u64, host, sizeof(host), &port); cJSON_AddStringToObject(pTmp, "addr_host", host); @@ -1321,7 +1321,7 @@ cJSON* syncAppendEntries2Json(const SyncAppendEntries* pMsg) { { uint64_t u64 = pMsg->destId.addr; cJSON* pTmp = pDestId; - char host[128]; + char host[128] = {0}; uint16_t port; syncUtilU642Addr(u64, host, sizeof(host), &port); cJSON_AddStringToObject(pTmp, "addr_host", host); @@ -1456,7 +1456,7 @@ SyncAppendEntriesReply* 
syncAppendEntriesReplyFromRpcMsg2(const SRpcMsg* pRpcMsg } cJSON* syncAppendEntriesReply2Json(const SyncAppendEntriesReply* pMsg) { - char u64buf[128]; + char u64buf[128] = {0}; cJSON* pRoot = cJSON_CreateObject(); if (pMsg != NULL) { @@ -1470,7 +1470,7 @@ cJSON* syncAppendEntriesReply2Json(const SyncAppendEntriesReply* pMsg) { { uint64_t u64 = pMsg->srcId.addr; cJSON* pTmp = pSrcId; - char host[128]; + char host[128] = {0}; uint16_t port; syncUtilU642Addr(u64, host, sizeof(host), &port); cJSON_AddStringToObject(pTmp, "addr_host", host); @@ -1485,7 +1485,7 @@ cJSON* syncAppendEntriesReply2Json(const SyncAppendEntriesReply* pMsg) { { uint64_t u64 = pMsg->destId.addr; cJSON* pTmp = pDestId; - char host[128]; + char host[128] = {0}; uint16_t port; syncUtilU642Addr(u64, host, sizeof(host), &port); cJSON_AddStringToObject(pTmp, "addr_host", host); @@ -1624,7 +1624,7 @@ void syncApplyMsg2OriginalRpcMsg(const SyncApplyMsg* pMsg, SRpcMsg* pOriginalRpc } cJSON* syncApplyMsg2Json(const SyncApplyMsg* pMsg) { - char u64buf[128]; + char u64buf[128] = {0}; cJSON* pRoot = cJSON_CreateObject(); if (pMsg != NULL) { diff --git a/source/libs/sync/src/syncRaftCfg.c b/source/libs/sync/src/syncRaftCfg.c index daf7992d431d2956dd87bf92ae98355363b44297..3e1931e2c37e626b7ab049299a9b83b8a78a2cf1 100644 --- a/source/libs/sync/src/syncRaftCfg.c +++ b/source/libs/sync/src/syncRaftCfg.c @@ -28,7 +28,7 @@ SRaftCfg *raftCfgOpen(const char *path) { taosLSeekFile(pCfg->pFile, 0, SEEK_SET); - char buf[1024]; + char buf[1024] = {0}; int len = taosReadFile(pCfg->pFile, buf, sizeof(buf)); assert(len > 0); @@ -50,16 +50,24 @@ int32_t raftCfgPersist(SRaftCfg *pRaftCfg) { char *s = raftCfg2Str(pRaftCfg); taosLSeekFile(pRaftCfg->pFile, 0, SEEK_SET); - int64_t ret = taosWriteFile(pRaftCfg->pFile, s, strlen(s) + 1); - assert(ret == strlen(s) + 1); - taosMemoryFree(s); + char buf[CONFIG_FILE_LEN] = {0}; + memset(buf, 0, sizeof(buf)); + ASSERT(strlen(s) + 1 <= CONFIG_FILE_LEN); + snprintf(buf, sizeof(buf), "%s", s); + int64_t ret = taosWriteFile(pRaftCfg->pFile, buf, sizeof(buf)); + assert(ret == sizeof(buf)); + + // int64_t ret = taosWriteFile(pRaftCfg->pFile, s, strlen(s) + 1); + // assert(ret == strlen(s) + 1); + + taosMemoryFree(s); taosFsyncFile(pRaftCfg->pFile); return 0; } cJSON *syncCfg2Json(SSyncCfg *pSyncCfg) { - char u64buf[128]; + char u64buf[128] = {0}; cJSON *pRoot = cJSON_CreateObject(); if (pSyncCfg != NULL) { @@ -162,9 +170,17 @@ int32_t raftCfgCreateFile(SSyncCfg *pCfg, int8_t isStandBy, const char *path) { SRaftCfg raftCfg; raftCfg.cfg = *pCfg; raftCfg.isStandBy = isStandBy; - char * s = raftCfg2Str(&raftCfg); - int64_t ret = taosWriteFile(pFile, s, strlen(s) + 1); - assert(ret == strlen(s) + 1); + char *s = raftCfg2Str(&raftCfg); + + char buf[CONFIG_FILE_LEN] = {0}; + memset(buf, 0, sizeof(buf)); + ASSERT(strlen(s) + 1 <= CONFIG_FILE_LEN); + snprintf(buf, sizeof(buf), "%s", s); + int64_t ret = taosWriteFile(pFile, buf, sizeof(buf)); + assert(ret == sizeof(buf)); + + // int64_t ret = taosWriteFile(pFile, s, strlen(s) + 1); + // assert(ret == strlen(s) + 1); taosMemoryFree(s); taosCloseFile(&pFile); diff --git a/source/libs/sync/src/syncRaftEntry.c b/source/libs/sync/src/syncRaftEntry.c index 21ee35eaf9c276636d754048095d6b2d44f18796..8755f71654382f3913a3c81b6ee1e9b6e91dbb69 100644 --- a/source/libs/sync/src/syncRaftEntry.c +++ b/source/libs/sync/src/syncRaftEntry.c @@ -107,7 +107,7 @@ SSyncRaftEntry* syncEntryDeserialize(const char* buf, uint32_t len) { } cJSON* syncEntry2Json(const SSyncRaftEntry* pEntry) { - char 
u64buf[128]; + char u64buf[128] = {0}; cJSON* pRoot = cJSON_CreateObject(); if (pEntry != NULL) { diff --git a/source/libs/sync/src/syncRaftLog.c b/source/libs/sync/src/syncRaftLog.c index 07a9397a580332f427ab3b206359de3ec0accf40..a6397f8cba24694d6f36847af5e877c72bd1a920 100644 --- a/source/libs/sync/src/syncRaftLog.c +++ b/source/libs/sync/src/syncRaftLog.c @@ -16,6 +16,15 @@ #include "syncRaftLog.h" #include "wal.h" +static SSyncRaftEntry* logStoreGetLastEntry(SSyncLogStore* pLogStore); +static SyncIndex logStoreLastIndex(SSyncLogStore* pLogStore); +static SyncTerm logStoreLastTerm(SSyncLogStore* pLogStore); +static SSyncRaftEntry* logStoreGetEntry(SSyncLogStore* pLogStore, SyncIndex index); +static int32_t logStoreAppendEntry(SSyncLogStore* pLogStore, SSyncRaftEntry* pEntry); +static int32_t logStoreTruncate(SSyncLogStore* pLogStore, SyncIndex fromIndex); +static int32_t logStoreUpdateCommitIndex(SSyncLogStore* pLogStore, SyncIndex index); +static SyncIndex logStoreGetCommitIndex(SSyncLogStore* pLogStore); + SSyncLogStore* logStoreCreate(SSyncNode* pSyncNode) { SSyncLogStore* pLogStore = taosMemoryMalloc(sizeof(SSyncLogStore)); assert(pLogStore != NULL); @@ -78,7 +87,9 @@ SSyncRaftEntry* logStoreGetEntry(SSyncLogStore* pLogStore, SyncIndex index) { if (index >= SYNC_INDEX_BEGIN && index <= logStoreLastIndex(pLogStore)) { SWalReadHandle* pWalHandle = walOpenReadHandle(pWal); - int32_t code = walReadWithHandle(pWalHandle, index); + ASSERT(pWalHandle != NULL); + + int32_t code = walReadWithHandle(pWalHandle, index); if (code != 0) { int32_t err = terrno; const char* errStr = tstrerror(err); @@ -179,7 +190,7 @@ SSyncRaftEntry* logStoreGetLastEntry(SSyncLogStore* pLogStore) { } cJSON* logStore2Json(SSyncLogStore* pLogStore) { - char u64buf[128]; + char u64buf[128] = {0}; SSyncLogStoreData* pData = (SSyncLogStoreData*)pLogStore->data; cJSON* pRoot = cJSON_CreateObject(); @@ -216,7 +227,7 @@ char* logStore2Str(SSyncLogStore* pLogStore) { } cJSON* logStoreSimple2Json(SSyncLogStore* pLogStore) { - char u64buf[128]; + char u64buf[128] = {0}; SSyncLogStoreData* pData = (SSyncLogStoreData*)pLogStore->data; cJSON* pRoot = cJSON_CreateObject(); diff --git a/source/libs/sync/src/syncRaftStore.c b/source/libs/sync/src/syncRaftStore.c index d6f2e91de7739efd535a23427168180fe2aabc86..52e815292607d69e7d364f6a11c31c184f07914a 100644 --- a/source/libs/sync/src/syncRaftStore.c +++ b/source/libs/sync/src/syncRaftStore.c @@ -34,7 +34,7 @@ SRaftStore *raftStoreOpen(const char *path) { memset(pRaftStore, 0, sizeof(*pRaftStore)); snprintf(pRaftStore->path, sizeof(pRaftStore->path), "%s", path); - char storeBuf[RAFT_STORE_BLOCK_SIZE]; + char storeBuf[RAFT_STORE_BLOCK_SIZE] = {0}; memset(storeBuf, 0, sizeof(storeBuf)); if (!raftStoreFileExist(pRaftStore->path)) { @@ -84,7 +84,7 @@ int32_t raftStorePersist(SRaftStore *pRaftStore) { assert(pRaftStore != NULL); int32_t ret; - char storeBuf[RAFT_STORE_BLOCK_SIZE]; + char storeBuf[RAFT_STORE_BLOCK_SIZE] = {0}; ret = raftStoreSerialize(pRaftStore, storeBuf, sizeof(storeBuf)); assert(ret == 0); @@ -107,7 +107,7 @@ int32_t raftStoreSerialize(SRaftStore *pRaftStore, char *buf, size_t len) { cJSON *pRoot = cJSON_CreateObject(); - char u64Buf[128]; + char u64Buf[128] = {0}; snprintf(u64Buf, sizeof(u64Buf), "%lu", pRaftStore->currentTerm); cJSON_AddStringToObject(pRoot, "current_term", u64Buf); @@ -117,7 +117,7 @@ int32_t raftStoreSerialize(SRaftStore *pRaftStore, char *buf, size_t len) { cJSON_AddNumberToObject(pRoot, "vote_for_vgid", pRaftStore->voteFor.vgId); uint64_t u64 = 
pRaftStore->voteFor.addr; - char host[128]; + char host[128] = {0}; uint16_t port; syncUtilU642Addr(u64, host, sizeof(host), &port); cJSON_AddStringToObject(pRoot, "addr_host", host); @@ -184,7 +184,7 @@ void raftStoreSetTerm(SRaftStore *pRaftStore, SyncTerm term) { int32_t raftStoreFromJson(SRaftStore *pRaftStore, cJSON *pJson) { return 0; } cJSON *raftStore2Json(SRaftStore *pRaftStore) { - char u64buf[128]; + char u64buf[128] = {0}; cJSON *pRoot = cJSON_CreateObject(); if (pRaftStore != NULL) { @@ -196,7 +196,7 @@ cJSON *raftStore2Json(SRaftStore *pRaftStore) { cJSON_AddStringToObject(pVoteFor, "addr", u64buf); { uint64_t u64 = pRaftStore->voteFor.addr; - char host[128]; + char host[128] = {0}; uint16_t port; syncUtilU642Addr(u64, host, sizeof(host), &port); cJSON_AddStringToObject(pVoteFor, "addr_host", host); diff --git a/source/libs/sync/src/syncReplication.c b/source/libs/sync/src/syncReplication.c index 2fdb8a0e177f0f985c40b136ea29ce9f968c0fad..d17e64d936737ba7ea0dc5f33db407cfdf4bf205 100644 --- a/source/libs/sync/src/syncReplication.c +++ b/source/libs/sync/src/syncReplication.c @@ -75,7 +75,7 @@ int32_t syncNodeAppendEntriesPeers(SSyncNode* pSyncNode) { // SyncIndex lastIndex = syncUtilMinIndex(pSyncNode->pLogStore->getLastIndex(pSyncNode->pLogStore), nextIndex); SyncAppendEntries* pMsg = NULL; - SSyncRaftEntry* pEntry = logStoreGetEntry(pSyncNode->pLogStore, nextIndex); + SSyncRaftEntry* pEntry = pSyncNode->pLogStore->getEntry(pSyncNode->pLogStore, nextIndex); if (pEntry != NULL) { pMsg = syncAppendEntriesBuild(pEntry->bytes, pSyncNode->vgId); assert(pMsg != NULL); diff --git a/source/libs/sync/src/syncRequestVote.c b/source/libs/sync/src/syncRequestVote.c index 619a1546a96ad9642272b7227466d99be833be9f..265677129213c6887012ee72da9066aad25adc09 100644 --- a/source/libs/sync/src/syncRequestVote.c +++ b/source/libs/sync/src/syncRequestVote.c @@ -44,7 +44,7 @@ int32_t syncNodeOnRequestVoteCb(SSyncNode* ths, SyncRequestVote* pMsg) { int32_t ret = 0; - char logBuf[128]; + char logBuf[128] = {0}; snprintf(logBuf, sizeof(logBuf), "==syncNodeOnRequestVoteCb== term:%lu", ths->pRaftStore->currentTerm); syncRequestVoteLog2(logBuf, pMsg); diff --git a/source/libs/sync/src/syncRequestVoteReply.c b/source/libs/sync/src/syncRequestVoteReply.c index a6348dff50132f860ada45e9cc3bddfabd6d62d0..75236aee2bcec1ca9c7ae07165c427edbc1e0a04 100644 --- a/source/libs/sync/src/syncRequestVoteReply.c +++ b/source/libs/sync/src/syncRequestVoteReply.c @@ -39,7 +39,7 @@ int32_t syncNodeOnRequestVoteReplyCb(SSyncNode* ths, SyncRequestVoteReply* pMsg) { int32_t ret = 0; - char logBuf[128]; + char logBuf[128] = {0}; snprintf(logBuf, sizeof(logBuf), "==syncNodeOnRequestVoteReplyCb== term:%lu", ths->pRaftStore->currentTerm); syncRequestVoteReplyLog2(logBuf, pMsg); @@ -56,7 +56,7 @@ int32_t syncNodeOnRequestVoteReplyCb(SSyncNode* ths, SyncRequestVoteReply* pMsg) // } if (pMsg->term > ths->pRaftStore->currentTerm) { - char logBuf[128]; + char logBuf[128] = {0}; snprintf(logBuf, sizeof(logBuf), "syncNodeOnRequestVoteReplyCb error term, receive:%lu current:%lu", pMsg->term, ths->pRaftStore->currentTerm); syncNodePrint2(logBuf, ths); diff --git a/source/libs/sync/src/syncSnapshot.c b/source/libs/sync/src/syncSnapshot.c index 42b2bd993b515789934268f4400fece4f040f7c5..ccb0e6071b82e43bd23a9334e294a421a336e57b 100644 --- a/source/libs/sync/src/syncSnapshot.c +++ b/source/libs/sync/src/syncSnapshot.c @@ -15,6 +15,22 @@ #include "syncSnapshot.h" -int32_t takeSnapshot(SSyncFSM *pFsm, SSnapshot *pSnapshot) { return 0; } 
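// [editor annotation — illustrative sketch, not part of the patch]
// syncSnapshot.c here swaps the old takeSnapshot/restoreSnapshot pair for
// stub bodies of the sender/receiver objects declared in syncSnapshot.h;
// SSyncNode gains the matching pSender/pReceiver fields in syncInt.h.
// Assuming the stubs later get real bodies, the node-side wiring would
// look roughly like this (hypothetical usage, not defined by the patch):
static void exampleSnapshotLifecycle(SSyncNode *pNode) {
  pNode->pSender   = snapshotSenderCreate(pNode);    // stub: currently returns NULL
  pNode->pReceiver = snapshotReceiverCreate(pNode);  // stub: currently returns NULL
  // leader side would drive block-by-block transfer: snapshotSend(pNode->pSender);
  // follower side would consume blocks:              snapshotReceive(pNode->pReceiver);
  snapshotSenderDestroy(pNode->pSender);
  snapshotReceiverDestroy(pNode->pReceiver);
}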
+SSyncSnapshotSender *snapshotSenderCreate(SSyncNode *pSyncNode) { return NULL; } -int32_t restoreSnapshot(SSyncFSM *pFsm, SSnapshot *pSnapshot) { return 0; } \ No newline at end of file +void snapshotSenderDestroy(SSyncSnapshotSender *pSender) {} + +int32_t snapshotSend(SSyncSnapshotSender *pSender) { return 0; } + +cJSON *snapshotSender2Json(SSyncSnapshotSender *pSender) { return NULL; } + +char *snapshotSender2Str(SSyncSnapshotSender *pSender) { return NULL; } + +SSyncSnapshotReceiver *snapshotReceiverCreate(SSyncNode *pSyncNode) { return NULL; } + +void snapshotReceiverDestroy(SSyncSnapshotReceiver *pReceiver) {} + +int32_t snapshotReceive(SSyncSnapshotReceiver *pReceiver) { return 0; } + +cJSON *snapshotReceiver2Json(SSyncSnapshotReceiver *pReceiver) { return NULL; } + +char *snapshotReceiver2Str(SSyncSnapshotReceiver *pReceiver) { return NULL; } diff --git a/source/libs/sync/src/syncUtil.c b/source/libs/sync/src/syncUtil.c index cf045a692611a64e75c2f4c595180f1e324e75f9..d754acd9f831ac18ce7e28b5ef2fda4b2d8650db 100644 --- a/source/libs/sync/src/syncUtil.c +++ b/source/libs/sync/src/syncUtil.c @@ -43,7 +43,7 @@ void syncUtilnodeInfo2EpSet(const SNodeInfo* pNodeInfo, SEpSet* pEpSet) { } void syncUtilraftId2EpSet(const SRaftId* raftId, SEpSet* pEpSet) { - char host[TSDB_FQDN_LEN]; + char host[TSDB_FQDN_LEN] = {0}; uint16_t port; syncUtilU642Addr(raftId->addr, host, sizeof(host), &port); @@ -62,7 +62,7 @@ void syncUtilraftId2EpSet(const SRaftId* raftId, SEpSet* pEpSet) { void syncUtilnodeInfo2raftId(const SNodeInfo* pNodeInfo, SyncGroupId vgId, SRaftId* raftId) { uint32_t ipv4 = taosGetIpv4FromFqdn(pNodeInfo->nodeFqdn); assert(ipv4 != 0xFFFFFFFF); - char ipbuf[128]; + char ipbuf[128] = {0}; tinet_ntoa(ipbuf, ipv4); raftId->addr = syncUtilAddr2U64(ipbuf, pNodeInfo->nodePort); raftId->vgId = vgId; @@ -106,7 +106,7 @@ int32_t syncUtilElectRandomMS(int32_t min, int32_t max) { int32_t syncUtilQuorum(int32_t replicaNum) { return replicaNum / 2 + 1; } cJSON* syncUtilNodeInfo2Json(const SNodeInfo* p) { - char u64buf[128]; + char u64buf[128] = {0}; cJSON* pRoot = cJSON_CreateObject(); cJSON_AddStringToObject(pRoot, "nodeFqdn", p->nodeFqdn); @@ -118,12 +118,12 @@ cJSON* syncUtilNodeInfo2Json(const SNodeInfo* p) { } cJSON* syncUtilRaftId2Json(const SRaftId* p) { - char u64buf[128]; + char u64buf[128] = {0}; cJSON* pRoot = cJSON_CreateObject(); snprintf(u64buf, sizeof(u64buf), "%" PRIu64 "", p->addr); cJSON_AddStringToObject(pRoot, "addr", u64buf); - char host[128]; + char host[128] = {0}; uint16_t port; syncUtilU642Addr(p->addr, host, sizeof(host), &port); cJSON_AddStringToObject(pRoot, "host", host); diff --git a/source/libs/sync/src/syncVoteMgr.c b/source/libs/sync/src/syncVoteMgr.c index 1c1f0809bd796f562e74cfd1d6b5e14015abd485..528c2f26c85c17f33f0a783def69ef9f26798b1b 100644 --- a/source/libs/sync/src/syncVoteMgr.c +++ b/source/libs/sync/src/syncVoteMgr.c @@ -90,7 +90,7 @@ void voteGrantedReset(SVotesGranted *pVotesGranted, SyncTerm term) { } cJSON *voteGranted2Json(SVotesGranted *pVotesGranted) { - char u64buf[128]; + char u64buf[128] = {0}; cJSON *pRoot = cJSON_CreateObject(); if (pVotesGranted != NULL) { @@ -220,7 +220,7 @@ void votesRespondReset(SVotesRespond *pVotesRespond, SyncTerm term) { } cJSON *votesRespond2Json(SVotesRespond *pVotesRespond) { - char u64buf[128]; + char u64buf[128] = {0}; cJSON *pRoot = cJSON_CreateObject(); if (pVotesRespond != NULL) { diff --git a/source/libs/sync/test/syncConfigChangeTest.cpp b/source/libs/sync/test/syncConfigChangeTest.cpp index 
7efc3f50c0154c5c971808c7bd1e7d4b6d8f84c5..1ab3ce203ad4a3968bc45ab2382108fa7d97f40c 100644 --- a/source/libs/sync/test/syncConfigChangeTest.cpp +++ b/source/libs/sync/test/syncConfigChangeTest.cpp @@ -42,7 +42,7 @@ void CommitCb(struct SSyncFSM* pFsm, const SRpcMsg* pMsg, SFsmCbMeta cbMeta) { } if (cbMeta.index > beginIndex) { - char logBuf[256]; + char logBuf[256] = {0}; snprintf(logBuf, sizeof(logBuf), "==callback== ==CommitCb== pFsm:%p, index:%ld, isWeak:%d, code:%d, state:%d %s flag:%lu\n", pFsm, cbMeta.index, cbMeta.isWeak, cbMeta.code, cbMeta.state, syncUtilState2String(cbMeta.state), cbMeta.flag); @@ -53,7 +53,7 @@ void CommitCb(struct SSyncFSM* pFsm, const SRpcMsg* pMsg, SFsmCbMeta cbMeta) { } void PreCommitCb(struct SSyncFSM* pFsm, const SRpcMsg* pMsg, SFsmCbMeta cbMeta) { - char logBuf[256]; + char logBuf[256] = {0}; snprintf(logBuf, sizeof(logBuf), "==callback== ==PreCommitCb== pFsm:%p, index:%ld, isWeak:%d, code:%d, state:%d %s flag:%lu\n", pFsm, cbMeta.index, cbMeta.isWeak, cbMeta.code, cbMeta.state, syncUtilState2String(cbMeta.state), cbMeta.flag); @@ -78,19 +78,21 @@ int32_t GetSnapshotCb(struct SSyncFSM* pFsm, SSnapshot* pSnapshot) { void RestoreFinishCb(struct SSyncFSM* pFsm) { sTrace("==callback== ==RestoreFinishCb=="); } void ReConfigCb(struct SSyncFSM* pFsm, SSyncCfg newCfg, SReConfigCbMeta cbMeta) { - sTrace("==callback== ==ReConfigCb== flag:0x%lX, isDrop:%d, index:%ld, code:%d, currentTerm:%lu, term:%lu", cbMeta.flag, cbMeta.isDrop, cbMeta.index, cbMeta.code, cbMeta.currentTerm, cbMeta.term); + sTrace("==callback== ==ReConfigCb== flag:0x%lX, isDrop:%d, index:%ld, code:%d, currentTerm:%lu, term:%lu", + cbMeta.flag, cbMeta.isDrop, cbMeta.index, cbMeta.code, cbMeta.currentTerm, cbMeta.term); } SSyncFSM* createFsm() { SSyncFSM* pFsm = (SSyncFSM*)taosMemoryMalloc(sizeof(SSyncFSM)); + memset(pFsm, 0, sizeof(*pFsm)); + pFsm->FpCommitCb = CommitCb; pFsm->FpPreCommitCb = PreCommitCb; pFsm->FpRollBackCb = RollBackCb; pFsm->FpGetSnapshot = GetSnapshotCb; pFsm->FpRestoreFinishCb = RestoreFinishCb; - pFsm->FpSnapshotApply = NULL; - pFsm->FpSnapshotRead = NULL; + pFsm->FpReConfigCb = ReConfigCb; diff --git a/source/libs/sync/test/syncSnapshotTest.cpp b/source/libs/sync/test/syncSnapshotTest.cpp index 8ccd69890708781dbfb5b4a3ae835acc5c17d15c..820500e2d8f8b57427fec1f20741755a2ddc2d5c 100644 --- a/source/libs/sync/test/syncSnapshotTest.cpp +++ b/source/libs/sync/test/syncSnapshotTest.cpp @@ -75,6 +75,7 @@ int32_t GetSnapshotCb(struct SSyncFSM *pFsm, SSnapshot *pSnapshot) { void initFsm() { pFsm = (SSyncFSM *)taosMemoryMalloc(sizeof(SSyncFSM)); + memset(pFsm, 0, sizeof(*pFsm)); pFsm->FpCommitCb = CommitCb; pFsm->FpPreCommitCb = PreCommitCb; pFsm->FpRollBackCb = RollBackCb; diff --git a/source/libs/tdb/src/db/tdbTable.c b/source/libs/tdb/src/db/tdbTable.c index 7211fe492630b4bf036c52067ca7c7ae175823b9..239aa5d7ef786b0941e857bf9e3f73a655f65d5a 100644 --- a/source/libs/tdb/src/db/tdbTable.c +++ b/source/libs/tdb/src/db/tdbTable.c @@ -16,7 +16,7 @@ #include "tdbInt.h" struct STTB { - TDB *pEnv; + TDB * pEnv; SBTree *pBt; }; @@ -25,11 +25,11 @@ struct STBC { }; int tdbTbOpen(const char *tbname, int keyLen, int valLen, tdb_cmpr_fn_t keyCmprFn, TDB *pEnv, TTB **ppTb) { - TTB *pTb; + TTB * pTb; SPager *pPager; int ret; char fFullName[TDB_FILENAME_LEN]; - SPage *pPage; + SPage * pPage; SPgno pgno; *ppTb = NULL; @@ -145,4 +145,4 @@ int tdbTbcClose(TBC *pTbc) { return 0; } -int tdbTbcIsValid(TBC *pTbc) { return tdbBtcIsValid(&pTbc->btc); } \ No newline at end of file +int tdbTbcIsValid(TBC *pTbc) 
{ return tdbBtcIsValid(&pTbc->btc); } diff --git a/source/libs/tdb/src/inc/tdbInt.h b/source/libs/tdb/src/inc/tdbInt.h index 9f0267da93fca6db1b35844e77fdf8877eb33847..6524e3c9bcd873180378b5cfea2404b1a461ac7b 100644 --- a/source/libs/tdb/src/inc/tdbInt.h +++ b/source/libs/tdb/src/inc/tdbInt.h @@ -55,8 +55,8 @@ typedef u32 SPgno; #define TDB_PUT_U24(p, v) \ do { \ int tv = (v); \ - (p)[2] = tv & 0xff; \ - (p)[1] = (tv >> 8) & 0xff; \ + (p)[1] = tv & 0xff; \ + (p)[2] = (tv >> 8) & 0xff; \ (p)[0] = (tv >> 16) & 0xff; \ } while (0) diff --git a/source/libs/transport/inc/transComm.h b/source/libs/transport/inc/transComm.h index 18a85865df1da125a25815a1030ab448bd2c6c01..e680e3004283684b0d95c1fd0124f33e99d59d3b 100644 --- a/source/libs/transport/inc/transComm.h +++ b/source/libs/transport/inc/transComm.h @@ -104,8 +104,16 @@ typedef SRpcCtxVal STransCtxVal; typedef SRpcInfo STrans; typedef SRpcConnInfo STransHandleInfo; +/*convet from fqdn to ip */ +typedef struct SCvtAddr { + char ip[TSDB_FQDN_LEN]; + char fqdn[TSDB_FQDN_LEN]; + bool cvt; +} SCvtAddr; + typedef struct { - SEpSet epSet; // ip list provided by app + SEpSet epSet; // ip list provided by app + SEpSet origEpSet; void* ahandle; // handle provided by app tmsg_t msgType; // message type int8_t connType; // connection type cli/srv @@ -115,6 +123,7 @@ typedef struct { STransCtx appCtx; // STransMsg* pRsp; // for synchronous API tsem_t* pSem; // for synchronous API + SCvtAddr cvtAddr; int hThrdIdx; } STransConnCtx; @@ -155,7 +164,7 @@ typedef struct { #pragma pack(pop) -typedef enum { Normal, Quit, Release, Register } STransMsgType; +typedef enum { Normal, Quit, Release, Register, Update } STransMsgType; typedef enum { ConnNormal, ConnAcquire, ConnRelease, ConnBroken, ConnInPool } ConnStatus; #define container_of(ptr, type, member) ((type*)((char*)(ptr)-offsetof(type, member))) @@ -209,6 +218,22 @@ SAsyncPool* transCreateAsyncPool(uv_loop_t* loop, int sz, void* arg, AsyncCB cb) void transDestroyAsyncPool(SAsyncPool* pool); int transSendAsync(SAsyncPool* pool, queue* mq); +#define TRANS_DESTROY_ASYNC_POOL_MSG(pool, msgType, freeFunc) \ + do { \ + for (int i = 0; i < pool->nAsync; i++) { \ + uv_async_t* async = &(pool->asyncs[i]); \ + SAsyncItem* item = async->data; \ + while (!QUEUE_IS_EMPTY(&item->qmsg)) { \ + tTrace("destroy msg in async pool "); \ + queue* h = QUEUE_HEAD(&item->qmsg); \ + QUEUE_REMOVE(h); \ + msgType* msg = QUEUE_DATA(h, msgType, q); \ + if (msg != NULL) { \ + freeFunc(msg); \ + } \ + } \ + } \ + } while (0) int transInitBuffer(SConnBuffer* buf); int transClearBuffer(SConnBuffer* buf); int transDestroyBuffer(SConnBuffer* buf); @@ -231,6 +256,7 @@ void transSendRecv(void* shandle, const SEpSet* pEpSet, STransMsg* pMsg, STransM void transSendResponse(const STransMsg* msg); void transRegisterMsg(const STransMsg* msg); int transGetConnInfo(void* thandle, STransHandleInfo* pInfo); +void transSetDefaultAddr(void* shandle, const char* ip, const char* fqdn); void* transInitServer(uint32_t ip, uint32_t port, char* label, int numOfThreads, void* fp, void* shandle); void* transInitClient(uint32_t ip, uint32_t port, char* label, int numOfThreads, void* fp, void* shandle); @@ -319,11 +345,29 @@ void transDQDestroy(SDelayQueue* queue); int transDQSched(SDelayQueue* queue, void (*func)(void* arg), void* arg, uint64_t timeoutMs); void transPrintEpSet(SEpSet* pEpSet); +bool transEpSetIsEqual(SEpSet* a, SEpSet* b); /* * init global func */ void transThreadOnce(); +// ref mgt +// handle +typedef struct SExHandle { + void* handle; + int64_t 
refId; + void* pThrd; +} SExHandle; + +void transInitEnv(); +int32_t transOpenExHandleMgt(int size); +void transCloseExHandleMgt(int32_t mgt); +int64_t transAddExHandle(int32_t mgt, void* p); +int32_t transRemoveExHandle(int32_t mgt, int64_t refId); +SExHandle* transAcquireExHandle(int32_t mgt, int64_t refId); +int32_t transReleaseExHandle(int32_t mgt, int64_t refId); +void transDestoryExHandle(void* handle); + #ifdef __cplusplus } #endif diff --git a/source/libs/transport/inc/transportInt.h b/source/libs/transport/inc/transportInt.h index 8aeae1b5ade26a1a320dae37cbfe67f676f66eeb..c328629c4b1ba18564918ede4b5b9e4ecc62ad83 100644 --- a/source/libs/transport/inc/transportInt.h +++ b/source/libs/transport/inc/transportInt.h @@ -22,13 +22,13 @@ #include "lz4.h" #include "os.h" #include "taoserror.h" +#include "tglobal.h" #include "thash.h" -#include "tref.h" #include "tmsg.h" #include "transLog.h" +#include "tref.h" #include "trpc.h" #include "tutil.h" -#include "tglobal.h" #ifdef __cplusplus extern "C" { @@ -55,9 +55,9 @@ typedef struct { bool (*retry)(int32_t code); int index; - int32_t refCount; void* parent; void* tcphandle; // returned handle from TCP initialization + int32_t refMgt; TdThreadMutex mutex; } SRpcInfo; diff --git a/source/libs/transport/src/trans.c b/source/libs/transport/src/trans.c index 9e71c87fa5289d2af6d71639c313d208fe6d9b37..925de2f3219672e40f270b92b754718a93f23f02 100644 --- a/source/libs/transport/src/trans.c +++ b/source/libs/transport/src/trans.c @@ -27,7 +27,17 @@ void (*taosUnRefHandle[])(void* handle) = {transUnrefSrvHandle, transUnrefCliHan void (*transReleaseHandle[])(void* handle) = {transReleaseSrvHandle, transReleaseCliHandle}; +static int32_t transValidLocalFqdn(const char* localFqdn, uint32_t* ip) { + *ip = taosGetIpv4FromFqdn(localFqdn); + if (*ip == 0xFFFFFFFF) { + terrno = TSDB_CODE_RPC_FQDN_ERROR; + return -1; + } + return 0; +} void* rpcOpen(const SRpcInit* pInit) { + transInitEnv(); + SRpcInfo* pRpc = taosMemoryCalloc(1, sizeof(SRpcInfo)); if (pRpc == NULL) { return NULL; @@ -35,7 +45,6 @@ void* rpcOpen(const SRpcInit* pInit) { if (pInit->label) { tstrncpy(pRpc->label, pInit->label, strlen(pInit->label) + 1); } - // register callback handle pRpc->cfp = pInit->cfp; pRpc->retry = pInit->rfp; @@ -48,10 +57,8 @@ void* rpcOpen(const SRpcInit* pInit) { uint32_t ip = 0; if (pInit->connType == TAOS_CONN_SERVER) { - ip = taosGetIpv4FromFqdn(pInit->localFqdn); - if (ip == 0xFFFFFFFF) { - tError("invalid fqdn: %s", pInit->localFqdn); - terrno = TSDB_CODE_RPC_FQDN_ERROR; + if (transValidLocalFqdn(pInit->localFqdn, &ip) != 0) { + tError("invalid fqdn: %s, errmsg: %s", pInit->localFqdn, terrstr()); taosMemoryFree(pRpc); return NULL; } @@ -74,7 +81,9 @@ void* rpcOpen(const SRpcInit* pInit) { void rpcClose(void* arg) { SRpcInfo* pRpc = (SRpcInfo*)arg; (*taosCloseHandle[pRpc->connType])(pRpc->tcphandle); + transCloseExHandleMgt(pRpc->refMgt); taosMemoryFree(pRpc); + return; } @@ -149,6 +158,11 @@ void rpcReleaseHandle(void* handle, int8_t type) { (*transReleaseHandle[type])(handle); } +void rpcSetDefaultAddr(void* thandle, const char* ip, const char* fqdn) { + // later + transSetDefaultAddr(thandle, ip, fqdn); +} + int32_t rpcInit() { // impl later return 0; diff --git a/source/libs/transport/src/transCli.c b/source/libs/transport/src/transCli.c index 0313ea583220a72ad94ba5e833385089763551f5..0090701ba567378c4e2aab0869a757663b9d289e 100644 --- a/source/libs/transport/src/transCli.c +++ b/source/libs/transport/src/transCli.c @@ -15,6 +15,9 @@ #ifdef USE_UV #include 
"transComm.h" +static int32_t transSCliInst = 0; +static int32_t refMgt = 0; + typedef struct SCliConn { T_REF_DECLARE() uv_connect_t connReq; @@ -63,7 +66,10 @@ typedef struct SCliThrdObj { SDelayQueue* delayQueue; uint64_t nextTimeout; // next timeout void* pTransInst; // - bool quit; + + SCvtAddr cvtAddr; + + bool quit; } SCliThrdObj; typedef struct SCliObj { @@ -103,6 +109,7 @@ static void cliDestroyConn(SCliConn* pConn, bool clear /*clear tcp handle o static void cliDestroy(uv_handle_t* handle); static void cliSend(SCliConn* pConn); +void cliMayCvtFqdnToIp(SEpSet* pEpSet, SCvtAddr* pCvtAddr); /* * set TCP connection timeout per-socket level */ @@ -116,7 +123,9 @@ static void cliHandleExcept(SCliConn* conn); static void cliHandleReq(SCliMsg* pMsg, SCliThrdObj* pThrd); static void cliHandleQuit(SCliMsg* pMsg, SCliThrdObj* pThrd); static void cliHandleRelease(SCliMsg* pMsg, SCliThrdObj* pThrd); -static void (*cliAsyncHandle[])(SCliMsg* pMsg, SCliThrdObj* pThrd) = {cliHandleReq, cliHandleQuit, cliHandleRelease}; +static void cliHandleUpdate(SCliMsg* pMsg, SCliThrdObj* pThrd); +static void (*cliAsyncHandle[])(SCliMsg* pMsg, SCliThrdObj* pThrd) = {cliHandleReq, cliHandleQuit, cliHandleRelease, + NULL, cliHandleUpdate}; static void cliSendQuit(SCliThrdObj* thrd); static void destroyUserdata(STransMsg* userdata); @@ -697,6 +706,12 @@ static void cliHandleRelease(SCliMsg* pMsg, SCliThrdObj* pThrd) { transUnrefCliHandle(conn); } } +static void cliHandleUpdate(SCliMsg* pMsg, SCliThrdObj* pThrd) { + STransConnCtx* pCtx = pMsg->ctx; + + pThrd->cvtAddr = pCtx->cvtAddr; + destroyCmsg(pMsg); +} SCliConn* cliGetConn(SCliMsg* pMsg, SCliThrdObj* pThrd) { SCliConn* conn = NULL; @@ -716,7 +731,17 @@ SCliConn* cliGetConn(SCliMsg* pMsg, SCliThrdObj* pThrd) { } return conn; } - +void cliMayCvtFqdnToIp(SEpSet* pEpSet, SCvtAddr* pCvtAddr) { + if (pCvtAddr->cvt == false) { + return; + } + for (int i = 0; i < pEpSet->numOfEps && pEpSet->numOfEps == 1; i++) { + if (strncmp(pEpSet->eps[i].fqdn, pCvtAddr->fqdn, TSDB_FQDN_LEN) == 0) { + memset(pEpSet->eps[i].fqdn, 0, TSDB_FQDN_LEN); + memcpy(pEpSet->eps[i].fqdn, pCvtAddr->ip, TSDB_FQDN_LEN); + } + } +} void cliHandleReq(SCliMsg* pMsg, SCliThrdObj* pThrd) { uint64_t et = taosGetTimestampUs(); uint64_t el = et - pMsg->st; @@ -726,6 +751,8 @@ void cliHandleReq(SCliMsg* pMsg, SCliThrdObj* pThrd) { STransConnCtx* pCtx = pMsg->ctx; STrans* pTransInst = pThrd->pTransInst; + cliMayCvtFqdnToIp(&pCtx->epSet, &pThrd->cvtAddr); + SCliConn* conn = cliGetConn(pMsg, pThrd); if (conn != NULL) { conn->hThrdIdx = pCtx->hThrdIdx; @@ -822,6 +849,11 @@ void* transInitClient(uint32_t ip, uint32_t port, char* label, int numOfThreads, } cli->pThreadObj[i] = pThrd; } + int ref = atomic_add_fetch_32(&transSCliInst, 1); + if (ref == 1) { + refMgt = transOpenExHandleMgt(50000); + } + return cli; } @@ -855,7 +887,6 @@ static SCliThrdObj* createThrdObj() { pThrd->timer.data = pThrd; pThrd->pool = createConnPool(4); - transDQCreate(pThrd->loop, &pThrd->delayQueue); pThrd->quit = false; @@ -869,6 +900,7 @@ static void destroyThrdObj(SCliThrdObj* pThrd) { taosThreadJoin(pThrd->thread, NULL); CLI_RELEASE_UV(pThrd->loop); taosThreadMutexDestroy(&pThrd->msgMtx); + TRANS_DESTROY_ASYNC_POOL_MSG(pThrd->asyncPool, SCliMsg, destroyCmsg); transDestroyAsyncPool(pThrd->asyncPool); transDQDestroy(pThrd->delayQueue); @@ -923,11 +955,14 @@ int cliAppCb(SCliConn* pConn, STransMsg* pResp, SCliMsg* pMsg) { SEpSet* pEpSet = &pCtx->epSet; transPrintEpSet(pEpSet); + if (pCtx->retryCount == 0) { + pCtx->origEpSet = 
pCtx->epSet; + } /* * upper layer handle retry if code equal TSDB_CODE_RPC_NETWORK_UNAVAIL */ tmsg_t msgType = pCtx->msgType; - if ((pTransInst->retry != NULL && (pTransInst->retry(pResp->code))) || + if ((pTransInst->retry != NULL && pEpSet->numOfEps > 1 && (pTransInst->retry(pResp->code))) || (pResp->code == TSDB_CODE_RPC_NETWORK_UNAVAIL || pResp->code == TSDB_CODE_APP_NOT_READY || pResp->code == TSDB_CODE_NODE_NOT_DEPLOYED || pResp->code == TSDB_CODE_SYN_NOT_LEADER)) { pMsg->sent = 0; @@ -948,9 +983,9 @@ int cliAppCb(SCliConn* pConn, STransMsg* pResp, SCliMsg* pMsg) { if (pResp->contLen == 0) { pEpSet->inUse = (++pEpSet->inUse) % pEpSet->numOfEps; } else { - SMEpSet emsg = {0}; - tDeserializeSMEpSet(pResp->pCont, pResp->contLen, &emsg); - pCtx->epSet = emsg.epSet; + SEpSet epSet = {0}; + tDeserializeSEpSet(pResp->pCont, pResp->contLen, &epSet); + pCtx->epSet = epSet; } addConnToPool(pThrd->pool, pConn); tTrace("use remote epset, current in use: %d, retry count:%d, try limit: %d", pEpSet->inUse, pCtx->retryCount + 1, @@ -975,7 +1010,7 @@ int cliAppCb(SCliConn* pConn, STransMsg* pResp, SCliMsg* pMsg) { pCtx->pRsp = NULL; } else { tTrace("%s cli conn %p handle resp", pTransInst->label, pConn); - if (pResp->code != 0) { + if (pResp->code != 0 || pCtx->retryCount == 0 || transEpSetIsEqual(&pCtx->epSet, &pCtx->origEpSet)) { pTransInst->cfp(pTransInst->parent, pResp, NULL); } else { pTransInst->cfp(pTransInst->parent, pResp, pEpSet); @@ -992,6 +1027,10 @@ void transCloseClient(void* arg) { } taosMemoryFree(cli->pThreadObj); taosMemoryFree(cli); + int ref = atomic_sub_fetch_32(&transSCliInst, 1); + if (ref == 0) { + transCloseExHandleMgt(refMgt); + } } void transRefCliHandle(void* handle) { if (handle == NULL) { @@ -1088,4 +1127,31 @@ void transSendRecv(void* shandle, const SEpSet* pEpSet, STransMsg* pReq, STransM taosMemoryFree(pSem); } +/* + * + **/ +void transSetDefaultAddr(void* ahandle, const char* ip, const char* fqdn) { + STrans* pTransInst = ahandle; + + SCvtAddr cvtAddr = {0}; + if (ip != NULL && fqdn != NULL) { + memcpy(cvtAddr.ip, ip, strlen(ip)); + memcpy(cvtAddr.fqdn, fqdn, strlen(fqdn)); + cvtAddr.cvt = true; + } + for (int i = 0; i < pTransInst->numOfThreads; i++) { + STransConnCtx* pCtx = taosMemoryCalloc(1, sizeof(STransConnCtx)); + pCtx->hThrdIdx = i; + pCtx->cvtAddr = cvtAddr; + + SCliMsg* cliMsg = taosMemoryCalloc(1, sizeof(SCliMsg)); + cliMsg->ctx = pCtx; + cliMsg->type = Update; + + SCliThrdObj* thrd = ((SCliObj*)pTransInst->tcphandle)->pThreadObj[i]; + tDebug("update epset at thread:%d, threadID:%" PRId64 "", i, thrd->thread); + + transSendAsync(thrd->asyncPool, &(cliMsg->q)); + } +} #endif diff --git a/source/libs/transport/src/transComm.c b/source/libs/transport/src/transComm.c index 526f896ad2cbf0edd5db4f368a92cd8f6ee70707..a04e8b5fca05b7e02b58bbd4b5abfc528a5289e9 100644 --- a/source/libs/transport/src/transComm.c +++ b/source/libs/transport/src/transComm.c @@ -190,6 +190,7 @@ SAsyncPool* transCreateAsyncPool(uv_loop_t* loop, int sz, void* arg, AsyncCB cb) } return pool; } + void transDestroyAsyncPool(SAsyncPool* pool) { for (int i = 0; i < pool->nAsync; i++) { uv_async_t* async = &(pool->asyncs[i]); @@ -452,10 +453,58 @@ void transPrintEpSet(SEpSet* pEpSet) { tTrace("NULL epset"); return; } - tTrace("epset begin: inUse: %d", pEpSet->inUse); + tTrace("epset begin inUse: %d", pEpSet->inUse); for (int i = 0; i < pEpSet->numOfEps; i++) { tTrace("ip: %s, port: %d", pEpSet->eps[i].fqdn, pEpSet->eps[i].port); } tTrace("epset end"); } +bool transEpSetIsEqual(SEpSet* a, 
SEpSet* b) { + if (a->numOfEps != b->numOfEps || a->inUse != b->inUse) { + return false; + } + for (int i = 0; i < a->numOfEps; i++) { + if (strncmp(a->eps[i].fqdn, b->eps[i].fqdn, TSDB_FQDN_LEN) != 0 || a->eps[i].port != b->eps[i].port) { + return false; + } + } + return true; +} + +void transInitEnv() { + // + uv_os_setenv("UV_TCP_SINGLE_ACCEPT", "1"); +} +int32_t transOpenExHandleMgt(int size) { + // to be folded into a once-guarded init later + return taosOpenRef(size, transDestoryExHandle); +} +void transCloseExHandleMgt(int32_t mgt) { + // close ref + taosCloseRef(mgt); +} +int64_t transAddExHandle(int32_t mgt, void* p) { + // add extern handle + return taosAddRef(mgt, p); +} +int32_t transRemoveExHandle(int32_t mgt, int64_t refId) { + // remove extern handle + return taosRemoveRef(mgt, refId); +} + +SExHandle* transAcquireExHandle(int32_t mgt, int64_t refId) { + // acquire extern handle + return (SExHandle*)taosAcquireRef(mgt, refId); +} + +int32_t transReleaseExHandle(int32_t mgt, int64_t refId) { + // release extern handle + return taosReleaseRef(mgt, refId); +} +void transDestoryExHandle(void* handle) { + if (handle == NULL) { + return; + } + taosMemoryFree(handle); +} #endif diff --git a/source/libs/transport/src/transSrv.c b/source/libs/transport/src/transSvr.c similarity index 85% rename from source/libs/transport/src/transSrv.c rename to source/libs/transport/src/transSvr.c index 36f5cf98150e5636b43eb35b819d5bcd9288fe6a..608fd00b2cda7c9508275cd4487496295b9e0711 100644 --- a/source/libs/transport/src/transSrv.c +++ b/source/libs/transport/src/transSvr.c @@ -19,16 +19,17 @@ static TdThreadOnce transModuleInit = PTHREAD_ONCE_INIT; -static char* notify = "a"; -static int transSrvInst = 0; +static char* notify = "a"; +static int32_t tranSSvrInst = 0; +static int32_t refMgt = 0; typedef struct { int notifyCount; // int init; // init or not STransMsg msg; -} SSrvRegArg; +} SSvrRegArg; -typedef struct SSrvConn { +typedef struct SSvrConn { T_REF_DECLARE() uv_tcp_t* pTcp; uv_write_t pWriter; @@ -42,7 +43,7 @@ typedef struct SSrvConn { void* hostThrd; STransQueue srvMsgs; - SSrvRegArg regArg; + SSvrRegArg regArg; bool broken; // conn broken; ConnStatus status; @@ -55,14 +56,14 @@ typedef struct SSrvConn { char user[TSDB_UNI_LEN]; // user ID for the link char secret[TSDB_PASSWORD_LEN]; char ckey[TSDB_PASSWORD_LEN]; // ciphering key -} SSrvConn; +} SSvrConn; -typedef struct SSrvMsg { - SSrvConn* pConn; +typedef struct SSvrMsg { + SSvrConn* pConn; STransMsg msg; queue q; STransMsgType type; -} SSrvMsg; +} SSvrMsg; typedef struct SWorkThrdObj { TdThread thread; @@ -99,13 +100,6 @@ typedef struct SServerObj { bool inited; } SServerObj; -// handle -typedef struct SExHandle { - void* handle; - int64_t refId; - SWorkThrdObj* pThrd; -} SExHandle; - static void uvAllocConnBufferCb(uv_handle_t* handle, size_t suggested_size, uv_buf_t* buf); static void uvAllocRecvBufferCb(uv_handle_t* handle, size_t suggested_size, uv_buf_t* buf); static void uvOnRecvCb(uv_stream_t* cli, ssize_t nread, const uv_buf_t* buf); @@ -127,37 +121,37 @@ static void uvWorkAfterTask(uv_work_t* req, int status); static void uvWalkCb(uv_handle_t* handle, void* arg); static void uvFreeCb(uv_handle_t* handle); -static void uvStartSendRespInternal(SSrvMsg* smsg); -static void uvPrepareSendData(SSrvMsg* msg, uv_buf_t* wb); -static void uvStartSendResp(SSrvMsg* msg); +static void uvStartSendRespInternal(SSvrMsg* smsg); +static void uvPrepareSendData(SSvrMsg* msg, uv_buf_t* wb); +static void uvStartSendResp(SSvrMsg* msg); -static void 
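
transEpSetIsEqual() above is what lets cliAppCb() report an endpoint set back to the application only when a retried request actually ended up somewhere other than where it started (the epset is snapshotted into origEpSet on the first try). A sketch of the comparison and the callback gate, with simplified stand-in types:

```c
/* Sketch of transEpSetIsEqual() and the callback gate in cliAppCb().
 * SEp/SEpSet are simplified stand-ins for the TDengine structs. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define FQDN_LEN 128
typedef struct { char fqdn[FQDN_LEN]; uint16_t port; } SEp;
typedef struct { int8_t inUse; int8_t numOfEps; SEp eps[3]; } SEpSet;

static bool epSetIsEqual(const SEpSet* a, const SEpSet* b) {
  if (a->numOfEps != b->numOfEps || a->inUse != b->inUse) return false;
  for (int i = 0; i < a->numOfEps; i++) {
    if (strncmp(a->eps[i].fqdn, b->eps[i].fqdn, FQDN_LEN) != 0 ||
        a->eps[i].port != b->eps[i].port) {
      return false;
    }
  }
  return true;
}

/* report the epset only if the request succeeded after at least one
 * retry and landed on a different endpoint set than the original */
static bool shouldReportEpSet(int32_t code, int retryCount,
                              const SEpSet* cur, const SEpSet* orig) {
  return code == 0 && retryCount > 0 && !epSetIsEqual(cur, orig);
}

int main(void) {
  SEpSet orig = {.inUse = 0, .numOfEps = 1};
  snprintf(orig.eps[0].fqdn, FQDN_LEN, "%s", "node1");
  orig.eps[0].port = 6030;
  SEpSet cur = orig;
  cur.eps[0].port = 6031;  // retry landed on a different endpoint
  printf("report: %d\n", shouldReportEpSet(0, 1, &cur, &orig));  // 1
  return 0;
}
```
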
uvNotifyLinkBrokenToApp(SSrvConn* conn); +static void uvNotifyLinkBrokenToApp(SSvrConn* conn); -static void destroySmsg(SSrvMsg* smsg); +static void destroySmsg(SSvrMsg* smsg); // check whether already read complete packet -static SSrvConn* createConn(void* hThrd); -static void destroyConn(SSrvConn* conn, bool clear /*clear handle or not*/); -static void destroyConnRegArg(SSrvConn* conn); +static SSvrConn* createConn(void* hThrd); +static void destroyConn(SSvrConn* conn, bool clear /*clear handle or not*/); +static void destroyConnRegArg(SSvrConn* conn); -static int reallocConnRefHandle(SSrvConn* conn); +static int reallocConnRefHandle(SSvrConn* conn); -static void uvHandleQuit(SSrvMsg* msg, SWorkThrdObj* thrd); -static void uvHandleRelease(SSrvMsg* msg, SWorkThrdObj* thrd); -static void uvHandleResp(SSrvMsg* msg, SWorkThrdObj* thrd); -static void uvHandleRegister(SSrvMsg* msg, SWorkThrdObj* thrd); -static void (*transAsyncHandle[])(SSrvMsg* msg, SWorkThrdObj* thrd) = {uvHandleResp, uvHandleQuit, uvHandleRelease, - uvHandleRegister}; +static void uvHandleQuit(SSvrMsg* msg, SWorkThrdObj* thrd); +static void uvHandleRelease(SSvrMsg* msg, SWorkThrdObj* thrd); +static void uvHandleResp(SSvrMsg* msg, SWorkThrdObj* thrd); +static void uvHandleRegister(SSvrMsg* msg, SWorkThrdObj* thrd); +static void (*transAsyncHandle[])(SSvrMsg* msg, SWorkThrdObj* thrd) = {uvHandleResp, uvHandleQuit, uvHandleRelease, + uvHandleRegister, NULL}; static int32_t exHandlesMgt; -void uvInitEnv(); -void uvOpenExHandleMgt(int size); -void uvCloseExHandleMgt(); -int64_t uvAddExHandle(void* p); -int32_t uvRemoveExHandle(int64_t refId); -int32_t uvReleaseExHandle(int64_t refId); -void uvDestoryExHandle(void* handle); -SExHandle* uvAcquireExHandle(int64_t refId); +// void uvInitEnv(); +// void uvOpenExHandleMgt(int size); +// void uvCloseExHandleMgt(); +// int64_t uvAddExHandle(void* p); +// int32_t uvRemoveExHandle(int64_t refId); +// int32_t uvReleaseExHandle(int64_t refId); +// void uvDestoryExHandle(void* handle); +// SExHandle* uvAcquireExHandle(int64_t refId); static void uvDestroyConn(uv_handle_t* handle); @@ -178,7 +172,7 @@ static bool addHandleToAcceptloop(void* arg); tTrace("server conn %p received release request", conn); \ \ STransMsg tmsg = {.code = 0, .info.handle = (void*)conn, .info.ahandle = NULL}; \ - SSrvMsg* srvMsg = taosMemoryCalloc(1, sizeof(SSrvMsg)); \ + SSvrMsg* srvMsg = taosMemoryCalloc(1, sizeof(SSvrMsg)); \ srvMsg->msg = tmsg; \ srvMsg->type = Release; \ srvMsg->pConn = conn; \ @@ -210,7 +204,7 @@ static bool addHandleToAcceptloop(void* arg); do { \ if (refId > 0) { \ tTrace("server handle step1"); \ - SExHandle* exh2 = uvAcquireExHandle(refId); \ + SExHandle* exh2 = transAcquireExHandle(refMgt, refId); \ if (exh2 == NULL || refId != exh2->refId) { \ tTrace("server handle %p except, may already freed, ignore msg, ref1: %" PRIu64 ", ref2 : %" PRIu64 "", exh1, \ exh2 ? exh2->refId : 0, refId); \ @@ -218,7 +212,7 @@ static bool addHandleToAcceptloop(void* arg); } \ } else if (refId == 0) { \ tTrace("server handle step2"); \ - SExHandle* exh2 = uvAcquireExHandle(refId); \ + SExHandle* exh2 = transAcquireExHandle(refMgt, refId); \ if (exh2 == NULL || refId != exh2->refId) { \ tTrace("server handle %p except, may already freed, ignore msg, ref1: %" PRIu64 ", ref2 : %" PRIu64 "", exh1, \ refId, exh2 ? 
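
The validation macros here acquire the handle from the shared registry and compare the stored refId against the one carried in the message, so a message aimed at an already-freed connection is dropped instead of dereferenced. A toy version of the refId-based registry the patch centralizes in transComm.c (the real code delegates to taosOpenRef/taosAddRef and friends; this stand-in only shows the acquire/release discipline):

```c
/* Toy refId registry illustrating transAddExHandle/transAcquireExHandle/
 * transReleaseExHandle/transRemoveExHandle. Not the real implementation. */
#include <stdio.h>
#include <stdlib.h>

typedef struct { void* ptr; int rc; long id; } Slot;
static Slot g_slots[64];
static long g_nextId = 1;

static long addHandle(void* p) {            // transAddExHandle() analogue
  long id = g_nextId++;
  g_slots[id % 64] = (Slot){.ptr = p, .rc = 1, .id = id};
  return id;
}
static void* acquireHandle(long id) {       // transAcquireExHandle() analogue
  Slot* s = &g_slots[id % 64];
  if (s->id != id) return NULL;             // stale or removed refId
  s->rc++;
  return s->ptr;
}
static void releaseHandle(long id) {        // transReleaseExHandle() analogue
  Slot* s = &g_slots[id % 64];
  if (s->id == id && --s->rc == 0) { free(s->ptr); s->id = 0; }
}
static void removeHandle(long id) { releaseHandle(id); }  // drop creation ref

int main(void) {
  long id = addHandle(malloc(16));
  void* h = acquireHandle(id);              // pin while a message is in flight
  if (h != NULL) releaseHandle(id);         // unpin when done
  removeHandle(id);                         // final owner drop frees the slot
  printf("acquire after remove: %p\n", acquireHandle(id));  // NULL
  return 0;
}
```
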
exh2->refId : 0); \ @@ -233,18 +227,18 @@ static bool addHandleToAcceptloop(void* arg); } while (0) void uvAllocRecvBufferCb(uv_handle_t* handle, size_t suggested_size, uv_buf_t* buf) { - SSrvConn* conn = handle->data; + SSvrConn* conn = handle->data; SConnBuffer* pBuf = &conn->readBuf; transAllocBuffer(pBuf, buf); } // refers specifically to query or insert timeout static void uvHandleActivityTimeout(uv_timer_t* handle) { - SSrvConn* conn = handle->data; + SSvrConn* conn = handle->data; tDebug("%p timeout since no activity", conn); } -static void uvHandleReq(SSrvConn* pConn) { +static void uvHandleReq(SSvrConn* pConn) { SConnBuffer* pBuf = &pConn->readBuf; char* msg = pBuf->buf; uint32_t msgLen = pBuf->len; @@ -300,14 +294,14 @@ static void uvHandleReq(SSrvConn* pConn) { // 2. once send out data, cli conn released to conn pool immediately // 3. not mixed with persist - transMsg.info.handle = (void*)uvAcquireExHandle(pConn->refId); + transMsg.info.handle = (void*)transAcquireExHandle(refMgt, pConn->refId); transMsg.info.refId = pConn->refId; tTrace("server handle %p conn: %p translated to app, refId: %" PRIu64 "", transMsg.info.handle, pConn, pConn->refId); assert(transMsg.info.handle != NULL); if (pHead->noResp == 1) { transMsg.info.refId = -1; } - uvReleaseExHandle(pConn->refId); + transReleaseExHandle(refMgt, pConn->refId); STrans* pTransInst = pConn->pTransInst; (*pTransInst->cfp)(pTransInst->parent, &transMsg, NULL); @@ -316,7 +310,7 @@ static void uvHandleReq(SSrvConn* pConn) { void uvOnRecvCb(uv_stream_t* cli, ssize_t nread, const uv_buf_t* buf) { // opt - SSrvConn* conn = cli->data; + SSvrConn* conn = cli->data; SConnBuffer* pBuf = &conn->readBuf; if (nread > 0) { pBuf->len += nread; @@ -354,17 +348,17 @@ void uvAllocConnBufferCb(uv_handle_t* handle, size_t suggested_size, uv_buf_t* b void uvOnTimeoutCb(uv_timer_t* handle) { // opt - SSrvConn* pConn = handle->data; + SSvrConn* pConn = handle->data; tError("server conn %p time out", pConn); } void uvOnSendCb(uv_write_t* req, int status) { - SSrvConn* conn = req->data; + SSvrConn* conn = req->data; // transClearBuffer(&conn->readBuf); if (status == 0) { tTrace("server conn %p data already was written on stream", conn); if (!transQueueEmpty(&conn->srvMsgs)) { - SSrvMsg* msg = transQueuePop(&conn->srvMsgs); + SSvrMsg* msg = transQueuePop(&conn->srvMsgs); // if (msg->type == Release && conn->status != ConnNormal) { // conn->status = ConnNormal; // transUnrefSrvHandle(conn); @@ -376,7 +370,7 @@ void uvOnSendCb(uv_write_t* req, int status) { destroySmsg(msg); // send second data, just use for push if (!transQueueEmpty(&conn->srvMsgs)) { - msg = (SSrvMsg*)transQueueGet(&conn->srvMsgs, 0); + msg = (SSvrMsg*)transQueueGet(&conn->srvMsgs, 0); if (msg->type == Register && conn->status == ConnAcquire) { conn->regArg.notifyCount = 0; conn->regArg.init = 1; @@ -389,7 +383,7 @@ void uvOnSendCb(uv_write_t* req, int status) { transQueuePop(&conn->srvMsgs); taosMemoryFree(msg); - msg = (SSrvMsg*)transQueueGet(&conn->srvMsgs, 0); + msg = (SSvrMsg*)transQueueGet(&conn->srvMsgs, 0); if (msg != NULL) { uvStartSendRespInternal(msg); } @@ -415,10 +409,10 @@ static void uvOnPipeWriteCb(uv_write_t* req, int status) { taosMemoryFree(req); } -static void uvPrepareSendData(SSrvMsg* smsg, uv_buf_t* wb) { +static void uvPrepareSendData(SSvrMsg* smsg, uv_buf_t* wb) { tTrace("server conn %p prepare to send resp", smsg->pConn); - SSrvConn* pConn = smsg->pConn; + SSvrConn* pConn = smsg->pConn; STransMsg* pMsg = &smsg->msg; if (pMsg->pCont == 0) { pMsg->pCont = 
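
uvOnSendCb() above drives a one-write-in-flight send queue: only the head of srvMsgs is ever on the wire, and its completion pops it and kicks the next message. A compact sketch of that state machine follows; the queue and the synchronous "write" are stand-ins for STransQueue and uv_write, whose completion is really asynchronous.

```c
/* Sketch of the one-write-in-flight send queue driven by uvOnSendCb().
 * Queue and write are stand-ins; completion here is synchronous. */
#include <stdio.h>

#define QCAP 8
typedef struct { const char* payload; } Msg;
typedef struct { Msg* items[QCAP]; int head, tail; } Queue;

static int  qEmpty(Queue* q) { return q->head == q->tail; }
static void qPush(Queue* q, Msg* m) { q->items[q->tail++ % QCAP] = m; }
static Msg* qHead(Queue* q) { return q->items[q->head % QCAP]; }
static void qPop(Queue* q) { q->head++; }

static void startWrite(Queue* q);

static void onSendDone(Queue* q) {    // uvOnSendCb analogue
  qPop(q);                            // retire the message that completed
  if (!qEmpty(q)) startWrite(q);      // immediately start the next one
}
static void startWrite(Queue* q) {    // uvStartSendRespInternal analogue
  printf("writing: %s\n", qHead(q)->payload);
  onSendDone(q);                      // stand-in: completion is immediate
}
static void sendResp(Queue* q, Msg* m) {  // uvStartSendResp analogue
  qPush(q, m);
  if (qHead(q) == m) startWrite(q);   // only kick the wire if it was idle
}

int main(void) {
  Queue q = {0};
  Msg a = {"resp-1"}, b = {"resp-2"};
  sendResp(&q, &a);
  sendResp(&q, &b);
  return 0;
}
```
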
(void*)rpcMallocCont(0); @@ -455,17 +449,17 @@ static void uvPrepareSendData(SSrvMsg* smsg, uv_buf_t* wb) { wb->len = len; } -static void uvStartSendRespInternal(SSrvMsg* smsg) { +static void uvStartSendRespInternal(SSvrMsg* smsg) { uv_buf_t wb; uvPrepareSendData(smsg, &wb); - SSrvConn* pConn = smsg->pConn; + SSvrConn* pConn = smsg->pConn; // uv_timer_stop(&pConn->pTimer); uv_write(&pConn->pWriter, (uv_stream_t*)pConn->pTcp, &wb, 1, uvOnSendCb); } -static void uvStartSendResp(SSrvMsg* smsg) { +static void uvStartSendResp(SSvrMsg* smsg) { // impl - SSrvConn* pConn = smsg->pConn; + SSvrConn* pConn = smsg->pConn; if (pConn->broken == true) { // persist by @@ -485,7 +479,7 @@ static void uvStartSendResp(SSrvMsg* smsg) { return; } -static void destroySmsg(SSrvMsg* smsg) { +static void destroySmsg(SSvrMsg* smsg) { if (smsg == NULL) { return; } @@ -499,7 +493,7 @@ static void destroyAllConn(SWorkThrdObj* pThrd) { QUEUE_REMOVE(h); QUEUE_INIT(h); - SSrvConn* c = QUEUE_DATA(h, SSrvConn, queue); + SSvrConn* c = QUEUE_DATA(h, SSvrConn, queue); while (T_REF_VAL_GET(c) >= 2) { transUnrefSrvHandle(c); } @@ -509,7 +503,7 @@ static void destroyAllConn(SWorkThrdObj* pThrd) { void uvWorkerAsyncCb(uv_async_t* handle) { SAsyncItem* item = handle->data; SWorkThrdObj* pThrd = item->pThrd; - SSrvConn* conn = NULL; + SSvrConn* conn = NULL; queue wq; // batch process to avoid to lock/unlock frequently @@ -521,7 +515,7 @@ void uvWorkerAsyncCb(uv_async_t* handle) { queue* head = QUEUE_HEAD(&wq); QUEUE_REMOVE(head); - SSrvMsg* msg = QUEUE_DATA(head, SSrvMsg, q); + SSvrMsg* msg = QUEUE_DATA(head, SSvrMsg, q); if (msg == NULL) { tError("unexcept occurred, continue"); continue; @@ -535,15 +529,15 @@ void uvWorkerAsyncCb(uv_async_t* handle) { SExHandle* exh1 = transMsg.info.handle; int64_t refId = transMsg.info.refId; - SExHandle* exh2 = uvAcquireExHandle(refId); + SExHandle* exh2 = transAcquireExHandle(refMgt, refId); if (exh2 == NULL || exh1 != exh2) { tTrace("server handle except msg %p, ignore it", exh1); - uvReleaseExHandle(refId); + transReleaseExHandle(refMgt, refId); destroySmsg(msg); continue; } msg->pConn = exh1->handle; - uvReleaseExHandle(refId); + transReleaseExHandle(refMgt, refId); (*transAsyncHandle[msg->type])(msg, pThrd); } } @@ -649,7 +643,7 @@ void uvOnConnectionCb(uv_stream_t* q, ssize_t nread, const uv_buf_t* buf) { uv_handle_type pending = uv_pipe_pending_type(pipe); assert(pending == UV_TCP); - SSrvConn* pConn = createConn(pThrd); + SSvrConn* pConn = createConn(pThrd); pConn->pTransInst = pThrd->pTransInst; /* init conn timer*/ @@ -768,10 +762,10 @@ void* transWorkerThread(void* arg) { return NULL; } -static SSrvConn* createConn(void* hThrd) { +static SSvrConn* createConn(void* hThrd) { SWorkThrdObj* pThrd = hThrd; - SSrvConn* pConn = (SSrvConn*)taosMemoryCalloc(1, sizeof(SSrvConn)); + SSvrConn* pConn = (SSvrConn*)taosMemoryCalloc(1, sizeof(SSvrConn)); QUEUE_INIT(&pConn->queue); QUEUE_PUSH(&pThrd->conn, &pConn->queue); @@ -785,8 +779,8 @@ static SSrvConn* createConn(void* hThrd) { SExHandle* exh = taosMemoryMalloc(sizeof(SExHandle)); exh->handle = pConn; exh->pThrd = pThrd; - exh->refId = uvAddExHandle(exh); - uvAcquireExHandle(exh->refId); + exh->refId = transAddExHandle(refMgt, exh); + transAcquireExHandle(refMgt, exh->refId); pConn->refId = exh->refId; transRefSrvHandle(pConn); @@ -794,7 +788,7 @@ static SSrvConn* createConn(void* hThrd) { return pConn; } -static void destroyConn(SSrvConn* conn, bool clear) { +static void destroyConn(SSvrConn* conn, bool clear) { if (conn == NULL) { return; } @@ 
-808,34 +802,34 @@ static void destroyConn(SSrvConn* conn, bool clear) { // uv_shutdown(req, (uv_stream_t*)conn->pTcp, uvShutDownCb); } } -static void destroyConnRegArg(SSrvConn* conn) { +static void destroyConnRegArg(SSvrConn* conn) { if (conn->regArg.init == 1) { transFreeMsg(conn->regArg.msg.pCont); conn->regArg.init = 0; } } -static int reallocConnRefHandle(SSrvConn* conn) { - uvReleaseExHandle(conn->refId); - uvRemoveExHandle(conn->refId); +static int reallocConnRefHandle(SSvrConn* conn) { + transReleaseExHandle(refMgt, conn->refId); + transRemoveExHandle(refMgt, conn->refId); // avoid app continue to send msg on invalid handle SExHandle* exh = taosMemoryMalloc(sizeof(SExHandle)); exh->handle = conn; exh->pThrd = conn->hostThrd; - exh->refId = uvAddExHandle(exh); - uvAcquireExHandle(exh->refId); + exh->refId = transAddExHandle(refMgt, exh); + transAcquireExHandle(refMgt, exh->refId); conn->refId = exh->refId; return 0; } static void uvDestroyConn(uv_handle_t* handle) { - SSrvConn* conn = handle->data; + SSvrConn* conn = handle->data; if (conn == NULL) { return; } SWorkThrdObj* thrd = conn->hostThrd; - uvReleaseExHandle(conn->refId); - uvRemoveExHandle(conn->refId); + transReleaseExHandle(refMgt, conn->refId); + transRemoveExHandle(refMgt, conn->refId); tDebug("server conn %p destroy", conn); // uv_timer_stop(&conn->pTimer); @@ -883,8 +877,11 @@ void* transInitServer(uint32_t ip, uint32_t port, char* label, int numOfThreads, srv->port = port; uv_loop_init(srv->loop); - taosThreadOnce(&transModuleInit, uvInitEnv); - transSrvInst++; + // taosThreadOnce(&transModuleInit, uvInitEnv); + int ref = atomic_add_fetch_32(&tranSSvrInst, 1); + if (ref == 1) { + refMgt = transOpenExHandleMgt(50000); + } assert(0 == uv_pipe_init(srv->loop, &srv->pipeListen, 0)); #ifdef WINDOWS @@ -923,7 +920,7 @@ void* transInitServer(uint32_t ip, uint32_t port, char* label, int numOfThreads, } if (false == taosValidIpAndPort(srv->ip, srv->port)) { terrno = TAOS_SYSTEM_ERROR(errno); - tError("invalid ip/port, reason: %s", terrstr()); + tError("invalid ip/port, %d:%d, reason: %s", srv->ip, srv->port, terrstr()); goto End; } if (false == addHandleToAcceptloop(srv)) { @@ -944,44 +941,7 @@ End: return NULL; } -void uvInitEnv() { - uv_os_setenv("UV_TCP_SINGLE_ACCEPT", "1"); - uvOpenExHandleMgt(10000); -} -void uvOpenExHandleMgt(int size) { - // added into once later - exHandlesMgt = taosOpenRef(size, uvDestoryExHandle); -} -void uvCloseExHandleMgt() { - // close ref - taosCloseRef(exHandlesMgt); -} -int64_t uvAddExHandle(void* p) { - // acquire extern handle - return taosAddRef(exHandlesMgt, p); -} -int32_t uvRemoveExHandle(int64_t refId) { - // acquire extern handle - return taosRemoveRef(exHandlesMgt, refId); -} - -SExHandle* uvAcquireExHandle(int64_t refId) { - // acquire extern handle - return (SExHandle*)taosAcquireRef(exHandlesMgt, refId); -} - -int32_t uvReleaseExHandle(int64_t refId) { - // release extern handle - return taosReleaseRef(exHandlesMgt, refId); -} -void uvDestoryExHandle(void* handle) { - if (handle == NULL) { - return; - } - taosMemoryFree(handle); -} - -void uvHandleQuit(SSrvMsg* msg, SWorkThrdObj* thrd) { +void uvHandleQuit(SSvrMsg* msg, SWorkThrdObj* thrd) { thrd->quit = true; if (QUEUE_IS_EMPTY(&thrd->conn)) { uv_walk(thrd->loop, uvWalkCb, NULL); @@ -990,8 +950,8 @@ void uvHandleQuit(SSrvMsg* msg, SWorkThrdObj* thrd) { } taosMemoryFree(msg); } -void uvHandleRelease(SSrvMsg* msg, SWorkThrdObj* thrd) { - SSrvConn* conn = msg->pConn; +void uvHandleRelease(SSvrMsg* msg, SWorkThrdObj* thrd) { + 
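
transInitServer() here replaces the taosThreadOnce init with an atomic instance counter: the first instance in (ref becomes 1) opens the shared handle registry, and transCloseServer() later closes it when the last instance leaves (ref drops to 0). A sketch of that first-in/last-out pattern using C11 atomics in place of atomic_add_fetch_32/atomic_sub_fetch_32:

```c
/* Sketch of the first-in/last-out lifetime management used for the
 * shared handle registry. C11 atomics stand in for the taos wrappers. */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int g_inst = 0;

static void openRegistry(void)  { printf("registry opened\n"); }
static void closeRegistry(void) { printf("registry closed\n"); }

static void instanceInit(void) {
  if (atomic_fetch_add(&g_inst, 1) + 1 == 1) {  // first instance in
    openRegistry();
  }
}
static void instanceCleanup(void) {
  if (atomic_fetch_sub(&g_inst, 1) - 1 == 0) {  // last instance out
    closeRegistry();
  }
}

int main(void) {
  instanceInit();    // opens the registry
  instanceInit();    // no-op
  instanceCleanup(); // no-op
  instanceCleanup(); // closes the registry
  return 0;
}
```

As in the patch, this assumes instance creation and teardown do not race each other: a second init that runs concurrently with the very first could observe ref 2 before the registry has finished opening.
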
SSvrConn* conn = msg->pConn; if (conn->status == ConnAcquire) { reallocConnRefHandle(conn); if (!transQueuePush(&conn->srvMsgs, msg)) { @@ -1004,13 +964,13 @@ void uvHandleRelease(SSrvMsg* msg, SWorkThrdObj* thrd) { } destroySmsg(msg); } -void uvHandleResp(SSrvMsg* msg, SWorkThrdObj* thrd) { +void uvHandleResp(SSvrMsg* msg, SWorkThrdObj* thrd) { // send msg to client tDebug("server conn %p start to send resp (2/2)", msg->pConn); uvStartSendResp(msg); } -void uvHandleRegister(SSrvMsg* msg, SWorkThrdObj* thrd) { - SSrvConn* conn = msg->pConn; +void uvHandleRegister(SSvrMsg* msg, SWorkThrdObj* thrd) { + SSvrConn* conn = msg->pConn; tDebug("server conn %p register brokenlink callback", conn); if (conn->status == ConnAcquire) { if (!transQueuePush(&conn->srvMsgs, msg)) { @@ -1036,12 +996,13 @@ void destroyWorkThrd(SWorkThrdObj* pThrd) { } taosThreadJoin(pThrd->thread, NULL); SRV_RELEASE_UV(pThrd->loop); + TRANS_DESTROY_ASYNC_POOL_MSG(pThrd->asyncPool, SSvrMsg, destroySmsg); transDestroyAsyncPool(pThrd->asyncPool); taosMemoryFree(pThrd->loop); taosMemoryFree(pThrd); } void sendQuitToWorkThrd(SWorkThrdObj* pThrd) { - SSrvMsg* msg = taosMemoryCalloc(1, sizeof(SSrvMsg)); + SSvrMsg* msg = taosMemoryCalloc(1, sizeof(SSvrMsg)); msg->type = Quit; tDebug("server send quit msg to work thread"); transSendAsync(pThrd->asyncPool, &msg->q); @@ -1074,11 +1035,11 @@ void transCloseServer(void* arg) { taosMemoryFree(srv); - transSrvInst--; - if (transSrvInst == 0) { - TdThreadOnce tmpInit = PTHREAD_ONCE_INIT; - memcpy(&transModuleInit, &tmpInit, sizeof(TdThreadOnce)); - uvCloseExHandleMgt(); + int ref = atomic_sub_fetch_32(&tranSSvrInst, 1); + if (ref == 0) { + // TdThreadOnce tmpInit = PTHREAD_ONCE_INIT; + // memcpy(&transModuleInit, &tmpInit, sizeof(TdThreadOnce)); + transCloseExHandleMgt(refMgt); } } @@ -1086,7 +1047,7 @@ void transRefSrvHandle(void* handle) { if (handle == NULL) { return; } - int ref = T_REF_INC((SSrvConn*)handle); + int ref = T_REF_INC((SSvrConn*)handle); tDebug("server conn %p ref count: %d", handle, ref); } @@ -1094,10 +1055,10 @@ void transUnrefSrvHandle(void* handle) { if (handle == NULL) { return; } - int ref = T_REF_DEC((SSrvConn*)handle); + int ref = T_REF_DEC((SSvrConn*)handle); tDebug("server conn %p ref count: %d", handle, ref); if (ref == 0) { - destroyConn((SSrvConn*)handle, true); + destroyConn((SSvrConn*)handle, true); } } @@ -1112,17 +1073,17 @@ void transReleaseSrvHandle(void* handle) { STransMsg tmsg = {.code = 0, .info.handle = exh, .info.ahandle = NULL, .info.refId = refId}; - SSrvMsg* srvMsg = taosMemoryCalloc(1, sizeof(SSrvMsg)); - srvMsg->msg = tmsg; - srvMsg->type = Release; + SSvrMsg* m = taosMemoryCalloc(1, sizeof(SSvrMsg)); + m->msg = tmsg; + m->type = Release; tTrace("server conn %p start to release", exh->handle); - transSendAsync(pThrd->asyncPool, &srvMsg->q); - uvReleaseExHandle(refId); + transSendAsync(pThrd->asyncPool, &m->q); + transReleaseExHandle(refMgt, refId); return; _return1: tTrace("server handle %p failed to send to release handle", exh); - uvReleaseExHandle(refId); + transReleaseExHandle(refMgt, refId); return; _return2: tTrace("server handle %p failed to send to release handle", exh); @@ -1140,17 +1101,17 @@ void transSendResponse(const STransMsg* msg) { SWorkThrdObj* pThrd = exh->pThrd; ASYNC_ERR_JRET(pThrd); - SSrvMsg* srvMsg = taosMemoryCalloc(1, sizeof(SSrvMsg)); - srvMsg->msg = tmsg; - srvMsg->type = Normal; + SSvrMsg* m = taosMemoryCalloc(1, sizeof(SSvrMsg)); + m->msg = tmsg; + m->type = Normal; tDebug("server conn %p start to send resp 
(1/2)", exh->handle); - transSendAsync(pThrd->asyncPool, &srvMsg->q); - uvReleaseExHandle(refId); + transSendAsync(pThrd->asyncPool, &m->q); + transReleaseExHandle(refMgt, refId); return; _return1: tTrace("server handle %p failed to send resp", exh); rpcFreeCont(msg->pCont); - uvReleaseExHandle(refId); + transReleaseExHandle(refMgt, refId); return; _return2: tTrace("server handle %p failed to send resp", exh); @@ -1168,18 +1129,18 @@ void transRegisterMsg(const STransMsg* msg) { SWorkThrdObj* pThrd = exh->pThrd; ASYNC_ERR_JRET(pThrd); - SSrvMsg* srvMsg = taosMemoryCalloc(1, sizeof(SSrvMsg)); - srvMsg->msg = tmsg; - srvMsg->type = Register; + SSvrMsg* m = taosMemoryCalloc(1, sizeof(SSvrMsg)); + m->msg = tmsg; + m->type = Register; tTrace("server conn %p start to register brokenlink callback", exh->handle); - transSendAsync(pThrd->asyncPool, &srvMsg->q); - uvReleaseExHandle(refId); + transSendAsync(pThrd->asyncPool, &m->q); + transReleaseExHandle(refMgt, refId); return; _return1: tTrace("server handle %p failed to send to register brokenlink", exh); rpcFreeCont(msg->pCont); - uvReleaseExHandle(refId); + transReleaseExHandle(refMgt, refId); return; _return2: tTrace("server handle %p failed to send to register brokenlink", exh); @@ -1192,7 +1153,7 @@ int transGetConnInfo(void* thandle, STransHandleInfo* pInfo) { return -1; } SExHandle* ex = thandle; - SSrvConn* pConn = ex->handle; + SSvrConn* pConn = ex->handle; struct sockaddr_in addr = pConn->addr; pInfo->clientIp = (uint32_t)(addr.sin_addr.s_addr); diff --git a/source/libs/wal/src/walWrite.c b/source/libs/wal/src/walWrite.c index d2a43c410708249983295dca44ca06f6f75a2b70..a9a8f8a1f40fbfd6fda3ec43d4fd1bfdf61025dc 100644 --- a/source/libs/wal/src/walWrite.c +++ b/source/libs/wal/src/walWrite.c @@ -18,6 +18,14 @@ #include "tchecksum.h" #include "walInt.h" +void walRestoreFromSnapshot(SWal *pWal, int64_t ver) { + /*pWal->vers.firstVer = -1;*/ + pWal->vers.lastVer = ver; + pWal->vers.commitVer = ver - 1; + pWal->vers.snapshotVer = ver - 1; + pWal->vers.verInSnapshotting = -1; +} + int32_t walCommit(SWal *pWal, int64_t ver) { ASSERT(pWal->vers.commitVer >= pWal->vers.snapshotVer); ASSERT(pWal->vers.commitVer <= pWal->vers.lastVer); diff --git a/source/os/CMakeLists.txt b/source/os/CMakeLists.txt index b6e131d4ccc670f0d3b35e00483f33f072a314e2..e15627fe6682bb7a94f96d4e7e341a3b3b4c0637 100644 --- a/source/os/CMakeLists.txt +++ b/source/os/CMakeLists.txt @@ -10,7 +10,11 @@ target_include_directories( PUBLIC "${TD_SOURCE_DIR}/contrib/msvcregex" ) # iconv -find_path(IconvApiIncludes iconv.h PATHS) +if(TD_WINDOWS) + find_path(IconvApiIncludes iconv.h "${TD_SOURCE_DIR}/contrib/iconv") +else() + find_path(IconvApiIncludes iconv.h PATHS) +endif(TD_WINDOWS) if(NOT IconvApiIncludes) add_definitions(-DDISALLOW_NCHAR_WITHOUT_ICONV) endif () diff --git a/source/os/src/osDir.c b/source/os/src/osDir.c index 75797048cad5f6290e73edf467c6cd98b197af7e..cfb7b8a0e255cf32301984f9135f2d4711144d74 100644 --- a/source/os/src/osDir.c +++ b/source/os/src/osDir.c @@ -204,7 +204,7 @@ void taosRemoveOldFiles(const char *dirname, int32_t keepDays) { int32_t taosExpandDir(const char *dirname, char *outname, int32_t maxlen) { wordexp_t full_path; if (0 != wordexp(dirname, &full_path, 0)) { - // printf("failed to expand path:%s since %s", dirname, strerror(errno)); + printf("failed to expand path:%s since %s", dirname, strerror(errno)); wordfree(&full_path); return -1; } diff --git a/source/os/src/osEnv.c b/source/os/src/osEnv.c index 
6746025f78be619868e53267588f8f4defe1d5cb..6ae3d8a0c0d655ae6be8bf1a23b36309962b7a65 100644 --- a/source/os/src/osEnv.c +++ b/source/os/src/osEnv.c @@ -70,11 +70,11 @@ void osDefaultInit() { #elif defined(_TD_DARWIN_64) if (configDir[0] == 0) { - strcpy(configDir, "/tmp/taosd"); + strcpy(configDir, "/usr/local/etc/taos"); } strcpy(tsDataDir, "/usr/local/var/lib/taos"); strcpy(tsLogDir, "/usr/local/var/log/taos"); - strcpy(tsTempDir, "/usr/local/etc/taos"); + strcpy(tsTempDir, "/tmp/taosd"); strcpy(tsOsName, "Darwin"); #else diff --git a/source/os/src/osTimezone.c b/source/os/src/osTimezone.c index 872d8e740c651cd494efbe5c783390637e95efe7..dc9527c2f2a052cf2d87b6b6c374019139390beb 100644 --- a/source/os/src/osTimezone.c +++ b/source/os/src/osTimezone.c @@ -32,6 +32,700 @@ #pragma warning(disable : 4091) #include #pragma warning(pop) + +char *win_tz[139][2]={{"China Standard Time", "Asia/Shanghai"}, + {"AUS Central Standard Time", "Australia/Darwin"}, + {"AUS Eastern Standard Time", "Australia/Sydney"}, + {"Afghanistan Standard Time", "Asia/Kabul"}, + {"Alaskan Standard Time", "America/Anchorage"}, + {"Aleutian Standard Time", "America/Adak"}, + {"Altai Standard Time", "Asia/Barnaul"}, + {"Arab Standard Time", "Asia/Riyadh"}, + {"Arabian Standard Time", "Asia/Dubai"}, + {"Arabic Standard Time", "Asia/Baghdad"}, + {"Argentina Standard Time", "America/Buenos_Aires"}, + {"Astrakhan Standard Time", "Europe/Astrakhan"}, + {"Atlantic Standard Time", "America/Halifax"}, + {"Aus Central W. Standard Time", "Australia/Eucla"}, + {"Azerbaijan Standard Time", "Asia/Baku"}, + {"Azores Standard Time", "Atlantic/Azores"}, + {"Bahia Standard Time", "America/Bahia"}, + {"Bangladesh Standard Time", "Asia/Dhaka"}, + {"Belarus Standard Time", "Europe/Minsk"}, + {"Bougainville Standard Time", "Pacific/Bougainville"}, + {"Canada Central Standard Time", "America/Regina"}, + {"Cape Verde Standard Time", "Atlantic/Cape_Verde"}, + {"Caucasus Standard Time", "Asia/Yerevan"}, + {"Cen. Australia Standard Time", "Australia/Adelaide"}, + {"Central America Standard Time", "America/Guatemala"}, + {"Central Asia Standard Time", "Asia/Almaty"}, + {"Central Brazilian Standard Time", "America/Cuiaba"}, + {"Central Europe Standard Time", "Europe/Budapest"}, + {"Central European Standard Time", "Europe/Warsaw"}, + {"Central Pacific Standard Time", "Pacific/Guadalcanal"}, + {"Central Standard Time", "America/Chicago"}, + {"Central Standard Time (Mexico)", "America/Mexico_City"}, + {"Chatham Islands Standard Time", "Pacific/Chatham"}, + {"Cuba Standard Time", "America/Havana"}, + {"Dateline Standard Time", "Etc/GMT+12"}, + {"E. Africa Standard Time", "Africa/Nairobi"}, + {"E. Australia Standard Time", "Australia/Brisbane"}, + {"E. Europe Standard Time", "Europe/Chisinau"}, + {"E. 
South America Standard Time", "America/Sao_Paulo"}, + {"Easter Island Standard Time", "Pacific/Easter"}, + {"Eastern Standard Time", "America/New_York"}, + {"Eastern Standard Time (Mexico)", "America/Cancun"}, + {"Egypt Standard Time", "Africa/Cairo"}, + {"Ekaterinburg Standard Time", "Asia/Yekaterinburg"}, + {"FLE Standard Time", "Europe/Kiev"}, + {"Fiji Standard Time", "Pacific/Fiji"}, + {"GMT Standard Time", "Europe/London"}, + {"GTB Standard Time", "Europe/Bucharest"}, + {"Georgian Standard Time", "Asia/Tbilisi"}, + {"Greenland Standard Time", "America/Godthab"}, + {"Greenwich Standard Time", "Atlantic/Reykjavik"}, + {"Haiti Standard Time", "America/Port-au-Prince"}, + {"Hawaiian Standard Time", "Pacific/Honolulu"}, + {"India Standard Time", "Asia/Calcutta"}, + {"Iran Standard Time", "Asia/Tehran"}, + {"Israel Standard Time", "Asia/Jerusalem"}, + {"Jordan Standard Time", "Asia/Amman"}, + {"Kaliningrad Standard Time", "Europe/Kaliningrad"}, + {"Korea Standard Time", "Asia/Seoul"}, + {"Libya Standard Time", "Africa/Tripoli"}, + {"Line Islands Standard Time", "Pacific/Kiritimati"}, + {"Lord Howe Standard Time", "Australia/Lord_Howe"}, + {"Magadan Standard Time", "Asia/Magadan"}, + {"Magallanes Standard Time", "America/Punta_Arenas"}, + {"Marquesas Standard Time", "Pacific/Marquesas"}, + {"Mauritius Standard Time", "Indian/Mauritius"}, + {"Middle East Standard Time", "Asia/Beirut"}, + {"Montevideo Standard Time", "America/Montevideo"}, + {"Morocco Standard Time", "Africa/Casablanca"}, + {"Mountain Standard Time", "America/Denver"}, + {"Mountain Standard Time (Mexico)", "America/Chihuahua"}, + {"Myanmar Standard Time", "Asia/Rangoon"}, + {"N. Central Asia Standard Time", "Asia/Novosibirsk"}, + {"Namibia Standard Time", "Africa/Windhoek"}, + {"Nepal Standard Time", "Asia/Katmandu"}, + {"New Zealand Standard Time", "Pacific/Auckland"}, + {"Newfoundland Standard Time", "America/St_Johns"}, + {"Norfolk Standard Time", "Pacific/Norfolk"}, + {"North Asia East Standard Time", "Asia/Irkutsk"}, + {"North Asia Standard Time", "Asia/Krasnoyarsk"}, + {"North Korea Standard Time", "Asia/Pyongyang"}, + {"Omsk Standard Time", "Asia/Omsk"}, + {"Pacific SA Standard Time", "America/Santiago"}, + {"Pacific Standard Time", "America/Los_Angeles"}, + {"Pacific Standard Time (Mexico)", "America/Tijuana"}, + {"Pakistan Standard Time", "Asia/Karachi"}, + {"Paraguay Standard Time", "America/Asuncion"}, + {"Qyzylorda Standard Time", "Asia/Qyzylorda"}, + {"Romance Standard Time", "Europe/Paris"}, + {"Russia Time Zone 10", "Asia/Srednekolymsk"}, + {"Russia Time Zone 11", "Asia/Kamchatka"}, + {"Russia Time Zone 3", "Europe/Samara"}, + {"Russian Standard Time", "Europe/Moscow"}, + {"SA Eastern Standard Time", "America/Cayenne"}, + {"SA Pacific Standard Time", "America/Bogota"}, + {"SA Western Standard Time", "America/La_Paz"}, + {"SE Asia Standard Time", "Asia/Bangkok"}, + {"Saint Pierre Standard Time", "America/Miquelon"}, + {"Sakhalin Standard Time", "Asia/Sakhalin"}, + {"Samoa Standard Time", "Pacific/Apia"}, + {"Sao Tome Standard Time", "Africa/Sao_Tome"}, + {"Saratov Standard Time", "Europe/Saratov"}, + {"Singapore Standard Time", "Asia/Singapore"}, + {"South Africa Standard Time", "Africa/Johannesburg"}, + {"South Sudan Standard Time", "Africa/Juba"}, + {"Sri Lanka Standard Time", "Asia/Colombo"}, + {"Sudan Standard Time", "Africa/Khartoum"}, + {"Syria Standard Time", "Asia/Damascus"}, + {"Taipei Standard Time", "Asia/Taipei"}, + {"Tasmania Standard Time", "Australia/Hobart"}, + {"Tocantins Standard Time", 
"America/Araguaina"}, + {"Tokyo Standard Time", "Asia/Tokyo"}, + {"Tomsk Standard Time", "Asia/Tomsk"}, + {"Tonga Standard Time", "Pacific/Tongatapu"}, + {"Transbaikal Standard Time", "Asia/Chita"}, + {"Turkey Standard Time", "Europe/Istanbul"}, + {"Turks And Caicos Standard Time", "America/Grand_Turk"}, + {"US Eastern Standard Time", "America/Indianapolis"}, + {"US Mountain Standard Time", "America/Phoenix"}, + {"UTC", "Etc/UTC"}, + {"UTC+12", "Etc/GMT-12"}, + {"UTC+13", "Etc/GMT-13"}, + {"UTC-02", "Etc/GMT+2"}, + {"UTC-08", "Etc/GMT+8"}, + {"UTC-09", "Etc/GMT+9"}, + {"UTC-11", "Etc/GMT+11"}, + {"Ulaanbaatar Standard Time", "Asia/Ulaanbaatar"}, + {"Venezuela Standard Time", "America/Caracas"}, + {"Vladivostok Standard Time", "Asia/Vladivostok"}, + {"Volgograd Standard Time", "Europe/Volgograd"}, + {"W. Australia Standard Time", "Australia/Perth"}, + {"W. Central Africa Standard Time", "Africa/Lagos"}, + {"W. Europe Standard Time", "Europe/Berlin"}, + {"W. Mongolia Standard Time", "Asia/Hovd"}, + {"West Asia Standard Time", "Asia/Tashkent"}, + {"West Bank Standard Time", "Asia/Hebron"}, + {"West Pacific Standard Time", "Pacific/Port_Moresby"}, + {"Yakutsk Standard Time", "Asia/Yakutsk"}, + {"Yukon Standard Time", "America/Whitehorse"}}; +char *tz_win[554][2]={{"Asia/Shanghai", "China Standard Time"}, +{"Africa/Abidjan", "Greenwich Standard Time"}, +{"Africa/Accra", "Greenwich Standard Time"}, +{"Africa/Addis_Ababa", "E. Africa Standard Time"}, +{"Africa/Algiers", "W. Central Africa Standard Time"}, +{"Africa/Asmera", "E. Africa Standard Time"}, +{"Africa/Bamako", "Greenwich Standard Time"}, +{"Africa/Bangui", "W. Central Africa Standard Time"}, +{"Africa/Banjul", "Greenwich Standard Time"}, +{"Africa/Bissau", "Greenwich Standard Time"}, +{"Africa/Blantyre", "South Africa Standard Time"}, +{"Africa/Brazzaville", "W. Central Africa Standard Time"}, +{"Africa/Bujumbura", "South Africa Standard Time"}, +{"Africa/Cairo", "Egypt Standard Time"}, +{"Africa/Casablanca", "Morocco Standard Time"}, +{"Africa/Ceuta", "Romance Standard Time"}, +{"Africa/Conakry", "Greenwich Standard Time"}, +{"Africa/Dakar", "Greenwich Standard Time"}, +{"Africa/Dar_es_Salaam", "E. Africa Standard Time"}, +{"Africa/Djibouti", "E. Africa Standard Time"}, +{"Africa/Douala", "W. Central Africa Standard Time"}, +{"Africa/El_Aaiun", "Morocco Standard Time"}, +{"Africa/Freetown", "Greenwich Standard Time"}, +{"Africa/Gaborone", "South Africa Standard Time"}, +{"Africa/Harare", "South Africa Standard Time"}, +{"Africa/Johannesburg", "South Africa Standard Time"}, +{"Africa/Juba", "South Sudan Standard Time"}, +{"Africa/Kampala", "E. Africa Standard Time"}, +{"Africa/Khartoum", "Sudan Standard Time"}, +{"Africa/Kigali", "South Africa Standard Time"}, +{"Africa/Kinshasa", "W. Central Africa Standard Time"}, +{"Africa/Lagos", "W. Central Africa Standard Time"}, +{"Africa/Libreville", "W. Central Africa Standard Time"}, +{"Africa/Lome", "Greenwich Standard Time"}, +{"Africa/Luanda", "W. Central Africa Standard Time"}, +{"Africa/Lubumbashi", "South Africa Standard Time"}, +{"Africa/Lusaka", "South Africa Standard Time"}, +{"Africa/Malabo", "W. Central Africa Standard Time"}, +{"Africa/Maputo", "South Africa Standard Time"}, +{"Africa/Maseru", "South Africa Standard Time"}, +{"Africa/Mbabane", "South Africa Standard Time"}, +{"Africa/Mogadishu", "E. Africa Standard Time"}, +{"Africa/Monrovia", "Greenwich Standard Time"}, +{"Africa/Nairobi", "E. Africa Standard Time"}, +{"Africa/Ndjamena", "W. 
Central Africa Standard Time"}, +{"Africa/Niamey", "W. Central Africa Standard Time"}, +{"Africa/Nouakchott", "Greenwich Standard Time"}, +{"Africa/Ouagadougou", "Greenwich Standard Time"}, +{"Africa/Porto-Novo", "W. Central Africa Standard Time"}, +{"Africa/Sao_Tome", "Sao Tome Standard Time"}, +{"Africa/Timbuktu", "Greenwich Standard Time"}, +{"Africa/Tripoli", "Libya Standard Time"}, +{"Africa/Tunis", "W. Central Africa Standard Time"}, +{"Africa/Windhoek", "Namibia Standard Time"}, +{"America/Adak", "Aleutian Standard Time"}, +{"America/Anchorage", "Alaskan Standard Time"}, +{"America/Anguilla", "SA Western Standard Time"}, +{"America/Antigua", "SA Western Standard Time"}, +{"America/Araguaina", "Tocantins Standard Time"}, +{"America/Argentina/La_Rioja", "Argentina Standard Time"}, +{"America/Argentina/Rio_Gallegos", "Argentina Standard Time"}, +{"America/Argentina/Salta", "Argentina Standard Time"}, +{"America/Argentina/San_Juan", "Argentina Standard Time"}, +{"America/Argentina/San_Luis", "Argentina Standard Time"}, +{"America/Argentina/Tucuman", "Argentina Standard Time"}, +{"America/Argentina/Ushuaia", "Argentina Standard Time"}, +{"America/Aruba", "SA Western Standard Time"}, +{"America/Asuncion", "Paraguay Standard Time"}, +{"America/Atka", "Aleutian Standard Time"}, +{"America/Bahia", "Bahia Standard Time"}, +{"America/Bahia_Banderas", "Central Standard Time (Mexico)"}, +{"America/Barbados", "SA Western Standard Time"}, +{"America/Belem", "SA Eastern Standard Time"}, +{"America/Belize", "Central America Standard Time"}, +{"America/Blanc-Sablon", "SA Western Standard Time"}, +{"America/Boa_Vista", "SA Western Standard Time"}, +{"America/Bogota", "SA Pacific Standard Time"}, +{"America/Boise", "Mountain Standard Time"}, +{"America/Buenos_Aires", "Argentina Standard Time"}, +{"America/Cambridge_Bay", "Mountain Standard Time"}, +{"America/Campo_Grande", "Central Brazilian Standard Time"}, +{"America/Cancun", "Eastern Standard Time (Mexico)"}, +{"America/Caracas", "Venezuela Standard Time"}, +{"America/Catamarca", "Argentina Standard Time"}, +{"America/Cayenne", "SA Eastern Standard Time"}, +{"America/Cayman", "SA Pacific Standard Time"}, +{"America/Chicago", "Central Standard Time"}, +{"America/Chihuahua", "Mountain Standard Time (Mexico)"}, +{"America/Coral_Harbour", "SA Pacific Standard Time"}, +{"America/Cordoba", "Argentina Standard Time"}, +{"America/Costa_Rica", "Central America Standard Time"}, +{"America/Creston", "US Mountain Standard Time"}, +{"America/Cuiaba", "Central Brazilian Standard Time"}, +{"America/Curacao", "SA Western Standard Time"}, +{"America/Danmarkshavn", "Greenwich Standard Time"}, +{"America/Dawson", "Yukon Standard Time"}, +{"America/Dawson_Creek", "US Mountain Standard Time"}, +{"America/Denver", "Mountain Standard Time"}, +{"America/Detroit", "Eastern Standard Time"}, +{"America/Dominica", "SA Western Standard Time"}, +{"America/Edmonton", "Mountain Standard Time"}, +{"America/Eirunepe", "SA Pacific Standard Time"}, +{"America/El_Salvador", "Central America Standard Time"}, +{"America/Ensenada", "Pacific Standard Time (Mexico)"}, +{"America/Fort_Nelson", "US Mountain Standard Time"}, +{"America/Fortaleza", "SA Eastern Standard Time"}, +{"America/Glace_Bay", "Atlantic Standard Time"}, +{"America/Godthab", "Greenland Standard Time"}, +{"America/Goose_Bay", "Atlantic Standard Time"}, +{"America/Grand_Turk", "Turks And Caicos Standard Time"}, +{"America/Grenada", "SA Western Standard Time"}, +{"America/Guadeloupe", "SA Western Standard Time"}, 
+{"America/Guatemala", "Central America Standard Time"}, +{"America/Guayaquil", "SA Pacific Standard Time"}, +{"America/Guyana", "SA Western Standard Time"}, +{"America/Halifax", "Atlantic Standard Time"}, +{"America/Havana", "Cuba Standard Time"}, +{"America/Hermosillo", "US Mountain Standard Time"}, +{"America/Indiana/Knox", "Central Standard Time"}, +{"America/Indiana/Marengo", "US Eastern Standard Time"}, +{"America/Indiana/Petersburg", "Eastern Standard Time"}, +{"America/Indiana/Tell_City", "Central Standard Time"}, +{"America/Indiana/Vevay", "US Eastern Standard Time"}, +{"America/Indiana/Vincennes", "Eastern Standard Time"}, +{"America/Indiana/Winamac", "Eastern Standard Time"}, +{"America/Indianapolis", "US Eastern Standard Time"}, +{"America/Inuvik", "Mountain Standard Time"}, +{"America/Iqaluit", "Eastern Standard Time"}, +{"America/Jamaica", "SA Pacific Standard Time"}, +{"America/Jujuy", "Argentina Standard Time"}, +{"America/Juneau", "Alaskan Standard Time"}, +{"America/Kentucky/Monticello", "Eastern Standard Time"}, +{"America/Knox_IN", "Central Standard Time"}, +{"America/Kralendijk", "SA Western Standard Time"}, +{"America/La_Paz", "SA Western Standard Time"}, +{"America/Lima", "SA Pacific Standard Time"}, +{"America/Los_Angeles", "Pacific Standard Time"}, +{"America/Louisville", "Eastern Standard Time"}, +{"America/Lower_Princes", "SA Western Standard Time"}, +{"America/Maceio", "SA Eastern Standard Time"}, +{"America/Managua", "Central America Standard Time"}, +{"America/Manaus", "SA Western Standard Time"}, +{"America/Marigot", "SA Western Standard Time"}, +{"America/Martinique", "SA Western Standard Time"}, +{"America/Matamoros", "Central Standard Time"}, +{"America/Mazatlan", "Mountain Standard Time (Mexico)"}, +{"America/Mendoza", "Argentina Standard Time"}, +{"America/Menominee", "Central Standard Time"}, +{"America/Merida", "Central Standard Time (Mexico)"}, +{"America/Metlakatla", "Alaskan Standard Time"}, +{"America/Mexico_City", "Central Standard Time (Mexico)"}, +{"America/Miquelon", "Saint Pierre Standard Time"}, +{"America/Moncton", "Atlantic Standard Time"}, +{"America/Monterrey", "Central Standard Time (Mexico)"}, +{"America/Montevideo", "Montevideo Standard Time"}, +{"America/Montreal", "Eastern Standard Time"}, +{"America/Montserrat", "SA Western Standard Time"}, +{"America/Nassau", "Eastern Standard Time"}, +{"America/New_York", "Eastern Standard Time"}, +{"America/Nipigon", "Eastern Standard Time"}, +{"America/Nome", "Alaskan Standard Time"}, +{"America/Noronha", "UTC-02"}, +{"America/North_Dakota/Beulah", "Central Standard Time"}, +{"America/North_Dakota/Center", "Central Standard Time"}, +{"America/North_Dakota/New_Salem", "Central Standard Time"}, +{"America/Ojinaga", "Mountain Standard Time"}, +{"America/Panama", "SA Pacific Standard Time"}, +{"America/Pangnirtung", "Eastern Standard Time"}, +{"America/Paramaribo", "SA Eastern Standard Time"}, +{"America/Phoenix", "US Mountain Standard Time"}, +{"America/Port-au-Prince", "Haiti Standard Time"}, +{"America/Port_of_Spain", "SA Western Standard Time"}, +{"America/Porto_Acre", "SA Pacific Standard Time"}, +{"America/Porto_Velho", "SA Western Standard Time"}, +{"America/Puerto_Rico", "SA Western Standard Time"}, +{"America/Punta_Arenas", "Magallanes Standard Time"}, +{"America/Rainy_River", "Central Standard Time"}, +{"America/Rankin_Inlet", "Central Standard Time"}, +{"America/Recife", "SA Eastern Standard Time"}, +{"America/Regina", "Canada Central Standard Time"}, +{"America/Resolute", "Central 
Standard Time"}, +{"America/Rio_Branco", "SA Pacific Standard Time"}, +{"America/Santa_Isabel", "Pacific Standard Time (Mexico)"}, +{"America/Santarem", "SA Eastern Standard Time"}, +{"America/Santiago", "Pacific SA Standard Time"}, +{"America/Santo_Domingo", "SA Western Standard Time"}, +{"America/Sao_Paulo", "E. South America Standard Time"}, +{"America/Scoresbysund", "Azores Standard Time"}, +{"America/Shiprock", "Mountain Standard Time"}, +{"America/Sitka", "Alaskan Standard Time"}, +{"America/St_Barthelemy", "SA Western Standard Time"}, +{"America/St_Johns", "Newfoundland Standard Time"}, +{"America/St_Kitts", "SA Western Standard Time"}, +{"America/St_Lucia", "SA Western Standard Time"}, +{"America/St_Thomas", "SA Western Standard Time"}, +{"America/St_Vincent", "SA Western Standard Time"}, +{"America/Swift_Current", "Canada Central Standard Time"}, +{"America/Tegucigalpa", "Central America Standard Time"}, +{"America/Thule", "Atlantic Standard Time"}, +{"America/Thunder_Bay", "Eastern Standard Time"}, +{"America/Tijuana", "Pacific Standard Time (Mexico)"}, +{"America/Toronto", "Eastern Standard Time"}, +{"America/Tortola", "SA Western Standard Time"}, +{"America/Vancouver", "Pacific Standard Time"}, +{"America/Virgin", "SA Western Standard Time"}, +{"America/Whitehorse", "Yukon Standard Time"}, +{"America/Winnipeg", "Central Standard Time"}, +{"America/Yakutat", "Alaskan Standard Time"}, +{"America/Yellowknife", "Mountain Standard Time"}, +{"Antarctica/Casey", "Central Pacific Standard Time"}, +{"Antarctica/Davis", "SE Asia Standard Time"}, +{"Antarctica/DumontDUrville", "West Pacific Standard Time"}, +{"Antarctica/Macquarie", "Tasmania Standard Time"}, +{"Antarctica/Mawson", "West Asia Standard Time"}, +{"Antarctica/McMurdo", "New Zealand Standard Time"}, +{"Antarctica/Palmer", "SA Eastern Standard Time"}, +{"Antarctica/Rothera", "SA Eastern Standard Time"}, +{"Antarctica/South_Pole", "New Zealand Standard Time"}, +{"Antarctica/Syowa", "E. Africa Standard Time"}, +{"Antarctica/Vostok", "Central Asia Standard Time"}, +{"Arctic/Longyearbyen", "W. 
Europe Standard Time"}, +{"Asia/Aden", "Arab Standard Time"}, +{"Asia/Almaty", "Central Asia Standard Time"}, +{"Asia/Amman", "Jordan Standard Time"}, +{"Asia/Anadyr", "Russia Time Zone 11"}, +{"Asia/Aqtau", "West Asia Standard Time"}, +{"Asia/Aqtobe", "West Asia Standard Time"}, +{"Asia/Ashgabat", "West Asia Standard Time"}, +{"Asia/Ashkhabad", "West Asia Standard Time"}, +{"Asia/Atyrau", "West Asia Standard Time"}, +{"Asia/Baghdad", "Arabic Standard Time"}, +{"Asia/Bahrain", "Arab Standard Time"}, +{"Asia/Baku", "Azerbaijan Standard Time"}, +{"Asia/Bangkok", "SE Asia Standard Time"}, +{"Asia/Barnaul", "Altai Standard Time"}, +{"Asia/Beirut", "Middle East Standard Time"}, +{"Asia/Bishkek", "Central Asia Standard Time"}, +{"Asia/Brunei", "Singapore Standard Time"}, +{"Asia/Calcutta", "India Standard Time"}, +{"Asia/Chita", "Transbaikal Standard Time"}, +{"Asia/Choibalsan", "Ulaanbaatar Standard Time"}, +{"Asia/Chongqing", "China Standard Time"}, +{"Asia/Chungking", "China Standard Time"}, +{"Asia/Colombo", "Sri Lanka Standard Time"}, +{"Asia/Dacca", "Bangladesh Standard Time"}, +{"Asia/Damascus", "Syria Standard Time"}, +{"Asia/Dhaka", "Bangladesh Standard Time"}, +{"Asia/Dili", "Tokyo Standard Time"}, +{"Asia/Dubai", "Arabian Standard Time"}, +{"Asia/Dushanbe", "West Asia Standard Time"}, +{"Asia/Famagusta", "GTB Standard Time"}, +{"Asia/Gaza", "West Bank Standard Time"}, +{"Asia/Harbin", "China Standard Time"}, +{"Asia/Hebron", "West Bank Standard Time"}, +{"Asia/Hong_Kong", "China Standard Time"}, +{"Asia/Hovd", "W. Mongolia Standard Time"}, +{"Asia/Irkutsk", "North Asia East Standard Time"}, +{"Asia/Jakarta", "SE Asia Standard Time"}, +{"Asia/Jayapura", "Tokyo Standard Time"}, +{"Asia/Jerusalem", "Israel Standard Time"}, +{"Asia/Kabul", "Afghanistan Standard Time"}, +{"Asia/Kamchatka", "Russia Time Zone 11"}, +{"Asia/Karachi", "Pakistan Standard Time"}, +{"Asia/Kashgar", "Central Asia Standard Time"}, +{"Asia/Katmandu", "Nepal Standard Time"}, +{"Asia/Khandyga", "Yakutsk Standard Time"}, +{"Asia/Krasnoyarsk", "North Asia Standard Time"}, +{"Asia/Kuala_Lumpur", "Singapore Standard Time"}, +{"Asia/Kuching", "Singapore Standard Time"}, +{"Asia/Kuwait", "Arab Standard Time"}, +{"Asia/Macao", "China Standard Time"}, +{"Asia/Macau", "China Standard Time"}, +{"Asia/Magadan", "Magadan Standard Time"}, +{"Asia/Makassar", "Singapore Standard Time"}, +{"Asia/Manila", "Singapore Standard Time"}, +{"Asia/Muscat", "Arabian Standard Time"}, +{"Asia/Nicosia", "GTB Standard Time"}, +{"Asia/Novokuznetsk", "North Asia Standard Time"}, +{"Asia/Novosibirsk", "N. 
Central Asia Standard Time"}, +{"Asia/Omsk", "Omsk Standard Time"}, +{"Asia/Oral", "West Asia Standard Time"}, +{"Asia/Phnom_Penh", "SE Asia Standard Time"}, +{"Asia/Pontianak", "SE Asia Standard Time"}, +{"Asia/Pyongyang", "North Korea Standard Time"}, +{"Asia/Qatar", "Arab Standard Time"}, +{"Asia/Qostanay", "Central Asia Standard Time"}, +{"Asia/Qyzylorda", "Qyzylorda Standard Time"}, +{"Asia/Rangoon", "Myanmar Standard Time"}, +{"Asia/Riyadh", "Arab Standard Time"}, +{"Asia/Saigon", "SE Asia Standard Time"}, +{"Asia/Sakhalin", "Sakhalin Standard Time"}, +{"Asia/Samarkand", "West Asia Standard Time"}, +{"Asia/Seoul", "Korea Standard Time"}, +{"Asia/Singapore", "Singapore Standard Time"}, +{"Asia/Srednekolymsk", "Russia Time Zone 10"}, +{"Asia/Taipei", "Taipei Standard Time"}, +{"Asia/Tashkent", "West Asia Standard Time"}, +{"Asia/Tbilisi", "Georgian Standard Time"}, +{"Asia/Tehran", "Iran Standard Time"}, +{"Asia/Tel_Aviv", "Israel Standard Time"}, +{"Asia/Thimbu", "Bangladesh Standard Time"}, +{"Asia/Thimphu", "Bangladesh Standard Time"}, +{"Asia/Tokyo", "Tokyo Standard Time"}, +{"Asia/Tomsk", "Tomsk Standard Time"}, +{"Asia/Ujung_Pandang", "Singapore Standard Time"}, +{"Asia/Ulaanbaatar", "Ulaanbaatar Standard Time"}, +{"Asia/Ulan_Bator", "Ulaanbaatar Standard Time"}, +{"Asia/Urumqi", "Central Asia Standard Time"}, +{"Asia/Ust-Nera", "Vladivostok Standard Time"}, +{"Asia/Vientiane", "SE Asia Standard Time"}, +{"Asia/Vladivostok", "Vladivostok Standard Time"}, +{"Asia/Yakutsk", "Yakutsk Standard Time"}, +{"Asia/Yekaterinburg", "Ekaterinburg Standard Time"}, +{"Asia/Yerevan", "Caucasus Standard Time"}, +{"Atlantic/Azores", "Azores Standard Time"}, +{"Atlantic/Bermuda", "Atlantic Standard Time"}, +{"Atlantic/Canary", "GMT Standard Time"}, +{"Atlantic/Cape_Verde", "Cape Verde Standard Time"}, +{"Atlantic/Faeroe", "GMT Standard Time"}, +{"Atlantic/Jan_Mayen", "W. Europe Standard Time"}, +{"Atlantic/Madeira", "GMT Standard Time"}, +{"Atlantic/Reykjavik", "Greenwich Standard Time"}, +{"Atlantic/South_Georgia", "UTC-02"}, +{"Atlantic/St_Helena", "Greenwich Standard Time"}, +{"Atlantic/Stanley", "SA Eastern Standard Time"}, +{"Australia/ACT", "AUS Eastern Standard Time"}, +{"Australia/Adelaide", "Cen. Australia Standard Time"}, +{"Australia/Brisbane", "E. Australia Standard Time"}, +{"Australia/Broken_Hill", "Cen. Australia Standard Time"}, +{"Australia/Canberra", "AUS Eastern Standard Time"}, +{"Australia/Currie", "Tasmania Standard Time"}, +{"Australia/Darwin", "AUS Central Standard Time"}, +{"Australia/Eucla", "Aus Central W. Standard Time"}, +{"Australia/Hobart", "Tasmania Standard Time"}, +{"Australia/LHI", "Lord Howe Standard Time"}, +{"Australia/Lindeman", "E. Australia Standard Time"}, +{"Australia/Lord_Howe", "Lord Howe Standard Time"}, +{"Australia/Melbourne", "AUS Eastern Standard Time"}, +{"Australia/NSW", "AUS Eastern Standard Time"}, +{"Australia/North", "AUS Central Standard Time"}, +{"Australia/Perth", "W. Australia Standard Time"}, +{"Australia/Queensland", "E. Australia Standard Time"}, +{"Australia/South", "Cen. Australia Standard Time"}, +{"Australia/Sydney", "AUS Eastern Standard Time"}, +{"Australia/Tasmania", "Tasmania Standard Time"}, +{"Australia/Victoria", "AUS Eastern Standard Time"}, +{"Australia/West", "W. Australia Standard Time"}, +{"Australia/Yancowinna", "Cen. Australia Standard Time"}, +{"Brazil/Acre", "SA Pacific Standard Time"}, +{"Brazil/DeNoronha", "UTC-02"}, +{"Brazil/East", "E. 
South America Standard Time"}, +{"Brazil/West", "SA Western Standard Time"}, +{"CST6CDT", "Central Standard Time"}, +{"Canada/Atlantic", "Atlantic Standard Time"}, +{"Canada/Central", "Central Standard Time"}, +{"Canada/Eastern", "Eastern Standard Time"}, +{"Canada/Mountain", "Mountain Standard Time"}, +{"Canada/Newfoundland", "Newfoundland Standard Time"}, +{"Canada/Pacific", "Pacific Standard Time"}, +{"Canada/Saskatchewan", "Canada Central Standard Time"}, +{"Canada/Yukon", "Yukon Standard Time"}, +{"Chile/Continental", "Pacific SA Standard Time"}, +{"Chile/EasterIsland", "Easter Island Standard Time"}, +{"Cuba", "Cuba Standard Time"}, +{"EST5EDT", "Eastern Standard Time"}, +{"Egypt", "Egypt Standard Time"}, +{"Eire", "GMT Standard Time"}, +{"Etc/GMT", "UTC"}, +{"Etc/GMT+1", "Cape Verde Standard Time"}, +{"Etc/GMT+10", "Hawaiian Standard Time"}, +{"Etc/GMT+11", "UTC-11"}, +{"Etc/GMT+12", "Dateline Standard Time"}, +{"Etc/GMT+2", "UTC-02"}, +{"Etc/GMT+3", "SA Eastern Standard Time"}, +{"Etc/GMT+4", "SA Western Standard Time"}, +{"Etc/GMT+5", "SA Pacific Standard Time"}, +{"Etc/GMT+6", "Central America Standard Time"}, +{"Etc/GMT+7", "US Mountain Standard Time"}, +{"Etc/GMT+8", "UTC-08"}, +{"Etc/GMT+9", "UTC-09"}, +{"Etc/GMT-1", "W. Central Africa Standard Time"}, +{"Etc/GMT-10", "West Pacific Standard Time"}, +{"Etc/GMT-11", "Central Pacific Standard Time"}, +{"Etc/GMT-12", "UTC+12"}, +{"Etc/GMT-13", "UTC+13"}, +{"Etc/GMT-14", "Line Islands Standard Time"}, +{"Etc/GMT-2", "South Africa Standard Time"}, +{"Etc/GMT-3", "E. Africa Standard Time"}, +{"Etc/GMT-4", "Arabian Standard Time"}, +{"Etc/GMT-5", "West Asia Standard Time"}, +{"Etc/GMT-6", "Central Asia Standard Time"}, +{"Etc/GMT-7", "SE Asia Standard Time"}, +{"Etc/GMT-8", "Singapore Standard Time"}, +{"Etc/GMT-9", "Tokyo Standard Time"}, +{"Etc/UCT", "UTC"}, +{"Etc/UTC", "UTC"}, +{"Europe/Amsterdam", "W. Europe Standard Time"}, +{"Europe/Andorra", "W. Europe Standard Time"}, +{"Europe/Astrakhan", "Astrakhan Standard Time"}, +{"Europe/Athens", "GTB Standard Time"}, +{"Europe/Belfast", "GMT Standard Time"}, +{"Europe/Belgrade", "Central Europe Standard Time"}, +{"Europe/Berlin", "W. Europe Standard Time"}, +{"Europe/Bratislava", "Central Europe Standard Time"}, +{"Europe/Brussels", "Romance Standard Time"}, +{"Europe/Bucharest", "GTB Standard Time"}, +{"Europe/Budapest", "Central Europe Standard Time"}, +{"Europe/Busingen", "W. Europe Standard Time"}, +{"Europe/Chisinau", "E. Europe Standard Time"}, +{"Europe/Copenhagen", "Romance Standard Time"}, +{"Europe/Dublin", "GMT Standard Time"}, +{"Europe/Gibraltar", "W. Europe Standard Time"}, +{"Europe/Guernsey", "GMT Standard Time"}, +{"Europe/Helsinki", "FLE Standard Time"}, +{"Europe/Isle_of_Man", "GMT Standard Time"}, +{"Europe/Istanbul", "Turkey Standard Time"}, +{"Europe/Jersey", "GMT Standard Time"}, +{"Europe/Kaliningrad", "Kaliningrad Standard Time"}, +{"Europe/Kiev", "FLE Standard Time"}, +{"Europe/Kirov", "Russian Standard Time"}, +{"Europe/Lisbon", "GMT Standard Time"}, +{"Europe/Ljubljana", "Central Europe Standard Time"}, +{"Europe/London", "GMT Standard Time"}, +{"Europe/Luxembourg", "W. Europe Standard Time"}, +{"Europe/Madrid", "Romance Standard Time"}, +{"Europe/Malta", "W. Europe Standard Time"}, +{"Europe/Mariehamn", "FLE Standard Time"}, +{"Europe/Minsk", "Belarus Standard Time"}, +{"Europe/Monaco", "W. Europe Standard Time"}, +{"Europe/Moscow", "Russian Standard Time"}, +{"Europe/Oslo", "W. 
Europe Standard Time"}, +{"Europe/Paris", "Romance Standard Time"}, +{"Europe/Podgorica", "Central Europe Standard Time"}, +{"Europe/Prague", "Central Europe Standard Time"}, +{"Europe/Riga", "FLE Standard Time"}, +{"Europe/Rome", "W. Europe Standard Time"}, +{"Europe/Samara", "Russia Time Zone 3"}, +{"Europe/San_Marino", "W. Europe Standard Time"}, +{"Europe/Sarajevo", "Central European Standard Time"}, +{"Europe/Saratov", "Saratov Standard Time"}, +{"Europe/Simferopol", "Russian Standard Time"}, +{"Europe/Skopje", "Central European Standard Time"}, +{"Europe/Sofia", "FLE Standard Time"}, +{"Europe/Stockholm", "W. Europe Standard Time"}, +{"Europe/Tallinn", "FLE Standard Time"}, +{"Europe/Tirane", "Central Europe Standard Time"}, +{"Europe/Tiraspol", "E. Europe Standard Time"}, +{"Europe/Ulyanovsk", "Astrakhan Standard Time"}, +{"Europe/Uzhgorod", "FLE Standard Time"}, +{"Europe/Vaduz", "W. Europe Standard Time"}, +{"Europe/Vatican", "W. Europe Standard Time"}, +{"Europe/Vienna", "W. Europe Standard Time"}, +{"Europe/Vilnius", "FLE Standard Time"}, +{"Europe/Volgograd", "Volgograd Standard Time"}, +{"Europe/Warsaw", "Central European Standard Time"}, +{"Europe/Zagreb", "Central European Standard Time"}, +{"Europe/Zaporozhye", "FLE Standard Time"}, +{"Europe/Zurich", "W. Europe Standard Time"}, +{"GB", "GMT Standard Time"}, +{"GB-Eire", "GMT Standard Time"}, +{"GMT+0", "UTC"}, +{"GMT-0", "UTC"}, +{"GMT0", "UTC"}, +{"Greenwich", "UTC"}, +{"Hongkong", "China Standard Time"}, +{"Iceland", "Greenwich Standard Time"}, +{"Indian/Antananarivo", "E. Africa Standard Time"}, +{"Indian/Chagos", "Central Asia Standard Time"}, +{"Indian/Christmas", "SE Asia Standard Time"}, +{"Indian/Cocos", "Myanmar Standard Time"}, +{"Indian/Comoro", "E. Africa Standard Time"}, +{"Indian/Kerguelen", "West Asia Standard Time"}, +{"Indian/Mahe", "Mauritius Standard Time"}, +{"Indian/Maldives", "West Asia Standard Time"}, +{"Indian/Mauritius", "Mauritius Standard Time"}, +{"Indian/Mayotte", "E. 
Africa Standard Time"}, +{"Indian/Reunion", "Mauritius Standard Time"}, +{"Iran", "Iran Standard Time"}, +{"Israel", "Israel Standard Time"}, +{"Jamaica", "SA Pacific Standard Time"}, +{"Japan", "Tokyo Standard Time"}, +{"Kwajalein", "UTC+12"}, +{"Libya", "Libya Standard Time"}, +{"MST7MDT", "Mountain Standard Time"}, +{"Mexico/BajaNorte", "Pacific Standard Time (Mexico)"}, +{"Mexico/BajaSur", "Mountain Standard Time (Mexico)"}, +{"Mexico/General", "Central Standard Time (Mexico)"}, +{"NZ", "New Zealand Standard Time"}, +{"NZ-CHAT", "Chatham Islands Standard Time"}, +{"Navajo", "Mountain Standard Time"}, +{"PRC", "China Standard Time"}, +{"PST8PDT", "Pacific Standard Time"}, +{"Pacific/Apia", "Samoa Standard Time"}, +{"Pacific/Auckland", "New Zealand Standard Time"}, +{"Pacific/Bougainville", "Bougainville Standard Time"}, +{"Pacific/Chatham", "Chatham Islands Standard Time"}, +{"Pacific/Easter", "Easter Island Standard Time"}, +{"Pacific/Efate", "Central Pacific Standard Time"}, +{"Pacific/Enderbury", "UTC+13"}, +{"Pacific/Fakaofo", "UTC+13"}, +{"Pacific/Fiji", "Fiji Standard Time"}, +{"Pacific/Funafuti", "UTC+12"}, +{"Pacific/Galapagos", "Central America Standard Time"}, +{"Pacific/Gambier", "UTC-09"}, +{"Pacific/Guadalcanal", "Central Pacific Standard Time"}, +{"Pacific/Guam", "West Pacific Standard Time"}, +{"Pacific/Honolulu", "Hawaiian Standard Time"}, +{"Pacific/Johnston", "Hawaiian Standard Time"}, +{"Pacific/Kiritimati", "Line Islands Standard Time"}, +{"Pacific/Kosrae", "Central Pacific Standard Time"}, +{"Pacific/Kwajalein", "UTC+12"}, +{"Pacific/Majuro", "UTC+12"}, +{"Pacific/Marquesas", "Marquesas Standard Time"}, +{"Pacific/Midway", "UTC-11"}, +{"Pacific/Nauru", "UTC+12"}, +{"Pacific/Niue", "UTC-11"}, +{"Pacific/Norfolk", "Norfolk Standard Time"}, +{"Pacific/Noumea", "Central Pacific Standard Time"}, +{"Pacific/Pago_Pago", "UTC-11"}, +{"Pacific/Palau", "Tokyo Standard Time"}, +{"Pacific/Pitcairn", "UTC-08"}, +{"Pacific/Ponape", "Central Pacific Standard Time"}, +{"Pacific/Port_Moresby", "West Pacific Standard Time"}, +{"Pacific/Rarotonga", "Hawaiian Standard Time"}, +{"Pacific/Saipan", "West Pacific Standard Time"}, +{"Pacific/Samoa", "UTC-11"}, +{"Pacific/Tahiti", "Hawaiian Standard Time"}, +{"Pacific/Tarawa", "UTC+12"}, +{"Pacific/Tongatapu", "Tonga Standard Time"}, +{"Pacific/Truk", "West Pacific Standard Time"}, +{"Pacific/Wake", "UTC+12"}, +{"Pacific/Wallis", "UTC+12"}, +{"Poland", "Central European Standard Time"}, +{"Portugal", "GMT Standard Time"}, +{"ROC", "Taipei Standard Time"}, +{"ROK", "Korea Standard Time"}, +{"Singapore", "Singapore Standard Time"}, +{"Turkey", "Turkey Standard Time"}, +{"UCT", "UTC"}, +{"US/Alaska", "Alaskan Standard Time"}, +{"US/Aleutian", "Aleutian Standard Time"}, +{"US/Arizona", "US Mountain Standard Time"}, +{"US/Central", "Central Standard Time"}, +{"US/Eastern", "Eastern Standard Time"}, +{"US/Hawaii", "Hawaiian Standard Time"}, +{"US/Indiana-Starke", "Central Standard Time"}, +{"US/Michigan", "Eastern Standard Time"}, +{"US/Mountain", "Mountain Standard Time"}, +{"US/Pacific", "Pacific Standard Time"}, +{"US/Samoa", "UTC-11"}, +{"UTC", "UTC"}, +{"Universal", "UTC"}, +{"W-SU", "Russian Standard Time"}, +{"Zulu", "UTC"}}; #elif defined(_TD_DARWIN_64) #include #include @@ -61,19 +755,33 @@ void taosSetSystemTimezone(const char *inTimezoneStr, char *outTimezoneStr, int8 #ifdef WINDOWS char winStr[TD_LOCALE_LEN * 2]; - sprintf(winStr, "TZ=%s", buf); - putenv(winStr); - tzset(); - /* - * get CURRENT time zone. 
-   * system current time zone is affected by daylight saving time(DST)
-   *
-   * e.g., the local time zone of London in DST is GMT+01:00,
-   * otherwise is GMT+00:00
-   */
+  memset(winStr, 0, sizeof(winStr));
+  for (size_t i = 0; i < 554; i++) {
+    if (strcmp(tz_win[i][0],buf) == 0) {
+      char  keyPath[100];
+      char  keyValue[100];
+      DWORD keyValueSize = sizeof(keyValue);
+      sprintf(keyPath, "SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\Time Zones\\%s",tz_win[i][1]);
+      RegGetValue(HKEY_LOCAL_MACHINE, keyPath, "Display", RRF_RT_ANY, NULL, (PVOID)&keyValue, &keyValueSize);
+      if (keyValueSize > 0) {
+        keyValue[4] = (keyValue[4] == '+' ? '-' : '+');
+        keyValue[10] = 0;
+        sprintf(winStr, "TZ=%s:00", &(keyValue[1]));
+      }
+      break;
+    }
+  }
+  char *p = strchr(inTimezoneStr, '+');
+  if (p == NULL) p = strchr(inTimezoneStr, '-');
+  if (p == NULL) {
+    sprintf(winStr, "TZ=UTC+00:00:00");
+  } else {
+    sprintf(winStr, "TZ=UTC%c%c%c:%c%c:00", (p[0] == '+' ? '-' : '+'), p[1], p[2], p[3], p[4]);
+  }
+  _putenv(winStr);
+  _tzset();
 #ifdef _MSC_VER
 #if _MSC_VER >= 1900
-  // see https://docs.microsoft.com/en-us/cpp/c-runtime-library/daylight-dstbias-timezone-and-tzname?view=vs-2019
   int64_t timezone = _timezone;
   int32_t daylight = _daylight;
   char  **tzname = _tzname;
@@ -83,11 +791,6 @@ void taosSetSystemTimezone(const char *inTimezoneStr, char *outTimezoneStr, int8
   int32_t tz = (int32_t)((-timezone * MILLISECOND_PER_SECOND) / MILLISECOND_PER_HOUR);
   *tsTimezone = tz;
   tz += daylight;
-  /*
-   * format:
-   * (CST, +0800)
-   * (BST, +0100)
-   */
   sprintf(outTimezoneStr, "%s (%s, %s%02d00)", buf, tzname[daylight], tz >= 0 ? "+" : "-", abs(tz));
   *outDaylight = daylight;
@@ -117,14 +820,36 @@ void taosSetSystemTimezone(const char *inTimezoneStr, char *outTimezoneStr, int8
 }

 void taosGetSystemTimezone(char *outTimezoneStr, enum TdTimezone *tsTimezone) {
-#ifdef WINDOWS
-  char *tz = getenv("TZ");
-  if (tz == NULL || strlen(tz) == 0) {
+#ifdef WINDOWS
+  char  value[100];
+  DWORD bufferSize = sizeof(value);
+  char *buf = getenv("TZ");
+  if (buf == NULL || strlen(buf) == 0) {
+    RegGetValue(HKEY_LOCAL_MACHINE, "SYSTEM\\CurrentControlSet\\Control\\TimeZoneInformation", "TimeZoneKeyName", RRF_RT_ANY, NULL, (PVOID)&value, &bufferSize);
     strcpy(outTimezoneStr, "not configured");
+    if (bufferSize > 0) {
+      for (size_t i = 0; i < 139; i++) {
+        if (strcmp(win_tz[i][0],value) == 0) {
+          strcpy(outTimezoneStr, win_tz[i][1]);
+          break;
+        }
+      }
+    }
   } else {
-    strcpy(outTimezoneStr, tz);
+    strcpy(outTimezoneStr, buf);
   }
-
+#ifdef _MSC_VER
+#if _MSC_VER >= 1900
+  // see https://docs.microsoft.com/en-us/cpp/c-runtime-library/daylight-dstbias-timezone-and-tzname?view=vs-2019
+  int64_t timezone = _timezone;
+  int32_t daylight = _daylight;
+  char  **tzname = _tzname;
+#endif
+#endif
+  int32_t tz = (int32_t)((-timezone * MILLISECOND_PER_SECOND) / MILLISECOND_PER_HOUR);
+  *tsTimezone = tz;
+  tz += daylight;
+  sprintf(outTimezoneStr, "%s (%s, %s%02d00)", outTimezoneStr, tzname[daylight], tz >= 0 ? "+" : "-", abs(tz));
 #elif defined(_TD_DARWIN_64)
   char  buf[4096] = {0};
   char *tz = NULL;
diff --git a/source/util/src/terror.c b/source/util/src/terror.c
index 6eb4f9310ba058f5f7f210058050d6df1eea3887..1e6aec147f491ea7cf9f93c7513b9de464bf8a7b 100644
--- a/source/util/src/terror.c
+++ b/source/util/src/terror.c
@@ -74,6 +74,8 @@ TAOS_DEFINE_ERROR(TSDB_CODE_REPEAT_INIT, "Repeat initialization
 TAOS_DEFINE_ERROR(TSDB_CODE_DUP_KEY, "Cannot add duplicate keys to hash")
 TAOS_DEFINE_ERROR(TSDB_CODE_NEED_RETRY, "Retry needed")
 TAOS_DEFINE_ERROR(TSDB_CODE_OUT_OF_RPC_MEMORY_QUEUE, "Out of memory in rpc queue")
+TAOS_DEFINE_ERROR(TSDB_CODE_INVALID_TIMESTAMP, "Invalid timestamp format")
+TAOS_DEFINE_ERROR(TSDB_CODE_MSG_DECODE_ERROR, "Msg decode error")

 TAOS_DEFINE_ERROR(TSDB_CODE_REF_NO_MEMORY, "Ref out of memory")
 TAOS_DEFINE_ERROR(TSDB_CODE_REF_FULL, "too many Ref Objs")
@@ -88,6 +90,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_RPC_AUTH_FAILURE, "Authentication failur
 TAOS_DEFINE_ERROR(TSDB_CODE_RPC_NETWORK_UNAVAIL, "Unable to establish connection")
 TAOS_DEFINE_ERROR(TSDB_CODE_RPC_FQDN_ERROR, "Unable to resolve FQDN")
 TAOS_DEFINE_ERROR(TSDB_CODE_RPC_PORT_EADDRINUSE, "Port already in use")
+TAOS_DEFINE_ERROR(TSDB_CODE_RPC_INDIRECT_NETWORK_UNAVAIL, "Unable to establish connection")

 //client
 TAOS_DEFINE_ERROR(TSDB_CODE_TSC_INVALID_OPERATION, "Invalid operation")
@@ -185,9 +188,9 @@ TAOS_DEFINE_ERROR(TSDB_CODE_MND_SNODE_ALREADY_EXIST, "Snode already exists"
 TAOS_DEFINE_ERROR(TSDB_CODE_MND_SNODE_NOT_EXIST, "Snode not there")
 TAOS_DEFINE_ERROR(TSDB_CODE_MND_BNODE_ALREADY_EXIST, "Bnode already exists")
 TAOS_DEFINE_ERROR(TSDB_CODE_MND_BNODE_NOT_EXIST, "Bnode not there")
-TAOS_DEFINE_ERROR(TSDB_CODE_MND_TOO_FEW_MNODES, "Too few mnodes")
-TAOS_DEFINE_ERROR(TSDB_CODE_MND_MNODE_DEPLOYED, "Mnode deployed in this dnode")
-TAOS_DEFINE_ERROR(TSDB_CODE_MND_CANT_DROP_MASTER, "Can't drop mnode which is master")
+TAOS_DEFINE_ERROR(TSDB_CODE_MND_TOO_FEW_MNODES, "The replicas of mnode cannot less than 1")
+TAOS_DEFINE_ERROR(TSDB_CODE_MND_TOO_MANY_MNODES, "The replicas of mnode cannot exceed 3")
+TAOS_DEFINE_ERROR(TSDB_CODE_MND_CANT_DROP_MASTER, "Can't drop mnode which is leader")

 // mnode-acct
 TAOS_DEFINE_ERROR(TSDB_CODE_MND_ACCT_ALREADY_EXIST, "Account already exists")
@@ -242,7 +245,6 @@ TAOS_DEFINE_ERROR(TSDB_CODE_MND_SINGLE_STB_MODE_DB, "Database is single st
 // mnode-infoSchema
 TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_SYS_TABLENAME, "Invalid system table name")
-
 // mnode-func
 TAOS_DEFINE_ERROR(TSDB_CODE_MND_FUNC_ALREADY_EXIST, "Func already exists")
 TAOS_DEFINE_ERROR(TSDB_CODE_MND_FUNC_NOT_EXIST, "Func not exists")
@@ -269,7 +271,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_TOPIC, "Invalid topic")
 TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_TOPIC_QUERY, "Topic with invalid query")
 TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_TOPIC_OPTION, "Topic with invalid option")
 TAOS_DEFINE_ERROR(TSDB_CODE_MND_CONSUMER_NOT_EXIST, "Consumer not exist")
-TAOS_DEFINE_ERROR(TSDB_CODE_MND_CONSUMER_NOT_READY, "Consumer waiting for rebalance")
+TAOS_DEFINE_ERROR(TSDB_CODE_MND_CGROUP_USED, "Consumer group being used by some consumer")
 TAOS_DEFINE_ERROR(TSDB_CODE_MND_TOPIC_SUBSCRIBED, "Topic subscribed cannot be dropped")

 TAOS_DEFINE_ERROR(TSDB_CODE_MND_STREAM_ALREADY_EXIST, "Stream already exists")
@@ -323,9 +325,9 @@ TAOS_DEFINE_ERROR(TSDB_CODE_TDB_INVALID_TABLE_ID, "Invalid table ID")
 TAOS_DEFINE_ERROR(TSDB_CODE_TDB_INVALID_TABLE_TYPE, "Invalid table type")
 TAOS_DEFINE_ERROR(TSDB_CODE_TDB_IVD_TB_SCHEMA_VERSION, "Invalid table schema version")
 TAOS_DEFINE_ERROR(TSDB_CODE_TDB_TABLE_ALREADY_EXIST, "Table already exists")
-TAOS_DEFINE_ERROR(TSDB_CODE_TDB_TABLE_NOT_EXIST, "Table not exists")
-TAOS_DEFINE_ERROR(TSDB_CODE_TDB_STB_ALREADY_EXIST, "Stable already exists")
-TAOS_DEFINE_ERROR(TSDB_CODE_TDB_STB_NOT_EXIST, "Stable not exists")
+TAOS_DEFINE_ERROR(TSDB_CODE_TDB_TABLE_NOT_EXIST, "Table not exists")
+TAOS_DEFINE_ERROR(TSDB_CODE_TDB_STB_ALREADY_EXIST, "Stable already exists")
+TAOS_DEFINE_ERROR(TSDB_CODE_TDB_STB_NOT_EXIST, "Stable not exists")
 TAOS_DEFINE_ERROR(TSDB_CODE_TDB_INVALID_CONFIG, "Invalid configuration")
 TAOS_DEFINE_ERROR(TSDB_CODE_TDB_INIT_FAILED, "Tsdb init failed")
 TAOS_DEFINE_ERROR(TSDB_CODE_TDB_NO_DISKSPACE, "No diskspace for tsdb")
@@ -447,9 +449,10 @@ TAOS_DEFINE_ERROR(TSDB_CODE_QW_MSG_ERROR, "Invalid msg order")
 // parser
 TAOS_DEFINE_ERROR(TSDB_CODE_PAR_TABLE_NOT_EXIST, "Table does not exist")
 TAOS_DEFINE_ERROR(TSDB_CODE_PAR_PERMISSION_DENIED, "Permission denied")
+TAOS_DEFINE_ERROR(TSDB_CODE_PAR_INTERNAL_ERROR, "Parser internal error")

 //planner
-TAOS_DEFINE_ERROR(TSDB_CODE_PLAN_INTERNAL_ERROR, "planner internal error")
+TAOS_DEFINE_ERROR(TSDB_CODE_PLAN_INTERNAL_ERROR, "Planner internal error")

 //udf
 TAOS_DEFINE_ERROR(TSDB_CODE_UDF_STOPPING, "udf is stopping")
@@ -467,6 +470,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_UDF_INVALID_OUTPUT_TYPE, "udf invalid output ty
 TAOS_DEFINE_ERROR(TSDB_CODE_SML_INVALID_PROTOCOL_TYPE, "Invalid line protocol type")
 TAOS_DEFINE_ERROR(TSDB_CODE_SML_INVALID_PRECISION_TYPE, "Invalid timestamp precision type")
 TAOS_DEFINE_ERROR(TSDB_CODE_SML_INVALID_DATA, "Invalid data type")
+TAOS_DEFINE_ERROR(TSDB_CODE_SML_INVALID_DB_CONF, "Invalid schemaless db config")

 #ifdef TAOS_ERROR_C
 };
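The terror.c table above works because TAOS_DEFINE_ERROR expands one list of codes in two different ways, with the message-table expansion guarded by the `#ifdef TAOS_ERROR_C` trailer visible at the end of the file. A minimal self-contained sketch of that X-macro pattern, using illustrative names rather than TDengine's actual macro internals:

#include <stdio.h>

/* One list, expanded twice: once into enum constants, once into strings. */
#define DEMO_ERROR_LIST(X)                                   \
  X(DEMO_CODE_INVALID_TIMESTAMP, "Invalid timestamp format") \
  X(DEMO_CODE_MSG_DECODE_ERROR, "Msg decode error")

#define AS_ENUM(code, msg) code,
enum { DEMO_ERROR_LIST(AS_ENUM) DEMO_CODE_MAX };

#define AS_STRING(code, msg) msg,
static const char *demoErrStr[] = {DEMO_ERROR_LIST(AS_STRING)};

int main(void) {
  printf("%d -> %s\n", DEMO_CODE_MSG_DECODE_ERROR, demoErrStr[DEMO_CODE_MSG_DECODE_ERROR]);
  return 0;
}

Because both expansions come from the same list, a new TAOS_DEFINE_ERROR line (such as TSDB_CODE_SML_INVALID_DB_CONF above) keeps the code and its message in sync by construction.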
diff --git a/source/util/src/tlist.c b/source/util/src/tlist.c
index 1d17b4a9e17aa7cafdd89ba273770e8751f09066..b1c018805157fe05ef6be97fa7be6df0255d5d5b 100644
--- a/source/util/src/tlist.c
+++ b/source/util/src/tlist.c
@@ -95,7 +95,7 @@ SListNode *tdListPopTail(SList *list) {

 SListNode *tdListGetHead(SList *list) { return TD_DLIST_HEAD(list); }

-SListNode *tsListGetTail(SList *list) { return TD_DLIST_TAIL(list); }
+SListNode *tdListGetTail(SList *list) { return TD_DLIST_TAIL(list); }

 SListNode *tdListPopNode(SList *list, SListNode *node) {
   TD_DLIST_POP(list, node);
diff --git a/source/util/src/tlog.c b/source/util/src/tlog.c
index e8a1ceb18b5acdf4c113aacb85f72c0f52b005cd..353e94a49096822fe581d7faa0df8a29a6494c12 100644
--- a/source/util/src/tlog.c
+++ b/source/util/src/tlog.c
@@ -39,7 +39,7 @@
 #define LOG_BUF_MUTEX(x) ((x)->buffMutex)

 typedef struct {
-  char *buffer;
+  char * buffer;
   int32_t buffStart;
   int32_t buffEnd;
   int32_t buffSize;
@@ -58,7 +58,7 @@ typedef struct {
   int32_t openInProgress;
   pid_t pid;
   char logName[LOG_FILE_NAME_LEN];
-  SLogBuff *logHandle;
+  SLogBuff * logHandle;
   TdThreadMutex logMutex;
 } SLogObj;

@@ -96,6 +96,7 @@ int32_t fsDebugFlag = 135;
 int32_t metaDebugFlag = 135;
 int32_t fnDebugFlag = 135;
 int32_t smaDebugFlag = 135;
+int32_t idxDebugFlag = 135;

 int64_t dbgEmptyW = 0;
 int64_t dbgWN = 0;
@@ -103,7 +104,7 @@ int64_t dbgSmallWN = 0;
 int64_t dbgBigWN = 0;
 int64_t dbgWSize = 0;

-static void *taosAsyncOutputLog(void *param);
+static void * taosAsyncOutputLog(void *param);
 static int32_t taosPushLogBuffer(SLogBuff *pLogBuf, const char *msg, int32_t msgLen);
 static SLogBuff *taosLogBuffNew(int32_t bufSize);
 static void taosCloseLogByFd(TdFilePtr pFile);
@@ -490,7 +491,7 @@ void taosDumpData(unsigned char *msg, int32_t len) {
   if (!osLogSpaceAvailable()) return;
   taosUpdateLogNums(DEBUG_DUMP);

-  char temp[256];
+  char temp[256] = {0};
   int32_t i, pos = 0, c = 0;

   for (i = 0; i < len; ++i) {
@@ -701,7 +702,7 @@ int32_t taosCompressFile(char *srcFileName, char *destFileName) {
   int32_t compressSize = 163840;
   int32_t ret = 0;
   int32_t len = 0;
-  char *data = taosMemoryMalloc(compressSize);
+  char * data = taosMemoryMalloc(compressSize);
   // gzFile dstFp = NULL;

   // srcFp = fopen(srcFileName, "r");
@@ -759,6 +760,7 @@ void taosSetAllDebugFlag(int32_t flag) {
   fsDebugFlag = flag;
   fnDebugFlag = flag;
   smaDebugFlag = flag;
+  idxDebugFlag = flag;
   uInfo("all debug flag are set to %d", flag);
 }
diff --git a/source/util/src/tqueue.c b/source/util/src/tqueue.c
index 6a10794ea154306f3c26b9666482a7c3a5b61958..37935087fad693eed254549977182ccaca1085f2 100644
--- a/source/util/src/tqueue.c
+++ b/source/util/src/tqueue.c
@@ -26,6 +26,7 @@ typedef struct STaosQnode STaosQnode;
 typedef struct STaosQnode {
   STaosQnode *next;
   STaosQueue *queue;
+  int64_t timestamp;
   int32_t size;
   int8_t itype;
   int8_t reserved[3];
@@ -144,6 +145,7 @@ void *taosAllocateQitem(int32_t size, EQItype itype) {
   STaosQnode *pNode = taosMemoryCalloc(1, sizeof(STaosQnode) + size);
   pNode->size = size;
   pNode->itype = itype;
+  pNode->timestamp = taosGetTimestampUs();

   if (pNode == NULL) {
     terrno = TSDB_CODE_OUT_OF_MEMORY;
@@ -393,7 +395,7 @@ void taosRemoveFromQset(STaosQset *qset, STaosQueue *queue) {

 int32_t taosGetQueueNumber(STaosQset *qset) { return qset->numOfQueues; }

-int32_t taosReadQitemFromQset(STaosQset *qset, void **ppItem, void **ahandle, FItem *itemFp) {
+int32_t taosReadQitemFromQset(STaosQset *qset, void **ppItem, int64_t *ts, void **ahandle, FItem *itemFp) {
   STaosQnode *pNode = NULL;
   int32_t code = 0;
@@ -415,6 +417,7 @@ int32_t taosReadQitemFromQset(STaosQset *qset, void **ppItem, void **ahandle, FI
       *ppItem = pNode->item;
       if (ahandle) *ahandle = queue->ahandle;
      if (itemFp) *itemFp = queue->itemFp;
+      if (ts) *ts = pNode->timestamp;

       queue->head = pNode->next;
       if (queue->head == NULL) queue->tail = NULL;
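The tqueue.c change above stamps every queue node with its allocation time in microseconds and hands the stamp back through taosReadQitemFromQset, so a consumer can measure how long an item sat in the queue. A stripped-down sketch of the same idea in standard C only (the names here are hypothetical stand-ins, not the TDengine API):

#include <stdint.h>
#include <stdio.h>
#include <time.h>

/* Hypothetical stand-in for taosGetTimestampUs(). */
static int64_t demoNowUs(void) {
  struct timespec ts;
  clock_gettime(CLOCK_REALTIME, &ts);
  return (int64_t)ts.tv_sec * 1000000 + ts.tv_nsec / 1000;
}

/* A queue node carrying its enqueue timestamp, as STaosQnode now does. */
typedef struct {
  int64_t timestamp;  /* stamped when the item is allocated/enqueued */
  int     payload;
} DemoQItem;

int main(void) {
  DemoQItem item = {.timestamp = demoNowUs(), .payload = 42};  /* producer side */
  /* ... the item would now sit in the queue ... */
  int64_t waitedUs = demoNowUs() - item.timestamp;             /* consumer side */
  printf("item %d waited %lld us in the queue\n", item.payload, (long long)waitedUs);
  return 0;
}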
diff --git a/source/util/src/tsched.c b/source/util/src/tsched.c
index ee1f4185613dd85f0e60d86ebd0487b07b3ceee9..691a0d34d42ca3ab04be5daf61414016436a6bb1 100644
--- a/source/util/src/tsched.c
+++ b/source/util/src/tsched.c
@@ -23,19 +23,19 @@
 #define DUMP_SCHEDULER_TIME_WINDOW 30000  // every 30sec, take a snap shot of task queue.

 typedef struct {
-  char label[TSDB_LABEL_LEN];
-  tsem_t emptySem;
-  tsem_t fullSem;
+  char          label[TSDB_LABEL_LEN];
+  tsem_t        emptySem;
+  tsem_t        fullSem;
   TdThreadMutex queueMutex;
-  int32_t fullSlot;
-  int32_t emptySlot;
-  int32_t queueSize;
-  int32_t numOfThreads;
-  TdThread *qthread;
-  SSchedMsg *queue;
-  bool stop;
-  void *pTmrCtrl;
-  void *pTimer;
+  int32_t       fullSlot;
+  int32_t       emptySlot;
+  int32_t       queueSize;
+  int32_t       numOfThreads;
+  TdThread     *qthread;
+  SSchedMsg    *queue;
+  bool          stop;
+  void         *pTmrCtrl;
+  void         *pTimer;
 } SSchedQueue;

 static void *taosProcessSchedQueue(void *param);
@@ -218,7 +218,8 @@ void taosCleanUpScheduler(void *param) {
   taosThreadMutexDestroy(&pSched->queueMutex);

   if (pSched->pTimer) {
-    taosTmrStopA(&pSched->pTimer);
+    taosTmrStop(pSched->pTimer);
+    pSched->pTimer = NULL;
   }

   if (pSched->queue) taosMemoryFree(pSched->queue);
diff --git a/source/util/src/tstrbuild.c b/source/util/src/tstrbuild.c
index 2aae588046402e37569f5a2bde5ed5f72fa24346..c87b889e82ece82c251ddabad1964bc1f0b3ab2f 100644
--- a/source/util/src/tstrbuild.c
+++ b/source/util/src/tstrbuild.c
@@ -69,13 +69,13 @@ void taosStringBuilderAppendString(SStringBuilder* sb, const char* str) {
 void taosStringBuilderAppendNull(SStringBuilder* sb) { taosStringBuilderAppendStringLen(sb, "null", 4); }

 void taosStringBuilderAppendInteger(SStringBuilder* sb, int64_t v) {
-  char buf[64];
+  char buf[64] = {0};
   size_t len = snprintf(buf, sizeof(buf), "%" PRId64, v);
   taosStringBuilderAppendStringLen(sb, buf, TMIN(len, sizeof(buf)));
 }

 void taosStringBuilderAppendDouble(SStringBuilder* sb, double v) {
-  char buf[512];
+  char buf[512] = {0};
   size_t len = snprintf(buf, sizeof(buf), "%.9lf", v);
   taosStringBuilderAppendStringLen(sb, buf, TMIN(len, sizeof(buf)));
 }
diff --git a/source/util/src/tworker.c b/source/util/src/tworker.c
index dc48fc3f8d2b2e803e8f1593d5471184fa99e059..68f96c0385b6c25a4736343917e875f84d4e2c9e 100644
--- a/source/util/src/tworker.c
+++ b/source/util/src/tworker.c
@@ -75,19 +75,20 @@ static void *tQWorkerThreadFp(SQWorker *worker) {
   void *msg = NULL;
   void *ahandle = NULL;
   int32_t code = 0;
+  int64_t ts = 0;

   taosBlockSIGPIPE();
   setThreadName(pool->name);
   uDebug("worker:%s:%d is running", pool->name, worker->id);

   while (1) {
-    if (taosReadQitemFromQset(pool->qset, (void **)&msg, &ahandle, &fp) == 0) {
+    if (taosReadQitemFromQset(pool->qset, (void **)&msg, &ts, &ahandle, &fp) == 0) {
       uDebug("worker:%s:%d qset:%p, got no message and exiting", pool->name, worker->id, pool->qset);
       break;
     }

     if (fp != NULL) {
-      SQueueInfo info = {.ahandle = ahandle, .workerId = worker->id, .threadNum = pool->num};
+      SQueueInfo info = {.ahandle = ahandle, .workerId = worker->id, .threadNum = pool->num, .timestamp = ts};
       (*fp)(&info, msg);
     }
   }
@@ -162,7 +163,7 @@ int32_t tWWorkerInit(SWWorkerPool *pool) {
     worker->pool = pool;
   }

-  uInfo("worker:%s is initialized, max:%d", pool->name, pool->max);
+  uDebug("worker:%s is initialized, max:%d", pool->name, pool->max);
   return 0;
 }

@@ -189,7 +190,7 @@ void tWWorkerCleanup(SWWorkerPool *pool) {
   taosMemoryFreeClear(pool->workers);
   taosThreadMutexDestroy(&pool->mutex);

-  uInfo("worker:%s is closed", pool->name);
+  uDebug("worker:%s is closed", pool->name);
 }

 static void *tWWorkerThreadFp(SWWorker *worker) {
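In tsched.c above, cleanup now calls taosTmrStop on the handle and clears the pointer explicitly, rather than relying on taosTmrStopA's clear-through-pointer variant. The general stop-then-null idiom, sketched against a hypothetical timer API (not TDengine's):

#include <stddef.h>

/* Hypothetical timer API standing in for taosTmrStop/taosTmrStopA. */
typedef struct DemoTimer DemoTimer;
static void demoTmrStop(DemoTimer *t) { (void)t; /* cancel the timer */ }

typedef struct {
  DemoTimer *pTimer;
} DemoSched;

static void demoCleanup(DemoSched *s) {
  if (s->pTimer) {
    demoTmrStop(s->pTimer);  /* stop first... */
    s->pTimer = NULL;        /* ...then clear the handle, so a later cleanup
                              * path cannot stop a stale pointer twice */
  }
}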
diff --git a/tests/pytest/cluster/clusterSetup.py b/tests/pytest/cluster/clusterSetup.py
index 87414303f850bcbd78468238e7b76aa3dbb3326e..809e0e9d25ed79246cbd4d83d39f262b0a678cd0 100644
--- a/tests/pytest/cluster/clusterSetup.py
+++ b/tests/pytest/cluster/clusterSetup.py
@@ -92,13 +92,13 @@ class Node:
         self.conn.run("yes|./install.sh")

     def configTaosd(self, taosConfigKey, taosConfigValue):
-        self.conn.run("sudo echo '%s %s' >> %s" % (taosConfigKey, taosConfigValue, "/etc/taos/taos.cfg"))
+        self.conn.run("sudo echo %s %s >> %s" % (taosConfigKey, taosConfigValue, "/etc/taos/taos.cfg"))

     def removeTaosConfig(self, taosConfigKey, taosConfigValue):
         self.conn.run("sudo sed -in-place -e '/%s %s/d' %s" % (taosConfigKey, taosConfigValue, "/etc/taos/taos.cfg"))

     def configHosts(self, ip, name):
-        self.conn.run("echo '%s %s' >> %s" % (ip, name, '/etc/hosts'))
+        self.conn.run("echo %s %s >> %s" % (ip, name, '/etc/hosts'))

     def removeData(self):
         try:
diff --git a/tests/pytest/dockerCluster/basic.py b/tests/pytest/dockerCluster/basic.py
index 871d69790d328f3dcea9fdfdac27a6abc3bb14bd..5188aa4a80a8faacfbc4056958bde2363a796449 100644
--- a/tests/pytest/dockerCluster/basic.py
+++ b/tests/pytest/dockerCluster/basic.py
@@ -113,7 +113,7 @@ class BuildDockerCluser:

     def cfg(self, option, value, nodeIndex):
         cfgPath = "%s/node%d/cfg/taos.cfg" % (self.dockerDir, nodeIndex)
-        cmd = "echo '%s %s' >> %s" % (option, value, cfgPath)
+        cmd = "echo %s %s >> %s" % (option, value, cfgPath)
         self.execCmd(cmd)

     def updateLocalhosts(self):
@@ -122,7 +122,7 @@ class BuildDockerCluser:
         print(result)
         if result is None or result.isspace():
             print("==========")
-            cmd = "echo '172.27.0.7 tdnode1' >> /etc/hosts"
+            cmd = "echo 172.27.0.7 tdnode1 >> /etc/hosts"
             display = "echo %s" % cmd
             self.execCmd(display)
             self.execCmd(cmd)
diff --git a/tests/pytest/fulltest.bat b/tests/pytest/fulltest.bat
index 5758691c8872adbfd82e9e5a5cc10c2043b23922..fd74f2ad029c982a3a3dd98ae0c8df264bab9c66 100644
--- a/tests/pytest/fulltest.bat
+++ b/tests/pytest/fulltest.bat
@@ -1,2 +1,22 @@
-python .\test.py -f insert\basic.py
\ No newline at end of file
+python .\test.py -f insert\basic.py
+python .\test.py -f insert\int.py
+python .\test.py -f insert\float.py
+python .\test.py -f insert\bigint.py
+python .\test.py -f insert\bool.py
+python .\test.py -f insert\double.py
+python .\test.py -f insert\smallint.py
+python .\test.py -f insert\tinyint.py
+python .\test.py -f insert\date.py
+python .\test.py -f insert\binary.py
+python .\test.py -f insert\nchar.py
+
+python .\test.py -f query\filter.py
+python .\test.py -f query\filterCombo.py
+python .\test.py -f query\queryNormal.py
+python .\test.py -f query\queryError.py
+python .\test.py -f query\filterAllIntTypes.py
+python .\test.py -f query\filterFloatAndDouble.py
+python .\test.py -f query\filterOtherTypes.py
+python .\test.py -f query\querySort.py
+python .\test.py -f query\queryJoin.py
\ No newline at end of file
diff --git a/tests/pytest/manualTest/TD-5114/rollingUpgrade.py b/tests/pytest/manualTest/TD-5114/rollingUpgrade.py
index f634eb1208b69f263ea89db2440db40ec3e085e6..b2d5171972b9e5e0025c4e46e8dd1f257ed48e24 100644
--- a/tests/pytest/manualTest/TD-5114/rollingUpgrade.py
+++ b/tests/pytest/manualTest/TD-5114/rollingUpgrade.py
@@ -38,7 +38,7 @@ class Node:
     def buildTaosd(self):
         try:
             print(self.conn)
-            # self.conn.run('echo "1234" > /home/chr/installtest/test.log')
+            # self.conn.run('echo 1234 > 
/home/chr/installtest/test.log') self.conn.run("cd /home/chr/installtest/%s && ./install.sh " % self.installPath) except Exception as e: print("Build Taosd error for node %d " % self.index) @@ -108,7 +108,7 @@ class oneNode: # install TDengine at 192.168.103/104/141 try: node = Node(id, username, IP, passwd, version) - node.conn.run('echo "start taosd"') + node.conn.run('echo start taosd') node.buildTaosd() # clear DataPath , if need clear data node.clearData() @@ -128,7 +128,7 @@ class oneNode: # start TDengine try: node = Node(id, username, IP, passwd, version) - node.conn.run('echo "restart taosd"') + node.conn.run('echo restart taosd') # clear DataPath , if need clear data node.clearData() node.restartTaosd() @@ -149,14 +149,14 @@ class oneNode: verName = "TDengine-enterprise-server-%s-Linux-x64.tar.gz" % version # installPath = "TDengine-enterprise-server-%s" % self.version node131 = Node(131, 'ubuntu', '192.168.1.131', 'tbase125!', '2.0.20.0') - node131.conn.run('echo "upgrade cluster"') + node131.conn.run('echo upgrade cluster') node131.conn.run('sshpass -p tbase125! scp /nas/TDengine/v%s/enterprise/%s root@192.168.1.%d:/home/chr/installtest/' % (version,verName,id)) node131.conn.close() # upgrade TDengine at 192.168.103/104/141 try: node = Node(id, username, IP, passwd, version) - node.conn.run('echo "start taosd"') - node.conn.run('echo "1234" > /home/chr/test.log') + node.conn.run('echo start taosd') + node.conn.run('echo 1234 > /home/chr/test.log') node.buildTaosd() time.sleep(5) node.startTaosd() @@ -176,7 +176,7 @@ class oneNode: # backCluster TDengine at 192.168.103/104/141 try: node = Node(id, username, IP, passwd, version) - node.conn.run('echo "rollback taos"') + node.conn.run('echo rollback taos') node.rebuildTaosd() time.sleep(5) node.startTaosd() diff --git a/tests/pytest/stream/test3.py b/tests/pytest/stream/test3.py new file mode 100644 index 0000000000000000000000000000000000000000..b45521a9476961394c1cf4b2454d6fb9e2368c68 --- /dev/null +++ b/tests/pytest/stream/test3.py @@ -0,0 +1,34 @@ +# -*- coding: utf-8 -*- + +import sys +from util.log import * +from util.cases import * +from util.sql import * +from util.common import tdCom +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def run(self): + #for i in range(100): + tdSql.prepare() + dbname = tdCom.getLongName(10, "letters") + tdSql.execute('create database if not exists djnhawvlgq vgroups 1') + tdSql.execute('use djnhawvlgq') + tdSql.execute('create table if not exists downsampling_stb (ts timestamp, c1 int, c2 double, c3 varchar(100), c4 bool) tags (t1 int, t2 double, t3 varchar(100), t4 bool);') + tdSql.execute('create table downsampling_ct1 using downsampling_stb tags(10, 10.1, "Beijing", True);') + tdSql.execute('create table if not exists scalar_stb (ts timestamp, c1 int, c2 double, c3 binary(20), c4 nchar(20), c5 nchar(20)) tags (t1 int);') + tdSql.execute('create table scalar_ct1 using scalar_stb tags(10);') + tdSql.execute('create table if not exists data_filter_stb (ts timestamp, c1 tinyint, c2 smallint, c3 int, c4 bigint, c5 float, c6 double, c7 binary(100), c8 nchar(200), c9 bool, c10 tinyint unsigned, c11 smallint unsigned, c12 int unsigned, c13 bigint unsigned) tags (t1 tinyint, t2 smallint, t3 int, t4 bigint, t5 float, t6 double, t7 binary(100), t8 nchar(200), t9 bool, t10 tinyint unsigned, t11 smallint unsigned, t12 int unsigned, t13 bigint unsigned)') + tdSql.execute('create table if not exists data_filter_ct1 
using data_filter_stb tags (1, 2, 3, 4, 5.5, 6.6, "binary7", "nchar8", true, 11, 12, 13, 14)') + tdSql.execute('create stream data_filter_stream into output_data_filter_stb as select * from data_filter_stb where ts >= 1653648072973+1s and c1 = 1 or c2 > 1 and c3 != 4 or c4 <= 3 and c5 <> 0 or c6 is not Null or c7 is Null or c8 between "na" and "nchar4" and c8 not between "bi" and "binary" and c8 match "nchar[19]" and c8 nmatch "nchar[25]" or c9 in (1, 2, 3) or c10 not in (6, 7) and c8 like "nch%" and c7 not like "bina_" and c11 <= 10 or c12 is Null or c13 >= 4;') + tdSql.execute('insert into data_filter_ct1 values (1653648072973, 1, 1, 1, 3, 1.1, 1.1, "binary1", "nchar1", true, 1, 2, 3, 4);') + tdSql.execute('insert into data_filter_ct1 values (1653648072973+1s, 2, 2, 1, 3, 1.1, 1.1, "binary2", "nchar2", true, 2, 3, 4, 5);') + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/test-all.bat b/tests/pytest/test-all.bat index 437472f7b88ac0d470d23a72bb1905727a63d097..4e3ece9b565f5fecce55798684d98875e1ffb7cc 100644 --- a/tests/pytest/test-all.bat +++ b/tests/pytest/test-all.bat @@ -14,12 +14,14 @@ for /F "usebackq tokens=*" %%i in (fulltest.bat) do ( echo Processing %%i set /a a+=1 call %%i ARG1 -w 1 -m %1 > result_!a!.txt 2>error_!a!.txt - if errorlevel 1 ( call :colorEcho 0c "failed" &echo. && exit 8 ) else ( call :colorEcho 0a "Success" &echo. ) + if errorlevel 1 ( call :colorEcho 0c "failed" &echo. && goto :end ) else ( call :colorEcho 0a "Success" &echo. ) ) -exit +goto :end :colorEcho echo off "%~2" findstr /v /a:%1 /R "^$" "%~2" nul -del "%~2" > nul 2>&1i \ No newline at end of file +del "%~2" > nul 2>&1i + +:end \ No newline at end of file diff --git a/tests/pytest/test.py b/tests/pytest/test.py index 9d146462f28f77fca6a6ada08fb3972770ef855d..30ab6ae3cc14e2d36f4979f03bdc99871cfcd8fa 100644 --- a/tests/pytest/test.py +++ b/tests/pytest/test.py @@ -18,6 +18,7 @@ import getopt import subprocess import time from distutils.log import warn as printf +import platform from util.log import * from util.dnodes import * @@ -36,8 +37,10 @@ if __name__ == "__main__": stop = 0 restart = False windows = 0 - opts, args = getopt.gnu_getopt(sys.argv[1:], 'f:p:m:l:scghrw', [ - 'file=', 'path=', 'master', 'logSql', 'stop', 'cluster', 'valgrind', 'help', 'windows']) + if platform.system().lower() == 'windows': + windows = 1 + opts, args = getopt.gnu_getopt(sys.argv[1:], 'f:p:m:l:scghr', [ + 'file=', 'path=', 'master', 'logSql', 'stop', 'cluster', 'valgrind', 'help', 'restart']) for key, value in opts: if key in ['-h', '--help']: tdLog.printNoPrefix( @@ -64,9 +67,6 @@ if __name__ == "__main__": if key in ['-m', '--master']: masterIp = value - if key in ['-w', '--windows']: - windows = 1 - if key in ['-l', '--logSql']: if (value.upper() == "TRUE"): logSql = True @@ -146,7 +146,7 @@ if __name__ == "__main__": else: pass tdDnodes.deploy(1,{}) - tdDnodes.startWin(1) + tdDnodes.start(1) else: remote_conn = Connection("root@%s"%host) with remote_conn.cd('/var/lib/jenkins/workspace/TDinternal/community/tests/pytest'): diff --git a/tests/pytest/util/common.py b/tests/pytest/util/common.py index 35abc4802f9de2080a6b6a166daf833c9cf04578..7b00e6f331f6053c96ce56b5a79219b6967c6ecd 100644 --- a/tests/pytest/util/common.py +++ b/tests/pytest/util/common.py @@ -14,23 +14,97 @@ import random import string from util.sql import tdSql - +from util.dnodes import tdDnodes +import 
requests +import time +import socket class TDCom: def init(self, conn, logSql): tdSql.init(conn.cursor(), logSql) - def cleanTb(self): + def preDefine(self): + header = {'Authorization': 'Basic cm9vdDp0YW9zZGF0YQ=='} + sql_url = "http://127.0.0.1:6041/rest/sql" + sqlt_url = "http://127.0.0.1:6041/rest/sqlt" + sqlutc_url = "http://127.0.0.1:6041/rest/sqlutc" + influx_url = "http://127.0.0.1:6041/influxdb/v1/write" + telnet_url = "http://127.0.0.1:6041/opentsdb/v1/put/telnet" + return header, sql_url, sqlt_url, sqlutc_url, influx_url, telnet_url + + def genTcpParam(self): + MaxBytes = 1024*1024 + host ='127.0.0.1' + port = 6046 + return MaxBytes, host, port + + def tcpClient(self, input): + MaxBytes = tdCom.genTcpParam()[0] + host = tdCom.genTcpParam()[1] + port = tdCom.genTcpParam()[2] + sock = socket.socket(socket.AF_INET,socket.SOCK_STREAM) + sock.connect((host, port)) + sock.send(input.encode()) + sock.close() + + def restApiPost(self, sql): + requests.post(self.preDefine()[1], sql.encode("utf-8"), headers = self.preDefine()[0]) + + def createDb(self, dbname="test", db_update_tag=0, api_type="taosc"): + if api_type == "taosc": + if db_update_tag == 0: + tdSql.execute(f"drop database if exists {dbname}") + tdSql.execute(f"create database if not exists {dbname} precision 'us'") + else: + tdSql.execute(f"drop database if exists {dbname}") + tdSql.execute(f"create database if not exists {dbname} precision 'us' update 1") + elif api_type == "restful": + if db_update_tag == 0: + self.restApiPost(f"drop database if exists {dbname}") + self.restApiPost(f"create database if not exists {dbname} precision 'us'") + else: + self.restApiPost(f"drop database if exists {dbname}") + self.restApiPost(f"create database if not exists {dbname} precision 'us' update 1") + tdSql.execute(f'use {dbname}') + + def genUrl(self, url_type, dbname, precision): + if url_type == "influxdb": + if precision is None: + url = self.preDefine()[4] + "?" + "db=" + dbname + else: + url = self.preDefine()[4] + "?" 
+ "db=" + dbname + "&precision=" + precision + elif url_type == "telnet": + url = self.preDefine()[5] + "/" + dbname + else: + url = self.preDefine()[1] + return url + + def schemalessApiPost(self, sql, url_type="influxdb", dbname="test", precision=None): + if url_type == "influxdb": + url = self.genUrl(url_type, dbname, precision) + elif url_type == "telnet": + url = self.genUrl(url_type, dbname, precision) + res = requests.post(url, sql.encode("utf-8"), headers = self.preDefine()[0]) + return res + + def cleanTb(self, type="taosc"): + ''' + type is taosc or restful + ''' query_sql = "show stables" res_row_list = tdSql.query(query_sql, True) stb_list = map(lambda x: x[0], res_row_list) for stb in stb_list: - tdSql.execute(f'drop table if exists {stb}') + if type == "taosc": + tdSql.execute(f'drop table if exists `{stb}`') + if not stb[0].isdigit(): + tdSql.execute(f'drop table if exists {stb}') + elif type == "restful": + self.restApiPost(f"drop table if exists `{stb}`") + if not stb[0].isdigit(): + self.restApiPost(f"drop table if exists {stb}") - query_sql = "show tables" - res_row_list = tdSql.query(query_sql, True) - tb_list = map(lambda x: x[0], res_row_list) - for tb in tb_list: - tdSql.execute(f'drop table if exists {tb}') + def dateToTs(self, datetime_input): + return int(time.mktime(time.strptime(datetime_input, "%Y-%m-%d %H:%M:%S.%f"))) def getLongName(self, len, mode = "mixed"): """ @@ -47,6 +121,52 @@ class TDCom: chars = ''.join(random.choice(string.ascii_letters.lower() + string.digits) for i in range(len)) return chars + def restartTaosd(self, index=1, db_name="db"): + tdDnodes.stop(index) + tdDnodes.startWithoutSleep(index) + tdSql.execute(f"use {db_name}") + + def typeof(self, variate): + v_type=None + if type(variate) is int: + v_type = "int" + elif type(variate) is str: + v_type = "str" + elif type(variate) is float: + v_type = "float" + elif type(variate) is bool: + v_type = "bool" + elif type(variate) is list: + v_type = "list" + elif type(variate) is tuple: + v_type = "tuple" + elif type(variate) is dict: + v_type = "dict" + elif type(variate) is set: + v_type = "set" + return v_type + + def splitNumLetter(self, input_mix_str): + nums, letters = "", "" + for i in input_mix_str: + if i.isdigit(): + nums += i + elif i.isspace(): + pass + else: + letters += i + return nums, letters + + def smlPass(self, func): + smlChildTableName = "no" + def wrapper(*args): + # if tdSql.getVariable("smlChildTableName")[0].upper() == "ID": + if smlChildTableName.upper() == "ID": + return func(*args) + else: + pass + return wrapper + def close(self): self.cursor.close() diff --git a/tests/pytest/util/dnodes.py b/tests/pytest/util/dnodes.py index 12e13c9b5ca3cb0e0241a8eb3db58909a6b07779..21d235ee5c3502ff3248681ada0a6f2c99a805ea 100644 --- a/tests/pytest/util/dnodes.py +++ b/tests/pytest/util/dnodes.py @@ -17,6 +17,10 @@ import os.path import platform import subprocess from time import sleep +import base64 +import json +import copy +from fabric2 import Connection from util.log import * @@ -111,6 +115,7 @@ class TDDnode: self.deployed = 0 self.testCluster = False self.valgrind = 0 + self.remoteIP = "" self.cfgDict = { "walLevel": "2", "fsync": "1000", @@ -137,8 +142,9 @@ class TDDnode: "telemetryReporting": "0" } - def init(self, path): + def init(self, path, remoteIP = ""): self.path = path + self.remoteIP = remoteIP def setTestCluster(self, value): self.testCluster = value @@ -162,6 +168,29 @@ class TDDnode: def addExtraCfg(self, option, value): self.cfgDict.update({option: value}) + def 
remoteExec(self, updateCfgDict, execCmd): + try: + config = eval(self.remoteIP) + remote_conn = Connection(host=config["host"], port=config["port"], user=config["user"], connect_kwargs={'password':config["password"]}) + remote_top_dir = config["path"] + except Exception as r: + remote_conn = Connection(host=self.remoteIP, port=22, user='root', connect_kwargs={'password':'123456'}) + remote_top_dir = '~/test' + valgrindStr = '' + if (self.valgrind==1): + valgrindStr = '-g' + remoteCfgDict = copy.deepcopy(updateCfgDict) + if ("logDir" in remoteCfgDict): + del remoteCfgDict["logDir"] + if ("dataDir" in remoteCfgDict): + del remoteCfgDict["dataDir"] + if ("cfgDir" in remoteCfgDict): + del remoteCfgDict["cfgDir"] + remoteCfgDictStr = base64.b64encode(json.dumps(remoteCfgDict).encode()).decode() + execCmdStr = base64.b64encode(execCmd.encode()).decode() + with remote_conn.cd((remote_top_dir+sys.path[0].replace(self.path, '')).replace('\\','/')): + remote_conn.run("python3 ./test.py %s -d %s -e %s"%(valgrindStr,remoteCfgDictStr,execCmdStr)) + def deploy(self, *updatecfgDict): self.logDir = "%s/sim/dnode%d/log" % (self.path, self.index) self.dataDir = "%s/sim/dnode%d/data" % (self.path, self.index) @@ -229,8 +258,11 @@ class TDDnode: self.cfg(value, key) else: self.addExtraCfg(key, value) - for key, value in self.cfgDict.items(): - self.cfg(key, value) + if (self.remoteIP == ""): + for key, value in self.cfgDict.items(): + self.cfg(key, value) + else: + self.remoteExec(self.cfgDict, "tdDnodes.deploy(%d,updateCfgDict)"%self.index) self.deployed = 1 tdLog.debug( @@ -247,7 +279,7 @@ class TDDnode: paths = [] for root, dirs, files in os.walk(projPath): - if ((tool) in files): + if ((tool) in files or ("%s.exe"%tool) in files): rootRealPath = os.path.dirname(os.path.realpath(root)) if ("packaging" not in rootRealPath): paths.append(os.path.join(root, tool)) @@ -268,117 +300,68 @@ class TDDnode: tdLog.exit("dnode:%d is not deployed" % (self.index)) if self.valgrind == 0: - cmd = "nohup %s -c %s > /dev/null 2>&1 & " % ( - binPath, self.cfgDir) + if platform.system().lower() == 'windows': + cmd = "mintty -h never -w hide %s -c %s" % ( + binPath, self.cfgDir) + else: + cmd = "nohup %s -c %s > /dev/null 2>&1 & " % ( + binPath, self.cfgDir) else: valgrindCmdline = "valgrind --log-file=\"%s/../log/valgrind.log\" --tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all -v --workaround-gcc296-bugs=yes"%self.cfgDir - cmd = "nohup %s %s -c %s 2>&1 & " % ( - valgrindCmdline, binPath, self.cfgDir) + if platform.system().lower() == 'windows': + cmd = "mintty -h never -w hide %s %s -c %s" % ( + valgrindCmdline, binPath, self.cfgDir) + else: + cmd = "nohup %s %s -c %s 2>&1 & " % ( + valgrindCmdline, binPath, self.cfgDir) print(cmd) - if os.system(cmd) != 0: - tdLog.exit(cmd) - self.running = 1 - tdLog.debug("dnode:%d is running with %s " % (self.index, cmd)) - if self.valgrind == 0: - time.sleep(0.1) - key = 'from offline to online' - bkey = bytes(key, encoding="utf8") - logFile = self.logDir + "/taosdlog.0" - i = 0 - while not os.path.exists(logFile): - sleep(0.1) - i += 1 - if i > 50: - break - popen = subprocess.Popen( - 'tail -f ' + logFile, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - shell=True) - pid = popen.pid - # print('Popen.pid:' + str(pid)) - timeout = time.time() + 60 * 2 - while True: - line = popen.stdout.readline().strip() - if bkey in line: - popen.kill() - break - if time.time() > timeout: - tdLog.exit('wait too long for taosd start') - 
tdLog.debug("the dnode:%d has been started." % (self.index)) - else: - tdLog.debug( - "wait 10 seconds for the dnode:%d to start." % - (self.index)) - time.sleep(10) - - # time.sleep(5) - def startWin(self): - binPath = self.getPath("taosd.exe") - - if (binPath == ""): - tdLog.exit("taosd.exe not found!") - else: - tdLog.info("taosd.exe found: %s" % binPath) - - taosadapterBinPath = self.getPath("taosadapter.exe") - if (taosadapterBinPath == ""): - tdLog.info("taosAdapter.exe not found!") - else: - tdLog.info("taosAdapter.exe found in %s" % taosadapterBuildPath) - - if self.deployed == 0: - tdLog.exit("dnode:%d is not deployed" % (self.index)) - - cmd = "mintty -h never %s -c %s" % ( - binPath, self.cfgDir) - - if (taosadapterBinPath != ""): - taosadapterCmd = "mintty -h never -w hide %s --monitor.writeToTD=false " % ( - taosadapterBinPath) - if os.system(taosadapterCmd) != 0: - tdLog.exit(taosadapterCmd) - - if os.system(cmd) != 0: - tdLog.exit(cmd) - - self.running = 1 - tdLog.debug("dnode:%d is running with %s " % (self.index, cmd)) - if self.valgrind == 0: - time.sleep(0.1) - key = 'from offline to online' - bkey = bytes(key, encoding="utf8") - logFile = self.logDir + "/taosdlog.0" - i = 0 - while not os.path.exists(logFile): - sleep(0.1) - i += 1 - if i > 50: - break - popen = subprocess.Popen( - 'tail -n +0 -f ' + logFile, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - shell=True) - pid = popen.pid - # print('Popen.pid:' + str(pid)) - timeout = time.time() + 60 * 2 - while True: - line = popen.stdout.readline().strip() - if bkey in line: - popen.kill() - break - if time.time() > timeout: - tdLog.exit('wait too long for taosd start') - tdLog.debug("the dnode:%d has been started." % (self.index)) + if (not self.remoteIP == ""): + self.remoteExec(self.cfgDict, "tdDnodes.deploy(%d,updateCfgDict)\ntdDnodes.start(%d)"%(self.index, self.index)) + self.running = 1 else: - tdLog.debug( - "wait 10 seconds for the dnode:%d to start." % - (self.index)) - time.sleep(10) + if os.system(cmd) != 0: + tdLog.exit(cmd) + self.running = 1 + print("dnode:%d is running with %s " % (self.index, cmd)) + tdLog.debug("dnode:%d is running with %s " % (self.index, cmd)) + if self.valgrind == 0: + time.sleep(0.1) + key = 'from offline to online' + bkey = bytes(key, encoding="utf8") + logFile = self.logDir + "/taosdlog.0" + i = 0 + while not os.path.exists(logFile): + sleep(0.1) + i += 1 + if i > 50: + break + tailCmdStr = 'tail -f ' + if platform.system().lower() == 'windows': + tailCmdStr = 'tail -n +0 -f ' + popen = subprocess.Popen( + tailCmdStr + logFile, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + shell=True) + pid = popen.pid + # print('Popen.pid:' + str(pid)) + timeout = time.time() + 60 * 2 + while True: + line = popen.stdout.readline().strip() + if bkey in line: + popen.kill() + break + if time.time() > timeout: + tdLog.exit('wait too long for taosd start') + tdLog.debug("the dnode:%d has been started." % (self.index)) + else: + tdLog.debug( + "wait 10 seconds for the dnode:%d to start." 
% + (self.index)) + time.sleep(10) def startWithoutSleep(self): binPath = self.getPath() @@ -402,12 +385,20 @@ class TDDnode: print(cmd) - if os.system(cmd) != 0: - tdLog.exit(cmd) + if (self.remoteIP == ""): + if os.system(cmd) != 0: + tdLog.exit(cmd) + else: + self.remoteExec(self.cfgDict, "tdDnodes.deploy(%d,updateCfgDict)\ntdDnodes.startWithoutSleep(%d)"%(self.index, self.index)) + self.running = 1 tdLog.debug("dnode:%d is running with %s " % (self.index, cmd)) def stop(self): + if (not self.remoteIP == ""): + self.remoteExec(self.cfgDict, "tdDnodes.stop(%d)"%self.index) + tdLog.info("stop dnode%d"%self.index) + return if self.valgrind == 0: toBeKilled = "taosd" else: @@ -424,9 +415,10 @@ class TDDnode: time.sleep(1) processID = subprocess.check_output( psCmd, shell=True).decode("utf-8") - for port in range(6030, 6041): - fuserCmd = "fuser -k -n tcp %d" % port - os.system(fuserCmd) + if not platform.system().lower() == 'windows': + for port in range(6030, 6041): + fuserCmd = "fuser -k -n tcp %d" % port + os.system(fuserCmd) if self.valgrind: time.sleep(2) @@ -434,6 +426,9 @@ class TDDnode: tdLog.debug("dnode:%d is stopped by kill -INT" % (self.index)) def forcestop(self): + if (not self.remoteIP == ""): + self.remoteExec(self.cfgDict, "tdDnodes.forcestop(%d)"%self.index) + return if self.valgrind == 0: toBeKilled = "taosd" else: @@ -498,8 +493,11 @@ class TDDnodes: self.dnodes.append(TDDnode(9)) self.dnodes.append(TDDnode(10)) self.simDeployed = False + self.testCluster = False + self.valgrind = 0 + self.killValgrind = 1 - def init(self, path): + def init(self, path, remoteIP = ""): psCmd = "ps -ef|grep -w taosd| grep -v grep| grep -v defunct | awk '{print $2}'" processID = subprocess.check_output(psCmd, shell=True).decode("utf-8") while(processID): @@ -509,19 +507,20 @@ class TDDnodes: processID = subprocess.check_output( psCmd, shell=True).decode("utf-8") - psCmd = "ps -ef|grep -w valgrind.bin| grep -v grep | awk '{print $2}'" - processID = subprocess.check_output(psCmd, shell=True).decode("utf-8") - while(processID): - killCmd = "kill -9 %s > /dev/null 2>&1" % processID - os.system(killCmd) - time.sleep(1) - processID = subprocess.check_output( - psCmd, shell=True).decode("utf-8") + if self.killValgrind == 1: + psCmd = "ps -ef|grep -w valgrind.bin| grep -v grep | awk '{print $2}'" + processID = subprocess.check_output(psCmd, shell=True).decode("utf-8") + while(processID): + killCmd = "kill -9 %s > /dev/null 2>&1" % processID + os.system(killCmd) + time.sleep(1) + processID = subprocess.check_output( + psCmd, shell=True).decode("utf-8") binPath = self.dnodes[0].getPath() + "/../../../" - tdLog.debug("binPath %s" % (binPath)) + # tdLog.debug("binPath %s" % (binPath)) binPath = os.path.realpath(binPath) - tdLog.debug("binPath real path %s" % (binPath)) + # tdLog.debug("binPath real path %s" % (binPath)) # cmd = "sudo cp %s/build/lib/libtaos.so /usr/local/lib/taos/" % (binPath) # tdLog.debug(cmd) @@ -544,7 +543,7 @@ class TDDnodes: self.path = os.path.realpath(path) for i in range(len(self.dnodes)): - self.dnodes[i].init(self.path) + self.dnodes[i].init(self.path, remoteIP) self.sim = TDSimClient(self.path) def setTestCluster(self, value): @@ -553,6 +552,9 @@ class TDDnodes: def setValgrind(self, value): self.valgrind = value + def setKillValgrind(self, value): + self.killValgrind = value + def deploy(self, index, *updatecfgDict): self.sim.setTestCluster(self.testCluster) @@ -573,10 +575,6 @@ class TDDnodes: self.check(index) self.dnodes[index - 1].start() - def startWin(self, index): - 
self.check(index)
-        self.dnodes[index - 1].startWin()
-
     def startWithoutSleep(self, index):
         self.check(index)
         self.dnodes[index - 1].startWithoutSleep()
@@ -630,14 +628,15 @@
             processID = subprocess.check_output(
                 psCmd, shell=True).decode("utf-8")

-        psCmd = "ps -ef|grep -w valgrind.bin| grep -v grep | awk '{print $2}'"
-        processID = subprocess.check_output(psCmd, shell=True).decode("utf-8")
-        while(processID):
-            killCmd = "kill -TERM %s > /dev/null 2>&1" % processID
-            os.system(killCmd)
-            time.sleep(1)
-            processID = subprocess.check_output(
-                psCmd, shell=True).decode("utf-8")
+        if self.killValgrind == 1:
+            psCmd = "ps -ef|grep -w valgrind.bin| grep -v grep | awk '{print $2}'"
+            processID = subprocess.check_output(psCmd, shell=True).decode("utf-8")
+            while(processID):
+                killCmd = "kill -TERM %s > /dev/null 2>&1" % processID
+                os.system(killCmd)
+                time.sleep(1)
+                processID = subprocess.check_output(
+                    psCmd, shell=True).decode("utf-8")

         # if os.system(cmd) != 0 :
         #   tdLog.exit(cmd)
diff --git a/tests/pytest/util/types.py b/tests/pytest/util/types.py
new file mode 100644
index 0000000000000000000000000000000000000000..218a4770269328a5ef7161cc56c0e0dc0c420f73
--- /dev/null
+++ b/tests/pytest/util/types.py
@@ -0,0 +1,38 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+from enum import Enum
+
+class TDSmlProtocolType(Enum):
+    '''
+    Schemaless Protocol types
+    0 - unknown
+    1 - InfluxDB Line Protocol
+    2 - OpenTSDB Telnet Protocol
+    3 - OpenTSDB JSON Protocol
+    '''
+    UNKNOWN = 0
+    LINE = 1
+    TELNET = 2
+    JSON = 3
+
+class TDSmlTimestampType(Enum):
+    NOT_CONFIGURED = 0
+    HOUR = 1
+    MINUTE = 2
+    SECOND = 3
+    MILLI_SECOND = 4
+    MICRO_SECOND = 5
+    NANO_SECOND = 6
+
+
diff --git a/tests/pytest/wal/addOldWalTest.py b/tests/pytest/wal/addOldWalTest.py
index 2f4dcd5ce807cf7bbadfa480af6ed6342058a78a..36056d1bc2d0bef786cf4a4092521867f861b93b 100644
--- a/tests/pytest/wal/addOldWalTest.py
+++ b/tests/pytest/wal/addOldWalTest.py
@@ -31,7 +31,7 @@ class TDTestCase:

     def createOldDirAndAddWal(self):
         oldDir = tdDnodes.getDnodesRootDir() + "dnode1/data/vnode/vnode2/wal/old"
-        os.system("sudo echo 'test' >> %s/wal" % oldDir)
+        os.system("sudo echo test >> %s/wal" % oldDir)

     def run(self):
diff --git a/tests/script/api/batchprepare.c b/tests/script/api/batchprepare.c
index 2ded58a979ad16e06f03ab8d4f828f1c10731df3..7dd7621d0b429caeb2e54c0215b29c4a0b396124 100644
--- a/tests/script/api/batchprepare.c
+++ b/tests/script/api/batchprepare.c
@@ -10,6 +10,9 @@
 #include "../../../include/client/taos.h"

 #define FUNCTION_TEST_IDX 1
+#define TIME_PRECISION_MILLI 0
+#define TIME_PRECISION_MICRO 1
+#define TIME_PRECISION_NANO  2

 int32_t shortColList[] = {TSDB_DATA_TYPE_TIMESTAMP, TSDB_DATA_TYPE_INT};
 int32_t fullColList[] = {TSDB_DATA_TYPE_TIMESTAMP, TSDB_DATA_TYPE_BOOL, TSDB_DATA_TYPE_TINYINT, TSDB_DATA_TYPE_UTINYINT, TSDB_DATA_TYPE_SMALLINT, TSDB_DATA_TYPE_USMALLINT, TSDB_DATA_TYPE_INT, TSDB_DATA_TYPE_UINT, TSDB_DATA_TYPE_BIGINT, TSDB_DATA_TYPE_UBIGINT, TSDB_DATA_TYPE_FLOAT, TSDB_DATA_TYPE_DOUBLE, TSDB_DATA_TYPE_BINARY, TSDB_DATA_TYPE_NCHAR};
@@ -32,6 +35,8 @@ 
typedef enum { BP_BIND_COL, } BP_BIND_TYPE; +#define BP_BIND_TYPE_STR(t) (((t) == BP_BIND_COL) ? "column" : "tag") + OperInfo operInfo[] = { {">", 2, false}, {">=", 2, false}, @@ -57,11 +62,12 @@ FuncInfo funcInfo[] = { {"min", 1}, }; +#define BP_STARTUP_TS 1591060628000 + char *bpStbPrefix = "st"; char *bpTbPrefix = "t"; int32_t bpDefaultStbId = 1; - - +int64_t bpTs; //char *operatorList[] = {">", ">=", "<", "<=", "=", "<>", "in", "not in"}; //char *varoperatorList[] = {">", ">=", "<", "<=", "=", "<>", "in", "not in", "like", "not like", "match", "nmatch"}; @@ -188,8 +194,10 @@ typedef struct { bool printCreateTblSql; bool printQuerySql; bool printStmtSql; + bool printVerbose; bool autoCreateTbl; bool numericParam; + uint8_t precision; int32_t rowNum; //row num for one table int32_t bindColNum; int32_t bindTagNum; @@ -209,12 +217,15 @@ typedef struct { int32_t caseRunNum; // total run case num } CaseCtrl; -#if 1 +#if 0 CaseCtrl gCaseCtrl = { // default + .precision = TIME_PRECISION_MICRO, .bindNullNum = 0, .printCreateTblSql = false, .printQuerySql = true, .printStmtSql = true, + .printVerbose = false, + .printRes = false, .autoCreateTbl = false, .numericParam = false, .rowNum = 0, @@ -230,7 +241,6 @@ CaseCtrl gCaseCtrl = { // default .funcIdxListNum = 0, .funcIdxList = NULL, .checkParamNum = false, - .printRes = false, .runTimes = 0, .caseIdx = -1, .caseNum = -1, @@ -240,26 +250,35 @@ CaseCtrl gCaseCtrl = { // default #endif -#if 0 +#if 1 CaseCtrl gCaseCtrl = { + .precision = TIME_PRECISION_MILLI, .bindNullNum = 0, - .printCreateTblSql = true, + .printCreateTblSql = false, .printQuerySql = true, .printStmtSql = true, + .printVerbose = false, + .printRes = true, .autoCreateTbl = false, + .numericParam = false, .rowNum = 0, .bindColNum = 0, .bindTagNum = 0, .bindRowNum = 0, + .bindColTypeNum = 0, + .bindColTypeList = NULL, .bindTagTypeNum = 0, .bindTagTypeList = NULL, + .optrIdxListNum = 0, + .optrIdxList = NULL, + .funcIdxListNum = 0, + .funcIdxList = NULL, .checkParamNum = false, - .printRes = false, .runTimes = 0, - .caseIdx = 1, - .caseNum = 1, + .caseIdx = -1, + .caseNum = -1, .caseRunIdx = -1, - .caseRunNum = 1, + .caseRunNum = -1, }; #endif @@ -891,7 +910,6 @@ int32_t prepareColData(BP_BIND_TYPE bType, BindData *data, int32_t bindIdx, int3 int32_t prepareInsertData(BindData *data) { - static int64_t tsData = 1591060628000; uint64_t allRowNum = gCurCase->rowNum * gCurCase->tblNum; data->colNum = 0; @@ -918,7 +936,7 @@ int32_t prepareInsertData(BindData *data) { } for (int32_t i = 0; i < allRowNum; ++i) { - data->tsData[i] = tsData++; + data->tsData[i] = bpTs++; data->boolData[i] = (bool)(i % 2); data->tinyData[i] = (int8_t)i; data->utinyData[i] = (uint8_t)(i+1); @@ -956,7 +974,6 @@ int32_t prepareInsertData(BindData *data) { } int32_t prepareQueryCondData(BindData *data, int32_t tblIdx) { - static int64_t tsData = 1591060628000; uint64_t bindNum = gCurCase->rowNum / gCurCase->bindRowNum; data->colNum = 0; @@ -982,7 +999,7 @@ int32_t prepareQueryCondData(BindData *data, int32_t tblIdx) { } for (int32_t i = 0; i < bindNum; ++i) { - data->tsData[i] = tsData + tblIdx*gCurCase->rowNum + rand()%gCurCase->rowNum; + data->tsData[i] = bpTs + tblIdx*gCurCase->rowNum + rand()%gCurCase->rowNum; data->boolData[i] = (bool)(tblIdx*gCurCase->rowNum + rand() % gCurCase->rowNum); data->tinyData[i] = (int8_t)(tblIdx*gCurCase->rowNum + rand() % gCurCase->rowNum); data->utinyData[i] = (uint8_t)(tblIdx*gCurCase->rowNum + rand() % gCurCase->rowNum); @@ -1014,7 +1031,6 @@ int32_t 
prepareQueryCondData(BindData *data, int32_t tblIdx) { int32_t prepareQueryMiscData(BindData *data, int32_t tblIdx) { - static int64_t tsData = 1591060628000; uint64_t bindNum = gCurCase->rowNum / gCurCase->bindRowNum; data->colNum = 0; @@ -1040,7 +1056,7 @@ int32_t prepareQueryMiscData(BindData *data, int32_t tblIdx) { } for (int32_t i = 0; i < bindNum; ++i) { - data->tsData[i] = tsData + tblIdx*gCurCase->rowNum + rand()%gCurCase->rowNum; + data->tsData[i] = bpTs + tblIdx*gCurCase->rowNum + rand()%gCurCase->rowNum; data->boolData[i] = (bool)(tblIdx*gCurCase->rowNum + rand() % gCurCase->rowNum); data->tinyData[i] = (int8_t)(tblIdx*gCurCase->rowNum + rand() % gCurCase->rowNum); data->utinyData[i] = (uint8_t)(tblIdx*gCurCase->rowNum + rand() % gCurCase->rowNum); @@ -1202,39 +1218,7 @@ int32_t bpAppendValueString(char *buf, int type, void *value, int32_t valueLen, } -int32_t bpBindParam(TAOS_STMT *stmt, TAOS_MULTI_BIND *bind) { - static int32_t n = 0; - - if (gCurCase->bindRowNum > 1) { - if (0 == (n++%2)) { - if (taos_stmt_bind_param_batch(stmt, bind)) { - printf("!!!taos_stmt_bind_param_batch error:%s\n", taos_stmt_errstr(stmt)); - exit(1); - } - } else { - for (int32_t i = 0; i < gCurCase->bindColNum; ++i) { - if (taos_stmt_bind_single_param_batch(stmt, bind++, i)) { - printf("!!!taos_stmt_bind_single_param_batch error:%s\n", taos_stmt_errstr(stmt)); - exit(1); - } - } - } - } else { - if (0 == (n++%2)) { - if (taos_stmt_bind_param_batch(stmt, bind)) { - printf("!!!taos_stmt_bind_param_batch error:%s\n", taos_stmt_errstr(stmt)); - exit(1); - } - } else { - if (taos_stmt_bind_param(stmt, bind)) { - printf("!!!taos_stmt_bind_param error:%s\n", taos_stmt_errstr(stmt)); - exit(1); - } - } - } - return 0; -} void bpCheckIsInsert(TAOS_STMT *stmt, int32_t insert) { int32_t isInsert = 0; @@ -1280,15 +1264,12 @@ void bpCheckAffectedRowsOnce(TAOS_STMT *stmt, int32_t expectedNum) { } void bpCheckQueryResult(TAOS_STMT *stmt, TAOS *taos, char *stmtSql, TAOS_MULTI_BIND* bind) { - TAOS_RES* res = taos_stmt_use_result(stmt); - int32_t sqlResNum = 0; - int32_t stmtResNum = 0; - bpFetchRows(res, gCaseCtrl.printRes, &stmtResNum); - + // query using sql char sql[1024]; int32_t len = 0; char* p = stmtSql; char* s = NULL; + int32_t sqlResNum = 0; for (int32_t i = 0; true; ++i, p=s+1) { s = strchr(p, '?'); @@ -1313,6 +1294,12 @@ void bpCheckQueryResult(TAOS_STMT *stmt, TAOS *taos, char *stmtSql, TAOS_MULTI_B } bpExecQuery(taos, sql, gCaseCtrl.printRes, &sqlResNum); + + // query using stmt + TAOS_RES* res = taos_stmt_use_result(stmt); + int32_t stmtResNum = 0; + bpFetchRows(res, gCaseCtrl.printRes, &stmtResNum); + if (sqlResNum != stmtResNum) { printf("!!!sql res num %d mis-match stmt res num %d\n", sqlResNum, stmtResNum); exit(1); @@ -1321,9 +1308,165 @@ void bpCheckQueryResult(TAOS_STMT *stmt, TAOS *taos, char *stmtSql, TAOS_MULTI_B printf("***sql res num match stmt res num %d\n", stmtResNum); } +void bpCheckColTagFields(TAOS_STMT *stmt, int32_t fieldNum, TAOS_FIELD_E* pFields, int32_t expecteNum, TAOS_MULTI_BIND* pBind, BP_BIND_TYPE type) { + int32_t code = 0; + + if (fieldNum != expecteNum) { + printf("!!!%s field num %d mis-match expect num %d\n", BP_BIND_TYPE_STR(type), fieldNum, expecteNum); + exit(1); + } + + if (type == BP_BIND_COL) { + if (pFields[0].precision != gCaseCtrl.precision) { + printf("!!!db precision %d mis-match expect %d\n", pFields[0].precision, gCaseCtrl.precision); + exit(1); + } + } + + for (int32_t i = 0; i < fieldNum; ++i) { + if (pFields[i].type != pBind[i].buffer_type) { + 
printf("!!!%s %dth field type %d mis-match expect type %d\n", BP_BIND_TYPE_STR(type), i, pFields[i].type, pBind[i].buffer_type);
+      exit(1);
+    }
+
+    if (pFields[i].type == TSDB_DATA_TYPE_BINARY) {
+      if (pFields[i].bytes != (pBind[i].buffer_length + 2)) {
+        printf("!!!%s %dth field len %d mis-match expect len %d\n", BP_BIND_TYPE_STR(type), i, pFields[i].bytes, (pBind[i].buffer_length + 2));
+        exit(1);
+      }
+    } else if (pFields[i].type == TSDB_DATA_TYPE_NCHAR) {
+      if (pFields[i].bytes != (pBind[i].buffer_length * 4 + 2)) {
+        printf("!!!%s %dth field len %d mis-match expect len %d\n", BP_BIND_TYPE_STR(type), i, pFields[i].bytes, (pBind[i].buffer_length * 4 + 2));
+        exit(1);
+      }
+    } else if (pFields[i].bytes != pBind[i].buffer_length) {
+      printf("!!!%s %dth field len %d mis-match expect len %d\n", BP_BIND_TYPE_STR(type), i, pFields[i].bytes, pBind[i].buffer_length);
+      exit(1);
+    }
+  }
+
+  if (type == BP_BIND_COL) {
+    int fieldType = 0;
+    int fieldBytes = 0;
+    for (int32_t i = 0; i < fieldNum; ++i) {
+      code = taos_stmt_get_param(stmt, i, &fieldType, &fieldBytes);
+      if (code) {
+        printf("!!!taos_stmt_get_param error:%s\n", taos_stmt_errstr(stmt));
+        exit(1);
+      }
+
+      if (pFields[i].type != fieldType) {
+        printf("!!!%s %dth field type %d mis-match expect type %d\n", BP_BIND_TYPE_STR(type), i, fieldType, pFields[i].type);
+        exit(1);
+      }
+
+      if (pFields[i].bytes != fieldBytes) {
+        printf("!!!%s %dth field len %d mis-match expect len %d\n", BP_BIND_TYPE_STR(type), i, fieldBytes, pFields[i].bytes);
+        exit(1);
+      }
+    }
+  }
+
+  if (gCaseCtrl.printVerbose) {
+    printf("%s fields check passed\n", BP_BIND_TYPE_STR(type));
+  }
+}
+
+void bpCheckTagFields(TAOS_STMT *stmt, TAOS_MULTI_BIND* pBind) {
+  int32_t code = 0;
+  int fieldNum = 0;
+  TAOS_FIELD_E* pFields = NULL;
+  code = taos_stmt_get_tag_fields(stmt, &fieldNum, &pFields);
+  if (code != 0){
+    printf("!!!taos_stmt_get_tag_fields error:%s\n", taos_stmt_errstr(stmt));
+    exit(1);
+  }
+
+  bpCheckColTagFields(stmt, fieldNum, pFields, gCurCase->bindTagNum, pBind, BP_BIND_TAG);
+}
+
+void bpCheckColFields(TAOS_STMT *stmt, TAOS_MULTI_BIND* pBind) {
+  if (gCurCase->testType == TTYPE_QUERY) {
+    return;
+  }
+
+  int32_t code = 0;
+  int fieldNum = 0;
+  TAOS_FIELD_E* pFields = NULL;
+  code = taos_stmt_get_col_fields(stmt, &fieldNum, &pFields);
+  if (code != 0){
+    printf("!!!taos_stmt_get_col_fields error:%s\n", taos_stmt_errstr(stmt));
+    exit(1);
+  }
+
+  bpCheckColTagFields(stmt, fieldNum, pFields, gCurCase->bindColNum, pBind, BP_BIND_COL);
+}
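The helpers above validate the stmt field metadata against the TAOS_MULTI_BIND descriptors the test binds, and bpShowBindParam below dumps those descriptors whenever a bind call fails. For orientation, a small sketch of how one such descriptor is typically filled for an INT column; the field layout follows taos.h, while the helper name and the omitted taos_stmt_* calls are placeholders:

#include <stdint.h>
#include <string.h>

#include "taos.h"  /* TAOS_MULTI_BIND, TSDB_DATA_TYPE_INT */

/* Sketch: bind `rows` int32 values to one column slot. demoFillIntBind is
 * an illustrative helper, not part of the TDengine client API. */
static void demoFillIntBind(TAOS_MULTI_BIND *b, int32_t *vals, char *nulls, int rows) {
  memset(b, 0, sizeof(*b));            /* keep optional fields well-defined */
  b->buffer_type   = TSDB_DATA_TYPE_INT;
  b->buffer        = vals;             /* rows * sizeof(int32_t) payload */
  b->buffer_length = sizeof(int32_t);  /* per-element width */
  b->is_null       = nulls;            /* one flag byte per row, 1 = NULL */
  b->num           = rows;             /* rows carried by this descriptor */
}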
+void bpShowBindParam(TAOS_MULTI_BIND *bind, int32_t num) {
+  for (int32_t i = 0; i < num; ++i) {
+    TAOS_MULTI_BIND* b = &bind[i];
+    printf("Bind %d: type[%d],buf[%p],buflen[%d],len[%d],null[%d],num[%d]\n",
+           i, b->buffer_type, b->buffer, b->buffer_length, b->length ? *b->length : 0, b->is_null ? *b->is_null : 0, b->num);
+  }
+}
+
+int32_t bpBindParam(TAOS_STMT *stmt, TAOS_MULTI_BIND *bind) {
+  static int32_t n = 0;
+
+  bpCheckColFields(stmt, bind);
+
+  if (gCurCase->bindRowNum > 1) {
+    if (0 == (n++%2)) {
+      if (taos_stmt_bind_param_batch(stmt, bind)) {
+        printf("!!!taos_stmt_bind_param_batch error:%s\n", taos_stmt_errstr(stmt));
+        bpShowBindParam(bind, gCurCase->bindColNum);
+        exit(1);
+      }
+    } else {
+      for (int32_t i = 0; i < gCurCase->bindColNum; ++i) {
+        if (taos_stmt_bind_single_param_batch(stmt, bind+i, i)) {
+          printf("!!!taos_stmt_bind_single_param_batch %d error:%s\n", i, taos_stmt_errstr(stmt));
+          bpShowBindParam(bind, gCurCase->bindColNum);
+          exit(1);
+        }
+      }
+    }
+  } else {
+    if (0 == (n++%2)) {
+      if (taos_stmt_bind_param_batch(stmt, bind)) {
+        printf("!!!taos_stmt_bind_param_batch error:%s\n", taos_stmt_errstr(stmt));
+        bpShowBindParam(bind, gCurCase->bindColNum);
+        exit(1);
+      }
+    } else {
+      if (taos_stmt_bind_param(stmt, bind)) {
+        printf("!!!taos_stmt_bind_param error:%s\n", taos_stmt_errstr(stmt));
+        bpShowBindParam(bind, gCurCase->bindColNum);
+        exit(1);
+      }
+    }
+  }
+
+  return 0;
+}
+
 int32_t bpSetTableNameTags(BindData *data, int32_t tblIdx, char *tblName, TAOS_STMT *stmt) {
+  int32_t code = 0;
   if (gCurCase->bindTagNum > 0) {
-    return taos_stmt_set_tbname_tags(stmt, tblName, data->pTags + tblIdx * gCurCase->bindTagNum);
+    if ((rand() % 2) == 0) {
+      code = taos_stmt_set_tbname(stmt, tblName);
+      if (code != 0){
+        printf("!!!taos_stmt_set_tbname error:%s\n", taos_stmt_errstr(stmt));
+        exit(1);
+      }
+
+      bpCheckTagFields(stmt, data->pTags + tblIdx * gCurCase->bindTagNum);
+
+      return taos_stmt_set_tags(stmt, data->pTags + tblIdx * gCurCase->bindTagNum);
+    } else {
+      return taos_stmt_set_tbname_tags(stmt, tblName, data->pTags + tblIdx * gCurCase->bindTagNum);
+    }
   } else {
     return taos_stmt_set_tbname(stmt, tblName);
   }
@@ -1755,7 +1898,7 @@ int insertAUTOTest1(TAOS_STMT *stmt, TAOS *taos) {
     if (gCurCase->tblNum > 1) {
       char buf[32];
       sprintf(buf, "t%d", t);
-      code = taos_stmt_set_tbname_tags(stmt, buf, data.pTags + t * gCurCase->bindTagNum);
+      code = bpSetTableNameTags(&data, t, buf, stmt);
       if (code != 0){
         printf("!!!taos_stmt_set_tbname_tags error:%s\n", taos_stmt_errstr(stmt));
         exit(1);
@@ -2223,14 +2366,48 @@ void generateCreateTableSQL(char *buf, int32_t tblIdx, int32_t colNum, int32_t *
   }
 }

+char *bpPrecisionStr(uint8_t precision) {
+  switch (precision) {
+    case TIME_PRECISION_MILLI:
+      return "ms";
+    case TIME_PRECISION_MICRO:
+      return "us";
+    case TIME_PRECISION_NANO:
+      return "ns";
+    default:
+      return "unknown";
+  }
+}
+
+void bpSetStartupTs() {
+  switch (gCaseCtrl.precision) {
+    case TIME_PRECISION_MILLI:
+      bpTs = BP_STARTUP_TS;
+      break;
+    case TIME_PRECISION_MICRO:
+      bpTs = BP_STARTUP_TS * 1000;
+      break;
+    case TIME_PRECISION_NANO:
+      bpTs = BP_STARTUP_TS * 1000000;
+      break;
+    default:
+      bpTs = BP_STARTUP_TS;
+      break;
+  }
+}
+
 void prepare(TAOS *taos, int32_t colNum, int32_t *colList, int prepareStb) {
   TAOS_RES *result;
   int code;
+  char createDbSql[128] = {0};

   result = taos_query(taos, "drop database demo");
   taos_free_result(result);

-  result = taos_query(taos, "create database demo keep 36500");
+  sprintf(createDbSql, "create database demo keep 36500 precision \"%s\"", bpPrecisionStr(gCaseCtrl.precision));
+  printf("\tCreate Database SQL:%s\n", createDbSql);
+
+  result = taos_query(taos, createDbSql);
   code = taos_errno(result);
   if (code != 0) {
     printf("!!!failed to create database, reason:%s\n", taos_errstr(result));
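bpSetStartupTs above rescales the millisecond base timestamp to the database precision under test. The same arithmetic in isolation, to show the values stay comfortably inside int64_t (the constant mirrors BP_STARTUP_TS; this is a sketch, not part of the test harness):

#include <stdint.h>
#include <stdio.h>

#define DEMO_STARTUP_TS 1591060628000LL  /* 2020-06-02, in milliseconds */

int main(void) {
  int64_t ms = DEMO_STARTUP_TS;
  int64_t us = ms * 1000;     /* microseconds */
  int64_t ns = ms * 1000000;  /* nanoseconds: ~1.6e18, well under INT64_MAX */
  printf("ms=%lld us=%lld ns=%lld\n", (long long)ms, (long long)us, (long long)ns);
  return 0;
}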
@@ -2278,6 +2455,8 @@ int32_t runCase(TAOS *taos, int32_t caseIdx, int32_t caseRunIdx, bool silent) { CaseCfg cfg = gCase[caseIdx]; CaseCfg cfgBk; gCurCase = &cfg; + + bpSetStartupTs(); if ((gCaseCtrl.bindColTypeNum || gCaseCtrl.bindColNum) && (gCurCase->colNum != gFullColNum)) { return 1; @@ -2413,22 +2592,28 @@ void* runCaseList(TAOS *taos) { } void runAll(TAOS *taos) { -#if 1 - - strcpy(gCaseCtrl.caseCatalog, "Normal Test"); + strcpy(gCaseCtrl.caseCatalog, "Default Test"); printf("%s Begin\n", gCaseCtrl.caseCatalog); runCaseList(taos); + strcpy(gCaseCtrl.caseCatalog, "Micro DB precision Test"); + printf("%s Begin\n", gCaseCtrl.caseCatalog); + gCaseCtrl.precision = TIME_PRECISION_MICRO; + runCaseList(taos); + gCaseCtrl.precision = TIME_PRECISION_MILLI; + strcpy(gCaseCtrl.caseCatalog, "Nano DB precision Test"); + printf("%s Begin\n", gCaseCtrl.caseCatalog); + gCaseCtrl.precision = TIME_PRECISION_NANO; + runCaseList(taos); + gCaseCtrl.precision = TIME_PRECISION_MILLI; + strcpy(gCaseCtrl.caseCatalog, "Auto Create Table Test"); gCaseCtrl.autoCreateTbl = true; printf("%s Begin\n", gCaseCtrl.caseCatalog); runCaseList(taos); gCaseCtrl.autoCreateTbl = false; - -#endif -/* strcpy(gCaseCtrl.caseCatalog, "Null Test"); printf("%s Begin\n", gCaseCtrl.caseCatalog); gCaseCtrl.bindNullNum = 1; @@ -2441,6 +2626,7 @@ void runAll(TAOS *taos) { runCaseList(taos); gCaseCtrl.bindRowNum = 0; +#if 0 strcpy(gCaseCtrl.caseCatalog, "Row Num Test"); printf("%s Begin\n", gCaseCtrl.caseCatalog); gCaseCtrl.rowNum = 1000; @@ -2448,23 +2634,21 @@ void runAll(TAOS *taos) { runCaseList(taos); gCaseCtrl.rowNum = 0; gCaseCtrl.printRes = true; -*/ strcpy(gCaseCtrl.caseCatalog, "Runtimes Test"); printf("%s Begin\n", gCaseCtrl.caseCatalog); gCaseCtrl.runTimes = 2; runCaseList(taos); gCaseCtrl.runTimes = 0; +#endif -#if 1 strcpy(gCaseCtrl.caseCatalog, "Check Param Test"); printf("%s Begin\n", gCaseCtrl.caseCatalog); gCaseCtrl.checkParamNum = true; runCaseList(taos); gCaseCtrl.checkParamNum = false; -#endif -/* +#if 0 strcpy(gCaseCtrl.caseCatalog, "Bind Col Num Test"); printf("%s Begin\n", gCaseCtrl.caseCatalog); gCaseCtrl.bindColNum = 6; @@ -2476,7 +2660,7 @@ void runAll(TAOS *taos) { gCaseCtrl.bindColTypeNum = tListLen(bindColTypeList); gCaseCtrl.bindColTypeList = bindColTypeList; runCaseList(taos); -*/ +#endif printf("All Test End\n"); } diff --git a/tests/script/general/alter/cached_schema_after_alter.sim b/tests/script/general/alter/cached_schema_after_alter.sim index 96ee4390845450d53508cc90c48a3148a0a827dd..043f360856e4b4f0533bf4dc5e4be7cea71c3325 100644 --- a/tests/script/general/alter/cached_schema_after_alter.sim +++ b/tests/script/general/alter/cached_schema_after_alter.sim @@ -1,9 +1,6 @@ system sh/stop_dnodes.sh - system sh/deploy.sh -n dnode1 -i 1 -system sh/cfg.sh -n dnode1 -c wallevel -v 2 system sh/exec.sh -n dnode1 -s start -sleep 2000 sql connect $db = csaa_db diff --git a/tests/script/general/alter/dnode.sim b/tests/script/general/alter/dnode.sim index 7b31218fc231cfdbb79ca97573cfc6f6f149037d..64e8a17de02c956a937aa1001ac4d5873a6bed21 100644 --- a/tests/script/general/alter/dnode.sim +++ b/tests/script/general/alter/dnode.sim @@ -1,10 +1,6 @@ system sh/stop_dnodes.sh - system sh/deploy.sh -n dnode1 -i 1 -system sh/cfg.sh -n dnode1 -c walLevel -v 2 system sh/exec.sh -n dnode1 -s start - -sleep 2000 sql connect print ======== step1 diff --git a/tests/script/general/alter/table.sim b/tests/script/general/alter/table.sim index 06704eeca6b3149b47ddc2ffb90aaab9df934bd8..9ca2f60bdc37f827e0832dc59399bf73732d7748 100644 --- a/tests/script/general/alter/table.sim +++
b/tests/script/general/alter/table.sim @@ -1,10 +1,6 @@ system sh/stop_dnodes.sh - system sh/deploy.sh -n dnode1 -i 1 -system sh/cfg.sh -n dnode1 -c walLevel -v 2 system sh/exec.sh -n dnode1 -s start - -sleep 2000 sql connect print ======== step1 @@ -256,6 +252,7 @@ endi print ======== step8 sql alter table tb add column h binary(10) +sql select * from tb sql describe tb if $data00 != ts then return -1 @@ -308,7 +305,7 @@ endi if $data80 != h then return -1 endi -if $data81 != BINARY then +if $data81 != VARCHAR then return -1 endi if $data82 != 10 then @@ -375,7 +372,7 @@ endi if $data80 != h then return -1 endi -if $data81 != BINARY then +if $data81 != VARCHAR then return -1 endi if $data82 != 10 then @@ -451,7 +448,7 @@ endi if $data70 != h then return -1 endi -if $data71 != BINARY then +if $data71 != VARCHAR then return -1 endi if $data72 != 10 then @@ -500,7 +497,7 @@ endi if $data60 != h then return -1 endi -if $data61 != BINARY then +if $data61 != VARCHAR then return -1 endi if $data62 != 10 then @@ -543,7 +540,7 @@ endi if $data50 != h then return -1 endi -if $data51 != BINARY then +if $data51 != VARCHAR then return -1 endi if $data52 != 10 then @@ -580,7 +577,7 @@ endi if $data40 != h then return -1 endi -if $data41 != BINARY then +if $data41 != VARCHAR then return -1 endi if $data42 != 10 then @@ -611,7 +608,7 @@ endi if $data30 != h then return -1 endi -if $data31 != BINARY then +if $data31 != VARCHAR then return -1 endi if $data32 != 10 then @@ -636,7 +633,7 @@ endi if $data20 != h then return -1 endi -if $data21 != BINARY then +if $data21 != VARCHAR then return -1 endi if $data22 != 10 then diff --git a/tests/script/general/alter/testSuite.sim b/tests/script/general/alter/testSuite.sim deleted file mode 100644 index cfac68144c080593499159eec81325924e7f25e6..0000000000000000000000000000000000000000 --- a/tests/script/general/alter/testSuite.sim +++ /dev/null @@ -1,7 +0,0 @@ -run general/alter/cached_schema_after_alter.sim -run general/alter/count.sim -run general/alter/import.sim -run general/alter/insert1.sim -run general/alter/insert2.sim -run general/alter/metrics.sim -run general/alter/table.sim \ No newline at end of file diff --git a/tests/script/jenkins/basic.txt b/tests/script/jenkins/basic.txt index 623182fddf6e517ebeab028dd4183cda8264dbb4..bb446fb248602842b579817dbdbbcc02f0a0680f 100644 --- a/tests/script/jenkins/basic.txt +++ b/tests/script/jenkins/basic.txt @@ -55,8 +55,10 @@ ./test.sh -f tsim/bnode/basic1.sim # ---- mnode -#./test.sh -f tsim/mnode/basic1.sim +./test.sh -f tsim/mnode/basic1.sim ./test.sh -f tsim/mnode/basic2.sim +./test.sh -f tsim/mnode/basic3.sim +#./test.sh -f tsim/mnode/basic4.sim # ---- show ./test.sh -f tsim/show/basic.sim @@ -88,18 +90,26 @@ ./test.sh -f tsim/tmq/topic.sim # --- stable -./test.sh -f tsim/stable/alter1.sim ./test.sh -f tsim/stable/disk.sim ./test.sh -f tsim/stable/dnode3.sim ./test.sh -f tsim/stable/metrics.sim ./test.sh -f tsim/stable/refcount.sim -#./test.sh -f tsim/stable/show.sim +./test.sh -f tsim/stable/show.sim ./test.sh -f tsim/stable/values.sim ./test.sh -f tsim/stable/vnode3.sim ./test.sh -f tsim/stable/column_add.sim ./test.sh -f tsim/stable/column_drop.sim -#./test.sh -f tsim/stable/column_modify.sim - +./test.sh -f tsim/stable/column_modify.sim +./test.sh -f tsim/stable/tag_add.sim +./test.sh -f tsim/stable/tag_drop.sim +./test.sh -f tsim/stable/tag_modify.sim +./test.sh -f tsim/stable/tag_rename.sim +./test.sh -f tsim/stable/alter_comment.sim +./test.sh -f tsim/stable/alter_count.sim +./test.sh -f 
tsim/stable/alter_insert1.sim +./test.sh -f tsim/stable/alter_insert2.sim +./test.sh -f tsim/stable/alter_import.sim +./test.sh -f tsim/stable/tag_filter.sim # --- for multi process mode ./test.sh -f tsim/user/basic1.sim -m @@ -114,10 +124,19 @@ #./test.sh -f tsim/mnode/basic1.sim -m # --- sma -./test.sh -f tsim/sma/tsmaCreateInsertData.sim +#./test.sh -f tsim/sma/tsmaCreateInsertData.sim ./test.sh -f tsim/sma/rsmaCreateInsertQuery.sim # --- valgrind ./test.sh -f tsim/valgrind/checkError.sim -v +# --- sync +./test.sh -f tsim/sync/3Replica1VgElect.sim +./test.sh -f tsim/sync/3Replica5VgElect.sim +./test.sh -f tsim/sync/oneReplica1VgElect.sim +./test.sh -f tsim/sync/oneReplica5VgElect.sim + +# --- catalog +./test.sh -f tsim/catalog/alterInCurrent.sim + #======================b1-end=============== diff --git a/tests/script/tsim/bnode/basic1.sim b/tests/script/tsim/bnode/basic1.sim index b1db6efc72afce083d9594987ccee3d10ab83ef4..80608453b8cf1243f27583a719f315462a4412d4 100644 --- a/tests/script/tsim/bnode/basic1.sim +++ b/tests/script/tsim/bnode/basic1.sim @@ -24,7 +24,7 @@ if $data00 != 1 then return -1 endi -if $data02 != LEADER then +if $data02 != leader then return -1 endi @@ -71,7 +71,7 @@ if $data00 != 1 then return -1 endi -if $data02 != LEADER then +if $data02 != leader then return -1 endi diff --git a/tests/script/tsim/catalog/alterInCurrent.sim b/tests/script/tsim/catalog/alterInCurrent.sim new file mode 100644 index 0000000000000000000000000000000000000000..3cb337bbe1930104a21d3d31bf4d5d34a2515352 --- /dev/null +++ b/tests/script/tsim/catalog/alterInCurrent.sim @@ -0,0 +1,70 @@ +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 + +print ========= start dnode1 as LEADER +system sh/exec.sh -n dnode1 -s start +sql connect + +print ======== drop column in normal table +sql drop database if exists db1; +sql create database db1; +sql use db1; +sql create table t1 (ts timestamp, f1 int, f2 int); +sql insert into t1 values (1591060628000, 1, 2); +sql alter table t1 drop column f2; +sql insert into t1 values (1591060628001, 2); + +print ======== add column in normal table +sql drop database db1; +sql create database db1; +sql use db1; +sql create table t1 (ts timestamp, f1 int); +sql insert into t1 values (1591060628000, 1); +sql alter table t1 add column f2 int; +sql insert into t1 values (1591060628001, 2, 2); + + +print ======== drop column in super table +sql drop database db1; +sql create database db1; +sql use db1; +sql create stable st1 (ts timestamp, f1 int, f2 int) tags (t1 int); +sql create table t1 using st1 tags(1); +sql insert into t1 values (1591060628000, 1, 2); +sql alter table st1 drop column f2; +sql insert into t1 values (1591060628001, 2); + + +print ======== add column in super table +sql drop database db1; +sql create database db1; +sql use db1; +sql create stable st1 (ts timestamp, f1 int) tags (t1 int); +sql create table t1 using st1 tags(1); +sql insert into t1 values (1591060628000, 1); +sql alter table st1 add column f2 int; +sql insert into t1 values (1591060628001, 2, 2); + + +print ======== add tag in super table +sql drop database db1; +sql create database db1; +sql use db1; +sql create stable st1 (ts timestamp, f1 int) tags (t1 int); +sql create table t1 using st1 tags(1); +sql insert into t1 values (1591060628000, 1); +sql alter table st1 add tag t2 int; +sql create table t2 using st1 tags(2, 2); + + +print ======== drop tag in super table +sql drop database db1; +sql create database db1; +sql use db1; +sql create stable st1 (ts timestamp, f1 int) 
tags (t1 int, t2 int); +sql create table t1 using st1 tags(1, 1); +sql insert into t1 values (1591060628000, 1); +sql alter table st1 drop tag t2; +sql create table t2 using st1 tags(2); + +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/db/alter_option.sim b/tests/script/tsim/db/alter_option.sim index aeb04293f2d5df29a07e629d32df3d96cb5d16b1..f3adb4535ec0a2b6dae2de6a277ab3a913bb711a 100644 --- a/tests/script/tsim/db/alter_option.sim +++ b/tests/script/tsim/db/alter_option.sim @@ -131,43 +131,43 @@ endi sleep 3000 #sql show db.vgroups -#if $data[0][4] == LEADER then -# if $data[0][6] != FOLLOWER then +#if $data[0][4] == leader then +# if $data[0][6] != follower then # return -1 # endi -# if $data[0][8] != FOLLOWER then +# if $data[0][8] != follower then # return -1 # endi #endi -#if $data[0][6] == LEADER then -# if $data[0][4] != FOLLOWER then +#if $data[0][6] == leader then +# if $data[0][4] != follower then # return -1 # endi -# if $data[0][8] != FOLLOWER then +# if $data[0][8] != follower then # return -1 # endi #endi -#if $data[0][8] == LEADER then -# if $data[0][4] != FOLLOWER then +#if $data[0][8] == leader then +# if $data[0][4] != follower then # return -1 # endi -# if $data[0][6] != FOLLOWER then +# if $data[0][6] != follower then # return -1 # endi #endi # -#if $data[0][4] != LEADER then -# if $data[0][4] != FOLLOWER then +#if $data[0][4] != leader then +# if $data[0][4] != follower then # return -1 # endi #endi -#if $data[0][6] != LEADER then -# if $data[0][6] != FOLLOWER then +#if $data[0][6] != leader then +# if $data[0][6] != follower then # return -1 # endi #endi -#if $data[0][8] != LEADER then -# if $data[0][8] != FOLLOWER then +#if $data[0][8] != leader then +# if $data[0][8] != follower then # return -1 # endi #endi diff --git a/tests/script/tsim/db/alter_replica_13.sim b/tests/script/tsim/db/alter_replica_13.sim new file mode 100644 index 0000000000000000000000000000000000000000..4d45b9296709b3f3367e025cca1fc3c8a31faea4 --- /dev/null +++ b/tests/script/tsim/db/alter_replica_13.sim @@ -0,0 +1,140 @@ +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 +system sh/deploy.sh -n dnode2 -i 2 +system sh/deploy.sh -n dnode3 -i 3 +system sh/deploy.sh -n dnode4 -i 4 +system sh/exec.sh -n dnode1 -s start +system sh/exec.sh -n dnode2 -s start +system sh/exec.sh -n dnode3 -s start +system sh/exec.sh -n dnode4 -s start +sql connect + +print =============== step1: create dnodes +sql create dnode $hostname port 7200 + +$x = 0 +step1: + $x = $x + 1 + sleep 1000 + if $x == 10 then + print ====> dnode not ready!
+ return -1 + endi +sql show dnodes +print ===> $data00 $data01 $data02 $data03 $data04 $data05 +print ===> $data10 $data11 $data12 $data13 $data14 $data15 +if $rows != 2 then + return -1 +endi +if $data(1)[4] != ready then + goto step1 +endi +if $data(2)[4] != ready then + goto step1 +endi + +print =============== step2: create database +sql create database db vgroups 1 +sql show databases +if $rows != 3 then + return -1 +endi +if $data(db)[4] != 1 then + return -1 +endi + +sql show dnodes +if $data(2)[2] != 1 then + return -1 +endi + +# vnodes +sql show dnodes +if $data(2)[2] != 1 then + return -1 +endi + +# v1_dnode +sql show db.vgroups +if $data(2)[3] != 2 then + return -1 +endi + +sql_error alter database db replica 3 +sql create table db.stb (ts timestamp, c1 int, c2 binary(4)) tags(t1 int, t2 binary(16)) comment "abd" +sql create table db.ctb using db.stb tags(101, "102") +sql insert into db.ctb values(now, 1, "2") +sql select * from db.stb +if $rows != 1 then + return -1 +endi + +print =============== step3: create dnodes +sql create dnode $hostname port 7300 +sql create dnode $hostname port 7400 + +$x = 0 +step3: + $x = $x + 1 + sleep 1000 + if $x == 10 then + print ====> dnode not ready! + return -1 + endi +sql show dnodes +print ===> rows: $rows +print ===> $data00 $data01 $data02 $data03 $data04 $data05 +print ===> $data10 $data11 $data12 $data13 $data14 $data15 +print ===> $data20 $data21 $data22 $data23 $data24 $data25 +print ===> $data30 $data31 $data32 $data33 $data34 $data35 +if $rows != 4 then + return -1 +endi +if $data(1)[4] != ready then + goto step3 +endi +if $data(2)[4] != ready then + goto step3 +endi +if $data(3)[4] != ready then + goto step3 +endi +if $data(4)[4] != ready then + goto step3 +endi + +print ============= step4: alter database +sql alter database db replica 3 + +$x = 0 +step4: + $x = $x + 1 + sleep 1000 + if $x == 10 then + print ====> dnode not ready!
+ return -1 + endi +sql show db.vgroups +print ===> rows: $rows +print ===> $data00 $data01 $data02 $data03 $data04 $data05 +if $data[0][4] != leader then + goto step4 +endi +if $data[0][6] != follower then + goto step4 +endi +if $data[0][8] != follower then + goto step4 +endi + +print ============= step5: stop dnode 2 + +sql select * from db.stb +if $rows != 1 then + return -1 +endi + +system sh/exec.sh -n dnode1 -s stop -x SIGINT +system sh/exec.sh -n dnode2 -s stop -x SIGINT +system sh/exec.sh -n dnode3 -s stop -x SIGINT +system sh/exec.sh -n dnode4 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/script/tsim/dnode/basic1.sim b/tests/script/tsim/dnode/basic1.sim index d5c791e902aef3404f854287cef6224767080f82..a5b5427e036e1f74a2287a2d4995c5936fd149f5 100644 --- a/tests/script/tsim/dnode/basic1.sim +++ b/tests/script/tsim/dnode/basic1.sim @@ -27,7 +27,7 @@ if $data00 != 1 then return -1 endi -if $data02 != LEADER then +if $data02 != leader then return -1 endi @@ -74,7 +74,7 @@ if $data00 != 1 then return -1 endi -if $data02 != LEADER then +if $data02 != leader then return -1 endi diff --git a/tests/script/tsim/insert/update0.sim b/tests/script/tsim/insert/update0.sim index 3cb5e4008e3a57e3178721b7e3f5458ef07be52b..0ba3e98c913e1c37c50c351bed7d7385a1cad0d3 100644 --- a/tests/script/tsim/insert/update0.sim +++ b/tests/script/tsim/insert/update0.sim @@ -9,7 +9,7 @@ sql create database d0 keep 365000d,365000d,365000d sql use d0 print =============== create super table and register rsma -sql create table if not exists stb (ts timestamp, c1 int) tags (city binary(20),district binary(20)) rollup(min) file_factor 0.1 delay 2; +sql create table if not exists stb (ts timestamp, c1 int) tags (city binary(20),district binary(20)) rollup(min) file_factor 0.1; sql show stables if $rows != 1 then diff --git a/tests/script/tsim/mnode/basic1.sim b/tests/script/tsim/mnode/basic1.sim index 198f36cdd2e6f27fdaccb9f680f7d5cd705e1213..d93d4ca53fc2b6c56e0a8eccf7d3ac3ea74ec0ee 100644 --- a/tests/script/tsim/mnode/basic1.sim +++ b/tests/script/tsim/mnode/basic1.sim @@ -15,7 +15,7 @@ if $data00 != 1 then return -1 endi -if $data02 != LEADER then +if $data02 != leader then return -1 endi @@ -36,13 +36,14 @@ if $data(2)[4] != ready then goto step1 endi -print =============== create drop mnode 1 sql_error create mnode on dnode 1 sql_error drop mnode on dnode 1 + +print =============== create mnode 2 sql create mnode on dnode 2 $x = 0 -step1: +step2: $x = $x + 1 sleep 1000 if $x == 20 then @@ -58,18 +59,18 @@ endi if $data(1)[0] != 1 then return -1 endi -if $data(1)[2] != LEADER then +if $data(1)[2] != leader then return -1 endi if $data(2)[0] != 2 then return -1 endi -if $data(2)[2] != FOLLOWER then - goto step1 +if $data(2)[2] != follower then + goto step2 endi sleep 2000 -print ============ drop mnodes +print ============ drop mnode 2 sql drop mnode on dnode 2 sql show mnodes if $rows != 1 then @@ -88,25 +89,25 @@ sql show mnodes print $data(1)[0] $data(1)[1] $data(1)[2] print $data(2)[0] $data(2)[1] $data(2)[2] -if $rows != 2 then +if $rows != 1 then return -1 endi if $data(1)[0] != 1 then return -1 endi -if $data(1)[2] != LEADER then +if $data(1)[2] != leader then return -1 endi -if $data(2)[0] != NULL then +if $data(2)[0] != null then goto step2 endi -if $data(2)[2] != NULL then +if $data(2)[2] != null then goto step2 endi sleep 2000 -print =============== create drop mnodes +print =============== create mnodes sql create mnode on dnode 2 sql show mnodes if $rows != 2 then @@ -130,13 +131,13 @@ 
endi if $data(1)[0] != 1 then return -1 endi -if $data(1)[2] != LEADER then +if $data(1)[2] != leader then return -1 endi if $data(2)[0] != 2 then return -1 endi -if $data(2)[2] != FOLLOWER then +if $data(2)[2] != follower then goto step3 endi diff --git a/tests/script/tsim/mnode/basic2.sim b/tests/script/tsim/mnode/basic2.sim index 024e2b2406deacd305b8611b31e04276ae7fca92..78558263d619ee3e9cef2e03c51790823c95b6a9 100644 --- a/tests/script/tsim/mnode/basic2.sim +++ b/tests/script/tsim/mnode/basic2.sim @@ -15,7 +15,7 @@ if $data00 != 1 then return -1 endi -if $data02 != LEADER then +if $data02 != leader then return -1 endi @@ -56,13 +56,13 @@ endi if $data(1)[0] != 1 then return -1 endi -if $data(1)[2] != LEADER then +if $data(1)[2] != leader then return -1 endi if $data(2)[0] != 2 then return -1 endi -if $data(2)[2] != FOLLOWER then +if $data(2)[2] != follower then goto step2 endi @@ -119,9 +119,16 @@ if $data(2)[4] != ready then endi print =============== insert data -#sql create table db.stb (ts timestamp, c1 int, c2 binary(4)) tags(t1 int, t2 float, t3 binary(16)) comment "abd" -#sql create table db.ctb using db.stb tags(101, 102, "103") -#sql insert into db.ctb values(now, 1, "2") +sql create table db.stb (ts timestamp, c1 int, c2 binary(4)) tags(t1 int, t2 float, t3 binary(16)) comment "abd" +sql create table db.ctb using db.stb tags(101, 102, "103") +sql insert into db.ctb values(now, 1, "2") + +sql select * from db.ctb +print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6] + +if $rows != 1 then + return -1 +endi system sh/exec.sh -n dnode1 -s stop system sh/exec.sh -n dnode2 -s stop \ No newline at end of file diff --git a/tests/script/tsim/mnode/basic3.sim b/tests/script/tsim/mnode/basic3.sim index 40c0f01229edd318af49697fb9eff664f4da1eff..bc70cd7a85522230a54359b8a4144eb4ce7a4eed 100644 --- a/tests/script/tsim/mnode/basic3.sim +++ b/tests/script/tsim/mnode/basic3.sim @@ -2,20 +2,27 @@ system sh/stop_dnodes.sh system sh/deploy.sh -n dnode1 -i 1 system sh/deploy.sh -n dnode2 -i 2 system sh/deploy.sh -n dnode3 -i 3 +system sh/deploy.sh -n dnode4 -i 4 +system sh/cfg.sh -n dnode1 -c transPullupInterval -v 1 +system sh/cfg.sh -n dnode2 -c transPullupInterval -v 1 +system sh/cfg.sh -n dnode3 -c transPullupInterval -v 1 +system sh/cfg.sh -n dnode4 -c transPullupInterval -v 1 system sh/exec.sh -n dnode1 -s start system sh/exec.sh -n dnode2 -s start system sh/exec.sh -n dnode3 -s start +system sh/exec.sh -n dnode4 -s start sql connect print =============== step1: create dnodes sql create dnode $hostname port 7200 sql create dnode $hostname port 7300 +sql create dnode $hostname port 7400 $x = 0 step1: $x = $x + 1 sleep 1000 - if $x == 20 then + if $x == 10 then return -1 endi sql show dnodes -x step1 @@ -32,22 +39,23 @@ endi print =============== step2: create mnode 2 sql create mnode on dnode 2 sql create mnode on dnode 3 +sql_error create mnode on dnode 4 $x = 0 step2: $x = $x + 1 sleep 1000 - if $x == 20 then + if $x == 10 then return -1 endi sql show mnodes -x step2 -if $data(1)[2] != LEADER then +if $data(1)[2] != leader then goto step2 endi -if $data(2)[2] != FOLLOWER then +if $data(2)[2] != follower then goto step2 endi -if $data(3)[2] != FOLLOWER then +if $data(3)[2] != follower then goto step2 endi @@ -68,7 +76,7 @@ $x = 0 step4: $x = $x + 1 sleep 1000 - if $x == 20 then + if $x == 10 then return -1 endi sql show mnodes -x step4 @@ -98,7 +106,7 @@ $x = 0 step5: $x = $x + 1 sleep 1000 - if $x == 20 then + if $x == 10 then return -1 endi sql show 
mnodes -x step5 @@ -106,6 +114,10 @@ print $data(1)[0] $data(1)[1] $data(1)[2] print $data(2)[0] $data(2)[1] $data(2)[2] print $data(3)[0] $data(3)[1] $data(3)[2] +if $data(2)[2] != offline then + goto step5 +endi + sql show users if $rows != 2 then return -1 @@ -119,7 +131,7 @@ $x = 0 step6: $x = $x + 1 sleep 1000 - if $x == 20 then + if $x == 10 then return -1 endi sql show mnodes -x step6 @@ -134,4 +146,5 @@ endi system sh/exec.sh -n dnode1 -s stop system sh/exec.sh -n dnode2 -s stop -system sh/exec.sh -n dnode3 -s stop \ No newline at end of file +system sh/exec.sh -n dnode3 -s stop +system sh/exec.sh -n dnode4 -s stop \ No newline at end of file diff --git a/tests/script/tsim/mnode/basic4.sim b/tests/script/tsim/mnode/basic4.sim new file mode 100644 index 0000000000000000000000000000000000000000..88deb5af898fde58d94f5129fb4e2a030795f29b --- /dev/null +++ b/tests/script/tsim/mnode/basic4.sim @@ -0,0 +1,194 @@ +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 +system sh/deploy.sh -n dnode2 -i 2 +system sh/deploy.sh -n dnode3 -i 3 +system sh/exec.sh -n dnode1 -s start +system sh/exec.sh -n dnode2 -s start +sql connect + +print =============== step1: create dnodes +sql create dnode $hostname port 7200 +sql create dnode $hostname port 7300 + +$x = 0 +step1: + $x = $x + 1 + sleep 1000 + if $x == 5 then + return -1 + endi +sql show dnodes -x step1 +if $data(1)[4] != ready then + goto step1 +endi +if $data(2)[4] != ready then + goto step1 +endi + +print =============== step2: create mnode 2 +sql create mnode on dnode 2 +sql_error create mnode on dnode 3 + +system sh/exec.sh -n dnode3 -s start + +$x = 0 +step2: + $x = $x + 1 + sleep 1000 + if $x == 5 then + return -1 + endi +sql show dnodes -x step2 +if $data(1)[4] != ready then + goto step2 +endi +if $data(2)[4] != ready then + goto step2 +endi + +system sh/exec.sh -n dnode3 -s stop +sql_error create mnode on dnode 3 + +print =============== step3: show mnodes + +$x = 0 +step3: + $x = $x + 1 + sleep 1000 + if $x == 10 then + return -1 + endi +sql show mnodes -x step3 +print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] +print $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] +print $data[2][0] $data[2][1] $data[2][2] $data[2][3] $data[2][4] + +if $data(1)[2] != leader then + goto step3 +endi +if $data(2)[2] != follower then + goto step3 +endi +if $data(3)[2] != offline then + goto step3 +endi +if $data(1)[3] != ready then + goto step3 +endi +if $data(2)[3] != ready then + goto step3 +endi +if $data(3)[3] != creating then + goto step3 +endi + +print =============== step4: start dnode3 +system sh/exec.sh -n dnode3 -s start + +$x = 0 +step4: + $x = $x + 1 + sleep 1000 + if $x == 10 then + return -1 + endi +sql show mnodes -x step4 +print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] +print $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] +print $data[2][0] $data[2][1] $data[2][2] $data[2][3] $data[2][4] + +if $data(1)[2] != leader then + goto step4 +endi +if $data(2)[2] != follower then + goto step4 +endi +if $data(3)[2] != follower then + goto step4 +endi +if $data(1)[3] != ready then + goto step4 +endi +if $data(2)[3] != ready then + goto step4 +endi +if $data(3)[3] != ready then + goto step4 +endi + +print =============== step5: drop mnode 3 and stop dnode3 +system sh/exec.sh -n dnode3 -s stop +sql_error drop mnode on dnode 3 + +$x = 0 +step5: + $x = $x + 1 + sleep 1000 + if $x == 10 then + return -1 + endi +sql show mnodes -x step5 +print $data[0][0] $data[0][1] $data[0][2] 
$data[0][3] $data[0][4] +print $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] +print $data[2][0] $data[2][1] $data[2][2] $data[2][3] $data[2][4] + +if $data(1)[2] != leader then + goto step5 +endi +if $data(2)[2] != follower then + goto step5 +endi +if $data(3)[2] != offline then + goto step5 +endi +if $data(1)[3] != ready then + goto step5 +endi +if $data(2)[3] != ready then + goto step5 +endi +if $data(3)[3] != dropping then + goto step5 +endi + +print =============== step6: start dnode3 +system sh/exec.sh -n dnode3 -s start + +$x = 0 +step6: + $x = $x + 1 + sleep 1000 + if $x == 10 then + return -1 + endi +sql show mnodes -x step6 +print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] +print $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] +print $data[2][0] $data[2][1] $data[2][2] $data[2][3] $data[2][4] + +if $rows != 2 then + goto step6 +endi +if $data(1)[2] != leader then + goto step6 +endi +if $data(2)[2] != follower then + goto step6 +endi +if $data(3)[2] != null then + goto step6 +endi +if $data(1)[3] != ready then + goto step6 +endi +if $data(2)[3] != ready then + goto step6 +endi +if $data(3)[3] != null then + goto step6 +endi + +system sh/exec.sh -n dnode1 -s stop +system sh/exec.sh -n dnode2 -s stop +system sh/exec.sh -n dnode3 -s stop +system sh/exec.sh -n dnode4 -s stop diff --git a/tests/script/tsim/qnode/basic1.sim b/tests/script/tsim/qnode/basic1.sim index 2351403909e9f641e2ada2789561a095a0e915d4..7108fcaf59ec420a8657dd8e061e5261ec15ce3c 100644 --- a/tests/script/tsim/qnode/basic1.sim +++ b/tests/script/tsim/qnode/basic1.sim @@ -24,7 +24,7 @@ if $data00 != 1 then return -1 endi -if $data02 != LEADER then +if $data02 != leader then return -1 endi @@ -71,7 +71,7 @@ if $data00 != 1 then return -1 endi -if $data02 != LEADER then +if $data02 != leader then return -1 endi diff --git a/tests/script/tsim/query/explain.sim b/tests/script/tsim/query/explain.sim index 21162a99b0928040ae115b13c117864a170ef4e9..2b0d52d25327833221ffe53953f904d74ed1784a 100644 --- a/tests/script/tsim/query/explain.sim +++ b/tests/script/tsim/query/explain.sim @@ -1,7 +1,7 @@ system sh/stop_dnodes.sh system sh/deploy.sh -n dnode1 -i 1 -print ========= start dnode1 as LEADER +print ========= start dnode1 as leader system sh/exec.sh -n dnode1 -s start sql connect diff --git a/tests/script/tsim/query/scalarNull.sim b/tests/script/tsim/query/scalarNull.sim index c7e7aa9a349a98623033c40cebfa9c499ee0fe2a..b08ac1d3d9abe157915ec25c438d82e2774ced04 100644 --- a/tests/script/tsim/query/scalarNull.sim +++ b/tests/script/tsim/query/scalarNull.sim @@ -4,7 +4,7 @@ system sh/deploy.sh -n dnode1 -i 1 system sh/cfg.sh -n dnode1 -c wallevel -v 2 system sh/cfg.sh -n dnode1 -c numOfMnodes -v 1 -print ========= start dnode1 as LEADER +print ========= start dnode1 as leader system sh/exec.sh -n dnode1 -s start sleep 2000 sql connect diff --git a/tests/script/tsim/query/udf.sim b/tests/script/tsim/query/udf.sim index 24ddcc1b75c70865f334f553a6b0f1ee176d62ca..93cae4e3912cab0b5c36e60d28743f0c10f1e45a 100644 --- a/tests/script/tsim/query/udf.sim +++ b/tests/script/tsim/query/udf.sim @@ -5,7 +5,7 @@ system sh/cfg.sh -n dnode1 -c wallevel -v 2 system sh/cfg.sh -n dnode1 -c numOfMnodes -v 1 system sh/cfg.sh -n dnode1 -c udf -v 1 -print ========= start dnode1 as LEADER +print ========= start dnode1 as leader system sh/exec.sh -n dnode1 -s start sleep 1000 sql connect diff --git a/tests/script/tsim/sma/rsmaCreateInsertQuery.sim b/tests/script/tsim/sma/rsmaCreateInsertQuery.sim index 
38ae0dc0a298d7743f3eb1466357ff0bbb621d06..f929dda18cb1b287c3ffe05487464624ff0eebc5 100644 --- a/tests/script/tsim/sma/rsmaCreateInsertQuery.sim +++ b/tests/script/tsim/sma/rsmaCreateInsertQuery.sim @@ -9,7 +9,7 @@ sql create database d0 retentions 15s:7d,1m:21d,15m:365d; sql use d0 print =============== create super table and register rsma -sql create table if not exists stb (ts timestamp, c1 int) tags (city binary(20),district binary(20)) rollup(min) file_factor 0.1 delay 2; +sql create table if not exists stb (ts timestamp, c1 int) tags (city binary(20),district binary(20)) rollup(min) file_factor 0.1; sql show stables if $rows != 1 then @@ -37,6 +37,15 @@ if $rows > 2 then print retention level 2 file rows $rows > 2 return -1 endi + + +if $data01 != 1 then + if $data01 != 10 then + print retention level 2 file result $data01 != 1 or 10 + return -1 + endi +endi + print =============== select * from retention level 1 from memory sql select * from ct1 where ts > now-8d; print $data00 $data01 @@ -44,15 +53,30 @@ if $rows > 2 then print retention level 1 file rows $rows > 2 return -1 endi + +if $data01 != 1 then + if $data01 != 10 then + print retention level 1 file result $data01 != 1 or 10 + return -1 + endi +endi + print =============== select * from retention level 0 from memory sql select * from ct1 where ts > now-3d; print $data00 $data01 print $data10 $data11 print $data20 $data21 + if $rows < 1 then print retention level 0 file rows $rows < 1 return -1 endi + +if $data01 != 10 then + print retention level 0 file result $data01 != 10 + return -1 +endi + #=================================================================== @@ -68,6 +92,13 @@ if $rows > 2 then return -1 endi +if $data01 != 1 then + if $data01 != 10 then + print retention level 2 file result $data01 != 1 or 10 + return -1 + endi +endi + print =============== select * from retention level 1 from file sql select * from ct1 where ts > now-8d; print $data00 $data01 @@ -76,6 +107,13 @@ if $rows > 2 then return -1 endi +if $data01 != 1 then + if $data01 != 10 then + print retention level 1 file result $data01 != 1 or 10 + return -1 + endi +endi + print =============== select * from retention level 0 from file sql select * from ct1 where ts > now-3d; print $data00 $data01 @@ -86,4 +124,9 @@ if $rows < 1 then return -1 endi +if $data01 != 10 then + print retention level 0 file result $data01 != 10 + return -1 +endi + system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/script/tsim/sma/tsmaCreateInsertData.sim b/tests/script/tsim/sma/tsmaCreateInsertData.sim index b7a127e1b0d67f9af620919740dae87e649c82cd..0202c53800260b4974cabe10ff4cbd9f180fd590 100644 --- a/tests/script/tsim/sma/tsmaCreateInsertData.sim +++ b/tests/script/tsim/sma/tsmaCreateInsertData.sim @@ -5,7 +5,7 @@ sleep 50 sql connect print =============== create database -sql create database d1 +sql create database d1 vgroups 1 sql use d1 print =============== create super table, include column type for count/sum/min/max/first @@ -37,5 +37,12 @@ print =============== trigger stream to execute sma aggr task and insert sma dat sql insert into ct1 values(now+5s, 20, 20.0, 30.0) #=================================================================== +print =============== select * from ct1 from memory +sql select * from ct1; +print $data00 $data01 +if $rows != 5 then + print rows $rows != 5 + return -1 +endi system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/snode/basic1.sim b/tests/script/tsim/snode/basic1.sim 
index 660951c591bb9048b592e7be60492925b13b600d..a9d4867354e70a867d23e65ed03dda47b0b2524c 100644 --- a/tests/script/tsim/snode/basic1.sim +++ b/tests/script/tsim/snode/basic1.sim @@ -24,7 +24,7 @@ if $data00 != 1 then return -1 endi -if $data02 != LEADER then +if $data02 != leader then return -1 endi @@ -71,7 +71,7 @@ if $data00 != 1 then return -1 endi -if $data02 != LEADER then +if $data02 != leader then return -1 endi diff --git a/tests/script/tsim/stable/alter1.sim b/tests/script/tsim/stable/alter_comment.sim similarity index 99% rename from tests/script/tsim/stable/alter1.sim rename to tests/script/tsim/stable/alter_comment.sim index 1205f50f6ea144de6f5fae06ef7569a60b47e0cb..cfcbb9a1daa046c894bbfe47f4684ded5faf79a6 100644 --- a/tests/script/tsim/stable/alter1.sim +++ b/tests/script/tsim/stable/alter_comment.sim @@ -166,4 +166,5 @@ if $data[0][6] != abcde then return -1 endi +return system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/general/alter/count.sim b/tests/script/tsim/stable/alter_count.sim similarity index 96% rename from tests/script/general/alter/count.sim rename to tests/script/tsim/stable/alter_count.sim index fc936668b8ea08f9cd08874ad98668a4d8904315..e5af9a5735e6f7f9844d055be8d4c2892d6b2ed7 100644 --- a/tests/script/general/alter/count.sim +++ b/tests/script/tsim/stable/alter_count.sim @@ -1,13 +1,8 @@ system sh/stop_dnodes.sh - system sh/deploy.sh -n dnode1 -i 1 -system sh/cfg.sh -n dnode1 -c wallevel -v 2 -system sh/cfg.sh -n dnode1 -c numOfMnodes -v 1 -system sh/cfg.sh -n dnode1 -c mnodeEqualVnodeNum -v 4 print ========= start dnode1 as master system sh/exec.sh -n dnode1 -s start -sleep 2000 sql connect print ======== step1 @@ -141,10 +136,13 @@ endi print ============= step10 system sh/exec.sh -n dnode1 -s stop -x SIGINT -sleep 3000 system sh/exec.sh -n dnode1 -s start -sleep 3000 +sql connect + +sql select count(a), count(b), count(c), count(d), count(e), count(f), count(g), count(h) from d1.tb; +sql select count(a), count(b), count(c), count(d), count(e), count(f), count(g), count(h) from d1.tb; +sql use d1 sql select count(a), count(b), count(c), count(d), count(e), count(f), count(g), count(h) from tb if $data00 != 24 then return -1 diff --git a/tests/script/general/alter/import.sim b/tests/script/tsim/stable/alter_import.sim similarity index 73% rename from tests/script/general/alter/import.sim rename to tests/script/tsim/stable/alter_import.sim index aef0a258b24563e915cd8aa3dd42f6623a29170a..cdd7b60e14fc5e8f46f3413e9037a95f534718e1 100644 --- a/tests/script/general/alter/import.sim +++ b/tests/script/tsim/stable/alter_import.sim @@ -1,13 +1,8 @@ system sh/stop_dnodes.sh - system sh/deploy.sh -n dnode1 -i 1 -system sh/cfg.sh -n dnode1 -c wallevel -v 2 -system sh/cfg.sh -n dnode1 -c numOfMnodes -v 1 -system sh/cfg.sh -n dnode1 -c mnodeEqualVnodeNum -v 4 print ========= start dnode1 as master system sh/exec.sh -n dnode1 -s start -sleep 2000 sql connect print ======== step1 @@ -34,14 +29,14 @@ if $data00 != 3 then endi print ========= step3 -sql import into tb values(now-23d, -23, 0) -sql import into tb values(now-21d, -21, 0) +sql insert into tb values(now-23d, -23, 0) +sql insert into tb values(now-21d, -21, 0) sql select count(b) from tb if $data00 != 5 then return -1 endi -sql import into tb values(now-29d, -29, 0) +sql insert into tb values(now-29d, -29, 0) sql select count(b) from tb if $data00 != 6 then return -1 diff --git a/tests/script/general/alter/insert1.sim b/tests/script/tsim/stable/alter_insert1.sim similarity index 99% rename 
from tests/script/general/alter/insert1.sim rename to tests/script/tsim/stable/alter_insert1.sim index 12ab09beb989dd963a9e8c9c3ff5926e78d8b0ac..82781f2fe5cadf0488c5107e9e54b06364629680 100644 --- a/tests/script/general/alter/insert1.sim +++ b/tests/script/tsim/stable/alter_insert1.sim @@ -1,10 +1,6 @@ system sh/stop_dnodes.sh - system sh/deploy.sh -n dnode1 -i 1 -system sh/cfg.sh -n dnode1 -c wallevel -v 2 system sh/exec.sh -n dnode1 -s start - -sleep 2000 sql connect print ======== step1 diff --git a/tests/script/general/alter/insert2.sim b/tests/script/tsim/stable/alter_insert2.sim similarity index 98% rename from tests/script/general/alter/insert2.sim rename to tests/script/tsim/stable/alter_insert2.sim index dcd9f500304f906ddddb33bd1a04c5943c232d49..a30175f3980cc117ec052ebb13a2e0b31b2cb316 100644 --- a/tests/script/general/alter/insert2.sim +++ b/tests/script/tsim/stable/alter_insert2.sim @@ -1,10 +1,6 @@ system sh/stop_dnodes.sh - system sh/deploy.sh -n dnode1 -i 1 -system sh/cfg.sh -n dnode1 -c wallevel -v 2 system sh/exec.sh -n dnode1 -s start - -sleep 2000 sql connect print ======== step1 diff --git a/tests/script/general/alter/metrics.sim b/tests/script/tsim/stable/alter_metrics.sim similarity index 96% rename from tests/script/general/alter/metrics.sim rename to tests/script/tsim/stable/alter_metrics.sim index fd0b210cd1b452b2a35ebcd9f74aec98c3817b03..f33246dfe2d14c092cb9483ce31c0788da9e5397 100644 --- a/tests/script/general/alter/metrics.sim +++ b/tests/script/tsim/stable/alter_metrics.sim @@ -1,10 +1,6 @@ system sh/stop_dnodes.sh - system sh/deploy.sh -n dnode1 -i 1 -system sh/cfg.sh -n dnode1 -c walLevel -v 2 system sh/exec.sh -n dnode1 -s start - -sleep 2000 sql connect print ======== step1 @@ -351,7 +347,7 @@ endi if $data80 != h then return -1 endi -if $data81 != BINARY then +if $data81 != VARCHAR then return -1 endi if $data82 != 10 then @@ -367,9 +363,8 @@ endi print ======== step9 print ======== step10 system sh/exec.sh -n dnode1 -s stop -x SIGINT -sleep 3000 system sh/exec.sh -n dnode1 -s start -sleep 3000 +sql connect sql use d2 sql describe tb @@ -424,7 +419,7 @@ endi if $data80 != h then return -1 endi -if $data81 != BINARY then +if $data81 != VARCHAR then return -1 endi if $data82 != 10 then @@ -506,7 +501,7 @@ endi if $data70 != h then return -1 endi -if $data71 != BINARY then +if $data71 != VARCHAR then return -1 endi if $data72 != 10 then @@ -561,7 +556,7 @@ endi if $data60 != h then return -1 endi -if $data61 != BINARY then +if $data61 != VARCHAR then return -1 endi if $data62 != 10 then @@ -610,7 +605,7 @@ endi if $data50 != h then return -1 endi -if $data51 != BINARY then +if $data51 != VARCHAR then return -1 endi if $data52 != 10 then @@ -653,7 +648,7 @@ endi if $data40 != h then return -1 endi -if $data41 != BINARY then +if $data41 != VARCHAR then return -1 endi if $data42 != 10 then @@ -690,7 +685,7 @@ endi if $data30 != h then return -1 endi -if $data31 != BINARY then +if $data31 != VARCHAR then return -1 endi if $data32 != 10 then @@ -721,7 +716,7 @@ endi if $data20 != h then return -1 endi -if $data21 != BINARY then +if $data21 != VARCHAR then return -1 endi if $data22 != 10 then @@ -762,7 +757,7 @@ endi print ======= over sql drop database d2 sql show databases -if $rows != 0 then +if $rows != 2 then return -1 endi diff --git a/tests/script/tsim/stable/column_add.sim b/tests/script/tsim/stable/column_add.sim index a5d9b48508baa78e7266a9af7d1473b192643041..db592e6c69ee2fc3111b19b2502d67960ee943cf 100644 --- a/tests/script/tsim/stable/column_add.sim +++ 
b/tests/script/tsim/stable/column_add.sim @@ -143,7 +143,7 @@ sql insert into db.ctb values(now+2s, 1, 2, 3, 4) sql select * from db.stb print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6] print $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6] -print $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6] +print $data[2][0] $data[2][1] $data[2][2] $data[2][3] $data[2][4] $data[2][5] $data[2][6] if $rows != 3 then return -1 @@ -200,7 +200,6 @@ sql insert into db.ctb values(now+3s, 1, 2, 3, 4, 5) sql select * from db.stb print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6] print $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6] -print $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6] print $data[2][0] $data[2][1] $data[2][2] $data[2][3] $data[2][4] $data[2][5] $data[2][6] if $rows != 4 then diff --git a/tests/script/tsim/stable/column_drop.sim b/tests/script/tsim/stable/column_drop.sim index af84a3ecac28da9f6dbf41d08af707d1aa6226a4..3401465103762d523b8cb5f15585f9924db4abfa 100644 --- a/tests/script/tsim/stable/column_drop.sim +++ b/tests/script/tsim/stable/column_drop.sim @@ -129,7 +129,8 @@ endi print ========== step2 drop column c5 sql alter table db.stb drop column c5 -sql insert into db.ctb values(now+2s, 1, 2, 3, 4, 5) +sql_error insert into db.ctb values(now+2s, 1, 2, 3, 4, 5) +sql insert into db.ctb values(now+2s, 1, 2, 3, 4) sql insert into db.ctb values(now+3s, 1, 2, 3, 4) sql_error insert into db.ctb values(now+2s, 1, 2, 3, 4, 5) @@ -206,4 +207,4 @@ if $data[7][0] != t3 then return -1 endi -system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/stable/column_modify.sim b/tests/script/tsim/stable/column_modify.sim index 732e449c4aea74f5df310a9af71411e99eeb9f25..e2752ccf951cef30587aa1f604f92cbbaa265b85 100644 --- a/tests/script/tsim/stable/column_modify.sim +++ b/tests/script/tsim/stable/column_modify.sim @@ -47,7 +47,7 @@ endi print ========== step2 describe sql describe db.ctb -if $rows != 7 then +if $rows != 6 then return -1 endi if $data[0][0] != ts then @@ -75,4 +75,35 @@ if $data[5][0] != t3 then return -1 endi +system sh/exec.sh -n dnode1 -s stop -x SIGINT +system sh/exec.sh -n dnode1 -s start + +sql connect + +sql select * from db.stb +print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6] +print $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6] + +if $rows != 2 then + return -1 +endi +if $data[0][1] != 1 then + return -1 +endi +if $data[0][2] != 1234 then + return -1 +endi +if $data[0][3] != 101 then + return -1 +endi +if $data[1][1] != 1 then + return -1 +endi +if $data[1][2] != 12345 then + return -1 +endi +if $data[1][3] != 101 then + return -1 +endi + system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/script/tsim/stable/disk.sim b/tests/script/tsim/stable/disk.sim index c1ced6ae1076b3b1cc5e8a79f31188c076a93f59..ff734b4234263ca71253dee97eaa0158fe5221c4 100644 --- a/tests/script/tsim/stable/disk.sim +++ b/tests/script/tsim/stable/disk.sim @@ -1,17 +1,9 @@ system sh/stop_dnodes.sh - - system sh/deploy.sh -n dnode1 -i 1 -system sh/cfg.sh -n dnode1 -c walLevel -v 1 -system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4 -system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 
4 system sh/exec.sh -n dnode1 -s start - -sleep 2000 sql connect print ======================== dnode1 start - $dbPrefix = d_db $tbPrefix = d_tb $mtPrefix = d_mt @@ -57,11 +49,9 @@ if $data00 != $totalNum then return -1 endi -sleep 1000 system sh/exec.sh -n dnode1 -s stop -x SIGINT -sleep 3000 system sh/exec.sh -n dnode1 -s start -sleep 6000 +sql connect sql use $db sql show vgroups diff --git a/tests/script/tsim/stable/dnode3.sim b/tests/script/tsim/stable/dnode3.sim index 706c4aa499ce3cebaedcbb71c24a9473a9069c9a..03e8df26b7543e61f0e8e52a1fd5bd8ab9de5e0f 100644 --- a/tests/script/tsim/stable/dnode3.sim +++ b/tests/script/tsim/stable/dnode3.sim @@ -1,19 +1,9 @@ system sh/stop_dnodes.sh - system sh/deploy.sh -n dnode1 -i 1 system sh/deploy.sh -n dnode2 -i 2 system sh/deploy.sh -n dnode3 -i 3 system sh/deploy.sh -n dnode4 -i 4 -system sh/cfg.sh -n dnode1 -c walLevel -v 1 -system sh/cfg.sh -n dnode2 -c walLevel -v 1 -system sh/cfg.sh -n dnode3 -c walLevel -v 1 -system sh/cfg.sh -n dnode4 -c walLevel -v 1 -# system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4 -# system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4 -# system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v 4 -# system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v 4 system sh/exec.sh -n dnode1 -s start - sql connect sql create dnode $hostname PORT 7200 diff --git a/tests/script/tsim/stable/metrics.sim b/tests/script/tsim/stable/metrics.sim index e68d95511cfd3c4ea556e34ffed5111f05064405..c652670d7f4e904461adf33af8f1d10fc9e9e319 100644 --- a/tests/script/tsim/stable/metrics.sim +++ b/tests/script/tsim/stable/metrics.sim @@ -1,10 +1,6 @@ system sh/stop_dnodes.sh system sh/deploy.sh -n dnode1 -i 1 -system sh/cfg.sh -n dnode1 -c walLevel -v 1 -system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4 system sh/exec.sh -n dnode1 -s start - -sleep 1000 sql connect $dbPrefix = m_me_db @@ -97,9 +93,6 @@ $i = 2 $tb = $tbPrefix . 
$i sql insert into $tb values (now + 1m , 1 ) -print sleep 2000 -sleep 2000 - print =============== step6 # sql select * from $mt diff --git a/tests/script/tsim/stable/refcount.sim b/tests/script/tsim/stable/refcount.sim index fffa6f75a4adfe2b52b1a7d1b587f6bf7a182ba4..d77c8e08900c1b0eeeee95bbfc4c6a4540558e6b 100644 --- a/tests/script/tsim/stable/refcount.sim +++ b/tests/script/tsim/stable/refcount.sim @@ -1,11 +1,6 @@ system sh/stop_dnodes.sh - system sh/deploy.sh -n dnode1 -i 1 -system sh/cfg.sh -n dnode1 -c walLevel -v 1 -system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4 system sh/exec.sh -n dnode1 -s start - -sleep 2000 sql connect print =============== step1 diff --git a/tests/script/tsim/stable/show.sim b/tests/script/tsim/stable/show.sim index 823aefe9d86954dc8a3af85359ec02a475182aae..d3ab75adf5ac08dbd4c2a8a0870cfe4fbfd62a4d 100644 --- a/tests/script/tsim/stable/show.sim +++ b/tests/script/tsim/stable/show.sim @@ -1,14 +1,9 @@ system sh/stop_dnodes.sh - system sh/deploy.sh -n dnode1 -i 1 -system sh/cfg.sh -n dnode1 -c walLevel -v 1 system sh/exec.sh -n dnode1 -s start - -sleep 2000 sql connect print ======================== create stable - sql create database d1 sql use d1 diff --git a/tests/script/tsim/stable/tag_add.sim b/tests/script/tsim/stable/tag_add.sim new file mode 100644 index 0000000000000000000000000000000000000000..a7615df14c3fc51851feb19937c51cbead7c8ea2 --- /dev/null +++ b/tests/script/tsim/stable/tag_add.sim @@ -0,0 +1,193 @@ +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 +system sh/exec.sh -n dnode1 -s start +sql connect + +print ========== prepare stb and ctb +sql create database db vgroups 1 +sql create table db.stb (ts timestamp, c1 int, c2 binary(4)) tags(t1 int, t2 binary(16)) comment "abd" +sql create table db.ctb using db.stb tags(101, "102") +sql insert into db.ctb values(now, 1, "2") + +sql show db.stables +if $rows != 1 then + return -1 +endi +if $data[0][0] != stb then + return -1 +endi +if $data[0][1] != db then + return -1 +endi +if $data[0][3] != 3 then + return -1 +endi +if $data[0][4] != 2 then + return -1 +endi +if $data[0][6] != abd then + return -1 +endi + +sql show db.tables +if $rows != 1 then + return -1 +endi +if $data[0][0] != ctb then + return -1 +endi +if $data[0][1] != db then + return -1 +endi +if $data[0][3] != 3 then + return -1 +endi +if $data[0][4] != stb then + return -1 +endi +if $data[0][6] != 2 then + return -1 +endi +if $data[0][9] != CHILD_TABLE then + return -1 +endi + +sql select * from db.stb +if $rows != 1 then + return -1 +endi +if $data[0][1] != 1 then + return -1 +endi +if $data[0][2] != 2 then + return -1 +endi +if $data[0][3] != 101 then + return -1 +endi +if $data[0][4] != 102 then + return -1 +endi + +sql_error alter table db.stb add tag ts int +sql_error alter table db.stb add tag t1 int +sql_error alter table db.stb add tag t2 int +sql_error alter table db.stb add tag c1 int +sql_error alter table db.stb add tag c2 int + +print ========== step1 add tag t3 +sql alter table db.stb add tag t3 int + +sql show db.stables +if $data[0][3] != 3 then + return -1 +endi + +sql show db.tables +if $data[0][3] != 3 then + return -1 +endi + +sql describe db.ctb +if $rows != 6 then + return -1 +endi +if $data[5][0] != t3 then + return -1 +endi +if $data[5][1] != INT then + return -1 +endi +if $data[5][2] != 4 then + return -1 +endi + +sql select * from db.stb +print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6] +if $rows != 1 then + return -1 +endi +if $data[0][1] != 1 then + 
return -1 +endi +if $data[0][2] != 2 then + return -1 +endi +if $data[0][3] != 101 then + return -1 +endi +if $data[0][4] != 102 then + return -1 +endi +if $data[0][5] != NULL then + return -1 +endi + +print ========== step2 add tag t4 +sql alter table db.stb add tag t4 bigint +sql select * from db.stb +sql select * from db.stb +print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6] +print $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6] +print $data[2][0] $data[2][1] $data[2][2] $data[2][3] $data[2][4] $data[2][5] $data[2][6] + +if $rows != 1 then + return -1 +endi +if $data[0][1] != 1 then + return -1 +endi +if $data[0][2] != 2 then + return -1 +endi +if $data[0][3] != 101 then + return -1 +endi +if $data[0][4] != 102 then + return -1 +endi +if $data[0][5] != NULL then + return -1 +endi +if $data[0][6] != NULL then + return -1 +endi + +sql_error create table db.ctb2 using db.stb tags(101, "102") +sql create table db.ctb2 using db.stb tags(101, "102", 103, 104) +sql insert into db.ctb2 values(now, 1, "2") + +sql select * from db.stb where tbname = 'ctb2'; +print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6] +print $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6] +print $data[2][0] $data[2][1] $data[2][2] $data[2][3] $data[2][4] $data[2][5] $data[2][6] + +if $rows != 1 then + return -1 +endi +if $data[0][1] != 1 then + return -1 +endi +if $data[0][2] != 2 then + return -1 +endi +if $data[0][3] != 101 then + return -1 +endi +if $data[0][4] != 102 then + return -1 +endi +if $data[0][5] != 103 then + return -1 +endi +if $data[0][6] != 104 then + return -1 +endi + +print ========== step3 describe +sql describe db.ctb +if $rows != 7 then + return -1 +endi + +system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/script/tsim/stable/tag_drop.sim b/tests/script/tsim/stable/tag_drop.sim new file mode 100644 index 0000000000000000000000000000000000000000..50907be23efb005071820c8f1baa4ca58b0b727b --- /dev/null +++ b/tests/script/tsim/stable/tag_drop.sim @@ -0,0 +1,337 @@ +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 +system sh/exec.sh -n dnode1 -s start +sql connect + +print ========== prepare stb and ctb +sql create database db vgroups 1 +sql create table db.stb (ts timestamp, c1 int, c2 binary(4)) tags(t1 int, t2 binary(16)) comment "abd" +sql create table db.ctb using db.stb tags(101, "102") +sql insert into db.ctb values(now, 1, "2") + +sql show db.stables +if $rows != 1 then + return -1 +endi +if $data[0][0] != stb then + return -1 +endi +if $data[0][1] != db then + return -1 +endi +if $data[0][3] != 3 then + return -1 +endi +if $data[0][4] != 2 then + return -1 +endi +if $data[0][6] != abd then + return -1 +endi + +sql show db.tables +if $rows != 1 then + return -1 +endi +if $data[0][0] != ctb then + return -1 +endi +if $data[0][1] != db then + return -1 +endi +if $data[0][3] != 3 then + return -1 +endi +if $data[0][4] != stb then + return -1 +endi +if $data[0][6] != 2 then + return -1 +endi +if $data[0][9] != CHILD_TABLE then + return -1 +endi + +sql select * from db.stb +if $rows != 1 then + return -1 +endi +if $data[0][1] != 1 then + return -1 +endi +if $data[0][2] != 2 then + return -1 +endi +if $data[0][3] != 101 then + return -1 +endi +if $data[0][4] != 102 then + return -1 +endi + +sql_error alter table db.stb drop tag ts int +sql_error alter table db.stb drop tag t3 int +sql_error alter table 
db.stb drop tag t4 int +sql_error alter table db.stb drop tag c1 int +sql_error alter table db.stb drop tag c2 int + +print ========== step1 drop tag t2 +sql alter table db.stb drop tag t2 + +sql show db.stables +if $data[0][4] != 1 then + return -1 +endi + +sql describe db.ctb +if $rows != 4 then + return -1 +endi +if $data[4][0] != null then + return -1 +endi + +sql select * from db.stb +print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6] +if $rows != 1 then + return -1 +endi +if $data[0][1] != 1 then + return -1 +endi +if $data[0][2] != 2 then + return -1 +endi +if $data[0][3] != 101 then + return -1 +endi +if $data[0][4] != null then + return -1 +endi + +print ========== step2 add tag t3 +sql alter table db.stb add tag t3 int + +sql show db.stables +if $data[0][4] != 2 then + return -1 +endi + +sql describe db.ctb +if $rows != 5 then + return -1 +endi +if $data[4][0] != t3 then + return -1 +endi +if $data[4][1] != INT then + return -1 +endi +if $data[4][2] != 4 then + return -1 +endi + +sql select * from db.stb +print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6] +if $rows != 1 then + return -1 +endi +if $data[0][1] != 1 then + return -1 +endi +if $data[0][2] != 2 then + return -1 +endi +if $data[0][3] != 101 then + return -1 +endi +if $data[0][4] != NULL then + return -1 +endi + +print ========== step3 add tag t4 +sql alter table db.stb add tag t4 bigint +sql select * from db.stb +sql select * from db.stb +print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6] +print $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6] +print $data[2][0] $data[2][1] $data[2][2] $data[2][3] $data[2][4] $data[2][5] $data[2][6] + +if $rows != 1 then + return -1 +endi +if $data[0][1] != 1 then + return -1 +endi +if $data[0][2] != 2 then + return -1 +endi +if $data[0][3] != 101 then + return -1 +endi +if $data[0][4] != NULL then + return -1 +endi +if $data[0][5] != NULL then + return -1 +endi +if $data[0][6] != null then + return -1 +endi + +sql_error create table db.ctb2 using db.stb tags(101, "102") +sql create table db.ctb2 using db.stb tags(201, 202, 203) +sql insert into db.ctb2 values(now, 1, "2") + +sql select * from db.stb where tbname = 'ctb2'; +print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6] +print $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6] +print $data[2][0] $data[2][1] $data[2][2] $data[2][3] $data[2][4] $data[2][5] $data[2][6] + +if $rows != 1 then + return -1 +endi +if $data[0][1] != 1 then + return -1 +endi +if $data[0][2] != 2 then + return -1 +endi +if $data[0][3] != 201 then + return -1 +endi +if $data[0][4] != 202 then + return -1 +endi +if $data[0][5] != 203 then + return -1 +endi + +print ========== step4 describe +sql describe db.ctb +if $rows != 6 then + return -1 +endi + +print ========== step5 add tag2 +sql alter table db.stb add tag t2 bigint +sql select * from db.stb where tbname = 'ctb2'; +sql select * from db.stb where tbname = 'ctb2'; +print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6] +print $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6] +print $data[2][0] $data[2][1] $data[2][2] $data[2][3] $data[2][4] $data[2][5] $data[2][6] + +if $rows != 1 then + return -1 +endi +if $data[0][1] != 1 then + return -1 +endi +if $data[0][2] != 2 then + return -1 +endi +if $data[0][3] != 201 then 
+ return -1 +endi +if $data[0][4] != 202 then + return -1 +endi +if $data[0][5] != 203 then + return -1 +endi +if $data[0][6] != NULL then + return -1 +endi + +sql_error create table db.ctb2 using db.stb tags(101, "102") +sql_error create table db.ctb2 using db.stb tags(201, 202, 203) +sql create table db.ctb3 using db.stb tags(301, 302, 303, 304) +sql insert into db.ctb3 values(now, 1, "2") + +sql select * from db.stb where tbname = 'ctb3'; +print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6] +print $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6] +print $data[2][0] $data[2][1] $data[2][2] $data[2][3] $data[2][4] $data[2][5] $data[2][6] + +if $rows != 1 then + return -1 +endi +if $data[0][1] != 1 then + return -1 +endi +if $data[0][2] != 2 then + return -1 +endi +if $data[0][3] != 301 then + return -1 +endi +if $data[0][4] != 302 then + return -1 +endi +if $data[0][5] != 303 then + return -1 +endi +if $data[0][6] != 304 then + return -1 +endi + +print ========== step6 describe +sql describe db.ctb +if $rows != 7 then + return -1 +endi + +if $data[3][0] != t1 then + return -1 +endi +if $data[4][0] != t3 then + return -1 +endi +if $data[5][0] != t4 then + return -1 +endi +if $data[6][0] != t2 then + return -1 +endi +if $data[6][1] != BIGINT then + return -1 +endi + +print ========== step7 drop tag t1 +sql alter table db.stb drop tag t1 + +sql show db.stables +if $data[0][4] != 3 then + return -1 +endi + +sql describe db.ctb +if $rows != 6 then + return -1 +endi + +sql select * from db.stb where tbname = 'ctb3'; +print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6] +print $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6] +print $data[2][0] $data[2][1] $data[2][2] $data[2][3] $data[2][4] $data[2][5] $data[2][6] + +if $rows != 1 then + return -1 +endi +if $data[0][1] != 1 then + return -1 +endi +if $data[0][2] != 2 then + return -1 +endi +if $data[0][3] != 302 then + return -1 +endi +if $data[0][4] != 303 then + return -1 +endi +if $data[0][5] != 304 then + return -1 +endi + +system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/script/tsim/stable/tag_filter.sim b/tests/script/tsim/stable/tag_filter.sim new file mode 100644 index 0000000000000000000000000000000000000000..c8edfb1ee3862046875c6f432be8602b43120a9a --- /dev/null +++ b/tests/script/tsim/stable/tag_filter.sim @@ -0,0 +1,59 @@ +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 +system sh/exec.sh -n dnode1 -s start +sql connect + +print ========== prepare stb and ctb +sql create database db vgroups 1 +sql create table db.stb (ts timestamp, c1 int, c2 binary(4)) tags(t1 int, t2 binary(16)) comment "abd" + + +sql create table db.ctb1 using db.stb tags(1, "102") +sql insert into db.ctb1 values(now, 1, "2") + +sql create table db.ctb2 using db.stb tags(2, "102") +sql insert into db.ctb2 values(now, 2, "2") + +sql create table db.ctb3 using db.stb tags(3, "102") +sql insert into db.ctb3 values(now, 3, "2") + +sql create table db.ctb4 using db.stb tags(4, "102") +sql insert into db.ctb4 values(now, 4, "2") + +sql create table db.ctb5 using db.stb tags(5, "102") +sql insert into db.ctb5 values(now, 5, "2") + +sql create table db.ctb6 using db.stb tags(6, "102") +sql insert into db.ctb6 values(now, 6, "2") + +sql select * from db.stb where t1 = 1 +if $rows != 1 then + return -1 +endi + +sql select * from db.stb where t1 < 1 +if $rows != 0 then + return 
-1 +endi + +sql select * from db.stb where t1 < 2 +if $rows != 1 then + return -1 +endi + +sql select * from db.stb where t1 <= 2 +if $rows != 2 then + return -1 +endi + +sql select * from db.stb where t1 >= 1 +if $rows != 6 then + return -1 +endi + +sql select * from db.stb where t1 > 1 +if $rows != 5 then + return -1 +endi + +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/stable/tag_modify.sim b/tests/script/tsim/stable/tag_modify.sim new file mode 100644 index 0000000000000000000000000000000000000000..62e4c7b28255ee085250cb4fc43612116fc50be0 --- /dev/null +++ b/tests/script/tsim/stable/tag_modify.sim @@ -0,0 +1,123 @@ +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 +system sh/exec.sh -n dnode1 -s start +sql connect + +print ========== prepare stb and ctb +sql create database db vgroups 1 +sql create table db.stb (ts timestamp, c1 int, c2 binary(4)) tags(t1 int, t2 binary(4)) comment "abd" + +sql_error alter table db.stb MODIFY tag c2 binary(3) +sql_error alter table db.stb MODIFY tag c2 int +sql_error alter table db.stb MODIFY tag c1 int +sql_error alter table db.stb MODIFY tag ts int +sql_error alter table db.stb MODIFY tag t2 binary(3) +sql_error alter table db.stb MODIFY tag t2 int +sql_error alter table db.stb MODIFY tag t1 int +sql create table db.ctb using db.stb tags(101, "12345") +sql insert into db.ctb values(now, 1, "1234") + +sql select * from db.stb +print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6] + +if $rows != 1 then + return -1 +endi +if $data[0][1] != 1 then + return -1 +endi +if $data[0][2] != 1234 then + return -1 +endi +if $data[0][3] != 101 then + return -1 +endi +if $data[0][4] != 1234 then + return -1 +endi + +print ========== step1 modify tag +sql alter table db.stb MODIFY tag t2 binary(5) +sql select * from db.stb + +sql create table db.ctb2 using db.stb tags(101, "12345") +sql insert into db.ctb2 values(now, 1, "1234") + +sql select * from db.stb where tbname = 'ctb2'; +print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6] +print $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6] + +if $rows != 1 then + return -1 +endi +if $data[0][1] != 1 then + return -1 +endi +if $data[0][2] != 1234 then + return -1 +endi +if $data[0][3] != 101 then + return -1 +endi +if $data[0][4] != 12345 then + return -1 +endi + +print ========== step2 describe +sql describe db.ctb2 +if $rows != 5 then + return -1 +endi +if $data[0][0] != ts then + return -1 +endi +if $data[1][0] != c1 then + return -1 +endi +if $data[2][0] != c2 then + return -1 +endi +if $data[3][0] != t1 then + return -1 +endi +if $data[4][0] != t2 then + return -1 +endi +if $data[4][1] != VARCHAR then + return -1 +endi +if $data[4][2] != 5 then + return -1 +endi + +system sh/exec.sh -n dnode1 -s stop -x SIGINT +system sh/exec.sh -n dnode1 -s start + +sql connect +sql describe db.ctb2 +if $rows != 5 then + return -1 +endi +if $data[0][0] != ts then + return -1 +endi +if $data[1][0] != c1 then + return -1 +endi +if $data[2][0] != c2 then + return -1 +endi +if $data[3][0] != t1 then + return -1 +endi +if $data[4][0] != t2 then + return -1 +endi +if $data[4][1] != VARCHAR then + return -1 +endi +if $data[4][2] != 5 then + return -1 +endi + +system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/script/tsim/stable/tag_rename.sim b/tests/script/tsim/stable/tag_rename.sim new file mode 100644 index
0000000000000000000000000000000000000000..2f67a3ab2c51d8c8499219ea8779b23797d9d0af --- /dev/null +++ b/tests/script/tsim/stable/tag_rename.sim @@ -0,0 +1,120 @@ +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 +system sh/exec.sh -n dnode1 -s start +sql connect + +print ========== prepare stb and ctb +sql create database db vgroups 1 +sql create table db.stb (ts timestamp, c1 int, c2 binary(4)) tags(t1 int, t2 binary(4)) comment "abd" + +sql_error alter table db.stb rename tag c2 c3 +sql_error alter table db.stb rename tag c2 c3 +sql_error alter table db.stb rename tag c1 c3 +sql_error alter table db.stb rename tag ts c3 +sql_error alter table db.stb rename tag t2 t1 +sql_error alter table db.stb rename tag t2 t2 +sql_error alter table db.stb rename tag t1 t2 +sql create table db.ctb using db.stb tags(101, "12345") +sql insert into db.ctb values(now, 1, "1234") + +sql select * from db.stb +print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6] + +if $rows != 1 then + return -1 +endi +if $data[0][1] != 1 then + return -1 +endi +if $data[0][2] != 1234 then + return -1 +endi +if $data[0][3] != 101 then + return -1 +endi +if $data[0][4] != 1234 then + return -1 +endi + +print ========== step1 rename tag +sql alter table db.stb rename tag t1 t3 +sql select * from db.stb +sql select * from db.stb + +print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6] +print $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6] + +if $rows != 1 then + return -1 +endi +if $data[0][1] != 1 then + return -1 +endi +if $data[0][2] != 1234 then + return -1 +endi +if $data[0][3] != 101 then + return -1 +endi +if $data[0][4] != 1234 then + return -1 +endi + +print ========== step2 describe +sql describe db.ctb +if $rows != 5 then + return -1 +endi +if $data[0][0] != ts then + return -1 +endi +if $data[1][0] != c1 then + return -1 +endi +if $data[2][0] != c2 then + return -1 +endi +if $data[3][0] != t3 then + return -1 +endi +if $data[4][0] != t2 then + return -1 +endi +if $data[4][1] != VARCHAR then + return -1 +endi +if $data[4][2] != 4 then + return -1 +endi + +system sh/exec.sh -n dnode1 -s stop -x SIGINT +system sh/exec.sh -n dnode1 -s start + +sql connect +sql describe db.ctb +if $rows != 5 then + return -1 +endi +if $data[0][0] != ts then + return -1 +endi +if $data[1][0] != c1 then + return -1 +endi +if $data[2][0] != c2 then + return -1 +endi +if $data[3][0] != t3 then + return -1 +endi +if $data[4][0] != t2 then + return -1 +endi +if $data[4][1] != VARCHAR then + return -1 +endi +if $data[4][2] != 4 then + return -1 +endi + +system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/script/tsim/stable/values.sim b/tests/script/tsim/stable/values.sim index e5e3118e12634f41b0d124d3ba379b8f93df442f..88eca28a12c6a48c5c39178f194e8836864e71d8 100644 --- a/tests/script/tsim/stable/values.sim +++ b/tests/script/tsim/stable/values.sim @@ -1,16 +1,9 @@ system sh/stop_dnodes.sh - - system sh/deploy.sh -n dnode1 -i 1 -system sh/cfg.sh -n dnode1 -c walLevel -v 1 -system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4 system sh/exec.sh -n dnode1 -s start - -sleep 2000 sql connect print ======================== dnode1 start - sql create database vdb0 sql create table vdb0.mt (ts timestamp, tbcol int) TAGS(tgcol int) diff --git a/tests/script/tsim/stable/vnode3.sim b/tests/script/tsim/stable/vnode3.sim index 
97a8203883cc5f427ccc355cf5898b1e3ebe6cd2..186d0f5eea254aeb451f48c3cbf7d0d094723c09 100644 --- a/tests/script/tsim/stable/vnode3.sim +++ b/tests/script/tsim/stable/vnode3.sim @@ -1,16 +1,9 @@ system sh/stop_dnodes.sh - system sh/deploy.sh -n dnode1 -i 1 -system sh/cfg.sh -n dnode1 -c walLevel -v 1 -system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4 -system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 4 system sh/exec.sh -n dnode1 -s start - -sleep 2000 sql connect print ======================== dnode1 start - $dbPrefix = v3_db $tbPrefix = v3_tb $mtPrefix = v3_mt diff --git a/tests/script/tsim/stream/basic0.sim b/tests/script/tsim/stream/basic0.sim index 9edad991dc0ac5c5c960be026c1fd17073d17881..29775a5ef1d1daf90122f053da6c153bac843341 100644 --- a/tests/script/tsim/stream/basic0.sim +++ b/tests/script/tsim/stream/basic0.sim @@ -63,7 +63,8 @@ if $data02 != 234 then return -1 endi -if $data03 != 234 then +if $data03 != 234 then + print expect 234, actual $data03 return -1 endi diff --git a/tests/script/tsim/stream/session0.sim b/tests/script/tsim/stream/session0.sim index 46b343632abd0347502b86e0978f2afd22c139a8..41a8b3371002848dd6909ab1c681bde0628e6324 100644 --- a/tests/script/tsim/stream/session0.sim +++ b/tests/script/tsim/stream/session0.sim @@ -23,7 +23,7 @@ sql insert into t1 values(1648791223001,10,2,3,1.1,2); sql insert into t1 values(1648791233002,3,2,3,2.1,3); sql insert into t1 values(1648791243003,NULL,NULL,NULL,NULL,4); sql insert into t1 values(1648791213002,NULL,NULL,NULL,NULL,5) (1648791233012,NULL,NULL,NULL,NULL,6); - +sleep 300 sql select * from streamt order by s desc; # row 0 @@ -115,7 +115,7 @@ sql insert into t1 values(1648791233002,3,2,3,2.1,9); sql insert into t1 values(1648791243003,4,2,3,3.1,10); sql insert into t1 values(1648791213002,4,2,3,4.1,11) ; sql insert into t1 values(1648791213002,4,2,3,4.1,12) (1648791223009,4,2,3,4.1,13); - +sleep 300 sql select * from streamt order by s desc ; # row 0 diff --git a/tests/script/tsim/stream/session1.sim b/tests/script/tsim/stream/session1.sim index a44639ba7a5e17e51e6ac8190d991bfd2edf1a9e..fb31818f98138948ca91758e14de85146b9940d5 100644 --- a/tests/script/tsim/stream/session1.sim +++ b/tests/script/tsim/stream/session1.sim @@ -22,7 +22,7 @@ sql insert into t1 values(1648791210000,1,1,1,1.1,1); sql insert into t1 values(1648791220000,2,2,2,2.1,2); sql insert into t1 values(1648791230000,3,3,3,3.1,3); sql insert into t1 values(1648791240000,4,4,4,4.1,4); - +sleep 300 sql select * from streamt order by s desc; # row 0 @@ -50,7 +50,7 @@ sql insert into t1 values(1648791250005,5,5,5,5.1,5); sql insert into t1 values(1648791260006,6,6,6,6.1,6); sql insert into t1 values(1648791270007,7,7,7,7.1,7); sql insert into t1 values(1648791240005,5,5,5,5.1,8) (1648791250006,6,6,6,6.1,9); - +sleep 300 sql select * from streamt order by s desc; # row 0 @@ -100,7 +100,7 @@ sql insert into t1 values(1648791260007,7,7,7,7.1,12) (1648791290008,7,7,7,7.1,1 sql insert into t1 values(1648791500000,7,7,7,7.1,15) (1648791520000,8,8,8,8.1,16) (1648791540000,8,8,8,8.1,17); sql insert into t1 values(1648791530000,8,8,8,8.1,18); sql insert into t1 values(1648791220000,10,10,10,10.1,19) (1648791290008,2,2,2,2.1,20) (1648791540000,17,17,17,17.1,21) (1648791500001,22,22,22,22.1,22); - +sleep 300 sql select * from streamt order by s desc; # row 0 diff --git a/tests/script/tsim/stream/triggerInterval0.sim b/tests/script/tsim/stream/triggerInterval0.sim new file mode 100644 index 
0000000000000000000000000000000000000000..756f591f3ff8a58586cc77ba5a95acc1f31d46b0 --- /dev/null +++ b/tests/script/tsim/stream/triggerInterval0.sim @@ -0,0 +1,97 @@ +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 +system sh/exec.sh -n dnode1 -s start +sleep 50 +sql connect + +print =============== create database +sql create database test vgroups 1 +sql show databases +if $rows != 3 then + return -1 +endi + +print $data00 $data01 $data02 + +sql use test +sql create table t1(ts timestamp, a int, b int , c int, d double); +sql create stream streams1 trigger window_close into streamt as select _wstartts, count(*) c1, count(d) c2 , sum(a) c3 , max(b) c4, min(c) c5 from t1 interval(10s); + +sql insert into t1 values(1648791213001,1,2,3,1.0); +sleep 300 +sql select * from streamt; +if $rows != 0 then + print ======$rows + return -1 +endi + +sql insert into t1 values(1648791223001,2,2,3,1.1); +sql insert into t1 values(1648791223002,2,2,3,1.1); +sql insert into t1 values(1648791223003,2,2,3,1.1); +sql insert into t1 values(1648791223001,2,2,3,1.1); +sleep 300 +sql select * from streamt; +if $rows != 1 then + print ======$rows + return -1 +endi + +if $data01 != 1 then + print ======$data01 + return -1 +endi + +sql insert into t1 values(1648791233001,2,2,3,1.1); +sleep 300 +sql select * from streamt; +if $rows != 2 then + print ======$rows + return -1 +endi +if $data01 != 1 then + print ======$data01 + return -1 +endi +if $data11 != 3 then + print ======$data11 + return -1 +endi + +sql insert into t1 values(1648791223004,2,2,3,1.1); +sql insert into t1 values(1648791223004,2,2,3,1.1); +sql insert into t1 values(1648791223005,2,2,3,1.1); +sleep 300 +sql select * from streamt; +if $rows != 2 then + print ======$rows + return -1 +endi +if $data01 != 1 then + print ======$data01 + return -1 +endi +if $data11 != 5 then + print ======$data11 + return -1 +endi + + +sql insert into t1 values(1648791233002,3,2,3,2.1); +sql insert into t1 values(1648791213002,4,2,3,3.1) +sql insert into t1 values(1648791213002,4,2,3,4.1); +sleep 300 +sql select * from streamt; +if $rows != 2 then + print ======$rows + return -1 +endi +if $data01 != 2 then + print ======$data01 + return -1 +endi +if $data11 != 5 then + print ======$data11 + return -1 +endi + +system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/script/tsim/stream/triggerSession0.sim b/tests/script/tsim/stream/triggerSession0.sim new file mode 100644 index 0000000000000000000000000000000000000000..fb0666fdcfe847dd25a3e4eb3b66acd16ed09f63 --- /dev/null +++ b/tests/script/tsim/stream/triggerSession0.sim @@ -0,0 +1,105 @@ +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 +system sh/exec.sh -n dnode1 -s start +sleep 50 +sql connect + +print =============== create database +sql create database test vgroups 1 +sql show databases +if $rows != 3 then + return -1 +endi + +print $data00 $data01 $data02 + +sql use test +sql create table t2(ts timestamp, a int, b int , c int, d double); +sql create stream streams2 trigger window_close into streamt2 as select _wstartts, count(*) c1, count(d) c2 , sum(a) c3 , max(b) c4, min(c) c5 from t2 session(ts, 10s); + +sql insert into t2 values(1648791213000,1,2,3,1.0); +sql insert into t2 values(1648791222999,1,2,3,1.0); +sql insert into t2 values(1648791223000,1,2,3,1.0); +sql insert into t2 values(1648791223001,1,2,3,1.0); +sql insert into t2 values(1648791233001,1,2,3,1.0); +sleep 300 +sql select * from streamt2; +if $rows != 0 then + print ======$rows + return -1 
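+# note: with trigger window_close, streamt2 stays empty until a session closes; the five inserts above chain into one still-open session (every gap is within the 10s tolerance, the largest being 1648791233001 - 1648791223001 = 10000ms), which is why zero rows are expected here, and why the insert of 1648791243002 just below finally closes the window and yields one row with count(*) c1 == 5.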
+endi + +sql insert into t2 values(1648791243002,1,2,3,1.0); +sleep 300 +sql select * from streamt2; +if $rows != 1 then + print ======$rows + return -1 +endi + +if $data01 != 5 then + print ======$data01 + return -1 +endi + +sql insert into t2 values(1648791223001,1,2,3,1.0) (1648791223002,1,2,3,1.0) (1648791222999,1,2,3,1.0); +sleep 300 +sql select * from streamt2; +if $rows != 1 then + print ======$rows + return -1 +endi + +if $data01 != 6 then + print ======$data01 + return -1 +endi + +sql insert into t2 values(1648791233002,1,2,3,1.0); +sleep 300 +sql select * from streamt2; +if $rows != 1 then + print ======$rows + return -1 +endi + +if $data01 != 6 then + print ======$data01 + return -1 +endi + +sql insert into t2 values(1648791253003,1,2,3,1.0); +sleep 300 +sql select * from streamt2; +if $rows != 1 then + print ======$rows + return -1 +endi + +if $data01 != 8 then + print ======$data01 + return -1 +endi + +sql insert into t2 values(1648791243003,1,2,3,1.0) (1648791243002,1,2,3,1.0) (1648791270004,1,2,3,1.0) (1648791280005,1,2,3,1.0) (1648791290006,1,2,3,1.0); +sleep 500 +sql select * from streamt2; +if $rows != 3 then + print ======$rows + return -1 +endi + +if $data01 != 10 then + print ======$data01 + return -1 +endi +if $data11 != 1 then + print ======$data11 + return -1 +endi +if $data21 != 1 then + print ======$data21 + return -1 +endi + +#system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/script/tsim/sync/3Replica1VgElect.sim b/tests/script/tsim/sync/3Replica1VgElect.sim new file mode 100644 index 0000000000000000000000000000000000000000..e531fa82ad3d78d45447b583834c5b8842c171d1 --- /dev/null +++ b/tests/script/tsim/sync/3Replica1VgElect.sim @@ -0,0 +1,478 @@ +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 +system sh/deploy.sh -n dnode2 -i 2 +system sh/deploy.sh -n dnode3 -i 3 +system sh/deploy.sh -n dnode4 -i 4 + +system sh/cfg.sh -n dnode1 -c supportVnodes -v 0 + +system sh/exec.sh -n dnode1 -s start +system sh/exec.sh -n dnode2 -s start +system sh/exec.sh -n dnode3 -s start +system sh/exec.sh -n dnode4 -s start + +$loop_cnt = 0 +check_dnode_ready: + $loop_cnt = $loop_cnt + 1 + sleep 200 + if $loop_cnt == 10 then + print ====> dnode not ready! + return -1 + endi +sql show dnodes +print ===> $rows $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6] +print ===> $rows $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6] +print ===> $rows $data[2][0] $data[2][1] $data[2][2] $data[2][3] $data[2][4] $data[2][5] $data[2][6] +print ===> $rows $data[3][0] $data[3][1] $data[3][2] $data[3][3] $data[3][4] $data[3][5] $data[3][6] +if $data[0][0] != 1 then + return -1 +endi +if $data[0][4] != ready then + goto check_dnode_ready +endi + +sql connect +sql create dnode $hostname port 7200 +sql create dnode $hostname port 7300 +sql create dnode $hostname port 7400 + +$loop_cnt = 0 +check_dnode_ready_1: +$loop_cnt = $loop_cnt + 1 +sleep 200 +if $loop_cnt == 10 then + print ====> dnodes not ready! 
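+# note: dnode1 is configured above with supportVnodes 0, so it hosts only the mnode; the db vgroups can land only on dnode2-dnode4, hence all four dnodes must report ready before the database is created.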
+ return -1 +endi +sql show dnodes +print ===> $rows $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6] +print ===> $rows $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6] +print ===> $rows $data[2][0] $data[2][1] $data[2][2] $data[2][3] $data[2][4] $data[2][5] $data[2][6] +print ===> $rows $data[3][0] $data[3][1] $data[3][2] $data[3][3] $data[3][4] $data[3][5] $data[3][6] +if $data[0][4] != ready then + goto check_dnode_ready_1 +endi +if $data[1][4] != ready then + goto check_dnode_ready_1 +endi +if $data[2][4] != ready then + goto check_dnode_ready_1 +endi +if $data[3][4] != ready then + goto check_dnode_ready_1 +endi + +$replica = 3 +$vgroups = 1 + +print ============= create database +sql create database db replica $replica vgroups $vgroups + +$loop_cnt = 0 +check_db_ready: +$loop_cnt = $loop_cnt + 1 +sleep 200 +if $loop_cnt == 100 then + print ====> db not ready! + return -1 +endi +sql show databases +print ===> rows: $rows +print $data[2][0] $data[2][1] $data[2][2] $data[2][3] $data[2][4] $data[2][5] $data[2][6] $data[2][7] $data[2][8] $data[2][9] $data[2][10] $data[2][11] $data[2][12] $data[2][13] $data[2][14] $data[2][15] $data[2][16] $data[2][17] $data[2][18] $data[2][19] +if $rows != 3 then + return -1 +endi +if $data[2][19] != ready then + goto check_db_ready +endi + +sql use db + +$loop_cnt = 0 +check_vg_ready: +$loop_cnt = $loop_cnt + 1 +sleep 200 +if $loop_cnt == 300 then + print ====> vgroups not ready! + return -1 +endi + +sql show vgroups +print ===> rows: $rows +print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6] $data[0][7] $data[0][8] $data[0][9] $data[0][10] $data[0][11] + +if $rows != $vgroups then + return -1 +endi + +if $data[0][4] == leader then + if $data[0][6] == follower then + if $data[0][8] == follower then + print ---- vgroup $data[0][0] leader locate on dnode $data[0][3] + endi + endi +elif $data[0][6] == leader then + if $data[0][4] == follower then + if $data[0][8] == follower then + print ---- vgroup $data[0][0] leader locate on dnode $data[0][5] + endi + endi +elif $data[0][8] == leader then + if $data[0][4] == follower then + if $data[0][6] == follower then + print ---- vgroup $data[0][0] leader locate on dnode $data[0][7] + endi + endi +else + goto check_vg_ready +endi + + +vg_ready: +print ====> create stable/child table +sql create table stb (ts timestamp, c1 int, c2 float, c3 binary(10)) tags (t1 int) + +sql show stables +if $rows != 1 then + return -1 +endi + +$ctbPrefix = ctb +$ntbPrefix = ntb +$tbNum = 10 +$i = 0 +while $i < $tbNum + $ctb = $ctbPrefix . $i + sql create table $ctb using stb tags( $i ) + $ntb = $ntbPrefix . $i + sql create table $ntb (ts timestamp, c1 int, c2 float, c3 binary(10)) + $i = $i + 1 +endw + +$totalTblNum = $tbNum * 2 +sleep 1000 +sql show tables +print ====> expect $totalTblNum tables, in fact $rows +if $rows != $totalTblNum then + return -1 +endi + +start_switch_leader: + +$switch_loop_cnt = 0 +sql show vgroups +$dnodeId = $data[0][3] +$dnodeId = dnode . $dnodeId + +switch_leader_to_offine_loop: + +print $dnodeId +print ====> stop $dnodeId +system sh/exec.sh -n $dnodeId -s stop -x SIGINT + + +$loop_cnt = 0 +$loop_cnt = $loop_cnt + 1 +sleep 201 +if $loop_cnt == 300 then + print ====> vgroups switch fail!!!
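+# note: $loop_cnt is reset and then incremented exactly once before this guard (there is no retry label between them), so the == 300 branch is unreachable; the sleep 201 above is the only wait applied after stopping the leader.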
+ return -1 +endi +sql show vgroups +print ===> rows: $rows +print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6] $data[0][7] $data[0][8] $data[0][9] $data[0][10] $data[0][11] + +if $rows != $vgroups then + return -1 +endi + + +vg_offline_1: + +print ====> start $dnodeId +system sh/exec.sh -n $dnodeId -s start + +$switch_loop_cnt = $switch_loop_cnt + 1 +print $switch_loop_cnt + +if $switch_loop_cnt == 1 then + sql show vgroups + $dnodeId = $data[1][3] + $dnodeId = dnode . $dnodeId + goto switch_leader_to_offine_loop +elif $switch_loop_cnt == 2 then + sql show vgroups + $dnodeId = $data[2][3] + $dnodeId = dnode . $dnodeId + goto switch_leader_to_offine_loop +elif $switch_loop_cnt == 3 then + sql show vgroups + $dnodeId = $data[3][3] + $dnodeId = dnode . $dnodeId + goto switch_leader_to_offine_loop +elif $switch_loop_cnt == 4 then + sql show vgroups + $dnodeId = $data[4][3] + $dnodeId = dnode . $dnodeId + goto switch_leader_to_offine_loop +else + goto stop_leader_to_offine_loop +endi + +stop_leader_to_offine_loop: + +$loop_cnt = 0 +check_vg_ready1: +$loop_cnt = $loop_cnt + 1 +print $loop_cnt +sleep 202 +if $loop_cnt == 300 then + print ====> vgroups not ready! + return -1 +endi + +sql show vgroups +print ===> rows: $rows +print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6] $data[0][7] $data[0][8] $data[0][9] $data[0][10] $data[0][11] + +if $rows != $vgroups then + return -1 +endi + +if $data[0][4] == leader then + if $data[0][6] == follower then + if $data[0][8] == follower then + print ---- vgroup $data[0][0] leader locate on dnode $data[0][3] + endi + endi +elif $data[0][6] == leader then + if $data[0][4] == follower then + if $data[0][8] == follower then + print ---- vgroup $data[0][0] leader locate on dnode $data[0][5] + endi + endi +elif $data[0][8] == leader then + if $data[0][4] == follower then + if $data[0][6] == follower then + print ---- vgroup $data[0][0] leader locate on dnode $data[0][7] + endi + endi +else + goto check_vg_ready1 +endi + + +print ====> final test: create stable/child table +sql create table stb1 (ts timestamp, c1 int, c2 float, c3 binary(10)) tags (t1 int) + + +sql show stables +if $rows != 2 then + return -1 +endi + +$ctbPrefix = ctb1 +$ntbPrefix = ntb1 +$tbNum = 10 +$i = 0 +while $i < $tbNum + $ctb = $ctbPrefix . $i + sql create table $ctb using stb1 tags( $i ) + $ntb = $ntbPrefix . $i + sql create table $ntb (ts timestamp, c1 int, c2 float, c3 binary(10)) + $i = $i + 1 +endw + +sleep 1000 +sql show stables +if $rows != 2 then + return -1 +endi + +sql show tables +if $rows != 40 then + return -1 +endi + + + +system sh/deploy.sh -n dnode5 -i 5 +system sh/exec.sh -n dnode5 -s start + +sql connect +sql create dnode $hostname port 7500 + +$loop_cnt = 0 +check_dnode_ready3: + $loop_cnt = $loop_cnt + 1 + sleep 200 + if $loop_cnt == 100 then + print ====> dnode not ready! 
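+# note: dnode5 is deployed and registered on port 7500, following the 7200-7400 pattern used for dnode2-dnode4; the loop below waits for all five dnodes, keying on row 4 being ready.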
+ return -1 + endi + +sql show dnodes +print ===> $rows $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6] +print ===> $rows $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6] +print ===> $rows $data[2][0] $data[2][1] $data[2][2] $data[2][3] $data[2][4] $data[2][5] $data[2][6] +print ===> $rows $data[3][0] $data[3][1] $data[3][2] $data[3][3] $data[3][4] $data[3][5] $data[3][6] +print ===> $rows $data[4][0] $data[4][1] $data[4][2] $data[4][3] $data[4][4] $data[4][5] $data[4][6] + +if $rows != 5 then + return -1 +endi + +if $data[4][4] != ready then + goto check_dnode_ready3 +endi + + + +# restart clusters + +system sh/exec.sh -n dnode1 -s stop -x SIGINT +system sh/exec.sh -n dnode2 -s stop -x SIGINT +system sh/exec.sh -n dnode3 -s stop -x SIGINT +system sh/exec.sh -n dnode4 -s stop -x SIGINT +system sh/exec.sh -n dnode5 -s stop -x SIGINT + + + +system sh/exec.sh -n dnode1 -s start +system sh/exec.sh -n dnode2 -s start +system sh/exec.sh -n dnode3 -s start +system sh/exec.sh -n dnode4 -s start +system sh/exec.sh -n dnode5 -s start + + +$loop_cnt = 0 +check_dnode_ready_2: + $loop_cnt = $loop_cnt + 1 + sleep 200 + if $loop_cnt == 10 then + print ====> dnode not ready! + return -1 + endi +sql show dnodes +print ===> $rows $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6] +print ===> $rows $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6] +print ===> $rows $data[2][0] $data[2][1] $data[2][2] $data[2][3] $data[2][4] $data[2][5] $data[2][6] +print ===> $rows $data[3][0] $data[3][1] $data[3][2] $data[3][3] $data[3][4] $data[3][5] $data[3][6] +if $data[0][0] != 1 then + return -1 +endi + +if $data[0][4] != ready then + goto check_dnode_ready_2 +endi +if $data[1][4] != ready then + goto check_dnode_ready_2 +endi +if $data[2][4] != ready then + goto check_dnode_ready_2 +endi +if $data[3][4] != ready then + goto check_dnode_ready_2 +endi + +sql use db; +$ctbPrefix = ctb2 +$ntbPrefix = ntb2 +$tbNum = 10 +$i = 0 +while $i < $tbNum + $ctb = $ctbPrefix . $i + sql create table $ctb using stb1 tags( $i ) + $ntb = $ntbPrefix . $i + sql create table $ntb (ts timestamp, c1 int, c2 float, c3 binary(10)) + $i = $i + 1 +endw + +sleep 1000 +sql use db +sql show stables +if $rows != 2 then + return -1 +endi + +sql show tables +print $rows +if $rows != 60 then + return -1 +endi + + + +$replica = 3 +$vgroups = 5 + +print ============= create database +sql create database db1 replica $replica vgroups $vgroups + +$loop_cnt = 0 +check_db_ready1: +$loop_cnt = $loop_cnt + 1 +sleep 200 +if $loop_cnt == 100 then + print ====> db1 not ready! + return -1 +endi +sql show databases +print ===> rows: $rows +print $data(db1)[0] $data(db1)[1] $data(db1)[2] $data(db1)[3] $data(db1)[4] $data(db1)[5] $data(db1)[6] $data(db1)[7] $data(db1)[8] $data(db1)[9] $data(db1)[10] $data(db1)[11] $data(db1)[12] $data(db1)[13] $data(db1)[14] $data(db1)[15] $data(db1)[16] $data(db1)[17] $data(db1)[18] $data(db1)[19] +if $rows != 4 then + return -1 +endi +if $data(db1)[19] != ready then + goto check_db_ready1 +endi + + +sql use db1 + +$loop_cnt = 0 +check_vg_ready3: +$loop_cnt = $loop_cnt + 1 +print $loop_cnt +sleep 202 +if $loop_cnt == 300 then + print ====> vgroups not ready!
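+# note: db1 is created with replica 3 and vgroups 5, but the readiness check below only validates vgroup row 0; the elif chain accepts the leader in any of the three replica columns ($data[0][4], [6] or [8]).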
+ return -1 +endi + +sql show vgroups +print ===> rows: $rows +print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6] $data[0][7] $data[0][8] $data[0][9] $data[0][10] $data[0][11] +if $rows != $vgroups then + return -1 +endi + +if $data[0][4] == leader then + if $data[0][6] == follower then + if $data[0][8] == follower then + print ---- vgroup $data[0][0] leader locate on dnode $data[0][3] + endi + endi +elif $data[0][6] == leader then + if $data[0][4] == follower then + if $data[0][8] == follower then + print ---- vgroup $data[0][0] leader locate on dnode $data[0][5] + endi + endi +elif $data[0][8] == leader then + if $data[0][4] == follower then + if $data[0][6] == follower then + print ---- vgroup $data[0][0] leader locate on dnode $data[0][7] + endi + endi +else + goto check_vg_ready3 +endi + +system sh/exec.sh -n dnode1 -s stop -x SIGINT +system sh/exec.sh -n dnode2 -s stop -x SIGINT +system sh/exec.sh -n dnode3 -s stop -x SIGINT +system sh/exec.sh -n dnode4 -s stop -x SIGINT + + + diff --git a/tests/script/tsim/sync/3Replica5VgElect.sim b/tests/script/tsim/sync/3Replica5VgElect.sim new file mode 100644 index 0000000000000000000000000000000000000000..2e05e848be43671da4e981242eff6d03721e36b9 --- /dev/null +++ b/tests/script/tsim/sync/3Replica5VgElect.sim @@ -0,0 +1,755 @@ +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 +system sh/deploy.sh -n dnode2 -i 2 +system sh/deploy.sh -n dnode3 -i 3 +system sh/deploy.sh -n dnode4 -i 4 + +system sh/cfg.sh -n dnode1 -c supportVnodes -v 0 + +system sh/exec.sh -n dnode1 -s start +system sh/exec.sh -n dnode2 -s start +system sh/exec.sh -n dnode3 -s start +system sh/exec.sh -n dnode4 -s start + +$loop_cnt = 0 +check_dnode_ready: + $loop_cnt = $loop_cnt + 1 + sleep 200 + if $loop_cnt == 10 then + print ====> dnode not ready! + return -1 + endi +sql show dnodes +print ===> $rows $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6] +print ===> $rows $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6] +print ===> $rows $data[2][0] $data[2][1] $data[2][2] $data[2][3] $data[2][4] $data[2][5] $data[2][6] +print ===> $rows $data[3][0] $data[3][1] $data[3][2] $data[3][3] $data[3][4] $data[3][5] $data[3][6] +if $data[0][0] != 1 then + return -1 +endi +if $data[0][4] != ready then + goto check_dnode_ready +endi + +sql connect +sql create dnode $hostname port 7200 +sql create dnode $hostname port 7300 +sql create dnode $hostname port 7400 + +$loop_cnt = 0 +check_dnode_ready_1: +$loop_cnt = $loop_cnt + 1 +sleep 200 +if $loop_cnt == 10 then + print ====> dnodes not ready! 
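+# note: this script is the 5-vgroup variant of 3Replica1VgElect.sim; the setup (4 dnodes, dnode1 mnode-only) is identical, and the leader/follower checks further down are repeated per vgroup row 0-4.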
+ return -1 +endi +sql show dnodes +print ===> $rows $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6] +print ===> $rows $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6] +print ===> $rows $data[2][0] $data[2][1] $data[2][2] $data[2][3] $data[2][4] $data[2][5] $data[2][6] +print ===> $rows $data[3][0] $data[3][1] $data[3][2] $data[3][3] $data[3][4] $data[3][5] $data[3][6] +if $data[0][4] != ready then + goto check_dnode_ready_1 +endi +if $data[1][4] != ready then + goto check_dnode_ready_1 +endi +if $data[2][4] != ready then + goto check_dnode_ready_1 +endi +if $data[3][4] != ready then + goto check_dnode_ready_1 +endi + +$replica = 3 +$vgroups = 5 + +print ============= create database +sql create database db replica $replica vgroups $vgroups + +$loop_cnt = 0 +check_db_ready: +$loop_cnt = $loop_cnt + 1 +sleep 200 +if $loop_cnt == 100 then + print ====> db not ready! + return -1 +endi +sql show databases +print ===> rows: $rows +print $data[2][0] $data[2][1] $data[2][2] $data[2][3] $data[2][4] $data[2][5] $data[2][6] $data[2][7] $data[2][8] $data[2][9] $data[2][6] $data[2][11] $data[2][12] $data[2][13] $data[2][14] $data[2][15] $data[2][16] $data[2][17] $data[2][18] $data[2][19] +if $rows != 3 then + return -1 +endi +if $data[2][19] != ready then + goto check_db_ready +endi + +sql use db + +$loop_cnt = 0 +check_vg_ready: +$loop_cnt = $loop_cnt + 1 +sleep 200 +if $loop_cnt == 300 then + print ====> vgroups not ready! + return -1 +endi + +sql show vgroups +print ===> rows: $rows +print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6] $data[0][7] $data[0][8] $data[0][9] $data[0][10] $data[0][11] +print $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6] $data[1][7] $data[1][8] $data[1][9] $data[1][10] $data[1][11] +print $data[2][0] $data[2][1] $data[2][2] $data[2][3] $data[2][4] $data[2][5] $data[2][6] $data[2][7] $data[2][8] $data[2][9] $data[2][10] $data[2][11] +print $data[3][0] $data[3][1] $data[3][2] $data[3][3] $data[3][4] $data[3][5] $data[3][6] $data[3][7] $data[3][8] $data[3][9] $data[3][10] $data[3][11] +print $data[4][0] $data[4][1] $data[4][2] $data[4][3] $data[4][4] $data[4][5] $data[4][6] $data[4][7] $data[4][8] $data[4][9] $data[4][10] $data[4][11] +if $rows != $vgroups then + return -1 +endi + +if $data[0][4] == leader then + if $data[0][6] == follower then + if $data[0][8] == follower then + print ---- vgroup $data[0][0] leader locate on dnode $data[0][3] + endi + endi +elif $data[0][6] == leader then + if $data[0][4] == follower then + if $data[0][8] == follower then + print ---- vgroup $data[0][0] leader locate on dnode $data[0][5] + endi + endi +elif $data[0][8] == leader then + if $data[0][4] == follower then + if $data[0][6] == follower then + print ---- vgroup $data[0][0] leader locate on dnode $data[0][7] + endi + endi +else + goto check_vg_ready +endi + +if $data[1][4] == leader then + if $data[1][6] == follower then + if $data[1][8] == follower then + print ---- vgroup $data[1][0] leader locate on dnode $data[1][3] + endi + endi +elif $data[1][6] == leader then + if $data[1][4] == follower then + if $data[1][8] == follower then + print ---- vgroup $data[1][0] leader locate on dnode $data[1][5] + endi + endi +elif $data[1][8] == leader then + if $data[1][4] == follower then + if $data[1][6] == follower then + print ---- vgroup $data[1][0] leader locate on dnode $data[1][7] + endi + endi +else + goto check_vg_ready +endi + +if 
$data[2][4] == leader then + if $data[2][6] == follower then + if $data[2][8] == follower then + print ---- vgroup $data[2][0] leader locate on dnode $data[2][3] + endi + endi +elif $data[2][6] == leader then + if $data[2][4] == follower then + if $data[2][8] == follower then + print ---- vgroup $data[2][0] leader locate on dnode $data[2][5] + endi + endi +elif $data[2][8] == leader then + if $data[2][4] == follower then + if $data[2][6] == follower then + print ---- vgroup $data[2][0] leader locate on dnode $data[2][7] + endi + endi +else + goto check_vg_ready +endi + +if $data[3][4] == leader then + if $data[3][6] == follower then + if $data[3][8] == follower then + print ---- vgroup $data[3][0] leader locate on dnode $data[3][3] + endi + endi +elif $data[3][6] == leader then + if $data[3][4] == follower then + if $data[3][8] == follower then + print ---- vgroup $data[3][0] leader locate on dnode $data[3][5] + endi + endi +elif $data[3][8] == leader then + if $data[3][4] == follower then + if $data[3][6] == follower then + print ---- vgroup $data[3][0] leader locate on dnode $data[3][7] + endi + endi +else + goto check_vg_ready +endi + +if $data[4][4] == leader then + if $data[4][6] == follower then + if $data[4][8] == follower then + print ---- vgroup $data[4][0] leader locate on dnode $data[4][3] + endi + endi +elif $data[4][6] == leader then + if $data[4][4] == follower then + if $data[4][8] == follower then + print ---- vgroup $data[4][0] leader locate on dnode $data[4][5] + endi + endi +elif $data[4][8] == leader then + if $data[4][4] == follower then + if $data[4][6] == follower then + print ---- vgroup $data[4][0] leader locate on dnode $data[4][7] + endi + endi +else + goto check_vg_ready +endi + +vg_ready: +print ====> create stable/child table +sql create table stb (ts timestamp, c1 int, c2 float, c3 binary(10)) tags (t1 int) + +sql show stables +if $rows != 1 then + return -1 +endi + +$ctbPrefix = ctb +$ntbPrefix = ntb +$tbNum = 10 +$i = 0 +while $i < $tbNum + $ctb = $ctbPrefix . $i + sql create table $ctb using stb tags( $i ) + $ntb = $ntbPrefix . $i + sql create table $ntb (ts timestamp, c1 int, c2 float, c3 binary(10)) + $i = $i + 1 +endw + +$totalTblNum = $tbNum * 2 +sleep 1000 +sql show tables +print ====> expect $totalTblNum and infinsert $rows in fact +if $rows != $totalTblNum then + return -1 +endi + +start_switch_leader: + +$switch_loop_cnt = 0 +sql show vgroups +$dnodeId = $data[0][3] +$dnodeId = dnode . $dnodeId + +switch_leader_to_offine_loop: + +print $dnodeId +print ====> stop $dnodeId +system sh/exec.sh -n $dnodeId -s stop -x SIGINT + + +$loop_cnt = 0 +$loop_cnt = $loop_cnt + 1 +sleep 201 +if $loop_cnt == 300 then + print ====> vgroups switch fail!!! 
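+# note: the rotation below stops and restarts the dnode referenced by each vgroup row in turn ($data[1][3] through $data[4][3] over four passes) before falling through to stop_leader_to_offine_loop for the final re-check.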
+ return -1 +endi +sql show vgroups +print ===> rows: $rows +print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6] $data[0][7] $data[0][8] $data[0][9] $data[0][10] $data[0][11] +print $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6] $data[1][7] $data[1][8] $data[1][9] $data[1][10] $data[1][11] +print $data[2][0] $data[2][1] $data[2][2] $data[2][3] $data[2][4] $data[2][5] $data[2][6] $data[2][7] $data[2][8] $data[2][9] $data[2][10] $data[2][11] +print $data[3][0] $data[3][1] $data[3][2] $data[3][3] $data[3][4] $data[3][5] $data[3][6] $data[3][7] $data[3][8] $data[3][9] $data[3][10] $data[3][11] +print $data[4][0] $data[4][1] $data[4][2] $data[4][3] $data[4][4] $data[4][5] $data[4][6] $data[4][7] $data[4][8] $data[4][9] $data[4][10] $data[4][11] +if $rows != $vgroups then + return -1 +endi + + +vg_offline_1: + +print ====> start $dnodeId +system sh/exec.sh -n $dnodeId -s start + +$switch_loop_cnt = $switch_loop_cnt + 1 +print $switch_loop_cnt + +if $switch_loop_cnt == 1 then + sql show vgroups + $dnodeId = $data[1][3] + $dnodeId = dnode . $dnodeId + goto switch_leader_to_offine_loop +elif $switch_loop_cnt == 2 then + sql show vgroups + $dnodeId = $data[2][3] + $dnodeId = dnode . $dnodeId + goto switch_leader_to_offine_loop +elif $switch_loop_cnt == 3 then + sql show vgroups + $dnodeId = $data[3][3] + $dnodeId = dnode . $dnodeId + goto switch_leader_to_offine_loop +elif $switch_loop_cnt == 4 then + sql show vgroups + $dnodeId = $data[4][3] + $dnodeId = dnode . $dnodeId + goto switch_leader_to_offine_loop +else + goto stop_leader_to_offine_loop +endi + +stop_leader_to_offine_loop: + +$loop_cnt = 0 +check_vg_ready1: +$loop_cnt = $loop_cnt + 1 +print $loop_cnt +sleep 202 +if $loop_cnt == 300 then + print ====> vgroups not ready! 
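+# note: this re-check accepts a leader in any replica column for each vgroup row; if a leader exists while a peer is not yet a follower, the chain appears to fall through without retrying -- only the no-leader case loops back to check_vg_ready1.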
+ return -1 +endi + +sql show vgroups +print ===> rows: $rows +print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6] $data[0][7] $data[0][8] $data[0][9] $data[0][10] $data[0][11] +print $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6] $data[1][7] $data[1][8] $data[1][9] $data[1][10] $data[1][11] +print $data[2][0] $data[2][1] $data[2][2] $data[2][3] $data[2][4] $data[2][5] $data[2][6] $data[2][7] $data[2][8] $data[2][9] $data[2][10] $data[2][11] +print $data[3][0] $data[3][1] $data[3][2] $data[3][3] $data[3][4] $data[3][5] $data[3][6] $data[3][7] $data[3][8] $data[3][9] $data[3][10] $data[3][11] +print $data[4][0] $data[4][1] $data[4][2] $data[4][3] $data[4][4] $data[4][5] $data[4][6] $data[4][7] $data[4][8] $data[4][9] $data[4][10] $data[4][11] +if $rows != $vgroups then + return -1 +endi + +if $data[0][4] == leader then + if $data[0][6] == follower then + if $data[0][8] == follower then + print ---- vgroup $data[0][0] leader locate on dnode $data[0][3] + endi + endi +elif $data[0][6] == leader then + if $data[0][4] == follower then + if $data[0][8] == follower then + print ---- vgroup $data[0][0] leader locate on dnode $data[0][5] + endi + endi +elif $data[0][8] == leader then + if $data[0][4] == follower then + if $data[0][6] == follower then + print ---- vgroup $data[0][0] leader locate on dnode $data[0][7] + endi + endi +else + goto check_vg_ready1 +endi + +if $data[1][4] == leader then + if $data[1][6] == follower then + if $data[1][8] == follower then + print ---- vgroup $data[1][0] leader locate on dnode $data[1][3] + endi + endi +elif $data[1][6] == leader then + if $data[1][4] == follower then + if $data[1][8] == follower then + print ---- vgroup $data[1][0] leader locate on dnode $data[1][5] + endi + endi +elif $data[1][8] == leader then + if $data[1][4] == follower then + if $data[1][6] == follower then + print ---- vgroup $data[1][0] leader locate on dnode $data[1][7] + endi + endi +else + goto check_vg_ready1 +endi + +if $data[2][4] == leader then + if $data[2][6] == follower then + if $data[2][8] == follower then + print ---- vgroup $data[2][0] leader locate on dnode $data[2][3] + endi + endi +elif $data[2][6] == leader then + if $data[2][4] == follower then + if $data[2][8] == follower then + print ---- vgroup $data[2][0] leader locate on dnode $data[2][5] + endi + endi +elif $data[2][8] == leader then + if $data[2][4] == follower then + if $data[2][6] == follower then + print ---- vgroup $data[2][0] leader locate on dnode $data[2][7] + endi + endi +else + goto check_vg_ready1 +endi + +if $data[3][4] == leader then + if $data[3][6] == follower then + if $data[3][8] == follower then + print ---- vgroup $data[3][0] leader locate on dnode $data[3][3] + endi + endi +elif $data[3][6] == leader then + if $data[3][4] == follower then + if $data[3][8] == follower then + print ---- vgroup $data[3][0] leader locate on dnode $data[3][5] + endi + endi +elif $data[3][8] == leader then + if $data[3][4] == follower then + if $data[3][6] == follower then + print ---- vgroup $data[3][0] leader locate on dnode $data[3][7] + endi + endi +else + goto check_vg_ready1 +endi + +if $data[4][4] == leader then + if $data[4][6] == follower then + if $data[4][8] == follower then + print ---- vgroup $data[4][0] leader locate on dnode $data[4][3] + endi + endi +elif $data[4][6] == leader then + if $data[4][4] == follower then + if $data[4][8] == follower then + print ---- vgroup $data[4][0] leader locate on dnode $data[4][5] + endi + 
endi +elif $data[4][8] == leader then + if $data[4][4] == follower then + if $data[4][6] == follower then + print ---- vgroup $data[4][0] leader locate on dnode $data[4][7] + endi + endi +else + goto check_vg_ready1 +endi + + +print ====> final test: create stable/child table +sql create table stb1 (ts timestamp, c1 int, c2 float, c3 binary(10)) tags (t1 int) + + +sql show stables +if $rows != 2 then + return -1 +endi + +$ctbPrefix = ctb1 +$ntbPrefix = ntb1 +$tbNum = 10 +$i = 0 +while $i < $tbNum + $ctb = $ctbPrefix . $i + sql create table $ctb using stb1 tags( $i ) + $ntb = $ntbPrefix . $i + sql create table $ntb (ts timestamp, c1 int, c2 float, c3 binary(10)) + $i = $i + 1 +endw + +sleep 1000 +sql show stables +if $rows != 2 then + return -1 +endi + +sql show tables +if $rows != 40 then + return -1 +endi + + + +system sh/deploy.sh -n dnode5 -i 5 +system sh/exec.sh -n dnode5 -s start + +sql connect +sql create dnode $hostname port 7500 + +$loop_cnt = 0 +check_dnode_ready3: + $loop_cnt = $loop_cnt + 1 + sleep 200 + if $loop_cnt == 100 then + print ====> dnode not ready! + return -1 + endi + +sql show dnodes +print ===> $rows $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6] +print ===> $rows $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6] +print ===> $rows $data[2][0] $data[2][1] $data[2][2] $data[2][3] $data[2][4] $data[2][5] $data[2][6] +print ===> $rows $data[3][0] $data[3][1] $data[3][2] $data[3][3] $data[3][4] $data[3][5] $data[3][6] +print ===> $rows $data[4][0] $data[3][1] $data[3][2] $data[3][3] $data[3][4] $data[3][5] $data[3][6] + +if $rows != 5 then + return -1 +endi + +if $data[4][4] != ready then + goto check_dnode_ready3 +endi + + + +# restart clusters + +system sh/exec.sh -n dnode1 -s stop -x SIGINT +system sh/exec.sh -n dnode2 -s stop -x SIGINT +system sh/exec.sh -n dnode3 -s stop -x SIGINT +system sh/exec.sh -n dnode4 -s stop -x SIGINT +system sh/exec.sh -n dnode5 -s stop -x SIGINT + + + +system sh/exec.sh -n dnode1 -s start +system sh/exec.sh -n dnode2 -s start +system sh/exec.sh -n dnode3 -s start +system sh/exec.sh -n dnode4 -s start +system sh/exec.sh -n dnode5 -s start + + +$loop_cnt = 0 +check_dnode_ready_2: + $loop_cnt = $loop_cnt + 1 + sleep 200 + if $loop_cnt == 10 then + print ====> dnode not ready! + return -1 + endi +sql show dnodes +print ===> $rows $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6] +print ===> $rows $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6] +print ===> $rows $data[2][0] $data[2][1] $data[2][2] $data[2][3] $data[2][4] $data[2][5] $data[2][6] +print ===> $rows $data[3][0] $data[3][1] $data[3][2] $data[3][3] $data[3][4] $data[3][5] $data[3][6] +if $data[0][0] != 1 then + return -1 +endi + +if $data[0][4] != ready then + goto check_dnode_ready_2 +endi +if $data[1][4] != ready then + goto check_dnode_ready_2 +endi +if $data[2][4] != ready then + goto check_dnode_ready_2 +endi +if $data[3][4] != ready then + goto check_dnode_ready_2 +endi + +sql use db; +$ctbPrefix = ctb2 +$ntbPrefix = ntb2 +$tbNum = 10 +$i = 0 +while $i < $tbNum + $ctb = $ctbPrefix . $i + sql create table $ctb using stb1 tags( $i ) + $ntb = $ntbPrefix . 
$i + sql create table $ntb (ts timestamp, c1 int, c2 float, c3 binary(10)) + $i = $i + 1 +endw + +sleep 1000 +sql use db +sql show stables +if $rows != 2 then + return -1 +endi + +sql show tables +print $rows +if $rows != 60 then + return -1 +endi + + + +$replica = 3 +$vgroups = 5 + +print ============= create database +sql create database db1 replica $replica vgroups $vgroups + +$loop_cnt = 0 +check_db_ready1: +$loop_cnt = $loop_cnt + 1 +sleep 200 +if $loop_cnt == 100 then + print ====> db not ready! + return -1 +endi +sql show databases +print ===> rows: $rows +print $data(db1)[0] $data(db1)[1] $data(db1)[2] $data(db1)[3] $data(db1)[4] $data(db1)[5] $data(db1)[6] $data(db1)[7] $data(db1)[8] $data(db1)[9] $data(db1)[6] $data(db1)[11] $data(db1)[12] $data(db1)[13] $data(db1)[14] $data(db1)[15] $data(db1)[16] $data(db1)[17] $data(db1)[18] $data(db1)[19] +if $rows != 4 then + return -1 +endi +if $data(db1)[19] != ready then + goto check_db_ready1 +endi + + +sql use db1 + +$loop_cnt = 0 +check_vg_ready3: +$loop_cnt = $loop_cnt + 1 +print $loop_cnt +sleep 202 +if $loop_cnt == 300 then + print ====> vgroups not ready! + return -1 +endi + +sql show vgroups +print ===> rows: $rows +print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6] $data[0][7] $data[0][8] $data[0][9] $data[0][10] $data[0][11] +print $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6] $data[1][7] $data[1][8] $data[1][9] $data[1][10] $data[1][11] +print $data[2][0] $data[2][1] $data[2][2] $data[2][3] $data[2][4] $data[2][5] $data[2][6] $data[2][7] $data[2][8] $data[2][9] $data[2][10] $data[2][11] +print $data[3][0] $data[3][1] $data[3][2] $data[3][3] $data[3][4] $data[3][5] $data[3][6] $data[3][7] $data[3][8] $data[3][9] $data[3][10] $data[3][11] +print $data[4][0] $data[4][1] $data[4][2] $data[4][3] $data[4][4] $data[4][5] $data[4][6] $data[4][7] $data[4][8] $data[4][9] $data[4][10] $data[4][11] +if $rows != $vgroups then + return -1 +endi + +if $data[0][4] == leader then + if $data[0][6] == follower then + if $data[0][8] == follower then + print ---- vgroup $data[0][0] leader locate on dnode $data[0][3] + endi + endi +elif $data[0][6] == leader then + if $data[0][4] == follower then + if $data[0][8] == follower then + print ---- vgroup $data[0][0] leader locate on dnode $data[0][5] + endi + endi +elif $data[0][8] == leader then + if $data[0][4] == follower then + if $data[0][6] == follower then + print ---- vgroup $data[0][0] leader locate on dnode $data[0][7] + endi + endi +else + goto check_vg_ready3 +endi + +if $data[1][4] == leader then + if $data[1][6] == follower then + if $data[1][8] == follower then + print ---- vgroup $data[1][0] leader locate on dnode $data[1][3] + endi + endi +elif $data[1][6] == leader then + if $data[1][4] == follower then + if $data[1][8] == follower then + print ---- vgroup $data[1][0] leader locate on dnode $data[1][5] + endi + endi +elif $data[1][8] == leader then + if $data[1][4] == follower then + if $data[1][6] == follower then + print ---- vgroup $data[1][0] leader locate on dnode $data[1][7] + endi + endi +else + goto check_vg_ready3 +endi + +if $data[2][4] == leader then + if $data[2][6] == follower then + if $data[2][8] == follower then + print ---- vgroup $data[2][0] leader locate on dnode $data[2][3] + endi + endi +elif $data[2][6] == leader then + if $data[2][4] == follower then + if $data[2][8] == follower then + print ---- vgroup $data[2][0] leader locate on dnode $data[2][5] + endi + endi +elif $data[2][8] == 
leader then + if $data[2][4] == follower then + if $data[2][6] == follower then + print ---- vgroup $data[2][0] leader locate on dnode $data[2][7] + endi + endi +else + goto check_vg_ready3 +endi + +if $data[3][4] == leader then + if $data[3][6] == follower then + if $data[3][8] == follower then + print ---- vgroup $data[3][0] leader locate on dnode $data[3][3] + endi + endi +elif $data[3][6] == leader then + if $data[3][4] == follower then + if $data[3][8] == follower then + print ---- vgroup $data[3][0] leader locate on dnode $data[3][5] + endi + endi +elif $data[3][8] == leader then + if $data[3][4] == follower then + if $data[3][6] == follower then + print ---- vgroup $data[3][0] leader locate on dnode $data[3][7] + endi + endi +else + goto check_vg_ready3 +endi + +if $data[4][4] == leader then + if $data[4][6] == follower then + if $data[4][8] == follower then + print ---- vgroup $data[4][0] leader locate on dnode $data[4][3] + endi + endi +elif $data[4][6] == leader then + if $data[4][4] == follower then + if $data[4][8] == follower then + print ---- vgroup $data[4][0] leader locate on dnode $data[4][5] + endi + endi +elif $data[4][8] == leader then + if $data[4][4] == follower then + if $data[4][6] == follower then + print ---- vgroup $data[4][0] leader locate on dnode $data[4][7] + endi + endi +else + goto check_vg_ready3 +endi + +# sql drop dnode 5 + +system sh/exec.sh -n dnode1 -s stop -x SIGINT +system sh/exec.sh -n dnode2 -s stop -x SIGINT +system sh/exec.sh -n dnode3 -s stop -x SIGINT +system sh/exec.sh -n dnode4 -s stop -x SIGINT + + diff --git a/tests/script/tsim/sync/oneReplica1VgElect.sim b/tests/script/tsim/sync/oneReplica1VgElect.sim index bb9b3f449640818d888137721350b0cea90eebae..cf8912e654c04314e96a5fb4a718a3569ddea5f5 100644 --- a/tests/script/tsim/sync/oneReplica1VgElect.sim +++ b/tests/script/tsim/sync/oneReplica1VgElect.sim @@ -31,7 +31,7 @@ if $data[0][4] != ready then goto check_dnode_ready endi -#sql connect +sql connect sql create dnode $hostname port 7200 sql create dnode $hostname port 7300 sql create dnode $hostname port 7400 @@ -66,144 +66,99 @@ $vgroups = 1 $replica = 1 print ============= create database -sql create database db replica $replica vgroups $vgroups +sql create database db1 replica $replica vgroups $vgroups $loop_cnt = 0 check_db_ready: $loop_cnt = $loop_cnt + 1 sleep 200 -if $loop_cnt == 10 then - print ====> db not ready! +if $loop_cnt == 100 then + print ====> db1 not ready! return -1 endi sql show databases print ===> rows: $rows -print $data(db)[0] $data(db)[1] $data(db)[2] $data(db)[3] $data(db)[4] $data(db)[5] $data(db)[6] $data(db)[7] $data(db)[8] $data(db)[9] $data(db)[10] $data(db)[11] $data(db)[12] +print $data(db1)[0] $data(db)[1] $data(db)[2] $data(db)[3] $data(db)[4] $data(db)[5] $data(db)[6] $data(db)[7] $data(db)[8] $data(db)[9] $data(db)[10] $data(db)[11] $data(db)[12] print $data(db)[13] $data(db)[14] $data(db)[15] $data(db)[16] $data(db)[17] $data(db)[18] $data(db)[19] $data(db)[20] if $rows != 3 then return -1 endi -if $data(db)[19] != ready then +if $data(db1)[19] != ready then goto check_db_ready endi -sql use db +sql use db1 $loop_cnt = 0 check_vg_ready: $loop_cnt = $loop_cnt + 1 sleep 200 -if $loop_cnt == 10 then +if $loop_cnt == 300 then print ====> vgroups not ready! 
return -1 endi sql show vgroups print ===> rows: $rows -print $data(2)[0] $data(2)[1] $data(2)[2] $data(2)[3] $data(2)[4] $data(2)[5] $data(2)[6] $data(2)[7] $data(2)[8] $data(2)[9] $data(2)[10] $data(2)[11] $data(2)[12] $data(2)[13] -print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6] $data[0][7] $data[0][8] $data[0][9] $data[10][6] $data[0][11] $data[0][12] $data[0][13] +print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6] $data[0][7] $data[0][8] $data[0][9] $data[0][6] $data[0][11] $data[0][12] $data[0][13] if $rows != $vgroups then return -1 endi -if $data[0][4] == LEADER then - if $data[0][6] != NULL then - goto check_vg_ready - endi - if $data[0][8] != NULL then - goto check_vg_ready - endi +if $data[0][4] == leader then print ---- vgroup $data[0][0] leader locate on dnode $data[0][3] - goto vg_ready -endi -if $data[0][6] == LEADER then - if $data[0][4] != NULL then - goto check_vg_ready - endi - if $data[0][8] != NULL then - goto check_vg_ready - endi + goto vg_ready +elif $data[0][6] == leader then print ---- vgroup $data[0][0] leader locate on dnode $data[0][5] - goto vg_ready -endi -if $data[0][8] == LEADER then - if $data[0][4] != NULL then - goto check_vg_ready - endi - if $data[0][6] != NULL then - goto check_vg_ready - endi + goto vg_ready +elif $data[0][8] == leader then print ---- vgroup $data[0][0] leader locate on dnode $data[0][7] - goto vg_ready + goto vg_ready +else + goto check_vg_ready endi -vg_ready: -print ====> create stable/child table, insert data, and select -sql create table if not exists stb (ts timestamp, c1 int, c2 float, c3 binary(10)) tags (t1 int) +vg_ready: +print ====> create stable/child table +sql create table stb (ts timestamp, c1 int, c2 float, c3 binary(10)) tags (t1 int) sql show stables if $rows != 1 then return -1 endi + $ctbPrefix = ctb $ntbPrefix = ntb $tbNum = 10 -$rowNum = 10 -$tstart = 1640966400000 # 2022-01-01 00:00:00.000 - $i = 0 while $i < $tbNum $ctb = $ctbPrefix . $i sql create table $ctb using stb tags( $i ) $ntb = $ntbPrefix . $i sql create table $ntb (ts timestamp, c1 int, c2 float, c3 binary(10)) - - $x = 0 - while $x < $rowNum - $binary = ' . binary - $binary = $binary . $i - $binary = $binary . 
' - - sql insert into $ctb values ($tstart , $i , $x , $binary ) - sql insert into $ntb values ($tstart , 999 , 999 , 'binary-ntb' ) - $tstart = $tstart + 1 - $x = $x + 1 - endw - - print ====> insert rows: $rowNum into $ctb and $ntb - $i = $i + 1 - $tstart = 1640966400000 endw $totalTblNum = $tbNum * 2 +sleep 1000 sql show tables +print ====> expect $totalTblNum and infinsert $rows in fact if $rows != $totalTblNum then return -1 endi -sql select count(*) from ntb0 -print rows: $rows -print $data[0][0] $data[0][1] -if $data[0][0] != $rowNum then - return -1 -endi +start_switch_leader: -$totalRowsOfStb = $rowNum * $tbNum -sql select count(*) from stb -print rows: $rows -print $data[0][0] $data[0][1] -if $data[0][0] != $totalRowsOfStb then - return -1 -endi +$switch_loop_cnt = 0 +switch_leader_to_offine_loop: print ====> finde vnode of leader, and stop the dnode where the vnode is located, and query stb/ntb count(*) sql show vgroups -print $data(2)[0] $data(2)[1] $data(2)[2] $data(2)[3] $data(2)[4] $data(2)[5] $data(2)[6] $data(2)[7] $data(2)[8] $data(2)[9] $data(2)[10] $data(2)[11] $data(2)[12] $data(2)[13] -if $data[0][4] == LEADER then +print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6] $data[0][7] $data[0][8] $data[0][9] $data[0][6] $data[0][11] $data[0][12] $data[0][13] +if $data[0][4] == leader then $dnodeId = $data[0][3] -elif $data[0][6] == LEADER then +elif $data[0][6] == leader then $dnodeId = $data[0][5] -elif $data[0][8] == LEADER then +elif $data[0][8] == leader then $dnodeId = $data[0][7] else print ====> no leader vnode!!! @@ -213,148 +168,78 @@ endi $dnodeId = dnode . $dnodeId print ====> stop $dnodeId system sh/exec.sh -n $dnodeId -s stop -x SIGINT +#print ====> start $dnodeId +#system sh/exec.sh -n $dnodeId -s start $loop_cnt = 0 check_vg_ready_2: $loop_cnt = $loop_cnt + 1 sleep 200 -if $loop_cnt == 10 then +if $loop_cnt == 300 then print ====> vgroups switch fail!!! 
return -1 endi sql show vgroups print ===> rows: $rows -print $data(2)[0] $data(2)[1] $data(2)[2] $data(2)[3] $data(2)[4] $data(2)[5] $data(2)[6] $data(2)[7] $data(2)[8] $data(2)[9] $data(2)[10] $data(2)[11] $data(2)[12] $data(2)[13] -print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6] $data[0][7] $data[0][8] $data[0][9] $data[10][6] $data[0][11] $data[0][12] $data[0][13] +print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6] $data[0][7] $data[0][8] $data[0][9] $data[0][6] $data[0][11] $data[0][12] $data[0][13] if $rows != $vgroups then return -1 endi -if $data[0][4] == LEADER then - if $data[0][6] != NULL then - goto check_vg_ready_2 - endi - if $data[0][8] != NULL then - goto check_vg_ready_2 - endi - print ---- vgroup $data[0][0] leader switch to dnode $data[0][3] - goto vg_ready_2 -endi -if $data[0][6] == LEADER then - if $data[0][4] != NULL then - goto check_vg_ready_2 - endi - if $data[0][8] != NULL then - goto check_vg_ready_2 - endi - print ---- vgroup $data[0][0] leader switch to dnode $data[0][5] - goto vg_ready_2 -endi -if $data[0][8] == LEADER then - if $data[0][4] != NULL then - goto check_vg_ready_2 - endi - if $data[0][6] != NULL then - goto check_vg_ready_2 - endi - print ---- vgroup $data[0][0] leader switch to dnode $data[0][7] - goto vg_ready_2 -endi -vg_ready_2: -sql select count(*) from ntb0 -print rows: $rows -print $data[0][0] $data[0][1] -if $data[0][0] != $rowNum then - return -1 -endi - -sql select count(*) from ctb0 -print rows: $rows -print $data[0][0] $data[0][1] -if $data[0][0] != $rowNum then - return -1 -endi +if $data[0][4] == offline then + print ---- vgroup $dnodeId leader switch to offline + goto vg_offline_1 +elif $data[0][6] == offline then + print ---- vgroup $dnodeId leader switch to offline + goto vg_offline_1 +elif $data[0][8] == offline then + print ---- vgroup $dnodeId leader switch to offline + goto vg_offline_1 +else + goto check_vg_ready_2 +endi -sql select count(*) from stb -print rows: $rows -print $data[0][0] $data[0][1] -if $data[0][0] != $totalRowsOfStb then - return -1 -endi +vg_offline_1: -print ====> stop and start all dnode(not include the dnode where mnode is located), then query -system sh/exec.sh -n dnode2 -s stop -x SIGINT -system sh/exec.sh -n dnode3 -s stop -x SIGINT -system sh/exec.sh -n dnode4 -s stop -x SIGINT -system sh/exec.sh -n dnode4 -s start -system sh/exec.sh -n dnode3 -s start -system sh/exec.sh -n dnode2 -s start +print ====> start $dnodeId +system sh/exec.sh -n $dnodeId -s start -$loop_cnt = 0 -check_vg_ready_1: -$loop_cnt = $loop_cnt + 1 +$loop_cnt1= 0 +check_vg1_ready: +$loop_cnt1 = $loop_cnt1 + 1 sleep 200 -if $loop_cnt == 10 then - print ====> after restart dnode, vgroups not ready! +if $loop_cnt1 == 300 then + print ====> vgroups not ready! 
  return -1
endi
sql show vgroups
print ===> rows: $rows
-print $data(2)[0] $data(2)[1] $data(2)[2] $data(2)[3] $data(2)[4] $data(2)[5] $data(2)[6] $data(2)[7] $data(2)[8] $data(2)[9] $data(2)[10] $data(2)[11] $data(2)[12] $data(2)[13]
-print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6] $data[0][7] $data[0][8] $data[0][9] $data[10][6] $data[0][11] $data[0][12] $data[0][13]
+print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6] $data[0][7] $data[0][8] $data[0][9] $data[0][10] $data[0][11] $data[0][12] $data[0][13]
if $rows != $vgroups then
  return -1
endi
-if $data[0][4] == LEADER then
-  if $data[0][6] != NULL then
-    goto check_vg_ready_1
-  endi
-  if $data[0][8] != NULL then
-    goto check_vg_ready_1
-  endi
-  goto vg_ready_1
-endi
-if $data[0][6] == LEADER then
-  if $data[0][4] != NULL then
-    goto check_vg_ready_1
-  endi
-  if $data[0][8] != NULL then
-    goto check_vg_ready_1
-  endi
-  goto vg_ready_1
-endi
-if $data[0][8] == LEADER then
-  if $data[0][4] != NULL then
-    goto check_vg_ready_1
-  endi
-  if $data[0][6] != NULL then
-    goto check_vg_ready_1
-  endi
-  goto vg_ready_1
+if $data[0][4] == leader then
+  print ---- vgroup $data[0][0] leader locate on dnode $data[0][3]
+  goto continue_loop
+elif $data[0][6] == leader then
+  print ---- vgroup $data[0][0] leader locate on dnode $data[0][5]
+  goto continue_loop
+elif $data[0][8] == leader then
+  print ---- vgroup $data[0][0] leader locate on dnode $data[0][7]
+  goto continue_loop
+else
+  goto check_vg1_ready
endi

-vg_ready_1:
-print ====> after restart dnode2/dnode3/dnode4, query stb/ntb count(*)
-sql select count(*) from ntb0
-print rows: $rows
-print $data[0][0] $data[0][1]
-if $data[0][0] != $rowNum then
-  return -1
-endi
+continue_loop:

-sql select count(*) from ctb0
-print rows: $rows
-print $data[0][0] $data[0][1]
-if $data[0][0] != $rowNum then
-  return -1
+$switch_loop_cnt = $switch_loop_cnt + 1
+print $switch_loop_cnt
+if $switch_loop_cnt < 4 then
+  goto switch_leader_to_offline_loop
endi

-sql select count(*) from stb
-print rows: $rows
-print $data[0][0] $data[0][1]
-if $data[0][0] != $totalRowsOfStb then
-  return -1
-endi
+stop_leader_to_offline_loop:

system sh/exec.sh -n dnode1 -s stop -x SIGINT
system sh/exec.sh -n dnode2 -s stop -x SIGINT
diff --git a/tests/script/tsim/sync/oneReplica1VgElectWithInsert.sim b/tests/script/tsim/sync/oneReplica1VgElectWithInsert.sim
index 7ceeb2806b320014c2b35ea5c640063e44793063..06a67b3c1bfdf183f919fb7ac9c861055f566f42 100644
--- a/tests/script/tsim/sync/oneReplica1VgElectWithInsert.sim
+++ b/tests/script/tsim/sync/oneReplica1VgElectWithInsert.sim
@@ -104,7 +104,7 @@ print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $d
if $rows != $vgroups then
  return -1
endi
-if $data[0][4] == LEADER then
+if $data[0][4] == leader then
  if $data[0][6] != NULL then
    goto check_vg_ready
  endi
@@ -114,7 +114,7 @@ if $data[0][4] == LEADER then
  print ---- vgroup $data[0][0] leader locate on dnode $data[0][3]
  goto vg_ready
endi
-if $data[0][6] == LEADER then
+if $data[0][6] == leader then
  if $data[0][4] != NULL then
    goto check_vg_ready
  endi
@@ -124,7 +124,7 @@ if $data[0][6] == LEADER then
  print ---- vgroup $data[0][0] leader locate on dnode $data[0][5]
  goto vg_ready
endi
-if $data[0][8] == LEADER then
+if $data[0][8] == leader then
  if $data[0][4] != NULL then
    goto check_vg_ready
  endi
@@ -208,11 +208,11 @@ switch_leader_loop:
print ====> find vnode of leader, and stop the dnode where the vnode is located, and query stb/ntb count(*)
sql show vgroups
print $data(2)[0] $data(2)[1] $data(2)[2] $data(2)[3] $data(2)[4] $data(2)[5] $data(2)[6] $data(2)[7] $data(2)[8] $data(2)[9] $data(2)[10] $data(2)[11] $data(2)[12] $data(2)[13]
-if $data[0][4] == LEADER then
+if $data[0][4] == leader then
  $dnodeId = $data[0][3]
-elif $data[0][6] == LEADER then
+elif $data[0][6] == leader then
  $dnodeId = $data[0][5]
-elif $data[0][8] == LEADER then
+elif $data[0][8] == leader then
  $dnodeId = $data[0][7]
else
  print ====> no leader vnode!!!
@@ -238,7 +238,7 @@ print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $d
if $rows != $vgroups then
  return -1
endi
-if $data[0][4] == LEADER then
+if $data[0][4] == leader then
  if $data[0][6] != NULL then
    goto check_vg_ready_2
  endi
@@ -248,7 +248,7 @@ if $data[0][4] == LEADER then
  print ---- vgroup $data[0][0] leader switch to dnode $data[0][3]
  goto vg_ready_2
endi
-if $data[0][6] == LEADER then
+if $data[0][6] == leader then
  if $data[0][4] != NULL then
    goto check_vg_ready_2
  endi
@@ -258,7 +258,7 @@ if $data[0][6] == LEADER then
  print ---- vgroup $data[0][0] leader switch to dnode $data[0][5]
  goto vg_ready_2
endi
-if $data[0][8] == LEADER then
+if $data[0][8] == leader then
  if $data[0][4] != NULL then
    goto check_vg_ready_2
  endi
@@ -343,7 +343,7 @@ print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $d
if $rows != $vgroups then
  return -1
endi
-if $data[0][4] == LEADER then
+if $data[0][4] == leader then
  if $data[0][6] != NULL then
    goto check_vg_ready_1
  endi
@@ -352,7 +352,7 @@ if $data[0][4] == LEADER then
  endi
  goto vg_ready_1
endi
-if $data[0][6] == LEADER then
+if $data[0][6] == leader then
  if $data[0][4] != NULL then
    goto check_vg_ready_1
  endi
@@ -361,7 +361,7 @@ if $data[0][6] == LEADER then
  endi
  goto vg_ready_1
endi
-if $data[0][8] == LEADER then
+if $data[0][8] == leader then
  if $data[0][4] != NULL then
    goto check_vg_ready_1
  endi
diff --git a/tests/script/tsim/sync/oneReplica5VgElect.sim b/tests/script/tsim/sync/oneReplica5VgElect.sim
new file mode 100644
index 0000000000000000000000000000000000000000..5af48c7491208c8f3a440665fa7bb6919c373a46
--- /dev/null
+++ b/tests/script/tsim/sync/oneReplica5VgElect.sim
@@ -0,0 +1,417 @@
+system sh/stop_dnodes.sh
+system sh/deploy.sh -n dnode1 -i 1
+system sh/deploy.sh -n dnode2 -i 2
+system sh/deploy.sh -n dnode3 -i 3
+system sh/deploy.sh -n dnode4 -i 4
+
+system sh/cfg.sh -n dnode1 -c supportVnodes -v 0
+
+system sh/exec.sh -n dnode1 -s start
+system sh/exec.sh -n dnode2 -s start
+system sh/exec.sh -n dnode3 -s start
+system sh/exec.sh -n dnode4 -s start
+
+$loop_cnt = 0
+check_dnode_ready:
+  $loop_cnt = $loop_cnt + 1
+  sleep 200
+  if $loop_cnt == 10 then
+    print ====> dnode not ready!
+    return -1
+  endi
+sql show dnodes
+print ===> $rows $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6]
+print ===> $rows $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6]
+print ===> $rows $data[2][0] $data[2][1] $data[2][2] $data[2][3] $data[2][4] $data[2][5] $data[2][6]
+print ===> $rows $data[3][0] $data[3][1] $data[3][2] $data[3][3] $data[3][4] $data[3][5] $data[3][6]
+if $data[0][0] != 1 then
+  return -1
+endi
+if $data[0][4] != ready then
+  goto check_dnode_ready
+endi
+
+sql connect
+sql create dnode $hostname port 7200
+sql create dnode $hostname port 7300
+sql create dnode $hostname port 7400
+
+$loop_cnt = 0
+check_dnode_ready_1:
+$loop_cnt = $loop_cnt + 1
+sleep 200
+if $loop_cnt == 10 then
+  print ====> dnodes not ready!
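+# The cluster above keeps dnode1 as a pure mnode host (supportVnodes 0), so the
+# five single-replica vgroups can only land on dnode2..dnode4. A commented
+# sketch of growing the data plane by one more dnode (the port is illustrative,
+# following the 7200/7300/7400 convention used here):
+#sql create dnode $hostname port 7500
+#sql show dnodes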
+  return -1
+endi
+sql show dnodes
+print ===> $rows $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6]
+print ===> $rows $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6]
+print ===> $rows $data[2][0] $data[2][1] $data[2][2] $data[2][3] $data[2][4] $data[2][5] $data[2][6]
+print ===> $rows $data[3][0] $data[3][1] $data[3][2] $data[3][3] $data[3][4] $data[3][5] $data[3][6]
+if $data[0][4] != ready then
+  goto check_dnode_ready_1
+endi
+if $data[1][4] != ready then
+  goto check_dnode_ready_1
+endi
+if $data[2][4] != ready then
+  goto check_dnode_ready_1
+endi
+if $data[3][4] != ready then
+  goto check_dnode_ready_1
+endi
+
+$replica = 1
+$vgroups = 5
+
+print ============= create database
+sql create database db1 replica $replica vgroups $vgroups
+
+$loop_cnt = 0
+check_db_ready:
+$loop_cnt = $loop_cnt + 1
+sleep 200
+if $loop_cnt == 100 then
+  print ====> db1 not ready!
+  return -1
+endi
+sql show databases
+print ===> rows: $rows
+print $data(db1)[0] $data(db1)[1] $data(db1)[2] $data(db1)[3] $data(db1)[4] $data(db1)[5] $data(db1)[6] $data(db1)[7] $data(db1)[8] $data(db1)[9] $data(db1)[10] $data(db1)[11] $data(db1)[12]
+if $rows != 3 then
+  return -1
+endi
+if $data(db1)[19] != ready then
+  goto check_db_ready
+endi
+
+sql use db1
+
+$loop_cnt = 0
+check_vg_ready:
+$loop_cnt = $loop_cnt + 1
+sleep 200
+if $loop_cnt == 300 then
+  print ====> vgroups not ready!
+  return -1
+endi
+
+sql show vgroups
+print ===> rows: $rows
+print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6] $data[0][7] $data[0][8] $data[0][9] $data[0][10] $data[0][11] $data[0][12] $data[0][13]
+if $rows != $vgroups then
+  return -1
+endi
+
+if $data[0][4] == leader then
+  print ---- vgroup $data[0][0] leader locate on dnode $data[0][3]
+elif $data[0][6] == leader then
+  print ---- vgroup $data[0][0] leader locate on dnode $data[0][5]
+elif $data[0][8] == leader then
+  print ---- vgroup $data[0][0] leader locate on dnode $data[0][7]
+else
+  goto check_vg_ready
+endi
+
+if $data[1][4] == leader then
+  print ---- vgroup $data[1][0] leader locate on dnode $data[1][3]
+elif $data[1][6] == leader then
+  print ---- vgroup $data[1][0] leader locate on dnode $data[1][5]
+elif $data[1][8] == leader then
+  print ---- vgroup $data[1][0] leader locate on dnode $data[1][7]
+else
+  goto check_vg_ready
+endi
+
+if $data[2][4] == leader then
+  print ---- vgroup $data[2][0] leader locate on dnode $data[2][3]
+elif $data[2][6] == leader then
+  print ---- vgroup $data[2][0] leader locate on dnode $data[2][5]
+elif $data[2][8] == leader then
+  print ---- vgroup $data[2][0] leader locate on dnode $data[2][7]
+else
+  goto check_vg_ready
+endi
+
+if $data[3][4] == leader then
+  print ---- vgroup $data[3][0] leader locate on dnode $data[3][3]
+elif $data[3][6] == leader then
+  print ---- vgroup $data[3][0] leader locate on dnode $data[3][5]
+elif $data[3][8] == leader then
+  print ---- vgroup $data[3][0] leader locate on dnode $data[3][7]
+else
+  goto check_vg_ready
+endi
+
+if $data[4][4] == leader then
+  print ---- vgroup $data[4][0] leader locate on dnode $data[4][3]
+elif $data[4][6] == leader then
+  print ---- vgroup $data[4][0] leader locate on dnode $data[4][5]
+elif $data[4][8] == leader then
+  print ---- vgroup $data[4][0] leader locate on dnode $data[4][7]
+else
+  goto check_vg_ready
+endi
+
+vg_ready:
+print ====> create stable/child table
+sql create table stb (ts timestamp, c1 int, c2 float, c3 binary(10)) tags (t1 int)
+
+sql show stables
+if $rows != 1 then
+  return -1
+endi
+
+$ctbPrefix = ctb
+$ntbPrefix = ntb
+$tbNum = 10
+$i = 0
+while $i < $tbNum
+  $ctb = $ctbPrefix . $i
+  sql create table $ctb using stb tags( $i )
+  $ntb = $ntbPrefix . $i
+  sql create table $ntb (ts timestamp, c1 int, c2 float, c3 binary(10))
+  $i = $i + 1
+endw
+
+$totalTblNum = $tbNum * 2
+sleep 1000
+sql show tables
+print ====> expect $totalTblNum tables, and $rows in fact
+if $rows != $totalTblNum then
+  return -1
+endi
+
+start_switch_leader:
+
+$switch_loop_cnt = 0
+sql show vgroups
+$dnodeId = $data[0][3]
+$dnodeId = dnode . $dnodeId
+
+switch_leader_to_offline_loop:
+
+print $dnodeId
+print ====> stop $dnodeId
+system sh/exec.sh -n $dnodeId -s stop -x SIGINT
+
+
+$loop_cnt = 0
+check_vg_ready_2:
+$loop_cnt = $loop_cnt + 1
+sleep 200
+if $loop_cnt == 300 then
+  print ====> vgroups switch fail!!!
+  return -1
+endi
+sql show vgroups
+print ===> rows: $rows
+print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6] $data[0][7] $data[0][8] $data[0][9] $data[0][10] $data[0][11] $data[0][12] $data[0][13]
+print $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6] $data[1][7] $data[1][8] $data[1][9] $data[1][10] $data[1][11] $data[1][12] $data[1][13]
+print $data[2][0] $data[2][1] $data[2][2] $data[2][3] $data[2][4] $data[2][5] $data[2][6] $data[2][7] $data[2][8] $data[2][9] $data[2][10] $data[2][11] $data[2][12] $data[2][13]
+print $data[3][0] $data[3][1] $data[3][2] $data[3][3] $data[3][4] $data[3][5] $data[3][6] $data[3][7] $data[3][8] $data[3][9] $data[3][10] $data[3][11] $data[3][12] $data[3][13]
+print $data[4][0] $data[4][1] $data[4][2] $data[4][3] $data[4][4] $data[4][5] $data[4][6] $data[4][7] $data[4][8] $data[4][9] $data[4][10] $data[4][11] $data[4][12] $data[4][13]
+if $rows != $vgroups then
+  return -1
+endi
+
+
+vg_offline_1:
+
+print ====> start $dnodeId
+system sh/exec.sh -n $dnodeId -s start
+
+$switch_loop_cnt = $switch_loop_cnt + 1
+print $switch_loop_cnt
+
+if $switch_loop_cnt == 1 then
+  sql show vgroups
+  $dnodeId = $data[1][3]
+  $dnodeId = dnode . $dnodeId
+  goto switch_leader_to_offline_loop
+elif $switch_loop_cnt == 2 then
+  sql show vgroups
+  $dnodeId = $data[2][3]
+  $dnodeId = dnode . $dnodeId
+  goto switch_leader_to_offline_loop
+elif $switch_loop_cnt == 3 then
+  sql show vgroups
+  $dnodeId = $data[3][3]
+  $dnodeId = dnode . $dnodeId
+  goto switch_leader_to_offline_loop
+elif $switch_loop_cnt == 4 then
+  sql show vgroups
+  $dnodeId = $data[4][3]
+  $dnodeId = dnode . $dnodeId
+  goto switch_leader_to_offline_loop
+else
+  goto stop_leader_to_offline_loop
+endi
+
+stop_leader_to_offline_loop:
+
+$loop_cnt = 0
+check_vg_ready1:
+$loop_cnt = $loop_cnt + 1
+print $loop_cnt
+sleep 200
+if $loop_cnt == 300 then
+  print ====> vgroups not ready!
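+# A commented sketch of the rotation driven by $switch_loop_cnt above: take the
+# hosting dnode of the next vgroup from column 3 of `show vgroups`, then cycle
+# that dnode; the row index 2 is illustrative.
+#sql show vgroups
+#$dnodeId = $data[2][3]
+#$dnodeId = dnode . $dnodeId
+#system sh/exec.sh -n $dnodeId -s stop -x SIGINT
+#system sh/exec.sh -n $dnodeId -s start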
+  return -1
+endi
+
+sql show vgroups
+print ===> rows: $rows
+print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6] $data[0][7] $data[0][8] $data[0][9] $data[0][10] $data[0][11] $data[0][12] $data[0][13]
+print $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6] $data[1][7] $data[1][8] $data[1][9] $data[1][10] $data[1][11] $data[1][12] $data[1][13]
+print $data[2][0] $data[2][1] $data[2][2] $data[2][3] $data[2][4] $data[2][5] $data[2][6] $data[2][7] $data[2][8] $data[2][9] $data[2][10] $data[2][11] $data[2][12] $data[2][13]
+print $data[3][0] $data[3][1] $data[3][2] $data[3][3] $data[3][4] $data[3][5] $data[3][6] $data[3][7] $data[3][8] $data[3][9] $data[3][10] $data[3][11] $data[3][12] $data[3][13]
+print $data[4][0] $data[4][1] $data[4][2] $data[4][3] $data[4][4] $data[4][5] $data[4][6] $data[4][7] $data[4][8] $data[4][9] $data[4][10] $data[4][11] $data[4][12] $data[4][13]
+if $rows != $vgroups then
+  return -1
+endi
+
+if $data[0][4] == leader then
+  print ---- vgroup $data[0][0] leader locate on dnode $data[0][3]
+elif $data[0][6] == leader then
+  print ---- vgroup $data[0][0] leader locate on dnode $data[0][5]
+elif $data[0][8] == leader then
+  print ---- vgroup $data[0][0] leader locate on dnode $data[0][7]
+else
+  goto check_vg_ready1
+endi
+
+if $data[1][4] == leader then
+  print ---- vgroup $data[1][0] leader locate on dnode $data[1][3]
+elif $data[1][6] == leader then
+  print ---- vgroup $data[1][0] leader locate on dnode $data[1][5]
+elif $data[1][8] == leader then
+  print ---- vgroup $data[1][0] leader locate on dnode $data[1][7]
+else
+  goto check_vg_ready1
+endi
+
+if $data[2][4] == leader then
+  print ---- vgroup $data[2][0] leader locate on dnode $data[2][3]
+elif $data[2][6] == leader then
+  print ---- vgroup $data[2][0] leader locate on dnode $data[2][5]
+elif $data[2][8] == leader then
+  print ---- vgroup $data[2][0] leader locate on dnode $data[2][7]
+else
+  goto check_vg_ready1
+endi
+
+if $data[3][4] == leader then
+  print ---- vgroup $data[3][0] leader locate on dnode $data[3][3]
+elif $data[3][6] == leader then
+  print ---- vgroup $data[3][0] leader locate on dnode $data[3][5]
+elif $data[3][8] == leader then
+  print ---- vgroup $data[3][0] leader locate on dnode $data[3][7]
+else
+  goto check_vg_ready1
+endi
+
+if $data[4][4] == leader then
+  print ---- vgroup $data[4][0] leader locate on dnode $data[4][3]
+elif $data[4][6] == leader then
+  print ---- vgroup $data[4][0] leader locate on dnode $data[4][5]
+elif $data[4][8] == leader then
+  print ---- vgroup $data[4][0] leader locate on dnode $data[4][7]
+else
+  goto check_vg_ready1
+endi
+
+
+print ====> final test: create stable/child table
+sql create table stb1 (ts timestamp, c1 int, c2 float, c3 binary(10)) tags (t1 int)
+
+
+sql show stables
+if $rows != 2 then
+  return -1
+endi
+
+$ctbPrefix = ctb1
+$ntbPrefix = ntb1
+$tbNum = 10
+$i = 0
+while $i < $tbNum
+  $ctb = $ctbPrefix . $i
+  sql create table $ctb using stb1 tags( $i )
+  $ntb = $ntbPrefix . $i
+  sql create table $ntb (ts timestamp, c1 int, c2 float, c3 binary(10))
+  $i = $i + 1
+endw
+
+
+sql show stables
+if $rows != 2 then
+  return -1
+endi
+
+sql show tables
+if $rows != 40 then
+  return -1
+endi
+
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
+system sh/exec.sh -n dnode2 -s stop -x SIGINT
+system sh/exec.sh -n dnode3 -s stop -x SIGINT
+system sh/exec.sh -n dnode4 -s stop -x SIGINT
+
+
+
+system sh/exec.sh -n dnode1 -s start
+system sh/exec.sh -n dnode2 -s start
+system sh/exec.sh -n dnode3 -s start
+system sh/exec.sh -n dnode4 -s start
+
+
+
+$loop_cnt = 0
+check_dnode_ready_2:
+  $loop_cnt = $loop_cnt + 1
+  sleep 200
+  if $loop_cnt == 10 then
+    print ====> dnode not ready!
+    return -1
+  endi
+sql show dnodes
+print ===> $rows $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6]
+print ===> $rows $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6]
+print ===> $rows $data[2][0] $data[2][1] $data[2][2] $data[2][3] $data[2][4] $data[2][5] $data[2][6]
+print ===> $rows $data[3][0] $data[3][1] $data[3][2] $data[3][3] $data[3][4] $data[3][5] $data[3][6]
+if $data[0][0] != 1 then
+  return -1
+endi
+
+if $data[0][4] != ready then
+  goto check_dnode_ready_2
+endi
+if $data[1][4] != ready then
+  goto check_dnode_ready_2
+endi
+if $data[2][4] != ready then
+  goto check_dnode_ready_2
+endi
+if $data[3][4] != ready then
+  goto check_dnode_ready_2
+endi
+
+sql use db1
+sql show stables
+if $rows != 2 then
+  return -1
+endi
+
+sql show tables
+if $rows != 40 then
+  return -1
+endi
+
+
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
+system sh/exec.sh -n dnode2 -s stop -x SIGINT
+system sh/exec.sh -n dnode3 -s stop -x SIGINT
+system sh/exec.sh -n dnode4 -s stop -x SIGINT
diff --git a/tests/script/tsim/sync/threeReplica1VgElect.sim b/tests/script/tsim/sync/threeReplica1VgElect.sim
index 1496d7c778b475895479eb3661fae7ad86a121d3..c3e9c13793466ecdd57890d7e48a71f5b04ca190 100644
--- a/tests/script/tsim/sync/threeReplica1VgElect.sim
+++ b/tests/script/tsim/sync/threeReplica1VgElect.sim
@@ -104,7 +104,7 @@ print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $d
if $rows != $vgroups then
  return -1
endi
-if $data[0][4] == LEADER then
+if $data[0][4] == leader then
  if $data[0][6] != FLLOWER then
    goto check_vg_ready
  endi
@@ -114,7 +114,7 @@ if $data[0][4] == LEADER then
  print ---- vgroup $data[0][0] leader locate on dnode $data[0][3]
  goto vg_ready
endi
-if $data[0][6] == LEADER then
+if $data[0][6] == leader then
  if $data[0][4] != FLLOWER then
    goto check_vg_ready
  endi
@@ -124,7 +124,7 @@ if $data[0][6] == LEADER then
  print ---- vgroup $data[0][0] leader locate on dnode $data[0][5]
  goto vg_ready
endi
-if $data[0][8] == LEADER then
+if $data[0][8] == leader then
  if $data[0][4] != FLLOWER then
    goto check_vg_ready
  endi
@@ -199,11 +199,11 @@ endi
print ====> find vnode of leader, and stop the dnode where the vnode is located, and query stb/ntb count(*)
sql show vgroups
print $data(2)[0] $data(2)[1] $data(2)[2] $data(2)[3] $data(2)[4] $data(2)[5] $data(2)[6] $data(2)[7] $data(2)[8] $data(2)[9] $data(2)[10] $data(2)[11] $data(2)[12] $data(2)[13]
-if $data[0][4] == LEADER then
+if $data[0][4] == leader then
  $dnodeId = $data[0][3]
-elif $data[0][6] == LEADER then
+elif $data[0][6] == leader then
  $dnodeId = $data[0][5]
-elif $data[0][8] == LEADER then
+elif $data[0][8] == leader then
  $dnodeId = $data[0][7]
else
  print ====> no leader vnode!!!
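The hunks in this file track the same rename as the rest of the patch: role strings returned by `show vgroups` are now compared in lowercase (leader, follower, offline) instead of the old upper-case spellings. A minimal sketch of the updated check, using the replica status/dnode column pairs assumed throughout these scripts:

sql show vgroups
if $data[0][4] == leader then
  print ---- vgroup $data[0][0] leader on dnode $data[0][3]
endi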
@@ -216,11 +216,11 @@ system sh/exec.sh -n $dnodeId -s stop -x SIGINT
sql show vgroups
print $data(2)[0] $data(2)[1] $data(2)[2] $data(2)[3] $data(2)[4] $data(2)[5] $data(2)[6] $data(2)[7] $data(2)[8] $data(2)[9] $data(2)[10] $data(2)[11] $data(2)[12] $data(2)[13]
-if $data[0][4] == LEADER then
+if $data[0][4] == leader then
  print ---- vgroup $data[0][0] leader switch to dnode $data[0][3]
-elif $data[0][6] == LEADER then
+elif $data[0][6] == leader then
  print ---- vgroup $data[0][0] leader switch to dnode $data[0][5]
-elif $data[0][8] == LEADER then
+elif $data[0][8] == leader then
  print ---- vgroup $data[0][0] leader switch to dnode $data[0][7]
else
  print ====> no leader vnode!!!
@@ -264,7 +264,7 @@ print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $d
if $rows != $vgroups then
  return -1
endi
-if $data[0][4] == LEADER then
+if $data[0][4] == leader then
  if $data[0][6] != FLLOWER then
    goto check_vg_ready_1
  endi
@@ -273,7 +273,7 @@ if $data[0][4] == LEADER then
  endi
  goto vg_ready_1
endi
-if $data[0][6] == LEADER then
+if $data[0][6] == leader then
  if $data[0][4] != FLLOWER then
    goto check_vg_ready_1
  endi
@@ -282,7 +282,7 @@ if $data[0][6] == LEADER then
  endi
  goto vg_ready_1
endi
-if $data[0][8] == LEADER then
+if $data[0][8] == leader then
  if $data[0][4] != FLLOWER then
    goto check_vg_ready_1
  endi
@@ -325,27 +325,27 @@ system sh/exec.sh -n $dnodeId -s stop -x SIGINT
check_vg_ready_3:
sql show vgroups
print $data(2)[0] $data(2)[1] $data(2)[2] $data(2)[3] $data(2)[4] $data(2)[5] $data(2)[6] $data(2)[7] $data(2)[8] $data(2)[9] $data(2)[10] $data(2)[11] $data(2)[12] $data(2)[13]
-if $data[0][4] == LEADER then
-  if $data[0][6] == LEADER then
+if $data[0][4] == leader then
+  if $data[0][6] == leader then
    goto check_vg_ready_3
  endi
-  if $data[0][8] == LEADER then
+  if $data[0][8] == leader then
    goto check_vg_ready_3
  endi
  print ---- vgroup $data[0][0] leader locating dnode $data[0][5]
-elif $data[0][6] == LEADER then
-  if $data[0][4] == LEADER then
+elif $data[0][6] == leader then
+  if $data[0][4] == leader then
    goto check_vg_ready_3
  endi
-  if $data[0][8] == LEADER then
+  if $data[0][8] == leader then
    goto check_vg_ready_3
  endi
  print ---- vgroup $data[0][0] leader locating dnode $data[0][7]
-elif $data[0][8] == LEADER then
-  if $data[0][4] == LEADER then
+elif $data[0][8] == leader then
+  if $data[0][4] == leader then
    goto check_vg_ready_3
  endi
-  if $data[0][6] == LEADER then
+  if $data[0][6] == leader then
    goto check_vg_ready_3
  endi
  print ---- vgroup $data[0][0] leader locating dnode $data[0][9]
diff --git a/tests/script/tsim/sync/threeReplica1VgElectWihtInsert.sim b/tests/script/tsim/sync/threeReplica1VgElectWihtInsert.sim
index fc501096e687c0b7681bbf9e7fcad706f7aafced..3c21dff1b65a9737d96f9e0d9ae1b8c173fa3250 100644
--- a/tests/script/tsim/sync/threeReplica1VgElectWihtInsert.sim
+++ b/tests/script/tsim/sync/threeReplica1VgElectWihtInsert.sim
@@ -103,29 +103,29 @@ print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $d
if $rows != $vgroups then
  return -1
endi
-if $data[0][4] == LEADER then
-  if $data[0][6] != FOLLOWER then
+if $data[0][4] == leader then
+  if $data[0][6] != follower then
    goto check_vg_ready
  endi
-  if $data[0][8] != FOLLOWER then
+  if $data[0][8] != follower then
    goto check_vg_ready
  endi
  print ---- vgroup $data[0][0] leader locate on dnode $data[0][3]
  goto vg_ready
-elif $data[0][6] == LEADER then
-  if $data[0][4] != FOLLOWER then
+elif $data[0][6] == leader then
+  if $data[0][4] != follower then
    goto check_vg_ready
  endi
-  if $data[0][8] != FOLLOWER then
+  if $data[0][8] != follower then
    goto check_vg_ready
  endi
  print ---- vgroup $data[0][0] leader locate on dnode $data[0][5]
  goto vg_ready
-elif $data[0][8] == LEADER then
-  if $data[0][4] != FOLLOWER then
+elif $data[0][8] == leader then
+  if $data[0][4] != follower then
    goto check_vg_ready
  endi
-  if $data[0][6] != FOLLOWER then
+  if $data[0][6] != follower then
    goto check_vg_ready
  endi
  print ---- vgroup $data[0][0] leader locate on dnode $data[0][7]
@@ -193,11 +193,11 @@ switch_leader_loop:
print ====> find vnode of leader, and stop the dnode where the vnode is located, and query stb/ntb count(*)
sql show vgroups
print $data(2)[0] $data(2)[1] $data(2)[2] $data(2)[3] $data(2)[4] $data(2)[5] $data(2)[6] $data(2)[7] $data(2)[8] $data(2)[9] $data(2)[10] $data(2)[11] $data(2)[12] $data(2)[13]
-if $data[0][4] == LEADER then
+if $data[0][4] == leader then
  $dnodeId = $data[0][3]
-elif $data[0][6] == LEADER then
+elif $data[0][6] == leader then
  $dnodeId = $data[0][5]
-elif $data[0][8] == LEADER then
+elif $data[0][8] == leader then
  $dnodeId = $data[0][7]
else
  print ====> no leader vnode!!!
@@ -226,29 +226,29 @@ print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $d
if $rows != $vgroups then
  return -1
endi
-if $data[0][4] == LEADER then
-  if $data[0][6] != FOLLOWER then
+if $data[0][4] == leader then
+  if $data[0][6] != follower then
    goto check_vg_ready_2
  endi
-  if $data[0][8] != FOLLOWER then
+  if $data[0][8] != follower then
    goto check_vg_ready_2
  endi
  print ---- vgroup $dnodeId leader switch to dnode $data[0][3]
  goto vg_ready_2
-elif $data[0][6] == LEADER then
-  if $data[0][4] != FOLLOWER then
+elif $data[0][6] == leader then
+  if $data[0][4] != follower then
    goto check_vg_ready_2
  endi
-  if $data[0][8] != FOLLOWER then
+  if $data[0][8] != follower then
    goto check_vg_ready_2
  endi
  print ---- vgroup $dnodeId leader switch to dnode $data[0][5]
  goto vg_ready_2
-elif $data[0][8] == LEADER then
-  if $data[0][4] != FOLLOWER then
+elif $data[0][8] == leader then
+  if $data[0][4] != follower then
    goto check_vg_ready_2
  endi
-  if $data[0][6] != FOLLOWER then
+  if $data[0][6] != follower then
    goto check_vg_ready_2
  endi
  print ---- vgroup $dnodeId leader switch to dnode $data[0][7]
@@ -329,29 +329,29 @@ print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $d
if $rows != $vgroups then
  return -1
endi
-if $data[0][4] == LEADER then
-  if $data[0][6] != FOLLOWER then
+if $data[0][4] == leader then
+  if $data[0][6] != follower then
    goto check_vg_ready_1
  endi
-  if $data[0][8] != FOLLOWER then
+  if $data[0][8] != follower then
    goto check_vg_ready_1
  endi
  goto vg_ready_1
endi
-if $data[0][6] == LEADER then
-  if $data[0][4] != FOLLOWER then
+if $data[0][6] == leader then
+  if $data[0][4] != follower then
    goto check_vg_ready_1
  endi
-  if $data[0][8] != FOLLOWER then
+  if $data[0][8] != follower then
    goto check_vg_ready_1
  endi
  goto vg_ready_1
endi
-if $data[0][8] == LEADER then
-  if $data[0][4] != FOLLOWER then
+if $data[0][8] == leader then
+  if $data[0][4] != follower then
    goto check_vg_ready_1
  endi
-  if $data[0][6] != FOLLOWER then
+  if $data[0][6] != follower then
    goto check_vg_ready_1
  endi
  goto vg_ready_1
@@ -390,27 +390,27 @@ system sh/exec.sh -n $dnodeId -s stop -x SIGINT
check_vg_ready_3:
sql show vgroups
print $data(2)[0] $data(2)[1] $data(2)[2] $data(2)[3] $data(2)[4] $data(2)[5] $data(2)[6] $data(2)[7] $data(2)[8] $data(2)[9] $data(2)[10] $data(2)[11] $data(2)[12] $data(2)[13]
-if $data[0][4] == LEADER then
-  if $data[0][6] == LEADER then
+if $data[0][4] == leader then
+  if $data[0][6] == leader then
    goto check_vg_ready_3
  endi
-  if $data[0][8] == LEADER then
+  if $data[0][8] == leader then
    goto check_vg_ready_3
  endi
  print ---- vgroup $data[0][0] leader locating dnode $data[0][5]
-elif $data[0][6] == LEADER then
-  if $data[0][4] == LEADER then
+elif $data[0][6] == leader then
+  if $data[0][4] == leader then
    goto check_vg_ready_3
  endi
-  if $data[0][8] == LEADER then
+  if $data[0][8] == leader then
    goto check_vg_ready_3
  endi
  print ---- vgroup $data[0][0] leader locating dnode $data[0][7]
-elif $data[0][8] == LEADER then
-  if $data[0][4] == LEADER then
+elif $data[0][8] == leader then
+  if $data[0][4] == leader then
    goto check_vg_ready_3
  endi
-  if $data[0][6] == LEADER then
+  if $data[0][6] == leader then
    goto check_vg_ready_3
  endi
  print ---- vgroup $data[0][0] leader locating dnode $data[0][9]
diff --git a/tests/script/tsim/testsuit.sim b/tests/script/tsim/testsuit.sim
index e32abe4b7ff8850f9818113bed5f006c2182392e..0b1f0df04e9db6af2547cc1da49873082b2682b3 100644
--- a/tests/script/tsim/testsuit.sim
+++ b/tests/script/tsim/testsuit.sim
@@ -77,3 +77,4 @@ run sma/tsmaCreateInsertData.sim
run sma/rsmaCreateInsertQuery.sim
run valgrind/checkError.sim
run bnode/basic1.sim
+
diff --git a/tests/script/tsim/trans/create_db.sim b/tests/script/tsim/trans/create_db.sim
index ae6b7eab160f788db5a1d7fa8f47ed4ffda6e8c8..e13014f9c02370f65cf1e1700b84efdc4bcdcce2 100644
--- a/tests/script/tsim/trans/create_db.sim
+++ b/tests/script/tsim/trans/create_db.sim
@@ -26,7 +26,7 @@ if $data00 != 1 then
  return -1
endi

-if $data02 != LEADER then
+if $data02 != leader then
  return -1
endi

@@ -76,14 +76,6 @@ if $data[0][3] != d1 then
  return -1
endi

-if $data[0][4] != create-db then
-  return -1
-endi
-
-if $data[0][7] != @Unable to establish connection@ then
-  return -1
-endi
-
sql_error create database d1 vgroups 2;

print =============== start dnode2
@@ -125,15 +117,7 @@ endi
if $data[0][3] != d2 then
  return -1
endi
-
-if $data[0][4] != create-db then
-  return -1
-endi
-
-if $data[0][7] != @Unable to establish connection@ then
-  return -1
-endi
-
+return
sql_error create database d2 vgroups 2;

print =============== kill transaction
diff --git a/tests/script/tsim/valgrind/checkError.sim b/tests/script/tsim/valgrind/checkError.sim
index 97d16dba9663a77fdf96fe1741d045765a306d42..5790437a671e61dedb90b3384de08b145f2a4cac 100644
--- a/tests/script/tsim/valgrind/checkError.sim
+++ b/tests/script/tsim/valgrind/checkError.sim
@@ -71,7 +71,7 @@ print ====> start to check if there are ERRORS in vagrind log file for each dnod
# -n : dnode[x] be check
system_content sh/checkValgrind.sh -n dnode1
print cmd return result----> [ $system_content ]
-if $system_content <= 1 then
+if $system_content <= 3 then
  return 0
endi
diff --git a/tests/script/unique/account/account_create.sim b/tests/script/unique/account/account_create.sim
deleted file mode 100644
index e36de29e7c5835ddc78a9f3eab4b2b4d34634c42..0000000000000000000000000000000000000000
--- a/tests/script/unique/account/account_create.sim
+++ /dev/null
@@ -1,80 +0,0 @@
-system sh/stop_dnodes.sh
-system sh/deploy.sh -n dnode1 -i 1
-system sh/cfg.sh -n dnode1 -c wallevel -v 0
-system sh/exec.sh -n dnode1 -s start
-
-sleep 2000
-sql connect
-
-print ============================ dnode1 start
-
-$i = 0
-$dbPrefix = acdb
-$tbPrefix = actb
-$db = $dbPrefix . $i
-$tb = $tbPrefix . $i
-$accountPrefix = acac
-
-print =============== step1-4
-sql show accounts
-if $rows != 1 then
-  return -1
-endi
-sql show users
-if $rows != 3 then
-  return -1
-endi
-
-$i = 0
-$acc = $accountPrefix . $i
-sql_error create account $acc PASS pass123
-sql create account $acc PASS 'pass123'
-#sql create account $acc PASS 'pass123' -x step1
-#  return -1
-#step1:
-sql create user $acc PASS 'pass123' -x step2
-  return -1
-step2:
-
-sql show accounts
-if $rows != 2 then
-  return -1
-endi
-
-sql show users
-if $rows != 3 then
-  return -1
-endi
-
-print =============== step5-6
-sql drop account $acc
-sql drop account $acc -x step5
-  return -1
-step5:
-sql show accounts
-if $rows != 1 then
-  return -1
-endi
-sql show users
-if $rows != 3 then
-  return -1
-endi
-
-print =============== step7
-sql create account $acc PASS 'pass123'
-#sql create account $acc PASS 'pass123' -x step7
-#  return -1
-#step7:
-
-sql show accounts
-if $rows != 2 then
-  return -1
-endi
-
-sql drop account $acc
-sql show accounts
-if $rows != 1 then
-  return -1
-endi
-
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
diff --git a/tests/script/unique/account/account_delete.sim b/tests/script/unique/account/account_delete.sim
deleted file mode 100644
index d99a8b559dc6e04e4d6996e042d915671781d699..0000000000000000000000000000000000000000
--- a/tests/script/unique/account/account_delete.sim
+++ /dev/null
@@ -1,99 +0,0 @@
-system sh/stop_dnodes.sh
-system sh/deploy.sh -n dnode1 -i 1
-system sh/cfg.sh -n dnode1 -c wallevel -v 0
-system sh/exec.sh -n dnode1 -s start
-
-sleep 2000
-sql connect
-
-print ============= step1
-sql create account oroot pass 'taosdata'
-sql close
-sql connect oroot
-sleep 2000
-
-print ============= step2
-sql create user read pass 'taosdata'
-sql create user write pass 'taosdata'
-
-sql create database d1
-sql create database d2
-sql create table d1.t1 (ts timestamp, i int)
-sql create table d2.t2 (ts timestamp, i int)
-sql insert into d1.t1 values(now, 1)
-sql insert into d2.t2 values(now, 1)
-sql insert into d2.t2 values(now+1s, 2)
-
-sql show databases
-if $rows != 2 then
-  return -1
-endi
-sql show users
-if $rows != 4 then
-  return -1
-endi
-sql select * from d1.t1
-if $rows != 1 then
-  return -1
-endi
-sql select * from d2.t2
-if $rows != 2 then
-  return -1
-endi
-
-print ============= step3
-sql close
-sql connect
-sleep 2000
-
-sql show databases
-if $rows != 0 then
-  return -1
-endi
-sql show dnodes
-print $data00 $data01 $data02 $data03
-if $data02 != 2 then
-  return -1
-endi
-sql drop account oroot
-
-print ============= step4
-$x = 0
-show4:
-  $x = $x + 1
-  sleep 2000
-  if $x == 10 then
-    return -1
-  endi
-
-sql show dnodes
-if $data02 != 0 then
-  goto show4
-endi
-
-print ============= step5
-sql create account oroot pass 'taosdata'
-
-sql close
-sql connect oroot
-sleep 2000
-
-sql show databases
-if $rows != 0 then
-  return -1
-endi
-sql show users
-if $rows != 2 then
-  return -1
-endi
-
-sql close
-sql connect
-sleep 2000
-sql drop account oroot
-sql show accounts
-if $rows != 1 then
-  return -1
-endi
-
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
diff --git a/tests/script/unique/account/account_len.sim b/tests/script/unique/account/account_len.sim
deleted file mode 100644
index f8379bdf954bdde122e68585b973f4957ef15739..0000000000000000000000000000000000000000
--- a/tests/script/unique/account/account_len.sim
+++ /dev/null
@@ -1,92 +0,0 @@
-system sh/stop_dnodes.sh
-system sh/deploy.sh -n dnode1 -i 1
-system sh/cfg.sh -n dnode1 -c wallevel -v 0
-system sh/exec.sh -n dnode1 -s start
-
-sleep 2000
-sql connect
-
-$i = 0
-$dbPrefix = aldb
-$tbPrefix = altb
-$db = $dbPrefix . $i
-$tb = $tbPrefix . $i
-
-print =============== step1
-sql drop account ac -x step0
-  return -1
-step0:
-
-sql create account PASS 123 -x step1
-  return -1
-step1:
-
-sql show accounts
-if $rows != 1 then
-  return -1
-endi
-
-print =============== step2
-sql drop account a -x step2
-step2:
-sql create account a PASS '123'
-sql show accounts
-if $rows != 2 then
-  return -1
-endi
-
-sql drop account a
-sql show accounts
-if $rows != 1 then
-  return -1
-endi
-
-print =============== step3
-sql drop account abc01234567890123456789 -x step3
-step3:
-sql create account abc01234567890123456789 PASS '123'
-sql show accounts
-if $rows != 2 then
-  return -1
-endi
-
-sql drop account abc01234567890123456789
-sql show accounts
-if $rows != 1 then
-  return -1
-endi
-
-print =============== step4
-sql create account abcd01234567890123456789012345689012345 PASS '123' -x step4
-  return -1
-step4:
-sql show accounts
-if $rows != 1 then
-  return -1
-endi
-
-print =============== step5
-sql drop account 123 -x step5
-step5:
-sql create account 123 pass '123' -x step51
-  return -1
-step51:
-
-sql create account a123 PASS '123'
-sql show accounts
-if $rows != 2 then
-  return -1
-endi
-
-sql drop account a123
-sql show accounts
-if $rows != 1 then
-  return -1
-endi
-
-sql show users
-if $rows != 3 then
-  return -1
-endi
-
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
diff --git a/tests/script/unique/account/authority.sim b/tests/script/unique/account/authority.sim
deleted file mode 100644
index 8f2408de1429a8ea34add79e335f6bf7f42ca2b0..0000000000000000000000000000000000000000
--- a/tests/script/unique/account/authority.sim
+++ /dev/null
@@ -1,346 +0,0 @@
-system sh/stop_dnodes.sh
-system sh/deploy.sh -n dnode1 -i 1
-system sh/cfg.sh -n dnode1 -c wallevel -v 0
-system sh/exec.sh -n dnode1 -s start
-
-sleep 2000
-sql connect
-
-print ============= step1
-
-sql create user read pass 'taosdata'
-sql create user write pass 'taosdata'
-sql create user manage pass 'taosdata'
-
-sql create user a PASS 'ade' privilege -x step11
-  return -1
-step11:
-
-sql create user a PASS 'ade' privilege a -x step12
-  return -1
-step12:
-
-sql create user a PASS 'ade' privilege read -x step13
-  return -1
-step13:
-
-sql show accounts
-if $rows != 1 then
-  return -1
-endi
-sql show users
-if $rows != 6 then
-  return -1
-endi
-
-sql alter user read privilege read
-sql alter user write privilege write
-sql_error alter user manage privilege super
-
-print ============= step2
-sql close
-sql connect write
-sleep 2000
-
-sql create database d1
-sql create database d2
-sql create table d1.t1 (ts timestamp, i int)
-sql create table d2.t2 (ts timestamp, i int)
-sql insert into d1.t1 values(now, 1)
-sql insert into d2.t2 values(now, 1)
-sql insert into d2.t2 values(now+1s, 2)
-
-sql show accounts
-if $rows != 1 then
-  return -1
-endi
-sql show users
-if $rows != 6 then
-  return -1
-endi
-sql show databases
-if $rows != 2 then
-  return -1
-endi
-sql select * from d1.t1
-if $rows != 1 then
-  return -1
-endi
-sql select * from d2.t2
-if $rows != 2 then
-  return -1
-endi
-
-sql create account t1 pass 'taosdata' -x step21
-  return -1
-step21:
-
-sql create user t1 pass 'taosdata' -x step22
-  return -1
-step22:
-
-sql alter user read pass 'taosdata' -x step23
-  return -1
-step23:
-
-sql create dnode $hostname2 -x step24
-  return -1
-step24:
-
-sql drop dnode $hostname2 -x step25
-  return -1
-step25:
-
-sql create mnode 192.168.0.2 -x step26
-  return -1
-step26:
-
-sql drop mnode 192.168.0.2 -x step27
-  return -1
-step27:
-
-sql drop account root -x step28
-  return -1
-step28:
-
-sql alter user write pass 'taosdata'
-
-print ============= step3
-sql close
-sql connect read
-sleep 2000
-
-sql create database d3 -x step31
-  return -1
-step31:
-
-sql create table d1.t3 (ts timestamp, i int) -x step32
-  return -1
-step32:
-
-#sql insert into d1.t1 values(now, 2) -x step33
-#  return -1
-#step33:
-
-sql show accounts
-if $rows != 1 then
-  return -1
-endi
-sql show users
-if $rows != 6 then
-  return -1
-endi
-sql show databases
-if $rows != 2 then
-  return -1
-endi
-sql select * from d1.t1
-if $rows != 1 then
-  return -1
-endi
-
-sql select * from d2.t2
-if $rows != 2 then
-  return -1
-endi
-
-sql sql create account t1 pass 'taosdata' -x step34
-  return -1
-step34:
-
-sql sql create user t1 pass 'taosdata' -x step35
-  return -1
-step35:
-
-print ============= step4
-sql close
-sql connect manage
-sleep 2000
-
-sql create database d3
-sql create database d4
-sql create table d3.t3 (ts timestamp, i int)
-sql create table d4.t4 (ts timestamp, i int)
-
-sql show accounts
-if $rows != 1 then
-  return -1
-endi
-sql show users
-if $rows != 6 then
-  return -1
-endi
-sql show databases
-if $rows != 4 then
-  return -1
-endi
-sql select * from d1.t1
-if $rows != 1 then
-  return -1
-endi
-sql select * from d2.t2
-if $rows != 2 then
-  return -1
-endi
-
-sql create account other pass 'taosdata' -x step41
-  return -1
-step41:
-
-sql close
-sql connect
-sleep 2000
-sql create account other pass 'taosdata'
-
-print ============= step5
-sql close
-sql connect other
-sleep 2000
-sql create user read pass 'taosdata' -x step51
-  return -1
-step51:
-sql create other write pass 'taosdata' -x step52
-  return -1
-step52:
-
-sql create user oread pass 'taosdata'
-sql create user owrite pass 'taosdata'
-sql create user omanage pass 'taosdata'
-
-sql show users
-print show users $rows
-if $rows != 5 then
-  return -1
-endi
-
-sql alter user oread privilege read
-sql alter user owrite privilege write
-sql alter user oroot privilege super -x step53
-  return -1
-step53:
-sql alter user read privilege read -x step54
-  return -1
-step54:
-
-print ============= step6
-sql close
-sql connect owrite
-sleep 2000
-sql reset query cache
-sleep 1000
-sql create database d1
-sql create database d3
-sql create table d1.t1 (ts timestamp, i int)
-sql create table d3.t3 (ts timestamp, i int)
-sql insert into d1.t1 values(now, 11)
-sql insert into d3.t3 values(now, 11)
-sql insert into d3.t3 values(now+1s, 12)
-
-sql show databases
-if $rows != 2 then
-  return -1
-endi
-sql select * from d1.t1
-if $rows != 1 then
-  return -1
-endi
-sql select * from d2.t2 -x step6
-  return -1
-step6:
-sql select * from d3.t3
-if $rows != 2 then
-  return -1
-endi
-
-sql sql create account t1 pass 'taosdata' -x step61
-  return -1
-step61:
-
-sql sql create user t1 pass 'taosdata' -x step62
-  return -1
-step62:
-
-print ============= step7
-sql close
-sql connect oread
-sleep 2000
-
-sql create database d7 -x step71
-  return -1
-step71:
-
-sql show databases
-if $rows != 2 then
-  return -1
-endi
-sql select * from d1.t1
-if $rows != 1 then
-  return -1
-endi
-sql select * from d2.t2 -x step72
-  return -1
-step72:
-sql select * from d3.t3
-if $rows != 2 then
-  return -1
-endi
-
-sql sql create account t1 pass 'taosdata' -x step73
-  return -1
-step73:
-
-sql sql create user t1 pass 'taosdata' -x step74
-  return -1
-step74:
-
-print ============= step8
-sql close
-sql connect omanage
-sleep 2000
-
-sql create account t1 pass 'taosdata' -x step81
-  return -1
-step81:
-
-sql create database d4
-sql create table d4.t4 (ts timestamp, i int)
-
-sql show databases
-if $rows != 3 then
-  return -1
-endi
-sql select * from d1.t1
-if $rows != 1 then
-  return -1
-endi
-sql select * from d2.t2 -x step82
-  return -1
-step82:
-sql select * from d3.t3
-if $rows != 2 then
-  return -1
-endi
-
-print ============= step9
-sql close
-sql connect
-sleep 2000
-sql show databases
-if $rows != 4 then
-  return -1
-endi
-
-sql drop account other
-sql drop user read
-sql drop user manage
-sql drop user write
-
-sql close
-sql connect
-sleep 2000
-sql drop database d1
-sql drop database d2
-sql drop database d3
-sql drop database d4
-
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
diff --git a/tests/script/unique/account/basic.sim b/tests/script/unique/account/basic.sim
deleted file mode 100644
index 00e706a4482d9fa57ed2f97a9995ce84d3667fa1..0000000000000000000000000000000000000000
--- a/tests/script/unique/account/basic.sim
+++ /dev/null
@@ -1,46 +0,0 @@
-system sh/stop_dnodes.sh
-system sh/deploy.sh -n dnode1 -i 1
-system sh/exec.sh -n dnode1 -s start
-sleep 2000
-sql connect
-
-print =============== show accounts
-sql show accounts
-if $rows != 1 then
-  return -1
-endi
-
-print $data00 $data01 $data02
-
-print =============== create account1
-sql create account account1 PASS 'account1'
-sql show accounts
-if $rows != 2 then
-  return -1
-endi
-
-print $data00 $data01 $data02
-print $data10 $data11 $data22
-
-print =============== create account2
-sql create account account2 PASS 'account2'
-sql show accounts
-if $rows != 3 then
-  return -1
-endi
-
-print $data00 $data01 $data02
-print $data10 $data11 $data22
-print $data20 $data11 $data22
-
-print =============== drop account1
-sql drop account account1
-sql show accounts
-if $rows != 2 then
-  return -1
-endi
-
-print $data00 $data01 $data02
-print $data10 $data11 $data22
-
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
diff --git a/tests/script/unique/account/paras.sim b/tests/script/unique/account/paras.sim
deleted file mode 100644
index 102f5b6a381e5100b35a4f0125b1318bcb8b1d76..0000000000000000000000000000000000000000
--- a/tests/script/unique/account/paras.sim
+++ /dev/null
@@ -1,114 +0,0 @@
-system sh/stop_dnodes.sh
-system sh/deploy.sh -n dnode1 -i 1
-system sh/exec.sh -n dnode1 -s start
-sleep 2000
-sql connect
-
-print =============== show accounts
-sql show accounts
-if $rows != 1 then
-  return -1
-endi
-
-print $data00 $data01 $data02 $data03 $data04 $data05 $data06
-if $data00 != root then
-  return -1
-endi
-if $data02 != 3/128 then
-  return -1
-endi
-if $data03 != 0/128 then
-  return -1
-endi
-if $data04 != 0/2147483647 then
-  return -1
-endi
-if $data05 != 0/1000 then
-  return -1
-endi
-if $data06 != 0.000/unlimited then
-  return -1
-endi
-
-print =============== create account
-sql create account hou pass "hou" tseries 80000 storage 10737418240 streams 10 qtime 3600 dbs 3 users 3 conns 10
-sql show accounts
-if $rows != 2 then
-  return -1
-endi
-
-print $data10 $data11 $data12 $data13 $data14 $data15 $data16
-if $data10 != hou then
-  return -1
-endi
-if $data12 != 2/3 then
-  return -1
-endi
-if $data13 != 0/3 then
-  return -1
-endi
-if $data14 != 0/80000 then
-  return -1
-endi
-if $data15 != 0/10 then
-  return -1
-endi
-if $data16 != 0.000/10.000 then
-  return -1
-endi
-
-print =============== alter account
-sql alter account hou pass "hou" tseries 8000 streams 10 dbs 5 users 5
-sql show accounts
-if $rows != 2 then
-  return -1
-endi
-
-print $data10 $data11 $data12 $data13 $data14 $data15 $data16
-if $data10 != hou then
-  return -1
-endi
-if $data12 != 2/5 then
-  return -1
-endi
-if $data13 != 0/5 then
-  return -1
-endi
-if $data14 != 0/8000 then
-  return -1
-endi
-if $data15 != 0/10 then
-  return -1
-endi
-if $data16 != 0.000/10.000 then
-  return -1
-endi
-
-print =============== alter account
-sql create account hou pass "hou" tseries 8000 streams 10 dbs 5 users 6
-sql show accounts
-if $rows != 2 then
-  return -1
-endi
-
-print $data10 $data11 $data12 $data13 $data14 $data15 $data16
-if $data10 != hou then
-  return -1
-endi
-if $data12 != 2/6 then
-  return -1
-endi
-if $data13 != 0/5 then
-  return -1
-endi
-if $data14 != 0/8000 then
-  return -1
-endi
-if $data15 != 0/10 then
-  return -1
-endi
-if $data16 != 0.000/10.000 then
-  return -1
-endi
-
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
diff --git a/tests/script/unique/account/pass_alter.sim b/tests/script/unique/account/pass_alter.sim
deleted file mode 100644
index 8b857b014a292d53536c5acf2a00daa15be11239..0000000000000000000000000000000000000000
--- a/tests/script/unique/account/pass_alter.sim
+++ /dev/null
@@ -1,116 +0,0 @@
-system sh/stop_dnodes.sh
-system sh/deploy.sh -n dnode1 -i 1
-system sh/cfg.sh -n dnode1 -c wallevel -v 0
-system sh/exec.sh -n dnode1 -s start
-
-sleep 2000
-sql connect
-
-print ============= step1
-sql create user read pass 'taosdata1'
-sql create user write pass 'taosdata1'
-
-sql alter user read pass 'taosdata'
-sql alter user write pass 'taosdata'
-
-sql show accounts
-if $rows != 1 then
-  return -1
-endi
-sql show users
-if $rows != 5 then
-  return -1
-endi
-
-print ============= step2
-sql close
-sql connect read
-sleep 2000
-sql alter user read pass 'taosdata'
-sql alter user write pass 'taosdata1' -x step2
-  return -1
-step2:
-
-
-print ============= step3
-sql close
-sql connect write
-sleep 2000
-sql alter user write pass 'taosdata'
-sql alter user read pass 'taosdata' -x step3
-  return -1
-step3:
-
-print ============= step4
-sql close
-sleep 1000
-sql connect
-sleep 2000
-sql create account oroot pass 'taosdata'
-sql show accounts
-if $rows != 2 then
-  return -1
-endi
-sql show users
-if $rows != 5 then
-  return -1
-endi
-
-print ============= step5
-sql close
-sql connect oroot
-sleep 2000
-
-sql create user oread pass 'taosdata1'
-sql create user owrite pass 'taosdata1'
-sql alter user oread pass 'taosdata'
-sql alter user owrite pass 'taosdata'
-
-sql create user read pass 'taosdata1' -x step51
-  return -1
-step51:
-sql alter user read pass 'taosdata1' -x step52
-  return -1
-step52:
-
-sql show accounts -x step53
-  return -1
-step53:
-sql show users
-print show users $rows
-if $rows != 4 then
-  return -1
-endi
-
-print ============= step6
-sql close
-sql connect oread
-sleep 2000
-sql alter user oread pass 'taosdata'
-sql alter user owrite pass 'taosdata1' -x step6
-  return -1
-step6:
-
-
-print ============= step7
-sql close
-sql connect owrite
-sleep 2000
-sql alter user owrite pass 'taosdata'
-sql alter user oread pass 'taosdata' -x step7
-  return -1
-step7:
-
-print ============= step8
-sql close
-sql connect
-sleep 2000
-sql alter user oread pass 'taosdata'
-sql alter user owrite pass 'taosdata'
-sql alter user oroot pass 'taosdata'
-
-sql drop account oroot
-sql drop user read
-sql drop user write
-
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
diff --git a/tests/script/unique/account/pass_len.sim b/tests/script/unique/account/pass_len.sim
deleted file mode 100644
index f4ceb76f7b8b41873217bd11ae2c3d385386b0e9..0000000000000000000000000000000000000000
--- a/tests/script/unique/account/pass_len.sim
+++ /dev/null
@@ -1,81 +0,0 @@
-system sh/stop_dnodes.sh
-system sh/deploy.sh -n dnode1 -i 1
-system sh/cfg.sh -n dnode1 -c wallevel -v 0
-system sh/exec.sh -n dnode1 -s start
-
-sleep 2000
-sql connect
-
-$i = 0
-$dbPrefix = apdb
-$tbPrefix = aptb
-$db = $dbPrefix . $i
-$tb = $tbPrefix . $i
-$userPrefix = apusr
-
-print =============== step1
-$i = 0
-$user = $userPrefix . $i
-
-sql drop user $user -x step11
-  return -1
-step11:
-
-sql create user $user PASS -x step12
-  return -1
-step12:
-
-sql create user $user PASS 'taosdata'
-
-sql show users
-if $rows != 4 then
-  return -1
-endi
-
-print =============== step2
-$i = 1
-$user = $userPrefix . $i
-sql drop user $user -x step2
-step2:
-sql create user $user PASS '1'
-sql show users
-if $rows != 5 then
-  return -1
-endi
-
-print =============== step3
-$i = 2
-$user = $userPrefix . $i
-sql drop user $user -x step3
-step3:
-
-sql create user $user PASS 'abc0123456789'
-sql show users
-if $rows != 6 then
-  return -1
-endi
-
-print =============== step4
-$i = 3
-$user = $userPrefix . $i
-sql create user $user PASS 'abcd012345678901234567891234567890' -x step4
-  return -1
-step4:
-sql show users
-if $rows != 6 then
-  return -1
-endi
-
-$i = 0
-while $i < 3
-  $user = $userPrefix . $i
-  sql drop user $user
-  $i = $i + 1
-endw
-
-sql show users
-if $rows != 3 then
-  return -1
-endi
-
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
diff --git a/tests/script/unique/account/testSuite.sim b/tests/script/unique/account/testSuite.sim
deleted file mode 100644
index 9d4141cfe0c086f9a8863fffb00a9cb0f410e265..0000000000000000000000000000000000000000
--- a/tests/script/unique/account/testSuite.sim
+++ /dev/null
@@ -1,11 +0,0 @@
-run unique/account/account_create.sim
-run unique/account/account_delete.sim
-run unique/account/account_len.sim
-run unique/account/authority.sim
-run unique/account/basic.sim
-run unique/account/paras.sim
-run unique/account/pass_alter.sim
-run unique/account/pass_len.sim
-run unique/account/usage.sim
-run unique/account/user_create.sim
-run unique/account/user_len.sim
diff --git a/tests/script/unique/account/usage.sim b/tests/script/unique/account/usage.sim
deleted file mode 100644
index 3b9c20b159a6237f469fc1e48b5b3a3f4ca5f7b8..0000000000000000000000000000000000000000
--- a/tests/script/unique/account/usage.sim
+++ /dev/null
@@ -1,154 +0,0 @@
-system sh/stop_dnodes.sh
-system sh/deploy.sh -n dnode1 -i 1
-system sh/exec.sh -n dnode1 -s start
-#system sh/exec.sh -n monitor -s 1
-system sh/exec.sh -n monitorInterval -s 1
-sleep 2000
-sql connect
-
-print =============== show accounts
-
-print =============== create account
-sql alter account root pass "taosdata" tseries 8000 streams 10 dbs 5 users 5
-sql show accounts
-print $data00 $data01 $data02 $data03 $data04 $data05 $data06
-if $data00 != root then
-  return -1
-endi
-if $data02 != 3/5 then
-  return -1
-endi
-if $data03 != 0/5 then
-  return -1
-endi
-if $data04 != 0/8000 then
-  return -1
-endi
-if $data05 != 0/10 then
-  return -1
-endi
-if $data06 != 0.000/unlimited then
-  return -1
-endi
-
-print =============== check usage account
-sql create database d1 wal 2
-sql create database d2 wal 2
-sql create database d3 wal 2
-sql create database d4 wal 2
-sql create database d5 wal 2
-
-sql create table d1.t1 (ts timestamp, i int);
-sql create user u1 pass "u1"
-
-sql show accounts
-print $data10 $data11 $data12 $data13 $data14 $data15 $data16
-if $data00 != root then
-  return -1
-endi
-if $data02 != 4/5 then
-  return -1
-endi
-if $data03 != 5/5 then
-  return -1
-endi
-if $data04 != 1/8000 then
-  return -1
-endi
-if $data05 != 0/10 then
-  return -1
-endi
-if $data06 != 0.000/unlimited then
-  return -1
-endi
-
-print =============== step2
-sql alter account root pass "taosdata" tseries 10 storage 1073741824 streams 10 dbs 5 users 5
-sql show accounts
-print $data00 $data01 $data02 $data03 $data04 $data05 $data06
-if $data00 != root then
-  return -1
-endi
-if $data02 != 4/5 then
-  return -1
-endi
-if $data03 != 5/5 then
-  return -1
-endi
-if $data04 != 1/10 then
-  return -1
-endi
-if $data05 != 0/10 then
-  return -1
-endi
-if $data06 != 0.000/1.000 then
-  return -1
-endi
-
-print =============== step3
-sql alter account root pass "taosdata" tseries 10 storage 16 streams 10 dbs 5 users 5
-sql show accounts
-print $data00 $data01 $data02 $data03 $data04 $data05 $data06
-if $data00 != root then
-  return -1
-endi
-if $data02 != 4/5 then
-  return -1
-endi
-if $data03 != 5/5 then
-  return -1
-endi
-if $data04 != 1/10 then
-  return -1
-endi
-if $data05 != 0/10 then
-  return -1
-endi
-if $data06 != 0.000/0.000 then
-  return -1
-endi
-
-print =============== step4
-sql insert into d1.t1 values(now + 1s, 1)
-sql insert into d1.t1 values(now + 2s, 2)
-
-sleep 10000
-print no write auth
-sql_error insert into d1.t1 values(now + 3s, 2)
-sql_error insert into d1.t1 values(now + 4s, 2)
-
-sql alter account root pass "taosdata" tseries 10 storage 36 streams 10 dbs 5 users 5
-sleep 10000
-print has write auth
-sql insert into d1.t1 values(now + 5s, 1)
-sql insert into d1.t1 values(now + 6s, 2)
-
-# no write auth
-sleep 10000
-print no write auth
-sql_error insert into d1.t1 values(now + 7s, 2)
-sql_error insert into d1.t1 values(now + 8s, 2)
-
-print =============== step5
-sql alter account root pass "taosdata" tseries 10 storage 3600 streams 10 dbs 5 users 5 state all
-sleep 10000
-
-sql insert into d1.t1 values(now + 11s, 1)
-sql insert into d1.t1 values(now + 12s, 2)
-
-sql alter account root pass "taosdata" tseries 10 storage 3600 streams 10 dbs 5 users 5 state no
-sleep 10000
-print no write auth
-sql_error insert into d1.t1 values(now + 13s, 2)
-sql_error insert into d1.t1 values(now + 14s, 2)
-
-sql alter account root pass "taosdata" tseries 10 storage 3600 streams 10 dbs 5 users 5 state all
-sleep 10000
-print has write auth
-sql insert into d1.t1 values(now + 15s, 1)
-sql insert into d1.t1 values(now + 16s, 2)
-
-print =============== check grant
-sql_error create database d6
-
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
diff --git a/tests/script/unique/account/user_create.sim b/tests/script/unique/account/user_create.sim
deleted file mode 100644
index e54a380f0dbef8107de452354ea01bc58262d548..0000000000000000000000000000000000000000
--- a/tests/script/unique/account/user_create.sim
+++ /dev/null
@@ -1,84 +0,0 @@
-system sh/stop_dnodes.sh
-system sh/deploy.sh -n dnode1 -i 1
-system sh/cfg.sh -n dnode1 -c wallevel -v 0
-system sh/exec.sh -n dnode1 -s start
-
-sleep 2000
-sql connect
-
-print =============== step1
-sql show users
-if $rows != 3 then
-  return -1
-endi
-
-sql create user read PASS 'pass123'
-sql create user read PASS 'pass123' -x step1
-  return -1
-step1:
-
-sql show users
-if $rows != 4 then
-  return -1
-endi
-
-sql alter user read PASS 'taosdata'
-
-print =============== step2
-sql close
-sql connect read
-sleep 2000
-
-sql alter user read PASS 'taosdata'
-
-print =============== step3
-sql drop user read -x step31
-  return -1
-step31:
-sql drop user _root -x step32
-  return -1
-step32:
-sql drop user monitor -x step33
-  return -1
-step33:
-
-print =============== step4
-sql close
-sql connect
-sleep 2000
-
-sql alter user read privilege read
-sql show users
-print $data1_read
-if $data1_read != readable then
-  return -1
-endi
-
-sql_error alter user read privilege super
-sql show users
-print $data1_read
-if $data1_read != readable then
-  return -1
-endi
-
-sql alter user read privilege write
-sql show users
-if $data1_read != writable then
-  return -1
-endi
-
-sql alter user read privilege 1 -x step43
-  return -1
-step43:
-
-sql drop user _root -x step41
-  return -1
-step41:
-
-sql drop user monitor -x step42
-  return -1
-step42:
-
-sql drop user read
-
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
diff --git a/tests/script/unique/account/user_len.sim b/tests/script/unique/account/user_len.sim
deleted file mode 100644
index b8d448f0ffc9e43cbc0f0a5a849bda215e72e790..0000000000000000000000000000000000000000
--- a/tests/script/unique/account/user_len.sim
+++ /dev/null
@@ -1,94 +0,0 @@
-system sh/stop_dnodes.sh
-system sh/deploy.sh -n dnode1 -i 1
-system sh/cfg.sh -n dnode1 -c wallevel -v 0
-system sh/exec.sh -n dnode1 -s start
-
-sleep 2000
-sql connect
-
-$i = 0
-$dbPrefix = lm_us_db
-$tbPrefix = lm_us_tb
-$db = $dbPrefix . $i
-$tb = $tbPrefix . $i
-
-print =============== step1
-sql drop user ac -x step0
-  return -1
-step0:
-
-sql create user PASS '123' -x step1
-  return -1
-step1:
-
-sql show users
-if $rows != 3 then
-  return -1
-endi
-
-print =============== step2
-sql drop user a -x step2
-step2:
-sleep 1000
-sql create user a PASS '123'
-sql show users
-if $rows != 4 then
-  return -1
-endi
-
-sql drop user a
-sql show users
-if $rows != 3 then
-  return -1
-endi
-
-print =============== step3
-sql drop user abc01234567890123456789 -x step3
-step3:
-
-sql create user abc01234567890123456789 PASS '123'
-sql show users
-if $rows != 4 then
-  return -1
-endi
-
-sql drop user abc01234567890123456789
-sql show users
-if $rows != 3 then
-  return -1
-endi
-
-print =============== step4
-sql create user abcd0123456789012345678901234567890111 PASS '123' -x step4
-  return -1
-step4:
-sql show users
-if $rows != 3 then
-  return -1
-endi
-
-print =============== step5
-sql drop user 123 -x step5
-step5:
-sql create user 123 PASS '123' -x step61
-  return -1
-step61:
-
-sql create user a123 PASS '123'
-sql show users
-if $rows != 4 then
-  return -1
-endi
-
-sql drop user a123
-sql show users
-if $rows != 3 then
-  return -1
-endi
-
-sql show accounts
-if $rows != 1 then
-  return -1
-endi
-
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
diff --git a/tests/script/unique/http/admin.sim b/tests/script/unique/http/admin.sim
deleted file mode 100644
index ae206744c4e93ab7cebd5f4db7d8d4b84ad5ebbb..0000000000000000000000000000000000000000
--- a/tests/script/unique/http/admin.sim
+++ /dev/null
@@ -1,192 +0,0 @@
-system sh/stop_dnodes.sh
-
-system sh/deploy.sh -n dnode1 -i 1
-system sh/cfg.sh -n dnode1 -c http -v 1
-system sh/cfg.sh -n dnode1 -c wallevel -v 0
-#system sh/cfg.sh -n dnode1 -c adminRowLimit -v 10
-system sh/cfg.sh -n dnode1 -c httpDebugFlag -v 135
-system sh/exec.sh -n dnode1 -s start
-
-sql connect
-sleep 2000
-
-print ============================ dnode1 start
-
-print =============== step0 - prepare data
=============== step0 - prepare data -sql create database d1 -sql use d1 - -sql create table table_admin (ts timestamp, i int) - -sql insert into table_admin values('2017-12-25 21:28:41.022', 1) -sql insert into table_admin values('2017-12-25 21:28:42.022', 2) -sql insert into table_admin values('2017-12-25 21:28:43.022', 3) -sql insert into table_admin values('2017-12-25 21:28:44.022', 4) -sql insert into table_admin values('2017-12-25 21:28:45.022', 5) -sql insert into table_admin values('2017-12-25 21:28:46.022', 6) -sql insert into table_admin values('2017-12-25 21:28:47.022', 7) -sql insert into table_admin values('2017-12-25 21:28:48.022', 8) -sql insert into table_admin values('2017-12-25 21:28:49.022', 9) -sql insert into table_admin values('2017-12-25 21:28:50.022', 10) - -print =============== step1 - login - -system_content curl 127.0.0.1:7111/admin/ -print 1-> $system_content -if $system_content != @{"status":"error","code":4357,"desc":"no auth info input"}@ then - print actual: $system_content - return -1 -endi - -system_content curl 127.0.0.1:7111/admin/xx -print 2-> $system_content -if $system_content != @{"status":"error","code":4357,"desc":"no auth info input"}@ then - return -1 -endi - -system_content curl 127.0.0.1:7111/admin/login -print 3-> $system_content -if $system_content != @{"status":"error","code":4357,"desc":"no auth info input"}@ then - return -1 -endi - -system_content curl 127.0.0.1:7111/admin/login/root -print 4-> $system_content -if $system_content != @{"status":"error","code":4357,"desc":"no auth info input"}@ then - return -1 -endi - -system_content curl 127.0.0.1:7111/admin/login/root/123 -print 5-> $system_content -if $system_content != @{"status":"error","code":3,"desc":"Authentication failure"}@ then - return -1 -endi - -system_content curl 127.0.0.1:7111/admin/login/root/123/1/1/3 -print 6-> $system_content -if $system_content != @{"status":"error","code":3,"desc":"Authentication failure"}@ then - return -1 -endi - -system_content curl -H 'Authorization: Bearer eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.' 
-d 'show databases' 127.0.0.1:7111/admin/login/root/1 -print 7-> $system_content -if $system_content != @{"status":"error","code":4387,"desc":"invalid format of Authorization"}@ then - return -1 -endi - -system_content curl -H 'Authorization: Taosd eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJpc3MiOiJ3d3cudGFvc2RhdGEuY29tIiwicGFzcyI6InRhb3NkYXRhIiwic3ViIjoicm9vdCJ9.xPv3b5odlR7YF8G_QWASjIRbMtA5v4ItToJ35fFgi' 127.0.0.1:7111/admin/login/root/1 -print 8-> $system_content -if $system_content != @{"status":"error","code":4387,"desc":"invalid format of Authorization"}@ then - return -1 -endi - -sleep 2000 -system_content curl 127.0.0.1:7111/admin/login/root/taosdata -print 9 -----> $system_content - -if $system_content != {"status":"succ","code":0,"desc":"/KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04"} then - return -1 -endi - -#system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'show databases' 127.0.0.1:7111/admin/login/root/1 -#print 10-> $system_content -#if $system_content != @{"status":"error","code":29,"desc":"failed to connect to server"}@ then -# return -1 -#endi - -print =============== step2 - logout - -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' 127.0.0.1:7111/admin/logout -print 10 -----> $system_content - -if $system_content != @{"status":"succ","code":0,"desc":"logout success"}@ then - return -1 -endi - -system_content curl 127.0.0.1:7111/admin/logout -print 11 -----> $system_content - -if $system_content != @{"status":"error","code":4357,"desc":"no auth info input"}@ then - return -1 -endi - -print =============== step3 - info - -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' 127.0.0.1:7111/admin/info -print curl 127.0.0.1:7111/admin/info -----> $system_content -if $system_content != {"status":"succ","data":[{"dbs":1,"tables":1,"users":3,"mnodes":1,"dnodes":1}]} then - return -1 -endi - -print =============== step4 - meta - -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'show mnodes' 127.0.0.1:7111/admin/meta -print curl 127.0.0.1:7111/admin/meta -----> $system_content -#if $system_content != @{"status":"succ","head":["column type","column name","column bytes"],"data":[["binary","IP",16],["timestamp","created time",8],["binary","status",10],["binary","role",10],["binary","public ip",16]],"rows":5}@ then -# return -1 -#endi - -print =============== step5 - query data - -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'select * from d1.table_admin' 127.0.0.1:7111/admin/sql -print curl 127.0.0.1:7111/admin/all -----> $system_content -if $system_content != @{"status":"succ","head":["ts","i"],"data":[["2017-12-25 21:28:41.022",1],["2017-12-25 21:28:42.022",2],["2017-12-25 21:28:43.022",3],["2017-12-25 21:28:44.022",4],["2017-12-25 21:28:45.022",5],["2017-12-25 21:28:46.022",6],["2017-12-25 21:28:47.022",7],["2017-12-25 21:28:48.022",8],["2017-12-25 21:28:49.022",9],["2017-12-25 21:28:50.022",10]],"rows":10}@ then - return -1 -endi - -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'select * from d1.table_admin' 127.0.0.1:7111/admin/sql -print curl 127.0.0.1:7111/admin/sql -----> $system_content -if $system_content != @{"status":"succ","head":["ts","i"],"data":[["2017-12-25 
21:28:41.022",1],["2017-12-25 21:28:42.022",2],["2017-12-25 21:28:43.022",3],["2017-12-25 21:28:44.022",4],["2017-12-25 21:28:45.022",5],["2017-12-25 21:28:46.022",6],["2017-12-25 21:28:47.022",7],["2017-12-25 21:28:48.022",8],["2017-12-25 21:28:49.022",9],["2017-12-25 21:28:50.022",10]],"rows":10}@ then - return -1 -endi - -print =============== step6 - insert data -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d "insert into d1.table_admin values('2017-12-25 21:28:51.022', 11)" 127.0.0.1:7111/admin/sql -print curl 127.0.0.1:7111/admin/sql -----> $system_content -if $system_content != @{"status":"succ","head":["affect_rows"],"data":[[1]],"rows":1}@ then - return -1 -endi - -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'select * from d1.table_admin' 127.0.0.1:7111/admin/all -print curl 127.0.0.1:7111/admin/all -----> $system_content -if $system_content != @{"status":"succ","head":["ts","i"],"data":[["2017-12-25 21:28:41.022",1],["2017-12-25 21:28:42.022",2],["2017-12-25 21:28:43.022",3],["2017-12-25 21:28:44.022",4],["2017-12-25 21:28:45.022",5],["2017-12-25 21:28:46.022",6],["2017-12-25 21:28:47.022",7],["2017-12-25 21:28:48.022",8],["2017-12-25 21:28:49.022",9],["2017-12-25 21:28:50.022",10],["2017-12-25 21:28:51.022",11]],"rows":11}@ then - print actual: $system_content - print expect =======> {"status":"succ","head":["ts","i"],"data":[["2017-12-25 21:28:41.022",1],["2017-12-25 21:28:42.022",2],["2017-12-25 21:28:43.022",3],["2017-12-25 21:28:44.022",4],["2017-12-25 21:28:45.022",5],["2017-12-25 21:28:46.022",6],["2017-12-25 21:28:47.022",7],["2017-12-25 21:28:48.022",8],["2017-12-25 21:28:49.022",9],["2017-12-25 21:28:50.022",10],["2017-12-25 21:28:51.022",11]],"rows":11} - return -1 -endi - -#system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'select * from d1.table_admin' 127.0.0.1:7111/admin/sql -#print curl 127.0.0.1:7111/admin/sql -----> $system_content -#if $system_content != @{"status":"succ","head":["ts","i"],"data":[["2017-12-25 21:28:51.022",11],["2017-12-25 21:28:50.022",10],["2017-12-25 21:28:49.022",9],["2017-12-25 21:28:48.022",8],["2017-12-25 21:28:47.022",7],["2017-12-25 21:28:46.022",6],["2017-12-25 21:28:45.022",5],["2017-12-25 21:28:44.022",4],["2017-12-25 21:28:43.022",3],["2017-12-25 21:28:42.022",2]],"rows":10}@ then -# return -1 -#endi - -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' 127.0.0.1:7111/admin/info -print curl 127.0.0.1:7111/admin/info -----> $system_content -if $system_content != {"status":"succ","data":[{"dbs":1,"tables":1,"users":3,"mnodes":1,"dnodes":1}]} then - return -1 -endi - -print =============== step7 - use dbs - -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'use d1;' 127.0.0.1:7111/admin/all -print 23-> $system_content -if $system_content != @{"status":"error","code":4360,"desc":"no need to execute use db cmd"}@ then - return -1 -endi - -print =============== step8 - monitor dbs -#system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'show dnodes;show mnodes;' 127.0.0.1:7111/admin/sqls -#print 24-> $system_content -#if $system_content != @[{"status":"succ","head":["IP","created time","open vnodes","free vnodes","status","balance 
state"],"data":[["127.0.0.1","2018-09-04 #11:16:13.985",1,3,"ready","balanced"]],"rows":1},{"status":"succ","head":["IP","created time","status","role"],"data":[["127.0.0.1","2018-09-04 11:16:13.371","serving","master"]],"rows":1}]@ then -# return -1 -# endi - -system sh/exec.sh -n dnode1 -s stop -x SIGINT -system sh/exec.sh -n dnode2 -s stop -x SIGINT -system sh/exec.sh -n dnode3 -s stop -x SIGINT -system sh/exec.sh -n dnode4 -s stop -x SIGINT -system sh/exec.sh -n dnode5 -s stop -x SIGINT -system sh/exec.sh -n dnode6 -s stop -x SIGINT -system sh/exec.sh -n dnode7 -s stop -x SIGINT -system sh/exec.sh -n dnode8 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/script/unique/http/opentsdb.sim b/tests/script/unique/http/opentsdb.sim deleted file mode 100644 index 7d1e6b03d4547a6b0b2a6a7857000a8a6518a002..0000000000000000000000000000000000000000 --- a/tests/script/unique/http/opentsdb.sim +++ /dev/null @@ -1,247 +0,0 @@ -system sh/stop_dnodes.sh - -system sh/deploy.sh -n dnode1 -i 1 -system sh/cfg.sh -n dnode1 -c http -v 1 -system sh/cfg.sh -n dnode1 -c wallevel -v 0 -system sh/exec.sh -n dnode1 -s start - -sleep 2000 -sql connect - -print ============================ dnode1 start - -print =============== step1 - parse -system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400,"value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/ -print $system_content -if $system_content != @{"status":"error","code":4496,"desc":"database name can not be null"}@ then - return -1 -endi - -system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400,"value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db123456789012345678901234567890db -print $system_content -if $system_content != @{"status":"error","code":4497,"desc":"database name too long"}@ then - return -1 -endi - -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d '[{"metric": "sys_cpu","timestamp": 1346846400,"value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/ -print $system_content -if $system_content != @{"status":"error","code":4496,"desc":"database name can not be null"}@ then - return -1 -endi - -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d '[{"metric": "sys_cpu","timestamp": 1346846400,"value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put2 -print $system_content -if $system_content != @{"status":"error","code":4354,"desc":"invalid url format"}@ then - return -1 -endi - -system_content curl -u root:taosdata -d '[]' 127.0.0.1:7111/opentsdb/db/put -print $system_content -if $system_content != @{"status":"error","code":4499,"desc":"metrics size is 0"}@ then - return -1 -endi - -system_content curl -u root:taosdata -d '[' 127.0.0.1:7111/opentsdb/db/put -print $system_content -if $system_content != @{"status":"error","code":4498,"desc":"invalid opentsdb json fromat"}@ then - return -1 -endi - -system_content curl -u root:taosdata -d '{}' 127.0.0.1:7111/opentsdb/db/put -print $system_content -if $system_content != @{"status":"error","code":4499,"desc":"metrics size is 0"}@ then - return -1 -endi - -system_content curl -u root:taosdata -d '[{}]' 127.0.0.1:7111/opentsdb/db/put -print $system_content - -if $system_content != @{"status":"error","code":4501,"desc":"metric name not find"}@ then - return -1 
-endi - -system_content curl -u root:taosdata -d '[{"metric": 1,"timestamp": 1346846400,"value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put -print $system_content -if $system_content != @{"status":"error","code":4502,"desc":"metric name type should be string"}@ then - return -1 -endi - -system_content curl -u root:taosdata -d '[{"metric": "","timestamp": 1346846400,"value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put -print $system_content -if $system_content != @{"status":"error","code":4503,"desc":"metric name length is 0"}@ then - return -1 -endi - -system_content curl -u root:taosdata -d '[{"metric": "ab1234567890123456789012345678ab1234567890123456789012345678","timestamp": 1346846400,"value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put -print $system_content -if $system_content != @{"errors":[{"datapoint":{"metric":"ab1234567890123456789012345678ab1234567890123456789012345678","stable":"ab1234567890123456789012345678ab1234567890123456789012345678_d_bbb","table":"ab1234567890123456789012345678ab1234567890123456789012345678_d_bbb_lga_1_web01","timestamp":1346846400,"value":18.000000,"tags":{"dc":"lga","group1":"1","host":"web01"},"status":"error","code":1547,"desc":"Timestamp data out of range"}}],"failed":1,"success":0,"affected_rows":0}@ then - return -1 -endi - -system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put -print $system_content -if $system_content != @{"status":"error","code":4505,"desc":"timestamp not find"}@ then - return -1 -endi - -system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": "2","value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put -print $system_content -if $system_content != @{"status":"error","code":4506,"desc":"timestamp type should be integer"}@ then - return -1 -endi - -system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": -1,"value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put -print $system_content -if $system_content != @{"status":"error","code":4507,"desc":"timestamp value smaller than 0"}@ then - return -1 -endi - -system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put -print $system_content -if $system_content != @{"status":"error","code":4517,"desc":"value not find"}@ then - return -1 -endi - -####### - -system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400,"value": 18}]' 127.0.0.1:7111/opentsdb/db/put -print $system_content -if $system_content != @{"status":"error","code":4508,"desc":"tags not find"}@ then - return -1 -endi - -system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400,"value": 18,"tags": {}}]' 127.0.0.1:7111/opentsdb/db/put -print $system_content -if $system_content != @{"status":"error","code":4509,"desc":"tags size is 0"}@ then - return -1 -endi - -system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400,"value": 18,"tags": 0}]' 127.0.0.1:7111/opentsdb/db/put -print $system_content -if $system_content != @{"status":"error","code":4509,"desc":"tags size is 0"}@ then - return -1 -endi - -system_content curl -u root:taosdata -d '[{"metric": 
"sys_cpu","timestamp": 1346846400,"value": 18,"tags": {"host": "web01","group1": "1","group1": "1","group1": "1","group1": "1","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put -print $system_content -if $system_content != @{"errors":[{"datapoint":{"metric":"sys_cpu","stable":"sys_cpu_d_bbbbbbb","table":"sys_cpu_d_bbbbbbb_lga_1_1_1_1_1_web01","timestamp":1346846400,"value":18.000000,"tags":{"dc":"lga","group1":"1","group1":"1","group1":"1","group1":"1","group1":"1","host":"web01"},"status":"error","code":866,"desc":"failed to create table"}}],"failed":1,"success":0,"affected_rows":0}@ then - return -1 -endi - -system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400,"value": 18,"tags": {"": "web01"}}]' 127.0.0.1:7111/opentsdb/db/put -print $system_content -if $system_content != @{"status":"error","code":4512,"desc":"tag name is null"}@ then - return -1 -endi - -system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400,"value": 18,"tags": {"host01123456789001123456789001123456789001123456789001123456789001123456789": "01"}}]' 127.0.0.1:7111/opentsdb/db/put -print $system_content -if $system_content != @{"status":"error","code":4513,"desc":"tag name length too long"}@ then - return -1 -endi - -system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400,"value": 18,"tags": {"host": "web011234567890011234567890011234567890011234567890011234567890011234567890011234567890011234567890"}}]' 127.0.0.1:7111/opentsdb/db/put -print $system_content -if $system_content != @{"status":"error","code":4516,"desc":"tag value can not more than 64"}@ then - return -1 -endi - -system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400,"value": 18,"tags": {"host": ""}}]' 127.0.0.1:7111/opentsdb/db/put -print $system_content -if $system_content != @{"status":"error","code":4515,"desc":"tag value is null"}@ then - return -1 -endi - -sleep 2000 - -print =============== step2 - insert single data -system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400000,"value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put -print $system_content -if $system_content != @{"errors":[{"datapoint":{"metric":"sys_cpu","stable":"sys_cpu_d_bbb","table":"sys_cpu_d_bbb_lga_1_web01","timestamp":1346846400000,"value":18.000000,"tags":{"dc":"lga","group1":"1","host":"web01"},"affected_rows":1,"status":"succ"}}],"failed":0,"success":1,"affected_rows":1}@ then - return -1 -endi - -system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400000,"value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put -print $system_content -if $system_content != @{"errors":[{"datapoint":{"metric":"sys_cpu","stable":"sys_cpu_d_bbb","table":"sys_cpu_d_bbb_lga_1_web01","timestamp":1346846400000,"value":18.000000,"tags":{"dc":"lga","group1":"1","host":"web01"},"affected_rows":1,"status":"succ"}}],"failed":0,"success":1,"affected_rows":1}@ then - return -1 -endi - -system_content curl -u root:taosdata -d 'select * from db.sys_cpu_d_bbb_lga_1_web01' 127.0.0.1:7111/rest/sql/ -print $system_content -if $system_content != @{"status":"succ","head":["ts","value"],"column_meta":[["ts",9,8],["value",7,8]],"data":[["2012-09-05 20:00:00.000",18.000000000]],"rows":1}@ then - return -1 -endi - -print =============== step3 - multi-query data -system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 
1346846405000,"value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}},{"metric": "sys_cpu","timestamp": 1346846402000,"value": 18,"tags": {"host": "web02","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put - -print $system_content - -if $system_content != @{"errors":[{"datapoint":{"metric":"sys_cpu","stable":"sys_cpu_d_bbb","table":"sys_cpu_d_bbb_lga_1_web01","timestamp":1346846405000,"value":18.000000,"tags":{"dc":"lga","group1":"1","host":"web01"},"affected_rows":1,"status":"succ"}},{"datapoint":{"metric":"sys_cpu","stable":"sys_cpu_d_bbb","table":"sys_cpu_d_bbb_lga_1_web02","timestamp":1346846402000,"value":18.000000,"tags":{"dc":"lga","group1":"1","host":"web02"},"affected_rows":1,"status":"succ"}}],"failed":0,"success":2,"affected_rows":2}@ then - return -1 -endi - -system_content curl -u root:taosdata -d 'select * from db.sys_cpu_d_bbb_lga_1_web01' 127.0.0.1:7111/rest/sql/ - -print $system_content - -if $system_content != @{"status":"succ","head":["ts","value"],"column_meta":[["ts",9,8],["value",7,8]],"data":[["2012-09-05 20:00:00.000",18.000000000],["2012-09-05 20:00:05.000",18.000000000]],"rows":2}@ then - return -1 -endi - -system_content curl -u root:taosdata -d 'select count(*) from db.sys_cpu_d_bbb' 127.0.0.1:7111/rest/sql/ - -print $system_content - -if $system_content != @{"status":"succ","head":["count(*)"],"column_meta":[["count(*)",5,8]],"data":[[3]],"rows":1}@ then - return -1 -endi - -print =============== step4 - summary-put data -system_content curl -u root:taosdata -d '[{"metric": "sys_mem","timestamp": 1346846400000,"value": 8,"tags": {"host": "web01","group1": "1","dc": "lga"}},{"metric": "sys_mem","timestamp": 1346846405000,"value": 9,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put?details=false - -print $system_content - -if $system_content != @{"failed":0,"success":2}@ then - return -1 -endi - -system_content curl -u root:taosdata -d 'select * from db.sys_mem_d_bbb_lga_1_web01' 127.0.0.1:7111/rest/sql/ - -print $system_content - -if $system_content != @{"status":"succ","head":["ts","value"],"column_meta":[["ts",9,8],["value",7,8]],"data":[["2012-09-05 20:00:00.000",8.000000000],["2012-09-05 20:00:05.000",9.000000000]],"rows":2}@ then - return -1 -endi - -system_content curl -u root:taosdata -d 'select count(*) from db.sys_mem_d_bbb' 127.0.0.1:7111/rest/sql/ - -print $system_content - -if $system_content != @{"status":"succ","head":["count(*)"],"column_meta":[["count(*)",5,8]],"data":[[2]],"rows":1}@ then - return -1 -endi - -print =============== step5 - prepare data - -system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846402000,"value": 19,"tags": {"host": "web01","group1": "1","dc": "lga"}},{"metric": "sys_cpu","timestamp": 1346846402,"value": 19,"tags": {"host": "web02","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put -system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846403000,"value": 20,"tags": {"host": "web01","group1": "1","dc": "lga"}},{"metric": "sys_cpu","timestamp": 1346846403,"value": 20,"tags": {"host": "web02","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put -system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846404000,"value": 21,"tags": {"host": "web01","group1": "1","dc": "lga"}},{"metric": "sys_cpu","timestamp": 1346846404,"value": 21,"tags": {"host": "web02","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put -system_content curl -u root:taosdata -d '[{"metric": 
"sys_cpu","timestamp": 1346846405000,"value": 22,"tags": {"host": "web01","group1": "1","dc": "lga"}},{"metric": "sys_cpu","timestamp": 1346846405,"value": 22,"tags": {"host": "web02","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put -system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846406000,"value": 23,"tags": {"host": "web01","group1": "1","dc": "lga"}},{"metric": "sys_cpu","timestamp": 1346846406,"value": 23,"tags": {"host": "web02","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put - -system_content curl -u root:taosdata -d 'select count(*) from db.sys_cpu_d_bbb' 127.0.0.1:7111/rest/sql/ -print $system_content -if $system_content != @{"status":"succ","head":["count(*)"],"column_meta":[["count(*)",5,8]],"data":[[7]],"rows":1}@ then - return -1 -endi - -system sh/exec.sh -n dnode1 -s stop -x SIGINT -system sh/exec.sh -n dnode2 -s stop -x SIGINT -system sh/exec.sh -n dnode3 -s stop -x SIGINT -system sh/exec.sh -n dnode4 -s stop -x SIGINT -system sh/exec.sh -n dnode5 -s stop -x SIGINT -system sh/exec.sh -n dnode6 -s stop -x SIGINT -system sh/exec.sh -n dnode7 -s stop -x SIGINT -system sh/exec.sh -n dnode8 -s stop -x SIGINT diff --git a/tests/script/unique/http/testSuite.sim b/tests/script/unique/http/testSuite.sim deleted file mode 100644 index 3a9753e744b84bfea28e40e8b3554cb82d2ebb40..0000000000000000000000000000000000000000 --- a/tests/script/unique/http/testSuite.sim +++ /dev/null @@ -1,2 +0,0 @@ -run unique/http/admin.sim -run general/http/opentsdb.sim \ No newline at end of file diff --git a/tests/script/unique/mnode/mgmt20.sim b/tests/script/unique/mnode/mgmt20.sim deleted file mode 100644 index 8945cffab226ab5dc379057d55e562f5c3ed9cfa..0000000000000000000000000000000000000000 --- a/tests/script/unique/mnode/mgmt20.sim +++ /dev/null @@ -1,88 +0,0 @@ -system sh/stop_dnodes.sh -system sh/deploy.sh -n dnode1 -i 1 -system sh/deploy.sh -n dnode2 -i 2 - -system sh/cfg.sh -n dnode1 -c numOfMnodes -v 2 -system sh/cfg.sh -n dnode2 -c numOfMnodes -v 2 - -system sh/cfg.sh -n dnode1 -c monitor -v 1 -system sh/cfg.sh -n dnode2 -c monitor -v 1 - -print ============== step1 -system sh/exec.sh -n dnode1 -s start -system sh/exec.sh -n dnode2 -s start -sql connect - -print ============== step2 -sql create dnode $hostname2 - -$x = 0 -show2: - $x = $x + 1 - sleep 1000 - if $x == 20 then - return -1 - endi - -sql show mnodes -print dnode1 ==> $data2_1 -print dnode2 ==> $data2_2 -if $data2_1 != master then - goto show2 -endi -if $data2_2 != slave then - goto show2 -endi - -system sh/exec.sh -n dnode1 -s stop -x SIGINT -system sh/exec.sh -n dnode2 -s stop -x SIGINT - -print ============== step3 -system sh/exec.sh -n dnode2 -s start -sleep 10000 - -system sh/exec.sh -n dnode1 -s start -sql connect - -print =============== step4 -sql select * from log.dn1 -$d1_first = $rows -sql select * from log.dn2 -$d2_first = $rows - -$x = 0 -show4: - $x = $x + 1 - sleep 1000 - if $x == 20 then - return -1 - endi - -sql show mnodes -print dnode1 ==> $data2_1 -print dnode2 ==> $data2_2 -if $data2_1 != master then - goto show4 -endi -if $data2_2 != slave then - goto show4 -endi - -sleep 2000 -sql select * from log.dn1 -$d1_second = $rows -sql select * from log.dn2 -$d2_second = $rows - -print dnode1 $d1_first $d1_second -print dnode2 $d2_first $d2_second -if $d1_first >= $d1_second then - return -1 -endi - -if $d2_first >= $d2_second then - return -1 -endi - -system sh/exec.sh -n dnode1 -s stop -x SIGINT -system sh/exec.sh -n dnode2 -s stop -x SIGINT \ No newline at end 
of file diff --git a/tests/script/unique/mnode/mgmt21.sim b/tests/script/unique/mnode/mgmt21.sim deleted file mode 100644 index 8409383309dbde5500b9719cd64fd74ca5e384b2..0000000000000000000000000000000000000000 --- a/tests/script/unique/mnode/mgmt21.sim +++ /dev/null @@ -1,44 +0,0 @@ -system sh/stop_dnodes.sh -system sh/deploy.sh -n dnode1 -i 1 -system sh/deploy.sh -n dnode2 -i 2 - -system sh/cfg.sh -n dnode1 -c numOfMnodes -v 2 -system sh/cfg.sh -n dnode2 -c numOfMnodes -v 2 - -print ============== step1 -system sh/exec.sh -n dnode2 -s start -sleep 10000 - -system sh/exec.sh -n dnode1 -s start -sql connect - -sql show mnodes -print dnode1 ==> $data2_1 -print dnode2 ==> $data2_2 -if $data2_1 != master then - return -1 -endi - -print ============== step2 -sql create dnode $hostname2 - -$x = 0 -show2: - $x = $x + 1 - sleep 2000 - if $x == 5 then - return -1 - endi - -sql show mnodes -x show2 -print dnode1 ==> $data2_1 -print dnode2 ==> $data2_2 -if $data2_1 != master then - goto show2 -endi -if $data2_2 != slave then - goto show2 -endi - -system sh/exec.sh -n dnode1 -s stop -x SIGINT -system sh/exec.sh -n dnode2 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/script/unique/mnode/mgmt22.sim b/tests/script/unique/mnode/mgmt22.sim deleted file mode 100644 index 399805312ba905d55bceffe011cfe074c831684e..0000000000000000000000000000000000000000 --- a/tests/script/unique/mnode/mgmt22.sim +++ /dev/null @@ -1,114 +0,0 @@ -system sh/stop_dnodes.sh -system sh/deploy.sh -n dnode1 -i 1 -system sh/deploy.sh -n dnode2 -i 2 -system sh/deploy.sh -n dnode3 -i 3 - -system sh/cfg.sh -n dnode1 -c numOfMnodes -v 2 -system sh/cfg.sh -n dnode2 -c numOfMnodes -v 2 -system sh/cfg.sh -n dnode3 -c numOfMnodes -v 2 - -print ============== step1 -system sh/exec.sh -n dnode1 -s start -sql connect - -sql show mnodes -print dnode1 ==> $data2_1 -print dnode2 ==> $data2_2 -if $data2_1 != master then - return -1 -endi - -print ============== step2 -system sh/exec.sh -n dnode2 -s start -sql create dnode $hostname2 - -$x = 0 -show2: - $x = $x + 1 - sleep 1000 - if $x == 20 then - return -1 - endi - -sql show mnodes -print dnode1 ==> $data2_1 -print dnode2 ==> $data2_2 -if $data2_1 != master then - goto show2 -endi -if $data2_2 != slave then - goto show2 -endi - -print ============== step3 -sql_error drop dnode $hostname1 -x error1 -print should not drop master - -print ============== step4 -system sh/exec.sh -n dnode1 -s stop -x SIGINT -sleep 3000 -sql_error show mnodes -print error of no master - -print ============== step5 -sql_error drop dnode $hostname1 -print error of no master - -print ============== step6 -system sh/exec.sh -n dnode1 -s start -sleep 2000 -sql close -sql connect - -$x = 0 -show6: - $x = $x + 1 - sleep 1000 - if $x == 20 then - return -1 - endi - -sql show mnodes -x show6 -print dnode1 ==> $data2_1 -print dnode2 ==> $data2_2 -if $data2_1 != master then - goto show6 -endi -if $data2_2 != slave then - goto show6 -endi - -print ============== step7 -system sh/exec.sh -n dnode3 -s start -sql create dnode $hostname3 - -$x = 0 -show7: - $x = $x + 1 - sleep 1000 - if $x == 20 then - return -1 - endi - -sql show mnodes -print dnode1 ==> $data2_1 -print dnode2 ==> $data2_2 -print dnode3 ==> $data2_3 -if $data2_1 != master then - goto show7 -endi -if $data2_2 != slave then - goto show7 -endi -if $data3_3 != null then - goto show7 -endi - -system sh/exec.sh -n dnode1 -s stop -x SIGINT -system sh/exec.sh -n dnode2 -s stop -x SIGINT -system sh/exec.sh -n dnode3 -s stop -x SIGINT -system sh/exec.sh -n 
dnode4 -s stop -x SIGINT -system sh/exec.sh -n dnode5 -s stop -x SIGINT -system sh/exec.sh -n dnode6 -s stop -x SIGINT -system sh/exec.sh -n dnode7 -s stop -x SIGINT -system sh/exec.sh -n dnode8 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/script/unique/mnode/mgmt23.sim b/tests/script/unique/mnode/mgmt23.sim deleted file mode 100644 index 19c7b4ba762d4bf5a73c10c1afa39e927c7a1c91..0000000000000000000000000000000000000000 --- a/tests/script/unique/mnode/mgmt23.sim +++ /dev/null @@ -1,141 +0,0 @@ -system sh/stop_dnodes.sh -system sh/deploy.sh -n dnode1 -i 1 -system sh/deploy.sh -n dnode2 -i 2 -system sh/deploy.sh -n dnode3 -i 3 - -system sh/cfg.sh -n dnode1 -c numOfMnodes -v 2 -system sh/cfg.sh -n dnode2 -c numOfMnodes -v 2 -system sh/cfg.sh -n dnode3 -c numOfMnodes -v 2 - -print ============== step1 -system sh/exec.sh -n dnode1 -s start -sql connect - -sql show mnodes -print dnode1 ==> $data2_1 -print dnode2 ==> $data2_2 -if $data2_1 != master then - return -1 -endi - -print ============== step2 -system sh/exec.sh -n dnode2 -s start -sql create dnode $hostname2 - -$x = 0 -show2: - $x = $x + 1 - sleep 1000 - if $x == 20 then - return -1 - endi - -sql show mnodes -print dnode1 ==> $data2_1 -print dnode2 ==> $data2_2 -if $data2_1 != master then - goto show2 -endi -if $data2_2 != slave then - goto show2 -endi - -print ============== step3 -system sh/exec.sh -n dnode3 -s start -sql create dnode $hostname3 -sleep 8000 - -sql show mnodes -$dnode1Role = $data2_1 -$dnode2Role = $data2_2 -$dnode3Role = $data2_3 -print dnode1 ==> $dnode1Role -print dnode2 ==> $dnode2Role -print dnode3 ==> $dnode3Role - -if $dnode1Role != master then - return -1 -endi -if $dnode2Role != slave then - return -1 -endi -if $dnode3Role != null then - return -1 -endi - -print ============== step4 -sql drop dnode $hostname2 - -$x = 0 -step4: - $x = $x + 1 - sleep 1000 - if $x == 20 then - return -1 - endi - -sql show mnodes -$dnode1Role = $data2_1 -$dnode2Role = $data2_2 -$dnode3Role = $data2_3 -print dnode1 ==> $dnode1Role -print dnode2 ==> $dnode2Role -print dnode3 ==> $dnode3Role - -if $dnode1Role != master then - goto step4 -endi -if $dnode2Role != null then - goto step4 -endi -if $dnode3Role != slave then - goto step4 -endi - -system sh/exec.sh -n dnode2 -s stop - -print ============== step5 -sleep 2000 -sql create dnode $hostname2 -system sh/deploy.sh -n dnode2 -i 2 -system sh/cfg.sh -n dnode2 -c numOfMnodes -v 2 -system sh/exec.sh -n dnode2 -s start - -$x = 0 -step5: - $x = $x + 1 - sleep 1000 - if $x == 20 then - return -1 - endi - -sql show mnodes -$dnode1Role = $data2_1 -$dnode2Role = $data2_2 -$dnode3Role = $data2_3 -print dnode1 ==> $dnode1Role -print dnode2 ==> $dnode2Role -print dnode3 ==> $dnode3Role - -if $dnode1Role != master then - goto step5 -endi -if $dnode2Role != null then - goto step5 -endi -if $dnode3Role != slave then - goto step5 -endi - -print ============== step6 -system sh/exec.sh -n dnode1 -s stop -sql_error show mnodes - -print ============== step7 -sql_error drop dnode $hostname1 - -system sh/exec.sh -n dnode1 -s stop -x SIGINT -system sh/exec.sh -n dnode2 -s stop -x SIGINT -system sh/exec.sh -n dnode3 -s stop -x SIGINT -system sh/exec.sh -n dnode4 -s stop -x SIGINT -system sh/exec.sh -n dnode5 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/script/unique/mnode/mgmt24.sim b/tests/script/unique/mnode/mgmt24.sim deleted file mode 100644 index a7bcc59ac0bfa6163d1e2fddfd3a817b102bfa3c..0000000000000000000000000000000000000000 --- 
a/tests/script/unique/mnode/mgmt24.sim +++ /dev/null @@ -1,84 +0,0 @@ -system sh/stop_dnodes.sh -system sh/deploy.sh -n dnode1 -i 1 -system sh/deploy.sh -n dnode2 -i 2 -system sh/deploy.sh -n dnode3 -i 3 - -system sh/cfg.sh -n dnode1 -c numOfMnodes -v 2 -system sh/cfg.sh -n dnode2 -c numOfMnodes -v 2 -system sh/cfg.sh -n dnode3 -c numOfMnodes -v 2 - -print ============== step1 -system sh/exec.sh -n dnode1 -s start -sql connect - -sql show mnodes -print dnode1 ==> $data2_1 -print dnode2 ==> $data2_2 -if $data2_1 != master then - return -1 -endi - -print ============== step2 -system sh/exec.sh -n dnode2 -s start -sql create dnode $hostname2 - -$x = 0 -show2: - $x = $x + 1 - sleep 2000 - if $x == 10 then - return -1 - endi - -sql show mnodes -print dnode1 ==> $data2_1 -print dnode2 ==> $data2_2 -if $data2_1 != master then - goto show2 -endi -if $data2_2 != slave then - goto show2 -endi - -print ============== step3 -system sh/exec.sh -n dnode1 -s stop -sleep 2000 -sql_error show mnodes - -print ============== step4 -sql_error drop dnode $hostname1 - -print ============== step5 -system sh/exec.sh -n dnode1 -s start -sql_error create dnode $hostname1 - -sql close -sql connect - -$x = 0 -step5: - $x = $x + 1 - sleep 2000 - if $x == 10 then - return -1 - endi - -sql show mnodes -x step5 - -print dnode1 ==> $data2_1 -print dnode2 ==> $data2_2 -if $data2_1 != master then - goto step5 -endi -if $data2_2 != slave then - goto step5 -endi - -system sh/exec.sh -n dnode1 -s stop -x SIGINT -system sh/exec.sh -n dnode2 -s stop -x SIGINT -system sh/exec.sh -n dnode3 -s stop -x SIGINT -system sh/exec.sh -n dnode4 -s stop -x SIGINT -system sh/exec.sh -n dnode5 -s stop -x SIGINT -system sh/exec.sh -n dnode6 -s stop -x SIGINT -system sh/exec.sh -n dnode7 -s stop -x SIGINT -system sh/exec.sh -n dnode8 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/script/unique/mnode/mgmt25.sim b/tests/script/unique/mnode/mgmt25.sim deleted file mode 100644 index 9cca9c844806b138faf52186ffc3184d4876a1d6..0000000000000000000000000000000000000000 --- a/tests/script/unique/mnode/mgmt25.sim +++ /dev/null @@ -1,95 +0,0 @@ -system sh/stop_dnodes.sh -system sh/deploy.sh -n dnode1 -i 1 -system sh/deploy.sh -n dnode2 -i 2 -system sh/deploy.sh -n dnode3 -i 3 - -system sh/cfg.sh -n dnode1 -c numOfMnodes -v 2 -system sh/cfg.sh -n dnode2 -c numOfMnodes -v 2 -system sh/cfg.sh -n dnode3 -c numOfMnodes -v 2 - -print ============== step1 -system sh/exec.sh -n dnode1 -s start -sql connect - -sql show mnodes -print dnode1 ==> $data2_1 -print dnode2 ==> $data2_2 -if $data2_1 != master then - return -1 -endi - -print ============== step2 -system sh/exec.sh -n dnode2 -s start -sql create dnode $hostname2 - -$x = 0 -show2: - $x = $x + 1 - sleep 2000 - if $x == 10 then - return -1 - endi - -sql show mnodes -print dnode1 ==> $data2_1 -print dnode2 ==> $data2_2 -if $data2_1 != master then - goto show2 -endi -if $data2_2 != slave then - goto show2 -endi - -print ============== step3 -system sh/exec.sh -n dnode3 -s start -sql create dnode $hostname3 -sleep 6000 - -sql show mnodes -$dnode1Role = $data2_1 -$dnode2Role = $data2_2 -$dnode3Role = $data2_3 -print dnode1 ==> $dnode1Role -print dnode2 ==> $dnode2Role -print dnode3 ==> $dnode3Role - -if $dnode1Role != master then - return -1 -endi -if $dnode2Role != slave then - return -1 -endi -if $dnode3Role != null then - return -1 -endi - -print ============== step4 -sql drop dnode $hostname2 -sleep 6000 - -sql show mnodes -$dnode1Role = $data2_1 -$dnode2Role = $data2_2 -$dnode3Role = $data2_3 
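# A note on the result addressing used in these checks, inferred from how the
# scripts read it rather than stated anywhere in this diff: $dataC_R is column C of
# the result row keyed by R, so $data2_3 above is the role column of the
# `show mnodes` row for mnode 3, holding "master", "slave", or null when absent.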
-print dnode1 ==> $dnode1Role -print dnode2 ==> $dnode2Role -print dnode3 ==> $dnode3Role - -if $dnode1Role != master then - return -1 -endi -if $dnode2Role != null then - return -1 -endi -if $dnode3Role != slave then - return -1 -endi - -system sh/exec.sh -n dnode1 -s stop -x SIGINT -system sh/exec.sh -n dnode2 -s stop -x SIGINT -system sh/exec.sh -n dnode3 -s stop -x SIGINT -system sh/exec.sh -n dnode4 -s stop -x SIGINT -system sh/exec.sh -n dnode5 -s stop -x SIGINT -system sh/exec.sh -n dnode6 -s stop -x SIGINT -system sh/exec.sh -n dnode7 -s stop -x SIGINT -system sh/exec.sh -n dnode8 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/script/unique/mnode/mgmt26.sim b/tests/script/unique/mnode/mgmt26.sim deleted file mode 100644 index 2816845052e835cf11e0ec7d4ddc71cbdee0ada1..0000000000000000000000000000000000000000 --- a/tests/script/unique/mnode/mgmt26.sim +++ /dev/null @@ -1,123 +0,0 @@ -system sh/stop_dnodes.sh -system sh/deploy.sh -n dnode1 -i 1 -system sh/deploy.sh -n dnode2 -i 2 -system sh/deploy.sh -n dnode3 -i 3 - -system sh/cfg.sh -n dnode1 -c numOfMnodes -v 2 -system sh/cfg.sh -n dnode2 -c numOfMnodes -v 2 -system sh/cfg.sh -n dnode3 -c numOfMnodes -v 2 - -print ============== step1 -system sh/exec.sh -n dnode1 -s start -sql connect - -sql show mnodes -print dnode1 ==> $data2_1 -print dnode2 ==> $data2_2 -if $data2_1 != master then - return -1 -endi - -print ============== step2 -system sh/exec.sh -n dnode2 -s start -sql create dnode $hostname2 - -$x = 0 -show2: - $x = $x + 1 - sleep 2000 - if $x == 10 then - return -1 - endi - -sql show mnodes -print dnode1 ==> $data2_1 -print dnode2 ==> $data2_2 -if $data2_1 != master then - goto show2 -endi -if $data2_2 != slave then - goto show2 -endi - -print ============== step3 -system sh/exec.sh -n dnode3 -s start -sql create dnode $hostname3 -sleep 6000 - -sql show mnodes -$dnode1Role = $data2_1 -$dnode2Role = $data2_2 -$dnode3Role = $data2_3 -print dnode1 ==> $dnode1Role -print dnode2 ==> $dnode2Role -print dnode3 ==> $dnode3Role - -if $dnode1Role != master then - return -1 -endi -if $dnode2Role != slave then - return -1 -endi -if $dnode3Role != null then - return -1 -endi - - -print ============== step4 -sql drop dnode $hostname2 -sleep 6000 - -sql show mnodes -$dnode1Role = $data2_1 -$dnode2Role = $data2_2 -$dnode3Role = $data2_3 -print dnode1 ==> $dnode1Role -print dnode2 ==> $dnode2Role -print dnode3 ==> $dnode3Role - -if $dnode1Role != master then - return -1 -endi -if $dnode2Role != null then - return -1 -endi -if $dnode3Role != slave then - return -1 -endi - -print ============== step5 -system sh/exec.sh -n dnode2 -s stop -system sh/deploy.sh -n dnode2 -i 2 -system sh/cfg.sh -n dnode2 -c numOfMnodes -v 2 -sleep 3000 -system sh/exec.sh -n dnode2 -s start -sql create dnode $hostname2 -sleep 6000 - -sql show mnodes -$dnode1Role = $data2_1 -$dnode2Role = $data2_2 -$dnode3Role = $data2_3 -print dnode1 ==> $dnode1Role -print dnode2 ==> $dnode2Role -print dnode3 ==> $dnode3Role - -if $dnode1Role != master then - return -1 -endi -if $dnode2Role != null then - return -1 -endi -if $dnode3Role != slave then - return -1 -endi - -system sh/exec.sh -n dnode1 -s stop -x SIGINT -system sh/exec.sh -n dnode2 -s stop -x SIGINT -system sh/exec.sh -n dnode3 -s stop -x SIGINT -system sh/exec.sh -n dnode4 -s stop -x SIGINT -system sh/exec.sh -n dnode5 -s stop -x SIGINT -system sh/exec.sh -n dnode6 -s stop -x SIGINT -system sh/exec.sh -n dnode7 -s stop -x SIGINT -system sh/exec.sh -n dnode8 -s stop -x SIGINT \ No newline at end of file 
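# The mgmt2x cases deleted above all rely on the same convergence idiom: poll
# `show mnodes` in a bounded retry loop until the expected master/slave roles
# appear, and fail the case after N attempts. A condensed sketch of that idiom,
# assembled from the deleted files rather than copied from any single one
# (label name `waitroles` is illustrative):
#
#   $x = 0
#   waitroles:
#     $x = $x + 1
#     sleep 1000
#     if $x == 20 then
#       return -1
#     endi
#   sql show mnodes -x waitroles
#   print dnode1 ==> $data2_1
#   print dnode2 ==> $data2_2
#   if $data2_1 != master then
#     goto waitroles
#   endi
#   if $data2_2 != slave then
#     goto waitroles
#   endi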
diff --git a/tests/script/unique/mnode/mgmt30.sim b/tests/script/unique/mnode/mgmt30.sim deleted file mode 100644 index d0858c0d6cdffa1cb1cd7f2ba570ae0521f412d5..0000000000000000000000000000000000000000 --- a/tests/script/unique/mnode/mgmt30.sim +++ /dev/null @@ -1,68 +0,0 @@ -system sh/stop_dnodes.sh -system sh/deploy.sh -n dnode1 -i 1 -system sh/deploy.sh -n dnode2 -i 2 -system sh/deploy.sh -n dnode3 -i 3 - -system sh/cfg.sh -n dnode1 -c numOfMnodes -v 3 -system sh/cfg.sh -n dnode2 -c numOfMnodes -v 3 -system sh/cfg.sh -n dnode3 -c numOfMnodes -v 3 - -system sh/cfg.sh -n dnode1 -c balanceInterval -v 3000 -system sh/cfg.sh -n dnode2 -c balanceInterval -v 3000 -system sh/cfg.sh -n dnode3 -c balanceInterval -v 3000 - -print ============== step1 -system sh/exec.sh -n dnode1 -s start -sql connect - -sql show mnodes -print dnode1 ==> $data2_1 -print dnode2 ==> $data2_2 -print dnode3 ==> $data3_3 -if $data2_1 != master then - return -1 -endi -if $data3_2 != null then - return -1 -endi -if $data3_3 != null then - return -1 -endi - -print ============== step2 -system sh/exec.sh -n dnode2 -s start -system sh/exec.sh -n dnode3 -s start -sleep 3000 - -sql create dnode $hostname2 -sql create dnode $hostname3 - -$x = 0 -step2: - $x = $x + 1 - sleep 1000 - if $x == 10 then - return -1 - endi - -sql show mnodes -$dnode1Role = $data2_1 -$dnode2Role = $data2_2 -$dnode3Role = $data2_3 -print dnode1 ==> $dnode1Role -print dnode2 ==> $dnode2Role -print dnode3 ==> $dnode3Role - -if $dnode1Role != master then - goto step2 -endi -if $dnode2Role != slave then - goto step2 -endi -if $dnode3Role != slave then - goto step2 -endi - -system sh/exec.sh -n dnode1 -s stop -x SIGINT -system sh/exec.sh -n dnode2 -s stop -x SIGINT -system sh/exec.sh -n dnode3 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/script/unique/mnode/mgmt33.sim b/tests/script/unique/mnode/mgmt33.sim deleted file mode 100644 index ce7cdce35d8c0463564f46d26a0711d39340c8bf..0000000000000000000000000000000000000000 --- a/tests/script/unique/mnode/mgmt33.sim +++ /dev/null @@ -1,214 +0,0 @@ -system sh/stop_dnodes.sh -system sh/deploy.sh -n dnode1 -i 1 -system sh/deploy.sh -n dnode2 -i 2 -system sh/deploy.sh -n dnode3 -i 3 - -system sh/cfg.sh -n dnode1 -c numOfMnodes -v 3 -system sh/cfg.sh -n dnode2 -c numOfMnodes -v 3 -system sh/cfg.sh -n dnode3 -c numOfMnodes -v 3 - -print ============== step1 -system sh/exec.sh -n dnode1 -s start -sql connect - -sql show mnodes -print dnode1 ==> $data2_1 -print dnode2 ==> $data2_2 -print dnode3 ==> $data3_3 -if $data2_1 != master then - return -1 -endi -if $data3_2 != null then - return -1 -endi -if $data3_3 != null then - return -1 -endi - -print ============== step2 -system sh/exec.sh -n dnode2 -s start -sql create dnode $hostname2 - -$x = 0 -step2: - $x = $x + 1 - sleep 1000 - if $x == 10 then - return -1 - endi - -sql show mnodes -$dnode1Role = $data2_1 -$dnode2Role = $data2_2 -$dnode3Role = $data2_3 -print dnode1 ==> $dnode1Role -print dnode2 ==> $dnode2Role -print dnode3 ==> $dnode3Role - -if $dnode1Role != master then - goto step2 -endi -if $dnode2Role != slave then - goto step2 -endi -if $dnode3Role != null then - goto step2 -endi - -print ============== step3 -system sh/exec.sh -n dnode3 -s start -sql create dnode $hostname3 - -$x = 0 -step3: - $x = $x + 1 - sleep 1000 - if $x == 10 then - return -1 - endi - -sql show mnodes -$dnode1Role = $data2_1 -$dnode2Role = $data2_2 -$dnode3Role = $data2_3 -print dnode1 ==> $dnode1Role -print dnode2 ==> $dnode2Role -print dnode3 ==> $dnode3Role - -if 
$dnode1Role != master then - goto step3 -endi -if $dnode2Role != slave then - goto step3 -endi -if $dnode3Role != slave then - goto step3 -endi - -print ============== step4 -sql drop dnode $hostname2 - -$x = 0 -step4: - $x = $x + 1 - sleep 1000 - if $x == 10 then - return -1 - endi - -sql show mnodes -$dnode1Role = $data2_1 -$dnode2Role = $data2_2 -$dnode3Role = $data2_3 -print dnode1 ==> $dnode1Role -print dnode2 ==> $dnode2Role -print dnode3 ==> $dnode3Role - -if $dnode1Role != master then - goto step4 -endi -if $dnode2Role != null then - goto step4 -endi -if $dnode3Role != slave then - goto step4 -endi - -system sh/exec.sh -n dnode2 -s stop -system sh/deploy.sh -n dnode2 -i 2 -system sh/cfg.sh -n dnode2 -c numOfMnodes -v 3 -system sh/exec.sh -n dnode2 -s start - -print ============== step5 -sql create dnode $hostname2 - -$x = 0 -step5: - $x = $x + 1 - sleep 1000 - if $x == 10 then - return -1 - endi - -sql show mnodes -$dnode1Role = $data2_1 -$dnode2Role = $data2_4 -$dnode3Role = $data2_3 -print dnode1 ==> $dnode1Role -print dnode2 ==> $dnode2Role -print dnode3 ==> $dnode3Role - -if $dnode1Role != master then - goto step5 -endi -if $dnode2Role != slave then - goto step5 -endi -if $dnode3Role != slave then - goto step5 -endi - -print ============== step6 -system sh/exec.sh -n dnode1 -s stop -$x = 0 -step6: - $x = $x + 1 - sleep 1000 - if $x == 10 then - return -1 - endi - -sql show mnodes -x step6 -$dnode1Role = $data2_1 -$dnode2Role = $data2_4 -$dnode3Role = $data2_3 -print dnode1 ==> $dnode1Role -print dnode2 ==> $dnode2Role -print dnode3 ==> $dnode3Role - -if $dnode1Role != offline then - goto step6 -endi -#if $dnode2Role != master then -# return -1 -#endi -#if $dnode3Role != slave then -# return -1 -#endi - -print ============== step7 -sql drop dnode $hostname1 -$x = 0 -step7: - $x = $x + 1 - sleep 1000 - if $x == 10 then - return -1 - endi - -sql show mnodes -x step7 -$dnode1Role = $data2_1 -$dnode2Role = $data2_2 -$dnode3Role = $data2_3 -print dnode1 ==> $dnode1Role -print dnode2 ==> $dnode2Role -print dnode3 ==> $dnode3Role - -if $dnode1Role != null then - goto step7 -endi -#if $dnode2Role != master then -# return -1 -#endi -#if $dnode3Role != slave then -# return -1 -#endi - -system sh/exec.sh -n dnode1 -s stop -x SIGINT -system sh/exec.sh -n dnode2 -s stop -x SIGINT -system sh/exec.sh -n dnode3 -s stop -x SIGINT -system sh/exec.sh -n dnode4 -s stop -x SIGINT -system sh/exec.sh -n dnode5 -s stop -x SIGINT -system sh/exec.sh -n dnode6 -s stop -x SIGINT -system sh/exec.sh -n dnode7 -s stop -x SIGINT -system sh/exec.sh -n dnode8 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/script/unique/mnode/mgmt34.sim b/tests/script/unique/mnode/mgmt34.sim deleted file mode 100644 index d8a46b0955f59273279bbbc5c89c07c05db672d7..0000000000000000000000000000000000000000 --- a/tests/script/unique/mnode/mgmt34.sim +++ /dev/null @@ -1,269 +0,0 @@ -system sh/stop_dnodes.sh - -system sh/deploy.sh -n dnode1 -i 1 -system sh/deploy.sh -n dnode2 -i 2 -system sh/deploy.sh -n dnode3 -i 3 -system sh/deploy.sh -n dnode4 -i 4 - -system sh/cfg.sh -n dnode1 -c numOfMnodes -v 3 -system sh/cfg.sh -n dnode2 -c numOfMnodes -v 3 -system sh/cfg.sh -n dnode3 -c numOfMnodes -v 3 -system sh/cfg.sh -n dnode4 -c numOfMnodes -v 3 - -print ============== step1 -system sh/exec.sh -n dnode1 -s start -sql connect - -sql show mnodes -print dnode1 ==> $data2_1 -print dnode2 ==> $data2_2 -print dnode3 ==> $data3_3 -if $data2_1 != master then - return -1 -endi -if $data3_2 != null then - return -1 -endi -if 
$data3_3 != null then - return -1 -endi - -print ============== step2 -system sh/exec.sh -n dnode2 -s start -sql create dnode $hostname2 -$x = 0 -step2: - $x = $x + 1 - sleep 1000 - if $x == 10 then - return -1 - endi - -sql show mnodes -$dnode1Role = $data2_1 -$dnode2Role = $data2_2 -$dnode3Role = $data2_3 -$dnode4Role = $data2_4 -print dnode1 ==> $dnode1Role -print dnode2 ==> $dnode2Role -print dnode3 ==> $dnode3Role -print dnode4 ==> $dnode4Role - -if $dnode1Role != master then - goto step2 -endi -if $dnode2Role != slave then - goto step2 -endi -if $dnode3Role != null then - goto step2 -endi -if $dnode4Role != null then - goto step2 -endi - -print ============== step3 -system sh/exec.sh -n dnode3 -s start -sql create dnode $hostname3 - -$x = 0 -step3: - $x = $x + 1 - sleep 1000 - if $x == 10 then - return -1 - endi - -sql show mnodes -$dnode1Role = $data2_1 -$dnode2Role = $data2_2 -$dnode3Role = $data2_3 -$dnode4Role = $data2_4 -print dnode1 ==> $dnode1Role -print dnode2 ==> $dnode2Role -print dnode3 ==> $dnode3Role -print dnode4 ==> $dnode4Role - -if $dnode1Role != master then - goto step3 -endi -if $dnode2Role != slave then - goto step3 -endi -if $dnode3Role != slave then - goto step3 -endi -if $dnode4Role != null then - goto step3 -endi - - -print ============== step4 -system sh/exec.sh -n dnode4 -s start -sql create dnode $hostname4 -$x = 0 -step4: - $x = $x + 1 - sleep 1000 - if $x == 10 then - return -1 - endi - -sql show mnodes -$dnode1Role = $data2_1 -$dnode2Role = $data2_2 -$dnode3Role = $data2_3 -$dnode4Role = $data2_4 -print dnode1 ==> $dnode1Role -print dnode2 ==> $dnode2Role -print dnode3 ==> $dnode3Role -print dnode4 ==> $dnode4Role - -if $dnode1Role != master then - goto step4 -endi -if $dnode2Role != slave then - goto step4 -endi -if $dnode3Role != slave then - goto step4 -endi -if $dnode4Role != null then - goto step4 -endi - -print ============== step5 -sql drop dnode $hostname2 -$x = 0 -step5: - $x = $x + 1 - sleep 1000 - if $x == 10 then - return -1 - endi - -sql show mnodes -$dnode1Role = $data2_1 -$dnode2Role = $data2_2 -$dnode3Role = $data2_3 -$dnode4Role = $data2_4 -print dnode1 ==> $dnode1Role -print dnode2 ==> $dnode2Role -print dnode3 ==> $dnode3Role -print dnode4 ==> $dnode4Role - -if $dnode1Role != master then - goto step5 -endi -if $dnode2Role != null then - goto step5 -endi -if $dnode3Role != slave then - goto step5 -endi -if $dnode4Role != slave then - goto step5 -endi - -system sh/exec.sh -n dnode2 -s stop -system sh/deploy.sh -n dnode2 -i 2 -system sh/cfg.sh -n dnode2 -c numOfMnodes -v 3 -system sh/exec.sh -n dnode2 -s start - -print ============== step6 -sql create dnode $hostname2 -$x = 0 -step6: - $x = $x + 1 - sleep 1000 - if $x == 10 then - return -1 - endi - -sql show mnodes -$dnode1Role = $data2_1 -$dnode2Role = $data2_2 -$dnode3Role = $data2_3 -$dnode4Role = $data2_4 -print dnode1 ==> $dnode1Role -print dnode2 ==> $dnode2Role -print dnode3 ==> $dnode3Role -print dnode4 ==> $dnode4Role - -if $dnode1Role != master then - goto step6 -endi -if $dnode2Role != null then - goto step6 -endi -if $dnode3Role != slave then - goto step6 -endi -if $dnode4Role != slave then - goto step6 -endi - -print ============== step7 -system sh/exec.sh -n dnode1 -s stop -$x = 0 -step7: - $x = $x + 1 - sleep 1000 - if $x == 10 then - return -1 - endi - -sql show mnodes -x step7 -$dnode1Role = $data2_1 -$dnode2Role = $data2_2 -$dnode3Role = $data2_3 -$dnode4Role = $data2_4 -print dnode1 ==> $dnode1Role -print dnode2 ==> $dnode2Role -print dnode3 ==> $dnode3Role -print 
dnode4 ==> $dnode4Role - -if $dnode1Role != offline then - goto step7 -endi - -print ============== step8 -sql drop dnode $hostname1 -step8: - $x = $x + 1 - sleep 1000 - if $x == 10 then - return -1 - endi - -sql show mnodes -x step8 -$dnode1Role = $data2_1 -$dnode2Role = $data2_5 -$dnode3Role = $data2_3 -$dnode4Role = $data2_4 -print dnode1 ==> $dnode1Role -print dnode2 ==> $dnode2Role -print dnode3 ==> $dnode3Role -print dnode4 ==> $dnode4Role - -if $dnode1Role != null then - goto step8 -endi -if $dnode2Role != slave then - goto step8 -endi -#if $dnode3Role != master then -# return -1 -#endi -#if $dnode4Role != slave then -# return -1 -#endi - -system sh/exec.sh -n dnode1 -s stop -x SIGINT -system sh/exec.sh -n dnode2 -s stop -x SIGINT -system sh/exec.sh -n dnode3 -s stop -x SIGINT -system sh/exec.sh -n dnode4 -s stop -x SIGINT -system sh/exec.sh -n dnode5 -s stop -x SIGINT -system sh/exec.sh -n dnode6 -s stop -x SIGINT -system sh/exec.sh -n dnode7 -s stop -x SIGINT -system sh/exec.sh -n dnode8 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/script/unique/mnode/mgmtr2.sim b/tests/script/unique/mnode/mgmtr2.sim deleted file mode 100644 index 5afb41905846bff3ce9894e928245a7d34078354..0000000000000000000000000000000000000000 --- a/tests/script/unique/mnode/mgmtr2.sim +++ /dev/null @@ -1,87 +0,0 @@ -system sh/stop_dnodes.sh -system sh/deploy.sh -n dnode1 -i 1 -system sh/deploy.sh -n dnode2 -i 2 -system sh/deploy.sh -n dnode3 -i 3 - -system sh/cfg.sh -n dnode1 -c numOfMnodes -v 2 -system sh/cfg.sh -n dnode2 -c numOfMnodes -v 2 -system sh/cfg.sh -n dnode3 -c numOfMnodes -v 2 - -print ============== step1 -system sh/exec.sh -n dnode1 -s start -sleep 2000 -sql connect - -sql show mnodes -$dnode1Role = $data2_1 -$dnode2Role = $data2_2 -$dnode3Role = $data2_3 -print dnode1 ==> $dnode1Role -print dnode2 ==> $dnode2Role -print dnode3 ==> $dnode3Role - -if $dnode1Role != master then - return -1 -endi -if $dnode2Role != null then - return -1 -endi -if $dnode3Role != null then - return -1 -endi - -print ============== step2 -sql create dnode $hostname2 -sql create dnode $hostname3 - -print ============== step3 -print ========= start dnode2 and dnode3 - -system sh/exec.sh -n dnode2 -s start -sleep 1000 -system sh/exec.sh -n dnode3 -s start - -sleep 8000 -system sh/exec.sh -n dnode2 -s stop -system sh/exec.sh -n dnode3 -s stop -sleep 4000 -system sh/exec.sh -n dnode2 -s start -system sh/exec.sh -n dnode3 -s start -sleep 4000 -system sh/exec.sh -n dnode2 -s stop -system sh/exec.sh -n dnode3 -s stop -sleep 4000 -system sh/exec.sh -n dnode2 -s start -system sh/exec.sh -n dnode3 -s start - -print ============== step4 -$x = 0 -step4: - $x = $x + 1 - sleep 1000 - if $x == 20 then - return -1 - endi - -sql show mnodes - -$dnode1Role = $data2_1 -$dnode2Role = $data2_2 -$dnode3Role = $data2_3 -print dnode1 ==> $dnode1Role -print dnode2 ==> $dnode2Role -print dnode3 ==> $dnode3Role - -if $dnode1Role != master then - goto step4 -endi -if $dnode2Role != slave then - goto step4 -endi -if $dnode3Role != null then - goto step4 -endi - -system sh/exec.sh -n dnode1 -s stop -x SIGINT -system sh/exec.sh -n dnode2 -s stop -x SIGINT -system sh/exec.sh -n dnode3 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/script/unique/mnode/testSuite.sim b/tests/script/unique/mnode/testSuite.sim deleted file mode 100644 index b9adbe06a282548d56d7e7feb8a36f99198d8c0d..0000000000000000000000000000000000000000 --- a/tests/script/unique/mnode/testSuite.sim +++ /dev/null @@ -1,9 +0,0 @@ -run 
unique/mnode/mgmt21.sim -run unique/mnode/mgmt22.sim -run unique/mnode/mgmt23.sim -run unique/mnode/mgmt24.sim -run unique/mnode/mgmt25.sim -run unique/mnode/mgmt26.sim -run unique/mnode/mgmt33.sim -run unique/mnode/mgmt34.sim -run unique/mnode/mgmtr2.sim diff --git a/tests/script/unique/stream/metrics_balance.sim b/tests/script/unique/stream/metrics_balance.sim deleted file mode 100644 index ff48c2236709635c8d1a790104b0185144a96866..0000000000000000000000000000000000000000 --- a/tests/script/unique/stream/metrics_balance.sim +++ /dev/null @@ -1,312 +0,0 @@ -system sh/stop_dnodes.sh - - - -system sh/deploy.sh -n dnode1 -i 1 -system sh/deploy.sh -n dnode2 -i 2 -system sh/cfg.sh -n dnode1 -c statusInterval -v 1 -system sh/cfg.sh -n dnode2 -c statusInterval -v 1 -system sh/cfg.sh -n dnode1 -c balanceInterval -v 10 -system sh/cfg.sh -n dnode2 -c balanceInterval -v 10 -system sh/cfg.sh -n dnode1 -c walLevel -v 1 -system sh/cfg.sh -n dnode2 -c walLevel -v 1 -system sh/cfg.sh -n dnode1 -c mnodeEqualVnodeNum -v 0 -system sh/cfg.sh -n dnode2 -c mnodeEqualVnodeNum -v 0 -system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 4 -system sh/cfg.sh -n dnode2 -c maxTablesPerVnode -v 4 -system sh/cfg.sh -n dnode1 -c numOfMnodes -v 2 -system sh/cfg.sh -n dnode2 -c numOfMnodes -v 2 - -$dbPrefix = db -$tbPrefix = tb -$mtPrefix = mt -$stPrefix = st -$tbNum = 3 -$rowNum = 200 - -print ========= start dnode1 -system sh/exec.sh -n dnode1 -s start -sql connect - -print ============== step1 -$db = $dbPrefix -sql create database $db -sql use $db - -$i = 0 -$st = $stPrefix . $i -$mt = $mtPrefix . $i -$tbNum = 3 -sql create table $mt (ts timestamp, tbcol int, tbcol2 float) TAGS(tgcol int) -while $i < $tbNum - $tb = $tbPrefix . $i - sql create table $tb using $mt tags( $i ) - if $i == 0 then - sleep 2000 - endi - - $x = 0 - $y = 0 - while $y < $rowNum - $ms = $x . s - sql insert into $tb values (now + $ms , $y , $y ) - $x = $x + 1 - $y = $y + 1 - endw - - $i = $i + 1 -endw -sql create table $st as select count(*), count(tbcol), count(tbcol2) from $tb interval(10s) - -$st = $stPrefix . $i -$mt = $mtPrefix . $i -$tbNum = 6 -sql create table $mt (ts timestamp, tbcol int, tbcol2 float) TAGS(tgcol int) -while $i < $tbNum - $tb = $tbPrefix . $i - sql create table $tb using $mt tags( $i ) - if $i == 0 then - sleep 2000 - endi - - $x = 0 - $y = 0 - while $y < $rowNum - $ms = $x . s - sql insert into $tb values (now + $ms , $y , $y ) - $x = $x + 1 - $y = $y + 1 - endw - - $i = $i + 1 -endw -sql create table $st as select count(*), count(tbcol), count(tbcol2) from $tb interval(10s) - -$st = $stPrefix . $i -$mt = $mtPrefix . $i -$tbNum = 9 -sql create table $mt (ts timestamp, tbcol int, tbcol2 float) TAGS(tgcol int) -while $i < $tbNum - $tb = $tbPrefix . $i - sql create table $tb using $mt tags( $i ) - if $i == 0 then - sleep 2000 - endi - - $x = 0 - $y = 0 - while $y < $rowNum - $ms = $x . s - sql insert into $tb values (now + $ms , $y , $y ) - $x = $x + 1 - $y = $y + 1 - endw - - $i = $i + 1 -endw -sql create table $st as select count(*), count(tbcol), count(tbcol2) from $tb interval(10s) - -$st = $stPrefix . $i -$mt = $mtPrefix . $i -$tbNum = 12 -sql create table $mt (ts timestamp, tbcol int, tbcol2 float) TAGS(tgcol int) -while $i < $tbNum - $tb = $tbPrefix . $i - sql create table $tb using $mt tags( $i ) - if $i == 0 then - sleep 2000 - endi - - $x = 0 - $y = 0 - while $y < $rowNum - $ms = $x . 
s - sql insert into $tb values (now + $ms , $y , $y ) - $x = $x + 1 - $y = $y + 1 - endw - - $i = $i + 1 -endw -sql create table $st as select count(*), count(tbcol), count(tbcol2) from $tb interval(10s) - - -print =============== step2 - -sql show tables -if $rows != 16 then - return -1 -endi - -print =============== step3 -print sleep 22 seconds -sleep 22000 - -$i = 0 -$mt = $mtPrefix . $i -$st = $stPrefix . $i -sql select * from $st -$r0 = $rows -print $st ==> $r0 $data00 $data01 $data10 $data11 - -$i = 3 -$mt = $mtPrefix . $i -$st = $stPrefix . $i -sql select * from $st -$r3 = $rows -print $st ==> $r3 $data00 $data01 $data10 $data11 - -$i = 6 -$mt = $mtPrefix . $i -$st = $stPrefix . $i -sql select * from $st -$r6 = $rows -print $st ==> $r6 $data00 $data01 $data10 $data11 - -$i = 9 -$mt = $mtPrefix . $i -$st = $stPrefix . $i -sql select * from $st -$r9 = $rows -print $st ==> $r9 $data00 $data01 $data10 $data11 - -print rows0=>$r0 rows3=>$r3 rows6=>$r6 rows9=>$r9 - -$x = 0 -show1: - $x = $x + 1 - sleep 2000 - if $x == 20 then - return -1 - endi -sql show dnodes -x show1 -$dnode1Vnodes = $data3_192.168.0.1 -print dnode1 $dnode1Vnodes -$dnode2Vnodes = $data3_192.168.0.2 -print dnode2 $dnode2Vnodes - -if $dnode1Vnodes != 0 then - goto show1 -endi -if $dnode2Vnodes != NULL then - goto show1 -endi - -print =============== step4 start dnode2 -sql create dnode $hostname2 -system sh/exec.sh -n dnode2 -s start -sleep 8000 - -$x = 0 -show2: - $x = $x + 1 - sleep 2000 - if $x == 20 then - return -1 - endi -sql show dnodes -x show2 -$dnode1Vnodes = $data3_192.168.0.1 -print dnode1 $dnode1Vnodes -$dnode2Vnodes = $data3_192.168.0.2 -print dnode2 $dnode2Vnodes - -if $dnode1Vnodes != 2 then - goto show2 -endi -if $dnode2Vnodes != 2 then - goto show2 -endi - -print rows0=>$r0 rows3=>$r3 rows6=>$r6 rows9=>$r9 -print =============== step5 -print sleep 22 seconds -sleep 22000 - -print =============== step6 -$i = 0 -$tb = $tbPrefix . $i -$st = $stPrefix . $i -sql select * from $tb -if $rows != $rowNum then - return -1 -endi - -$i = 3 -$tb = $tbPrefix . $i -$st = $stPrefix . $i -sql select * from $tb -if $rows != $rowNum then - return -1 -endi - -$i = 6 -$tb = $tbPrefix . $i -$st = $stPrefix . $i -sql select * from $tb -if $rows != $rowNum then - return -1 -endi - -$i = 9 -$tb = $tbPrefix . $i -$st = $stPrefix . $i -sql select * from $tb -if $rows != $rowNum then - return -1 -endi - -print rows0=>$r0 rows3=>$r3 rows6=>$r6 rows9=>$r9 -print =============== step7 -$i = 0 -$tb = $tbPrefix . $i -$st = $stPrefix . $i -sql select * from $st -print $st ==> $r0 $rows , $data00 $data01 $data10 $data11 -if $rows == 0 then - return -1 -endi -if $rows <= $r0 then - return -1 -endi - -$i = 3 -$tb = $tbPrefix . $i -$st = $stPrefix . $i -sql select * from $st -print $st ==> $r3 $rows , $data00 $data01 $data10 $data11 -if $rows == 0 then - return -1 -endi -if $rows <= $r3 then - return -1 -endi - - -$i = 6 -$tb = $tbPrefix . $i -$st = $stPrefix . $i -sql select * from $st -print $st ==> $r6 $rows , $data00 $data01 $data10 $data11 -if $rows == 0 then - return -1 -endi -if $rows <= $r6 then - return -1 -endi - -$i = 9 -$tb = $tbPrefix . $i -$st = $stPrefix . 
$i -sql select * from $st -print $st ==> $r0 $rows , $data00 $data01 $data10 $data11 -if $rows == 0 then - return -1 -endi -if $rows <= $r9 then - return -1 -endi - -print =============== clear -system sh/exec.sh -n dnode1 -s stop -system sh/exec.sh -n dnode2 -s stop - diff --git a/tests/script/unique/stream/metrics_replica1_dnode2.sim b/tests/script/unique/stream/metrics_replica1_dnode2.sim deleted file mode 100644 index 20c37cefc39f8fa6393d49934adb046f409fca25..0000000000000000000000000000000000000000 --- a/tests/script/unique/stream/metrics_replica1_dnode2.sim +++ /dev/null @@ -1,260 +0,0 @@ -system sh/stop_dnodes.sh - - - -system sh/deploy.sh -n dnode1 -i 1 -system sh/deploy.sh -n dnode2 -i 2 -system sh/cfg.sh -n dnode1 -c walLevel -v 1 -system sh/cfg.sh -n dnode2 -c walLevel -v 1 -system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4 -system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4 -system sh/exec.sh -n dnode1 -s start - -sql connect - -sql create dnode $hostname2 -system sh/exec.sh -n dnode2 -s start -$x = 0 -createDnode: - $x = $x + 1 - sleep 1000 - if $x == 20 then - return -1 - endi -sql show dnodes; -if $data4_192.168.0.2 == offline then - goto createDnode -endi -print ======================== dnode1 start - -$dbPrefix = m1d_db -$tbPrefix = m1d_tb -$mtPrefix = m1d_mt -$stPrefix = m1d_st -$tbNum = 10 -$rowNum = 20 -$totalNum = 200 - -print =============== step1 -$i = 0 -$db = $dbPrefix . $i -$mt = $mtPrefix . $i -$st = $stPrefix . $i - -sql drop database $db -x step1 -step1: -sql create database $db -sql use $db -sql create table $mt (ts timestamp, tbcol int, tbcol2 float) TAGS(tgcol int) - -$i = 0 -while $i < $tbNum - $tb = $tbPrefix . $i - sql create table $tb using $mt tags( $i ) - - $x = -1440 - $y = 0 - while $y < $rowNum - $ms = $x . m - sql insert into $tb values (now $ms , $y , $y ) - $x = $x + 1 - $y = $y + 1 - endw - - $i = $i + 1 -endw - -sleep 100 - -print =============== step2 c1 - -sql select count(*) from $mt interval(1d) -print select count(*) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . c1 -sql create table $st as select count(*) from $mt interval(1d) - -print =============== step3 c2 -sql select count(tbcol) from $mt interval(1d) -print select count(tbcol) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . c2 -sql create table $st as select count(tbcol) from $mt interval(1d) - -print =============== step4 c3 -sql select count(tbcol2) from $mt interval(1d) -print select count(tbcol2) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . c3 -sql create table $st as select count(tbcol2) from $mt interval(1d) - -print =============== step5 avg -sql select avg(tbcol) from $mt interval(1d) -print select avg(tbcol) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 9.500000000 then - return -1 -endi - -$st = $stPrefix . av -sql create table $st as select avg(tbcol) from $mt interval(1d) - -print =============== step6 su -sql select sum(tbcol) from $mt interval(1d) -print select sum(tbcol) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 1900 then - return -1 -endi - -$st = $stPrefix . su -sql create table $st as select sum(tbcol) from $mt interval(1d) - -print =============== step7 mi -sql select min(tbcol) from $mt interval(1d) -print select min(tbcol) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 0 then - return -1 -endi - -$st = $stPrefix . 
mi -sql create table $st as select min(tbcol) from $mt interval(1d) - -print =============== step8 ma -sql select max(tbcol) from $mt interval(1d) -print select max(tbcol) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 19 then - return -1 -endi - -$st = $stPrefix . ma -sql create table $st as select max(tbcol) from $mt interval(1d) - -print =============== step9 fi -sql select first(tbcol) from $mt interval(1d) -print select first(tbcol) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 0 then - return -1 -endi - -$st = $stPrefix . fi -sql create table $st as select first(tbcol) from $mt interval(1d) - -print =============== step10 la -sql select last(tbcol) from $mt interval(1d) -print select last(tbcol) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 19 then - return -1 -endi - -$st = $stPrefix . la -sql create table $st as select last(tbcol) from $mt interval(1d) - -print =============== step11 wh -sql select count(tbcol) from $mt where ts < now + 4m interval(1d) -print select count(tbcol) from $mt where ts < now + 4m interval(1d) ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . wh -#sql create table $st as select count(tbcol) from $mt where ts < now + 4m interval(1d) - -print =============== step12 as -sql select count(tbcol) from $mt interval(1d) -print select count(tbcol) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . as -sql create table $st as select count(tbcol) as c from $mt interval(1d) - -print =============== step13 -print sleep 32 seconds -sleep 32000 - -print =============== step14 -$st = $stPrefix . c1 -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . c2 -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . c3 -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . av -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 9.500000000 then - return -1 -endi - -$st = $stPrefix . su -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 1900 then - return -1 -endi - -$st = $stPrefix . mi -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 0 then - return -1 -endi - -$st = $stPrefix . ma -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 19 then - return -1 -endi - -$st = $stPrefix . fi -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 0 then - return -1 -endi - -$st = $stPrefix . la -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 19 then - return -1 -endi - -$st = $stPrefix . wh -#sql select * from $st -#print ===> select * from $st ===> $data00 $data01 -#if $data01 != 200 then -# return -1 -#endi - -$st = $stPrefix . 
as -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi diff --git a/tests/script/unique/stream/metrics_replica2_dnode2.sim b/tests/script/unique/stream/metrics_replica2_dnode2.sim deleted file mode 100644 index aa8c1871017982cecc695abc8f64d732a8a7fc4e..0000000000000000000000000000000000000000 --- a/tests/script/unique/stream/metrics_replica2_dnode2.sim +++ /dev/null @@ -1,260 +0,0 @@ -system sh/stop_dnodes.sh - - - -system sh/deploy.sh -n dnode1 -i 1 -system sh/deploy.sh -n dnode2 -i 2 -system sh/cfg.sh -n dnode1 -c walLevel -v 1 -system sh/cfg.sh -n dnode2 -c walLevel -v 1 -system sh/exec.sh -n dnode1 -s start - -sql connect - -sql create dnode $hostname2 -system sh/exec.sh -n dnode2 -s start -$x = 0 -createDnode: - $x = $x + 1 - sleep 1000 - if $x == 20 then - return -1 - endi -sql show dnodes; -if $data4_192.168.0.2 == offline then - goto createDnode -endi - - -print ======================== dnode1 start - -$dbPrefix = m2d_db -$tbPrefix = m2d_tb -$mtPrefix = m2d_mt -$stPrefix = m2d_st -$tbNum = 10 -$rowNum = 20 -$totalNum = 200 - -print =============== step1 -$i = 0 -$db = $dbPrefix . $i -$mt = $mtPrefix . $i -$st = $stPrefix . $i - -sql drop database $db -x step1 -step1: -sql create database $db replica 2 -sql use $db -sql create table $mt (ts timestamp, tbcol int, tbcol2 float) TAGS(tgcol int) - -$i = 0 -while $i < $tbNum - $tb = $tbPrefix . $i - sql create table $tb using $mt tags( $i ) - - $x = -1440 - $y = 0 - while $y < $rowNum - $ms = $x . m - sql insert into $tb values (now $ms , $y , $y ) - $x = $x + 1 - $y = $y + 1 - endw - - $i = $i + 1 -endw - -sleep 100 - -print =============== step2 c1 - -sql select count(*) from $mt interval(1d) -print select count(*) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . c1 -sql create table $st as select count(*) from $mt interval(1d) - -print =============== step3 c2 -sql select count(tbcol) from $mt interval(1d) -print select count(tbcol) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . c2 -sql create table $st as select count(tbcol) from $mt interval(1d) - -print =============== step4 c3 -sql select count(tbcol2) from $mt interval(1d) -print select count(tbcol2) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . c3 -sql create table $st as select count(tbcol2) from $mt interval(1d) - -print =============== step5 avg -sql select avg(tbcol) from $mt interval(1d) -print select avg(tbcol) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 9.500000000 then - return -1 -endi - -$st = $stPrefix . av -sql create table $st as select avg(tbcol) from $mt interval(1d) - -print =============== step6 su -sql select sum(tbcol) from $mt interval(1d) -print select sum(tbcol) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 1900 then - return -1 -endi - -$st = $stPrefix . su -sql create table $st as select sum(tbcol) from $mt interval(1d) - -print =============== step7 mi -sql select min(tbcol) from $mt interval(1d) -print select min(tbcol) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 0 then - return -1 -endi - -$st = $stPrefix . 
ma -sql create table $st as select max(tbcol) from $mt interval(1d) - -print =============== step9 fi -sql select first(tbcol) from $mt interval(1d) -print select first(tbcol) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 0 then - return -1 -endi - -$st = $stPrefix . fi -sql create table $st as select first(tbcol) from $mt interval(1d) - -print =============== step10 la -sql select last(tbcol) from $mt interval(1d) -print select last(tbcol) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 19 then - return -1 -endi - -$st = $stPrefix . la -sql create table $st as select last(tbcol) from $mt interval(1d) - -print =============== step11 wh -sql select count(tbcol) from $mt where ts < now + 4m interval(1d) -print select count(tbcol) from $mt where ts < now + 4m interval(1d) ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . wh -#sql create table $st as select count(tbcol) from $mt where ts < now + 4m interval(1d) - -print =============== step12 as -sql select count(tbcol) from $mt interval(1d) -print select count(tbcol) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . as -sql create table $st as select count(tbcol) as c from $mt interval(1d) - -print =============== step13 -print sleep 22 seconds -sleep 22000 - -print =============== step14 -$st = $stPrefix . c1 -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . c2 -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . c3 -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . av -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 9.500000000 then - return -1 -endi - -$st = $stPrefix . su -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 1900 then - return -1 -endi - -$st = $stPrefix . mi -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 0 then - return -1 -endi - -$st = $stPrefix . ma -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 19 then - return -1 -endi - -$st = $stPrefix . fi -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 0 then - return -1 -endi - -$st = $stPrefix . la -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 19 then - return -1 -endi - -$st = $stPrefix . wh -#sql select * from $st -#print ===> select * from $st ===> $data00 $data01 -#if $data01 != 200 then -# return -1 -#endi - -$st = $stPrefix . 
as -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi diff --git a/tests/script/unique/stream/metrics_replica2_dnode2_vnoden.sim b/tests/script/unique/stream/metrics_replica2_dnode2_vnoden.sim deleted file mode 100644 index be2fcefe66ed6ca2e24a44cd22fa072201137b89..0000000000000000000000000000000000000000 --- a/tests/script/unique/stream/metrics_replica2_dnode2_vnoden.sim +++ /dev/null @@ -1,261 +0,0 @@ -system sh/stop_dnodes.sh - - - -system sh/deploy.sh -n dnode1 -i 1 -system sh/deploy.sh -n dnode2 -i 2 -system sh/cfg.sh -n dnode1 -c walLevel -v 1 -system sh/cfg.sh -n dnode2 -c walLevel -v 1 -system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4 -system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4 -system sh/exec.sh -n dnode1 -s start - -sql connect - -sql create dnode $hostname2 -system sh/exec.sh -n dnode2 -s start -$x = 0 -createDnode: - $x = $x + 1 - sleep 1000 - if $x == 20 then - return -1 - endi -sql show dnodes; -if $data4_192.168.0.2 == offline then - goto createDnode -endi - -print ======================== dnode1 start - -$dbPrefix = m2dv_db -$tbPrefix = m2dv_tb -$mtPrefix = m2dv_mt -$stPrefix = m2dv_st -$tbNum = 10 -$rowNum = 20 -$totalNum = 200 - -print =============== step1 -$i = 0 -$db = $dbPrefix . $i -$mt = $mtPrefix . $i -$st = $stPrefix . $i - -sql drop database $db -x step1 -step1: -sql create database $db replica 2 -sql use $db -sql create table $mt (ts timestamp, tbcol int, tbcol2 float) TAGS(tgcol int) - -$i = 0 -while $i < $tbNum - $tb = $tbPrefix . $i - sql create table $tb using $mt tags( $i ) - - $x = -1440 - $y = 0 - while $y < $rowNum - $ms = $x . m - sql insert into $tb values (now $ms , $y , $y ) - $x = $x + 1 - $y = $y + 1 - endw - - $i = $i + 1 -endw - -sleep 100 - -print =============== step2 c1 - -sql select count(*) from $mt interval(1d) -print select count(*) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . c1 -sql create table $st as select count(*) from $mt interval(1d) - -print =============== step3 c2 -sql select count(tbcol) from $mt interval(1d) -print select count(tbcol) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . c2 -sql create table $st as select count(tbcol) from $mt interval(1d) - -print =============== step4 c3 -sql select count(tbcol2) from $mt interval(1d) -print select count(tbcol2) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . c3 -sql create table $st as select count(tbcol2) from $mt interval(1d) - -print =============== step5 avg -sql select avg(tbcol) from $mt interval(1d) -print select avg(tbcol) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 9.500000000 then - return -1 -endi - -$st = $stPrefix . av -sql create table $st as select avg(tbcol) from $mt interval(1d) - -print =============== step6 su -sql select sum(tbcol) from $mt interval(1d) -print select sum(tbcol) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 1900 then - return -1 -endi - -$st = $stPrefix . su -sql create table $st as select sum(tbcol) from $mt interval(1d) - -print =============== step7 mi -sql select min(tbcol) from $mt interval(1d) -print select min(tbcol) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 0 then - return -1 -endi - -$st = $stPrefix . 
mi -sql create table $st as select min(tbcol) from $mt interval(1d) - -print =============== step8 ma -sql select max(tbcol) from $mt interval(1d) -print select max(tbcol) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 19 then - return -1 -endi - -$st = $stPrefix . ma -sql create table $st as select max(tbcol) from $mt interval(1d) - -print =============== step9 fi -sql select first(tbcol) from $mt interval(1d) -print select first(tbcol) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 0 then - return -1 -endi - -$st = $stPrefix . fi -sql create table $st as select first(tbcol) from $mt interval(1d) - -print =============== step10 la -sql select last(tbcol) from $mt interval(1d) -print select last(tbcol) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 19 then - return -1 -endi - -$st = $stPrefix . la -sql create table $st as select last(tbcol) from $mt interval(1d) - -print =============== step11 wh -sql select count(tbcol) from $mt where ts < now + 4m interval(1d) -print select count(tbcol) from $mt where ts < now + 4m interval(1d) ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . wh -#sql create table $st as select count(tbcol) from $mt where ts < now + 4m interval(1d) - -print =============== step12 as -sql select count(tbcol) from $mt interval(1d) -print select count(tbcol) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . as -sql create table $st as select count(tbcol) as c from $mt interval(1d) - -print =============== step13 -print sleep 22 seconds -sleep 22000 - -print =============== step14 -$st = $stPrefix . c1 -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . c2 -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . c3 -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . av -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 9.500000000 then - return -1 -endi - -$st = $stPrefix . su -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 1900 then - return -1 -endi - -$st = $stPrefix . mi -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 0 then - return -1 -endi - -$st = $stPrefix . ma -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 19 then - return -1 -endi - -$st = $stPrefix . fi -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 0 then - return -1 -endi - -$st = $stPrefix . la -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 19 then - return -1 -endi - -$st = $stPrefix . wh -#sql select * from $st -#print ===> select * from $st ===> $data00 $data01 -#if $data01 != 200 then -# return -1 -#endi - -$st = $stPrefix . 
as -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi diff --git a/tests/script/unique/stream/metrics_replica2_dnode3.sim b/tests/script/unique/stream/metrics_replica2_dnode3.sim deleted file mode 100644 index f7b17610c380d9f90a2cefd4af86ea766facdffa..0000000000000000000000000000000000000000 --- a/tests/script/unique/stream/metrics_replica2_dnode3.sim +++ /dev/null @@ -1,270 +0,0 @@ -system sh/stop_dnodes.sh - - - - -system sh/deploy.sh -n dnode1 -i 1 -system sh/deploy.sh -n dnode2 -i 2 -system sh/deploy.sh -n dnode3 -i 3 -system sh/cfg.sh -n dnode1 -c walLevel -v 1 -system sh/cfg.sh -n dnode2 -c walLevel -v 1 -system sh/cfg.sh -n dnode3 -c walLevel -v 1 -system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4 -system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4 -system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v 4 -system sh/exec.sh -n dnode1 -s start - -sql connect - -sql create dnode $hostname2 -sql create dnode $hostname3 -system sh/exec.sh -n dnode2 -s start -system sh/exec.sh -n dnode3 -s start -$x = 0 -createDnode: - $x = $x + 1 - sleep 1000 - if $x == 20 then - return -1 - endi -sql show dnodes; -if $data4_192.168.0.2 == offline then - goto createDnode -endi -if $data4_192.168.0.3 == offline then - goto createDnode -endi - -print ======================== dnode1 start - -$dbPrefix = m2d3_db -$tbPrefix = m2d3_tb -$mtPrefix = m2d3_mt -$stPrefix = m2d3_st -$tbNum = 10 -$rowNum = 20 -$totalNum = 200 - -print =============== step1 -$i = 0 -$db = $dbPrefix . $i -$mt = $mtPrefix . $i -$st = $stPrefix . $i - -sql drop database $db -x step1 -step1: -sql create database $db replica 2 -sql use $db -sql create table $mt (ts timestamp, tbcol int, tbcol2 float) TAGS(tgcol int) - -$i = 0 -while $i < $tbNum - $tb = $tbPrefix . $i - sql create table $tb using $mt tags( $i ) - - $x = -1440 - $y = 0 - while $y < $rowNum - $ms = $x . m - sql insert into $tb values (now $ms , $y , $y ) - $x = $x + 1 - $y = $y + 1 - endw - - $i = $i + 1 -endw - -sleep 100 - -print =============== step2 c1 - -sql select count(*) from $mt interval(1d) -print select count(*) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . c1 -sql create table $st as select count(*) from $mt interval(1d) - -print =============== step3 c2 -sql select count(tbcol) from $mt interval(1d) -print select count(tbcol) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . c2 -sql create table $st as select count(tbcol) from $mt interval(1d) - -print =============== step4 c3 -sql select count(tbcol2) from $mt interval(1d) -print select count(tbcol2) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . c3 -sql create table $st as select count(tbcol2) from $mt interval(1d) - -print =============== step5 avg -sql select avg(tbcol) from $mt interval(1d) -print select avg(tbcol) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 9.500000000 then - return -1 -endi - -$st = $stPrefix . av -sql create table $st as select avg(tbcol) from $mt interval(1d) - -print =============== step6 su -sql select sum(tbcol) from $mt interval(1d) -print select sum(tbcol) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 1900 then - return -1 -endi - -$st = $stPrefix . 
su -sql create table $st as select sum(tbcol) from $mt interval(1d) - -print =============== step7 mi -sql select min(tbcol) from $mt interval(1d) -print select min(tbcol) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 0 then - return -1 -endi - -$st = $stPrefix . mi -sql create table $st as select min(tbcol) from $mt interval(1d) - -print =============== step8 ma -sql select max(tbcol) from $mt interval(1d) -print select max(tbcol) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 19 then - return -1 -endi - -$st = $stPrefix . ma -sql create table $st as select max(tbcol) from $mt interval(1d) - -print =============== step9 fi -sql select first(tbcol) from $mt interval(1d) -print select first(tbcol) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 0 then - return -1 -endi - -$st = $stPrefix . fi -sql create table $st as select first(tbcol) from $mt interval(1d) - -print =============== step10 la -sql select last(tbcol) from $mt interval(1d) -print select last(tbcol) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 19 then - return -1 -endi - -$st = $stPrefix . la -sql create table $st as select last(tbcol) from $mt interval(1d) - -print =============== step11 wh -sql select count(tbcol) from $mt where ts < now + 4m interval(1d) -print select count(tbcol) from $mt where ts < now + 4m interval(1d) ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . wh -#sql create table $st as select count(tbcol) from $mt where ts < now + 4m interval(1d) - -print =============== step12 as -sql select count(tbcol) from $mt interval(1d) -print select count(tbcol) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . as -sql create table $st as select count(tbcol) as c from $mt interval(1d) - -print =============== step13 -print sleep 22 seconds -sleep 22000 - -print =============== step14 -$st = $stPrefix . c1 -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . c2 -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . c3 -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . av -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 9.500000000 then - return -1 -endi - -$st = $stPrefix . su -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 1900 then - return -1 -endi - -$st = $stPrefix . mi -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 0 then - return -1 -endi - -$st = $stPrefix . ma -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 19 then - return -1 -endi - -$st = $stPrefix . fi -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 0 then - return -1 -endi - -$st = $stPrefix . la -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 19 then - return -1 -endi - -$st = $stPrefix . wh -#sql select * from $st -#print ===> select * from $st ===> $data00 $data01 -#if $data01 != 200 then -# return -1 -#endi - -$st = $stPrefix . 
as -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi diff --git a/tests/script/unique/stream/metrics_replica3_dnode4.sim b/tests/script/unique/stream/metrics_replica3_dnode4.sim deleted file mode 100644 index 402712800313ff5b96f970d12ffe007f77bc26f7..0000000000000000000000000000000000000000 --- a/tests/script/unique/stream/metrics_replica3_dnode4.sim +++ /dev/null @@ -1,280 +0,0 @@ -system sh/stop_dnodes.sh - - - - - -system sh/deploy.sh -n dnode1 -i 1 -system sh/deploy.sh -n dnode2 -i 2 -system sh/deploy.sh -n dnode3 -i 3 -system sh/deploy.sh -n dnode4 -i 4 -system sh/cfg.sh -n dnode1 -c walLevel -v 1 -system sh/cfg.sh -n dnode2 -c walLevel -v 1 -system sh/cfg.sh -n dnode3 -c walLevel -v 1 -system sh/cfg.sh -n dnode4 -c walLevel -v 1 -system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4 -system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4 -system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v 4 -system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v 4 -system sh/exec.sh -n dnode1 -s start - -sql connect - -sql create dnode $hostname2 -sql create dnode $hostname3 -sql create dnode $hostname4 -system sh/exec.sh -n dnode2 -s start -system sh/exec.sh -n dnode3 -s start -system sh/exec.sh -n dnode4 -s start - -$x = 0 -createDnode: - $x = $x + 1 - sleep 1000 - if $x == 20 then - return -1 - endi -sql show dnodes; -if $data4_192.168.0.2 == offline then - goto createDnode -endi -if $data4_192.168.0.3 == offline then - goto createDnode -endi -if $data4_192.168.0.4 == offline then - goto createDnode -endi - -print ======================== dnode1 start - -$dbPrefix = m2d3_db -$tbPrefix = m2d3_tb -$mtPrefix = m2d3_mt -$stPrefix = m2d3_st -$tbNum = 10 -$rowNum = 20 -$totalNum = 200 - -print =============== step1 -$i = 0 -$db = $dbPrefix . $i -$mt = $mtPrefix . $i -$st = $stPrefix . $i - -sql drop database $db -x step1 -step1: -sql create database $db replica 2 -sql use $db -sql create table $mt (ts timestamp, tbcol int, tbcol2 float) TAGS(tgcol int) - -$i = 0 -while $i < $tbNum - $tb = $tbPrefix . $i - sql create table $tb using $mt tags( $i ) - - $x = -1440 - $y = 0 - while $y < $rowNum - $ms = $x . m - sql insert into $tb values (now $ms , $y , $y ) - $x = $x + 1 - $y = $y + 1 - endw - - $i = $i + 1 -endw - -sleep 100 - -print =============== step2 c1 - -sql select count(*) from $mt interval(1d) -print select count(*) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . c1 -sql create table $st as select count(*) from $mt interval(1d) - -print =============== step3 c2 -sql select count(tbcol) from $mt interval(1d) -print select count(tbcol) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . c2 -sql create table $st as select count(tbcol) from $mt interval(1d) - -print =============== step4 c3 -sql select count(tbcol2) from $mt interval(1d) -print select count(tbcol2) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . c3 -sql create table $st as select count(tbcol2) from $mt interval(1d) - -print =============== step5 avg -sql select avg(tbcol) from $mt interval(1d) -print select avg(tbcol) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 9.500000000 then - return -1 -endi - -$st = $stPrefix . 
av -sql create table $st as select avg(tbcol) from $mt interval(1d) - -print =============== step6 su -sql select sum(tbcol) from $mt interval(1d) -print select sum(tbcol) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 1900 then - return -1 -endi - -$st = $stPrefix . su -sql create table $st as select sum(tbcol) from $mt interval(1d) - -print =============== step7 mi -sql select min(tbcol) from $mt interval(1d) -print select min(tbcol) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 0 then - return -1 -endi - -$st = $stPrefix . mi -sql create table $st as select min(tbcol) from $mt interval(1d) - -print =============== step8 ma -sql select max(tbcol) from $mt interval(1d) -print select max(tbcol) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 19 then - return -1 -endi - -$st = $stPrefix . ma -sql create table $st as select max(tbcol) from $mt interval(1d) - -print =============== step9 fi -sql select first(tbcol) from $mt interval(1d) -print select first(tbcol) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 0 then - return -1 -endi - -$st = $stPrefix . fi -sql create table $st as select first(tbcol) from $mt interval(1d) - -print =============== step10 la -sql select last(tbcol) from $mt interval(1d) -print select last(tbcol) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 19 then - return -1 -endi - -$st = $stPrefix . la -sql create table $st as select last(tbcol) from $mt interval(1d) - -print =============== step11 wh -sql select count(tbcol) from $mt where ts < now + 4m interval(1d) -print select count(tbcol) from $mt where ts < now + 4m interval(1d) ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . wh -#sql create table $st as select count(tbcol) from $mt where ts < now + 4m interval(1d) - -print =============== step12 as -sql select count(tbcol) from $mt interval(1d) -print select count(tbcol) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . as -sql create table $st as select count(tbcol) as c from $mt interval(1d) - -print =============== step13 -print sleep 22 seconds -sleep 22000 - -print =============== step14 -$st = $stPrefix . c1 -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . c2 -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . c3 -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . av -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 9.500000000 then - return -1 -endi - -$st = $stPrefix . su -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 1900 then - return -1 -endi - -$st = $stPrefix . mi -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 0 then - return -1 -endi - -$st = $stPrefix . ma -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 19 then - return -1 -endi - -$st = $stPrefix . fi -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 0 then - return -1 -endi - -$st = $stPrefix . la -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 19 then - return -1 -endi - -$st = $stPrefix . 
wh -#sql select * from $st -#print ===> select * from $st ===> $data00 $data01 -#if $data01 != 200 then -# return -1 -#endi - -$st = $stPrefix . as -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi diff --git a/tests/script/unique/stream/metrics_vnode_stop.sim b/tests/script/unique/stream/metrics_vnode_stop.sim deleted file mode 100644 index cd84cb3cdf5f8096f4986a222cc371db3900f765..0000000000000000000000000000000000000000 --- a/tests/script/unique/stream/metrics_vnode_stop.sim +++ /dev/null @@ -1,188 +0,0 @@ -system sh/stop_dnodes.sh - - - -system sh/deploy.sh -n dnode1 -i 1 -system sh/deploy.sh -n dnode2 -i 2 -system sh/cfg.sh -n dnode1 -c walLevel -v 1 -system sh/cfg.sh -n dnode2 -c walLevel -v 1 -system sh/cfg.sh -n dnode1 -c numOfMnodes -v 2 -system sh/cfg.sh -n dnode2 -c numOfMnodes -v 2 -system sh/exec.sh -n dnode1 -s start - -sql connect - -sql create dnode $hostname2 -system sh/exec.sh -n dnode2 -s start -$x = 0 -createDnode: - $x = $x + 1 - sleep 1000 - if $x == 20 then - return -1 - endi -sql show dnodes; -if $data4_192.168.0.2 == offline then - goto createDnode -endi -print ======================== dnode start - -$dbPrefix = db -$tbPrefix = tb -$mtPrefix = mt -$stPrefix = st -$tbNum = 10 -$rowNum = 20 -$totalNum = 200 - -print =============== step1 -$i = 0 -$db = $dbPrefix . $i -$mt = $mtPrefix . $i -$st = $stPrefix . $i - -sql create database $db replica 2 -sql use $db -sql create table $mt (ts timestamp, tbcol int, tbcol2 float) TAGS(tgcol int) - -$i = 0 -while $i < $tbNum - $tb = $tbPrefix . $i - sql create table $tb using $mt tags( $i ) - - $x = -1440 - $y = 0 - while $y < $rowNum - $ms = $x . m - sql insert into $tb values (now $ms , $y , $y ) - $x = $x + 1 - $y = $y + 1 - endw - - $i = $i + 1 -endw - -sleep 100 - -print =============== step2 c1 -$i = 1 -$tb = $tbPrefix . $i - -sql select count(*) from $mt interval(1d) -print select count(*) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . c1 -sql create table $st as select count(*) from $mt interval(1d) - -print =============== step3 -system sh/exec.sh -n dnode2 -s stop - -print =============== step4 -print sleep 22 seconds -sleep 22000 - -print =============== step5 -$st = $stPrefix . c1 -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -print ============= step6 - -sql close -system sh/exec.sh -n dnode1 -s stop -system sh/exec.sh -n dnode2 -s stop -system sh/deploy.sh -n dnode1 -i 1 -system sh/deploy.sh -n dnode2 -i 2 -system sh/cfg.sh -n dnode1 -c walLevel -v 1 -system sh/cfg.sh -n dnode2 -c walLevel -v 1 -system sh/exec.sh -n dnode2 -s start -sleep 2000 - -$x = 0 -connectTbase2: - $x = $x + 1 - sleep 1000 - if $x == 20 then - return -1 - endi -sql connect -x connectTbase2 -sleep 2000 - -sql create dnode $hostname1 -system sh/exec.sh -n dnode1 -s start -sleep 2000 -print ======================== dnode start - -$dbPrefix = db -$tbPrefix = tb -$mtPrefix = mt -$stPrefix = st -$tbNum = 10 -$rowNum = 20 -$totalNum = 200 - -print =============== step7 -$i = 0 -$db = $dbPrefix . $i -$mt = $mtPrefix . $i -$st = $stPrefix . $i - -sql create database $db replica 2 -sql use $db -sql create table $mt (ts timestamp, tbcol int, tbcol2 float) TAGS(tgcol int) - -$i = 0 -while $i < $tbNum - $tb = $tbPrefix . $i - sql create table $tb using $mt tags( $i ) - - $x = -1440 - $y = 0 - while $y < $rowNum - $ms = $x . 
m - sql insert into $tb values (now $ms , $y , $y ) - $x = $x + 1 - $y = $y + 1 - endw - - $i = $i + 1 -endw - -sleep 100 - -print =============== step8 c1 -$i = 1 -$tb = $tbPrefix . $i - -sql select count(*) from $mt interval(1d) -print select count(*) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . c1 -sql create table $st as select count(*) from $mt interval(1d) - -print =============== step9 -system sh/exec.sh -n dnode1 -s stop - -print =============== step10 -print sleep 22 seconds -sleep 22000 - -print =============== step11 -$st = $stPrefix . c1 -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - - - diff --git a/tests/script/unique/stream/table_balance.sim b/tests/script/unique/stream/table_balance.sim deleted file mode 100644 index 45e054e2efdfbd7f3d01e3a860c5ac227f3327fc..0000000000000000000000000000000000000000 --- a/tests/script/unique/stream/table_balance.sim +++ /dev/null @@ -1,238 +0,0 @@ -system sh/stop_dnodes.sh - -system sh/deploy.sh -n dnode1 -i 1 -system sh/deploy.sh -n dnode2 -i 2 -system sh/cfg.sh -n dnode1 -c statusInterval -v 1 -system sh/cfg.sh -n dnode2 -c statusInterval -v 1 -system sh/cfg.sh -n dnode1 -c balanceInterval -v 10 -system sh/cfg.sh -n dnode2 -c balanceInterval -v 10 -system sh/cfg.sh -n dnode1 -c walLevel -v 1 -system sh/cfg.sh -n dnode2 -c walLevel -v 1 -system sh/cfg.sh -n dnode1 -c mnodeEqualVnodeNum -v 0 -system sh/cfg.sh -n dnode2 -c mnodeEqualVnodeNum -v 0 -system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4 -system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4 -system sh/cfg.sh -n dnode1 -c numOfMnodes -v 2 -system sh/cfg.sh -n dnode2 -c numOfMnodes -v 2 - -$dbPrefix = tb_db -$tbPrefix = tb_tb -$mtPrefix = tb_mt -$stPrefix = tb_st -$tbNum = 10 -$rowNum = 200 -$totalNum = 200 - -print ========= start dnode1 -system sh/exec.sh -n dnode1 -s start - -sql connect - -print ============== step1 -$i = 0 -$db = $dbPrefix -$mt = $mtPrefix -$st = $stPrefix . $i - -sql create database $db -sql use $db -sql create table $mt (ts timestamp, tbcol int, tbcol2 float) TAGS(tgcol int) - -$i = 0 -while $i < $tbNum - $tb = $tbPrefix . $i - sql create table $tb using $mt tags( $i ) - if $i == 0 then - sleep 2000 - endi - - $x = 0 - $y = 0 - while $y < $rowNum - $ms = $x . s - sql insert into $tb values (now + $ms , $y , $y ) - $x = $x + 1 - $y = $y + 1 - endw - - $i = $i + 1 -endw - -sleep 100 - -print =============== step2 - -$i = 1 -$tb = $tbPrefix . $i -$st = $stPrefix . $i -sql create table $st as select count(*), count(tbcol), count(tbcol2) from $tb interval(10s) - -$i = 5 -$tb = $tbPrefix . $i -$st = $stPrefix . $i -sql create table $st as select count(*), count(tbcol), count(tbcol2) from $tb interval(10s) - -$i = 8 -$tb = $tbPrefix . $i -$st = $stPrefix . $i -sql create table $st as select count(*), count(tbcol), count(tbcol2) from $tb interval(10s) - -sql show tables -if $rows != 13 then - return -1 -endi - -print =============== step3 -print sleep 22 seconds -sleep 22000 - -$i = 1 -$tb = $tbPrefix . $i -$st = $stPrefix . $i -sql select * from $st -$r1 = $rows -print $st ==> $r1 $data00 $data01 $data10 $data11 - -$i = 5 -$tb = $tbPrefix . $i -$st = $stPrefix . $i -sql select * from $st -$r5 = $rows -print $st ==> $r5 $data00 $data01 $data10 $data11 - -$i = 8 -$tb = $tbPrefix . $i -$st = $stPrefix . 
$i -sql select * from $st -$r8 = $rows -print $st ==> $r8 $data00 $data01 $data10 $data11 - -print rows1=>$r1 rows5=>$r5 rows8=>$r8 - -$x = 0 -show1: - $x = $x + 1 - sleep 2000 - if $x == 20 then - return -1 - endi -sql show dnodes -x show1 -$dnode1Vnodes = $data3_192.168.0.1 -print dnode1 $dnode1Vnodes -$dnode2Vnodes = $data3_192.168.0.2 -print dnode2 $dnode2Vnodes - -if $dnode1Vnodes != 0 then - goto show1 -endi -if $dnode2Vnodes != NULL then - goto show1 -endi - -print =============== step4 start dnode2 -sql create dnode $hostname2 -system sh/exec.sh -n dnode2 -s start -sleep 8000 - -$x = 0 -show2: - $x = $x + 1 - sleep 2000 - if $x == 20 then - return -1 - endi -sql show dnodes -x show2 -$dnode1Vnodes = $data3_192.168.0.1 -print dnode1 $dnode1Vnodes -$dnode2Vnodes = $data3_192.168.0.2 -print dnode2 $dnode2Vnodes - -if $dnode1Vnodes != 2 then - goto show2 -endi -if $dnode2Vnodes != 2 then - goto show2 -endi - -print rows1=>$r1 rows5=>$r5 rows8=>$r8 -print =============== step5 -print sleep 22 seconds -sleep 22000 - -print =============== step6 -$i = 1 -$tb = $tbPrefix . $i -$st = $stPrefix . $i -sql select * from $tb -if $rows != $rowNum then - return -1 -endi - -$i = 5 -$tb = $tbPrefix . $i -$st = $stPrefix . $i -sql select * from $tb -if $rows != $rowNum then - return -1 -endi - -$i = 8 -$tb = $tbPrefix . $i -$st = $stPrefix . $i -sql select * from $tb -if $rows != $rowNum then - return -1 -endi - -print rows1=>$r1 rows5=>$r5 rows8=>$r8 -print =============== step7 -$i = 1 -$tb = $tbPrefix . $i -$st = $stPrefix . $i -sql select * from $st -print $st ==> $r1 $rows , $data00 $data01 $data10 $data11 -if $rows == 0 then - return -1 -endi -if $rows <= $r1 then - return -1 -endi - -$i = 5 -$tb = $tbPrefix . $i -$st = $stPrefix . $i -sql select * from $st -print $st ==> $r5 $rows , $data00 $data01 $data10 $data11 -if $rows == 0 then - return -1 -endi -if $rows <= $r5 then - return -1 -endi - - -$i = 8 -$tb = $tbPrefix . $i -$st = $stPrefix . 
$i -sql select * from $st -print $st ==> $r8 $rows , $data00 $data01 $data10 $data11 -if $rows == 0 then - return -1 -endi -if $rows <= $r8 then - return -1 -endi - - -if $r1 != $r5 then - return -1 -endi - -if $r8 != $r5 then - return -1 -endi - -print =============== clear -system sh/exec.sh -n dnode1 -s stop -system sh/exec.sh -n dnode2 -s stop - diff --git a/tests/script/unique/stream/table_move.sim b/tests/script/unique/stream/table_move.sim deleted file mode 100644 index 964a0c025363fd650e8051312a812fffbddaea7d..0000000000000000000000000000000000000000 --- a/tests/script/unique/stream/table_move.sim +++ /dev/null @@ -1,269 +0,0 @@ -system sh/stop_dnodes.sh - -system sh/deploy.sh -n dnode1 -i 1 -system sh/deploy.sh -n dnode2 -i 2 -system sh/deploy.sh -n dnode3 -i 3 -system sh/deploy.sh -n dnode4 -i 4 - -system sh/cfg.sh -n dnode1 -c statusInterval -v 1 -system sh/cfg.sh -n dnode2 -c statusInterval -v 1 -system sh/cfg.sh -n dnode3 -c statusInterval -v 1 -system sh/cfg.sh -n dnode4 -c statusInterval -v 1 - -system sh/cfg.sh -n dnode1 -c balanceInterval -v 10 -system sh/cfg.sh -n dnode2 -c balanceInterval -v 10 -system sh/cfg.sh -n dnode3 -c balanceInterval -v 10 -system sh/cfg.sh -n dnode4 -c balanceInterval -v 10 - -system sh/cfg.sh -n dnode1 -c wallevel -v 1 -system sh/cfg.sh -n dnode2 -c wallevel -v 1 -system sh/cfg.sh -n dnode3 -c wallevel -v 1 -system sh/cfg.sh -n dnode4 -c wallevel -v 1 - -system sh/cfg.sh -n dnode1 -c mnodeEqualVnodeNum -v 0 -system sh/cfg.sh -n dnode2 -c mnodeEqualVnodeNum -v 0 -system sh/cfg.sh -n dnode3 -c mnodeEqualVnodeNum -v 0 -system sh/cfg.sh -n dnode4 -c mnodeEqualVnodeNum -v 0 - -system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4 -system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4 -system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v 4 -system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v 4 - -system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 4 -system sh/cfg.sh -n dnode2 -c maxTablesPerVnode -v 4 -system sh/cfg.sh -n dnode3 -c maxTablesPerVnode -v 4 -system sh/cfg.sh -n dnode4 -c maxTablesPerVnode -v 4 - -system sh/cfg.sh -n dnode1 -c numOfMnodes -v 3 -system sh/cfg.sh -n dnode2 -c numOfMnodes -v 3 -system sh/cfg.sh -n dnode3 -c numOfMnodes -v 3 -system sh/cfg.sh -n dnode4 -c numOfMnodes -v 3 - -system sh/cfg.sh -n dnode1 -c maxVnodeConnections -v 1000 -system sh/cfg.sh -n dnode1 -c maxMeterConnections -v 1000 -system sh/cfg.sh -n dnode1 -c maxShellConns -v 1000 -system sh/cfg.sh -n dnode1 -c maxMgmtConnections -v 1000 - -system sh/cfg.sh -n dnode2 -c maxVnodeConnections -v 1000 -system sh/cfg.sh -n dnode2 -c maxMeterConnections -v 1000 -system sh/cfg.sh -n dnode2 -c maxShellConns -v 1000 -system sh/cfg.sh -n dnode2 -c maxMgmtConnections -v 1000 - -$dbPrefix = db -$tbPrefix = tb -$mtPrefix = mt -$stPrefix = st -$tbNum = 5 -$rowNum = 20 -$totalNum = 200 - -print ============== step1 -print ========= start dnode1 -system sh/exec.sh -n dnode1 -s start - -sql connect -sleep 2000 - -$i = 0 -$db = $dbPrefix . $i -$mt = $mtPrefix . $i -$st = $stPrefix . $i - -sql create database $db -sql use $db -sql create table $mt (ts timestamp, tbcol int, tbcol2 float) TAGS(tgcol int) - -$i = 0 -while $i < $tbNum - $tb = $tbPrefix . $i - sql create table $tb using $mt tags( $i ) - - $x = -20 - $y = 0 - while $y < $rowNum - $ms = $x . s - sql insert into $tb values (now $ms , $x , $x ) - $x = $x + 1 - $y = $y + 1 - endw - - $i = $i + 1 -endw - -sleep 100 - -print =============== step2 -$i = 0 -$tb = $tbPrefix . $i -$st = $stPrefix . 
$i - -sql select count(*), count(tbcol), count(tbcol2) from $tb interval(1d) -print select count(*), count(tbcol), count(tbcol2) from $tb interval(1d) ===> $data00 $data01 $data02, $data03 -if $data01 != $rowNum then - return -1 -endi -if $data02 != $rowNum then - return -1 -endi -if $data03 != $rowNum then - return -1 -endi - -sql show tables -if $rows != 5 then - return -1 -endi - -sql create table $st as select count(*), count(tbcol), count(tbcol2) from $tb interval(10s) - -sql show tables -if $rows != 6 then - return -1 -endi - -print =============== step3 -print sleep 22 seconds -sleep 22000 - -sql select * from $tb -if $rows != 20 then - return -1 -endi - -sql select * from $mt -if $rows != 100 then - return -1 -endi - -sql select * from $st -print select * from $st => $data01 -if $rows == 0 then - return -1 -endi - -$x = 0 -show1: - $x = $x + 1 - sleep 1000 - if $x == 20 then - return -1 - endi -sql show dnodes -x show1 -$dnode1Vnodes = $data3_192.168.0.1 -print dnode1 $dnode1Vnodes -$dnode2Vnodes = $data3_192.168.0.2 -print dnode2 $dnode2Vnodes - -if $dnode1Vnodes != 6 then - goto show1 -endi -if $dnode2Vnodes != NULL then - goto show1 -endi - -print =============== step4 start dnode2 -sql create dnode $hostname2 -system sh/exec.sh -n dnode2 -s start -sleep 8000 - -$x = 0 -show2: - $x = $x + 1 - sleep 2000 - if $x == 20 then - return -1 - endi -sql show dnodes -x show2 -$dnode1Vnodes = $data3_192.168.0.1 -print dnode1 $dnode1Vnodes -$dnode2Vnodes = $data3_192.168.0.2 -print dnode2 $dnode2Vnodes - -if $dnode1Vnodes != 7 then - goto show2 -endi -if $dnode2Vnodes != 7 then - goto show2 -endi - -print =============== step5 drop dnode1 -system sh/exec.sh -n dnode1 -s stop -print stop dnode1 and sleep 10000 -sleep 10000 - -sql drop dnode $hostname1 -print drop dnode1 and sleep 9000 -sleep 9000 - -$x = 0 -show6: - $x = $x + 1 - sleep 2000 - if $x == 20 then - return -1 - endi -sql show dnodes -x show6 -$dnode1Vnodes = $data3_192.168.0.1 -print dnode1 $dnode1Vnodes -$dnode2Vnodes = $data3_192.168.0.2 -print dnode2 $dnode2Vnodes - -if $dnode1Vnodes != NULL then - goto show6 -endi -if $dnode2Vnodes != 6 then - goto show6 -endi - -print =============== step6 - -print select * from $tb -sql select * from $tb -if $rows != 20 then - return -1 -endi - -print select * from $mt -sql select * from $mt -if $rows != 80 then - return -1 -endi - - -print =============== step7 -$i = 0 -while $i < $tbNum - $tb = $tbPrefix . $i - - $x = 0 - $y = 0 - while $y < $rowNum - $ms = $x . s - sql insert into $tb values (now + $ms , $y , $y ) - $x = $x + 1 - $y = $y + 1 - endw - - $i = $i + 1 -endw - -$i = 0 -$tb = $tbPrefix . $i -$st = $stPrefix . 
$i - -print =============== step8 -print sleep 22 seconds -sleep 22000 - -print select * from $st -sql select * from $st -if $rows == 0 then - return -1 -endi - - -print =============== clear -system sh/exec.sh -n dnode1 -s stop -system sh/exec.sh -n dnode2 -s stop - diff --git a/tests/script/unique/stream/table_replica1_dnode2.sim b/tests/script/unique/stream/table_replica1_dnode2.sim deleted file mode 100644 index ccc6026e9c92975ccdd4fd12366a11f50a818d3f..0000000000000000000000000000000000000000 --- a/tests/script/unique/stream/table_replica1_dnode2.sim +++ /dev/null @@ -1,137 +0,0 @@ -system sh/stop_dnodes.sh - -system sh/deploy.sh -n dnode1 -i 1 -system sh/deploy.sh -n dnode2 -i 2 -system sh/cfg.sh -n dnode1 -c walLevel -v 1 -system sh/cfg.sh -n dnode2 -c walLevel -v 1 -system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4 -system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4 -system sh/exec.sh -n dnode1 -s start - -sql connect - -sql create dnode $hostname2 -system sh/exec.sh -n dnode2 -s start -$x = 0 -createDnode: - $x = $x + 1 - sleep 1000 - if $x == 20 then - return -1 - endi -sql show dnodes; -if $data4_192.168.0.2 == offline then - goto createDnode -endi -print ======================== dnode1 start - -$dbPrefix = t1d_db -$tbPrefix = t1d_tb -$mtPrefix = t1d_mt -$stPrefix = t1d_st -$tbNum = 10 -$rowNum = 20 -$totalNum = 200 - -print =============== step1 -$i = 0 -$db = $dbPrefix . $i -$mt = $mtPrefix . $i -$st = $stPrefix . $i - -sql drop database $db -x step1 -step1: -sql create database $db -sql use $db -sql create table $mt (ts timestamp, tbcol int, tbcol2 float) TAGS(tgcol int) - -$i = 0 -while $i < $tbNum - $tb = $tbPrefix . $i - sql create table $tb using $mt tags( $i ) - - $x = -1440 - $y = 0 - while $y < $rowNum - $ms = $x . m - sql insert into $tb values (now $ms , $y , $y ) - $x = $x + 1 - $y = $y + 1 - endw - - $i = $i + 1 -endw - -sleep 100 - -print =============== step2 - -$i = 1 -$tb = $tbPrefix . $i -$st = $stPrefix . $i -sql select count(*), count(tbcol), count(tbcol2) from $tb interval(1d) -print select count(*), count(tbcol), count(tbcol2) from $tb interval(1d) --> $data00 $data01 $data02 $data03 -sql create table $st as select count(*), count(tbcol), count(tbcol2) from $tb interval(1d) - -$i = 5 -$tb = $tbPrefix . $i -$st = $stPrefix . $i -sql select count(*), count(tbcol), count(tbcol2) from $tb interval(1d) -print select count(*), count(tbcol), count(tbcol2) from $tb interval(1d) --> $data00 $data01 $data02 $data03 -sql create table $st as select count(*), count(tbcol), count(tbcol2) from $tb interval(1d) - -$i = 8 -$tb = $tbPrefix . $i -$st = $stPrefix . $i -sql select count(*), count(tbcol), count(tbcol2) from $tb interval(1d) -print select count(*), count(tbcol), count(tbcol2) from $tb interval(1d) --> $data00 $data01 $data02 $data03 -sql create table $st as select count(*), count(tbcol), count(tbcol2) from $tb interval(1d) - -sql show tables -if $rows != 13 then - return -1 -endi - -print =============== step3 -print sleep 22 seconds -sleep 22000 - - -print =============== step4 -$i = 1 -$tb = $tbPrefix . $i -$st = $stPrefix . $i -sql select * from $st -print $st ==> $rows $data00 $data01 $data10 $data11 -$rows1 = $rows -if $data01 != 20 then - return -1 -endi - -$i = 5 -$tb = $tbPrefix . $i -$st = $stPrefix . $i -sql select * from $st -print $st => $rows $data00 $data01 $data10 $data11 -$rows5 = $rows -if $data01 != 20 then - return -1 -endi - -$i = 8 -$tb = $tbPrefix . $i -$st = $stPrefix . 
$i -sql select * from $st -print $st ==> $rows $data00 $data01 $data10 $data11 -$rows8 = $rows -if $data01 != 20 then - return -1 -endi - -if $rows8 != $rows5 then - return -1 -endi - -if $rows8 != $rows1 then - return -1 -endi \ No newline at end of file diff --git a/tests/script/unique/stream/table_replica2_dnode2.sim b/tests/script/unique/stream/table_replica2_dnode2.sim deleted file mode 100644 index 947fa0d2f9093c802a9c99c74edddeffca102d38..0000000000000000000000000000000000000000 --- a/tests/script/unique/stream/table_replica2_dnode2.sim +++ /dev/null @@ -1,312 +0,0 @@ -system sh/stop_dnodes.sh - - - -system sh/deploy.sh -n dnode1 -i 1 -system sh/deploy.sh -n dnode2 -i 2 -system sh/cfg.sh -n dnode1 -c walLevel -v 1 -system sh/cfg.sh -n dnode2 -c walLevel -v 1 -system sh/exec.sh -n dnode1 -s start - -sql connect - -sql create dnode $hostname2 -system sh/exec.sh -n dnode2 -s start -$x = 0 -createDnode: - $x = $x + 1 - sleep 1000 - if $x == 20 then - return -1 - endi -sql show dnodes; -if $data4_192.168.0.2 == offline then - goto createDnode -endi -print ======================== dnode1 start - -$dbPrefix = t2d_db -$tbPrefix = t2d_tb -$mtPrefix = t2d_mt -$stPrefix = t2d_st -$tbNum = 10 -$rowNum = 20 -$totalNum = 200 - -print =============== step1 -$i = 0 -$db = $dbPrefix . $i -$mt = $mtPrefix . $i -$st = $stPrefix . $i - -sql drop database $db -x step1 -step1: -sql create database $db replica 2 -sql use $db -sql create table $mt (ts timestamp, tbcol int, tbcol2 float) TAGS(tgcol int) - -$i = 0 -while $i < $tbNum - $tb = $tbPrefix . $i - sql create table $tb using $mt tags( $i ) - - $x = -1440 - $y = 0 - while $y < $rowNum - $ms = $x . m - sql insert into $tb values (now $ms , $y , $y ) - $x = $x + 1 - $y = $y + 1 - endw - - $i = $i + 1 -endw - -sleep 100 - -print =============== step2 c1 -$i = 1 -$tb = $tbPrefix . $i - -sql select count(*) from $tb interval(1d) -print select count(*) from $tb interval(1d) ===> $data00 $data01 -if $data01 != $rowNum then - return -1 -endi - -$st = $stPrefix . c1 -sql create table $st as select count(*) from $tb interval(1d) - -print =============== step3 c2 -sql select count(tbcol) from $tb interval(1d) -print select count(tbcol) from $tb interval(1d) ===> $data00 $data01 -if $data01 != $rowNum then - return -1 -endi - -$st = $stPrefix . c2 -sql create table $st as select count(tbcol) from $tb interval(1d) - -print =============== step4 c3 -sql select count(tbcol2) from $tb interval(1d) -print select count(tbcol2) from $tb interval(1d) ===> $data00 $data01 -if $data01 != $rowNum then - return -1 -endi - -$st = $stPrefix . c3 -sql create table $st as select count(tbcol2) from $tb interval(1d) - -print =============== step5 avg -sql select avg(tbcol) from $tb interval(1d) -print select avg(tbcol) from $tb interval(1d) ===> $data00 $data01 -if $data01 != 9.500000000 then - return -1 -endi - -$st = $stPrefix . av -sql create table $st as select avg(tbcol) from $tb interval(1d) - -print =============== step6 su -sql select sum(tbcol) from $tb interval(1d) -print select sum(tbcol) from $tb interval(1d) ===> $data00 $data01 -if $data01 != 190 then - return -1 -endi - -$st = $stPrefix . su -sql create table $st as select sum(tbcol) from $tb interval(1d) - -print =============== step7 mi -sql select min(tbcol) from $tb interval(1d) -print select min(tbcol) from $tb interval(1d) ===> $data00 $data01 -if $data01 != 0 then - return -1 -endi - -$st = $stPrefix . 
mi -sql create table $st as select min(tbcol) from $tb interval(1d) - -print =============== step8 ma -sql select max(tbcol) from $tb interval(1d) -print select max(tbcol) from $tb interval(1d) ===> $data00 $data01 -if $data01 != 19 then - return -1 -endi - -$st = $stPrefix . ma -sql create table $st as select max(tbcol) from $tb interval(1d) - -print =============== step9 fi -sql select first(tbcol) from $tb interval(1d) -print select first(tbcol) from $tb interval(1d) ===> $data00 $data01 -if $data01 != 0 then - return -1 -endi - -$st = $stPrefix . fi -sql create table $st as select first(tbcol) from $tb interval(1d) - -print =============== step10 la -sql select last(tbcol) from $tb interval(1d) -print select last(tbcol) from $tb interval(1d) ===> $data00 $data01 -if $data01 != 19 then - return -1 -endi - -$st = $stPrefix . la -sql create table $st as select last(tbcol) from $tb interval(1d) - -print =============== step11 st -sql select stddev(tbcol) from $tb interval(1d) -print select stddev(tbcol) from $tb interval(1d) ===> $data00 $data01 -if $data01 != 5.766281297 then - return -1 -endi - -$st = $stPrefix . std -sql create table $st as select stddev(tbcol) from $tb interval(1d) - -print =============== step12 le -sql select leastsquares(tbcol, 1, 1) from $tb interval(1d) -print select leastsquares(tbcol, 1, 1) from $tb interval(1d) ===> $data00 $data01 -#if $data01 != @(0.000017, -25362055.126740)@ then -# return -1 -#endi - -$st = $stPrefix . le -sql create table $st as select leastsquares(tbcol, 1, 1) from $tb interval(1d) - -print =============== step13 pe - -sql select percentile(tbcol, 1) from $tb interval(1d) -print select percentile(tbcol, 1) from $tb interval(1d) ===> $data00 $data01 -if $data01 != 0.190000000 then - return -1 -endi - -$st = $stPrefix . pe -sql create table $st as select percentile(tbcol, 1) from $tb interval(1d) - -print =============== step14 wh -sql select count(tbcol) from $tb where ts < now + 4m interval(1d) -print select count(tbcol) from $tb where ts < now + 4m interval(1d) ===> $data00 $data01 -if $data01 != $rowNum then - return -1 -endi - -$st = $stPrefix . wh -#sql create table $st as select count(tbcol) from $tb where ts < now + 4m interval(1d) - -print =============== step15 as -sql select count(tbcol) from $tb interval(1d) -print select count(tbcol) from $tb interval(1d) ===> $data00 $data01 -if $data01 != $rowNum then - return -1 -endi - -$st = $stPrefix . as -sql create table $st as select count(tbcol) as c from $tb interval(1d) - -print =============== step16 -print sleep 22 seconds -sleep 22000 - -print =============== step17 -$st = $stPrefix . c1 -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != $rowNum then - return -1 -endi - -$st = $stPrefix . c2 -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != $rowNum then - return -1 -endi - -$st = $stPrefix . c3 -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != $rowNum then - return -1 -endi - -$st = $stPrefix . av -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 9.500000000 then - return -1 -endi - -$st = $stPrefix . su -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 190 then - return -1 -endi - -$st = $stPrefix . mi -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 0 then - return -1 -endi - -$st = $stPrefix . 
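For reference, the pattern these retired stream cases exercise is one round trip: materialize an aggregate into a result table with create table ... as select ... interval(1d), wait for the first window to flush, then read the result table back. The same check from the Python connector, as a minimal sketch assuming a local taosd, the taospy package, and the t2d_db0/t2d_tb1 objects created by the script above (all names illustrative):

    import time
    import taos  # TDengine Python connector (taospy)

    conn = taos.connect(host="localhost")   # assumes default port and credentials
    cur = conn.cursor()
    cur.execute("use t2d_db0")
    # materialize count(*) per 1-day window, as the .sim case does
    cur.execute("create table t2d_st_c1 as select count(*) from t2d_tb1 interval(1d)")
    time.sleep(22)                          # the script waits 22 s for the first flush
    cur.execute("select * from t2d_st_c1")
    rows = cur.fetchall()
    assert rows and rows[0][1] == 20        # 20 rows were inserted per table above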
ma -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 19 then - return -1 -endi - -$st = $stPrefix . fi -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 0 then - return -1 -endi - -$st = $stPrefix . la -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 19 then - return -1 -endi - -$st = $stPrefix . std -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 5.766281297 then - return -1 -endi - -$st = $stPrefix . le -sql select * from $st -#print ===> select * from $st ===> $data00 $data01 -#if $data01 != @(0.000017, -25270086.331047)@ then -# return -1 -#endi - -$st = $stPrefix . pe -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 0.190000000 then - return -1 -endi - -$st = $stPrefix . wh -#sql select * from $st -#print ===> select * from $st ===> $data00 $data01 -#if $data01 != $rowNum then -# return -1 -#endi - -$st = $stPrefix . as -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != $rowNum then - return -1 -endi diff --git a/tests/script/unique/stream/table_replica2_dnode2_vnoden.sim b/tests/script/unique/stream/table_replica2_dnode2_vnoden.sim deleted file mode 100644 index 75300362393eaa543740307d4d11f9a4eabbbc50..0000000000000000000000000000000000000000 --- a/tests/script/unique/stream/table_replica2_dnode2_vnoden.sim +++ /dev/null @@ -1,314 +0,0 @@ -system sh/stop_dnodes.sh - - - -system sh/deploy.sh -n dnode1 -i 1 -system sh/deploy.sh -n dnode2 -i 2 -system sh/cfg.sh -n dnode1 -c walLevel -v 1 -system sh/cfg.sh -n dnode2 -c walLevel -v 1 -system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4 -system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4 -system sh/exec.sh -n dnode1 -s start - -sql connect - -sql create dnode $hostname2 -system sh/exec.sh -n dnode2 -s start -$x = 0 -createDnode: - $x = $x + 1 - sleep 1000 - if $x == 20 then - return -1 - endi -sql show dnodes; -if $data4_192.168.0.2 == offline then - goto createDnode -endi -print ======================== dnode1 start - -$dbPrefix = t2dv_db -$tbPrefix = t2dv_tb -$mtPrefix = t2dv_mt -$stPrefix = t2dv_st -$tbNum = 10 -$rowNum = 20 -$totalNum = 200 - -print =============== step1 -$i = 0 -$db = $dbPrefix . $i -$mt = $mtPrefix . $i -$st = $stPrefix . $i - -sql drop databae $db -x step1 -step1: -sql create database $db replica 2 -sql use $db -sql create table $mt (ts timestamp, tbcol int, tbcol2 float) TAGS(tgcol int) - -$i = 0 -while $i < $tbNum - $tb = $tbPrefix . $i - sql create table $tb using $mt tags( $i ) - - $x = -1440 - $y = 0 - while $y < $rowNum - $ms = $x . m - sql insert into $tb values (now $ms , $y , $y ) - $x = $x + 1 - $y = $y + 1 - endw - - $i = $i + 1 -endw - -sleep 100 - -print =============== step2 c1 -$i = 1 -$tb = $tbPrefix . $i - -sql select count(*) from $tb interval(1d) -print select count(*) from $tb interval(1d) ===> $data00 $data01 -if $data01 != $rowNum then - return -1 -endi - -$st = $stPrefix . c1 -sql create table $st as select count(*) from $tb interval(1d) - -print =============== step3 c2 -sql select count(tbcol) from $tb interval(1d) -print select count(tbcol) from $tb interval(1d) ===> $data00 $data01 -if $data01 != $rowNum then - return -1 -endi - -$st = $stPrefix . 
c2 -sql create table $st as select count(tbcol) from $tb interval(1d) - -print =============== step4 c3 -sql select count(tbcol2) from $tb interval(1d) -print select count(tbcol2) from $tb interval(1d) ===> $data00 $data01 -if $data01 != $rowNum then - return -1 -endi - -$st = $stPrefix . c3 -sql create table $st as select count(tbcol2) from $tb interval(1d) - -print =============== step5 avg -sql select avg(tbcol) from $tb interval(1d) -print select avg(tbcol) from $tb interval(1d) ===> $data00 $data01 -if $data01 != 9.500000000 then - return -1 -endi - -$st = $stPrefix . av -sql create table $st as select avg(tbcol) from $tb interval(1d) - -print =============== step6 su -sql select sum(tbcol) from $tb interval(1d) -print select sum(tbcol) from $tb interval(1d) ===> $data00 $data01 -if $data01 != 190 then - return -1 -endi - -$st = $stPrefix . su -sql create table $st as select sum(tbcol) from $tb interval(1d) - -print =============== step7 mi -sql select min(tbcol) from $tb interval(1d) -print select min(tbcol) from $tb interval(1d) ===> $data00 $data01 -if $data01 != 0 then - return -1 -endi - -$st = $stPrefix . mi -sql create table $st as select min(tbcol) from $tb interval(1d) - -print =============== step8 ma -sql select max(tbcol) from $tb interval(1d) -print select max(tbcol) from $tb interval(1d) ===> $data00 $data01 -if $data01 != 19 then - return -1 -endi - -$st = $stPrefix . ma -sql create table $st as select max(tbcol) from $tb interval(1d) - -print =============== step9 fi -sql select first(tbcol) from $tb interval(1d) -print select first(tbcol) from $tb interval(1d) ===> $data00 $data01 -if $data01 != 0 then - return -1 -endi - -$st = $stPrefix . fi -sql create table $st as select first(tbcol) from $tb interval(1d) - -print =============== step10 la -sql select last(tbcol) from $tb interval(1d) -print select last(tbcol) from $tb interval(1d) ===> $data00 $data01 -if $data01 != 19 then - return -1 -endi - -$st = $stPrefix . la -sql create table $st as select last(tbcol) from $tb interval(1d) - -print =============== step11 st -sql select stddev(tbcol) from $tb interval(1d) -print select stddev(tbcol) from $tb interval(1d) ===> $data00 $data01 -if $data01 != 5.766281297 then - return -1 -endi - -$st = $stPrefix . std -sql create table $st as select stddev(tbcol) from $tb interval(1d) - -print =============== step12 le -sql select leastsquares(tbcol, 1, 1) from $tb interval(1d) -print select leastsquares(tbcol, 1, 1) from $tb interval(1d) ===> $data00 $data01 -#if $data01 != @(0.000017, -25362055.126740)@ then -# return -1 -#endi - -$st = $stPrefix . le -sql create table $st as select leastsquares(tbcol, 1, 1) from $tb interval(1d) - -print =============== step13 pe - -sql select percentile(tbcol, 1) from $tb interval(1d) -print select percentile(tbcol, 1) from $tb interval(1d) ===> $data00 $data01 -if $data01 != 0.190000000 then - return -1 -endi - -$st = $stPrefix . pe -sql create table $st as select percentile(tbcol, 1) from $tb interval(1d) - -print =============== step14 wh -sql select count(tbcol) from $tb where ts < now + 4m interval(1d) -print select count(tbcol) from $tb where ts < now + 4m interval(1d) ===> $data00 $data01 -if $data01 != $rowNum then - return -1 -endi - -$st = $stPrefix . 
wh -#sql create table $st as select count(tbcol) from $tb where ts < now + 4m interval(1d) - -print =============== step15 as -sql select count(tbcol) from $tb interval(1d) -print select count(tbcol) from $tb interval(1d) ===> $data00 $data01 -if $data01 != $rowNum then - return -1 -endi - -$st = $stPrefix . as -sql create table $st as select count(tbcol) as c from $tb interval(1d) - -print =============== step16 -print sleep 22 seconds -sleep 22000 - -print =============== step17 -$st = $stPrefix . c1 -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != $rowNum then - return -1 -endi - -$st = $stPrefix . c2 -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != $rowNum then - return -1 -endi - -$st = $stPrefix . c3 -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != $rowNum then - return -1 -endi - -$st = $stPrefix . av -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 9.500000000 then - return -1 -endi - -$st = $stPrefix . su -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 190 then - return -1 -endi - -$st = $stPrefix . mi -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 0 then - return -1 -endi - -$st = $stPrefix . ma -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 19 then - return -1 -endi - -$st = $stPrefix . fi -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 0 then - return -1 -endi - -$st = $stPrefix . la -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 19 then - return -1 -endi - -$st = $stPrefix . std -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 5.766281297 then - return -1 -endi - -$st = $stPrefix . le -sql select * from $st -#print ===> select * from $st ===> $data00 $data01 -#if $data01 != @(0.000017, -25270086.331047)@ then -# return -1 -#endi - -$st = $stPrefix . pe -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 0.190000000 then - return -1 -endi - -$st = $stPrefix . wh -#sql select * from $st -#print ===> select * from $st ===> $data00 $data01 -#if $data01 != $rowNum then -# return -1 -#endi - -$st = $stPrefix . 
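Every one of these cases serializes on a fixed sleep 22000 before reading the stream tables, which is the classic source of flakiness in suites like this. A polling helper is the sturdier idiom; a minimal sketch of the idea (helper name and timings are illustrative, not part of the suite):

    import time

    def wait_for_rows(cursor, sql, expected, timeout=30.0, interval=1.0):
        # Poll sql until its first data column equals expected, or time out.
        deadline = time.monotonic() + timeout
        while time.monotonic() < deadline:
            cursor.execute(sql)
            rows = cursor.fetchall()
            if rows and rows[0][1] == expected:
                return rows
            time.sleep(interval)
        raise TimeoutError("%r never returned %r" % (sql, expected))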
as -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != $rowNum then - return -1 -endi diff --git a/tests/script/unique/stream/table_replica2_dnode3.sim b/tests/script/unique/stream/table_replica2_dnode3.sim deleted file mode 100644 index 49eb3563b3964f05f31d72a8fd1ff12f2b5b3a03..0000000000000000000000000000000000000000 --- a/tests/script/unique/stream/table_replica2_dnode3.sim +++ /dev/null @@ -1,325 +0,0 @@ -system sh/stop_dnodes.sh - - - - -system sh/deploy.sh -n dnode1 -i 1 -system sh/deploy.sh -n dnode2 -i 2 -system sh/deploy.sh -n dnode3 -i 3 -system sh/cfg.sh -n dnode1 -c walLevel -v 1 -system sh/cfg.sh -n dnode2 -c walLevel -v 1 -system sh/cfg.sh -n dnode3 -c walLevel -v 1 -system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4 -system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4 -system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v 4 -system sh/exec.sh -n dnode1 -s start - - -sql connect - -sql create dnode $hostname2 -sql create dnode $hostname3 -system sh/exec.sh -n dnode2 -s start -system sh/exec.sh -n dnode3 -s start -$x = 0 -createDnode: - $x = $x + 1 - sleep 1000 - if $x == 20 then - return -1 - endi -sql show dnodes; -if $data4_192.168.0.2 == offline then - goto createDnode -endi -if $data4_192.168.0.3 == offline then - goto createDnode -endi - -print ======================== dnode1 start - -$dbPrefix = t2d3_db -$tbPrefix = t2d3_tb -$mtPrefix = t2d3_mt -$stPrefix = t2d3_st -$tbNum = 10 -$rowNum = 20 -$totalNum = 200 - -print =============== step1 -$i = 0 -$db = $dbPrefix . $i -$mt = $mtPrefix . $i -$st = $stPrefix . $i - -sql drop databae $db -x step1 -step1: -sql create database $db replica 2 -sql use $db -sql create table $mt (ts timestamp, tbcol int, tbcol2 float) TAGS(tgcol int) - -$i = 0 -while $i < $tbNum - $tb = $tbPrefix . $i - sql create table $tb using $mt tags( $i ) - - $x = -1440 - $y = 0 - while $y < $rowNum - $ms = $x . m - sql insert into $tb values (now $ms , $y , $y ) - $x = $x + 1 - $y = $y + 1 - endw - - $i = $i + 1 -endw - -sleep 100 - -print =============== step2 c1 -$i = 1 -$tb = $tbPrefix . $i - -sql select count(*) from $tb interval(1d) -print select count(*) from $tb interval(1d) ===> $data00 $data01 -if $data01 != $rowNum then - return -1 -endi - -$st = $stPrefix . c1 -sql create table $st as select count(*) from $tb interval(1d) - -print =============== step3 c2 -sql select count(tbcol) from $tb interval(1d) -print select count(tbcol) from $tb interval(1d) ===> $data00 $data01 -if $data01 != $rowNum then - return -1 -endi - -$st = $stPrefix . c2 -sql create table $st as select count(tbcol) from $tb interval(1d) - -print =============== step4 c3 -sql select count(tbcol2) from $tb interval(1d) -print select count(tbcol2) from $tb interval(1d) ===> $data00 $data01 -if $data01 != $rowNum then - return -1 -endi - -$st = $stPrefix . c3 -sql create table $st as select count(tbcol2) from $tb interval(1d) - -print =============== step5 avg -sql select avg(tbcol) from $tb interval(1d) -print select avg(tbcol) from $tb interval(1d) ===> $data00 $data01 -if $data01 != 9.500000000 then - return -1 -endi - -$st = $stPrefix . av -sql create table $st as select avg(tbcol) from $tb interval(1d) - -print =============== step6 su -sql select sum(tbcol) from $tb interval(1d) -print select sum(tbcol) from $tb interval(1d) ===> $data00 $data01 -if $data01 != 190 then - return -1 -endi - -$st = $stPrefix . 
su -sql create table $st as select sum(tbcol) from $tb interval(1d) - -print =============== step7 mi -sql select min(tbcol) from $tb interval(1d) -print select min(tbcol) from $tb interval(1d) ===> $data00 $data01 -if $data01 != 0 then - return -1 -endi - -$st = $stPrefix . mi -sql create table $st as select min(tbcol) from $tb interval(1d) - -print =============== step8 ma -sql select max(tbcol) from $tb interval(1d) -print select max(tbcol) from $tb interval(1d) ===> $data00 $data01 -if $data01 != 19 then - return -1 -endi - -$st = $stPrefix . ma -sql create table $st as select max(tbcol) from $tb interval(1d) - -print =============== step9 fi -sql select first(tbcol) from $tb interval(1d) -print select first(tbcol) from $tb interval(1d) ===> $data00 $data01 -if $data01 != 0 then - return -1 -endi - -$st = $stPrefix . fi -sql create table $st as select first(tbcol) from $tb interval(1d) - -print =============== step10 la -sql select last(tbcol) from $tb interval(1d) -print select last(tbcol) from $tb interval(1d) ===> $data00 $data01 -if $data01 != 19 then - return -1 -endi - -$st = $stPrefix . la -sql create table $st as select last(tbcol) from $tb interval(1d) - -print =============== step11 st -sql select stddev(tbcol) from $tb interval(1d) -print select stddev(tbcol) from $tb interval(1d) ===> $data00 $data01 -if $data01 != 5.766281297 then - return -1 -endi - -$st = $stPrefix . std -sql create table $st as select stddev(tbcol) from $tb interval(1d) - -print =============== step12 le -sql select leastsquares(tbcol, 1, 1) from $tb interval(1d) -print select leastsquares(tbcol, 1, 1) from $tb interval(1d) ===> $data00 $data01 -#if $data01 != @(0.000017, -25362055.126740)@ then -# return -1 -#endi - -$st = $stPrefix . le -sql create table $st as select leastsquares(tbcol, 1, 1) from $tb interval(1d) - -print =============== step13 pe - -sql select percentile(tbcol, 1) from $tb interval(1d) -print select percentile(tbcol, 1) from $tb interval(1d) ===> $data00 $data01 -if $data01 != 0.190000000 then - return -1 -endi - -$st = $stPrefix . pe -sql create table $st as select percentile(tbcol, 1) from $tb interval(1d) - -print =============== step14 wh -sql select count(tbcol) from $tb where ts < now + 4m interval(1d) -print select count(tbcol) from $tb where ts < now + 4m interval(1d) ===> $data00 $data01 -if $data01 != $rowNum then - return -1 -endi - -$st = $stPrefix . wh -#sql create table $st as select count(tbcol) from $tb where ts < now + 4m interval(1d) - -print =============== step15 as -sql select count(tbcol) from $tb interval(1d) -print select count(tbcol) from $tb interval(1d) ===> $data00 $data01 -if $data01 != $rowNum then - return -1 -endi - -$st = $stPrefix . as -sql create table $st as select count(tbcol) as c from $tb interval(1d) - -print =============== step16 -print sleep 22 seconds -sleep 22000 - -print =============== step17 -$st = $stPrefix . c1 -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != $rowNum then - return -1 -endi - -$st = $stPrefix . c2 -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != $rowNum then - return -1 -endi - -$st = $stPrefix . c3 -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != $rowNum then - return -1 -endi - -$st = $stPrefix . av -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 9.500000000 then - return -1 -endi - -$st = $stPrefix . 
su -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 190 then - return -1 -endi - -$st = $stPrefix . mi -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 0 then - return -1 -endi - -$st = $stPrefix . ma -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 19 then - return -1 -endi - -$st = $stPrefix . fi -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 0 then - return -1 -endi - -$st = $stPrefix . la -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 19 then - return -1 -endi - -$st = $stPrefix . std -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 5.766281297 then - return -1 -endi - -$st = $stPrefix . le -sql select * from $st -#print ===> select * from $st ===> $data00 $data01 -#if $data01 != @(0.000017, -25270086.331047)@ then -# return -1 -#endi - -$st = $stPrefix . pe -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 0.190000000 then - return -1 -endi - -$st = $stPrefix . wh -#sql select * from $st -#print ===> select * from $st ===> $data00 $data01 -#if $data01 != $rowNum then -# return -1 -#endi - -$st = $stPrefix . as -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != $rowNum then - return -1 -endi diff --git a/tests/script/unique/stream/table_replica3_dnode4.sim b/tests/script/unique/stream/table_replica3_dnode4.sim deleted file mode 100644 index 2cc443c72fc656b87ca8c1d330381ed5078cd755..0000000000000000000000000000000000000000 --- a/tests/script/unique/stream/table_replica3_dnode4.sim +++ /dev/null @@ -1,333 +0,0 @@ -system sh/stop_dnodes.sh - - - - - -system sh/deploy.sh -n dnode1 -i 1 -system sh/deploy.sh -n dnode2 -i 2 -system sh/deploy.sh -n dnode3 -i 3 -system sh/deploy.sh -n dnode4 -i 4 -system sh/cfg.sh -n dnode1 -c walLevel -v 1 -system sh/cfg.sh -n dnode2 -c walLevel -v 1 -system sh/cfg.sh -n dnode3 -c walLevel -v 1 -system sh/cfg.sh -n dnode4 -c walLevel -v 1 -system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4 -system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4 -system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v 4 -system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v 4 -system sh/exec.sh -n dnode1 -s start - -sql connect - -sql create dnode $hostname2 -sql create dnode $hostname3 -sql create dnode $hostname4 -system sh/exec.sh -n dnode2 -s start -system sh/exec.sh -n dnode3 -s start -system sh/exec.sh -n dnode4 -s start -$x = 0 -createDnode: - $x = $x + 1 - sleep 1000 - if $x == 20 then - return -1 - endi -sql show dnodes; -if $data4_192.168.0.2 == offline then - goto createDnode -endi -if $data4_192.168.0.3 == offline then - goto createDnode -endi -if $data4_192.168.0.4 == offline then - goto createDnode -endi - -print ======================== dnode1 start - -$dbPrefix = t3d_db -$tbPrefix = t3d_tb -$mtPrefix = t3d_mt -$stPrefix = t3d_st -$tbNum = 10 -$rowNum = 20 -$totalNum = 200 - -print =============== step1 -$i = 0 -$db = $dbPrefix . $i -$mt = $mtPrefix . $i -$st = $stPrefix . $i - -sql drop databae $db -x step1 -step1: -sql create database $db replica 3 -sql use $db -sql create table $mt (ts timestamp, tbcol int, tbcol2 float) TAGS(tgcol int) - -$i = 0 -while $i < $tbNum - $tb = $tbPrefix . $i - sql create table $tb using $mt tags( $i ) - - $x = -1440 - $y = 0 - while $y < $rowNum - $ms = $x . 
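The insert loop here backdates rows one minute apart: x starts at -1440, so now $ms walks from 24 hours ago up to the present, 20 rows per table. The same data can be generated from Python, assuming an open cursor and the t3d_tb0 table the script creates (an illustrative sketch only):

    rows_per_table = 20
    for y in range(rows_per_table):
        offset_min = -1440 + y   # minutes relative to now, starting 24 h back
        cur.execute("insert into t3d_tb0 values (now %dm, %d, %d)" % (offset_min, y, y))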
m - sql insert into $tb values (now $ms , $y , $y ) - $x = $x + 1 - $y = $y + 1 - endw - - $i = $i + 1 -endw - -sleep 100 - -print =============== step2 c1 -$i = 1 -$tb = $tbPrefix . $i - -sql select count(*) from $tb interval(1d) -print select count(*) from $tb interval(1d) ===> $data00 $data01 -if $data01 != $rowNum then - return -1 -endi - -$st = $stPrefix . c1 -sql create table $st as select count(*) from $tb interval(1d) - -print =============== step3 c2 -sql select count(tbcol) from $tb interval(1d) -print select count(tbcol) from $tb interval(1d) ===> $data00 $data01 -if $data01 != $rowNum then - return -1 -endi - -$st = $stPrefix . c2 -sql create table $st as select count(tbcol) from $tb interval(1d) - -print =============== step4 c3 -sql select count(tbcol2) from $tb interval(1d) -print select count(tbcol2) from $tb interval(1d) ===> $data00 $data01 -if $data01 != $rowNum then - return -1 -endi - -$st = $stPrefix . c3 -sql create table $st as select count(tbcol2) from $tb interval(1d) - -print =============== step5 avg -sql select avg(tbcol) from $tb interval(1d) -print select avg(tbcol) from $tb interval(1d) ===> $data00 $data01 -if $data01 != 9.500000000 then - return -1 -endi - -$st = $stPrefix . av -sql create table $st as select avg(tbcol) from $tb interval(1d) - -print =============== step6 su -sql select sum(tbcol) from $tb interval(1d) -print select sum(tbcol) from $tb interval(1d) ===> $data00 $data01 -if $data01 != 190 then - return -1 -endi - -$st = $stPrefix . su -sql create table $st as select sum(tbcol) from $tb interval(1d) - -print =============== step7 mi -sql select min(tbcol) from $tb interval(1d) -print select min(tbcol) from $tb interval(1d) ===> $data00 $data01 -if $data01 != 0 then - return -1 -endi - -$st = $stPrefix . mi -sql create table $st as select min(tbcol) from $tb interval(1d) - -print =============== step8 ma -sql select max(tbcol) from $tb interval(1d) -print select max(tbcol) from $tb interval(1d) ===> $data00 $data01 -if $data01 != 19 then - return -1 -endi - -$st = $stPrefix . ma -sql create table $st as select max(tbcol) from $tb interval(1d) - -print =============== step9 fi -sql select first(tbcol) from $tb interval(1d) -print select first(tbcol) from $tb interval(1d) ===> $data00 $data01 -if $data01 != 0 then - return -1 -endi - -$st = $stPrefix . fi -sql create table $st as select first(tbcol) from $tb interval(1d) - -print =============== step10 la -sql select last(tbcol) from $tb interval(1d) -print select last(tbcol) from $tb interval(1d) ===> $data00 $data01 -if $data01 != 19 then - return -1 -endi - -$st = $stPrefix . la -sql create table $st as select last(tbcol) from $tb interval(1d) - -print =============== step11 st -sql select stddev(tbcol) from $tb interval(1d) -print select stddev(tbcol) from $tb interval(1d) ===> $data00 $data01 -if $data01 != 5.766281297 then - return -1 -endi - -$st = $stPrefix . std -sql create table $st as select stddev(tbcol) from $tb interval(1d) - -print =============== step12 le -sql select leastsquares(tbcol, 1, 1) from $tb interval(1d) -print select leastsquares(tbcol, 1, 1) from $tb interval(1d) ===> $data00 $data01 -#if $data01 != @(0.000017, -25362055.126740)@ then -# return -1 -#endi - -$st = $stPrefix . 
le -sql create table $st as select leastsquares(tbcol, 1, 1) from $tb interval(1d) - -print =============== step13 pe - -sql select percentile(tbcol, 1) from $tb interval(1d) -print select percentile(tbcol, 1) from $tb interval(1d) ===> $data00 $data01 -if $data01 != 0.190000000 then - return -1 -endi - -$st = $stPrefix . pe -sql create table $st as select percentile(tbcol, 1) from $tb interval(1d) - -print =============== step14 wh -sql select count(tbcol) from $tb where ts < now + 4m interval(1d) -print select count(tbcol) from $tb where ts < now + 4m interval(1d) ===> $data00 $data01 -if $data01 != $rowNum then - return -1 -endi - -$st = $stPrefix . wh -#sql create table $st as select count(tbcol) from $tb where ts < now + 4m interval(1d) - -print =============== step15 as -sql select count(tbcol) from $tb interval(1d) -print select count(tbcol) from $tb interval(1d) ===> $data00 $data01 -if $data01 != $rowNum then - return -1 -endi - -$st = $stPrefix . as -sql create table $st as select count(tbcol) as c from $tb interval(1d) - -print =============== step16 -print sleep 22 seconds -sleep 22000 - -print =============== step17 -$st = $stPrefix . c1 -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != $rowNum then - return -1 -endi - -$st = $stPrefix . c2 -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != $rowNum then - return -1 -endi - -$st = $stPrefix . c3 -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != $rowNum then - return -1 -endi - -$st = $stPrefix . av -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 9.500000000 then - return -1 -endi - -$st = $stPrefix . su -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 190 then - return -1 -endi - -$st = $stPrefix . mi -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 0 then - return -1 -endi - -$st = $stPrefix . ma -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 19 then - return -1 -endi - -$st = $stPrefix . fi -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 0 then - return -1 -endi - -$st = $stPrefix . la -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 19 then - return -1 -endi - -$st = $stPrefix . std -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 5.766281297 then - return -1 -endi - -$st = $stPrefix . le -sql select * from $st -#print ===> select * from $st ===> $data00 $data01 -#if $data01 != @(0.000017, -25270086.331047)@ then -# return -1 -#endi - -$st = $stPrefix . pe -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 0.190000000 then - return -1 -endi - -$st = $stPrefix . wh -#sql select * from $st -#print ===> select * from $st ===> $data00 $data01 -#if $data01 != $rowNum then -# return -1 -#endi - -$st = $stPrefix . 
as -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != $rowNum then - return -1 -endi diff --git a/tests/script/unique/stream/table_vnode_stop.sim b/tests/script/unique/stream/table_vnode_stop.sim deleted file mode 100644 index 625de32a8d7a1e5336dd10f313565bdbc0daf0fc..0000000000000000000000000000000000000000 --- a/tests/script/unique/stream/table_vnode_stop.sim +++ /dev/null @@ -1,189 +0,0 @@ -system sh/stop_dnodes.sh - - - -system sh/deploy.sh -n dnode1 -i 1 -system sh/deploy.sh -n dnode2 -i 2 -system sh/cfg.sh -n dnode1 -c walLevel -v 1 -system sh/cfg.sh -n dnode2 -c walLevel -v 1 -system sh/cfg.sh -n dnode1 -c numOfMnodes -v 2 -system sh/cfg.sh -n dnode2 -c numOfMnodes -v 2 -system sh/exec.sh -n dnode1 -s start - -sql connect - -sql create dnode $hostname2 -system sh/exec.sh -n dnode2 -s start -$x = 0 -createDnode: - $x = $x + 1 - sleep 1000 - if $x == 20 then - return -1 - endi -sql show dnodes; -if $data4_192.168.0.2 == offline then - goto createDnode -endi - -print ======================== dnode start - -$dbPrefix = db -$tbPrefix = tb -$mtPrefix = mt -$stPrefix = st -$tbNum = 10 -$rowNum = 20 -$totalNum = 200 - -print =============== step1 -$i = 0 -$db = $dbPrefix . $i -$mt = $mtPrefix . $i -$st = $stPrefix . $i - -sql create database $db replica 2 -sql use $db -sql create table $mt (ts timestamp, tbcol int, tbcol2 float) TAGS(tgcol int) - -$i = 0 -while $i < $tbNum - $tb = $tbPrefix . $i - sql create table $tb using $mt tags( $i ) - - $x = -1440 - $y = 0 - while $y < $rowNum - $ms = $x . m - sql insert into $tb values (now $ms , $y , $y ) - $x = $x + 1 - $y = $y + 1 - endw - - $i = $i + 1 -endw - -sleep 100 - -print =============== step2 c1 -$i = 1 -$tb = $tbPrefix . $i - -sql select count(*) from $tb interval(1d) -print select count(*) from $tb interval(1d) ===> $data00 $data01 -if $data01 != $rowNum then - return -1 -endi - -$st = $stPrefix . c1 -sql create table $st as select count(*) from $tb interval(1d) - -print =============== step3 -system sh/exec.sh -n dnode2 -s stop - -print =============== step4 -print sleep 22 seconds -sleep 22000 - -print =============== step5 -$st = $stPrefix . c1 -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != $rowNum then - return -1 -endi - -print ============= step6 - -sql close -system sh/exec.sh -n dnode1 -s stop -system sh/exec.sh -n dnode2 -s stop -system sh/deploy.sh -n dnode1 -i 1 -system sh/deploy.sh -n dnode2 -i 2 -system sh/cfg.sh -n dnode1 -c walLevel -v 1 -system sh/cfg.sh -n dnode2 -c walLevel -v 1 -sleep 2000 -system sh/exec.sh -n dnode2 -s start - -$x = 0 -connectTbase2: - $x = $x + 1 - sleep 1000 - if $x == 20 then - return -1 - endi -sql connect -x connectTbase2 -sleep 2000 - -sql create dnode $hostname1 -system sh/exec.sh -n dnode1 -s start -sleep 2000 -print ======================== dnode start - -$dbPrefix = db -$tbPrefix = tb -$mtPrefix = mt -$stPrefix = st -$tbNum = 10 -$rowNum = 20 -$totalNum = 200 - -print =============== step7 -$i = 0 -$db = $dbPrefix . $i -$mt = $mtPrefix . $i -$st = $stPrefix . $i - -sql create database $db replica 2 -sql use $db -sql create table $mt (ts timestamp, tbcol int, tbcol2 float) TAGS(tgcol int) - -$i = 0 -while $i < $tbNum - $tb = $tbPrefix . $i - sql create table $tb using $mt tags( $i ) - - $x = -1440 - $y = 0 - while $y < $rowNum - $ms = $x . 
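Before any data is written, each of these scripts parks in the createDnode: label, polling show dnodes up to 20 times until no peer reports offline. The equivalent guard in Python, sketched under the scripts' assumption that the status string sits in column 4 of show dnodes:

    import time

    def wait_dnodes_online(cursor, expected=2, tries=20):
        status = []
        for _ in range(tries):
            cursor.execute("show dnodes")
            status = [row[4] for row in cursor.fetchall()]
            if len(status) >= expected and all(s != "offline" for s in status):
                return
            time.sleep(1)
        raise RuntimeError("dnodes still offline: %s" % status)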
m - sql insert into $tb values (now $ms , $y , $y ) - $x = $x + 1 - $y = $y + 1 - endw - - $i = $i + 1 -endw - -sleep 100 - -print =============== step8 c1 -$i = 1 -$tb = $tbPrefix . $i - -sql select count(*) from $tb interval(1d) -print select count(*) from $tb interval(1d) ===> $data00 $data01 -if $data01 != $rowNum then - return -1 -endi - -$st = $stPrefix . c1 -sql create table $st as select count(*) from $tb interval(1d) - -print =============== step9 -system sh/exec.sh -n dnode1 -s stop - -print =============== step10 -print sleep 22 seconds -sleep 22000 - -print =============== step11 -$st = $stPrefix . c1 -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != $rowNum then - return -1 -endi - - - diff --git a/tests/script/unique/stream/testSuite.sim b/tests/script/unique/stream/testSuite.sim deleted file mode 100644 index bbf5da3d376d9eccc02aa61b1122cadb5fc04813..0000000000000000000000000000000000000000 --- a/tests/script/unique/stream/testSuite.sim +++ /dev/null @@ -1,15 +0,0 @@ -#run unique/stream/table_replica1_dnode2.sim -#run unique/stream/metrics_replica1_dnode2.sim -#run unique/stream/table_replica2_dnode2.sim -#run unique/stream/metrics_replica2_dnode2.sim -#run unique/stream/table_replica2_dnode2_vnoden.sim -#run unique/stream/metrics_replica2_dnode2_vnoden.sim -#run unique/stream/table_replica2_dnode3.sim -#run unique/stream/metrics_replica2_dnode3.sim -#run unique/stream/table_replica3_dnode4.sim -#run unique/stream/metrics_replica3_dnode4.sim -#run unique/stream/table_vnode_stop.sim -#run unique/stream/metrics_vnode_stop.sim -##run unique/stream/table_balance.sim -##run unique/stream/metrics_balance.sim -##run unique/stream/table_move.sim \ No newline at end of file diff --git a/tests/system-test/0-others/cachelast.py b/tests/system-test/0-others/cachelast.py new file mode 100644 index 0000000000000000000000000000000000000000..7e912eda9a73627962f98891a56da2c7fd3ab7ef --- /dev/null +++ b/tests/system-test/0-others/cachelast.py @@ -0,0 +1,148 @@ +import taos +import sys ,os ,json +import datetime +import inspect +import subprocess + +from util.log import * +from util.sql import * +from util.cases import * +from util.dnodes import * + + +class TDTestCase: + updatecfgDict = {'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 , + "jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, "dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143, + "wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"fnDebugFlag":143} + def init(self, conn, logSql): + tdLog.debug(f"start to excute {__file__}") + tdSql.init(conn.cursor(), True) + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosd" in files or "taosd.exe" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root) - len("/build/bin")] + break + return buildPath + + def illegal_params(self): + + illegal_params = ["1","0","NULL","None","False","True" ,"keep","now" ,"*" , "," ,"_" , "abc" ,"keep"] + + for value in illegal_params: + + tdSql.error("create database testdb replica 1 cachelast '%s' " %value) + + unexpected_numbers = [-1 , 0.0 , 3.0 , 4, 10 , 100] + + for number in unexpected_numbers: + 
tdSql.error("create database testdb replica 1 cachelast %s " %number) + + + def prepare_datas(self): + for i in range(4): + tdSql.execute("create database test_db_%d replica 1 cachelast %d " %(i,i)) + tdSql.execute("use test_db_%d"%i) + tdSql.execute("create stable st(ts timestamp , c1 int ,c2 float ) tags(ind int) ") + tdSql.execute("create table tb1 using st tags(1) ") + tdSql.execute("create table tb2 using st tags(2) ") + + for k in range(10): + tdSql.execute(" insert into tb1 values(now , %d, %f)" %(k,k*10) ) + tdSql.execute(" insert into tb2 values(now , %d, %f)" %(k,k*10) ) + + def check_cache_last_sets(self): + + + # check cache_last value for database + + tdSql.query(" show databases ") + databases_infos = tdSql.queryResult + cache_lasts = {} + for db_info in databases_infos: + dbname = db_info[0] + # print(dbname) + cache_last_value = db_info[16] + # print(cache_last_value) + if dbname in ["information_schema" , "performance_schema"]: + continue + cache_lasts[dbname]=cache_last_value + + + # cache_last_set value + for k , v in cache_lasts.items(): + + if k.split("_")[-1]==str(v): + tdLog.info(" database %s cache_last value check pass, value is %d "%(k,v) ) + else: + tdLog.exit(" database %s cache_last value check fail, value is %d "%(k,v) ) + + # # check storage layer implementation + + + # buildPath = self.getBuildPath() + # if (buildPath == ""): + # tdLog.exit("taosd not found!") + # else: + # tdLog.info("taosd found in %s" % buildPath) + # dataPath = buildPath + "/../sim/dnode1/data" + # abs_vnodePath = os.path.abspath(dataPath)+"/vnode/" + # tdLog.info("abs_vnodePath: %s" % abs_vnodePath) + + # tdSql.query(" show dnodes ") + # dnode_id = tdSql.queryResult[0][0] + + # for dbname in cache_lasts.keys(): + # print(dbname) + # tdSql.execute(" use %s" % dbname) + # tdSql.query(" show vgroups ") + # vgroups_infos = tdSql.queryResult + # for vgroup_info in vgroups_infos: + # vnode_json = abs_vnodePath + "/vnode" +f"{vgroup_info[0]}/" + "vnode.json" + # vnode_info_of_db = f"cat {vnode_json}" + # vnode_info = subprocess.check_output(vnode_info_of_db, shell=True).decode("utf-8") + # infoDict = json.loads(vnode_info) + # vnode_json_of_dbname = f"{dnode_id}."+ dbname + # config = infoDict["config"] + # if infoDict["config"]["dbname"] == vnode_json_of_dbname: + # if "cachelast" in infoDict["config"]: + # if int(infoDict["config"]["cachelast"]) != cache_lasts[dbname]: + # tdLog.exit("cachelast value is error in vnode.json of vnode%d "%(vgroup_info[0])) + # else: + # tdLog.exit("cachelast not found in vnode.json of vnode%d "%(vgroup_info[0])) + + def restart_check_cache_last_sets(self): + + for i in range(3): + tdSql.query("show dnodes") + index = tdSql.getData(0, 0) + tdDnodes.stop(index) + tdDnodes.start(index) + time.sleep(3) + self.check_cache_last_sets() + + + def run(self): # sourcery skip: extract-duplicate-method, remove-redundant-fstring + + + self.illegal_params() + self.prepare_datas() + self.check_cache_last_sets() + self.restart_check_cache_last_sets() + + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/0-others/taosShell.py b/tests/system-test/0-others/taosShell.py index f6dfe3f75c795ab8bd8eefc7b9d043d75854dc2e..046db93c4927d0aa39fbd2da4ac60cf12a6537c6 100644 --- a/tests/system-test/0-others/taosShell.py +++ b/tests/system-test/0-others/taosShell.py @@ -3,8 +3,12 @@ import taos import sys import time import socket -import 
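The new cachelast.py above reduces to a round trip: reject malformed cachelast values, create databases with cachelast 0 through 3, then confirm show databases echoes the value back, including across dnode restarts. The core assertion standalone with taospy, keeping the test's assumption that cachelast sits at column index 16 of show databases:

    import taos

    conn = taos.connect(host="localhost")
    cur = conn.cursor()
    for i in range(4):
        cur.execute("create database cl_db_%d replica 1 cachelast %d" % (i, i))
    cur.execute("show databases")
    for row in cur.fetchall():
        if row[0].startswith("cl_db_"):
            assert row[16] == int(row[0].split("_")[-1]), row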
pexpect import os +import platform +if platform.system().lower() == 'windows': + import wexpect as taosExpect +else: + import pexpect as taosExpect from util.log import * from util.sql import * @@ -15,7 +19,11 @@ def taos_command (buildPath, key, value, expectString, cfgDir, sqlString='', key if len(key) == 0: tdLog.exit("taos test key is null!") - taosCmd = buildPath + '/build/bin/taos ' + if platform.system().lower() == 'windows': + taosCmd = buildPath + '\\build\\bin\\taos.exe ' + taosCmd = taosCmd.replace('\\','\\\\') + else: + taosCmd = buildPath + '/build/bin/taos ' if len(cfgDir) != 0: taosCmd = taosCmd + '-c ' + cfgDir @@ -36,25 +44,30 @@ def taos_command (buildPath, key, value, expectString, cfgDir, sqlString='', key tdLog.info ("taos cmd: %s" % taosCmd) - child = pexpect.spawn(taosCmd, timeout=3) + child = taosExpect.spawn(taosCmd, timeout=3) #output = child.readline() #print (output.decode()) if len(expectString) != 0: - i = child.expect([expectString, pexpect.TIMEOUT, pexpect.EOF], timeout=6) + i = child.expect([expectString, taosExpect.TIMEOUT, taosExpect.EOF], timeout=6) else: - i = child.expect([pexpect.TIMEOUT, pexpect.EOF], timeout=6) + i = child.expect([taosExpect.TIMEOUT, taosExpect.EOF], timeout=6) - retResult = child.before.decode() + if platform.system().lower() == 'windows': + retResult = child.before + else: + retResult = child.before.decode() print(retResult) #print(child.after.decode()) if i == 0: print ('taos login success! Here can run sql, taos> ') if len(sqlString) != 0: child.sendline (sqlString) - w = child.expect(["Query OK", pexpect.TIMEOUT, pexpect.EOF], timeout=1) + w = child.expect(["Query OK", taosExpect.TIMEOUT, taosExpect.EOF], timeout=1) if w == 0: return "TAOS_OK" else: + print(1) + print(retResult) return "TAOS_FAIL" else: if key == 'A' or key1 == 'A' or key == 'C' or key1 == 'C' or key == 'V' or key1 == 'V': @@ -71,6 +84,12 @@ class TDTestCase: #updatecfgDict = {'clientCfg': {'serverPort': 7080, 'firstEp': 'trd02:7080', 'secondEp':'trd02:7080'},\ # 'serverPort': 7080, 'firstEp': 'trd02:7080'} hostname = socket.gethostname() + if (platform.system().lower() == 'windows' and not tdDnodes.dnodes[0].remoteIP == ""): + try: + config = eval(tdDnodes.dnodes[0].remoteIP) + hostname = config["host"] + except Exception: + hostname = tdDnodes.dnodes[0].remoteIP serverPort = '7080' rpcDebugFlagVal = '143' clientCfgDict = {'serverPort': '', 'firstEp': '', 'secondEp':'', 'rpcDebugFlag':'135', 'fqdn':''} @@ -102,7 +121,7 @@ class TDTestCase: projPath = selfPath[:selfPath.find("tests")] for root, dirs, files in os.walk(projPath): - if ("taosd" in files): + if ("taosd" in files or "taosd.exe" in files): rootRealPath = os.path.dirname(os.path.realpath(root)) if ("packaging" not in rootRealPath): buildPath = root[:len(root) - len("/build/bin")] @@ -275,11 +294,15 @@ class TDTestCase: pwd=os.getcwd() newDbName="dbf" sqlFile = pwd + "/0-others/sql.txt" - sql1 = "echo 'create database " + newDbName + "' > " + sqlFile - sql2 = "echo 'use " + newDbName + "' >> " + sqlFile - sql3 = "echo 'create table ntbf (ts timestamp, c binary(40))' >> " + sqlFile - sql4 = "echo 'insert into ntbf values (\"2021-04-01 08:00:00.000\", \"test taos -f1\")(\"2021-04-01 08:00:01.000\", \"test taos -f2\")' >> " + sqlFile - sql5 = "echo 'show databases' >> " + sqlFile + sql1 = "echo create database " + newDbName + " > " + sqlFile + sql2 = "echo use " + newDbName + " >> " + sqlFile + if platform.system().lower() == 'windows': + sql3 = "echo create table ntbf (ts timestamp, c binary(40)) >> " + 
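The hunk above is the recurring cross-platform fix in these CLI tests: wexpect stands in for pexpect on Windows behind a single alias, so every spawn/expect call site stays unchanged. Isolated, the pattern is:

    import platform

    if platform.system().lower() == 'windows':
        import wexpect as taosExpect   # pexpect-compatible API for Windows consoles
    else:
        import pexpect as taosExpect

    child = taosExpect.spawn("taos", timeout=3)   # assumes taos on PATH
    i = child.expect(["taos>", taosExpect.TIMEOUT, taosExpect.EOF], timeout=6)
    # wexpect returns str where pexpect returns bytes, hence the decode() branches
    out = child.before if isinstance(child.before, str) else child.before.decode()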
sqlFile + sql4 = "echo insert into ntbf values (\"2021-04-01 08:00:00.000\", \"test taos -f1\")(\"2021-04-01 08:00:01.000\", \"test taos -f2\") >> " + sqlFile + else: + sql3 = "echo 'create table ntbf (ts timestamp, c binary(40))' >> " + sqlFile + sql4 = "echo 'insert into ntbf values (\"2021-04-01 08:00:00.000\", \"test taos -f1\")(\"2021-04-01 08:00:01.000\", \"test taos -f2\")' >> " + sqlFile + sql5 = "echo show databases >> " + sqlFile os.system(sql1) os.system(sql2) os.system(sql3) diff --git a/tests/system-test/0-others/taosShellError.py b/tests/system-test/0-others/taosShellError.py index 5f2f79982a58fe33e361f7c05926fc7c276f84d7..2369e4d580e491f52e8508c21934085f6ecf89a6 100644 --- a/tests/system-test/0-others/taosShellError.py +++ b/tests/system-test/0-others/taosShellError.py @@ -3,7 +3,11 @@ import taos import sys import time import socket -import pexpect +import platform +if platform.system().lower() == 'windows': + import wexpect as taosExpect +else: + import pexpect as taosExpect import os from util.log import * @@ -15,7 +19,11 @@ def taos_command (buildPath, key, value, expectString, cfgDir, sqlString='', key if len(key) == 0: tdLog.exit("taos test key is null!") - taosCmd = buildPath + '/build/bin/taos ' + if platform.system().lower() == 'windows': + taosCmd = buildPath + '\\build\\bin\\taos.exe ' + taosCmd = taosCmd.replace('\\','\\\\') + else: + taosCmd = buildPath + '/build/bin/taos ' if len(cfgDir) != 0: taosCmd = taosCmd + '-c ' + cfgDir @@ -36,23 +44,29 @@ def taos_command (buildPath, key, value, expectString, cfgDir, sqlString='', key tdLog.info ("taos cmd: %s" % taosCmd) - child = pexpect.spawn(taosCmd, timeout=3) + child = taosExpect.spawn(taosCmd, timeout=3) #output = child.readline() #print (output.decode()) if len(expectString) != 0: - i = child.expect([expectString, pexpect.TIMEOUT, pexpect.EOF], timeout=6) + i = child.expect([expectString, taosExpect.TIMEOUT, taosExpect.EOF], timeout=6) else: - i = child.expect([pexpect.TIMEOUT, pexpect.EOF], timeout=6) + i = child.expect([taosExpect.TIMEOUT, taosExpect.EOF], timeout=6) - retResult = child.before.decode() + if platform.system().lower() == 'windows': + retResult = child.before + else: + retResult = child.before.decode() print("cmd return result:\n%s\n"%retResult) #print(child.after.decode()) if i == 0: print ('taos login success! 
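The sql1..sql5 rework just above exists because cmd.exe's echo writes quote characters into the file verbatim, while POSIX shells strip them, so the Windows branch drops the single quotes. Writing sql.txt directly from Python would sidestep shell quoting entirely; a sketch with the same statements the test generates:

    statements = [
        "create database dbf",
        "use dbf",
        "create table ntbf (ts timestamp, c binary(40))",
        'insert into ntbf values ("2021-04-01 08:00:00.000", "test taos -f1")'
        '("2021-04-01 08:00:01.000", "test taos -f2")',
        "show databases",
    ]
    with open("0-others/sql.txt", "w") as f:
        f.write("\n".join(statements) + "\n")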
Here can run sql, taos> ') if len(sqlString) != 0: child.sendline (sqlString) - w = child.expect(["Query OK", pexpect.TIMEOUT, pexpect.EOF], timeout=1) - retResult = child.before.decode() + w = child.expect(["Query OK", taosExpect.TIMEOUT, taosExpect.EOF], timeout=1) + if platform.system().lower() == 'windows': + retResult = child.before + else: + retResult = child.before.decode() if w == 0: return "TAOS_OK", retResult else: @@ -72,6 +86,12 @@ class TDTestCase: #updatecfgDict = {'clientCfg': {'serverPort': 7080, 'firstEp': 'trd02:7080', 'secondEp':'trd02:7080'},\ # 'serverPort': 7080, 'firstEp': 'trd02:7080'} hostname = socket.gethostname() + if (platform.system().lower() == 'windows' and not tdDnodes.dnodes[0].remoteIP == ""): + try: + config = eval(tdDnodes.dnodes[0].remoteIP) + hostname = config["host"] + except Exception: + hostname = tdDnodes.dnodes[0].remoteIP serverPort = '7080' rpcDebugFlagVal = '143' clientCfgDict = {'serverPort': '', 'firstEp': '', 'secondEp':'', 'rpcDebugFlag':'135', 'fqdn':''} @@ -103,7 +123,7 @@ class TDTestCase: projPath = selfPath[:selfPath.find("tests")] for root, dirs, files in os.walk(projPath): - if ("taosd" in files): + if ("taosd" in files or "taosd.exe" in files): rootRealPath = os.path.dirname(os.path.realpath(root)) if ("packaging" not in rootRealPath): buildPath = root[:len(root) - len("/build/bin")] @@ -216,11 +236,15 @@ class TDTestCase: pwd=os.getcwd() newDbName="dbf" sqlFile = pwd + "/0-others/sql.txt" - sql1 = "echo 'create database " + newDbName + "' > " + sqlFile - sql2 = "echo 'use " + newDbName + "' >> " + sqlFile - sql3 = "echo 'create table ntbf (ts timestamp, c binary(40)) no this item' >> " + sqlFile - sql4 = "echo 'insert into ntbf values (\"2021-04-01 08:00:00.000\", \"test taos -f1\")(\"2021-04-01 08:00:01.000\", \"test taos -f2\")' >> " + sqlFile - sql5 = "echo 'show databases' >> " + sqlFile + sql1 = "echo create database " + newDbName + " > " + sqlFile + sql2 = "echo use " + newDbName + " >> " + sqlFile + if platform.system().lower() == 'windows': + sql3 = "echo create table ntbf (ts timestamp, c binary(40)) no this item >> " + sqlFile + sql4 = "echo insert into ntbf values (\"2021-04-01 08:00:00.000\", \"test taos -f1\")(\"2021-04-01 08:00:01.000\", \"test taos -f2\") >> " + sqlFile + else: + sql3 = "echo 'create table ntbf (ts timestamp, c binary(40)) no this item' >> " + sqlFile + sql4 = "echo 'insert into ntbf values (\"2021-04-01 08:00:00.000\", \"test taos -f1\")(\"2021-04-01 08:00:01.000\", \"test taos -f2\")' >> " + sqlFile + sql5 = "echo show databases >> " + sqlFile os.system(sql1) os.system(sql2) os.system(sql3) diff --git a/tests/system-test/0-others/taosShellNetChk.py b/tests/system-test/0-others/taosShellNetChk.py index bbaeacf328fd5422ccd018a79ce6d9c632a370a9..3c99ddb8d697da58b7af8abd1eac1fc703bb06cf 100644 --- a/tests/system-test/0-others/taosShellNetChk.py +++ b/tests/system-test/0-others/taosShellNetChk.py @@ -3,7 +3,11 @@ import taos import sys import time import socket -import pexpect +import platform +if platform.system().lower() == 'windows': + import wexpect as taosExpect +else: + import pexpect as taosExpect import os from util.log import * @@ -15,7 +19,11 @@ def taos_command (buildPath, key, value, expectString, cfgDir, sqlString='', key if len(key) == 0: tdLog.exit("taos test key is null!") - taosCmd = buildPath + '/build/bin/taos ' + if platform.system().lower() == 'windows': + taosCmd = buildPath + '\\build\\bin\\taos.exe ' + taosCmd = taosCmd.replace('\\','\\\\') + else: + taosCmd = buildPath + 
'/build/bin/taos ' if len(cfgDir) != 0: taosCmd = taosCmd + '-c ' + cfgDir @@ -36,23 +44,29 @@ def taos_command (buildPath, key, value, expectString, cfgDir, sqlString='', key tdLog.info ("taos cmd: %s" % taosCmd) - child = pexpect.spawn(taosCmd, timeout=3) + child = taosExpect.spawn(taosCmd, timeout=3) #output = child.readline() #print (output.decode()) if len(expectString) != 0: - i = child.expect([expectString, pexpect.TIMEOUT, pexpect.EOF], timeout=6) + i = child.expect([expectString, taosExpect.TIMEOUT, taosExpect.EOF], timeout=6) else: - i = child.expect([pexpect.TIMEOUT, pexpect.EOF], timeout=6) + i = child.expect([taosExpect.TIMEOUT, taosExpect.EOF], timeout=6) - retResult = child.before.decode() + if platform.system().lower() == 'windows': + retResult = child.before + else: + retResult = child.before.decode() print("expect() return code: %d, content:\n %s\n"%(i, retResult)) #print(child.after.decode()) if i == 0: print ('taos login success! Here can run sql, taos> ') if len(sqlString) != 0: child.sendline (sqlString) - w = child.expect(["Query OK", pexpect.TIMEOUT, pexpect.EOF], timeout=1) - retResult = child.before.decode() + w = child.expect(["Query OK", taosExpect.TIMEOUT, taosExpect.EOF], timeout=1) + if platform.system().lower() == 'windows': + retResult = child.before + else: + retResult = child.before.decode() if w == 0: return "TAOS_OK", retResult else: @@ -72,6 +86,12 @@ class TDTestCase: #updatecfgDict = {'clientCfg': {'serverPort': 7080, 'firstEp': 'trd02:7080', 'secondEp':'trd02:7080'},\ # 'serverPort': 7080, 'firstEp': 'trd02:7080'} hostname = socket.gethostname() + if (platform.system().lower() == 'windows' and not tdDnodes.dnodes[0].remoteIP == ""): + try: + config = eval(tdDnodes.dnodes[0].remoteIP ) + hostname = config["host"] + except Exception: + hostname = tdDnodes.dnodes[0].remoteIP serverPort = '7080' rpcDebugFlagVal = '143' clientCfgDict = {'serverPort': '', 'firstEp': '', 'secondEp':'', 'rpcDebugFlag':'135', 'fqdn':''} @@ -103,7 +123,7 @@ class TDTestCase: projPath = selfPath[:selfPath.find("tests")] for root, dirs, files in os.walk(projPath): - if ("taosd" in files): + if ("taosd" in files or "taosd.exe" in files): rootRealPath = os.path.dirname(os.path.realpath(root)) if ("packaging" not in rootRealPath): buildPath = root[:len(root) - len("/build/bin")] @@ -168,21 +188,33 @@ class TDTestCase: tdDnodes.stop(1) role = 'server' - taosCmd = 'nohup ' + buildPath + '/build/bin/taos -c ' + keyDict['c'] - taosCmd = taosCmd + ' -n ' + role + ' > /dev/null 2>&1 &' + if platform.system().lower() == 'windows': + taosCmd = 'mintty -h never -w hide ' + buildPath + '\\build\\bin\\taos.exe -c ' + keyDict['c'] + taosCmd = taosCmd.replace('\\','\\\\') + taosCmd = taosCmd + ' -n ' + role + else: + taosCmd = 'nohup ' + buildPath + '/build/bin/taos -c ' + keyDict['c'] + taosCmd = taosCmd + ' -n ' + role + ' > /dev/null 2>&1 &' print (taosCmd) os.system(taosCmd) pktLen = '2000' pktNum = '10' role = 'client' - taosCmd = buildPath + '/build/bin/taos -c ' + keyDict['c'] + if platform.system().lower() == 'windows': + taosCmd = buildPath + '\\build\\bin\\taos.exe -h 127.0.0.1 -c ' + keyDict['c'] + taosCmd = taosCmd.replace('\\','\\\\') + else: + taosCmd = buildPath + '/build/bin/taos -c ' + keyDict['c'] taosCmd = taosCmd + ' -n ' + role + ' -l ' + pktLen + ' -N ' + pktNum print (taosCmd) - child = pexpect.spawn(taosCmd, timeout=3) - i = child.expect([pexpect.TIMEOUT, pexpect.EOF], timeout=6) + child = taosExpect.spawn(taosCmd, timeout=3) + i = child.expect([taosExpect.TIMEOUT, 
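For the network check, the test backgrounds taos -n server (nohup on Linux, a hidden mintty on Windows) and then runs taos -n client -l 2000 -N 10 against it. subprocess expresses the same intent without platform-specific shells; a sketch reusing the flags from the hunk above, with the binary path left to PATH:

    import subprocess

    server = subprocess.Popen(["taos", "-n", "server"],
                              stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
    try:
        result = subprocess.run(["taos", "-n", "client", "-l", "2000", "-N", "10"],
                                capture_output=True, text=True, timeout=60)
        print(result.stdout)
    finally:
        server.terminate()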
taosExpect.EOF], timeout=6) - retResult = child.before.decode() + if platform.system().lower() == 'windows': + retResult = child.before + else: + retResult = child.before.decode() print("expect() return code: %d, content:\n %s\n"%(i, retResult)) #print(child.after.decode()) if i == 0: @@ -195,7 +227,10 @@ class TDTestCase: else: tdLog.exit('taos -n client fail!') - os.system('pkill taos') + if platform.system().lower() == 'windows': + os.system('ps -a | grep taos | awk \'{print $2}\' | xargs kill -9') + else: + os.system('pkill taos') def stop(self): tdSql.close() diff --git a/tests/system-test/0-others/taosdMonitor.py b/tests/system-test/0-others/taosdMonitor.py index a3d3b052047faa12618a0b68846518269c9de3f5..a219c54e5925075a2c687c9cd134ada03c09e57e 100644 --- a/tests/system-test/0-others/taosdMonitor.py +++ b/tests/system-test/0-others/taosdMonitor.py @@ -2,7 +2,7 @@ import taos import sys import time import socket -import pexpect +# import pexpect import os import http.server import gzip @@ -75,7 +75,7 @@ def telemetryInfoCheck(infoDict=''): if k not in infoDict["cluster_info"]["dnodes"][0] or v != infoDict["cluster_info"]["dnodes"][0][k] : tdLog.exit("dnodes info is null!") - mnodes_info = { "mnode_id":1, "mnode_ep":f"{hostname}:{serverPort}","role": "LEADER" } + mnodes_info = { "mnode_id":1, "mnode_ep":f"{hostname}:{serverPort}","role": "leader" } for k ,v in mnodes_info.items(): if k not in infoDict["cluster_info"]["mnodes"][0] or v != infoDict["cluster_info"]["mnodes"][0][k] : diff --git a/tests/system-test/0-others/telemetry.py b/tests/system-test/0-others/telemetry.py index 3ab39f9e7bb14b40f7caaa2b6f3bff43869c1e21..203f87c085fe91a9a75cc4176065a893fc29cf1e 100644 --- a/tests/system-test/0-others/telemetry.py +++ b/tests/system-test/0-others/telemetry.py @@ -2,7 +2,7 @@ import taos import sys import time import socket -import pexpect +# import pexpect import os import http.server import gzip diff --git a/tests/system-test/1-insert/influxdb_line_taosc_insert.py b/tests/system-test/1-insert/influxdb_line_taosc_insert.py new file mode 100644 index 0000000000000000000000000000000000000000..0ddeba46652d7d724f15cea0476c7baa8c60cc30 --- /dev/null +++ b/tests/system-test/1-insert/influxdb_line_taosc_insert.py @@ -0,0 +1,1333 @@ +################################################################### +# Copyright (c) 2021 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
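The file added below exercises TDengine's schemaless ingestion with InfluxDB line protocol through the client bindings. Outside the test harness the same round trip is only a few lines; a sketch assuming recent taospy enum names (SmlProtocol/SmlPrecision) and a database created with schemaless 1:

    import taos

    conn = taos.connect(host="localhost")
    conn.execute("create database if not exists sml_db precision 'ms' schemaless 1")
    conn.select_db("sml_db")
    line = 'st,t1=3i64,t2=4f64,t3="t3" c1=3i64,c2=false 1626006833639000000'
    conn.schemaless_insert([line],
                           taos.SmlProtocol.LINE_PROTOCOL,
                           taos.SmlPrecision.NOT_CONFIGURED)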
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import traceback +import random +from taos.error import SchemalessError +import time +from copy import deepcopy +import numpy as np +from util.log import * +from util.cases import * +from util.sql import * +import threading +from util.types import TDSmlProtocolType, TDSmlTimestampType +from util.common import tdCom + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + self._conn = conn + + def createDb(self, name="test", db_update_tag=0): + if db_update_tag == 0: + tdSql.execute(f"drop database if exists {name}") + tdSql.execute(f"create database if not exists {name} precision 'ms' schemaless 1") + else: + tdSql.execute(f"drop database if exists {name}") + tdSql.execute(f"create database if not exists {name} precision 'ms' update 1 schemaless 1") + tdSql.execute(f'use {name}') + + def timeTrans(self, time_value, ts_type): + if int(time_value) == 0: + ts = time.time() + else: + if ts_type == TDSmlTimestampType.NANO_SECOND.value or ts_type is None: + ts = int(''.join(list(filter(str.isdigit, time_value)))) / 1000000000 + elif ts_type == TDSmlTimestampType.MICRO_SECOND.value: + ts = int(''.join(list(filter(str.isdigit, time_value)))) / 1000000 + elif ts_type == TDSmlTimestampType.MILLI_SECOND.value: + ts = int(''.join(list(filter(str.isdigit, time_value)))) / 1000 + elif ts_type == TDSmlTimestampType.SECOND.value: + ts = int(''.join(list(filter(str.isdigit, time_value)))) / 1 + ulsec = repr(ts).split('.')[1][:6] + if len(ulsec) < 6 and int(ulsec) != 0: + ulsec = int(ulsec) * (10 ** (6 - len(ulsec))) + elif int(ulsec) == 0: + ulsec *= 6 + # * follow two rows added for tsCheckCase + td_ts = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(ts)) + return td_ts + #td_ts = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(ts)) + td_ts = time.strftime("%Y-%m-%d %H:%M:%S.{}".format(ulsec), time.localtime(ts)) + return td_ts + #return repr(datetime.datetime.strptime(td_ts, "%Y-%m-%d %H:%M:%S.%f")) + + def dateToTs(self, datetime_input): + return int(time.mktime(time.strptime(datetime_input, "%Y-%m-%d %H:%M:%S.%f"))) + + def getTdTypeValue(self, value, vtype="col"): + """ + vtype must be col or tag + """ + if vtype == "col": + if value.lower().endswith("i8"): + td_type = "TINYINT" + td_tag_value = ''.join(list(value)[:-2]) + elif value.lower().endswith("i16"): + td_type = "SMALLINT" + td_tag_value = ''.join(list(value)[:-3]) + elif value.lower().endswith("i32"): + td_type = "INT" + td_tag_value = ''.join(list(value)[:-3]) + elif value.lower().endswith("i64"): + td_type = "BIGINT" + td_tag_value = ''.join(list(value)[:-3]) + elif value.lower().lower().endswith("u64"): + td_type = "BIGINT UNSIGNED" + td_tag_value = ''.join(list(value)[:-3]) + elif value.lower().endswith("f32"): + td_type = "FLOAT" + td_tag_value = ''.join(list(value)[:-3]) + td_tag_value = '{}'.format(np.float32(td_tag_value)) + elif value.lower().endswith("f64"): + td_type = "DOUBLE" + td_tag_value = ''.join(list(value)[:-3]) + if "e" in value.lower(): + td_tag_value = str(float(td_tag_value)) + elif value.lower().startswith('l"'): + td_type = "NCHAR" + td_tag_value = ''.join(list(value)[2:-1]) + elif value.startswith('"') and value.endswith('"'): + td_type = 
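The getTdTypeValue helper being defined here maps a line-protocol value literal to a TDengine column type by its suffix. A few worked input/output pairs make the branches concrete (derived directly from the code; bare numerics default to DOUBLE):

    # value literal -> (inferred type, stored value), per getTdTypeValue(value, "col")
    examples = [
        ("42i8",    "TINYINT",         "42"),
        ("42i16",   "SMALLINT",        "42"),
        ("42i32",   "INT",             "42"),
        ("42i64",   "BIGINT",          "42"),
        ("42u64",   "BIGINT UNSIGNED", "42"),
        ("4.2f32",  "FLOAT",           "4.2"),
        ("4.2f64",  "DOUBLE",          "4.2"),
        ('l"text"', "NCHAR",           "text"),
        ('"text"',  "VARCHAR",         "text"),
        ("t",       "BOOL",            "True"),
        ("42",      "DOUBLE",          "42.0"),
    ]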
"VARCHAR" + td_tag_value = ''.join(list(value)[1:-1]) + elif value.lower() == "t" or value.lower() == "true": + td_type = "BOOL" + td_tag_value = "True" + elif value.lower() == "f" or value.lower() == "false": + td_type = "BOOL" + td_tag_value = "False" + elif value.isdigit(): + td_type = "DOUBLE" + td_tag_value = str(float(value)) + else: + td_type = "DOUBLE" + if "e" in value.lower(): + td_tag_value = str(float(value)) + else: + td_tag_value = value + elif vtype == "tag": + td_type = "NCHAR" + td_tag_value = str(value) + return td_type, td_tag_value + + def typeTrans(self, type_list): + type_num_list = [] + for tp in type_list: + if tp.upper() == "TIMESTAMP": + type_num_list.append(9) + elif tp.upper() == "BOOL": + type_num_list.append(1) + elif tp.upper() == "TINYINT": + type_num_list.append(2) + elif tp.upper() == "SMALLINT": + type_num_list.append(3) + elif tp.upper() == "INT": + type_num_list.append(4) + elif tp.upper() == "BIGINT": + type_num_list.append(5) + elif tp.upper() == "FLOAT": + type_num_list.append(6) + elif tp.upper() == "DOUBLE": + type_num_list.append(7) + elif tp.upper() == "VARCHAR": + type_num_list.append(8) + elif tp.upper() == "NCHAR": + type_num_list.append(10) + elif tp.upper() == "BIGINT UNSIGNED": + type_num_list.append(14) + return type_num_list + + def inputHandle(self, input_sql, ts_type): + input_sql_split_list = input_sql.split(" ") + + stb_tag_list = input_sql_split_list[0].split(',') + stb_col_list = input_sql_split_list[1].split(',') + time_value = self.timeTrans(input_sql_split_list[2], ts_type) + + stb_name = stb_tag_list[0] + stb_tag_list.pop(0) + + tag_name_list = [] + tag_value_list = [] + td_tag_value_list = [] + td_tag_type_list = [] + + col_name_list = [] + col_value_list = [] + td_col_value_list = [] + td_col_type_list = [] + for elm in stb_tag_list: + if "id=" in elm.lower(): + tb_name = elm.split('=')[1] + tag_name_list.append(elm.split("=")[0]) + td_tag_value_list.append(tb_name) + td_tag_type_list.append("NCHAR") + else: + tag_name_list.append(elm.split("=")[0]) + tag_value_list.append(elm.split("=")[1]) + tb_name = "" + td_tag_value_list.append(self.getTdTypeValue(elm.split("=")[1], "tag")[1]) + td_tag_type_list.append(self.getTdTypeValue(elm.split("=")[1], "tag")[0]) + + for elm in stb_col_list: + col_name_list.append(elm.split("=")[0]) + col_value_list.append(elm.split("=")[1]) + td_col_value_list.append(self.getTdTypeValue(elm.split("=")[1])[1]) + td_col_type_list.append(self.getTdTypeValue(elm.split("=")[1])[0]) + + final_field_list = [] + final_field_list.extend(col_name_list) + final_field_list.extend(tag_name_list) + + final_type_list = [] + final_type_list.append("TIMESTAMP") + final_type_list.extend(td_col_type_list) + final_type_list.extend(td_tag_type_list) + final_type_list = self.typeTrans(final_type_list) + + final_value_list = [] + final_value_list.append(time_value) + final_value_list.extend(td_col_value_list) + final_value_list.extend(td_tag_value_list) + return final_value_list, final_field_list, final_type_list, stb_name, tb_name + + def gen_influxdb_line(self, stb_name, tb_name, id, t0, t1, t2, t3, t4, t5, t6, t7, t8, c0, c1, c2, c3, c4, c5, c6, c7, c8, c9, + ts, id_noexist_tag, id_change_tag, id_double_tag, ct_add_tag, ct_am_tag, ct_ma_tag, ct_min_tag, c_multi_tag, t_multi_tag, c_blank_tag, t_blank_tag, chinese_tag): + input_sql = f'{stb_name},{id}={tb_name},t0={t0},t1={t1},t2={t2},t3={t3},t4={t4},t5={t5},t6={t6},t7={t7},t8={t8} c0={c0},c1={c1},c2={c2},c3={c3},c4={c4},c5={c5},c6={c6},c7={c7},c8={c8},c9={c9} {ts}' + if 
id_noexist_tag is not None: + input_sql = f'{stb_name},t0={t0},t1={t1},t2={t2},t3={t3},t4={t4},t5={t5},t6={t6},t7={t7},t8={t8} c0={c0},c1={c1},c2={c2},c3={c3},c4={c4},c5={c5},c6={c6},c7={c7},c8={c8},c9={c9} {ts}' + if ct_add_tag is not None: + input_sql = f'{stb_name},t0={t0},t1={t1},t2={t2},t3={t3},t4={t4},t5={t5},t6={t6},t7={t7},t8={t8},t9={t8} c0={c0},c1={c1},c2={c2},c3={c3},c4={c4},c5={c5},c6={c6},c7={c7},c8={c8},c9={c9} {ts}' + if id_change_tag is not None: + input_sql = f'{stb_name},t0={t0},t1={t1},{id}={tb_name},t2={t2},t3={t3},t4={t4},t5={t5},t6={t6},t7={t7},t8={t8} c0={c0},c1={c1},c2={c2},c3={c3},c4={c4},c5={c5},c6={c6},c7={c7},c8={c8},c9={c9} {ts}' + if id_double_tag is not None: + input_sql = f'{stb_name},{id}=\"{tb_name}_1\",t0={t0},t1={t1},{id}=\"{tb_name}_2\",t2={t2},t3={t3},t4={t4},t5={t5},t6={t6},t7={t7},t8={t8} c0={c0},c1={c1},c2={c2},c3={c3},c4={c4},c5={c5},c6={c6},c7={c7},c8={c8},c9={c9} {ts}' + if ct_add_tag is not None: + input_sql = f'{stb_name},{id}={tb_name},t0={t0},t1={t1},t2={t2},t3={t3},t4={t4},t5={t5},t6={t6},t7={t7},t8={t8},t11={t1},t10={t8} c0={c0},c1={c1},c2={c2},c3={c3},c4={c4},c5={c5},c6={c6},c7={c7},c8={c8},c9={c9},c11={c8},c10={t0} {ts}' + if ct_am_tag is not None: + input_sql = f'{stb_name},{id}={tb_name},t0={t0},t1={t1},t2={t2},t3={t3},t4={t4},t5={t5},t6={t6} c0={c0},c1={c1},c2={c2},c3={c3},c4={c4},c5={c5},c6={c6},c7={c7},c8={c8},c9={c9},c11={c8},c10={t0} {ts}' + if id_noexist_tag is not None: + input_sql = f'{stb_name},t0={t0},t1={t1},t2={t2},t3={t3},t4={t4},t5={t5},t6={t6} c0={c0},c1={c1},c2={c2},c3={c3},c4={c4},c5={c5},c6={c6},c7={c7},c8={c8},c9={c9},c11={c8},c10={t0} {ts}' + if ct_ma_tag is not None: + input_sql = f'{stb_name},{id}={tb_name},t0={t0},t1={t1},t2={t2},t3={t3},t4={t4},t5={t5},t6={t6},t7={t7},t8={t8},t11={t1},t10={t8} c0={c0},c1={c1},c2={c2},c3={c3},c4={c4},c5={c5},c6={c6} {ts}' + if id_noexist_tag is not None: + input_sql = f'{stb_name},t0={t0},t1={t1},t2={t2},t3={t3},t4={t4},t5={t5},t6={t6},t7={t7},t8={t8},t11={t1},t10={t8} c0={c0},c1={c1},c2={c2},c3={c3},c4={c4},c5={c5},c6={c6} {ts}' + if ct_min_tag is not None: + input_sql = f'{stb_name},{id}={tb_name},t0={t0},t1={t1},t2={t2},t3={t3},t4={t4},t5={t5},t6={t6} c0={c0},c1={c1},c2={c2},c3={c3},c4={c4},c5={c5},c6={c6} {ts}' + if c_multi_tag is not None: + input_sql = f'{stb_name},{id}={tb_name},t0={t0},t1={t1},t2={t2},t3={t3},t4={t4},t5={t5},t6={t6},t7={t7},t8={t8} c0={c0},c1={c1},c2={c2},c3={c3},c4={c4},c5={c5},c6={c6},c7={c7},c8={c8},c9={c9} c10={c9} {ts}' + if t_multi_tag is not None: + input_sql = f'{stb_name},{id}={tb_name},t0={t0},t1={t1},t2={t2},t3={t3},t4={t4},t5={t5},t6={t6},t7={t7},t8={t8} t9={t8} c0={c0},c1={c1},c2={c2},c3={c3},c4={c4},c5={c5},c6={c6},c7={c7},c8={c8},c9={c9} {ts}' + if c_blank_tag is not None: + input_sql = f'{stb_name},{id}={tb_name},t0={t0},t1={t1},t2={t2},t3={t3},t4={t4},t5={t5},t6={t6},t7={t7},t8={t8} {ts}' + if t_blank_tag is not None: + input_sql = f'{stb_name} c0={c0},c1={c1},c2={c2},c3={c3},c4={c4},c5={c5},c6={c6},c7={c7},c8={c8},c9={c9} {ts}' + if chinese_tag is not None: + input_sql = f'{stb_name},to=L"涛思数据" c0=L"涛思数据" {ts}' + return input_sql + + def genFullTypeSql(self, stb_name="", tb_name="", value="", t0="", t1="127i8", t2="32767i16", t3="2147483647i32", + t4="9223372036854775807i64", t5="11.12345f32", t6="22.123456789f64", t7="\"binaryTagValue\"", + t8="L\"ncharTagValue\"", c0="", c1="127i8", c2="32767i16", c3="2147483647i32", + c4="9223372036854775807i64", c5="11.12345f32", c6="22.123456789f64", c7="\"binaryColValue\"", + 
c8="L\"ncharColValue\"", c9="7u64", ts=None, + id_noexist_tag=None, id_change_tag=None, id_upper_tag=None, id_mixul_tag=None, id_double_tag=None, + ct_add_tag=None, ct_am_tag=None, ct_ma_tag=None, ct_min_tag=None, c_multi_tag=None, t_multi_tag=None, + c_blank_tag=None, t_blank_tag=None, chinese_tag=None, t_add_tag=None, t_mul_tag=None, point_trans_tag=None, + tcp_keyword_tag=None, multi_field_tag=None, protocol=None): + if stb_name == "": + stb_name = tdCom.getLongName(6, "letters") + if tb_name == "": + tb_name = f'{stb_name}_{random.randint(0, 65535)}_{random.randint(0, 65535)}' + if t0 == "": + t0 = "t" + if c0 == "": + c0 = random.choice(["f", "F", "false", "False", "t", "T", "true", "True"]) + if value == "": + value = random.choice(["f", "F", "false", "False", "t", "T", "true", "True", "TRUE", "FALSE"]) + if id_upper_tag is not None: + id = "ID" + else: + id = "id" + if id_mixul_tag is not None: + id = random.choice(["iD", "Id"]) + else: + id = "id" + if ts is None: + ts = "1626006833639000000" + input_sql = self.gen_influxdb_line(stb_name, tb_name, id, t0, t1, t2, t3, t4, t5, t6, t7, t8, c0, c1, c2, c3, c4, c5, c6, c7, c8, c9, ts, + id_noexist_tag, id_change_tag, id_double_tag, ct_add_tag, ct_am_tag, ct_ma_tag, ct_min_tag, c_multi_tag, t_multi_tag, c_blank_tag, t_blank_tag, chinese_tag) + return input_sql, stb_name + + def genMulTagColStr(self, gen_type, count): + """ + gen_type must be "tag"/"col" + """ + if gen_type == "tag": + return ','.join(map(lambda i: f't{i}=f', range(count))) + " " + if gen_type == "col": + return ','.join(map(lambda i: f'c{i}=t', range(count))) + " " + + def genLongSql(self, tag_count, col_count): + stb_name = tdCom.getLongName(7, "letters") + tb_name = f'{stb_name}_1' + tag_str = self.genMulTagColStr("tag", tag_count) + col_str = self.genMulTagColStr("col", col_count) + ts = "1626006833640000000" + long_sql = stb_name + ',' + f'id={tb_name}' + ',' + tag_str + col_str + ts + return long_sql, stb_name + + def getNoIdTbName(self, stb_name): + query_sql = f"select tbname from {stb_name}" + tb_name = self.resHandle(query_sql, True)[0][0] + return tb_name + + def resHandle(self, query_sql, query_tag, protocol=None): + tdSql.execute('reset query cache') + if protocol == "telnet-tcp": + time.sleep(0.5) + row_info = tdSql.query(query_sql, query_tag) + col_info = tdSql.getColNameList(query_sql, query_tag) + res_row_list = [] + sub_list = [] + for row_mem in row_info: + for i in row_mem: + if "11.1234" in str(i) and str(i) != "11.12345f32" and str(i) != "11.12345027923584F32": + sub_list.append("11.12345027923584") + elif "22.1234" in str(i) and str(i) != "22.123456789f64" and str(i) != "22.123456789F64": + sub_list.append("22.123456789") + else: + sub_list.append(str(i)) + res_row_list.append(sub_list) + res_field_list_without_ts = col_info[0][1:] + res_type_list = col_info[1] + return res_row_list, res_field_list_without_ts, res_type_list + + def resCmp(self, input_sql, stb_name, query_sql="select * from", condition="", ts=None, id=True, none_check_tag=None, ts_type=None, precision=None): + expect_list = self.inputHandle(input_sql, ts_type) + if precision == None: + self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, ts_type) + else: + self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, precision) + query_sql = f"{query_sql} {stb_name} {condition}" + res_row_list, res_field_list_without_ts, res_type_list = self.resHandle(query_sql, True) + if ts == 0: + res_ts = self.dateToTs(res_row_list[0][0]) + current_time = time.time() 
+            if current_time - res_ts < 60:
+                tdSql.checkEqual(res_row_list[0][1:], expect_list[0][1:])
+            else:
+                print("timeout")
+                tdSql.checkEqual(res_row_list[0], expect_list[0])
+        else:
+            if none_check_tag is not None:
+                none_index_list = [i for i,x in enumerate(res_row_list[0]) if x=="None"]
+                none_index_list.reverse()
+                for j in none_index_list:
+                    res_row_list[0].pop(j)
+                    expect_list[0].pop(j)
+            tdSql.checkEqual(sorted(res_row_list[0]), sorted(expect_list[0]))
+        tdSql.checkEqual(sorted(res_field_list_without_ts), sorted(expect_list[1]))
+        tdSql.checkEqual(res_type_list, expect_list[2])
+
+    def cleanStb(self):
+        query_sql = "show stables"
+        res_row_list = tdSql.query(query_sql, True)
+        stb_list = map(lambda x: x[0], res_row_list)
+        for stb in stb_list:
+            tdSql.execute(f'drop table if exists {stb}')
+
+    def initCheckCase(self):
+        """
+        normal tags and cols, one for every elm
+        """
+        tdCom.cleanTb()
+        input_sql, stb_name = self.genFullTypeSql()
+        self.resCmp(input_sql, stb_name)
+
+    def boolTypeCheckCase(self):
+        """
+        check all normal bool type values
+        """
+        tdCom.cleanTb()
+        full_type_list = ["f", "F", "false", "False", "t", "T", "true", "True"]
+        for t_type in full_type_list:
+            input_sql, stb_name = self.genFullTypeSql(c0=t_type, t0=t_type)
+            self.resCmp(input_sql, stb_name)
+
+    def symbolsCheckCase(self):
+        """
+        check symbols = `~!@#$%^&*()_-+={[}]\|:;'\",<.>/?
+        """
+        '''
+        please test :
+        binary_symbols = '\"abcd`~!@#$%^&*()_-{[}]|:;<.>?lfjal"\'\'"\"'
+        '''
+        tdCom.cleanTb()
+        binary_symbols = '"abcd`~!@#$%^&*()_-{[}]|:;<.>?lfjal"'
+        nchar_symbols = f'L{binary_symbols}'
+        input_sql, stb_name = self.genFullTypeSql(c7=binary_symbols, c8=nchar_symbols, t7=binary_symbols, t8=nchar_symbols)
+        self.resCmp(input_sql, stb_name)
+
+    def tsCheckCase(self):
+        """
+        test ts list --> ["1626006833639000000", "1626006833639019us", "1626006833640ms", "1626006834s", "1626006822639022"]
+        # ! when a us-precision timestamp has an all-zero fractional part, the database shows it in queries,
+        # but the result fetched through the Python connector omits the .000000 part; please confirm this case.
+        # The current change to the time-handling code passes with it.
+        """
+        tdCom.cleanTb()
+        ts_list = ["1626006833639000000", "1626006833639019us", "1626006833640ms", "1626006834s", "1626006822639022", 0]
+        for ts in ts_list:
+            input_sql, stb_name = self.genFullTypeSql(ts=ts)
+            self.resCmp(input_sql, stb_name, ts=ts)
+
+    def idSeqCheckCase(self):
+        """
+        check the position of id among the tags
+        eg: t0=**,id=**,t1=**
+        """
+        tdCom.cleanTb()
+        input_sql, stb_name = self.genFullTypeSql(id_change_tag=True)
+        self.resCmp(input_sql, stb_name)
+
+    def idUpperCheckCase(self):
+        """
+        check the id param
+        eg: id and ID
+        """
+        tdCom.cleanTb()
+        input_sql, stb_name = self.genFullTypeSql(id_upper_tag=True)
+        self.resCmp(input_sql, stb_name)
+        input_sql, stb_name = self.genFullTypeSql(id_change_tag=True, id_upper_tag=True)
+        self.resCmp(input_sql, stb_name)
+
+    def noIdCheckCase(self):
+        """
+        id does not exist
+        """
+        tdCom.cleanTb()
+        input_sql, stb_name = self.genFullTypeSql(id_noexist_tag=True)
+        self.resCmp(input_sql, stb_name)
+        query_sql = f"select tbname from {stb_name}"
+        res_row_list = self.resHandle(query_sql, True)[0]
+        if len(res_row_list[0][0]) > 0:
+            tdSql.checkColNameList(res_row_list, res_row_list)
+        else:
+            tdSql.checkColNameList(res_row_list, "please check noIdCheckCase")
+
+    def maxColTagCheckCase(self):
+        """
+        max tag count is 128
+        max col count is ??
+        """
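+        # NOTE (editor annotation, not part of the original patch): the first loop below
+        # stays within the tag/column limits and is expected to succeed, while the second
+        # loop (129 tags / 4095 cols) exceeds them and is expected to raise SchemalessError
+        # with a nonzero errno.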
+ """ + for input_sql in [self.genLongSql(127, 1)[0], self.genLongSql(1, 4093)[0]]: + tdCom.cleanTb() + self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) + for input_sql in [self.genLongSql(129, 1)[0], self.genLongSql(1, 4095)[0]]: + tdCom.cleanTb() + try: + self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) + except SchemalessError as err: + tdSql.checkNotEqual(err.errno, 0) + + def idIllegalNameCheckCase(self): + """ + test illegal id name + mix "~!@#$¥%^&*()-+|[]、「」【】;:《》<>?" + """ + tdCom.cleanTb() + rstr = list("~!@#$¥%^&*()-+|[]、「」【】;:《》<>?") + for i in rstr: + stb_name=f"aaa{i}bbb" + input_sql, stb_name = self.genFullTypeSql(stb_name=stb_name) + self.resCmp(input_sql, f'`{stb_name}`') + tdSql.execute(f'drop table if exists `{stb_name}`') + + def idStartWithNumCheckCase(self): + """ + id is start with num + """ + tdCom.cleanTb() + input_sql = self.genFullTypeSql(tb_name=f"\"1aaabbb\"")[0] + try: + self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) + except SchemalessError as err: + tdSql.checkNotEqual(err.errno, 0) + + def nowTsCheckCase(self): + """ + check now unsupported + """ + tdCom.cleanTb() + input_sql = self.genFullTypeSql(ts="now")[0] + try: + self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) + except SchemalessError as err: + tdSql.checkNotEqual(err.errno, 0) + + def dateFormatTsCheckCase(self): + """ + check date format ts unsupported + """ + tdCom.cleanTb() + input_sql = self.genFullTypeSql(ts="2021-07-21\ 19:01:46.920")[0] + try: + self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) + except SchemalessError as err: + tdSql.checkNotEqual(err.errno, 0) + + def illegalTsCheckCase(self): + """ + check ts format like 16260068336390us19 + """ + tdCom.cleanTb() + input_sql = self.genFullTypeSql(ts="16260068336390us19")[0] + try: + self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) + except SchemalessError as err: + tdSql.checkNotEqual(err.errno, 0) + + def tagValueLengthCheckCase(self): + """ + check full type tag value limit + """ + tdCom.cleanTb() + # i8 + for t1 in ["-128i8", "127i8"]: + input_sql, stb_name = self.genFullTypeSql(t1=t1) + self.resCmp(input_sql, stb_name) + for t1 in ["-129i8", "128i8"]: + input_sql = self.genFullTypeSql(t1=t1)[0] + try: + self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) + except SchemalessError as err: + tdSql.checkNotEqual(err.errno, 0) + + #i16 + for t2 in ["-32768i16", "32767i16"]: + input_sql, stb_name = self.genFullTypeSql(t2=t2) + self.resCmp(input_sql, stb_name) + for t2 in ["-32769i16", "32768i16"]: + input_sql = self.genFullTypeSql(t2=t2)[0] + try: + self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) + except SchemalessError as err: + tdSql.checkNotEqual(err.errno, 0) + + #i32 + for t3 in ["-2147483648i32", "2147483647i32"]: + input_sql, stb_name = self.genFullTypeSql(t3=t3) + self.resCmp(input_sql, stb_name) + for t3 in ["-2147483649i32", "2147483648i32"]: + input_sql = self.genFullTypeSql(t3=t3)[0] + try: + self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) + except SchemalessError as err: + 
tdSql.checkNotEqual(err.errno, 0) + + #i64 + for t4 in ["-9223372036854775808i64", "9223372036854775807i64"]: + input_sql, stb_name = self.genFullTypeSql(t4=t4) + self.resCmp(input_sql, stb_name) + for t4 in ["-9223372036854775809i64", "9223372036854775808i64"]: + input_sql = self.genFullTypeSql(t4=t4)[0] + try: + self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) + except SchemalessError as err: + tdSql.checkNotEqual(err.errno, 0) + + # f32 + for t5 in [f"{-3.4028234663852885981170418348451692544*(10**38)}f32", f"{3.4028234663852885981170418348451692544*(10**38)}f32"]: + input_sql, stb_name = self.genFullTypeSql(t5=t5) + self.resCmp(input_sql, stb_name) + # * limit set to 4028234664*(10**38) + for t5 in [f"{-3.4028234664*(10**38)}f32", f"{3.4028234664*(10**38)}f32"]: + input_sql = self.genFullTypeSql(t5=t5)[0] + try: + self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) + except SchemalessError as err: + tdSql.checkNotEqual(err.errno, 0) + + # f64 + for t6 in [f'{-1.79769*(10**308)}f64', f'{-1.79769*(10**308)}f64']: + input_sql, stb_name = self.genFullTypeSql(t6=t6) + self.resCmp(input_sql, stb_name) + # * limit set to 1.797693134862316*(10**308) + for c6 in [f'{-1.797693134862316*(10**308)}f64', f'{-1.797693134862316*(10**308)}f64']: + input_sql = self.genFullTypeSql(c6=c6)[0] + try: + self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) + except SchemalessError as err: + tdSql.checkNotEqual(err.errno, 0) + + # binary + stb_name = tdCom.getLongName(7, "letters") + input_sql = f'{stb_name},t0=t,t1="{tdCom.getLongName(16374, "letters")}" c0=f 1626006833639000000' + self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) + + input_sql = f'{stb_name},t0=t,t1="{tdCom.getLongName(16375, "letters")}" c0=f 1626006833639000000' + try: + self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) + except SchemalessError as err: + tdSql.checkNotEqual(err.errno, 0) + + # nchar + # * legal nchar could not be larger than 16374/4 + stb_name = tdCom.getLongName(7, "letters") + input_sql = f'{stb_name},t0=t,t1=L"{tdCom.getLongName(4093, "letters")}" c0=f 1626006833639000000' + self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) + + input_sql = f'{stb_name},t0=t,t1=L"{tdCom.getLongName(4094, "letters")}" c0=f 1626006833639000000' + try: + self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) + except SchemalessError as err: + tdSql.checkNotEqual(err.errno, 0) + + def colValueLengthCheckCase(self): + """ + check full type col value limit + """ + tdCom.cleanTb() + # i8 + for c1 in ["-128i8", "127i8"]: + input_sql, stb_name = self.genFullTypeSql(c1=c1) + self.resCmp(input_sql, stb_name) + + for c1 in ["-129i8", "128i8"]: + input_sql = self.genFullTypeSql(c1=c1)[0] + try: + self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) + except SchemalessError as err: + tdSql.checkNotEqual(err.errno, 0) + # i16 + for c2 in ["-32768i16"]: + input_sql, stb_name = self.genFullTypeSql(c2=c2) + self.resCmp(input_sql, stb_name) + for c2 in ["-32769i16", "32768i16"]: + input_sql = self.genFullTypeSql(c2=c2)[0] + try: + self._conn.schemaless_insert([input_sql], 
TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) + except SchemalessError as err: + tdSql.checkNotEqual(err.errno, 0) + + # i32 + for c3 in ["-2147483648i32"]: + input_sql, stb_name = self.genFullTypeSql(c3=c3) + self.resCmp(input_sql, stb_name) + for c3 in ["-2147483649i32", "2147483648i32"]: + input_sql = self.genFullTypeSql(c3=c3)[0] + try: + self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) + except SchemalessError as err: + tdSql.checkNotEqual(err.errno, 0) + + # i64 + for c4 in ["-9223372036854775808i64"]: + input_sql, stb_name = self.genFullTypeSql(c4=c4) + self.resCmp(input_sql, stb_name) + for c4 in ["-9223372036854775809i64", "9223372036854775808i64"]: + input_sql = self.genFullTypeSql(c4=c4)[0] + try: + self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) + except SchemalessError as err: + tdSql.checkNotEqual(err.errno, 0) + + # f32 + for c5 in [f"{-3.4028234663852885981170418348451692544*(10**38)}f32", f"{3.4028234663852885981170418348451692544*(10**38)}f32"]: + input_sql, stb_name = self.genFullTypeSql(c5=c5) + self.resCmp(input_sql, stb_name) + # * limit set to 4028234664*(10**38) + for c5 in [f"{-3.4028234664*(10**38)}f32", f"{3.4028234664*(10**38)}f32"]: + input_sql = self.genFullTypeSql(c5=c5)[0] + try: + self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) + except SchemalessError as err: + tdSql.checkNotEqual(err.errno, 0) + + # f64 + for c6 in [f'{-1.79769313486231570814527423731704356798070567525844996598917476803157260780*(10**308)}f64', f'{-1.79769313486231570814527423731704356798070567525844996598917476803157260780*(10**308)}f64']: + input_sql, stb_name = self.genFullTypeSql(c6=c6) + self.resCmp(input_sql, stb_name) + # * limit set to 1.797693134862316*(10**308) + for c6 in [f'{-1.797693134862316*(10**308)}f64', f'{-1.797693134862316*(10**308)}f64']: + input_sql = self.genFullTypeSql(c6=c6)[0] + try: + self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) + except SchemalessError as err: + tdSql.checkNotEqual(err.errno, 0) + + # # # binary + # stb_name = tdCom.getLongName(7, "letters") + # input_sql = f'{stb_name},t0=t c0=f,c1="{tdCom.getLongName(16374, "letters")}" 1626006833639000000' + # self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) + + # input_sql = f'{stb_name},t0=t c0=f,c1="{tdCom.getLongName(16375, "letters")}" 1626006833639000000' + # try: + # self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) + # except SchemalessError as err: + # tdSql.checkNotEqual(err.errno, 0) + + # # nchar + # # * legal nchar could not be larger than 16374/4 + # stb_name = tdCom.getLongName(7, "letters") + # input_sql = f'{stb_name},t0=t c0=f,c1=L"{tdCom.getLongName(4093, "letters")}" 1626006833639000000' + # self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) + + # input_sql = f'{stb_name},t0=t c0=f,c1=L"{tdCom.getLongName(4094, "letters")}" 1626006833639000000' + # try: + # self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) + # except SchemalessError as err: + # tdSql.checkNotEqual(err.errno, 0) + + def tagColIllegalValueCheckCase(self): + + """ + test illegal tag col value + """ + tdCom.cleanTb() + # 
bool + for i in ["TrUe", "tRue", "trUe", "truE", "FalsE", "fAlse", "faLse", "falSe", "falsE"]: + input_sql1 = self.genFullTypeSql(t0=i)[0] + try: + self._conn.schemaless_insert([input_sql1], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) + except SchemalessError as err: + tdSql.checkNotEqual(err.errno, 0) + input_sql2 = self.genFullTypeSql(c0=i)[0] + try: + self._conn.schemaless_insert([input_sql2], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) + except SchemalessError as err: + tdSql.checkNotEqual(err.errno, 0) + + # i8 i16 i32 i64 f32 f64 + for input_sql in [ + self.genFullTypeSql(t1="1s2i8")[0], + self.genFullTypeSql(t2="1s2i16")[0], + self.genFullTypeSql(t3="1s2i32")[0], + self.genFullTypeSql(t4="1s2i64")[0], + self.genFullTypeSql(t5="11.1s45f32")[0], + self.genFullTypeSql(t6="11.1s45f64")[0], + self.genFullTypeSql(c1="1s2i8")[0], + self.genFullTypeSql(c2="1s2i16")[0], + self.genFullTypeSql(c3="1s2i32")[0], + self.genFullTypeSql(c4="1s2i64")[0], + self.genFullTypeSql(c5="11.1s45f32")[0], + self.genFullTypeSql(c6="11.1s45f64")[0], + self.genFullTypeSql(c9="1s1u64")[0] + ]: + try: + self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) + except SchemalessError as err: + tdSql.checkNotEqual(err.errno, 0) + + # check binary and nchar blank + stb_name = tdCom.getLongName(7, "letters") + input_sql1 = f'{stb_name},t0=t c0=f,c1="abc aaa" 1626006833639000000' + input_sql2 = f'{stb_name},t0=t c0=f,c1=L"abc aaa" 1626006833639000000' + input_sql3 = f'{stb_name},t0=t,t1="abc aaa" c0=f 1626006833639000000' + input_sql4 = f'{stb_name},t0=t,t1=L"abc aaa" c0=f 1626006833639000000' + for input_sql in [input_sql1, input_sql2, input_sql3, input_sql4]: + try: + self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) + except SchemalessError as err: + tdSql.checkNotEqual(err.errno, 0) + + # check accepted binary and nchar symbols + # # * ~!@#$¥%^&*()-+={}|[]、「」:; + for symbol in list('~!@#$¥%^&*()-+={}|[]、「」:;'): + input_sql1 = f'{stb_name},t0=t c0=f,c1="abc{symbol}aaa" 1626006833639000000' + input_sql2 = f'{stb_name},t0=t,t1="abc{symbol}aaa" c0=f 1626006833639000000' + self._conn.schemaless_insert([input_sql1], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) + # self._conn.schemaless_insert([input_sql2], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) + + def duplicateIdTagColInsertCheckCase(self): + """ + check duplicate Id Tag Col + """ + tdCom.cleanTb() + input_sql_id = self.genFullTypeSql(id_double_tag=True)[0] + try: + self._conn.schemaless_insert([input_sql_id], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) + except SchemalessError as err: + tdSql.checkNotEqual(err.errno, 0) + + input_sql = self.genFullTypeSql()[0] + input_sql_tag = input_sql.replace("t5", "t6") + try: + self._conn.schemaless_insert([input_sql_tag], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) + except SchemalessError as err: + tdSql.checkNotEqual(err.errno, 0) + + input_sql = self.genFullTypeSql()[0] + input_sql_col = input_sql.replace("c5", "c6") + try: + self._conn.schemaless_insert([input_sql_col], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) + except SchemalessError as err: + tdSql.checkNotEqual(err.errno, 0) + + input_sql = self.genFullTypeSql()[0] + input_sql_col = input_sql.replace("c5", "C6") + try: + self._conn.schemaless_insert([input_sql_col], 
TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
+        except SchemalessError as err:
+            tdSql.checkNotEqual(err.errno, 0)
+
+    ##### stb exist #####
+    @tdCom.smlPass
+    def noIdStbExistCheckCase(self):
+        """
+        case: no id when the stb already exists
+        """
+        tdCom.cleanTb()
+        input_sql, stb_name = self.genFullTypeSql(tb_name="sub_table_0123456", t0="f", c0="f")
+        self.resCmp(input_sql, stb_name)
+        input_sql, stb_name = self.genFullTypeSql(stb_name=stb_name, id_noexist_tag=True, t0="f", c0="f")
+        self.resCmp(input_sql, stb_name, condition='where tbname like "t_%"')
+        tdSql.query(f"select * from {stb_name}")
+        tdSql.checkRows(2)
+        # TODO cover other cases
+
+    def duplicateInsertExistCheckCase(self):
+        """
+        check duplicate insert when the stb already exists
+        """
+        tdCom.cleanTb()
+        input_sql, stb_name = self.genFullTypeSql()
+        self.resCmp(input_sql, stb_name)
+        self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
+        self.resCmp(input_sql, stb_name)
+
+    @tdCom.smlPass
+    def tagColBinaryNcharLengthCheckCase(self):
+        """
+        check binary/nchar length increase
+        """
+        tdCom.cleanTb()
+        input_sql, stb_name = self.genFullTypeSql()
+        self.resCmp(input_sql, stb_name)
+        tb_name = tdCom.getLongName(5, "letters")
+        input_sql, stb_name = self.genFullTypeSql(stb_name=stb_name, tb_name=tb_name,t7="\"binaryTagValuebinaryTagValue\"", t8="L\"ncharTagValuencharTagValue\"", c7="\"binaryTagValuebinaryTagValue\"", c8="L\"ncharTagValuencharTagValue\"")
+        self.resCmp(input_sql, stb_name, condition=f'where tbname like "{tb_name}"')
+
+    @tdCom.smlPass
+    def tagColAddDupIDCheckCase(self):
+        """
+        check column and tag count add, stb and tb duplicate
+        * tag: alter table ...
+        * col: when update==0 and ts is the same, unchanged
+        * so in this case the tag and its value will be added,
+        * a col is added without a value when update==0
+        * a col is added with a value when update==1
+        """
+        tdCom.cleanTb()
+        tb_name = tdCom.getLongName(7, "letters")
+        for db_update_tag in [0, 1]:
+            if db_update_tag == 1 :
+                self.createDb("test_update", db_update_tag=db_update_tag)
+            input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name, t0="f", c0="f")
+            self.resCmp(input_sql, stb_name)
+            self.genFullTypeSql(stb_name=stb_name, tb_name=tb_name, t0="f", c0="f", ct_add_tag=True)
+            if db_update_tag == 1 :
+                self.resCmp(input_sql, stb_name, condition=f'where tbname like "{tb_name}"')
+            else:
+                self.resCmp(input_sql, stb_name, condition=f'where tbname like "{tb_name}"', none_check_tag=True)
+
+    @tdCom.smlPass
+    def tagColAddCheckCase(self):
+        """
+        check column and tag count add
+        """
+        tdCom.cleanTb()
+        tb_name = tdCom.getLongName(7, "letters")
+        input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name, t0="f", c0="f")
+        self.resCmp(input_sql, stb_name)
+        tb_name_1 = tdCom.getLongName(7, "letters")
+        input_sql, stb_name = self.genFullTypeSql(stb_name=stb_name, tb_name=tb_name_1, t0="f", c0="f", ct_add_tag=True)
+        self.resCmp(input_sql, stb_name, condition=f'where tbname like "{tb_name_1}"')
+        res_row_list = self.resHandle(f"select c10,c11,t10,t11 from {tb_name}", True)[0]
+        tdSql.checkEqual(res_row_list[0], ['None', 'None', 'None', 'None'])
+        self.resCmp(input_sql, stb_name, condition=f'where tbname like "{tb_name}"', none_check_tag=True)
+
+    def tagMd5Check(self):
+        """
+        condition: stb does not change
+        insert two tables, keep tags unchanged, change cols
+        """
+        tdCom.cleanTb()
+        input_sql, stb_name = self.genFullTypeSql(t0="f", c0="f", id_noexist_tag=True)
+        self.resCmp(input_sql, stb_name)
+        tb_name1 = self.getNoIdTbName(stb_name)
+        input_sql,
stb_name = self.genFullTypeSql(stb_name=stb_name, t0="f", c0="f", id_noexist_tag=True) + self.resCmp(input_sql, stb_name) + tb_name2 = self.getNoIdTbName(stb_name) + tdSql.query(f"select * from {stb_name}") + tdSql.checkRows(1) + tdSql.checkEqual(tb_name1, tb_name2) + input_sql, stb_name = self.genFullTypeSql(stb_name=stb_name, t0="f", c0="f", id_noexist_tag=True, ct_add_tag=True) + self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) + tb_name3 = self.getNoIdTbName(stb_name) + tdSql.query(f"select * from {stb_name}") + tdSql.checkRows(2) + tdSql.checkNotEqual(tb_name1, tb_name3) + + # * tag binary max is 16384, col+ts binary max 49151 + def tagColBinaryMaxLengthCheckCase(self): + """ + every binary and nchar must be length+2 + """ + tdCom.cleanTb() + stb_name = tdCom.getLongName(7, "letters") + tb_name = f'{stb_name}_1' + input_sql = f'{stb_name},id="{tb_name}",t0=t c0=f 1626006833639000000' + self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) + + # * every binary and nchar must be length+2, so here is two tag, max length could not larger than 16384-2*2 + input_sql = f'{stb_name},t0=t,t1="{tdCom.getLongName(16374, "letters")}",t2="{tdCom.getLongName(5, "letters")}" c0=f 1626006833639000000' + self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) + + tdSql.query(f"select * from {stb_name}") + tdSql.checkRows(2) + input_sql = f'{stb_name},t0=t,t1="{tdCom.getLongName(16374, "letters")}",t2="{tdCom.getLongName(6, "letters")}" c0=f 1626006833639000000' + try: + self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) + raise Exception("should not reach here") + except SchemalessError as err: + tdSql.checkNotEqual(err.errno, 0) + tdSql.query(f"select * from {stb_name}") + tdSql.checkRows(2) + + # # * check col,col+ts max in describe ---> 16143 + input_sql = f'{stb_name},t0=t c0=f,c1="{tdCom.getLongName(16374, "letters")}",c2="{tdCom.getLongName(16374, "letters")}",c3="{tdCom.getLongName(16374, "letters")}",c4="{tdCom.getLongName(12, "letters")}" 1626006833639000000' + self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) + + tdSql.query(f"select * from {stb_name}") + tdSql.checkRows(3) + input_sql = f'{stb_name},t0=t c0=f,c1="{tdCom.getLongName(16374, "letters")}",c2="{tdCom.getLongName(16374, "letters")}",c3="{tdCom.getLongName(16374, "letters")}",c4="{tdCom.getLongName(13, "letters")}" 1626006833639000000' + try: + self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) + except SchemalessError as err: + tdSql.checkNotEqual(err.errno, 0) + tdSql.query(f"select * from {stb_name}") + tdSql.checkRows(3) + + # * tag nchar max is 16374/4, col+ts nchar max 49151 + def tagColNcharMaxLengthCheckCase(self): + """ + check nchar length limit + """ + tdCom.cleanTb() + stb_name = tdCom.getLongName(7, "letters") + tb_name = f'{stb_name}_1' + input_sql = f'{stb_name},id="{tb_name}",t0=t c0=f 1626006833639000000' + code = self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) + + # * legal nchar could not be larger than 16374/4 + input_sql = f'{stb_name},t0=t,t1=L"{tdCom.getLongName(4093, "letters")}",t2=L"{tdCom.getLongName(1, "letters")}" c0=f 1626006833639000000' + self._conn.schemaless_insert([input_sql], 
TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) + tdSql.query(f"select * from {stb_name}") + tdSql.checkRows(2) + input_sql = f'{stb_name},t0=t,t1=L"{tdCom.getLongName(4093, "letters")}",t2=L"{tdCom.getLongName(2, "letters")}" c0=f 1626006833639000000' + try: + self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) + except SchemalessError as err: + tdSql.checkNotEqual(err.errno, 0) + tdSql.query(f"select * from {stb_name}") + tdSql.checkRows(2) + + input_sql = f'{stb_name},t0=t c0=f,c1=L"{tdCom.getLongName(4093, "letters")}",c2=L"{tdCom.getLongName(4093, "letters")}",c3=L"{tdCom.getLongName(4093, "letters")}",c4=L"{tdCom.getLongName(4, "letters")}" 1626006833639000000' + self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) + tdSql.query(f"select * from {stb_name}") + tdSql.checkRows(3) + input_sql = f'{stb_name},t0=t c0=f,c1=L"{tdCom.getLongName(4093, "letters")}",c2=L"{tdCom.getLongName(4093, "letters")}",c3=L"{tdCom.getLongName(4093, "letters")}",c4=L"{tdCom.getLongName(5, "letters")}" 1626006833639000000' + try: + self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) + except SchemalessError as err: + tdSql.checkNotEqual(err.errno, 0) + tdSql.query(f"select * from {stb_name}") + tdSql.checkRows(3) + + def batchInsertCheckCase(self): + """ + test batch insert + """ + tdCom.cleanTb() + stb_name = tdCom.getLongName(8, "letters") + # tdSql.execute(f'create stable {stb_name}(ts timestamp, f int) tags(t1 bigint)') + lines = ["st123456,t1=3i64,t2=4f64,t3=\"t3\" c1=3i64,c3=L\"passit\",c2=false,c4=4f64 1626006833639000000", + "st123456,t1=4i64,t3=\"t4\",t2=5f64,t4=5f64 c1=3i64,c3=L\"passitagin\",c2=true,c4=5f64,c5=5f64 1626006833640000000", + f"{stb_name},t2=5f64,t3=L\"ste\" c1=true,c2=4i64,c3=\"iam\" 1626056811823316532", + "stf567890,t1=4i64,t3=\"t4\",t2=5f64,t4=5f64 c1=3i64,c3=L\"passitagin\",c2=true,c4=5f64,c5=5f64,c6=7u64 1626006933640000000", + "st123456,t1=4i64,t2=5f64,t3=\"t4\" c1=3i64,c3=L\"passitagain\",c2=true,c4=5f64 1626006833642000000", + f"{stb_name},t2=5f64,t3=L\"ste2\" c3=\"iamszhou\",c4=false 1626056811843316532", + f"{stb_name},t2=5f64,t3=L\"ste2\" c3=\"iamszhou\",c4=false,c5=32i8,c6=64i16,c7=32i32,c8=88.88f32 1626056812843316532", + "st123456,t1=4i64,t3=\"t4\",t2=5f64,t4=5f64 c1=3i64,c3=L\"passitagin\",c2=true,c4=5f64,c5=5f64,c6=7u64 1626006933640000000", + "st123456,t1=4i64,t3=\"t4\",t2=5f64,t4=5f64 c1=3i64,c3=L\"passitagin_stf\",c2=false,c5=5f64,c6=7u64 1626006933641000000" + ] + self._conn.schemaless_insert(lines, TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) + + def multiInsertCheckCase(self, count): + """ + test multi insert + """ + tdCom.cleanTb() + sql_list = [] + stb_name = tdCom.getLongName(8, "letters") + # tdSql.execute(f'create stable {stb_name}(ts timestamp, f int) tags(t1 bigint)') + for i in range(count): + input_sql = self.genFullTypeSql(stb_name=stb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', c7=f'"{tdCom.getLongName(8, "letters")}"', id_noexist_tag=True)[0] + sql_list.append(input_sql) + print(sql_list) + self._conn.schemaless_insert(sql_list, TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) + + def batchErrorInsertCheckCase(self): + """ + test batch error insert + """ + tdCom.cleanTb() + stb_name = tdCom.getLongName(8, "letters") + lines = ["st123456,t1=3i64,t2=4f64,t3=\"t3\" c1=3i64,c3=L\"passit\",c2=false,c4=4f64 
1626006833639000000", + f"{stb_name},t2=5f64,t3=L\"ste\" c1=tRue,c2=4i64,c3=\"iam\" 1626056811823316532ns"] + try: + self._conn.schemaless_insert(lines, TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) + except SchemalessError as err: + tdSql.checkNotEqual(err.errno, 0) + + def genSqlList(self, count=5, stb_name="", tb_name=""): + """ + stb --> supertable + tb --> table + ts --> timestamp, same default + col --> column, same default + tag --> tag, same default + d --> different + s --> same + a --> add + m --> minus + """ + d_stb_d_tb_list = list() + s_stb_s_tb_list = list() + s_stb_s_tb_a_col_a_tag_list = list() + s_stb_s_tb_m_col_m_tag_list = list() + s_stb_d_tb_list = list() + s_stb_d_tb_a_col_m_tag_list = list() + s_stb_d_tb_a_tag_m_col_list = list() + s_stb_s_tb_d_ts_list = list() + s_stb_s_tb_d_ts_a_col_m_tag_list = list() + s_stb_s_tb_d_ts_a_tag_m_col_list = list() + s_stb_d_tb_d_ts_list = list() + s_stb_d_tb_d_ts_a_col_m_tag_list = list() + s_stb_d_tb_d_ts_a_tag_m_col_list = list() + for i in range(count): + d_stb_d_tb_list.append(self.genFullTypeSql(t0="f", c0="f")) + s_stb_s_tb_list.append(self.genFullTypeSql(stb_name=stb_name, tb_name=tb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', c7=f'"{tdCom.getLongName(8, "letters")}"')) + s_stb_s_tb_a_col_a_tag_list.append(self.genFullTypeSql(stb_name=stb_name, tb_name=tb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', c7=f'"{tdCom.getLongName(8, "letters")}"', ct_add_tag=True)) + s_stb_s_tb_m_col_m_tag_list.append(self.genFullTypeSql(stb_name=stb_name, tb_name=tb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', c7=f'"{tdCom.getLongName(8, "letters")}"', ct_min_tag=True)) + s_stb_d_tb_list.append(self.genFullTypeSql(stb_name=stb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', c7=f'"{tdCom.getLongName(8, "letters")}"', id_noexist_tag=True)) + s_stb_d_tb_a_col_m_tag_list.append(self.genFullTypeSql(stb_name=stb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', c7=f'"{tdCom.getLongName(8, "letters")}"', id_noexist_tag=True, ct_am_tag=True)) + s_stb_d_tb_a_tag_m_col_list.append(self.genFullTypeSql(stb_name=stb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', c7=f'"{tdCom.getLongName(8, "letters")}"', id_noexist_tag=True, ct_ma_tag=True)) + s_stb_s_tb_d_ts_list.append(self.genFullTypeSql(stb_name=stb_name, tb_name=tb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', c7=f'"{tdCom.getLongName(8, "letters")}"', ts=0)) + s_stb_s_tb_d_ts_a_col_m_tag_list.append(self.genFullTypeSql(stb_name=stb_name, tb_name=tb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', c7=f'"{tdCom.getLongName(8, "letters")}"', ts=0, ct_am_tag=True)) + s_stb_s_tb_d_ts_a_tag_m_col_list.append(self.genFullTypeSql(stb_name=stb_name, tb_name=tb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', c7=f'"{tdCom.getLongName(8, "letters")}"', ts=0, ct_ma_tag=True)) + s_stb_d_tb_d_ts_list.append(self.genFullTypeSql(stb_name=stb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', c7=f'"{tdCom.getLongName(8, "letters")}"', id_noexist_tag=True, ts=0)) + s_stb_d_tb_d_ts_a_col_m_tag_list.append(self.genFullTypeSql(stb_name=stb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', c7=f'"{tdCom.getLongName(8, "letters")}"', id_noexist_tag=True, ts=0, ct_am_tag=True)) + s_stb_d_tb_d_ts_a_tag_m_col_list.append(self.genFullTypeSql(stb_name=stb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', c7=f'"{tdCom.getLongName(8, "letters")}"', id_noexist_tag=True, ts=0, ct_ma_tag=True)) + + return d_stb_d_tb_list, s_stb_s_tb_list, s_stb_s_tb_a_col_a_tag_list, s_stb_s_tb_m_col_m_tag_list, \ + 
s_stb_d_tb_list, s_stb_d_tb_a_col_m_tag_list, s_stb_d_tb_a_tag_m_col_list, s_stb_s_tb_d_ts_list, \
+            s_stb_s_tb_d_ts_a_col_m_tag_list, s_stb_s_tb_d_ts_a_tag_m_col_list, s_stb_d_tb_d_ts_list, \
+            s_stb_d_tb_d_ts_a_col_m_tag_list, s_stb_d_tb_d_ts_a_tag_m_col_list
+
+
+    def genMultiThreadSeq(self, sql_list):
+        tlist = list()
+        for insert_sql in sql_list:
+            t = threading.Thread(target=self._conn.schemaless_insert, args=([insert_sql[0]], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value,))
+            tlist.append(t)
+        return tlist
+
+    def multiThreadRun(self, tlist):
+        for t in tlist:
+            t.start()
+        for t in tlist:
+            t.join()
+
+    def stbInsertMultiThreadCheckCase(self):
+        """
+        thread input different stb
+        """
+        tdCom.cleanTb()
+        input_sql = self.genSqlList()[0]
+        self.multiThreadRun(self.genMultiThreadSeq(input_sql))
+        tdSql.query(f"show tables;")
+        tdSql.checkRows(5)
+
+    def sStbStbDdataInsertMultiThreadCheckCase(self):
+        """
+        thread input same stb tb, different data, result keeps first data
+        """
+        tdCom.cleanTb()
+        tb_name = tdCom.getLongName(7, "letters")
+        input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name)
+        self.resCmp(input_sql, stb_name)
+        s_stb_s_tb_list = self.genSqlList(stb_name=stb_name, tb_name=tb_name)[1]
+        self.multiThreadRun(self.genMultiThreadSeq(s_stb_s_tb_list))
+        tdSql.query(f"show tables;")
+        tdSql.checkRows(1)
+        expected_tb_name = self.getNoIdTbName(stb_name)[0]
+        tdSql.checkEqual(tb_name, expected_tb_name)
+        tdSql.query(f"select * from {stb_name};")
+        tdSql.checkRows(1)
+
+    def sStbStbDdataAtcInsertMultiThreadCheckCase(self):
+        """
+        thread input same stb tb, different data, add columns and tags, result keeps first data
+        """
+        tdCom.cleanTb()
+        tb_name = tdCom.getLongName(7, "letters")
+        input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name)
+        self.resCmp(input_sql, stb_name)
+        s_stb_s_tb_a_col_a_tag_list = self.genSqlList(stb_name=stb_name, tb_name=tb_name)[2]
+        self.multiThreadRun(self.genMultiThreadSeq(s_stb_s_tb_a_col_a_tag_list))
+        tdSql.query(f"show tables;")
+        tdSql.checkRows(1)
+        expected_tb_name = self.getNoIdTbName(stb_name)[0]
+        tdSql.checkEqual(tb_name, expected_tb_name)
+        tdSql.query(f"select * from {stb_name};")
+        tdSql.checkRows(1)
+
+    def sStbStbDdataMtcInsertMultiThreadCheckCase(self):
+        """
+        thread input same stb tb, different data, minus columns and tags, result keeps first data
+        """
+        tdCom.cleanTb()
+        tb_name = tdCom.getLongName(7, "letters")
+        input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name)
+        self.resCmp(input_sql, stb_name)
+        s_stb_s_tb_m_col_m_tag_list = self.genSqlList(stb_name=stb_name, tb_name=tb_name)[3]
+        self.multiThreadRun(self.genMultiThreadSeq(s_stb_s_tb_m_col_m_tag_list))
+        tdSql.query(f"show tables;")
+        tdSql.checkRows(1)
+        expected_tb_name = self.getNoIdTbName(stb_name)[0]
+        tdSql.checkEqual(tb_name, expected_tb_name)
+        tdSql.query(f"select * from {stb_name};")
+        tdSql.checkRows(1)
+
+    def sStbDtbDdataInsertMultiThreadCheckCase(self):
+        """
+        thread input same stb, different tb, different data
+        """
+        tdCom.cleanTb()
+        input_sql, stb_name = self.genFullTypeSql()
+        self.resCmp(input_sql, stb_name)
+        s_stb_d_tb_list = self.genSqlList(stb_name=stb_name)[4]
+        self.multiThreadRun(self.genMultiThreadSeq(s_stb_d_tb_list))
+        tdSql.query(f"show tables;")
+        tdSql.checkRows(6)
+
+    def sStbDtbDdataAcMtInsertMultiThreadCheckCase(self):
+        """
+        #!
concurrency conflict + """ + """ + thread input same stb, different tb, different data, add col, mul tag + """ + tdCom.cleanTb() + input_sql, stb_name = self.genFullTypeSql() + self.resCmp(input_sql, stb_name) + s_stb_d_tb_a_col_m_tag_list = self.genSqlList(stb_name=stb_name)[5] + self.multiThreadRun(self.genMultiThreadSeq(s_stb_d_tb_a_col_m_tag_list)) + tdSql.query(f"show tables;") + tdSql.checkRows(6) + + def sStbDtbDdataAtMcInsertMultiThreadCheckCase(self): + """ + #! concurrency conflict + """ + """ + thread input same stb, different tb, different data, add tag, mul col + """ + tdCom.cleanTb() + input_sql, stb_name = self.genFullTypeSql() + self.resCmp(input_sql, stb_name) + s_stb_d_tb_a_tag_m_col_list = self.genSqlList(stb_name=stb_name)[6] + self.multiThreadRun(self.genMultiThreadSeq(s_stb_d_tb_a_tag_m_col_list)) + tdSql.query(f"show tables;") + tdSql.checkRows(6) + + def sStbStbDdataDtsInsertMultiThreadCheckCase(self): + """ + thread input same stb tb, different ts + """ + tdCom.cleanTb() + tb_name = tdCom.getLongName(7, "letters") + input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name) + self.resCmp(input_sql, stb_name) + s_stb_s_tb_d_ts_list = self.genSqlList(stb_name=stb_name, tb_name=tb_name)[7] + self.multiThreadRun(self.genMultiThreadSeq(s_stb_s_tb_d_ts_list)) + tdSql.query(f"show tables;") + tdSql.checkRows(1) + tdSql.query(f"select * from {stb_name}") + tdSql.checkRows(6) + + def sStbStbDdataDtsAcMtInsertMultiThreadCheckCase(self): + """ + thread input same stb tb, different ts, add col, mul tag + """ + tdCom.cleanTb() + tb_name = tdCom.getLongName(7, "letters") + input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name) + self.resCmp(input_sql, stb_name) + s_stb_s_tb_d_ts_a_col_m_tag_list = self.genSqlList(stb_name=stb_name, tb_name=tb_name)[8] + self.multiThreadRun(self.genMultiThreadSeq(s_stb_s_tb_d_ts_a_col_m_tag_list)) + tdSql.query(f"show tables;") + tdSql.checkRows(1) + tdSql.query(f"select * from {stb_name}") + tdSql.checkRows(6) + tdSql.query(f"select * from {stb_name} where t8 is not NULL") + tdSql.checkRows(6) + tdSql.query(f"select * from {tb_name} where c11 is not NULL;") + tdSql.checkRows(5) + + def sStbStbDdataDtsAtMcInsertMultiThreadCheckCase(self): + """ + thread input same stb tb, different ts, add tag, mul col + """ + tdCom.cleanTb() + tb_name = tdCom.getLongName(7, "letters") + input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name) + self.resCmp(input_sql, stb_name) + s_stb_s_tb_d_ts_a_tag_m_col_list = self.genSqlList(stb_name=stb_name, tb_name=tb_name)[9] + self.multiThreadRun(self.genMultiThreadSeq(s_stb_s_tb_d_ts_a_tag_m_col_list)) + tdSql.query(f"show tables;") + tdSql.checkRows(1) + tdSql.query(f"select * from {stb_name}") + tdSql.checkRows(6) + for c in ["c7", "c8", "c9"]: + tdSql.query(f"select * from {stb_name} where {c} is NULL") + tdSql.checkRows(5) + for t in ["t10", "t11"]: + tdSql.query(f"select * from {stb_name} where {t} is not NULL;") + tdSql.checkRows(6) + + def sStbDtbDdataDtsInsertMultiThreadCheckCase(self): + """ + thread input same stb, different tb, data, ts + """ + tdCom.cleanTb() + input_sql, stb_name = self.genFullTypeSql() + self.resCmp(input_sql, stb_name) + s_stb_d_tb_d_ts_list = self.genSqlList(stb_name=stb_name)[10] + self.multiThreadRun(self.genMultiThreadSeq(s_stb_d_tb_d_ts_list)) + tdSql.query(f"show tables;") + tdSql.checkRows(6) + + def sStbDtbDdataDtsAcMtInsertMultiThreadCheckCase(self): + """ + # ! 
concurrency conflict + """ + """ + thread input same stb, different tb, data, ts, add col, mul tag + """ + tdCom.cleanTb() + input_sql, stb_name = self.genFullTypeSql() + self.resCmp(input_sql, stb_name) + s_stb_d_tb_d_ts_a_col_m_tag_list = self.genSqlList(stb_name=stb_name)[11] + self.multiThreadRun(self.genMultiThreadSeq(s_stb_d_tb_d_ts_a_col_m_tag_list)) + tdSql.query(f"show tables;") + tdSql.checkRows(6) + + def test(self): + input_sql1 = "rfasta,id=\"rfasta_1\",t0=true,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"ddzhiksj\",t8=L\"ncharTagValue\" c0=True,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"bnhwlgvj\",c8=L\"ncharTagValue\",c9=7u64 1626006933640000000ns" + input_sql2 = "rfasta,id=\"rfasta_1\",t0=true,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64 c0=True,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64 1626006933640000000ns" + try: + self._conn.insert_lines([input_sql1]) + self._conn.insert_lines([input_sql2]) + except SchemalessError as err: + tdSql.checkNotEqual(err.errno, 0) + # self._conn.insert_lines([input_sql2]) + # input_sql3 = f'abcd,id="cc¥Ec",t0=True,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7="ndsfdrum",t8=L"ncharTagValue" c0=f,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7="igwoehkm",c8=L"ncharColValue",c9=7u64 0' + # print(input_sql3) + # input_sql4 = 'hmemeb,id="kilrcrldgf",t0=F,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7="fysodjql",t8=L"ncharTagValue" c0=True,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7="waszbfvc",c8=L"ncharColValue",c9=7u64 0' + # code = self._conn.insert_lines([input_sql3]) + # print(code) + # self._conn.insert_lines([input_sql4]) + + def runAll(self): + self.initCheckCase() + self.boolTypeCheckCase() + self.symbolsCheckCase() + # self.tsCheckCase() + self.idSeqCheckCase() + self.idUpperCheckCase() + self.noIdCheckCase() + # self.maxColTagCheckCase() + self.idIllegalNameCheckCase() + self.idStartWithNumCheckCase() + self.nowTsCheckCase() + self.dateFormatTsCheckCase() + self.illegalTsCheckCase() + # self.tagValueLengthCheckCase() + self.colValueLengthCheckCase() + self.tagColIllegalValueCheckCase() + self.duplicateIdTagColInsertCheckCase() + self.noIdStbExistCheckCase() + self.duplicateInsertExistCheckCase() + self.tagColBinaryNcharLengthCheckCase() + self.tagColAddDupIDCheckCase() + self.tagColAddCheckCase() + self.tagMd5Check() + # self.tagColBinaryMaxLengthCheckCase() + # self.tagColNcharMaxLengthCheckCase() + self.batchInsertCheckCase() + self.multiInsertCheckCase(10) + self.batchErrorInsertCheckCase() + # MultiThreads + # self.stbInsertMultiThreadCheckCase() + # self.sStbStbDdataInsertMultiThreadCheckCase() + # self.sStbStbDdataAtcInsertMultiThreadCheckCase() + # self.sStbStbDdataMtcInsertMultiThreadCheckCase() + # self.sStbDtbDdataInsertMultiThreadCheckCase() + + # # # ! concurrency conflict + # # self.sStbDtbDdataAcMtInsertMultiThreadCheckCase() + # # self.sStbDtbDdataAtMcInsertMultiThreadCheckCase() + + # self.sStbStbDdataDtsInsertMultiThreadCheckCase() + + # # # ! 
concurrency conflict + # # self.sStbStbDdataDtsAcMtInsertMultiThreadCheckCase() + # # self.sStbStbDdataDtsAtMcInsertMultiThreadCheckCase() + + # self.sStbDtbDdataDtsInsertMultiThreadCheckCase() + + # # ! concurrency conflict + # # self.sStbDtbDdataDtsAcMtInsertMultiThreadCheckCase() + + + + def run(self): + print("running {}".format(__file__)) + self.createDb() + try: + self.runAll() + except Exception as err: + print(''.join(traceback.format_exception(None, err, err.__traceback__))) + raise err + # self.tagColIllegalValueCheckCase() + # self.test() + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/system-test/1-insert/opentsdb_json_taosc_insert.py b/tests/system-test/1-insert/opentsdb_json_taosc_insert.py new file mode 100644 index 0000000000000000000000000000000000000000..f9bc5bbaf421394cf936bb4aaa031649a4ffa8f5 --- /dev/null +++ b/tests/system-test/1-insert/opentsdb_json_taosc_insert.py @@ -0,0 +1,1788 @@ +################################################################### +# Copyright (c) 2021 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import traceback +import random +from taos.error import SchemalessError +import time +from util.log import * +from util.cases import * +from util.sql import * +from util.common import tdCom +from util.types import TDSmlProtocolType +import threading +import json + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + self._conn = conn + self.defaultJSONStrType_value = "NCHAR" + + def createDb(self, name="test", db_update_tag=0, protocol=None): + if protocol == "telnet-tcp": + name = "opentsdb_telnet" + + if db_update_tag == 0: + tdSql.execute(f"drop database if exists {name}") + tdSql.execute(f"create database if not exists {name} precision 'ms' schemaless 1") + else: + tdSql.execute(f"drop database if exists {name}") + tdSql.execute(f"create database if not exists {name} precision 'ms' update 1 schemaless 1") + tdSql.execute(f'use {name}') + + def timeTrans(self, time_value): + if type(time_value) is int: + if time_value != 0: + if len(str(time_value)) == 13: + ts = int(time_value)/1000 + elif len(str(time_value)) == 10: + ts = int(time_value)/1 + else: + ts = time_value/1000000 + else: + ts = time.time() + elif type(time_value) is dict: + if time_value["type"].lower() == "ns": + ts = time_value["value"]/1000000000 + elif time_value["type"].lower() == "us": + ts = time_value["value"]/1000000 + elif time_value["type"].lower() == "ms": + ts = time_value["value"]/1000 + elif time_value["type"].lower() == "s": + ts = time_value["value"]/1 + else: + ts = time_value["value"]/1000000 + ulsec = repr(ts).split('.')[1][:6] + if len(ulsec) < 6 and int(ulsec) != 0: + ulsec = int(ulsec) * (10 ** (6 - len(ulsec))) + elif int(ulsec) == 0: + ulsec *= 6 + # * follow two rows added for tsCheckCase + td_ts = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(ts)) + return td_ts + #td_ts = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(ts)) + td_ts = time.strftime("%Y-%m-%d 
%H:%M:%S.{}".format(ulsec), time.localtime(ts)) + return td_ts + + def dateToTs(self, datetime_input): + return int(time.mktime(time.strptime(datetime_input, "%Y-%m-%d %H:%M:%S.%f"))) + + def typeTrans(self, type_list): + type_num_list = [] + for tp in type_list: + if type(tp) is dict: + tp = tp['type'] + if tp.upper() == "TIMESTAMP": + type_num_list.append(9) + elif tp.upper() == "BOOL": + type_num_list.append(1) + elif tp.upper() == "TINYINT": + type_num_list.append(2) + elif tp.upper() == "SMALLINT": + type_num_list.append(3) + elif tp.upper() == "INT": + type_num_list.append(4) + elif tp.upper() == "BIGINT": + type_num_list.append(5) + elif tp.upper() == "FLOAT": + type_num_list.append(6) + elif tp.upper() == "DOUBLE": + type_num_list.append(7) + elif tp.upper() == "VARCHAR": + type_num_list.append(8) + elif tp.upper() == "NCHAR": + type_num_list.append(10) + elif tp.upper() == "BIGINT UNSIGNED": + type_num_list.append(14) + return type_num_list + + def inputHandle(self, input_json): + stb_name = input_json["metric"] + stb_tag_dict = input_json["tags"] + stb_col_dict = input_json["value"] + ts_value = self.timeTrans(input_json["timestamp"]) + tag_name_list = [] + tag_value_list = [] + td_tag_value_list = [] + td_tag_type_list = [] + + col_name_list = [] + col_value_list = [] + td_col_value_list = [] + td_col_type_list = [] + + # handle tag + for key,value in stb_tag_dict.items(): + if "id" == key.lower(): + tb_name = value + else: + if type(value) is dict: + tag_value_list.append(str(value["value"])) + td_tag_value_list.append(str(value["value"])) + tag_name_list.append(key.lower()) + if value["type"].lower() == "binary": + td_tag_type_list.append("VARCHAR") + else: + td_tag_type_list.append(value["type"].upper()) + tb_name = "" + else: + tag_value_list.append(str(value)) + # td_tag_value_list.append(str(value)) + tag_name_list.append(key.lower()) + tb_name = "" + + if type(value) is bool: + td_tag_type_list.append("BOOL") + td_tag_value_list.append(str(value)) + elif type(value) is int: + td_tag_type_list.append("DOUBLE") + td_tag_value_list.append(str(float(value))) + elif type(value) is float: + td_tag_type_list.append("DOUBLE") + td_tag_value_list.append(str(float(value))) + elif type(value) is str: + if self.defaultJSONStrType_value == "NCHAR": + td_tag_type_list.append("NCHAR") + td_tag_value_list.append(str(value)) + else: + td_tag_type_list.append("VARCHAR") + td_tag_value_list.append(str(value)) + + # handle col + if type(stb_col_dict) is dict: + if stb_col_dict["type"].lower() == "bool": + bool_value = f'{stb_col_dict["value"]}' + col_value_list.append(bool_value) + td_col_type_list.append(stb_col_dict["type"].upper()) + col_name_list.append("_value") + td_col_value_list.append(str(stb_col_dict["value"])) + else: + col_value_list.append(stb_col_dict["value"]) + if stb_col_dict["type"].lower() == "binary": + td_col_type_list.append("VARCHAR") + else: + td_col_type_list.append(stb_col_dict["type"].upper()) + col_name_list.append("_value") + td_col_value_list.append(str(stb_col_dict["value"])) + else: + col_name_list.append("_value") + col_value_list.append(str(stb_col_dict)) + # td_col_value_list.append(str(stb_col_dict)) + if type(stb_col_dict) is bool: + td_col_type_list.append("BOOL") + td_col_value_list.append(str(stb_col_dict)) + elif type(stb_col_dict) is int: + td_col_type_list.append("DOUBLE") + td_col_value_list.append(str(float(stb_col_dict))) + elif type(stb_col_dict) is float: + td_col_type_list.append("DOUBLE") + td_col_value_list.append(str(float(stb_col_dict))) + 
elif type(stb_col_dict) is str: + if self.defaultJSONStrType_value == "NCHAR": + td_col_type_list.append("NCHAR") + td_col_value_list.append(str(stb_col_dict)) + else: + td_col_type_list.append("VARCHAR") + td_col_value_list.append(str(stb_col_dict)) + + final_field_list = [] + final_field_list.extend(col_name_list) + final_field_list.extend(tag_name_list) + + final_type_list = [] + final_type_list.append("TIMESTAMP") + final_type_list.extend(td_col_type_list) + final_type_list.extend(td_tag_type_list) + final_type_list = self.typeTrans(final_type_list) + + final_value_list = [] + final_value_list.append(ts_value) + final_value_list.extend(td_col_value_list) + final_value_list.extend(td_tag_value_list) + return final_value_list, final_field_list, final_type_list, stb_name, tb_name + + def genTsColValue(self, value, t_type=None, value_type="obj"): + if value_type == "obj": + if t_type == None: + ts_col_value = value + else: + ts_col_value = {"value": value, "type": t_type} + elif value_type == "default": + ts_col_value = value + return ts_col_value + + def genTagValue(self, t0_type="bool", t0_value="", t1_type="tinyint", t1_value=127, t2_type="smallint", t2_value=32767, + t3_type="int", t3_value=2147483647, t4_type="bigint", t4_value=9223372036854775807, + t5_type="float", t5_value=11.12345027923584, t6_type="double", t6_value=22.123456789, + t7_type="binary", t7_value="binaryTagValue", t8_type="nchar", t8_value="ncharTagValue", value_type="obj"): + if t0_value == "": + t0_value = random.choice([True, False]) + if value_type == "obj": + tag_value = { + "t0": {"value": t0_value, "type": t0_type}, + "t1": {"value": t1_value, "type": t1_type}, + "t2": {"value": t2_value, "type": t2_type}, + "t3": {"value": t3_value, "type": t3_type}, + "t4": {"value": t4_value, "type": t4_type}, + "t5": {"value": t5_value, "type": t5_type}, + "t6": {"value": t6_value, "type": t6_type}, + "t7": {"value": t7_value, "type": t7_type}, + "t8": {"value": t8_value, "type": t8_type} + } + elif value_type == "default": + # t5_value = t6_value + tag_value = { + "t0": t0_value, + "t1": t1_value, + "t2": t2_value, + "t3": t3_value, + "t4": t4_value, + "t5": t5_value, + "t6": t6_value, + "t7": t7_value, + "t8": t8_value + } + return tag_value + + def genFullTypeJson(self, ts_value="", col_value="", tag_value="", stb_name="", tb_name="", + id_noexist_tag=None, id_change_tag=None, id_upper_tag=None, id_mixul_tag=None, id_double_tag=None, + t_add_tag=None, t_mul_tag=None, c_multi_tag=None, c_blank_tag=None, t_blank_tag=None, + chinese_tag=None, multi_field_tag=None, point_trans_tag=None, value_type="obj"): + if value_type == "obj": + if stb_name == "": + stb_name = tdCom.getLongName(6, "letters") + if tb_name == "": + tb_name = f'{stb_name}_{random.randint(0, 65535)}_{random.randint(0, 65535)}' + if ts_value == "": + ts_value = self.genTsColValue(1626006833639000000, "ns") + if col_value == "": + col_value = self.genTsColValue(random.choice([True, False]), "bool") + if tag_value == "": + tag_value = self.genTagValue() + # if id_upper_tag is not None: + # id = "ID" + # else: + # id = "id" + # if id_mixul_tag is not None: + # id = random.choice(["iD", "Id"]) + # else: + # id = "id" + # if id_noexist_tag is None: + # tag_value[id] = tb_name + sql_json = {"metric": stb_name, "timestamp": ts_value, "value": col_value, "tags": tag_value} + if id_noexist_tag is not None: + if t_add_tag is not None: + tag_value["t9"] = {"value": "ncharTagValue", "type": "nchar"} + sql_json = {"metric": stb_name, "timestamp": ts_value, "value": 
col_value, "tags": tag_value} + if id_change_tag is not None: + tag_value.pop('t8') + tag_value["t8"] = {"value": "ncharTagValue", "type": "nchar"} + sql_json = {"metric": stb_name, "timestamp": ts_value, "value": col_value, "tags": tag_value} + if id_double_tag is not None: + tag_value["ID"] = f'"{tb_name}_2"' + sql_json = {"metric": stb_name, "timestamp": ts_value, "value": col_value, "tags": tag_value} + if t_add_tag is not None: + tag_value["t10"] = {"value": "ncharTagValue", "type": "nchar"} + tag_value["t11"] = {"value": True, "type": "bool"} + sql_json = {"metric": stb_name, "timestamp": ts_value, "value": col_value, "tags": tag_value} + if t_mul_tag is not None: + tag_value.pop('t8') + sql_json = {"metric": stb_name, "timestamp": ts_value, "value": col_value, "tags": tag_value} + if c_multi_tag is not None: + col_value = [{"value": True, "type": "bool"}, {"value": False, "type": "bool"}] + sql_json = {"metric": stb_name, "timestamp": ts_value, "value": col_value, "tags": tag_value} + if t_blank_tag is not None: + tag_value = "" + sql_json = {"metric": stb_name, "timestamp": ts_value, "value": col_value, "tags": tag_value} + if chinese_tag is not None: + tag_value = {"t0": {"value": "涛思数据", "type": "nchar"}} + col_value = {"value": "涛思数据", "type": "nchar"} + sql_json = {"metric": stb_name, "timestamp": ts_value, "value": col_value, "tags": tag_value} + if c_blank_tag is not None: + sql_json.pop("value") + if multi_field_tag is not None: + sql_json = {"metric": stb_name, "timestamp": ts_value, "value": col_value, "tags": tag_value, "tags2": tag_value} + if point_trans_tag is not None: + sql_json = {"metric": ".point.trans.test", "timestamp": ts_value, "value": col_value, "tags": tag_value} + + elif value_type == "default": + if stb_name == "": + stb_name = tdCom.getLongName(6, "letters") + if tb_name == "": + tb_name = f'{stb_name}_{random.randint(0, 65535)}_{random.randint(0, 65535)}' + if ts_value == "": + ts_value = 1626006834 + if col_value == "": + col_value = random.choice([True, False]) + if tag_value == "": + tag_value = self.genTagValue(value_type=value_type) + # if id_upper_tag is not None: + # id = "ID" + # else: + # id = "id" + # if id_mixul_tag is not None: + # id = "iD" + # else: + # id = "id" + # if id_noexist_tag is None: + # tag_value[id] = tb_name + sql_json = {"metric": stb_name, "timestamp": ts_value, "value": col_value, "tags": tag_value} + if id_noexist_tag is not None: + if t_add_tag is not None: + tag_value["t9"] = {"value": "ncharTagValue", "type": "nchar"} + sql_json = {"metric": stb_name, "timestamp": ts_value, "value": col_value, "tags": tag_value} + if id_change_tag is not None: + tag_value.pop('t7') + tag_value["t7"] = {"value": "ncharTagValue", "type": "nchar"} + sql_json = {"metric": stb_name, "timestamp": ts_value, "value": col_value, "tags": tag_value} + if id_double_tag is not None: + tag_value["ID"] = f'"{tb_name}_2"' + sql_json = {"metric": stb_name, "timestamp": ts_value, "value": col_value, "tags": tag_value} + if t_add_tag is not None: + tag_value["t10"] = {"value": "ncharTagValue", "type": "nchar"} + tag_value["t11"] = True + sql_json = {"metric": stb_name, "timestamp": ts_value, "value": col_value, "tags": tag_value} + if t_mul_tag is not None: + tag_value.pop('t7') + sql_json = {"metric": stb_name, "timestamp": ts_value, "value": col_value, "tags": tag_value} + if c_multi_tag is not None: + col_value = True,False + sql_json = {"metric": stb_name, "timestamp": ts_value, "value": col_value, "tags": tag_value} + if t_blank_tag is not None: + 
tag_value = "" + sql_json = {"metric": stb_name, "timestamp": ts_value, "value": col_value, "tags": tag_value} + if c_blank_tag is not None: + sql_json.pop("value") + if multi_field_tag is not None: + sql_json = {"metric": stb_name, "timestamp": ts_value, "value": col_value, "tags": tag_value, "tags2": tag_value} + if point_trans_tag is not None: + sql_json = {"metric": ".point.trans.test", "timestamp": ts_value, "value": col_value, "tags": tag_value} + return sql_json, stb_name + + def genMulTagColDict(self, genType, count=1, value_type="obj"): + """ + genType must be tag/col + """ + tag_dict = dict() + col_dict = dict() + if value_type == "obj": + if genType == "tag": + for i in range(0, count): + tag_dict[f't{i}'] = {'value': True, 'type': 'bool'} + return tag_dict + if genType == "col": + col_dict = {'value': True, 'type': 'bool'} + return col_dict + elif value_type == "default": + if genType == "tag": + for i in range(0, count): + tag_dict[f't{i}'] = True + return tag_dict + if genType == "col": + col_dict = True + return col_dict + + def genLongJson(self, tag_count, value_type="obj"): + stb_name = tdCom.getLongName(7, mode="letters") + # tb_name = f'{stb_name}_1' + tag_dict = self.genMulTagColDict("tag", tag_count, value_type) + col_dict = self.genMulTagColDict("col", 1, value_type) + # tag_dict["id"] = tb_name + ts_dict = {'value': 1626006833639000000, 'type': 'ns'} + long_json = {"metric": stb_name, "timestamp": ts_dict, "value": col_dict, "tags": tag_dict} + return long_json, stb_name + + def getNoIdTbName(self, stb_name): + query_sql = f"select tbname from {stb_name}" + tb_name = self.resHandle(query_sql, True)[0][0] + return tb_name + + def resHandle(self, query_sql, query_tag): + tdSql.execute('reset query cache') + row_info = tdSql.query(query_sql, query_tag) + col_info = tdSql.getColNameList(query_sql, query_tag) + res_row_list = [] + sub_list = [] + for row_mem in row_info: + for i in row_mem: + if "11.1234" in str(i) and str(i) != "11.12345f32" and str(i) != "11.12345027923584F32": + sub_list.append("11.12345027923584") + elif "22.1234" in str(i) and str(i) != "22.123456789f64" and str(i) != "22.123456789F64": + sub_list.append("22.123456789") + else: + sub_list.append(str(i)) + res_row_list.append(sub_list) + res_field_list_without_ts = col_info[0][1:] + res_type_list = col_info[1] + return res_row_list, res_field_list_without_ts, res_type_list + + def resCmp(self, input_json, stb_name, query_sql="select * from", condition="", ts=None, id=True, none_check_tag=None, none_type_check=None): + expect_list = self.inputHandle(input_json) + print("----", json.dumps(input_json)) + self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None) + print("!!!!!----", json.dumps(input_json)) + query_sql = f"{query_sql} {stb_name} {condition}" + res_row_list, res_field_list_without_ts, res_type_list = self.resHandle(query_sql, True) + if ts == 0: + res_ts = self.dateToTs(res_row_list[0][0]) + current_time = time.time() + if current_time - res_ts < 60: + tdSql.checkEqual(res_row_list[0][1:], expect_list[0][1:]) + else: + print("timeout") + tdSql.checkEqual(res_row_list[0], expect_list[0]) + else: + if none_check_tag is not None: + none_index_list = [i for i,x in enumerate(res_row_list[0]) if x=="None"] + none_index_list.reverse() + for j in none_index_list: + res_row_list[0].pop(j) + expect_list[0].pop(j) + tdSql.checkEqual(sorted(res_row_list[0]), sorted(expect_list[0])) + tdSql.checkEqual(sorted(res_field_list_without_ts), sorted(expect_list[1])) + 
tdSql.checkEqual(res_type_list, expect_list[2]) + + def initCheckCase(self, value_type="obj"): + """ + normal tags and cols, one for every elm + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + input_json, stb_name = self.genFullTypeJson(value_type=value_type) + self.resCmp(input_json, stb_name) + + def boolTypeCheckCase(self): + """ + check all normal type + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + full_type_list = ["f", "F", "false", "False", "t", "T", "true", "True"] + for t_type in full_type_list: + input_json_list = [self.genFullTypeJson(tag_value=self.genTagValue(t0_value=t_type))[0], + self.genFullTypeJson(col_value=self.genTsColValue(value=t_type, t_type="bool"))[0]] + for input_json in input_json_list: + try: + self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None) + raise Exception("should not reach here") + except SchemalessError as err: + tdSql.checkNotEqual(err.errno, 0) + + def symbolsCheckCase(self, value_type="obj"): + """ + check symbols = `~!@#$%^&*()_-+={[}]\|:;'\",<.>/? + """ + ''' + please test : + binary_symbols = '\"abcd`~!@#$%^&*()_-{[}]|:;<.>?lfjal"\'\'"\"' + ''' + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + binary_symbols = '"abcd`~!@#$%^&*()_-{[}]|:;<.>?lfjal"' + nchar_symbols = binary_symbols + input_sql1, stb_name1 = self.genFullTypeJson(col_value=self.genTsColValue(value=binary_symbols, t_type="binary", value_type=value_type), + tag_value=self.genTagValue(t7_value=binary_symbols, t8_value=nchar_symbols, value_type=value_type)) + input_sql2, stb_name2 = self.genFullTypeJson(col_value=self.genTsColValue(value=nchar_symbols, t_type="nchar", value_type=value_type), + tag_value=self.genTagValue(t7_value=binary_symbols, t8_value=nchar_symbols, value_type=value_type)) + self.resCmp(input_sql1, stb_name1) + self.resCmp(input_sql2, stb_name2) + + def tsCheckCase(self, value_type="obj"): + """ + test ts list --> ["1626006833639000000ns", "1626006833639019us", "1626006833640ms", "1626006834s", "1626006822639022"] + # ! 
when a us-precision timestamp is all zeros, a database query displays the value, but the result fetched through the Python connector omits the trailing .000000; please confirm this case. The test passes with the current change to the time-handling code + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + ts_list = ["1626006833639000000ns", "1626006833639019us", "1626006833640ms", "1626006834s", "1626006834", 0] + for ts in ts_list: + if "s" in str(ts): + input_json, stb_name = self.genFullTypeJson(ts_value=self.genTsColValue(value=int(tdCom.splitNumLetter(ts)[0]), t_type=tdCom.splitNumLetter(ts)[1])) + self.resCmp(input_json, stb_name, ts=ts) + else: + input_json, stb_name = self.genFullTypeJson(ts_value=self.genTsColValue(value=int(ts), t_type="s", value_type=value_type)) + self.resCmp(input_json, stb_name, ts=ts) + if int(ts) == 0: + if value_type == "obj": + input_json_list = [self.genFullTypeJson(ts_value=self.genTsColValue(value=int(ts), t_type="")), + self.genFullTypeJson(ts_value=self.genTsColValue(value=int(ts), t_type="ns")), + self.genFullTypeJson(ts_value=self.genTsColValue(value=int(ts), t_type="us")), + self.genFullTypeJson(ts_value=self.genTsColValue(value=int(ts), t_type="ms")), + self.genFullTypeJson(ts_value=self.genTsColValue(value=int(ts), t_type="s"))] + elif value_type == "default": + input_json_list = [self.genFullTypeJson(ts_value=self.genTsColValue(value=int(ts), value_type=value_type))] + for input_json in input_json_list: + self.resCmp(input_json[0], input_json[1], ts=ts) + else: + input_json = self.genFullTypeJson(ts_value=self.genTsColValue(value=int(ts), t_type=""))[0] + try: + self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None) + raise Exception("should not reach here") + except SchemalessError as err: + tdSql.checkNotEqual(err.errno, 0) + # check result + #! bug + tdSql.execute(f"drop database if exists test_ts") + tdSql.execute(f"create database if not exists test_ts precision 'ms' schemaless 1") + tdSql.execute("use test_ts") + input_json = [{"metric": "test_ms", "timestamp": {"value": 1626006833640, "type": "ms"}, "value": True, "tags": {"t0": True}}, + {"metric": "test_ms", "timestamp": {"value": 1626006833641, "type": "ms"}, "value": False, "tags": {"t0": True}}] + self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None) + res = tdSql.query('select * from test_ms', True) + tdSql.checkEqual(str(res[0][0]), "2021-07-11 20:33:53.640000") + tdSql.checkEqual(str(res[1][0]), "2021-07-11 20:33:53.641000") + + tdSql.execute(f"drop database if exists test_ts") + tdSql.execute(f"create database if not exists test_ts precision 'us' schemaless 1") + tdSql.execute("use test_ts") + input_json = [{"metric": "test_us", "timestamp": {"value": 1626006833639000, "type": "us"}, "value": True, "tags": {"t0": True}}, + {"metric": "test_us", "timestamp": {"value": 1626006833639001, "type": "us"}, "value": False, "tags": {"t0": True}}] + self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None) + res = tdSql.query('select * from test_us', True) + tdSql.checkEqual(str(res[0][0]), "2021-07-11 20:33:53.639000") + tdSql.checkEqual(str(res[1][0]), "2021-07-11 20:33:53.639001") + + tdSql.execute(f"drop database if exists test_ts") + tdSql.execute(f"create database if not exists test_ts precision 'ns' schemaless 1") + tdSql.execute("use test_ts") + input_json = [{"metric": "test_ns", "timestamp": {"value": 1626006833639000000, "type": "ns"}, "value": True, "tags": {"t0": True}}, + {"metric": "test_ns", "timestamp": {"value": 1626006833639000001, "type": "ns"}, "value": False, "tags": {"t0": True}}] + 
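# with ns precision the timestamp column comes back as a raw epoch integer, hence the string checks below +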
self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None) + res = tdSql.query('select * from test_ns', True) + tdSql.checkEqual(str(res[0][0]), "1626006833639000000") + tdSql.checkEqual(str(res[1][0]), "1626006833639000001") + self.createDb() + + def idSeqCheckCase(self, value_type="obj"): + """ + check id.index in tags + eg: t0=**,id=**,t1=** + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + input_json, stb_name = self.genFullTypeJson(id_change_tag=True, value_type=value_type) + self.resCmp(input_json, stb_name) + + def idLetterCheckCase(self, value_type="obj"): + """ + check id param + eg: id and ID + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + input_json, stb_name = self.genFullTypeJson(id_upper_tag=True, value_type=value_type) + self.resCmp(input_json, stb_name) + input_json, stb_name = self.genFullTypeJson(id_mixul_tag=True, value_type=value_type) + self.resCmp(input_json, stb_name) + input_json, stb_name = self.genFullTypeJson(id_change_tag=True, id_upper_tag=True, value_type=value_type) + self.resCmp(input_json, stb_name) + + def noIdCheckCase(self, value_type="obj"): + """ + id not exist + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + input_json, stb_name = self.genFullTypeJson(id_noexist_tag=True, value_type=value_type) + self.resCmp(input_json, stb_name) + query_sql = f"select tbname from {stb_name}" + res_row_list = self.resHandle(query_sql, True)[0] + if len(res_row_list[0][0]) > 0: + tdSql.checkColNameList(res_row_list, res_row_list) + else: + tdSql.checkColNameList(res_row_list, "please check noIdCheckCase") + + def maxColTagCheckCase(self, value_type="obj"): + """ + max tag count is 128 + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + for input_json in [self.genLongJson(128, value_type)[0]]: + tdCom.cleanTb() + self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None) + for input_json in [self.genLongJson(129, value_type)[0]]: + tdCom.cleanTb() + try: + self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None) + except SchemalessError as err: + tdSql.checkNotEqual(err.errno, 0) + + def idIllegalNameCheckCase(self, value_type="obj"): + """ + test illegal id name + mix "`~!@#$¥%^&*()-+={}|[]、「」【】\:;《》<>?" 
+ """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + rstr = list("`~!@#$¥%^&*()-+={}|[]、「」【】\:;《》<>?") + for i in rstr: + input_json = self.genFullTypeJson(tb_name=f'aa{i}bb', value_type=value_type)[0] + try: + self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None) + except SchemalessError as err: + tdSql.checkNotEqual(err.errno, 0) + + def idStartWithNumCheckCase(self, value_type="obj"): + """ + id is start with num + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + input_json = self.genFullTypeJson(tb_name="1aaabbb", value_type=value_type)[0] + try: + self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None) + except SchemalessError as err: + tdSql.checkNotEqual(err.errno, 0) + + def nowTsCheckCase(self, value_type="obj"): + """ + check now unsupported + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + input_json = self.genFullTypeJson(ts_value=self.genTsColValue(value="now", t_type="ns", value_type=value_type))[0] + try: + self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None) + except SchemalessError as err: + tdSql.checkNotEqual(err.errno, 0) + + def dateFormatTsCheckCase(self, value_type="obj"): + """ + check date format ts unsupported + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + input_json = self.genFullTypeJson(ts_value=self.genTsColValue(value="2021-07-21\ 19:01:46.920", t_type="ns", value_type=value_type))[0] + try: + self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None) + except SchemalessError as err: + tdSql.checkNotEqual(err.errno, 0) + + def illegalTsCheckCase(self, value_type="obj"): + """ + check ts format like 16260068336390us19 + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + input_json = self.genFullTypeJson(ts_value=self.genTsColValue(value="16260068336390us19", t_type="us", value_type=value_type))[0] + try: + self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None) + except SchemalessError as err: + tdSql.checkNotEqual(err.errno, 0) + + def tbnameCheckCase(self, value_type="obj"): + """ + check length 192 + check upper tbname + chech upper tag + length of stb_name tb_name <= 192 + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + tdSql.execute('reset query cache') + stb_name_192 = tdCom.getLongName(len=192, mode="letters") + tb_name_192 = tdCom.getLongName(len=192, mode="letters") + input_json, stb_name = self.genFullTypeJson(stb_name=stb_name_192, tb_name=tb_name_192, value_type=value_type) + self.resCmp(input_json, stb_name) + tdSql.query(f'select * from {stb_name}') + tdSql.checkRows(1) + for input_json in [self.genFullTypeJson(stb_name=tdCom.getLongName(len=193, mode="letters"), tb_name=tdCom.getLongName(len=5, mode="letters"), value_type=value_type)[0]]: + try: + self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None) + raise Exception("should not reach here") + except SchemalessError as err: + tdSql.checkNotEqual(err.errno, 0) + stbname = tdCom.getLongName(len=10, mode="letters") + input_json = {'metric': f'A{stbname}', 'timestamp': {'value': 1626006833639000000, 'type': 'ns'}, 'value': {'value': False, 'type': 'bool'}, 'tags': {'t1': {'value': 127, 'type': 'tinyint'}, 
"t2": 127}} + stb_name = f'`A{stbname}`' + self.resCmp(input_json, stb_name) + tdSql.execute(f"drop table {stb_name}") + + def tagNameLengthCheckCase(self): + """ + check tag name limit <= 62 + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + tag_name = tdCom.getLongName(61, "letters") + tag_name = f't{tag_name}' + stb_name = tdCom.getLongName(7, "letters") + input_json = {'metric': stb_name, 'timestamp': {'value': 1626006833639000000, 'type': 'ns'}, 'value': "bcdaaa", 'tags': {tag_name: {'value': False, 'type': 'bool'}}} + self.resCmp(input_json, stb_name) + input_json = {'metric': stb_name, 'timestamp': {'value': 1626006833639000001, 'type': 'ns'}, 'value': "bcdaaaa", 'tags': {tdCom.getLongName(65, "letters"): {'value': False, 'type': 'bool'}}} + try: + self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None) + raise Exception("should not reach here") + except SchemalessError as err: + tdSql.checkNotEqual(err.errno, 0) + + def tagValueLengthCheckCase(self, value_type="obj"): + """ + check full type tag value limit + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + # i8 + for t1 in [-127, 127]: + input_json, stb_name = self.genFullTypeJson(tag_value=self.genTagValue(t1_value=t1, value_type=value_type)) + self.resCmp(input_json, stb_name) + for t1 in [-128, 128]: + input_json = self.genFullTypeJson(tag_value=self.genTagValue(t1_value=t1))[0] + try: + self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None) + raise Exception("should not reach here") + except SchemalessError as err: + tdSql.checkNotEqual(err.errno, 0) + + #i16 + for t2 in [-32767, 32767]: + input_json, stb_name = self.genFullTypeJson(tag_value=self.genTagValue(t2_value=t2, value_type=value_type)) + self.resCmp(input_json, stb_name) + for t2 in [-32768, 32768]: + input_json = self.genFullTypeJson(tag_value=self.genTagValue(t2_value=t2))[0] + try: + self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None) + raise Exception("should not reach here") + except SchemalessError as err: + tdSql.checkNotEqual(err.errno, 0) + + #i32 + for t3 in [-2147483647, 2147483647]: + input_json, stb_name = self.genFullTypeJson(tag_value=self.genTagValue(t3_value=t3, value_type=value_type)) + self.resCmp(input_json, stb_name) + for t3 in [-2147483648, 2147483648]: + input_json = self.genFullTypeJson(tag_value=self.genTagValue(t3_value=t3))[0] + try: + self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None) + raise Exception("should not reach here") + except SchemalessError as err: + tdSql.checkNotEqual(err.errno, 0) + + #i64 + for t4 in [-9223372036854775807, 9223372036854775807]: + input_json, stb_name = self.genFullTypeJson(tag_value=self.genTagValue(t4_value=t4, value_type=value_type)) + self.resCmp(input_json, stb_name) + + for t4 in [-9223372036854775808, 9223372036854775808]: + input_json = self.genFullTypeJson(tag_value=self.genTagValue(t4_value=t4))[0] + try: + self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None) + raise Exception("should not reach here") + except SchemalessError as err: + tdSql.checkNotEqual(err.errno, 0) + + # f32 + for t5 in [-3.4028234663852885981170418348451692544*(10**38), 3.4028234663852885981170418348451692544*(10**38)]: + input_json, stb_name = self.genFullTypeJson(tag_value=self.genTagValue(t5_value=t5, value_type=value_type)) + 
self.resCmp(input_json, stb_name) + # * limit set to 3.4028234664*(10**38) + for t5 in [-3.4028234664*(10**38), 3.4028234664*(10**38)]: + input_json = self.genFullTypeJson(tag_value=self.genTagValue(t5_value=t5))[0] + try: + self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None) + raise Exception("should not reach here") + except SchemalessError as err: + tdSql.checkNotEqual(err.errno, 0) + + # f64 + for t6 in [-1.79769*(10**308), 1.79769*(10**308)]: + input_json, stb_name = self.genFullTypeJson(tag_value=self.genTagValue(t6_value=t6, value_type=value_type)) + self.resCmp(input_json, stb_name) + for t6 in [float(-1.797693134862316*(10**308)), 1.797693134862316*(10**308)]: + input_json = self.genFullTypeJson(tag_value=self.genTagValue(t6_value=t6, value_type=value_type))[0] + try: + self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None) + raise Exception("should not reach here") + except SchemalessError as err: + tdSql.checkNotEqual(err.errno, 0) + + if value_type == "obj": + # binary + stb_name = tdCom.getLongName(7, "letters") + input_json = {"metric": stb_name, "timestamp": {'value': 1626006833639000000, 'type': 'ns'}, "value": {'value': True, 'type': 'bool'}, "tags": {"t0": {'value': True, 'type': 'bool'}, "t1":{'value': tdCom.getLongName(16374, "letters"), 'type': 'binary'}}} + self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None) + input_json = {"metric": stb_name, "timestamp": {'value': 1626006833639000000, 'type': 'ns'}, "value": {'value': True, 'type': 'bool'}, "tags": {"t0": {'value': True, 'type': 'bool'}, "t1":{'value': tdCom.getLongName(16375, "letters"), 'type': 'binary'}}} + try: + self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None) + raise Exception("should not reach here") + except SchemalessError as err: + tdSql.checkNotEqual(err.errno, 0) + + # # nchar + # # * legal nchar could not be larger than 16374/4 + stb_name = tdCom.getLongName(7, "letters") + input_json = {"metric": stb_name, "timestamp": {'value': 1626006833639000000, 'type': 'ns'}, "value": {'value': True, 'type': 'bool'}, "tags": {"t0": {'value': True, 'type': 'bool'}, "t1":{'value': tdCom.getLongName(4093, "letters"), 'type': 'nchar'}}} + self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None) + + input_json = {"metric": stb_name, "timestamp": {'value': 1626006833639000000, 'type': 'ns'}, "value": {'value': True, 'type': 'bool'}, "tags": {"t0": {'value': True, 'type': 'bool'}, "t1":{'value': tdCom.getLongName(4094, "letters"), 'type': 'nchar'}}} + try: + self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None) + raise Exception("should not reach here") + except SchemalessError as err: + tdSql.checkNotEqual(err.errno, 0) + elif value_type == "default": + stb_name = tdCom.getLongName(7, "letters") + if tdSql.getVariable("defaultJSONStrType")[0].lower() == "binary": + input_json = {"metric": stb_name, "timestamp": 1626006834, "value": True, "tags": {"t0": {'value': True, 'type': 'bool'}, "t1": tdCom.getLongName(16374, "letters")}} + elif tdSql.getVariable("defaultJSONStrType")[0].lower() == "nchar": + input_json = {"metric": stb_name, "timestamp": 1626006834, "value": True, "tags": {"t0": {'value': True, 'type': 'bool'}, "t1": tdCom.getLongName(4093, "letters")}} + self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None) + if 
tdSql.getVariable("defaultJSONStrType")[0].lower() == "binary": + input_json = {"metric": stb_name, "timestamp": 1626006834, "value": True, "tags": {"t0": {'value': True, 'type': 'bool'}, "t1": tdCom.getLongName(16375, "letters")}} + elif tdSql.getVariable("defaultJSONStrType")[0].lower() == "nchar": + input_json = {"metric": stb_name, "timestamp": 1626006834, "value": True, "tags": {"t0": {'value': True, 'type': 'bool'}, "t1": tdCom.getLongName(4094, "letters")}} + try: + self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None) + raise Exception("should not reach here") + except SchemalessError as err: + tdSql.checkNotEqual(err.errno, 0) + + def colValueLengthCheckCase(self, value_type="obj"): + """ + check full type col value limit + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + # i8 + for value in [-128, 127]: + input_json, stb_name = self.genFullTypeJson(col_value=self.genTsColValue(value=value, t_type="tinyint", value_type=value_type)) + self.resCmp(input_json, stb_name) + tdCom.cleanTb() + for value in [-129, 128]: + input_json = self.genFullTypeJson(col_value=self.genTsColValue(value=value, t_type="tinyint"))[0] + try: + self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None) + raise Exception("should not reach here") + except SchemalessError as err: + tdSql.checkNotEqual(err.errno, 0) + # i16 + tdCom.cleanTb() + for value in [-32768]: + input_json, stb_name = self.genFullTypeJson(col_value=self.genTsColValue(value=value, t_type="smallint", value_type=value_type)) + self.resCmp(input_json, stb_name) + tdCom.cleanTb() + for value in [-32769, 32768]: + input_json = self.genFullTypeJson(col_value=self.genTsColValue(value=value, t_type="smallint"))[0] + try: + self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None) + raise Exception("should not reach here") + except SchemalessError as err: + tdSql.checkNotEqual(err.errno, 0) + + # i32 + tdCom.cleanTb() + for value in [-2147483648]: + input_json, stb_name = self.genFullTypeJson(col_value=self.genTsColValue(value=value, t_type="int", value_type=value_type)) + self.resCmp(input_json, stb_name) + tdCom.cleanTb() + for value in [-2147483649, 2147483648]: + input_json = self.genFullTypeJson(col_value=self.genTsColValue(value=value, t_type="int"))[0] + try: + self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None) + raise Exception("should not reach here") + except SchemalessError as err: + tdSql.checkNotEqual(err.errno, 0) + + # i64 + tdCom.cleanTb() + for value in [-9223372036854775808]: + input_json, stb_name = self.genFullTypeJson(col_value=self.genTsColValue(value=value, t_type="bigint", value_type=value_type)) + self.resCmp(input_json, stb_name) + # ! 
bug + # tdCom.cleanTb() + # for value in [-9223372036854775809, 9223372036854775808]: + # print(value) + # input_json = self.genFullTypeJson(col_value=self.genTsColValue(value=value, t_type="bigint"))[0] + # print(json.dumps(input_json)) + # try: + # self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None) + # raise Exception("should not reach here") + # except SchemalessError as err: + # tdSql.checkNotEqual(err.errno, 0) + + # f32 + tdCom.cleanTb() + for value in [-3.4028234663852885981170418348451692544*(10**38), 3.4028234663852885981170418348451692544*(10**38)]: + input_json, stb_name = self.genFullTypeJson(col_value=self.genTsColValue(value=value, t_type="float", value_type=value_type)) + self.resCmp(input_json, stb_name) + # * limit set to 3.4028234664*(10**38) + tdCom.cleanTb() + for value in [-3.4028234664*(10**38), 3.4028234664*(10**38)]: + input_json = self.genFullTypeJson(col_value=self.genTsColValue(value=value, t_type="float"))[0] + try: + self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None) + raise Exception("should not reach here") + except SchemalessError as err: + tdSql.checkNotEqual(err.errno, 0) + + # f64 + tdCom.cleanTb() + for value in [-1.79769313486231570814527423731704356798070567525844996598917476803157260780*(10**308), 1.79769313486231570814527423731704356798070567525844996598917476803157260780*(10**308)]: + input_json, stb_name = self.genFullTypeJson(col_value=self.genTsColValue(value=value, t_type="double", value_type=value_type)) + self.resCmp(input_json, stb_name) + # * limit set to 1.797693134862316*(10**308) + tdCom.cleanTb() + for value in [-1.797693134862316*(10**308), 1.797693134862316*(10**308)]: + input_json = self.genFullTypeJson(col_value=self.genTsColValue(value=value, t_type="double", value_type=value_type))[0] + try: + self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None) + raise Exception("should not reach here") + except SchemalessError as err: + tdSql.checkNotEqual(err.errno, 0) + + # if value_type == "obj": + # # binary + # tdCom.cleanTb() + # stb_name = tdCom.getLongName(7, "letters") + # input_json = {"metric": stb_name, "timestamp": {'value': 1626006833639000000, 'type': 'ns'}, "value": {'value': tdCom.getLongName(16374, "letters"), 'type': 'binary'}, "tags": {"t0": {'value': True, 'type': 'bool'}}} + # self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None) + + # tdCom.cleanTb() + # input_json = {"metric": stb_name, "timestamp": {'value': 1626006833639000000, 'type': 'ns'}, "value": {'value': tdCom.getLongName(16375, "letters"), 'type': 'binary'}, "tags": {"t0": {'value': True, 'type': 'bool'}}} + # try: + # self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None) + # raise Exception("should not reach here") + # except SchemalessError as err: + # tdSql.checkNotEqual(err.errno, 0) + + # # nchar + # # * legal nchar could not be larger than 16374/4 + # tdCom.cleanTb() + # stb_name = tdCom.getLongName(7, "letters") + # input_json = {"metric": stb_name, "timestamp": {'value': 1626006833639000000, 'type': 'ns'}, "value": {'value': tdCom.getLongName(4093, "letters"), 'type': 'nchar'}, "tags": {"t0": {'value': True, 'type': 'bool'}}} + # self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None) + + # tdCom.cleanTb() + # input_json = {"metric": stb_name, "timestamp": {'value': 1626006833639000000, 'type': 'ns'}, "value": {'value': 
tdCom.getLongName(4094, "letters"), 'type': 'nchar'}, "tags": {"t0": {'value': True, 'type': 'bool'}}} + # try: + # self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None) + # raise Exception("should not reach here") + # except SchemalessError as err: + # tdSql.checkNotEqual(err.errno, 0) + # elif value_type == "default": + # # binary + # tdCom.cleanTb() + # stb_name = tdCom.getLongName(7, "letters") + # if tdSql.getVariable("defaultJSONStrType")[0].lower() == "binary": + # input_json = {"metric": stb_name, "timestamp": 1626006834, "value": tdCom.getLongName(16374, "letters"), "tags": {"t0": {'value': True, 'type': 'bool'}}} + # elif tdSql.getVariable("defaultJSONStrType")[0].lower() == "nchar": + # input_json = {"metric": stb_name, "timestamp": 1626006834, "value": tdCom.getLongName(4093, "letters"), "tags": {"t0": {'value': True, 'type': 'bool'}}} + # self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None) + # tdCom.cleanTb() + # if tdSql.getVariable("defaultJSONStrType")[0].lower() == "binary": + # input_json = {"metric": stb_name, "timestamp": 1626006834, "value": tdCom.getLongName(16375, "letters"), "tags": {"t0": {'value': True, 'type': 'bool'}}} + # elif tdSql.getVariable("defaultJSONStrType")[0].lower() == "nchar": + # input_json = {"metric": stb_name, "timestamp": 1626006834, "value": tdCom.getLongName(4094, "letters"), "tags": {"t0": {'value': True, 'type': 'bool'}}} + # try: + # self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None) + # raise Exception("should not reach here") + # except SchemalessError as err: + # tdSql.checkNotEqual(err.errno, 0) + + def tagColIllegalValueCheckCase(self, value_type="obj"): + + """ + test illegal tag col value + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + # bool + for i in ["TrUe", "tRue", "trUe", "truE", "FalsE", "fAlse", "faLse", "falSe", "falsE"]: + try: + input_json1 = self.genFullTypeJson(tag_value=self.genTagValue(t0_value=i))[0] + self._conn.schemaless_insert([json.dumps(input_json1)], 2, None) + input_json2 = self.genFullTypeJson(col_value=self.genTsColValue(value=i, t_type="bool"))[0] + self._conn.schemaless_insert([json.dumps(input_json2)], 2, None) + except SchemalessError as err: + tdSql.checkNotEqual(err.errno, 0) + + # i8 i16 i32 i64 f32 f64 + for input_json in [ + self.genFullTypeJson(tag_value=self.genTagValue(t1_value="1s2"))[0], + self.genFullTypeJson(tag_value=self.genTagValue(t2_value="1s2"))[0], + self.genFullTypeJson(tag_value=self.genTagValue(t3_value="1s2"))[0], + self.genFullTypeJson(tag_value=self.genTagValue(t4_value="1s2"))[0], + self.genFullTypeJson(tag_value=self.genTagValue(t5_value="11.1s45"))[0], + self.genFullTypeJson(tag_value=self.genTagValue(t6_value="11.1s45"))[0], + ]: + try: + self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None) + except SchemalessError as err: + tdSql.checkNotEqual(err.errno, 0) + + # check binary and nchar blank + input_sql1 = self.genFullTypeJson(col_value=self.genTsColValue(value="abc aaa", t_type="binary", value_type=value_type))[0] + input_sql2 = self.genFullTypeJson(col_value=self.genTsColValue(value="abc aaa", t_type="nchar", value_type=value_type))[0] + input_sql3 = self.genFullTypeJson(tag_value=self.genTagValue(t7_value="abc aaa", value_type=value_type))[0] + input_sql4 = self.genFullTypeJson(tag_value=self.genTagValue(t8_value="abc aaa", value_type=value_type))[0] + for input_json in 
[input_sql1, input_sql2, input_sql3, input_sql4]: + try: + self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None) + except SchemalessError as err: + tdSql.checkNotEqual(err.errno, 0) + + # check accepted binary and nchar symbols + # # * ~!@#$¥%^&*()-+={}|[]、「」:; + for symbol in list('~!@#$¥%^&*()-+={}|[]、「」:;'): + input_json1 = self.genFullTypeJson(col_value=self.genTsColValue(value=f"abc{symbol}aaa", t_type="binary", value_type=value_type))[0] + input_json2 = self.genFullTypeJson(tag_value=self.genTagValue(t8_value=f"abc{symbol}aaa", value_type=value_type))[0] + self._conn.schemaless_insert([json.dumps(input_json1)], TDSmlProtocolType.JSON.value, None) + self._conn.schemaless_insert([json.dumps(input_json2)], TDSmlProtocolType.JSON.value, None) + + def duplicateIdTagColInsertCheckCase(self, value_type="obj"): + """ + check duplicate Id Tag Col + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + input_json = self.genFullTypeJson(id_double_tag=True, value_type=value_type)[0] + try: + self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None) + raise Exception("should not reach here") + except SchemalessError as err: + tdSql.checkNotEqual(err.errno, 0) + + input_json = self.genFullTypeJson(tag_value=self.genTagValue(t5_value=11.12345027923584, t6_type="float", t6_value=22.12345027923584, value_type=value_type))[0] + try: + self._conn.schemaless_insert([json.dumps(input_json).replace("t6", "t5")], 2, None) + raise Exception("should not reach here") + except SchemalessError as err: + tdSql.checkNotEqual(err.errno, 0) + + ##### stb exist ##### + def noIdStbExistCheckCase(self, value_type="obj"): + """ + case no id when stb exist + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + input_json, stb_name = self.genFullTypeJson(tb_name="sub_table_0123456", col_value=self.genTsColValue(value=True, t_type="bool", value_type=value_type), tag_value=self.genTagValue(t0_value=True, value_type=value_type)) + self.resCmp(input_json, stb_name) + input_json, stb_name = self.genFullTypeJson(stb_name=stb_name, id_noexist_tag=True, col_value=self.genTsColValue(value=True, t_type="bool", value_type=value_type), tag_value=self.genTagValue(t0_value=True, value_type=value_type)) + self.resCmp(input_json, stb_name, condition='where tbname like "t_%"') + tdSql.query(f"select * from {stb_name}") + tdSql.checkRows(1) + + def duplicateInsertExistCheckCase(self, value_type="obj"): + """ + check duplicate insert when stb exist + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + input_json, stb_name = self.genFullTypeJson(value_type=value_type) + self.resCmp(input_json, stb_name) + self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None) + self.resCmp(input_json, stb_name) + + def tagColBinaryNcharLengthCheckCase(self, value_type="obj"): + """ + check length increase + """ + tdCom.cleanTb() + input_json, stb_name = self.genFullTypeJson(value_type=value_type) + self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None) + self.resCmp(input_json, stb_name) + tb_name = tdCom.getLongName(5, "letters") + input_json, stb_name = self.genFullTypeJson(stb_name=stb_name, tb_name=tb_name, tag_value=self.genTagValue(t7_value="binaryTagValuebinaryTagValue", t8_value="ncharTagValuencharTagValue", value_type=value_type)) + 
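# presumably the server widens the existing binary/nchar tag columns when longer values arrive (the length-increase path under test) +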
self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None) + self.resCmp(input_json, stb_name, condition=f'where tbname like "{tb_name}"') + + def lengthIcreaseCrashCheckCase(self): + """ + check length increase + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + stb_name = "test_crash" + input_json = self.genFullTypeJson(stb_name=stb_name)[0] + self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None) + os.system('python3 query/schemalessQueryCrash.py &') + time.sleep(2) + tb_name = tdCom.getLongName(5, "letters") + input_json, stb_name = self.genFullTypeJson(stb_name=stb_name, tb_name=tb_name, tag_value=self.genTagValue(t7_value="binaryTagValuebinaryTagValue", t8_value="ncharTagValuencharTagValue")) + self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None) + time.sleep(3) + tdSql.query(f"select * from {stb_name}") + tdSql.checkRows(2) + + def tagColAddDupIDCheckCase(self, value_type="obj"): + """ + check tag count add, stb and tb duplicate + * tag: alter table ... + * col: when update==0 and ts is same, unchange + * so this case tag&&value will be added, + * col is added without value when update==0 + * col is added with value when update==1 + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + tb_name = tdCom.getLongName(7, "letters") + for db_update_tag in [0, 1]: + if db_update_tag == 1 : + self.createDb("test_update", db_update_tag=db_update_tag) + input_json, stb_name = self.genFullTypeJson(tb_name=tb_name, col_value=self.genTsColValue(value=True, t_type="bool", value_type=value_type), tag_value=self.genTagValue(t0_value=True, value_type=value_type)) + self.resCmp(input_json, stb_name) + input_json, stb_name = self.genFullTypeJson(stb_name=stb_name, tb_name=tb_name, col_value=self.genTsColValue(value=False, t_type="bool", value_type=value_type), tag_value=self.genTagValue(t0_value=True, value_type=value_type), t_add_tag=True) + if db_update_tag == 1 : + self.resCmp(input_json, stb_name, condition=f'where tbname like "{tb_name}"', none_check_tag=True) + tdSql.query(f'select * from {stb_name} where tbname like "{tb_name}"') + tdSql.checkData(0, 11, None) + tdSql.checkData(0, 12, None) + else: + self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None) + tdSql.query(f'select * from {stb_name} where tbname like "{tb_name}"') + tdSql.checkData(0, 1, True) + tdSql.checkData(0, 11, None) + tdSql.checkData(0, 12, None) + self.createDb() + + def tagAddCheckCase(self, value_type="obj"): + """ + check tag count add + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + tb_name = tdCom.getLongName(7, "letters") + input_json, stb_name = self.genFullTypeJson(tb_name=tb_name, col_value=self.genTsColValue(value=True, t_type="bool", value_type=value_type), tag_value=self.genTagValue(t0_value=True, value_type=value_type)) + self.resCmp(input_json, stb_name) + tb_name_1 = tdCom.getLongName(7, "letters") + input_json, stb_name = self.genFullTypeJson(stb_name=stb_name, tb_name=tb_name_1, col_value=self.genTsColValue(value=True, t_type="bool", value_type=value_type), tag_value=self.genTagValue(t0_value=True, value_type=value_type), t_add_tag=True) + self.resCmp(input_json, stb_name, condition=f'where tbname like "{tb_name_1}"') + res_row_list = self.resHandle(f"select t10,t11 from {tb_name}", True)[0] + tdSql.checkEqual(res_row_list[0], 
['None', 'None']) + self.resCmp(input_json, stb_name, condition=f'where tbname like "{tb_name}"', none_check_tag=True) + + def tagMd5Check(self, value_type="obj"): + """ + condition: stb not change + insert two table, keep tag unchange, change col + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + input_json, stb_name = self.genFullTypeJson(col_value=self.genTsColValue(value=True, t_type="bool", value_type=value_type), tag_value=self.genTagValue(t0_value=True, value_type=value_type), id_noexist_tag=True) + self.resCmp(input_json, stb_name) + tb_name1 = self.getNoIdTbName(stb_name) + input_json, stb_name = self.genFullTypeJson(stb_name=stb_name, col_value=self.genTsColValue(value=True, t_type="bool", value_type=value_type), tag_value=self.genTagValue(t0_value=True, value_type=value_type), id_noexist_tag=True) + self.resCmp(input_json, stb_name) + tb_name2 = self.getNoIdTbName(stb_name) + tdSql.query(f"select * from {stb_name}") + tdSql.checkRows(1) + tdSql.checkEqual(tb_name1, tb_name2) + input_json, stb_name = self.genFullTypeJson(stb_name=stb_name, col_value=self.genTsColValue(value=True, t_type="bool", value_type=value_type), tag_value=self.genTagValue(t0_value=True, value_type=value_type), id_noexist_tag=True, t_add_tag=True) + self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None) + tb_name3 = self.getNoIdTbName(stb_name) + tdSql.query(f"select * from {stb_name}") + tdSql.checkRows(2) + tdSql.checkNotEqual(tb_name1, tb_name3) + + # * tag binary max is 16384, col+ts binary max 49151 + def tagColBinaryMaxLengthCheckCase(self, value_type="obj"): + """ + every binary and nchar must be length+2 + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + stb_name = tdCom.getLongName(7, "letters") + tb_name = f'{stb_name}_1' + tag_value = {"t0": {"value": True, "type": "bool"}} + tag_value["id"] = tb_name + col_value=self.genTsColValue(value=True, t_type="bool", value_type=value_type) + input_json = {"metric": stb_name, "timestamp": {"value": 1626006833639000000, "type": "ns"}, "value": col_value, "tags": tag_value} + self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None) + + # * every binary and nchar must be length+2, so here is two tag, max length could not larger than 16384-2*2 + if value_type == "obj": + tag_value["t1"] = {"value": tdCom.getLongName(16374, "letters"), "type": "binary"} + tag_value["t2"] = {"value": tdCom.getLongName(5, "letters"), "type": "binary"} + elif value_type == "default": + if tdSql.getVariable("defaultJSONStrType")[0].lower() == "binary": + tag_value["t1"] = tdCom.getLongName(16374, "letters") + tag_value["t2"] = tdCom.getLongName(5, "letters") + elif tdSql.getVariable("defaultJSONStrType")[0].lower() == "nchar": + tag_value["t1"] = tdCom.getLongName(4093, "letters") + tag_value["t2"] = tdCom.getLongName(1, "letters") + tag_value.pop('id') + self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None) + + tdSql.query(f"select * from {stb_name}") + tdSql.checkRows(2) + if value_type == "obj": + tag_value["t2"] = {"value": tdCom.getLongName(6, "letters"), "type": "binary"} + elif value_type == "default": + if tdSql.getVariable("defaultJSONStrType")[0].lower() == "binary": + tag_value["t2"] = tdCom.getLongName(6, "letters") + elif tdSql.getVariable("defaultJSONStrType")[0].lower() == "nchar": + tag_value["t2"] = tdCom.getLongName(2, "letters") + try: + 
self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None) + raise Exception("should not reach here") + except SchemalessError as err: + tdSql.checkNotEqual(err.errno, 0) + tdSql.query(f"select * from {stb_name}") + tdSql.checkRows(2) + + # * tag nchar max is 16374/4, col+ts nchar max 49151 + def tagColNcharMaxLengthCheckCase(self, value_type="obj"): + """ + check nchar length limit + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + stb_name = tdCom.getLongName(7, "letters") + tb_name = f'{stb_name}_1' + tag_value = {"t0": True} + tag_value["id"] = tb_name + col_value= True + input_json = {"metric": stb_name, "timestamp": {"value": 1626006833639000000, "type": "ns"}, "value": col_value, "tags": tag_value} + self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None) + + # * legal nchar could not be larger than 16374/4 + if value_type == "obj": + tag_value["t1"] = {"value": tdCom.getLongName(4093, "letters"), "type": "nchar"} + tag_value["t2"] = {"value": tdCom.getLongName(1, "letters"), "type": "nchar"} + elif value_type == "default": + if tdSql.getVariable("defaultJSONStrType")[0].lower() == "binary": + tag_value["t1"] = tdCom.getLongName(16374, "letters") + tag_value["t2"] = tdCom.getLongName(5, "letters") + elif tdSql.getVariable("defaultJSONStrType")[0].lower() == "nchar": + tag_value["t1"] = tdCom.getLongName(4093, "letters") + tag_value["t2"] = tdCom.getLongName(1, "letters") + tag_value.pop('id') + self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None) + tdSql.query(f"select * from {stb_name}") + tdSql.checkRows(2) + if value_type == "obj": + tag_value["t2"] = {"value": tdCom.getLongName(2, "letters"), "type": "binary"} + elif value_type == "default": + if tdSql.getVariable("defaultJSONStrType")[0].lower() == "binary": + tag_value["t2"] = tdCom.getLongName(6, "letters") + elif tdSql.getVariable("defaultJSONStrType")[0].lower() == "nchar": + tag_value["t2"] = tdCom.getLongName(2, "letters") + try: + self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None) + raise Exception("should not reach here") + except SchemalessError as err: + tdSql.checkNotEqual(err.errno, 0) + tdSql.query(f"select * from {stb_name}") + tdSql.checkRows(2) + + def batchInsertCheckCase(self, value_type="obj"): + """ + test batch insert + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + stb_name = "stb_name" + tdSql.execute(f'create stable {stb_name}(ts timestamp, f int) tags(t1 bigint)') + input_json = [{"metric": "st123456", "timestamp": {"value": 1626006833639000000, "type": "ns"}, "value": {"value": 1, "type": "bigint"}, "tags": {"t1": {"value": 3, "type": "bigint"}, "t2": {"value": 4, "type": "double"}, "t3": {"value": "t3", "type": "binary"}}}, + {"metric": "st123456", "timestamp": {"value": 1626006833640000000, "type": "ns"}, "value": {"value": 2, "type": "bigint"}, "tags": {"t1": {"value": 4, "type": "bigint"}, "t3": {"value": "t4", "type": "binary"}, "t2": {"value": 5, "type": "double"}, "t4": {"value": 5, "type": "double"}}}, + {"metric": "stb_name", "timestamp": {"value": 1626056811823316532, "type": "ns"}, "value": {"value": 3, "type": "bigint"}, "tags": {"t2": {"value": 5, "type": "double"}, "t3": {"value": "ste", "type": "nchar"}}}, + {"metric": "stf567890", "timestamp": {"value": 1626006933640000000, "type": "ns"}, "value": {"value": 4, "type": "bigint"}, "tags": {"t1": 
{"value": 4, "type": "bigint"}, "t3": {"value": "t4", "type": "binary"}, "t2": {"value": 5, "type": "double"}, "t4": {"value": 5, "type": "double"}}}, + {"metric": "st123456", "timestamp": {"value": 1626006833642000000, "type": "ns"}, "value": {"value": 5, "type": "bigint"}, "tags": {"t1": {"value": 4, "type": "bigint"}, "t2": {"value": 5, "type": "double"}, "t3": {"value": "t4", "type": "binary"}}}, + {"metric": "stb_name", "timestamp": {"value": 1626056811843316532, "type": "ns"}, "value": {"value": 6, "type": "bigint"}, "tags": {"t2": {"value": 5, "type": "double"}, "t3": {"value": "ste2", "type": "nchar"}}}, + {"metric": "stb_name", "timestamp": {"value": 1626056812843316532, "type": "ns"}, "value": {"value": 7, "type": "bigint"}, "tags": {"t2": {"value": 5, "type": "double"}, "t3": {"value": "ste2", "type": "nchar"}}}, + {"metric": "st123456", "timestamp": {"value": 1626006933640000000, "type": "ns"}, "value": {"value": 8, "type": "bigint"}, "tags": {"t1": {"value": 4, "type": "bigint"}, "t3": {"value": "t4", "type": "binary"}, "t2": {"value": 5, "type": "double"}, "t4": {"value": 5, "type": "double"}}}, + {"metric": "st123456", "timestamp": {"value": 1626006933641000000, "type": "ns"}, "value": {"value": 9, "type": "bigint"}, "tags": {"t1": {"value": 4, "type": "bigint"}, "t3": {"value": "t4", "type": "binary"}, "t2": {"value": 5, "type": "double"}, "t4": {"value": 5, "type": "double"}}}] + if value_type != "obj": + input_json = [{"metric": "st123456", "timestamp": {"value": 1626006833639000000, "type": "ns"}, "value": 1, "tags": {"t1": 3, "t2": {"value": 4, "type": "double"}, "t3": {"value": "t3", "type": "binary"}}}, + {"metric": "st123456", "timestamp": {"value": 1626006833640000000, "type": "ns"}, "value": 2, "tags": {"t1": {"value": 4, "type": "double"}, "t3": {"value": "t4", "type": "binary"}, "t2": {"value": 5, "type": "double"}, "t4": {"value": 5, "type": "double"}}}, + {"metric": "stb_name", "timestamp": {"value": 1626056811823316532, "type": "ns"}, "value": 3, "tags": {"t2": {"value": 5, "type": "double"}, "t3": {"value": "ste", "type": "nchar"}}}, + {"metric": "stf567890", "timestamp": {"value": 1626006933640000000, "type": "ns"}, "value": 4, "tags": {"t1": {"value": 4, "type": "bigint"}, "t3": {"value": "t4", "type": "binary"}, "t2": {"value": 5, "type": "double"}, "t4": {"value": 5, "type": "double"}}}, + {"metric": "st123456", "timestamp": {"value": 1626006833642000000, "type": "ns"}, "value": {"value": 5, "type": "double"}, "tags": {"t1": {"value": 4, "type": "double"}, "t2": 5.0, "t3": {"value": "t4", "type": "binary"}}}, + {"metric": "stb_name", "timestamp": {"value": 1626056811843316532, "type": "ns"}, "value": {"value": 6, "type": "double"}, "tags": {"t2": 5.0, "t3": {"value": "ste2", "type": "nchar"}}}, + {"metric": "stb_name", "timestamp": {"value": 1626056812843316532, "type": "ns"}, "value": {"value": 7, "type": "double"}, "tags": {"t2": {"value": 5, "type": "double"}, "t3": {"value": "ste2", "type": "nchar"}}}, + {"metric": "st123456", "timestamp": {"value": 1626006933640000000, "type": "ns"}, "value": {"value": 8, "type": "double"}, "tags": {"t1": {"value": 4, "type": "double"}, "t3": {"value": "t4", "type": "binary"}, "t2": {"value": 5, "type": "double"}, "t4": {"value": 5, "type": "double"}}}, + {"metric": "st123456", "timestamp": {"value": 1626006933641000000, "type": "ns"}, "value": {"value": 9, "type": "double"}, "tags": {"t1": 4, "t3": {"value": "t4", "type": "binary"}, "t2": {"value": 5, "type": "double"}, "t4": {"value": 5, "type": "double"}}}] + 
self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None) + tdSql.query('show stables') + tdSql.checkRows(3) + tdSql.query('show tables') + tdSql.checkRows(6) + tdSql.query('select * from st123456') + tdSql.checkRows(5) + + def multiInsertCheckCase(self, count, value_type="obj"): + """ + test multi insert + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + sql_list = list() + stb_name = tdCom.getLongName(8, "letters") + tdSql.execute(f'create stable {stb_name}(ts timestamp, f int) tags(t1 bigint)') + for i in range(count): + input_json = self.genFullTypeJson(stb_name=stb_name, col_value=self.genTsColValue(value=tdCom.getLongName(8, "letters"), t_type="binary", value_type=value_type), tag_value=self.genTagValue(t7_value=tdCom.getLongName(8, "letters"), value_type=value_type), id_noexist_tag=True)[0] + sql_list.append(input_json) + self._conn.schemaless_insert([json.dumps(sql_list)], TDSmlProtocolType.JSON.value, None) + tdSql.query('show tables') + tdSql.checkRows(count) + + def batchErrorInsertCheckCase(self): + """ + test batch error insert + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + input_json = [{"metric": "st123456", "timestamp": {"value": 1626006833639000000, "type": "ns"}, "value": {"value": "tt", "type": "bool"}, "tags": {"t1": {"value": 3, "type": "bigint"}, "t2": {"value": 4, "type": "double"}, "t3": {"value": "t3", "type": "binary"}}}, + {"metric": "st123456", "timestamp": {"value": 1626006933641000000, "type": "ns"}, "value": {"value": 9, "type": "bigint"}, "tags": {"t1": {"value": 4, "type": "bigint"}, "t3": {"value": "t4", "type": "binary"}, "t2": {"value": 5, "type": "double"}, "t4": {"value": 5, "type": "double"}}}] + try: + self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None) + raise Exception("should not reach here") + except SchemalessError as err: + tdSql.checkNotEqual(err.errno, 0) + + def multiColsInsertCheckCase(self, value_type="obj"): + """ + test multi cols insert + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + input_json = self.genFullTypeJson(c_multi_tag=True, value_type=value_type)[0] + try: + self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None) + raise Exception("should not reach here") + except SchemalessError as err: + tdSql.checkNotEqual(err.errno, 0) + + def blankColInsertCheckCase(self, value_type="obj"): + """ + test blank col insert + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + input_json = self.genFullTypeJson(c_blank_tag=True, value_type=value_type)[0] + try: + self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None) + raise Exception("should not reach here") + except SchemalessError as err: + tdSql.checkNotEqual(err.errno, 0) + + def blankTagInsertCheckCase(self, value_type="obj"): + """ + test blank tag insert + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + input_json = self.genFullTypeJson(t_blank_tag=True, value_type=value_type)[0] + try: + self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None) + raise Exception("should not reach here") + except SchemalessError as err: + tdSql.checkNotEqual(err.errno, 0) + + def chineseCheckCase(self): + """ + check nchar ---> chinese + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() 
function is running') + tdCom.cleanTb() + input_json, stb_name = self.genFullTypeJson(chinese_tag=True) + self.resCmp(input_json, stb_name) + + def multiFieldCheckCase(self, value_type="obj"): + ''' + multi_field + ''' + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + input_json = self.genFullTypeJson(multi_field_tag=True, value_type=value_type)[0] + try: + self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None) + raise Exception("should not reach here") + except SchemalessError as err: + tdSql.checkNotEqual(err.errno, 0) + + def spellCheckCase(self): + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + stb_name = tdCom.getLongName(8, "letters") + input_json_list = [{"metric": f'{stb_name}_1', "timestamp": {"value": 1626006833639000000, "type": "Ns"}, "value": {"value": 1, "type": "Bigint"}, "tags": {"t1": {"value": 127, "type": "tinYint"}}}, + {"metric": f'{stb_name}_2', "timestamp": {"value": 1626006833639000001, "type": "nS"}, "value": {"value": 32767, "type": "smallInt"}, "tags": {"t1": {"value": 32767, "type": "smallInt"}}}, + {"metric": f'{stb_name}_3', "timestamp": {"value": 1626006833639000002, "type": "NS"}, "value": {"value": 2147483647, "type": "iNt"}, "tags": {"t1": {"value": 2147483647, "type": "iNt"}}}, + {"metric": f'{stb_name}_4', "timestamp": {"value": 1626006833639019, "type": "Us"}, "value": {"value": 9223372036854775807, "type": "bigInt"}, "tags": {"t1": {"value": 9223372036854775807, "type": "bigInt"}}}, + {"metric": f'{stb_name}_5', "timestamp": {"value": 1626006833639018, "type": "uS"}, "value": {"value": 11.12345027923584, "type": "flOat"}, "tags": {"t1": {"value": 11.12345027923584, "type": "flOat"}}}, + {"metric": f'{stb_name}_6', "timestamp": {"value": 1626006833639017, "type": "US"}, "value": {"value": 22.123456789, "type": "douBle"}, "tags": {"t1": {"value": 22.123456789, "type": "douBle"}}}, + {"metric": f'{stb_name}_7', "timestamp": {"value": 1626006833640, "type": "Ms"}, "value": {"value": "vozamcts", "type": "binaRy"}, "tags": {"t1": {"value": "vozamcts", "type": "binaRy"}}}, + {"metric": f'{stb_name}_8', "timestamp": {"value": 1626006833641, "type": "mS"}, "value": {"value": "vozamcts", "type": "nchAr"}, "tags": {"t1": {"value": "vozamcts", "type": "nchAr"}}}, + {"metric": f'{stb_name}_9', "timestamp": {"value": 1626006833642, "type": "MS"}, "value": {"value": "vozamcts", "type": "nchAr"}, "tags": {"t1": {"value": "vozamcts", "type": "nchAr"}}}, + {"metric": f'{stb_name}_10', "timestamp": {"value": 1626006834, "type": "S"}, "value": {"value": "vozamcts", "type": "nchAr"}, "tags": {"t1": {"value": "vozamcts", "type": "nchAr"}}}] + + for input_sql in input_json_list: + stb_name = input_sql["metric"] + self.resCmp(input_sql, stb_name) + + def tbnameTagsColsNameCheckCase(self): + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + input_json = {'metric': 'rFa$sta', 'timestamp': {'value': 1626006834, 'type': 's'}, 'value': {'value': True, 'type': 'bool'}, 'tags': {'Tt!0': {'value': False, 'type': 'bool'}, 'tT@1': {'value': 127, 'type': 'tinyint'}, 't@2': {'value': 32767, 'type': 'smallint'}, 't$3': {'value': 2147483647, 'type': 'int'}, 't%4': {'value': 9223372036854775807, 'type': 'bigint'}, 't^5': {'value': 11.12345027923584, 'type': 'float'}, 't&6': {'value': 22.123456789, 'type': 'double'}, 't*7': {'value': 'binaryTagValue', 'type': 'binary'}, 't!@#$%^&*()_+[];:<>?,9': {'value': 'ncharTagValue', 'type': 
'nchar'}, 'id': 'rFas$ta_1'}} + self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None) + query_sql = 'select * from `rFa$sta`' + query_res = tdSql.query(query_sql, True) + tdSql.checkEqual(query_res, [(datetime.datetime(2021, 7, 11, 20, 33, 54), True, 'rFas$ta_1', 'ncharTagValue', 2147483647, 9223372036854775807, 22.123456789, 'binaryTagValue', 32767, 11.12345027923584, False, 127)]) + col_tag_res = tdSql.getColNameList(query_sql) + tdSql.checkEqual(col_tag_res, ['_ts', '_value', 'id', 't!@#$%^&*()_+[];:<>?,9', 't$3', 't%4', 't&6', 't*7', 't@2', 't^5', 'Tt!0', 'tT@1']) + tdSql.execute('drop table `rFa$sta`') + + def pointTransCheckCase(self, value_type="obj"): + """ + metric value "." trans to "_" + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + input_json = self.genFullTypeJson(point_trans_tag=True, value_type=value_type)[0] + self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None) + tdSql.execute("drop table `.point.trans.test`") + + def genSqlList(self, count=5, stb_name="", tb_name="", value_type="obj"): + """ + stb --> supertable + tb --> table + ts --> timestamp, same default + col --> column, same default + tag --> tag, same default + d --> different + s --> same + a --> add + m --> minus + """ + d_stb_d_tb_list = list() + s_stb_s_tb_list = list() + s_stb_s_tb_a_tag_list = list() + s_stb_s_tb_m_tag_list = list() + s_stb_d_tb_list = list() + s_stb_d_tb_m_tag_list = list() + s_stb_d_tb_a_tag_list = list() + s_stb_s_tb_d_ts_list = list() + s_stb_s_tb_d_ts_m_tag_list = list() + s_stb_s_tb_d_ts_a_tag_list = list() + s_stb_d_tb_d_ts_list = list() + s_stb_d_tb_d_ts_m_tag_list = list() + s_stb_d_tb_d_ts_a_tag_list = list() + for i in range(count): + d_stb_d_tb_list.append(self.genFullTypeJson(col_value=self.genTsColValue(value=True, t_type="bool", value_type=value_type), tag_value=self.genTagValue(t0_value=True, value_type=value_type))) + s_stb_s_tb_list.append(self.genFullTypeJson(stb_name=stb_name, tb_name=tb_name, col_value=self.genTsColValue(value=tdCom.getLongName(8, "letters"), t_type="binary", value_type=value_type), tag_value=self.genTagValue(t7_value=tdCom.getLongName(8, "letters"), value_type=value_type))) + s_stb_s_tb_a_tag_list.append(self.genFullTypeJson(stb_name=stb_name, tb_name=tb_name, col_value=self.genTsColValue(value=tdCom.getLongName(8, "letters"), t_type="binary", value_type=value_type), tag_value=self.genTagValue(t7_value=tdCom.getLongName(8, "letters"), value_type=value_type), t_add_tag=True)) + s_stb_s_tb_m_tag_list.append(self.genFullTypeJson(stb_name=stb_name, tb_name=tb_name, col_value=self.genTsColValue(value=tdCom.getLongName(8, "letters"), t_type="binary", value_type=value_type), tag_value=self.genTagValue(t7_value=tdCom.getLongName(8, "letters"), value_type=value_type), t_mul_tag=True)) + s_stb_d_tb_list.append(self.genFullTypeJson(stb_name=stb_name, col_value=self.genTsColValue(value=tdCom.getLongName(8, "letters"), t_type="binary", value_type=value_type), tag_value=self.genTagValue(t7_value=tdCom.getLongName(8, "letters"), value_type=value_type), id_noexist_tag=True)) + s_stb_d_tb_m_tag_list.append(self.genFullTypeJson(stb_name=stb_name, col_value=self.genTsColValue(value=tdCom.getLongName(8, "letters"), t_type="binary", value_type=value_type), tag_value=self.genTagValue(t7_value=tdCom.getLongName(8, "letters"), value_type=value_type), id_noexist_tag=True, t_mul_tag=True)) + 
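+            # The list names above follow the abbreviation key in this method's
+            # docstring (s=same, d=different, a=add, m=minus). A hypothetical
+            # helper, for illustration only, that expands such a name:
+            #
+            #     def describe_case(name):
+            #         parts = {"s_stb": "same super table", "d_stb": "different super tables",
+            #                  "s_tb": "same child table", "d_tb": "different child tables",
+            #                  "d_ts": "different timestamps", "a_tag": "tags added",
+            #                  "m_tag": "tags removed"}
+            #         return ", ".join(v for k, v in parts.items() if k in name)
+            #
+            #     # describe_case("s_stb_d_tb_a_tag")
+            #     # -> "same super table, different child tables, tags added"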
s_stb_d_tb_a_tag_list.append(self.genFullTypeJson(stb_name=stb_name, col_value=self.genTsColValue(value=tdCom.getLongName(8, "letters"), t_type="binary", value_type=value_type), tag_value=self.genTagValue(t7_value=tdCom.getLongName(8, "letters"), value_type=value_type), id_noexist_tag=True, t_add_tag=True)) + s_stb_s_tb_d_ts_list.append(self.genFullTypeJson(stb_name=stb_name, tb_name=tb_name, col_value=self.genTsColValue(value=tdCom.getLongName(8, "letters"), t_type="binary", value_type=value_type), tag_value=self.genTagValue(t7_value=tdCom.getLongName(8, "letters"), value_type=value_type), ts_value = self.genTsColValue(1626006833639000000, "ns"))) + s_stb_s_tb_d_ts_m_tag_list.append(self.genFullTypeJson(stb_name=stb_name, tb_name=tb_name, col_value=self.genTsColValue(value=tdCom.getLongName(8, "letters"), t_type="binary", value_type=value_type), tag_value=self.genTagValue(t7_value=tdCom.getLongName(8, "letters"), value_type=value_type), ts_value = self.genTsColValue(1626006833639000000, "ns"), t_mul_tag=True)) + s_stb_s_tb_d_ts_a_tag_list.append(self.genFullTypeJson(stb_name=stb_name, tb_name=tb_name, col_value=self.genTsColValue(value=tdCom.getLongName(8, "letters"), t_type="binary", value_type=value_type), tag_value=self.genTagValue(t7_value=tdCom.getLongName(8, "letters"), value_type=value_type), ts_value = self.genTsColValue(1626006833639000000, "ns"), t_add_tag=True)) + s_stb_d_tb_d_ts_list.append(self.genFullTypeJson(stb_name=stb_name, col_value=self.genTsColValue(value=tdCom.getLongName(8, "letters"), t_type="binary", value_type=value_type), tag_value=self.genTagValue(t7_value=tdCom.getLongName(8, "letters"), value_type=value_type), id_noexist_tag=True, ts_value = self.genTsColValue(1626006833639000000, "ns"))) + s_stb_d_tb_d_ts_m_tag_list.append(self.genFullTypeJson(stb_name=stb_name, col_value=self.genTsColValue(value=tdCom.getLongName(8, "letters"), t_type="binary", value_type=value_type), tag_value=self.genTagValue(t7_value=tdCom.getLongName(8, "letters"), value_type=value_type), id_noexist_tag=True, ts_value = self.genTsColValue(0, "ns"), t_mul_tag=True)) + s_stb_d_tb_d_ts_a_tag_list.append(self.genFullTypeJson(stb_name=stb_name, col_value=self.genTsColValue(value=tdCom.getLongName(8, "letters"), t_type="binary", value_type=value_type), tag_value=self.genTagValue(t7_value=tdCom.getLongName(8, "letters"), value_type=value_type), id_noexist_tag=True, ts_value = self.genTsColValue(0, "ns"), t_add_tag=True)) + + return d_stb_d_tb_list, s_stb_s_tb_list, s_stb_s_tb_a_tag_list, s_stb_s_tb_m_tag_list, \ + s_stb_d_tb_list, s_stb_d_tb_m_tag_list, s_stb_d_tb_a_tag_list, s_stb_s_tb_d_ts_list, \ + s_stb_s_tb_d_ts_m_tag_list, s_stb_s_tb_d_ts_a_tag_list, s_stb_d_tb_d_ts_list, \ + s_stb_d_tb_d_ts_m_tag_list, s_stb_d_tb_d_ts_a_tag_list + + def genMultiThreadSeq(self, sql_list): + tlist = list() + for insert_sql in sql_list: + t = threading.Thread(target=self._conn.schemaless_insert,args=([json.dumps(insert_sql[0])], TDSmlProtocolType.JSON.value, None)) + tlist.append(t) + return tlist + + def multiThreadRun(self, tlist): + for t in tlist: + t.start() + for t in tlist: + t.join() + + def stbInsertMultiThreadCheckCase(self, value_type="obj"): + """ + thread input different stb + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + input_json = self.genSqlList(value_type=value_type)[0] + self.multiThreadRun(self.genMultiThreadSeq(input_json)) + tdSql.query(f"show tables;") + tdSql.checkRows(5) + + def sStbStbDdataInsertMultiThreadCheckCase(self, 
value_type="obj"): + """ + thread input same stb tb, different data, result keep first data + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + tb_name = tdCom.getLongName(7, "letters") + input_json, stb_name = self.genFullTypeJson(tb_name=tb_name, col_value=self.genTsColValue(value="binaryTagValue", t_type="binary", value_type=value_type)) + self.resCmp(input_json, stb_name) + s_stb_s_tb_list = self.genSqlList(stb_name=stb_name, tb_name=tb_name, value_type=value_type)[1] + self.multiThreadRun(self.genMultiThreadSeq(s_stb_s_tb_list)) + tdSql.query(f"show tables;") + tdSql.checkRows(1) + expected_tb_name = self.getNoIdTbName(stb_name)[0] + tdSql.checkEqual(tb_name, expected_tb_name) + tdSql.query(f"select * from {stb_name};") + tdSql.checkRows(1) + + def sStbStbDdataAtInsertMultiThreadCheckCase(self, value_type="obj"): + """ + thread input same stb tb, different data, add columes and tags, result keep first data + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + tb_name = tdCom.getLongName(7, "letters") + input_json, stb_name = self.genFullTypeJson(tb_name=tb_name, col_value=self.genTsColValue(value="binaryTagValue", t_type="binary", value_type=value_type)) + self.resCmp(input_json, stb_name) + s_stb_s_tb_a_tag_list = self.genSqlList(stb_name=stb_name, tb_name=tb_name, value_type=value_type)[2] + self.multiThreadRun(self.genMultiThreadSeq(s_stb_s_tb_a_tag_list)) + tdSql.query(f"show tables;") + tdSql.checkRows(1) + expected_tb_name = self.getNoIdTbName(stb_name)[0] + tdSql.checkEqual(tb_name, expected_tb_name) + tdSql.query(f"select * from {stb_name};") + tdSql.checkRows(1) + + def sStbStbDdataMtInsertMultiThreadCheckCase(self, value_type="obj"): + """ + thread input same stb tb, different data, minus columes and tags, result keep first data + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + tb_name = tdCom.getLongName(7, "letters") + input_json, stb_name = self.genFullTypeJson(tb_name=tb_name, col_value=self.genTsColValue(value="binaryTagValue", t_type="binary", value_type=value_type)) + self.resCmp(input_json, stb_name) + s_stb_s_tb_m_tag_list = self.genSqlList(stb_name=stb_name, tb_name=tb_name, value_type=value_type)[3] + self.multiThreadRun(self.genMultiThreadSeq(s_stb_s_tb_m_tag_list)) + tdSql.query(f"show tables;") + tdSql.checkRows(1) + expected_tb_name = self.getNoIdTbName(stb_name)[0] + tdSql.checkEqual(tb_name, expected_tb_name) + tdSql.query(f"select * from {stb_name};") + tdSql.checkRows(1) + + def sStbDtbDdataInsertMultiThreadCheckCase(self, value_type="obj"): + """ + thread input same stb, different tb, different data + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + input_json, stb_name = self.genFullTypeJson(col_value=self.genTsColValue(value="binaryTagValue", t_type="binary", value_type=value_type)) + self.resCmp(input_json, stb_name) + s_stb_d_tb_list = self.genSqlList(stb_name=stb_name, value_type=value_type)[4] + self.multiThreadRun(self.genMultiThreadSeq(s_stb_d_tb_list)) + tdSql.query(f"show tables;") + tdSql.checkRows(6) + + def sStbDtbDdataMtInsertMultiThreadCheckCase(self): + """ + thread input same stb, different tb, different data, add col, mul tag + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + input_json, stb_name = self.genFullTypeJson(col_value=self.genTsColValue(value="binaryTagValue", t_type="binary")) + 
self.resCmp(input_json, stb_name) + s_stb_d_tb_m_tag_list = [({"metric": stb_name, "timestamp": {"value": 1626006833639000000, "type": "ns"}, "value": "omfdhyom", "tags": {"t0": {"value": False, "type": "bool"}, "t1": {"value": 127, "type": "tinyint"}, "t2": {"value": 32767, "type": "smallint"}, "t3": {"value": 2147483647, "type": "int"}, "t4": {"value": 9223372036854775807, "type": "bigint"}, "t5": {"value": 11.12345, "type": "float"}, "t6": {"value": 22.123456789, "type": "double"}}}, 'yzwswz'), + ({"metric": stb_name, "timestamp": {"value": 1626006833639000000, "type": "ns"}, "value": "vqowydbc", "tags": {"t0": {"value": False, "type": "bool"}, "t1": {"value": 127, "type": "tinyint"}, "t2": {"value": 32767, "type": "smallint"}, "t3": {"value": 2147483647, "type": "int"}, "t4": {"value": 9223372036854775807, "type": "bigint"}, "t5": {"value": 11.12345, "type": "float"}, "t6": {"value": 22.123456789, "type": "double"}}}, 'yzwswz'), + ({"metric": stb_name, "timestamp": {"value": 1626006833639000000, "type": "ns"}, "value": "plgkckpv", "tags": {"t0": {"value": False, "type": "bool"}, "t1": {"value": 127, "type": "tinyint"}, "t2": {"value": 32767, "type": "smallint"}, "t3": {"value": 2147483647, "type": "int"}, "t4": {"value": 9223372036854775807, "type": "bigint"}, "t5": {"value": 11.12345, "type": "float"}, "t6": {"value": 22.123456789, "type": "double"}}}, 'yzwswz'), + ({"metric": stb_name, "timestamp": {"value": 1626006833639000000, "type": "ns"}, "value": "cujyqvlj", "tags": {"t0": {"value": False, "type": "bool"}, "t1": {"value": 127, "type": "tinyint"}, "t2": {"value": 32767, "type": "smallint"}, "t3": {"value": 2147483647, "type": "int"}, "t4": {"value": 9223372036854775807, "type": "bigint"}, "t5": {"value": 11.12345, "type": "float"}, "t6": {"value": 22.123456789, "type": "double"}}}, 'yzwswz'), + ({"metric": stb_name, "timestamp": {"value": 1626006833639000000, "type": "ns"}, "value": "twjxisat", "tags": {"t0": {"value": False, "type": "bool"}, "t1": {"value": 127, "type": "tinyint"}, "t2": {"value": 32767, "type": "smallint"}, "t3": {"value": 2147483647, "type": "int"}, "t4": {"value": 9223372036854775807, "type": "bigint"}, "t5": {"value": 11.12345, "type": "float"}, "t6": {"value": 22.123456789, "type": "double"}}}, 'yzwswz')] + + self.multiThreadRun(self.genMultiThreadSeq(s_stb_d_tb_m_tag_list)) + tdSql.query(f"show tables;") + tdSql.checkRows(2) + + def sStbDtbDdataAtInsertMultiThreadCheckCase(self, value_type="obj"): + """ + thread input same stb, different tb, different data, add tag, mul col + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + input_json, stb_name = self.genFullTypeJson(col_value=self.genTsColValue(value="binaryTagValue", t_type="binary", value_type=value_type)) + self.resCmp(input_json, stb_name) + s_stb_d_tb_a_tag_list = self.genSqlList(stb_name=stb_name, value_type=value_type)[6] + self.multiThreadRun(self.genMultiThreadSeq(s_stb_d_tb_a_tag_list)) + tdSql.query(f"show tables;") + tdSql.checkRows(6) + + def sStbStbDdataDtsInsertMultiThreadCheckCase(self): + """ + thread input same stb tb, different ts + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + tb_name = tdCom.getLongName(7, "letters") + input_json, stb_name = self.genFullTypeJson(tb_name=tb_name, col_value=self.genTsColValue(value="binaryTagValue", t_type="binary")) + self.resCmp(input_json, stb_name) + s_stb_s_tb_d_ts_list = [({"metric": stb_name, "timestamp": {"value": 0, "type": "ns"}, "value": 
"hkgjiwdj", "tags": {"id": tb_name, "t0": {"value": False, "type": "bool"}, "t1": {"value": 127, "type": "tinyint"}, "t2": {"value": 32767, "type": "smallint"}, "t3": {"value": 2147483647, "type": "int"}, "t4": {"value": 9223372036854775807, "type": "bigint"}, "t5": {"value": 11.12345, "type": "float"}, "t6": {"value": 22.123456789, "type": "double"}, "t7": {"value": "vozamcts", "type": "binary"}, "t8": {"value": "ncharTagValue", "type": "nchar"}}}, 'yzwswz'), + ({"metric": stb_name, "timestamp": {"value": 0, "type": "ns"}, "value": "rljjrrul", "tags": {"id": tb_name, "t0": {"value": False, "type": "bool"}, "t1": {"value": 127, "type": "tinyint"}, "t2": {"value": 32767, "type": "smallint"}, "t3": {"value": 2147483647, "type": "int"}, "t4": {"value": 9223372036854775807, "type": "bigint"}, "t5": {"value": 11.12345, "type": "float"}, "t6": {"value": 22.123456789, "type": "double"}, "t7": {"value": "bmcanhbs", "type": "binary"}, "t8": {"value": "ncharTagValue", "type": "nchar"}}}, 'yzwswz'), + ({"metric": stb_name, "timestamp": {"value": 0, "type": "ns"}, "value": "basanglx", "tags": {"id": tb_name, "t0": {"value": False, "type": "bool"}, "t1": {"value": 127, "type": "tinyint"}, "t2": {"value": 32767, "type": "smallint"}, "t3": {"value": 2147483647, "type": "int"}, "t4": {"value": 9223372036854775807, "type": "bigint"}, "t5": {"value": 11.12345, "type": "float"}, "t6": {"value": 22.123456789, "type": "double"}, "t7": {"value": "enqkyvmb", "type": "binary"}, "t8": {"value": "ncharTagValue", "type": "nchar"}}}, 'yzwswz'), + ({"metric": stb_name, "timestamp": {"value": 0, "type": "ns"}, "value": "clsajzpp", "tags": {"id": tb_name, "t0": {"value": False, "type": "bool"}, "t1": {"value": 127, "type": "tinyint"}, "t2": {"value": 32767, "type": "smallint"}, "t3": {"value": 2147483647, "type": "int"}, "t4": {"value": 9223372036854775807, "type": "bigint"}, "t5": {"value": 11.12345, "type": "float"}, "t6": {"value": 22.123456789, "type": "double"}, "t7": {"value": "eivaegjk", "type": "binary"}, "t8": {"value": "ncharTagValue", "type": "nchar"}}}, 'yzwswz'), + ({"metric": stb_name, "timestamp": {"value": 0, "type": "ns"}, "value": "jitwseso", "tags": {"id": tb_name, "t0": {"value": True, "type": "bool"}, "t1": {"value": 127, "type": "tinyint"}, "t2": {"value": 32767, "type": "smallint"}, "t3": {"value": 2147483647, "type": "int"}, "t4": {"value": 9223372036854775807, "type": "bigint"}, "t5": {"value": 11.12345, "type": "float"}, "t6": {"value": 22.123456789, "type": "double"}, "t7": {"value": "yhlwkddq", "type": "binary"}, "t8": {"value": "ncharTagValue", "type": "nchar"}}}, 'yzwswz')] + self.multiThreadRun(self.genMultiThreadSeq(s_stb_s_tb_d_ts_list)) + tdSql.query(f"show tables;") + tdSql.checkRows(1) + tdSql.query(f"select * from {stb_name}") + tdSql.checkRows(6) + + def sStbStbDdataDtsMtInsertMultiThreadCheckCase(self): + """ + thread input same stb tb, different ts, add col, mul tag + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + tb_name = tdCom.getLongName(7, "letters") + input_json, stb_name = self.genFullTypeJson(tb_name=tb_name, col_value=self.genTsColValue(value="binaryTagValue", t_type="binary")) + self.resCmp(input_json, stb_name) + s_stb_s_tb_d_ts_m_tag_list = [({'metric': stb_name, 'timestamp': {'value': 0, 'type': 'ns'}, 'value': 'pjndapjb', 'tags': {'t0': {'value': False, 'type': 'bool'}, 't1': {'value': 127, 'type': 'tinyint'}, 't2': {'value': 32767, 'type': 'smallint'}, 't3': {'value': 2147483647, 'type': 'int'}, 't4': {"value": 
9223372036854775807, "type": "bigint"}, 't5': {'value': 11.12345027923584, 'type': 'float'}, 't6': {'value': 22.123456789, 'type': 'double'}, 't7': {'value': 'tuzsfrom', 'type': 'binary'}, 'id': tb_name}}, 'punftb'), + ({'metric': stb_name, 'timestamp': {'value': 0, 'type': 'ns'}, 'value': 'llqzvgvw', 'tags': {'t0': {'value': False, 'type': 'bool'}, 't1': {'value': 127, 'type': 'tinyint'}, 't2': {'value': 32767, 'type': 'smallint'}, 't3': {'value': 2147483647, 'type': 'int'}, 't4': {"value": 9223372036854775807, "type": "bigint"}, 't5': {'value': 11.12345027923584, 'type': 'float'}, 't6': {'value': 22.123456789, 'type': 'double'}, 't7': {'value': 'nttjdzgi', 'type': 'binary'}, 'id': tb_name}}, 'punftb'), + ({'metric': stb_name, 'timestamp': {'value': 0, 'type': 'ns'}, 'value': 'tclbosqc', 'tags': {'t0': {'value': False, 'type': 'bool'}, 't1': {'value': 127, 'type': 'tinyint'}, 't2': {'value': 32767, 'type': 'smallint'}, 't3': {'value': 2147483647, 'type': 'int'}, 't4': {"value": 9223372036854775807, "type": "bigint"}, 't5': {'value': 11.12345027923584, 'type': 'float'}, 't6': {'value': 22.123456789, 'type': 'double'}, 't7': {'value': 'uatpzgpi', 'type': 'binary'}, 'id': tb_name}}, 'punftb'), + ({'metric': stb_name, 'timestamp': {'value': 0, 'type': 'ns'}, 'value': 'rlpuzodt', 'tags': {'t0': {'value': True, 'type': 'bool'}, 't1': {'value': 127, 'type': 'tinyint'}, 't2': {'value': 32767, 'type': 'smallint'}, 't3': {'value': 2147483647, 'type': 'int'}, 't4': {"value": 9223372036854775807, "type": "bigint"}, 't5': {'value': 11.12345027923584, 'type': 'float'}, 't6': {'value': 22.123456789, 'type': 'double'}, 't7': {'value': 'cwnpdnng', 'type': 'binary'}, 'id': tb_name}}, 'punftb'), + ({'metric': stb_name, 'timestamp': {'value': 0, 'type': 'ns'}, 'value': 'rhnikvfq', 'tags': {'t0': {'value': True, 'type': 'bool'}, 't1': {'value': 127, 'type': 'tinyint'}, 't2': {'value': 32767, 'type': 'smallint'}, 't3': {'value': 2147483647, 'type': 'int'}, 't4': {"value": 9223372036854775807, "type": "bigint"}, 't5': {'value': 11.12345027923584, 'type': 'float'}, 't6': {'value': 22.123456789, 'type': 'double'}, 't7': {'value': 'afcibyeb', 'type': 'binary'}, 'id': tb_name}}, 'punftb')] + self.multiThreadRun(self.genMultiThreadSeq(s_stb_s_tb_d_ts_m_tag_list)) + tdSql.query(f"show tables;") + tdSql.checkRows(1) + tdSql.query(f"select * from {stb_name}") + tdSql.checkRows(6) + tdSql.query(f"select * from {stb_name} where t8 is not NULL") + tdSql.checkRows(6) + + def sStbStbDdataDtsAtInsertMultiThreadCheckCase(self): + """ + thread input same stb tb, different ts, add tag, mul col + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + tb_name = tdCom.getLongName(7, "letters") + input_json, stb_name = self.genFullTypeJson(tb_name=tb_name, col_value=self.genTsColValue(value="binaryTagValue", t_type="binary")) + self.resCmp(input_json, stb_name) + s_stb_s_tb_d_ts_a_tag_list = [({'metric': stb_name, 'timestamp': {'value': 0, 'type': 'ns'}, 'value': 'pjndapjb', 'tags': {'t0': {'value': False, 'type': 'bool'}, 't1': {'value': 127, 'type': 'tinyint'}, 't2': {'value': 32767, 'type': 'smallint'}, 't3': {'value': 2147483647, 'type': 'int'}, 't4': {"value": 9223372036854775807, "type": "bigint"}, 't5': {'value': 11.12345027923584, 'type': 'float'}, 't6': {'value': 22.123456789, 'type': 'double'}, 't7': {'value': 'tuzsfrom', 'type': 'binary'}, 't8': {'value': 'ncharTagValue', 'type': 'nchar'}, 't11': {'value': 127, 'type': 'tinyint'}, 't10': {'value': 'ncharTagValue', 'type': 
'nchar'}, 'id': tb_name}}, 'punftb'), + ({'metric': stb_name, 'timestamp': {'value': 0, 'type': 'ns'}, 'value': 'llqzvgvw', 'tags': {'t0': {'value': False, 'type': 'bool'}, 't1': {'value': 127, 'type': 'tinyint'}, 't2': {'value': 32767, 'type': 'smallint'}, 't3': {'value': 2147483647, 'type': 'int'}, 't4': {"value": 9223372036854775807, "type": "bigint"}, 't5': {'value': 11.12345027923584, 'type': 'float'}, 't6': {'value': 22.123456789, 'type': 'double'}, 't7': {'value': 'nttjdzgi', 'type': 'binary'}, 't8': {'value': 'ncharTagValue', 'type': 'nchar'}, 't11': {'value': 127, 'type': 'tinyint'}, 't10': {'value': 'ncharTagValue', 'type': 'nchar'}, 'id': tb_name}}, 'punftb'), + ({'metric': stb_name, 'timestamp': {'value': 0, 'type': 'ns'}, 'value': {'value': 'tclbosqc', 'type': 'binary'}, 'tags': {'t0': {'value': False, 'type': 'bool'}, 't1': {'value': 127, 'type': 'tinyint'}, 't2': {'value': 32767, 'type': 'smallint'}, 't3': {'value': 2147483647, 'type': 'int'}, 't4': {"value": 9223372036854775807, "type": "bigint"}, 't5': {'value': 11.12345027923584, 'type': 'float'}, 't6': {'value': 22.123456789, 'type': 'double'}, 't7': {'value': 'uatpzgpi', 'type': 'binary'}, 't8': {'value': 'ncharTagValue', 'type': 'nchar'}, 't11': {'value': 127, 'type': 'tinyint'}, 't10': {'value': 'ncharTagValue', 'type': 'nchar'}, 'id': tb_name}}, 'punftb'), + ({'metric': stb_name, 'timestamp': {'value': 0, 'type': 'ns'}, 'value': 'rlpuzodt', 'tags': {'t0': {'value': False, 'type': 'bool'}, 't1': {'value': 127, 'type': 'tinyint'}, 't2': {'value': 32767, 'type': 'smallint'}, 't3': {'value': 2147483647, 'type': 'int'}, 't4': {"value": 9223372036854775807, "type": "bigint"}, 't5': {'value': 11.12345027923584, 'type': 'float'}, 't6': {'value': 22.123456789, 'type': 'double'}, 't7': {'value': 'cwnpdnng', 'type': 'binary'}, 't8': {'value': 'ncharTagValue', 'type': 'nchar'}, 't11': {'value': 127, 'type': 'tinyint'}, 't10': {'value': 'ncharTagValue', 'type': 'nchar'}, 'id': tb_name}}, 'punftb'), + ({'metric': stb_name, 'timestamp': {'value': 0, 'type': 'ns'}, 'value': {'value': 'rhnikvfq', 'type': 'binary'}, 'tags': {'t0': {'value': True, 'type': 'bool'}, 't1': {'value': 127, 'type': 'tinyint'}, 't2': {'value': 32767, 'type': 'smallint'}, 't3': {'value': 2147483647, 'type': 'int'}, 't4': {"value": 9223372036854775807, "type": "bigint"}, 't5': {'value': 11.12345027923584, 'type': 'float'}, 't6': {'value': 22.123456789, 'type': 'double'}, 't7': {'value': 'afcibyeb', 'type': 'binary'}, 't8': {'value': 'ncharTagValue', 'type': 'nchar'}, 't11': {'value': 127, 'type': 'tinyint'}, 't10': {'value': 'ncharTagValue', 'type': 'nchar'}, 'id': tb_name}}, 'punftb')] + self.multiThreadRun(self.genMultiThreadSeq(s_stb_s_tb_d_ts_a_tag_list)) + tdSql.query(f"show tables;") + tdSql.checkRows(1) + tdSql.query(f"select * from {stb_name}") + tdSql.checkRows(6) + for t in ["t10", "t11"]: + tdSql.query(f"select * from {stb_name} where {t} is not NULL;") + tdSql.checkRows(0) + + def sStbDtbDdataDtsInsertMultiThreadCheckCase(self, value_type="obj"): + """ + thread input same stb, different tb, data, ts + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + input_json, stb_name = self.genFullTypeJson(col_value=self.genTsColValue(value="binaryTagValue", t_type="binary", value_type=value_type)) + self.resCmp(input_json, stb_name) + s_stb_d_tb_d_ts_list = self.genSqlList(stb_name=stb_name, value_type=value_type)[10] + self.multiThreadRun(self.genMultiThreadSeq(s_stb_d_tb_d_ts_list)) + tdSql.query(f"show tables;") 
+ tdSql.checkRows(6) + + def sStbDtbDdataDtsMtInsertMultiThreadCheckCase(self): + """ + thread input same stb, different tb, data, ts, add col, mul tag + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + input_json, stb_name = self.genFullTypeJson(col_value=self.genTsColValue(value="binaryTagValue", t_type="binary")) + self.resCmp(input_json, stb_name) + s_stb_d_tb_d_ts_m_tag_list = [({'metric': stb_name, 'timestamp': {'value': 0, 'type': 'ns'}, 'value': 'pjndapjb', 'tags': {'t0': {'value': False, 'type': 'bool'}, 't1': {'value': 127, 'type': 'tinyint'}, 't2': {'value': 32767, 'type': 'smallint'}, 't3': {'value': 2147483647, 'type': 'int'}, 't4': {"value": 9223372036854775807, "type": "bigint"}, 't5': {'value': 11.12345027923584, 'type': 'float'}, 't6': {'value': 22.123456789, 'type': 'double'}}}, 'punftb'), + ({'metric': stb_name, 'timestamp': {'value': 0, 'type': 'ns'}, 'value': {'value': 'llqzvgvw', 'type': 'binary'}, 'tags': {'t0': {'value': True, 'type': 'bool'}, 't1': {'value': 127, 'type': 'tinyint'}, 't2': {'value': 32767, 'type': 'smallint'}, 't3': {'value': 2147483647, 'type': 'int'}, 't4': {"value": 9223372036854775807, "type": "bigint"}, 't5': {'value': 11.12345027923584, 'type': 'float'}, 't6': {'value': 22.123456789, 'type': 'double'}}}, 'punftb'), + ({'metric': stb_name, 'timestamp': {'value': 0, 'type': 'ns'}, 'value': 'tclbosqc', 'tags': {'t0': {'value': False, 'type': 'bool'}, 't1': {'value': 127, 'type': 'tinyint'}, 't2': {'value': 32767, 'type': 'smallint'}, 't3': {'value': 2147483647, 'type': 'int'}, 't4': {"value": 9223372036854775807, "type": "bigint"}, 't5': {'value': 11.12345027923584, 'type': 'float'}, 't6': {'value': 22.123456789, 'type': 'double'}}}, 'punftb'), + ({'metric': stb_name, 'timestamp': {'value': 0, 'type': 'ns'}, 'value': {'value': 'rlpuzodt', 'type': 'binary'}, 'tags': {'t0': {'value': False, 'type': 'bool'}, 't1': {'value': 127, 'type': 'tinyint'}, 't2': {'value': 32767, 'type': 'smallint'}, 't3': {'value': 2147483647, 'type': 'int'}, 't4': {"value": 9223372036854775807, "type": "bigint"}, 't5': {'value': 11.12345027923584, 'type': 'float'}, 't6': {'value': 22.123456789, 'type': 'double'}}}, 'punftb'), + ({'metric': stb_name, 'timestamp': {'value': 0, 'type': 'ns'}, 'value': {'value': 'rhnikvfq', 'type': 'binary'}, 'tags': {'t0': {'value': False, 'type': 'bool'}, 't1': {'value': 127, 'type': 'tinyint'}, 't2': {'value': 32767, 'type': 'smallint'}, 't3': {'value': 2147483647, 'type': 'int'}, 't4': {"value": 9223372036854775807, "type": "bigint"}, 't5': {'value': 11.12345027923584, 'type': 'float'}, 't6': {'value': 22.123456789, 'type': 'double'}}}, 'punftb')] + self.multiThreadRun(self.genMultiThreadSeq(s_stb_d_tb_d_ts_m_tag_list)) + tdSql.query(f"show tables;") + tdSql.checkRows(3) + + def test(self): + try: + input_json = f'test_nchar 0 L"涛思数据" t0=f,t1=L"涛思数据",t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64' + self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None) + # input_json, stb_name = self.genFullTypeJson() + # self.resCmp(input_json, stb_name) + except SchemalessError as err: + print(err.errno) + + def runAll(self): + for value_type in ["obj", "default"]: + self.initCheckCase(value_type) + self.symbolsCheckCase(value_type) + # self.tsCheckCase(value_type) + self.idSeqCheckCase(value_type) + self.idLetterCheckCase(value_type) + self.noIdCheckCase(value_type) + self.maxColTagCheckCase(value_type) + 
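+            # The value_type sweep drives two payload encodings: "obj" wraps every
+            # datum as {"value": ..., "type": ...}, while "default" sends bare JSON
+            # scalars and lets the server infer types (strings follow the
+            # defaultJSONStrType variable consulted in the cases above). A
+            # hypothetical helper showing both encodings of one datum:
+            #
+            #     def encode_value(v, v_type=None):
+            #         # explicit "obj" form when a type is given, bare "default" form otherwise
+            #         return {"value": v, "type": v_type} if v_type else v
+            #
+            #     assert encode_value(True, "bool") == {"value": True, "type": "bool"}
+            #     assert encode_value(True) is True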
self.idIllegalNameCheckCase(value_type) + self.idStartWithNumCheckCase(value_type) + self.nowTsCheckCase(value_type) + self.dateFormatTsCheckCase(value_type) + self.illegalTsCheckCase(value_type) + self.tbnameCheckCase(value_type) + # self.tagValueLengthCheckCase(value_type) + self.colValueLengthCheckCase(value_type) + self.tagColIllegalValueCheckCase(value_type) + # self.duplicateIdTagColInsertCheckCase(value_type) + self.noIdStbExistCheckCase(value_type) + self.duplicateInsertExistCheckCase(value_type) + # self.tagColBinaryNcharLengthCheckCase(value_type) + # self.tagColAddDupIDCheckCase(value_type) + # self.tagAddCheckCase(value_type) + # self.tagMd5Check(value_type) + # self.tagColBinaryMaxLengthCheckCase(value_type) + # self.tagColNcharMaxLengthCheckCase(value_type) + # self.batchInsertCheckCase(value_type) + # self.multiInsertCheckCase(10, value_type) + self.multiColsInsertCheckCase(value_type) + self.blankColInsertCheckCase(value_type) + self.blankTagInsertCheckCase(value_type) + self.multiFieldCheckCase(value_type) + # self.stbInsertMultiThreadCheckCase(value_type) + self.pointTransCheckCase(value_type) + self.tagNameLengthCheckCase() + self.boolTypeCheckCase() + self.batchErrorInsertCheckCase() + self.chineseCheckCase() + # self.spellCheckCase() + self.tbnameTagsColsNameCheckCase() + # # MultiThreads + # self.sStbStbDdataInsertMultiThreadCheckCase() + # self.sStbStbDdataAtInsertMultiThreadCheckCase() + # self.sStbStbDdataMtInsertMultiThreadCheckCase() + # self.sStbDtbDdataInsertMultiThreadCheckCase() + # self.sStbDtbDdataAtInsertMultiThreadCheckCase() + # self.sStbDtbDdataDtsInsertMultiThreadCheckCase() + # self.sStbDtbDdataMtInsertMultiThreadCheckCase() + # self.sStbStbDdataDtsInsertMultiThreadCheckCase() + # self.sStbStbDdataDtsMtInsertMultiThreadCheckCase() + # self.sStbDtbDdataDtsMtInsertMultiThreadCheckCase() + # self.lengthIcreaseCrashCheckCase() + + def run(self): + print("running {}".format(__file__)) + self.createDb() + try: + self.runAll() + except Exception as err: + print(''.join(traceback.format_exception(None, err, err.__traceback__))) + raise err + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/system-test/1-insert/opentsdb_telnet_line_taosc_insert.py b/tests/system-test/1-insert/opentsdb_telnet_line_taosc_insert.py new file mode 100644 index 0000000000000000000000000000000000000000..23404330ed450bca999dc593b0675d4eb7d54eb0 --- /dev/null +++ b/tests/system-test/1-insert/opentsdb_telnet_line_taosc_insert.py @@ -0,0 +1,1489 @@ +################################################################### +# Copyright (c) 2021 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import traceback +import random +from taos.error import SchemalessError +import time +import numpy as np +from util.log import * +from util.cases import * +from util.sql import * +from util.common import tdCom +from util.types import TDSmlProtocolType, TDSmlTimestampType +import threading + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + self._conn = conn + self.smlChildTableName_value = "id" + + def createDb(self, name="test", db_update_tag=0, protocol=None): + if protocol == "telnet-tcp": + name = "opentsdb_telnet" + + if db_update_tag == 0: + tdSql.execute(f"drop database if exists {name}") + tdSql.execute(f"create database if not exists {name} precision 'us' schemaless 1") + else: + tdSql.execute(f"drop database if exists {name}") + tdSql.execute(f"create database if not exists {name} precision 'ns' update 1 schemaless 1") + tdSql.execute(f'use {name}') + + def timeTrans(self, time_value, ts_type): + if int(time_value) == 0: + ts = time.time() + else: + if ts_type == TDSmlTimestampType.MILLI_SECOND.value or ts_type == None: + ts = int(''.join(list(filter(str.isdigit, time_value))))/1000 + elif ts_type == TDSmlTimestampType.SECOND.value: + ts = int(''.join(list(filter(str.isdigit, time_value))))/1 + ulsec = repr(ts).split('.')[1][:6] + if len(ulsec) < 6 and int(ulsec) != 0: + ulsec = int(ulsec) * (10 ** (6 - len(ulsec))) + elif int(ulsec) == 0: + ulsec *= 6 + # * follow two rows added for tsCheckCase + td_ts = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(ts)) + return td_ts + #td_ts = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(ts)) + td_ts = time.strftime("%Y-%m-%d %H:%M:%S.{}".format(ulsec), time.localtime(ts)) + return td_ts + #return repr(datetime.datetime.strptime(td_ts, "%Y-%m-%d %H:%M:%S.%f")) + + def dateToTs(self, datetime_input): + return int(time.mktime(time.strptime(datetime_input, "%Y-%m-%d %H:%M:%S.%f"))) + + def getTdTypeValue(self, value, vtype="col"): + if vtype == "col": + if value.lower().endswith("i8"): + td_type = "TINYINT" + td_tag_value = ''.join(list(value)[:-2]) + elif value.lower().endswith("i16"): + td_type = "SMALLINT" + td_tag_value = ''.join(list(value)[:-3]) + elif value.lower().endswith("i32"): + td_type = "INT" + td_tag_value = ''.join(list(value)[:-3]) + elif value.lower().endswith("i64"): + td_type = "BIGINT" + td_tag_value = ''.join(list(value)[:-3]) + elif value.lower().endswith("u64"): + td_type = "BIGINT UNSIGNED" + td_tag_value = ''.join(list(value)[:-3]) + elif value.lower().endswith("f32"): + td_type = "FLOAT" + td_tag_value = ''.join(list(value)[:-3]) + td_tag_value = '{}'.format(np.float32(td_tag_value)) + elif value.lower().endswith("f64"): + td_type = "DOUBLE" + td_tag_value = ''.join(list(value)[:-3]) + if "e" in value.lower(): + td_tag_value = str(float(td_tag_value)) + elif value.lower().startswith('l"'): + td_type = "NCHAR" + td_tag_value = ''.join(list(value)[2:-1]) + elif value.startswith('"') and value.endswith('"'): + td_type = "BINARY" + td_tag_value = ''.join(list(value)[1:-1]) + elif value.lower() == "t" or value.lower() == "true": + td_type = "BOOL" + td_tag_value = "True" + elif value.lower() == "f" or value.lower() == "false": + td_type = 
"BOOL" + td_tag_value = "False" + elif value.isdigit(): + td_type = "DOUBLE" + td_tag_value = str(float(value)) + else: + td_type = "DOUBLE" + if "e" in value.lower(): + td_tag_value = str(float(value)) + else: + td_tag_value = value + elif vtype == "tag": + td_type = "NCHAR" + td_tag_value = str(value) + return td_type, td_tag_value + + def typeTrans(self, type_list): + type_num_list = [] + for tp in type_list: + if tp.upper() == "TIMESTAMP": + type_num_list.append(9) + elif tp.upper() == "BOOL": + type_num_list.append(1) + elif tp.upper() == "TINYINT": + type_num_list.append(2) + elif tp.upper() == "SMALLINT": + type_num_list.append(3) + elif tp.upper() == "INT": + type_num_list.append(4) + elif tp.upper() == "BIGINT": + type_num_list.append(5) + elif tp.upper() == "FLOAT": + type_num_list.append(6) + elif tp.upper() == "DOUBLE": + type_num_list.append(7) + elif tp.upper() == "BINARY": + type_num_list.append(8) + elif tp.upper() == "NCHAR": + type_num_list.append(10) + elif tp.upper() == "BIGINT UNSIGNED": + type_num_list.append(14) + return type_num_list + + def inputHandle(self, input_sql, ts_type, protocol=None): + input_sql_split_list = input_sql.split(" ") + if protocol == "telnet-tcp": + input_sql_split_list.pop(0) + stb_name = input_sql_split_list[0] + stb_tag_list = input_sql_split_list[3:] + stb_tag_list[-1] = stb_tag_list[-1].strip() + stb_col_value = input_sql_split_list[2] + ts_value = self.timeTrans(input_sql_split_list[1], ts_type) + + tag_name_list = [] + tag_value_list = [] + td_tag_value_list = [] + td_tag_type_list = [] + + col_name_list = [] + col_value_list = [] + td_col_value_list = [] + td_col_type_list = [] + + for elm in stb_tag_list: + if self.smlChildTableName_value == "ID": + if "id=" in elm.lower(): + tb_name = elm.split('=')[1] + else: + tag_name_list.append(elm.split("=")[0].lower()) + tag_value_list.append(elm.split("=")[1]) + tb_name = "" + td_tag_value_list.append(self.getTdTypeValue(elm.split("=")[1], "tag")[1]) + td_tag_type_list.append(self.getTdTypeValue(elm.split("=")[1], "tag")[0]) + else: + if "id" == elm.split("=")[0].lower(): + tag_name_list.insert(0, elm.split("=")[0]) + tag_value_list.insert(0, elm.split("=")[1]) + td_tag_value_list.insert(0, self.getTdTypeValue(elm.split("=")[1], "tag")[1]) + td_tag_type_list.insert(0, self.getTdTypeValue(elm.split("=")[1], "tag")[0]) + else: + tag_name_list.append(elm.split("=")[0]) + tag_value_list.append(elm.split("=")[1]) + tb_name = "" + td_tag_value_list.append(self.getTdTypeValue(elm.split("=")[1], "tag")[1]) + td_tag_type_list.append(self.getTdTypeValue(elm.split("=")[1], "tag")[0]) + + col_name_list.append('_value') + col_value_list.append(stb_col_value) + + td_col_value_list.append(self.getTdTypeValue(stb_col_value)[1]) + td_col_type_list.append(self.getTdTypeValue(stb_col_value)[0]) + + final_field_list = [] + final_field_list.extend(col_name_list) + final_field_list.extend(tag_name_list) + + final_type_list = [] + final_type_list.append("TIMESTAMP") + final_type_list.extend(td_col_type_list) + final_type_list.extend(td_tag_type_list) + final_type_list = self.typeTrans(final_type_list) + + final_value_list = [] + final_value_list.append(ts_value) + final_value_list.extend(td_col_value_list) + final_value_list.extend(td_tag_value_list) + return final_value_list, final_field_list, final_type_list, stb_name, tb_name + + def genFullTypeSql(self, stb_name="", tb_name="", value="", t0="", t1="127i8", t2="32767i16", t3="2147483647i32", + t4="9223372036854775807i64", t5="11.12345f32", t6="22.123456789f64", 
t7="\"binaryTagValue\"", + t8="L\"ncharTagValue\"", ts="1626006833641", + id_noexist_tag=None, id_change_tag=None, id_upper_tag=None, id_mixul_tag=None, id_double_tag=None, + t_add_tag=None, t_mul_tag=None, c_multi_tag=None, c_blank_tag=None, t_blank_tag=None, + chinese_tag=None, multi_field_tag=None, point_trans_tag=None, protocol=None, tcp_keyword_tag=None): + if stb_name == "": + stb_name = tdCom.getLongName(len=6, mode="letters") + if tb_name == "": + tb_name = f'{stb_name}_{random.randint(0, 65535)}_{random.randint(0, 65535)}' + if t0 == "": + t0 = "t" + if value == "": + value = random.choice(["f", "F", "false", "False", "t", "T", "true", "True", "TRUE", "FALSE"]) + if id_upper_tag is not None: + id = "ID" + else: + id = "id" + if id_mixul_tag is not None: + id = random.choice(["iD", "Id"]) + else: + id = "id" + sql_seq = f'{stb_name} {ts} {value} {id}={tb_name} t0={t0} t1={t1} t2={t2} t3={t3} t4={t4} t5={t5} t6={t6} t7={t7} t8={t8}' + if id_noexist_tag is not None: + sql_seq = f'{stb_name} {ts} {value} t0={t0} t1={t1} t2={t2} t3={t3} t4={t4} t5={t5} t6={t6} t7={t7} t8={t8}' + if t_add_tag is not None: + sql_seq = f'{stb_name} {ts} {value} t0={t0} t1={t1} t2={t2} t3={t3} t4={t4} t5={t5} t6={t6} t7={t7} t8={t8} t9={t8}' + if id_change_tag is not None: + sql_seq = f'{stb_name} {ts} {value} t0={t0} {id}={tb_name} t1={t1} t2={t2} t3={t3} t4={t4} t5={t5} t6={t6} t7={t7} t8={t8}' + if id_double_tag is not None: + sql_seq = f'{stb_name} {ts} {value} {id}=\"{tb_name}_1\" t0={t0} t1={t1} {id}=\"{tb_name}_2\" t2={t2} t3={t3} t4={t4} t5={t5} t6={t6} t7={t7} t8={t8}' + if t_add_tag is not None: + sql_seq = f'{stb_name} {ts} {value} {id}={tb_name} t0={t0} t1={t1} t2={t2} t3={t3} t4={t4} t5={t5} t6={t6} t7={t7} t8={t8} t11={t1} t10={t8}' + if t_mul_tag is not None: + sql_seq = f'{stb_name} {ts} {value} {id}={tb_name} t0={t0} t1={t1} t2={t2} t3={t3} t4={t4} t5={t5} t6={t6}' + if id_noexist_tag is not None: + sql_seq = f'{stb_name} {ts} {value} t0={t0} t1={t1} t2={t2} t3={t3} t4={t4} t5={t5} t6={t6}' + if c_multi_tag is not None: + sql_seq = f'{stb_name} {ts} {value} {value} {id}={tb_name} t0={t0} t1={t1} t2={t2} t3={t3} t4={t4} t5={t5} t6={t6}' + if c_blank_tag is not None: + sql_seq = f'{stb_name} {ts} {id}={tb_name} t0={t0} t1={t1} t2={t2} t3={t3} t4={t4} t5={t5} t6={t6} t7={t7} t8={t8}' + if t_blank_tag is not None: + sql_seq = f'{stb_name} {ts} {value}' + if chinese_tag is not None: + sql_seq = f'{stb_name} {ts} L"涛思数据" t0={t0} t1=L"涛思数据"' + if multi_field_tag is not None: + sql_seq = f'{stb_name} {ts} {value} {id}={tb_name} t0={t0} {value}' + if point_trans_tag is not None: + sql_seq = f'.point.trans.test {ts} {value} t0={t0}' + if tcp_keyword_tag is not None: + sql_seq = f'put {ts} {value} t0={t0}' + if protocol == "telnet-tcp": + sql_seq = 'put ' + sql_seq + '\n' + return sql_seq, stb_name + + def genMulTagColStr(self, genType, count=1): + """ + genType must be tag/col + """ + tag_str = "" + col_str = "" + if genType == "tag": + for i in range(0, count): + if i < (count-1): + tag_str += f't{i}=f ' + else: + tag_str += f't{i}=f' + return tag_str + if genType == "col": + col_str = "t" + return col_str + + def genLongSql(self, tag_count): + stb_name = tdCom.getLongName(7, mode="letters") + tag_str = self.genMulTagColStr("tag", tag_count) + col_str = self.genMulTagColStr("col") + ts = "1626006833641" + long_sql = stb_name + ' ' + ts + ' ' + col_str + ' ' + ' ' + tag_str + return long_sql, stb_name + + def getNoIdTbName(self, stb_name, protocol=None): + query_sql = f"select tbname from 
{stb_name}" + tb_name = self.resHandle(query_sql, True, protocol)[0][0] + return tb_name + + def resHandle(self, query_sql, query_tag, protocol=None): + tdSql.execute('reset query cache') + if protocol == "telnet-tcp": + time.sleep(0.5) + row_info = tdSql.query(query_sql, query_tag) + col_info = tdSql.getColNameList(query_sql, query_tag) + res_row_list = [] + sub_list = [] + for row_mem in row_info: + for i in row_mem: + sub_list.append(str(i)) + res_row_list.append(sub_list) + res_field_list_without_ts = col_info[0][1:] + res_type_list = col_info[1] + return res_row_list, res_field_list_without_ts, res_type_list + + def resCmp(self, input_sql, stb_name, query_sql="select * from", condition="", ts=None, ts_type=None, id=True, none_check_tag=None, precision=None, protocol=None): + expect_list = self.inputHandle(input_sql, ts_type, protocol) + if protocol == "telnet-tcp": + tdCom.tcpClient(input_sql) + else: + if precision == None: + self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, ts_type) + else: + self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, precision) + query_sql = f"{query_sql} {stb_name} {condition}" + res_row_list, res_field_list_without_ts, res_type_list = self.resHandle(query_sql, True, protocol) + if ts == 0: + res_ts = self.dateToTs(res_row_list[0][0]) + current_time = time.time() + if current_time - res_ts < 60: + tdSql.checkEqual(res_row_list[0][1:], expect_list[0][1:]) + else: + print("timeout") + tdSql.checkEqual(res_row_list[0], expect_list[0]) + else: + if none_check_tag is not None: + none_index_list = [i for i,x in enumerate(res_row_list[0]) if x=="None"] + none_index_list.reverse() + for j in none_index_list: + res_row_list[0].pop(j) + expect_list[0].pop(j) + tdSql.checkEqual(res_row_list[0], expect_list[0]) + tdSql.checkEqual(res_field_list_without_ts, expect_list[1]) + for i in range(len(res_type_list)): + tdSql.checkEqual(res_type_list[i], expect_list[2][i]) + + def initCheckCase(self, protocol=None): + """ + normal tags and cols, one for every elm + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + input_sql, stb_name = self.genFullTypeSql(protocol=protocol) + self.resCmp(input_sql, stb_name, protocol=protocol) + + def boolTypeCheckCase(self, protocol=None): + """ + check all normal type + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + full_type_list = ["f", "F", "false", "False", "t", "T", "true", "True"] + for t_type in full_type_list: + input_sql, stb_name = self.genFullTypeSql(t0=t_type, protocol=protocol) + self.resCmp(input_sql, stb_name, protocol=protocol) + + def symbolsCheckCase(self, protocol=None): + """ + check symbols = `~!@#$%^&*()_-+={[}]\|:;'\",<.>/? 
+ """ + ''' + please test : + binary_symbols = '\"abcd`~!@#$%^&*()_-{[}]|:;<.>?lfjal"\'\'"\"' + ''' + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + binary_symbols = '"abcd`~!@#$%^&*()_-{[}]|:;<.>?lfjal"' + nchar_symbols = f'L{binary_symbols}' + input_sql1, stb_name1 = self.genFullTypeSql(value=binary_symbols, t7=binary_symbols, t8=nchar_symbols, protocol=protocol) + input_sql2, stb_name2 = self.genFullTypeSql(value=nchar_symbols, t7=binary_symbols, t8=nchar_symbols, protocol=protocol) + self.resCmp(input_sql1, stb_name1, protocol=protocol) + self.resCmp(input_sql2, stb_name2, protocol=protocol) + + def tsCheckCase(self): + """ + test ts list --> ["1626006833640ms", "1626006834s", "1626006822639022"] + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + input_sql, stb_name = self.genFullTypeSql(ts=1626006833640) + self.resCmp(input_sql, stb_name, ts_type=TDSmlTimestampType.MILLI_SECOND.value) + input_sql, stb_name = self.genFullTypeSql(ts=1626006833640) + self.resCmp(input_sql, stb_name, ts_type=None) + input_sql, stb_name = self.genFullTypeSql(ts=1626006834) + self.resCmp(input_sql, stb_name, ts_type=TDSmlTimestampType.SECOND.value) + + tdSql.execute(f"drop database if exists test_ts") + tdSql.execute(f"create database if not exists test_ts precision 'ms' schemaless 1") + tdSql.execute("use test_ts") + input_sql = ['test_ms 1626006833640 t t0=t', 'test_ms 1626006833641 f t0=t'] + self._conn.schemaless_insert(input_sql, TDSmlProtocolType.TELNET.value, None) + res = tdSql.query('select * from test_ms', True) + tdSql.checkEqual(str(res[0][0]), "2021-07-11 20:33:53.640000") + tdSql.checkEqual(str(res[1][0]), "2021-07-11 20:33:53.641000") + + def openTstbTelnetTsCheckCase(self): + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + input_sql = f'{tdCom.getLongName(len=10, mode="letters")} 0 127 t0=127 t1=32767I16 t2=2147483647I32 t3=9223372036854775807 t4=11.12345027923584F32 t5=22.123456789F64' + stb_name = input_sql.split(" ")[0] + self.resCmp(input_sql, stb_name, ts=0) + input_sql = f'{tdCom.getLongName(len=10, mode="letters")} 1626006833640 127 t0=127 t1=32767I16 t2=2147483647I32 t3=9223372036854775807 t4=11.12345027923584F32 t5=22.123456789F64' + stb_name = input_sql.split(" ")[0] + self.resCmp(input_sql, stb_name, ts_type=TDSmlTimestampType.MILLI_SECOND.value) + input_sql = f'{tdCom.getLongName(len=10, mode="letters")} 1626006834 127 t0=127 t1=32767I16 t2=2147483647I32 t3=9223372036854775807 t4=11.12345027923584F32 t5=22.123456789F64' + stb_name = input_sql.split(" ")[0] + self.resCmp(input_sql, stb_name, ts_type=TDSmlTimestampType.SECOND.value) + for ts in [1, 12, 123, 1234, 12345, 123456, 1234567, 12345678, 162600683, 16260068341, 162600683412, 16260068336401]: + try: + input_sql = f'{tdCom.getLongName(len=10, mode="letters")} {ts} 127 t0=127 t1=32767I16 t2=2147483647I32 t3=9223372036854775807 t4=11.12345027923584F32 t5=22.123456789F64' + self._conn.schemaless_insert(input_sql, TDSmlProtocolType.TELNET.value, None) + raise Exception("should not reach here") + except SchemalessError as err: + tdSql.checkNotEqual(err.errno, 0) + + def idSeqCheckCase(self, protocol=None): + """ + check id.index in tags + eg: t0=**,id=**,t1=** + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + input_sql, stb_name = self.genFullTypeSql(id_change_tag=True, protocol=protocol) + self.resCmp(input_sql, stb_name, protocol=protocol) + + def 
idLetterCheckCase(self, protocol=None): + """ + check id param + eg: id and ID + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + input_sql, stb_name = self.genFullTypeSql(id_upper_tag=True, protocol=protocol) + self.resCmp(input_sql, stb_name, protocol=protocol) + input_sql, stb_name = self.genFullTypeSql(id_mixul_tag=True, protocol=protocol) + self.resCmp(input_sql, stb_name, protocol=protocol) + input_sql, stb_name = self.genFullTypeSql(id_change_tag=True, id_upper_tag=True, protocol=protocol) + self.resCmp(input_sql, stb_name, protocol=protocol) + + def noIdCheckCase(self, protocol=None): + """ + id not exist + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + input_sql, stb_name = self.genFullTypeSql(id_noexist_tag=True, protocol=protocol) + self.resCmp(input_sql, stb_name, protocol=protocol) + query_sql = f"select tbname from {stb_name}" + res_row_list = self.resHandle(query_sql, True)[0] + if len(res_row_list[0][0]) > 0: + tdSql.checkColNameList(res_row_list, res_row_list) + else: + tdSql.checkColNameList(res_row_list, "please check noIdCheckCase") + + def maxColTagCheckCase(self): + """ + max tag count is 128 + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + for input_sql in [self.genLongSql(128)[0]]: + tdCom.cleanTb() + self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None) + for input_sql in [self.genLongSql(129)[0]]: + tdCom.cleanTb() + try: + self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None) + raise Exception("should not reach here") + except SchemalessError as err: + tdSql.checkNotEqual(err.errno, 0) + + def stbTbNameCheckCase(self, protocol=None): + """ + test illegal id name + mix "`~!@#$¥%^&*()-+{}|[]、「」【】:;《》<>?" 
+ """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + rstr = list("~!@#$¥%^&*()-+{}|[]、「」【】:;《》<>?") + for i in rstr: + input_sql, stb_name = self.genFullTypeSql(tb_name=f"\"aaa{i}bbb\"", protocol=protocol) + self.resCmp(input_sql, f'`{stb_name}`', protocol=protocol) + tdSql.execute(f'drop table if exists `{stb_name}`') + + def idStartWithNumCheckCase(self, protocol=None): + """ + id is start with num + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + input_sql, stb_name = self.genFullTypeSql(tb_name="1aaabbb", protocol=protocol) + self.resCmp(input_sql, stb_name, protocol=protocol) + + def nowTsCheckCase(self): + """ + check now unsupported + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + input_sql = self.genFullTypeSql(ts="now")[0] + try: + self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None) + raise Exception("should not reach here") + except SchemalessError as err: + tdSql.checkNotEqual(err.errno, 0) + + def dateFormatTsCheckCase(self): + """ + check date format ts unsupported + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + input_sql = self.genFullTypeSql(ts="2021-07-21\ 19:01:46.920")[0] + try: + self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None) + raise Exception("should not reach here") + except SchemalessError as err: + tdSql.checkNotEqual(err.errno, 0) + + def illegalTsCheckCase(self): + """ + check ts format like 16260068336390us19 + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + input_sql = self.genFullTypeSql(ts="16260068336390us19")[0] + try: + self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None) + raise Exception("should not reach here") + except SchemalessError as err: + tdSql.checkNotEqual(err.errno, 0) + + def tbnameCheckCase(self): + """ + check length 192 + check upper tbname + chech upper tag + length of stb_name tb_name <= 192 + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + stb_name_192 = tdCom.getLongName(len=192, mode="letters") + tb_name_192 = tdCom.getLongName(len=192, mode="letters") + tdCom.cleanTb() + input_sql, stb_name = self.genFullTypeSql(stb_name=stb_name_192, tb_name=tb_name_192) + self.resCmp(input_sql, stb_name) + tdSql.query(f'select * from {stb_name}') + tdSql.checkRows(1) + if self.smlChildTableName_value == "ID": + for input_sql in [self.genFullTypeSql(stb_name=tdCom.getLongName(len=193, mode="letters"), tb_name=tdCom.getLongName(len=5, mode="letters"))[0], self.genFullTypeSql(tb_name=tdCom.getLongName(len=193, mode="letters"))[0]]: + try: + self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None) + raise Exception("should not reach here") + except SchemalessError as err: + tdSql.checkNotEqual(err.errno, 0) + input_sql = 'Abcdffgg 1626006833640 False T1=127i8 id=Abcddd' + else: + input_sql = self.genFullTypeSql(stb_name=tdCom.getLongName(len=193, mode="letters"), tb_name=tdCom.getLongName(len=5, mode="letters"))[0] + try: + self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None) + raise Exception("should not reach here") + except SchemalessError as err: + tdSql.checkNotEqual(err.errno, 0) + input_sql = 'Abcdffgg 1626006833640 False T1=127i8' + stb_name = f'`{input_sql.split(" ")[0]}`' + self.resCmp(input_sql, stb_name) + tdSql.execute('drop table 
`Abcdffgg`') + + def tagNameLengthCheckCase(self): + """ + check tag name limit <= 62 + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + tag_name = tdCom.getLongName(61, "letters") + tag_name = f'T{tag_name}' + stb_name = tdCom.getLongName(7, "letters") + input_sql = f'{stb_name} 1626006833640 L"bcdaaa" {tag_name}=f' + self.resCmp(input_sql, stb_name) + input_sql = f'{stb_name} 1626006833640 L"gggcdaaa" {tdCom.getLongName(65, "letters")}=f' + try: + self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None) + raise Exception("should not reach here") + except SchemalessError as err: + tdSql.checkNotEqual(err.errno, 0) + + def tagValueLengthCheckCase(self): + """ + check full type tag value limit + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + # nchar + # * legal nchar could not be larger than 16374/4 + stb_name = tdCom.getLongName(7, "letters") + input_sql = f'{stb_name} 1626006833640 t t0=t t1={tdCom.getLongName(4093, "letters")}' + self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None) + + input_sql = f'{stb_name} 1626006833640 t t0=t t1={tdCom.getLongName(4094, "letters")}' + try: + self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None) + raise Exception("should not reach here") + except SchemalessError as err: + tdSql.checkNotEqual(err.errno, 0) + + def colValueLengthCheckCase(self): + """ + check full type col value limit + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + # i8 + for value in ["-128i8", "127i8"]: + input_sql, stb_name = self.genFullTypeSql(value=value) + self.resCmp(input_sql, stb_name) + tdCom.cleanTb() + for value in ["-129i8", "128i8"]: + input_sql = self.genFullTypeSql(value=value)[0] + try: + self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None) + raise Exception("should not reach here") + except SchemalessError as err: + tdSql.checkNotEqual(err.errno, 0) + # i16 + tdCom.cleanTb() + for value in ["-32768i16"]: + input_sql, stb_name = self.genFullTypeSql(value=value) + self.resCmp(input_sql, stb_name) + tdCom.cleanTb() + for value in ["-32769i16", "32768i16"]: + input_sql = self.genFullTypeSql(value=value)[0] + try: + self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None) + raise Exception("should not reach here") + except SchemalessError as err: + tdSql.checkNotEqual(err.errno, 0) + + # i32 + tdCom.cleanTb() + for value in ["-2147483648i32"]: + input_sql, stb_name = self.genFullTypeSql(value=value) + self.resCmp(input_sql, stb_name) + tdCom.cleanTb() + for value in ["-2147483649i32", "2147483648i32"]: + input_sql = self.genFullTypeSql(value=value)[0] + try: + self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None) + raise Exception("should not reach here") + except SchemalessError as err: + tdSql.checkNotEqual(err.errno, 0) + + # i64 + tdCom.cleanTb() + for value in ["-9223372036854775808i64"]: + input_sql, stb_name = self.genFullTypeSql(value=value) + self.resCmp(input_sql, stb_name) + tdCom.cleanTb() + for value in ["-9223372036854775809i64", "9223372036854775808i64"]: + input_sql = self.genFullTypeSql(value=value)[0] + try: + self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None) + raise Exception("should not reach here") + except SchemalessError as err: + tdSql.checkNotEqual(err.errno, 0) + + # f32 + tdCom.cleanTb() + for value in 
[f"{-3.4028234663852885981170418348451692544*(10**38)}f32", f"{3.4028234663852885981170418348451692544*(10**38)}f32"]: + input_sql, stb_name = self.genFullTypeSql(value=value) + self.resCmp(input_sql, stb_name) + # * limit set to 4028234664*(10**38) + tdCom.cleanTb() + for value in [f"{-3.4028234664*(10**38)}f32", f"{3.4028234664*(10**38)}f32"]: + input_sql = self.genFullTypeSql(value=value)[0] + try: + self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None) + raise Exception("should not reach here") + except SchemalessError as err: + tdSql.checkNotEqual(err.errno, 0) + + # f64 + tdCom.cleanTb() + for value in [f'{-1.79769313486231570814527423731704356798070567525844996598917476803157260780*(10**308)}f64', f'{-1.79769313486231570814527423731704356798070567525844996598917476803157260780*(10**308)}f64']: + input_sql, stb_name = self.genFullTypeSql(value=value) + self.resCmp(input_sql, stb_name) + # # * limit set to 1.797693134862316*(10**308) + # tdCom.cleanTb() + # for value in [f'{-1.797693134862316*(10**308)}f64', f'{-1.797693134862316*(10**308)}f64']: + # input_sql = self.genFullTypeSql(value=value)[0] + # try: + # self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None) + # raise Exception("should not reach here") + # except SchemalessError as err: + # tdSql.checkNotEqual(err.errno, 0) + + # # # binary + # tdCom.cleanTb() + # stb_name = tdCom.getLongName(7, "letters") + # input_sql = f'{stb_name} 1626006833640 "{tdCom.getLongName(16374, "letters")}" t0=t' + # self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None) + + # tdCom.cleanTb() + # input_sql = f'{stb_name} 1626006833640 "{tdCom.getLongName(16375, "letters")}" t0=t' + # try: + # self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None) + # raise Exception("should not reach here") + # except SchemalessError as err: + # tdSql.checkNotEqual(err.errno, 0) + + # # nchar + # # * legal nchar could not be larger than 16374/4 + # tdCom.cleanTb() + # stb_name = tdCom.getLongName(7, "letters") + # input_sql = f'{stb_name} 1626006833640 L"{tdCom.getLongName(4093, "letters")}" t0=t' + # self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None) + + # tdCom.cleanTb() + # input_sql = f'{stb_name} 1626006833640 L"{tdCom.getLongName(4094, "letters")}" t0=t' + # try: + # self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None) + # raise Exception("should not reach here") + # except SchemalessError as err: + # tdSql.checkNotEqual(err.errno, 0) + + def tagColIllegalValueCheckCase(self): + + """ + test illegal tag col value + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + # bool + for i in ["TrUe", "tRue", "trUe", "truE", "FalsE", "fAlse", "faLse", "falSe", "falsE"]: + input_sql1, stb_name = self.genFullTypeSql(t0=i) + self.resCmp(input_sql1, stb_name) + input_sql2, stb_name = self.genFullTypeSql(value=i) + self.resCmp(input_sql2, stb_name) + + # i8 i16 i32 i64 f32 f64 + for input_sql in [ + self.genFullTypeSql(value="1s2i8")[0], + self.genFullTypeSql(value="1s2i16")[0], + self.genFullTypeSql(value="1s2i32")[0], + self.genFullTypeSql(value="1s2i64")[0], + self.genFullTypeSql(value="11.1s45f32")[0], + self.genFullTypeSql(value="11.1s45f64")[0], + ]: + try: + self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None) + raise Exception("should not reach here") + except SchemalessError as err: + tdSql.checkNotEqual(err.errno, 0) + + # check accepted 
binary and nchar symbols + # # * ~!@#$¥%^&*()-+={}|[]、「」:; + for symbol in list('~!@#$¥%^&*()-+={}|[]、「」:;'): + input_sql1 = f'{tdCom.getLongName(7, "letters")} 1626006833640 "abc{symbol}aaa" t0=t' + input_sql2 = f'{tdCom.getLongName(7, "letters")} 1626006833640 t t0=t t1="abc{symbol}aaa"' + self._conn.schemaless_insert([input_sql1], TDSmlProtocolType.TELNET.value, None) + # self._conn.schemaless_insert([input_sql2], TDSmlProtocolType.TELNET.value, None) + + def blankCheckCase(self): + ''' + check blank case + ''' + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + # input_sql_list = [f'{tdCom.getLongName(7, "letters")} 1626006833640 "abc aaa" t0=t', + # f'{tdCom.getLongName(7, "letters")} 1626006833640 t t0="abaaa"', + # f'{tdCom.getLongName(7, "letters")} 1626006833640 t t0=L"abaaa"', + # f'{tdCom.getLongName(7, "letters")} 1626006833640 L"aba aa" t0=L"abcaaa3" '] + input_sql_list = [f'{tdCom.getLongName(7, "letters")} 1626006833640 t t0="abaaa"', + f'{tdCom.getLongName(7, "letters")} 1626006833640 t t0=L"abaaa"'] + for input_sql in input_sql_list: + stb_name = input_sql.split(" ")[0] + self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None) + tdSql.query(f'select * from {stb_name}') + tdSql.checkRows(1) + + def duplicateIdTagColInsertCheckCase(self): + """ + check duplicate Id Tag Col + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + input_sql_id = self.genFullTypeSql(id_double_tag=True)[0] + try: + self._conn.schemaless_insert([input_sql_id], TDSmlProtocolType.TELNET.value, None) + raise Exception("should not reach here") + except SchemalessError as err: + tdSql.checkNotEqual(err.errno, 0) + + input_sql = self.genFullTypeSql()[0] + input_sql_tag = input_sql.replace("t5", "t6") + try: + self._conn.schemaless_insert([input_sql_tag], TDSmlProtocolType.TELNET.value, None) + raise Exception("should not reach here") + except SchemalessError as err: + tdSql.checkNotEqual(err.errno, 0) + + ##### stb exist ##### + @tdCom.smlPass + def noIdStbExistCheckCase(self): + """ + case no id when stb exist + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + input_sql, stb_name = self.genFullTypeSql(tb_name="sub_table_0123456", t0="f", value="f") + self.resCmp(input_sql, stb_name) + input_sql, stb_name = self.genFullTypeSql(stb_name=stb_name, id_noexist_tag=True, t0="f", value="f") + self.resCmp(input_sql, stb_name, condition='where tbname like "t_%"') + tdSql.query(f"select * from {stb_name}") + tdSql.checkRows(2) + + def duplicateInsertExistCheckCase(self): + """ + check duplicate insert when stb exist + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + input_sql, stb_name = self.genFullTypeSql() + self.resCmp(input_sql, stb_name) + self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None) + self.resCmp(input_sql, stb_name) + + @tdCom.smlPass + def tagColBinaryNcharLengthCheckCase(self): + """ + check length increase + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + input_sql, stb_name = self.genFullTypeSql() + self.resCmp(input_sql, stb_name) + tb_name = tdCom.getLongName(5, "letters") + input_sql, stb_name = self.genFullTypeSql(stb_name=stb_name, tb_name=tb_name,t7="\"binaryTagValuebinaryTagValue\"", t8="L\"ncharTagValuencharTagValue\"") + self.resCmp(input_sql, stb_name, condition=f'where tbname like "{tb_name}"') + + 
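# --- reviewer sketch (illustrative only, not part of this patch): a minimal, standalone use of the OpenTSDB-telnet line format these cases exercise; the db name "sml_demo" and metric "weather" are hypothetical, and the import path is assumed from this test's header --- + # import taos + # from util.types import TDSmlProtocolType + # conn = taos.connect() + # conn.execute("create database if not exists sml_demo schemaless 1") + # conn.select_db("sml_demo") + # # line format: <metric> <ts> <value> <tagk=tagv> ...; i8/i16/i32/i64/f32/f64 suffixes select the type, L"..." marks nchar + # line = 'weather 1626006833640 23.5f64 t0=t t1=127i8 t2=L"beijing"' + # conn.schemaless_insert([line], TDSmlProtocolType.TELNET.value, None) + + 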
@tdCom.smlPass + def tagColAddDupIDCheckCase(self): + """ + check tag count increase when stb and tb are duplicated + * tag: alter table ... + * col: when update==0 and ts is the same, unchanged + * so in this case tag && value will be added, + * col is added without value when update==0 + * col is added with value when update==1 + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + tb_name = tdCom.getLongName(7, "letters") + for db_update_tag in [0, 1]: + if db_update_tag == 1 : + self.createDb("test_update", db_update_tag=db_update_tag) + input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name, t0="t", value="t") + self.resCmp(input_sql, stb_name) + input_sql, stb_name = self.genFullTypeSql(stb_name=stb_name, tb_name=tb_name, t0="t", value="f", t_add_tag=True) + if db_update_tag == 1 : + self.resCmp(input_sql, stb_name, condition=f'where tbname like "{tb_name}"', none_check_tag=True) + tdSql.query(f'select * from {stb_name} where tbname like "{tb_name}"') + tdSql.checkData(0, 11, None) + tdSql.checkData(0, 12, None) + else: + self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None) + tdSql.query(f'select * from {stb_name} where tbname like "{tb_name}"') + tdSql.checkData(0, 1, True) + tdSql.checkData(0, 11, None) + tdSql.checkData(0, 12, None) + self.createDb() + + @tdCom.smlPass + def tagColAddCheckCase(self): + """ + check tag count increase + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + tb_name = tdCom.getLongName(7, "letters") + input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name, t0="f", value="f") + self.resCmp(input_sql, stb_name) + tb_name_1 = tdCom.getLongName(7, "letters") + input_sql, stb_name = self.genFullTypeSql(stb_name=stb_name, tb_name=tb_name_1, t0="f", value="f", t_add_tag=True) + self.resCmp(input_sql, stb_name, condition=f'where tbname like "{tb_name_1}"') + res_row_list = self.resHandle(f"select t10,t11 from {tb_name}", True)[0] + tdSql.checkEqual(res_row_list[0], ['None', 'None']) + self.resCmp(input_sql, stb_name, condition=f'where tbname like "{tb_name}"', none_check_tag=True) + + def tagMd5Check(self): + """ + condition: stb does not change + insert two tables, keep tags unchanged, change cols + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + input_sql, stb_name = self.genFullTypeSql(t0="f", value="f", id_noexist_tag=True) + self.resCmp(input_sql, stb_name) + tb_name1 = self.getNoIdTbName(stb_name) + input_sql, stb_name = self.genFullTypeSql(stb_name=stb_name, t0="f", value="f", id_noexist_tag=True) + self.resCmp(input_sql, stb_name) + tb_name2 = self.getNoIdTbName(stb_name) + tdSql.query(f"select * from {stb_name}") + tdSql.checkRows(1) + tdSql.checkEqual(tb_name1, tb_name2) + input_sql, stb_name = self.genFullTypeSql(stb_name=stb_name, t0="f", value="f", id_noexist_tag=True, t_add_tag=True) + self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None) + tb_name3 = self.getNoIdTbName(stb_name) + tdSql.query(f"select * from {stb_name}") + tdSql.checkRows(2) + tdSql.checkNotEqual(tb_name1, tb_name3) + + # * tag nchar max is 16374/4, col+ts nchar max 49151 + def tagColNcharMaxLengthCheckCase(self): + """ + check nchar length limit + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + stb_name = tdCom.getLongName(7, "letters") + input_sql = f'{stb_name} 1626006833640 f t2={tdCom.getLongName(1, "letters")}' + self._conn.schemaless_insert([input_sql], 
TDSmlProtocolType.TELNET.value, None) + + # * legal nchar could not be larger than 16374/4 + input_sql = f'{stb_name} 1626006833640 f t1={tdCom.getLongName(4093, "letters")} t2={tdCom.getLongName(1, "letters")}' + self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None) + tdSql.query(f"select * from {stb_name}") + tdSql.checkRows(2) + input_sql = f'{stb_name} 1626006833640 f t1={tdCom.getLongName(4093, "letters")} t2={tdCom.getLongName(2, "letters")}' + try: + self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None) + raise Exception("should not reach here") + except SchemalessError as err: + tdSql.checkNotEqual(err.errno, 0) + tdSql.query(f"select * from {stb_name}") + tdSql.checkRows(2) + + def batchInsertCheckCase(self): + """ + test batch insert + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + stb_name = tdCom.getLongName(8, "letters") + tdSql.execute(f'create stable {stb_name}(ts timestamp, f int) tags(t1 bigint)') + + lines = ["st123456 1626006833640 1i64 t1=3i64 t2=4f64 t3=\"t3\"", + "st123456 1626006833641 2i64 t1=4i64 t3=\"t4\" t2=5f64 t4=5f64", + f'{stb_name} 1626006833642 3i64 t2=5f64 t3=L\"ste\"', + "stf567890 1626006833643 4i64 t1=4i64 t3=\"t4\" t2=5f64 t4=5f64", + "st123456 1626006833644 5i64 t1=4i64 t2=5f64 t3=\"t4\"", + f'{stb_name} 1626006833645 6i64 t2=5f64 t3=L\"ste2\"', + f'{stb_name} 1626006833646 7i64 t2=5f64 t3=L\"ste2\"', + "st123456 1626006833647 8i64 t1=4i64 t3=\"t4\" t2=5f64 t4=5f64", + "st123456 1626006833648 9i64 t1=4i64 t3=\"t4\" t2=5f64 t4=5f64" + ] + self._conn.schemaless_insert(lines, TDSmlProtocolType.TELNET.value, TDSmlTimestampType.MILLI_SECOND.value) + tdSql.query('show stables') + tdSql.checkRows(3) + tdSql.query('show tables') + tdSql.checkRows(6) + tdSql.query('select * from st123456') + tdSql.checkRows(5) + + def multiInsertCheckCase(self, count): + """ + test multi insert + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + sql_list = [] + stb_name = tdCom.getLongName(8, "letters") + tdSql.execute(f'create stable {stb_name}(ts timestamp, f int) tags(t1 nchar(10))') + for i in range(count): + input_sql = self.genFullTypeSql(stb_name=stb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', value=f'"{tdCom.getLongName(8, "letters")}"', id_noexist_tag=True)[0] + sql_list.append(input_sql) + self._conn.schemaless_insert(sql_list, TDSmlProtocolType.TELNET.value, TDSmlTimestampType.MILLI_SECOND.value) + tdSql.query('show tables') + tdSql.checkRows(count) + + def batchErrorInsertCheckCase(self): + """ + test batch error insert + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + stb_name = tdCom.getLongName(8, "letters") + lines = ["st123456 1626006833640 3i 64 t1=3i64 t2=4f64 t3=\"t3\"", + f"{stb_name} 1626056811823316532ns tRue t2=5f64 t3=L\"ste\""] + try: + self._conn.schemaless_insert(lines, TDSmlProtocolType.TELNET.value, None) + raise Exception("should not reach here") + except SchemalessError as err: + tdSql.checkNotEqual(err.errno, 0) + + def multiColsInsertCheckCase(self): + """ + test multi cols insert + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + input_sql = self.genFullTypeSql(c_multi_tag=True)[0] + try: + self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None) + raise Exception("should not reach here") + except SchemalessError as err: + tdSql.checkNotEqual(err.errno, 0) + + def 
blankColInsertCheckCase(self): + """ + test blank col insert + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + input_sql = self.genFullTypeSql(c_blank_tag=True)[0] + try: + self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None) + raise Exception("should not reach here") + except SchemalessError as err: + tdSql.checkNotEqual(err.errno, 0) + + def blankTagInsertCheckCase(self): + """ + test blank tag insert + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + input_sql = self.genFullTypeSql(t_blank_tag=True)[0] + try: + self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None) + raise Exception("should not reach here") + except SchemalessError as err: + tdSql.checkNotEqual(err.errno, 0) + + def chineseCheckCase(self): + """ + check nchar ---> chinese + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + input_sql, stb_name = self.genFullTypeSql(chinese_tag=True) + self.resCmp(input_sql, stb_name) + + def multiFieldCheckCase(self): + ''' + multi_field + ''' + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + input_sql = self.genFullTypeSql(multi_field_tag=True)[0] + try: + self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None) + raise Exception("should not reach here") + except SchemalessError as err: + tdSql.checkNotEqual(err.errno, 0) + + def spellCheckCase(self): + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + stb_name = tdCom.getLongName(8, "letters") + input_sql_list = [f'{stb_name}_1 1626006833640 127I8 t0=127I8 t1=32767I16 t2=2147483647I32 t3=9223372036854775807I64 t4=11.12345027923584F32 t5=22.123456789F64', + f'{stb_name}_2 1626006833640 32767I16 t0=127I8 t1=32767I16 t2=2147483647I32 t3=9223372036854775807I64 t4=11.12345027923584F32 t5=22.123456789F64', + f'{stb_name}_3 1626006833640 2147483647I32 t0=127I8 t1=32767I16 t2=2147483647I32 t3=9223372036854775807I64 t4=11.12345027923584F32 t5=22.123456789F64', + f'{stb_name}_4 1626006833640 9223372036854775807I64 t0=127I8 t1=32767I16 t2=2147483647I32 t3=9223372036854775807I64 t4=11.12345027923584F32 t5=22.123456789F64', + f'{stb_name}_5 1626006833640 11.12345027923584F32 t0=127I8 t1=32767I16 t2=2147483647I32 t3=9223372036854775807I64 t4=11.12345027923584F32 t5=22.123456789F64', + f'{stb_name}_6 1626006833640 22.123456789F64 t0=127I8 t1=32767I16 t2=2147483647I32 t3=9223372036854775807I64 t4=11.12345027923584F32 t5=22.123456789F64', + f'{stb_name}_7 1626006833640 22.123456789F64 t0=127I8 t1=32767I16 t2=2147483647I32 t3=9223372036854775807I64 t4=11.12345027923584F32 t5=22.123456789F64', + f'{stb_name}_8 1626006833640 22.123456789F64 t0=127I8 t1=32767I16 t2=2147483647I32 t3=9223372036854775807I64 t4=11.12345027923584F32 t5=22.123456789F64', + f'{stb_name}_9 1626006833640 22.123456789F64 t0=127I8 t1=32767I16 t2=2147483647I32 t3=9223372036854775807I64 t4=11.12345027923584F32 t5=22.123456789F64', + f'{stb_name}_10 1626006833640 22.123456789F64 t0=127I8 t1=32767I16 t2=2147483647I32 t3=9223372036854775807I64 t4=11.12345027923584F32 t5=22.123456789F64'] + for input_sql in input_sql_list: + stb_name = input_sql.split(' ')[0] + self.resCmp(input_sql, stb_name) + + def pointTransCheckCase(self, protocol=None): + """ + metric value "." 
trans to "_" + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + input_sql = self.genFullTypeSql(point_trans_tag=True, protocol=protocol)[0] + if protocol == 'telnet-tcp': + stb_name = f'`{input_sql.split(" ")[1]}`' + else: + stb_name = f'`{input_sql.split(" ")[0]}`' + self.resCmp(input_sql, stb_name, protocol=protocol) + tdSql.execute("drop table `.point.trans.test`") + + def defaultTypeCheckCase(self): + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + stb_name = tdCom.getLongName(8, "letters") + input_sql_list = [f'{stb_name}_1 1626006833640 9223372036854775807 t0=f t1=127 t2=32767i16 t3=2147483647i32 t4=9223372036854775807 t5=11.12345f32 t6=22.123456789f64 t7="vozamcts" t8=L"ncharTagValue"', \ + f'{stb_name}_2 1626006833641 22.123456789 t0=f t1=127i8 t2=32767I16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789 t7="vozamcts" t8=L"ncharTagValue"', \ + f'{stb_name}_3 1626006833642 10e5F32 t0=f t1=127i8 t2=32767I16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=10e5F64 t7="vozamcts" t8=L"ncharTagValue"', \ + f'{stb_name}_4 1626006833643 10.0e5F64 t0=f t1=127i8 t2=32767I16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=10.0e5F32 t7="vozamcts" t8=L"ncharTagValue"', \ + f'{stb_name}_5 1626006833644 -10.0e5 t0=f t1=127i8 t2=32767I16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=-10.0e5 t7="vozamcts" t8=L"ncharTagValue"'] + for input_sql in input_sql_list: + stb_name = input_sql.split(" ")[0] + self.resCmp(input_sql, stb_name) + + def tbnameTagsColsNameCheckCase(self): + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + if self.smlChildTableName_value == "ID": + input_sql = 'rFa$sta 1626006834 9223372036854775807 id=rFas$ta_1 Tt!0=true tT@1=127Ii8 t#2=32767i16 "t$3"=2147483647i32 t%4=9223372036854775807i64 t^5=11.12345f32 t&6=22.123456789f64 t*7=\"ddzhiksj\" t!@#$%^&*()_+[];:<>?,9=L\"ncharTagValue\"' + self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None) + query_sql = 'select * from `rFa$sta`' + query_res = tdSql.query(query_sql, True) + tdSql.checkEqual(query_res, [(datetime.datetime(2021, 7, 11, 20, 33, 54), 9.223372036854776e+18, 'true', '127Ii8', '32767i16', '2147483647i32', '9223372036854775807i64', '11.12345f32', '22.123456789f64', '"ddzhiksj"', 'L"ncharTagValue"')]) + col_tag_res = tdSql.getColNameList(query_sql) + tdSql.checkEqual(col_tag_res, ['ts', '_value', 'tt!0', 'tt@1', 't#2', '"t$3"', 't%4', 't^5', 't&6', 't*7', 't!@#$%^&*()_+[];:<>?,9']) + tdSql.execute('drop table `rFa$sta`') + else: + input_sql = 'rFa$sta 1626006834 9223372036854775807 Tt!0=true tT@1=127Ii8 t#2=32767i16 "t$3"=2147483647i32 t%4=9223372036854775807i64 t^5=11.12345f32 t&6=22.123456789f64 t*7=\"ddzhiksj\" t!@#$%^&*()_+[];:<>?,9=L\"ncharTagValue\"' + self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None) + query_sql = 'select * from `rFa$sta`' + query_res = tdSql.query(query_sql, True) + tdSql.checkEqual(query_res, [(datetime.datetime(2021, 7, 11, 20, 33, 54), 9.223372036854776e+18, '2147483647i32', 'L"ncharTagValue"', '32767i16', '9223372036854775807i64', '22.123456789f64', '"ddzhiksj"', '11.12345f32', 'true', '127Ii8')]) + col_tag_res = tdSql.getColNameList(query_sql) + tdSql.checkEqual(col_tag_res, ['_ts', '_value', '"t$3"', 't!@#$%^&*()_+[];:<>?,9', 't#2', 't%4', 't&6', 't*7', 't^5', 'Tt!0', 'tT@1']) + tdSql.execute('drop table `rFa$sta`') + + def 
tcpKeywordsCheckCase(self, protocol="telnet-tcp"): + """ + stb = "put" + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + input_sql = self.genFullTypeSql(tcp_keyword_tag=True, protocol=protocol)[0] + stb_name = f'`{input_sql.split(" ")[1]}`' + self.resCmp(input_sql, stb_name, protocol=protocol) + + def genSqlList(self, count=5, stb_name="", tb_name=""): + """ + stb --> supertable + tb --> table + ts --> timestamp, same default + col --> column, same default + tag --> tag, same default + d --> different + s --> same + a --> add + m --> minus + """ + d_stb_d_tb_list = list() + s_stb_s_tb_list = list() + s_stb_s_tb_a_tag_list = list() + s_stb_s_tb_m_tag_list = list() + s_stb_d_tb_list = list() + s_stb_d_tb_m_tag_list = list() + s_stb_d_tb_a_tag_list = list() + s_stb_s_tb_d_ts_list = list() + s_stb_s_tb_d_ts_m_tag_list = list() + s_stb_s_tb_d_ts_a_tag_list = list() + s_stb_d_tb_d_ts_list = list() + s_stb_d_tb_d_ts_m_tag_list = list() + s_stb_d_tb_d_ts_a_tag_list = list() + for i in range(count): + d_stb_d_tb_list.append(self.genFullTypeSql(t0="f", value="f")) + s_stb_s_tb_list.append(self.genFullTypeSql(stb_name=stb_name, tb_name=tb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', value=f'"{tdCom.getLongName(8, "letters")}"')) + s_stb_s_tb_a_tag_list.append(self.genFullTypeSql(stb_name=stb_name, tb_name=tb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', value=f'"{tdCom.getLongName(8, "letters")}"', t_add_tag=True)) + s_stb_s_tb_m_tag_list.append(self.genFullTypeSql(stb_name=stb_name, tb_name=tb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', value=f'"{tdCom.getLongName(8, "letters")}"', t_mul_tag=True)) + s_stb_d_tb_list.append(self.genFullTypeSql(stb_name=stb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', value=f'"{tdCom.getLongName(8, "letters")}"', id_noexist_tag=True)) + s_stb_d_tb_m_tag_list.append(self.genFullTypeSql(stb_name=stb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', value=f'"{tdCom.getLongName(8, "letters")}"', id_noexist_tag=True, t_mul_tag=True)) + s_stb_d_tb_a_tag_list.append(self.genFullTypeSql(stb_name=stb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', value=f'"{tdCom.getLongName(8, "letters")}"', id_noexist_tag=True, t_add_tag=True)) + s_stb_s_tb_d_ts_list.append(self.genFullTypeSql(stb_name=stb_name, tb_name=tb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', value=f'"{tdCom.getLongName(8, "letters")}"', ts=0)) + s_stb_s_tb_d_ts_m_tag_list.append(self.genFullTypeSql(stb_name=stb_name, tb_name=tb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', value=f'"{tdCom.getLongName(8, "letters")}"', ts=0, t_mul_tag=True)) + s_stb_s_tb_d_ts_a_tag_list.append(self.genFullTypeSql(stb_name=stb_name, tb_name=tb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', value=f'"{tdCom.getLongName(8, "letters")}"', ts=0, t_add_tag=True)) + s_stb_d_tb_d_ts_list.append(self.genFullTypeSql(stb_name=stb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', value=f'"{tdCom.getLongName(8, "letters")}"', id_noexist_tag=True, ts=0)) + s_stb_d_tb_d_ts_m_tag_list.append(self.genFullTypeSql(stb_name=stb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', value=f'"{tdCom.getLongName(8, "letters")}"', id_noexist_tag=True, ts=0, t_mul_tag=True)) + s_stb_d_tb_d_ts_a_tag_list.append(self.genFullTypeSql(stb_name=stb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', value=f'"{tdCom.getLongName(8, "letters")}"', id_noexist_tag=True, ts=0, t_add_tag=True)) + + return d_stb_d_tb_list, s_stb_s_tb_list, s_stb_s_tb_a_tag_list, s_stb_s_tb_m_tag_list, \ + s_stb_d_tb_list, 
s_stb_d_tb_m_tag_list, s_stb_d_tb_a_tag_list, s_stb_s_tb_d_ts_list, \ + s_stb_s_tb_d_ts_m_tag_list, s_stb_s_tb_d_ts_a_tag_list, s_stb_d_tb_d_ts_list, \ + s_stb_d_tb_d_ts_m_tag_list, s_stb_d_tb_d_ts_a_tag_list + + + def genMultiThreadSeq(self, sql_list): + tlist = list() + for insert_sql in sql_list: + t = threading.Thread(target=self._conn.schemaless_insert,args=([insert_sql[0]], TDSmlProtocolType.TELNET.value, None)) + tlist.append(t) + return tlist + + def multiThreadRun(self, tlist): + for t in tlist: + t.start() + for t in tlist: + t.join() + + def stbInsertMultiThreadCheckCase(self): + """ + thread input different stb + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + input_sql = self.genSqlList()[0] + print(input_sql) + self.multiThreadRun(self.genMultiThreadSeq(input_sql)) + tdSql.query(f"show tables;") + tdSql.checkRows(5) + + def sStbStbDdataInsertMultiThreadCheckCase(self): + """ + thread input same stb tb, different data, result keeps the first data + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + tb_name = tdCom.getLongName(7, "letters") + input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name, value="\"binaryTagValue\"") + self.resCmp(input_sql, stb_name) + s_stb_s_tb_list = self.genSqlList(stb_name=stb_name, tb_name=tb_name)[1] + self.multiThreadRun(self.genMultiThreadSeq(s_stb_s_tb_list)) + tdSql.query(f"show tables;") + tdSql.checkRows(1) if self.smlChildTableName_value == "ID" else tdSql.checkRows(6) + if self.smlChildTableName_value == "ID": + expected_tb_name = self.getNoIdTbName(stb_name)[0] + tdSql.checkEqual(tb_name, expected_tb_name) + tdSql.query(f"select * from {stb_name};") + tdSql.checkRows(1) if self.smlChildTableName_value == "ID" else tdSql.checkRows(6) + + def sStbStbDdataAtInsertMultiThreadCheckCase(self): + """ + thread input same stb tb, different data, add columns and tags, result keeps the first data + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + tb_name = tdCom.getLongName(7, "letters") + input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name, value="\"binaryTagValue\"") + self.resCmp(input_sql, stb_name) + s_stb_s_tb_a_tag_list = self.genSqlList(stb_name=stb_name, tb_name=tb_name)[2] + self.multiThreadRun(self.genMultiThreadSeq(s_stb_s_tb_a_tag_list)) + tdSql.query(f"show tables;") + tdSql.checkRows(1) if self.smlChildTableName_value == "ID" else tdSql.checkRows(6) + if self.smlChildTableName_value == "ID": + expected_tb_name = self.getNoIdTbName(stb_name)[0] + tdSql.checkEqual(tb_name, expected_tb_name) + tdSql.query(f"select * from {stb_name};") + tdSql.checkRows(1) if self.smlChildTableName_value == "ID" else tdSql.checkRows(6) + + def sStbStbDdataMtInsertMultiThreadCheckCase(self): + """ + thread input same stb tb, different data, minus columns and tags, result keeps the first data + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + tb_name = tdCom.getLongName(7, "letters") + input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name, value="\"binaryTagValue\"") + self.resCmp(input_sql, stb_name) + s_stb_s_tb_m_tag_list = self.genSqlList(stb_name=stb_name, tb_name=tb_name)[3] + self.multiThreadRun(self.genMultiThreadSeq(s_stb_s_tb_m_tag_list)) + tdSql.query(f"show tables;") + tdSql.checkRows(1) if self.smlChildTableName_value == "ID" else tdSql.checkRows(2) + if self.smlChildTableName_value == "ID": + expected_tb_name = self.getNoIdTbName(stb_name)[0] + 
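# illustrative reviewer note: in "ID" mode the id tag pins the child-table name, so every thread wrote into the same table and the auto-resolved name below must equal tb_name + 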
tdSql.checkEqual(tb_name, expected_tb_name) + tdSql.query(f"select * from {stb_name};") + tdSql.checkRows(1) if self.smlChildTableName_value == "ID" else tdSql.checkRows(2) + + def sStbDtbDdataInsertMultiThreadCheckCase(self): + """ + thread input same stb, different tb, different data + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + input_sql, stb_name = self.genFullTypeSql(value="\"binaryTagValue\"") + self.resCmp(input_sql, stb_name) + s_stb_d_tb_list = self.genSqlList(stb_name=stb_name)[4] + self.multiThreadRun(self.genMultiThreadSeq(s_stb_d_tb_list)) + tdSql.query(f"show tables;") + tdSql.checkRows(6) + + def sStbDtbDdataMtInsertMultiThreadCheckCase(self): + """ + thread input same stb, different tb, different data, add col, mul tag + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + input_sql, stb_name = self.genFullTypeSql(value="\"binaryTagValue\"") + self.resCmp(input_sql, stb_name) + s_stb_d_tb_m_tag_list = [(f'{stb_name} 1626006833640 "omfdhyom" t0=F t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64', 'yzwswz'), \ + (f'{stb_name} 1626006833640 "vqowydbc" t0=F t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64', 'yzwswz'), \ + (f'{stb_name} 1626006833640 "plgkckpv" t0=F t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64', 'yzwswz'), \ + (f'{stb_name} 1626006833640 "cujyqvlj" t0=F t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64', 'yzwswz'), \ + (f'{stb_name} 1626006833640 "twjxisat" t0=T t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64', 'yzwswz')] + self.multiThreadRun(self.genMultiThreadSeq(s_stb_d_tb_m_tag_list)) + tdSql.query(f"show tables;") + tdSql.checkRows(3) + + def sStbDtbDdataAtInsertMultiThreadCheckCase(self): + """ + thread input same stb, different tb, different data, add tag, mul col + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + input_sql, stb_name = self.genFullTypeSql(value="\"binaryTagValue\"") + self.resCmp(input_sql, stb_name) + s_stb_d_tb_a_tag_list = self.genSqlList(stb_name=stb_name)[6] + self.multiThreadRun(self.genMultiThreadSeq(s_stb_d_tb_a_tag_list)) + tdSql.query(f"show tables;") + tdSql.checkRows(6) + + def sStbStbDdataDtsInsertMultiThreadCheckCase(self): + """ + thread input same stb tb, different ts + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + tb_name = tdCom.getLongName(7, "letters") + input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name, value="\"binaryTagValue\"") + self.resCmp(input_sql, stb_name) + s_stb_s_tb_d_ts_list = [(f'{stb_name} 0 "hkgjiwdj" id={tb_name} t0=f t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64 t7="vozamcts" t8=L"ncharTagValue"', 'dwpthv'), \ + (f'{stb_name} 0 "rljjrrul" id={tb_name} t0=False t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64 t7="bmcanhbs" t8=L"ncharTagValue"', 'dwpthv'), \ + (f'{stb_name} 0 "basanglx" id={tb_name} t0=False t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64 t7="enqkyvmb" t8=L"ncharTagValue"', 'dwpthv'), \ + (f'{stb_name} 0 "clsajzpp" id={tb_name} t0=F t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 
t5=11.12345f32 t6=22.123456789f64 t7="eivaegjk" t8=L"ncharTagValue"', 'dwpthv'), \ + (f'{stb_name} 0 "jitwseso" id={tb_name} t0=T t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64 t7="yhlwkddq" t8=L"ncharTagValue"', 'dwpthv')] + self.multiThreadRun(self.genMultiThreadSeq(s_stb_s_tb_d_ts_list)) + tdSql.query(f"show tables;") + tdSql.checkRows(1) if self.smlChildTableName_value == "ID" else tdSql.checkRows(6) + tdSql.query(f"select * from {stb_name}") + tdSql.checkRows(6) + + def sStbStbDdataDtsMtInsertMultiThreadCheckCase(self): + """ + thread input same stb tb, different ts, add col, mul tag + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + tb_name = tdCom.getLongName(7, "letters") + input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name, value="\"binaryTagValue\"") + self.resCmp(input_sql, stb_name) + s_stb_s_tb_d_ts_m_tag_list = self.genSqlList(stb_name=stb_name, tb_name=tb_name)[8] + self.multiThreadRun(self.genMultiThreadSeq(s_stb_s_tb_d_ts_m_tag_list)) + tdSql.query(f"show tables;") + tdSql.checkRows(1) if self.smlChildTableName_value == "ID" else tdSql.checkRows(2) + tdSql.query(f"select * from {stb_name}") + tdSql.checkRows(6) + tdSql.query(f"select * from {stb_name} where t8 is not NULL") + tdSql.checkRows(6) if self.smlChildTableName_value == "ID" else tdSql.checkRows(1) + + def sStbStbDdataDtsAtInsertMultiThreadCheckCase(self): + """ + thread input same stb tb, different ts, add tag, mul col + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + tb_name = tdCom.getLongName(7, "letters") + input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name, value="\"binaryTagValue\"") + self.resCmp(input_sql, stb_name) + s_stb_s_tb_d_ts_a_tag_list = [(f'{stb_name} 0 "clummqfy" id={tb_name} t0=False t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64 t7="hpxzrdiw" t8=L"ncharTagValue" t11=127i8 t10=L"ncharTagValue"', 'bokaxl'), \ + (f'{stb_name} 0 "yqeztggb" id={tb_name} t0=F t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64 t7="gdtblmrc" t8=L"ncharTagValue" t11=127i8 t10=L"ncharTagValue"', 'bokaxl'), \ + (f'{stb_name} 0 "gbkinqdk" id={tb_name} t0=f t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64 t7="iqniuvco" t8=L"ncharTagValue" t11=127i8 t10=L"ncharTagValue"', 'bokaxl'), \ + (f'{stb_name} 0 "ldxxejbd" id={tb_name} t0=f t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64 t7="vxkipags" t8=L"ncharTagValue" t11=127i8 t10=L"ncharTagValue"', 'bokaxl'), \ + (f'{stb_name} 0 "tlvzwjes" id={tb_name} t0=true t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64 t7="enwrlrtj" t8=L"ncharTagValue" t11=127i8 t10=L"ncharTagValue"', 'bokaxl')] + self.multiThreadRun(self.genMultiThreadSeq(s_stb_s_tb_d_ts_a_tag_list)) + tdSql.query(f"show tables;") + tdSql.checkRows(1) if self.smlChildTableName_value == "ID" else tdSql.checkRows(6) + tdSql.query(f"select * from {stb_name}") + tdSql.checkRows(6) + for t in ["t10", "t11"]: + tdSql.query(f"select * from {stb_name} where {t} is not NULL;") + tdSql.checkRows(0) if self.smlChildTableName_value == "ID" else tdSql.checkRows(5) + + def sStbDtbDdataDtsInsertMultiThreadCheckCase(self): + """ + thread input same stb, different tb, data, ts + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() 
function is running') + tdCom.cleanTb() + input_sql, stb_name = self.genFullTypeSql(value="\"binaryTagValue\"") + self.resCmp(input_sql, stb_name) + s_stb_d_tb_d_ts_list = self.genSqlList(stb_name=stb_name)[10] + self.multiThreadRun(self.genMultiThreadSeq(s_stb_d_tb_d_ts_list)) + tdSql.query(f"show tables;") + tdSql.checkRows(6) + + def sStbDtbDdataDtsMtInsertMultiThreadCheckCase(self): + """ + thread input same stb, different tb, data, ts, add col, mul tag + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + input_sql, stb_name = self.genFullTypeSql(value="\"binaryTagValue\"") + self.resCmp(input_sql, stb_name) + s_stb_d_tb_d_ts_m_tag_list = [(f'{stb_name} 0 "mnpmtzul" t0=False t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64', 'pcppkg'), \ + (f'{stb_name} 0 "zbvwckcd" t0=True t1=126i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64', 'pcppkg'), \ + (f'{stb_name} 0 "vymcjfwc" t0=False t1=125i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64', 'pcppkg'), \ + (f'{stb_name} 0 "laumkwfn" t0=False t1=124i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64', 'pcppkg'), \ + (f'{stb_name} 0 "nyultzxr" t0=false t1=123i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64', 'pcppkg')] + self.multiThreadRun(self.genMultiThreadSeq(s_stb_d_tb_d_ts_m_tag_list)) + tdSql.query(f"show tables;") + tdSql.checkRows(6) + + def test(self): + try: + input_sql = f'test_nchar 0 L"涛思数据" t0=f t1=L"涛思数据" t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64' + self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None) + except SchemalessError as err: + print(err.errno) + + def runAll(self): + self.initCheckCase() + self.boolTypeCheckCase() + self.symbolsCheckCase() + self.tsCheckCase() + self.openTstbTelnetTsCheckCase() + self.idSeqCheckCase() + self.idLetterCheckCase() + self.noIdCheckCase() + self.maxColTagCheckCase() + self.stbTbNameCheckCase() + self.idStartWithNumCheckCase() + self.nowTsCheckCase() + self.dateFormatTsCheckCase() + self.illegalTsCheckCase() + self.tbnameCheckCase() + self.tagNameLengthCheckCase() + # self.tagValueLengthCheckCase() + self.colValueLengthCheckCase() + self.tagColIllegalValueCheckCase() + self.blankCheckCase() + self.duplicateIdTagColInsertCheckCase() + self.noIdStbExistCheckCase() + self.duplicateInsertExistCheckCase() + self.tagColBinaryNcharLengthCheckCase() + self.tagColAddDupIDCheckCase() + self.tagColAddCheckCase() + self.tagMd5Check() + # self.tagColNcharMaxLengthCheckCase() + # self.batchInsertCheckCase() + # self.multiInsertCheckCase(10) + self.batchErrorInsertCheckCase() + self.multiColsInsertCheckCase() + self.blankColInsertCheckCase() + self.blankTagInsertCheckCase() + self.chineseCheckCase() + self.multiFieldCheckCase() + self.spellCheckCase() + self.pointTransCheckCase() + self.defaultTypeCheckCase() + self.tbnameTagsColsNameCheckCase() + # # # MultiThreads + # self.stbInsertMultiThreadCheckCase() + # self.sStbStbDdataInsertMultiThreadCheckCase() + # self.sStbStbDdataAtInsertMultiThreadCheckCase() + # self.sStbStbDdataMtInsertMultiThreadCheckCase() + # self.sStbDtbDdataInsertMultiThreadCheckCase() + # self.sStbDtbDdataMtInsertMultiThreadCheckCase() + # self.sStbDtbDdataAtInsertMultiThreadCheckCase() + # self.sStbStbDdataDtsInsertMultiThreadCheckCase() + # # 
self.sStbStbDdataDtsMtInsertMultiThreadCheckCase() + # self.sStbStbDdataDtsAtInsertMultiThreadCheckCase() + # self.sStbDtbDdataDtsInsertMultiThreadCheckCase() + # self.sStbDtbDdataDtsMtInsertMultiThreadCheckCase() + + def run(self): + print("running {}".format(__file__)) + + try: + self.createDb() + self.runAll() + # self.createDb(protocol="telnet-tcp") + # self.initCheckCase('telnet-tcp') + # self.boolTypeCheckCase('telnet-tcp') + # self.symbolsCheckCase('telnet-tcp') + # self.idSeqCheckCase('telnet-tcp') + # self.idLetterCheckCase('telnet-tcp') + # self.noIdCheckCase('telnet-tcp') + # self.stbTbNameCheckCase('telnet-tcp') + # self.idStartWithNumCheckCase('telnet-tcp') + # self.pointTransCheckCase('telnet-tcp') + # self.tcpKeywordsCheckCase() + except Exception as err: + print(''.join(traceback.format_exception(None, err, err.__traceback__))) + raise err + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/system-test/1-insert/performanceInsert.json b/tests/system-test/1-insert/performanceInsert.json new file mode 100644 index 0000000000000000000000000000000000000000..de410c30f2fa1846d0318def447d1d09aff2cfea --- /dev/null +++ b/tests/system-test/1-insert/performanceInsert.json @@ -0,0 +1,79 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos/", + "host": "test216", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 8, + "thread_count_create_tbl": 8, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 1000, + "num_of_records_per_req": 100000, + "databases": [ + { + "dbinfo": { + "name": "db", + "drop": "yes", + "vgroups": 24 + }, + "super_tables": [ + { + "name": "stb", + "child_table_exists": "no", + "childtable_count": 100000, + "childtable_prefix": "stb_", + "auto_create_table": "no", + "batch_create_tbl_num": 50000, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 5, + "interlace_rows": 100000, + "insert_interval": 0, + "max_sql_len": 10000000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 10, + "start_timestamp": "2022-05-01 00:00:00.000", + "sample_format": "csv", + "use_sample_ts": "no", + "tags_file": "", + "columns": [ + { + "type": "INT" + }, + { + "type": "TINYINT", + "count": 1 + }, + {"type": "DOUBLE"}, + + { + "type": "BINARY", + "len": 40, + "count": 1 + }, + { + "type": "nchar", + "len": 20, + "count": 1 + } + ], + "tags": [ + { + "type": "TINYINT", + "count": 1 + }, + { + "type": "BINARY", + "len": 16, + "count": 1 + } + ] + } + ] + } + ] +} \ No newline at end of file diff --git a/tests/system-test/1-insert/performanceQuery.json b/tests/system-test/1-insert/performanceQuery.json new file mode 100644 index 0000000000000000000000000000000000000000..fe2991bd0f5f74401b437e24b6a6f8e4cd5ed721 --- /dev/null +++ b/tests/system-test/1-insert/performanceQuery.json @@ -0,0 +1,42 @@ +{ + "filetype": "query", + "cfgdir": "/etc/taos", + "host": "test216", + "port": 6030, + "user": "root", + "password": "taosdata", + "confirm_parameter_prompt": "no", + "databases": "db", + "query_times": 100, + "query_mode": "taosc", + "specified_table_query": { + "query_interval": 0, + "threads": 8, + "sqls": [ + { + "sql": "select count(*) from stb_0 ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from stb_1 ", + "result": "./query_res1.txt" + }, + { + "sql": "select last(*) from stb_2 ", + "result": 
"./query_res2.txt" + }, + { + "sql": "select first(*) from stb_3 ", + "result": "./query_res3.txt" + }, + { + "sql": "select avg(c0),min(c2),max(c1) from stb_4", + "result": "./query_res4.txt" + }, + { + "sql": "select avg(c0),min(c2),max(c1) from stb_5 where ts <= '2022-05-01 20:00:00.500' and ts >= '2022-05-01 00:00:00.000' ", + "result": "./query_res5.txt" + } + ] + } +} \ No newline at end of file diff --git a/tests/system-test/1-insert/test_stmt_insert_query_ex.py b/tests/system-test/1-insert/test_stmt_insert_query_ex.py new file mode 100644 index 0000000000000000000000000000000000000000..376b60d615941323bedcf40d591817e30c8da05a --- /dev/null +++ b/tests/system-test/1-insert/test_stmt_insert_query_ex.py @@ -0,0 +1,282 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import os +import threading as thd +import multiprocessing as mp +from numpy.lib.function_base import insert +import taos +from taos import * +from util.log import * +from util.cases import * +from util.sql import * +import numpy as np +import datetime as dt +from datetime import datetime +from ctypes import * +import time +# constant define +WAITS = 5 # wait seconds + +class TDTestCase: + # + # --------------- main frame ------------------- + def caseDescription(self): + ''' + limit and offset keyword function test cases; + case1: limit offset base function test + case2: offset return valid + ''' + return + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosd" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root)-len("/build/bin")] + break + return buildPath + + # init + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor()) + # tdSql.prepare() + # self.create_tables(); + self.ts = 1500000000000 + + # stop + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + + # --------------- case ------------------- + + + def newcon(self,host,cfg): + user = "root" + password = "taosdata" + port =6030 + con=taos.connect(host=host, user=user, password=password, config=cfg ,port=port) + print(con) + return con + + def test_stmt_insert_multi(self,conn): + # type: (TaosConnection) -> None + + dbname = "pytest_taos_stmt_multi" + try: + conn.execute("drop database if exists %s" % dbname) + conn.execute("create database if not exists %s" % dbname) + conn.select_db(dbname) + + conn.execute( + "create table if not exists log(ts timestamp, bo bool, nil tinyint, ti tinyint, si smallint, ii int,\ + bi bigint, tu tinyint unsigned, su smallint unsigned, iu int unsigned, bu bigint unsigned, \ + ff float, dd double, bb binary(100), nn nchar(100), tt timestamp)", + ) + # conn.load_table_info("log") + + start = datetime.now() + stmt = conn.statement("insert into log values(?, ?, ?, ?, ?, ?, ?, 
?, ?, ?, ?, ?, ?, ?, ?, ?)") + + params = new_multi_binds(16) + params[0].timestamp((1626861392589, 1626861392590, 1626861392591)) + params[1].bool((True, None, False)) + params[2].tinyint([-128, -128, None]) # -128 is tinyint null + params[3].tinyint([0, 127, None]) + params[4].smallint([3, None, 2]) + params[5].int([3, 4, None]) + params[6].bigint([3, 4, None]) + params[7].tinyint_unsigned([3, 4, None]) + params[8].smallint_unsigned([3, 4, None]) + params[9].int_unsigned([3, 4, None]) + params[10].bigint_unsigned([3, 4, None]) + params[11].float([3, None, 1]) + params[12].double([3, None, 1.2]) + params[13].binary(["abc", "dddafadfadfadfadfa", None]) + params[14].nchar(["涛思数据", None, "a long string with 中文字符"]) + params[15].timestamp([None, None, 1626861392591]) + # print(type(stmt)) + stmt.bind_param_batch(params) + stmt.execute() + end = datetime.now() + print("elapsed time: ", end - start) + assert stmt.affected_rows == 3 + + #query + querystmt=conn.statement("select ?,bu from log") + queryparam=new_bind_params(1) + print(type(queryparam)) + queryparam[0].binary("ts") + querystmt.bind_param(queryparam) + querystmt.execute() + result=querystmt.use_result() + # rows=result.fetch_all() + # print( querystmt.use_result()) + + # result = conn.query("select * from log") + rows=result.fetch_all() + # rows=result.fetch_all() + print(rows) + assert rows[1][0] == "ts" + assert rows[0][1] == 3 + + #query + querystmt1=conn.statement("select * from log where bu < ?") + queryparam1=new_bind_params(1) + print(type(queryparam1)) + queryparam1[0].int(4) + querystmt1.bind_param(queryparam1) + querystmt1.execute() + result1=querystmt1.use_result() + rows1=result1.fetch_all() + assert str(rows1[0][0]) == "2021-07-21 17:56:32.589000" + assert rows1[0][10] == 3 + + + stmt.close() + + # conn.execute("drop database if exists %s" % dbname) + conn.close() + + except Exception as err: + # conn.execute("drop database if exists %s" % dbname) + conn.close() + raise err + + def test_stmt_set_tbname_tag(self,conn): + dbname = "pytest_taos_stmt_set_tbname_tag" + + try: + conn.execute("drop database if exists %s" % dbname) + conn.execute("create database if not exists %s PRECISION 'us' " % dbname) + conn.select_db(dbname) + conn.execute("create table if not exists log(ts timestamp, bo bool, nil tinyint, ti tinyint, si smallint, ii int,\ + bi bigint, tu tinyint unsigned, su smallint unsigned, iu int unsigned, bu bigint unsigned, \ + ff float, dd double, bb binary(100), nn nchar(100), tt timestamp) tags (t1 timestamp, t2 bool,\ + t3 tinyint, t4 tinyint, t5 smallint, t6 int, t7 bigint, t8 tinyint unsigned, t9 smallint unsigned, \ + t10 int unsigned, t11 bigint unsigned, t12 float, t13 double, t14 binary(100), t15 nchar(100), t16 timestamp)") + + stmt = conn.statement("insert into ? using log tags (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?) 
\ + values (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)") + tags = new_bind_params(16) + tags[0].timestamp(1626861392589123, PrecisionEnum.Microseconds) + tags[1].bool(True) + tags[2].null() + tags[3].tinyint(2) + tags[4].smallint(3) + tags[5].int(4) + tags[6].bigint(5) + tags[7].tinyint_unsigned(6) + tags[8].smallint_unsigned(7) + tags[9].int_unsigned(8) + tags[10].bigint_unsigned(9) + tags[11].float(10.1) + tags[12].double(10.11) + tags[13].binary("hello") + tags[14].nchar("stmt") + tags[15].timestamp(1626861392589, PrecisionEnum.Milliseconds) + stmt.set_tbname_tags("tb1", tags) + params = new_multi_binds(16) + params[0].timestamp((1626861392589111, 1626861392590111, 1626861392591111)) + params[1].bool((True, None, False)) + params[2].tinyint([-128, -128, None]) # -128 is tinyint null + params[3].tinyint([0, 127, None]) + params[4].smallint([3, None, 2]) + params[5].int([3, 4, None]) + params[6].bigint([3, 4, None]) + params[7].tinyint_unsigned([3, 4, None]) + params[8].smallint_unsigned([3, 4, None]) + params[9].int_unsigned([3, 4, None]) + params[10].bigint_unsigned([3, 4, 5]) + params[11].float([3, None, 1]) + params[12].double([3, None, 1.2]) + params[13].binary(["abc", "dddafadfadfadfadfa", None]) + params[14].nchar(["涛思数据", None, "a? long string with 中文字符"]) + params[15].timestamp([None, None, 1626861392591]) + + stmt.bind_param_batch(params) + stmt.execute() + + assert stmt.affected_rows == 3 + + #query + querystmt1=conn.statement("select * from log where bu < ?") + queryparam1=new_bind_params(1) + print(type(queryparam1)) + queryparam1[0].int(5) + querystmt1.bind_param(queryparam1) + querystmt1.execute() + result1=querystmt1.use_result() + rows1=result1.fetch_all() + print("1",rows1) + + querystmt2=conn.statement("select abs(?) from log where bu < ?") + queryparam2=new_bind_params(2) + print(type(queryparam2)) + queryparam2[0].int(5) + queryparam2[1].int(5) + querystmt2.bind_param(queryparam2) + querystmt2.execute() + result2=querystmt2.use_result() + rows2=result2.fetch_all() + print("2",rows2) + + querystmt3=conn.statement("select abs(?) from log where nn= 'a? long string with 中文字符' ") + queryparam3=new_bind_params(1) + print(type(queryparam3)) + queryparam3[0].int(5) + querystmt3.bind_param(queryparam3) + querystmt3.execute() + result3=querystmt3.use_result() + rows3=result3.fetch_all() + print("3",rows3) + # assert str(rows1[0][0]) == "2021-07-21 17:56:32.589111" + # assert rows1[0][10] == 3 + # assert rows1[1][10] == 4 + + # conn.execute("drop database if exists %s" % dbname) + conn.close() + + except Exception as err: + # conn.execute("drop database if exists %s" % dbname) + conn.close() + raise err + + def run(self): + buildPath = self.getBuildPath() + config = buildPath+ "../sim/dnode1/cfg/" + host="localhost" + connectstmt=self.newcon(host,config) + self.test_stmt_insert_multi(connectstmt) + connectstmt=self.newcon(host,config) + self.test_stmt_set_tbname_tag(connectstmt) + + return + + +# add case with filename +# +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) \ No newline at end of file diff --git a/tests/system-test/1-insert/test_stmt_muti_insert_query.py b/tests/system-test/1-insert/test_stmt_muti_insert_query.py new file mode 100644 index 0000000000000000000000000000000000000000..486bcd806219c73fa344e5422727c46fe03cde5e --- /dev/null +++ b/tests/system-test/1-insert/test_stmt_muti_insert_query.py @@ -0,0 +1,181 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. 
+# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import os +import threading as thd +import multiprocessing as mp +from numpy.lib.function_base import insert +import taos +from taos import * +from util.log import * +from util.cases import * +from util.sql import * +import numpy as np +import datetime as dt +from datetime import datetime +from ctypes import * +import time +# constant define +WAITS = 5 # wait seconds + +class TDTestCase: + # + # --------------- main frame ------------------- + def caseDescription(self): + ''' + limit and offset keyword function test cases; + case1: limit offset base function test + case2: offset return valid + ''' + return + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosd" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root)-len("/build/bin")] + break + return buildPath + + # init + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor()) + # tdSql.prepare() + # self.create_tables(); + self.ts = 1500000000000 + + # stop + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + + # --------------- case ------------------- + + + def newcon(self,host,cfg): + user = "root" + password = "taosdata" + port =6030 + con=taos.connect(host=host, user=user, password=password, config=cfg ,port=port) + print(con) + return con + + def test_stmt_insert_multi(self,conn): + # type: (TaosConnection) -> None + + dbname = "pytest_taos_stmt_multi" + try: + conn.execute("drop database if exists %s" % dbname) + conn.execute("create database if not exists %s" % dbname) + conn.select_db(dbname) + + conn.execute( + "create table if not exists log(ts timestamp, bo bool, nil tinyint, ti tinyint, si smallint, ii int,\ + bi bigint, tu tinyint unsigned, su smallint unsigned, iu int unsigned, bu bigint unsigned, \ + ff float, dd double, bb binary(100), nn nchar(100), tt timestamp)", + ) + # conn.load_table_info("log") + + start = datetime.now() + stmt = conn.statement("insert into log values(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)") + + params = new_multi_binds(16) + params[0].timestamp((1626861392589, 1626861392590, 1626861392591)) + params[1].bool((True, None, False)) + params[2].tinyint([-128, -128, None]) # -128 is tinyint null + params[3].tinyint([0, 127, None]) + params[4].smallint([3, None, 2]) + params[5].int([3, 4, None]) + params[6].bigint([3, 4, None]) + params[7].tinyint_unsigned([3, 4, None]) + params[8].smallint_unsigned([3, 4, None]) + params[9].int_unsigned([3, 4, None]) + params[10].bigint_unsigned([3, 4, None]) + params[11].float([3, None, 1]) + params[12].double([3, None, 1.2]) + params[13].binary(["abc", "dddafadfadfadfadfa", None]) + params[14].nchar(["涛思数据", None, "a long string with 中文字符"]) + params[15].timestamp([None, None, 1626861392591]) + # print(type(stmt)) + stmt.bind_param_batch(params) + stmt.execute() + end = 
datetime.now() + print("elapsed time: ", end - start) + assert stmt.affected_rows == 3 + + #query + querystmt=conn.statement("select ?,bu from log") + queryparam=new_bind_params(1) + print(type(queryparam)) + queryparam[0].binary("ts") + querystmt.bind_param(queryparam) + querystmt.execute() + result=querystmt.use_result() + # rows=result.fetch_all() + # print( querystmt.use_result()) + + # result = conn.query("select * from log") + rows=result.fetch_all() + # rows=result.fetch_all() + print(rows) + assert rows[1][0] == "ts" + assert rows[0][1] == 3 + + #query + querystmt1=conn.statement("select * from log where bu < ?") + queryparam1=new_bind_params(1) + print(type(queryparam1)) + queryparam1[0].int(4) + querystmt1.bind_param(queryparam1) + querystmt1.execute() + result1=querystmt1.use_result() + rows1=result1.fetch_all() + print(rows1) + assert str(rows1[0][0]) == "2021-07-21 17:56:32.589000" + assert rows1[0][10] == 3 + + + stmt.close() + + # conn.execute("drop database if exists %s" % dbname) + conn.close() + + except Exception as err: + # conn.execute("drop database if exists %s" % dbname) + conn.close() + raise err + + def run(self): + buildPath = self.getBuildPath() + config = buildPath+ "../sim/dnode1/cfg/" + host="localhost" + connectstmt=self.newcon(host,config) + self.test_stmt_insert_multi(connectstmt) + return + + +# add case with filename +# +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) \ No newline at end of file diff --git a/tests/system-test/1-insert/test_stmt_set_tbname_tag.py b/tests/system-test/1-insert/test_stmt_set_tbname_tag.py new file mode 100644 index 0000000000000000000000000000000000000000..54d5cfbafb0b3f98d55f310accccb19ef693c08b --- /dev/null +++ b/tests/system-test/1-insert/test_stmt_set_tbname_tag.py @@ -0,0 +1,176 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import os +import threading as thd +import multiprocessing as mp +from numpy.lib.function_base import insert +import taos +from taos import * +from util.log import * +from util.cases import * +from util.sql import * +import numpy as np +import datetime as dt +from datetime import datetime +from ctypes import * +import time +# constant define +WAITS = 5 # wait seconds + +class TDTestCase: + # + # --------------- main frame ------------------- + def caseDescription(self): + ''' + limit and offset keyword function test cases; + case1: limit offset base function test + case2: offset return valid + ''' + return + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosd" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root)-len("/build/bin")] + break + return buildPath + + # init + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor()) + # tdSql.prepare() + # self.create_tables(); + self.ts = 1500000000000 + + # stop + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + + # --------------- case ------------------- + + + def newcon(self,host,cfg): + user = "root" + password = "taosdata" + port =6030 + con=taos.connect(host=host, user=user, password=password, config=cfg ,port=port) + print(con) + return con + + def test_stmt_set_tbname_tag(self,conn): + dbname = "pytest_taos_stmt_set_tbname_tag" + + try: + conn.execute("drop database if exists %s" % dbname) + conn.execute("create database if not exists %s PRECISION 'us' " % dbname) + conn.select_db(dbname) + conn.execute("create table if not exists log(ts timestamp, bo bool, nil tinyint, ti tinyint, si smallint, ii int,\ + bi bigint, tu tinyint unsigned, su smallint unsigned, iu int unsigned, bu bigint unsigned, \ + ff float, dd double, bb binary(100), nn nchar(100), tt timestamp , vc varchar(100)) tags (t1 timestamp, t2 bool,\ + t3 tinyint, t4 tinyint, t5 smallint, t6 int, t7 bigint, t8 tinyint unsigned, t9 smallint unsigned, \ + t10 int unsigned, t11 bigint unsigned, t12 float, t13 double, t14 binary(100), t15 nchar(100), t16 timestamp)") + + stmt = conn.statement("insert into ? using log tags (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?) 
\
+                                    values (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)")
+            tags = new_bind_params(16)
+            tags[0].timestamp(1626861392589123, PrecisionEnum.Microseconds)
+            tags[1].bool(True)
+            tags[2].null()
+            tags[3].tinyint(2)
+            tags[4].smallint(3)
+            tags[5].int(4)
+            tags[6].bigint(5)
+            tags[7].tinyint_unsigned(6)
+            tags[8].smallint_unsigned(7)
+            tags[9].int_unsigned(8)
+            tags[10].bigint_unsigned(9)
+            tags[11].float(10.1)
+            tags[12].double(10.11)
+            tags[13].binary("hello")
+            tags[14].nchar("stmt")
+            tags[15].timestamp(1626861392589, PrecisionEnum.Milliseconds)
+            stmt.set_tbname_tags("tb1", tags)
+            # 17 columns (16 plus the added vc varchar) means 17 value placeholders, so 17 binds
+            params = new_multi_binds(17)
+            params[0].timestamp((1626861392589111, 1626861392590111, 1626861392591111))
+            params[1].bool((True, None, False))
+            params[2].tinyint([-128, -128, None]) # -128 is tinyint null
+            params[3].tinyint([0, 127, None])
+            params[4].smallint([3, None, 2])
+            params[5].int([3, 4, None])
+            params[6].bigint([3, 4, None])
+            params[7].tinyint_unsigned([3, 4, None])
+            params[8].smallint_unsigned([3, 4, None])
+            params[9].int_unsigned([3, 4, None])
+            params[10].bigint_unsigned([3, 4, 5])
+            params[11].float([3, None, 1])
+            params[12].double([3, None, 1.2])
+            params[13].binary(["abc", "dddafadfadfadfadfa", None])
+            params[14].nchar(["涛思数据", None, "a long string with 中文字符"])
+            params[15].timestamp([None, None, 1626861392591])
+            params[16].binary(["涛思数据16", None, "a long string with 中文-字符"])
+
+            stmt.bind_param_batch(params)
+            stmt.execute()
+
+            assert stmt.affected_rows == 3
+
+            #query
+            querystmt1=conn.statement("select * from log where bu < ?")
+            queryparam1=new_bind_params(1)
+            print(type(queryparam1))
+            queryparam1[0].int(5)
+            querystmt1.bind_param(queryparam1)
+            querystmt1.execute()
+            result1=querystmt1.use_result()
+            rows1=result1.fetch_all()
+            print(rows1)
+            # assert str(rows1[0][0]) == "2021-07-21 17:56:32.589111"
+            # assert rows1[0][10] == 3
+            # assert rows1[1][10] == 4
+
+            # conn.execute("drop database if exists %s" % dbname)
+            conn.close()
+
+        except Exception as err:
+            # conn.execute("drop database if exists %s" % dbname)
+            conn.close()
+            raise err
+
+    def run(self):
+        buildPath = self.getBuildPath()
+        config = buildPath + "../sim/dnode1/cfg/"
+        host = "localhost"
+        connectstmt = self.newcon(host, config)
+        self.test_stmt_set_tbname_tag(connectstmt)
+
+        return
+
+
+# add case with filename
+#
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
\ No newline at end of file
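A side note on the binding fix above: `new_multi_binds(n)` must allocate one bind slot per `?` in the VALUES clause, otherwise the final `params[i]` access raises IndexError before the statement is ever executed. A minimal sketch of that invariant, assuming only the `taos` helpers these tests already import (`count_placeholders` is a hypothetical helper, not part of the patch):

    from taos import *  # same import surface as the tests above

    def count_placeholders(values_clause):
        # one multi-bind slot is needed per "?" marker, e.g. 17 in the insert above
        return values_clause.count("?")

    params = new_multi_binds(count_placeholders("(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)"))
    params[16].binary(["ok"])  # the 17th slot now exists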
diff --git a/tests/system-test/2-query/To_iso8601.py b/tests/system-test/2-query/To_iso8601.py
index cd22ffb90c1fbf86e81dfabecbcb1ae0e536cd39..57bcca638ce26aace35d76707c12699fe2e8d1c4 100644
--- a/tests/system-test/2-query/To_iso8601.py
+++ b/tests/system-test/2-query/To_iso8601.py
@@ -95,7 +95,7 @@ class TDTestCase:
         # tdSql.query("select to_iso8601(-1) from ntb")
         tdSql.query("select to_iso8601(9223372036854775807) from ntb")
         tdSql.checkRows(3)
-
+        # bug TD-14896
         # tdSql.query("select to_iso8601(10000000000) from ntb")
         # tdSql.checkData(0,0,None)
         # tdSql.query("select to_iso8601(-1) from ntb")
@@ -106,11 +106,6 @@ class TDTestCase:
         tdSql.error("select to_iso8601(1.5) from db.ntb")
         tdSql.error("select to_iso8601('a') from ntb")
         tdSql.error("select to_iso8601(c2) from ntb")
-
-
-
-
-
         tdSql.query("select to_iso8601(now) from stb")
         tdSql.query("select to_iso8601(now()) from stb")
         tdSql.checkRows(3)
@@ -126,7 +121,7 @@ class TDTestCase:
         tdSql.checkRows(3)
         tdSql.query("select to_iso8601(ts)+'a' from stb ")
         tdSql.checkRows(3)
-        # tdSql.query()
+        tdSql.query("select to_iso8601(today()) *null from stb")
         tdSql.checkRows(3)
         tdSql.checkData(0,0,None)
@@ -152,7 +147,9 @@ class TDTestCase:
         tdSql.checkRows(3)
         tdSql.checkData(0,0,None)
+        # bug TD-14896
         # tdSql.query("select to_iso8601(-1) from ntb")
+        # tdSql.checkRows(3)
diff --git a/tests/system-test/2-query/apercentile.py b/tests/system-test/2-query/apercentile.py
new file mode 100644
index 0000000000000000000000000000000000000000..150c4d3f17e30ab5f4d25fb19af2bb80ee202776
--- /dev/null
+++ b/tests/system-test/2-query/apercentile.py
@@ -0,0 +1,107 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+from util.log import *
+from util.cases import *
+from util.sql import *
+import numpy as np
+
+
+class TDTestCase:
+    def init(self, conn, logSql):
+        tdLog.debug("start to execute %s" % __file__)
+        tdSql.init(conn.cursor())
+        self.rowNum = 10
+        self.ts = 1537146000000
+
+    def check_apercentile(self,data,expect_data,param,percent,column):
+        # apercentile() is an approximation, so for both the "default" and the
+        # "t-digest" algorithms accept any value within 20% relative error of
+        # the exact percentile() result.
+        if abs(expect_data - data) <= abs(expect_data) * 0.2:
+            tdLog.info(f"apercentile function values check success with col{column}, param = {param},percent = {percent}")
+        else:
+            tdLog.notice(f"apercentile function value is not as expected with col{column}, param = {param},percent = {percent}")
+            sys.exit(1)
+
+    def run(self):
+        tdSql.prepare()
+
+        intData = []
+        floatData = []
+        percent_list = [0,50,100]
+        param_list = ['default','t-digest']
+        tdSql.execute('''create table test(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double,
+                    col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned)''')
+        for i in range(self.rowNum):
+            tdSql.execute("insert into test values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)"
+                        % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1))
+            intData.append(i + 1)
+            floatData.append(i + 0.1)
+
+        # percentile verification
+
+        tdSql.error("select apercentile(ts ,20) from test")
+        tdSql.error("select apercentile(col7 ,20) from test")
+        tdSql.error("select apercentile(col8 ,20) from test")
+        tdSql.error("select apercentile(col9 ,20) from test")
+
+        column_list = [1,2,3,4,5,6,11,12,13,14]
+
+        for i in column_list:
+            for j in percent_list:
+                for k in param_list:
+                    tdSql.query(f"select apercentile(col{i},{j},'{k}') from test")
+                    data = tdSql.getData(0, 0)
+                    tdSql.query(f"select percentile(col{i},{j}) from test")
+                    expect_data = tdSql.getData(0, 0)
+                    self.check_apercentile(data,expect_data,k,j,i)
+
+        error_param_list = [-1,101,'"a"']
+        for i in error_param_list:
+            tdSql.error(f'select apercentile(col1,{i}) from test')
+
+        tdSql.execute("create table meters (ts timestamp, voltage int) tags(loc nchar(20))")
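+        # The super-table check below reuses check_apercentile: on subtable t0 the
+        # approximate value is compared against the exact percentile() with the same
+        # 20% tolerance, while on the super table "meters" only the result shape
+        # (a single row) is asserted.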
+        tdSql.execute("create table t0 using meters tags('beijing')")
+        tdSql.execute("create table t1 using meters tags('shanghai')")
+        for i in range(self.rowNum):
+            tdSql.execute("insert into t0 values(%d, %d)" % (self.ts + i, i + 1))
+            tdSql.execute("insert into t1 values(%d, %d)" % (self.ts + i, i + 1))
+
+        column_list = ['voltage']
+        for i in column_list:
+            for j in percent_list:
+                for k in param_list:
+                    tdSql.query(f"select apercentile({i}, {j},'{k}') from t0")
+                    data = tdSql.getData(0, 0)
+                    tdSql.query(f"select percentile({i},{j}) from t0")
+                    expect_data = tdSql.getData(0,0)
+                    self.check_apercentile(data,expect_data,k,j,i)
+                    tdSql.query(f"select apercentile({i}, {j},'{k}') from meters")
+                    tdSql.checkRows(1)
+        table_list = ["meters","t0"]
+        for i in error_param_list:
+            for j in table_list:
+                for k in column_list:
+                    tdSql.error(f'select apercentile({k},{i}) from {j}')
+
+    def stop(self):
+        tdSql.close()
+        tdLog.success("%s successfully executed" % __file__)
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
diff --git a/tests/system-test/2-query/avg.py b/tests/system-test/2-query/avg.py
new file mode 100644
index 0000000000000000000000000000000000000000..20ee6df7fcf94e3b02641b735c6ad7fd1ce862ff
--- /dev/null
+++ b/tests/system-test/2-query/avg.py
@@ -0,0 +1,424 @@
+import taos
+import sys
+import datetime
+import inspect
+
+from util.log import *
+from util.sql import *
+from util.cases import *
+
+class TDTestCase:
+    updatecfgDict = {'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 ,
+    "jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143,
+    "wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"fnDebugFlag":143}
+    def init(self, conn, logSql):
+        tdLog.debug(f"start to execute {__file__}")
+        tdSql.init(conn.cursor(), True)
+
+    def prepare_datas(self):
+        tdSql.execute(
+            '''create table stb1
+            (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
+            tags (t1 int)
+            '''
+        )
+
+        tdSql.execute(
+            '''
+            create table t1
+            (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
+            '''
+        )
+        for i in range(4):
+            tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
+
+        for i in range(9):
+            tdSql.execute(
+                f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
+            )
+            tdSql.execute(
+                f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
+            )
+        tdSql.execute("insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
+        tdSql.execute("insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
+        tdSql.execute("insert into ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
+        tdSql.execute("insert into ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
+
+        tdSql.execute("insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+        tdSql.execute("insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+        tdSql.execute("insert into 
ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + + tdSql.execute( + f'''insert into t1 values + ( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a ) + ( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a ) + ( '2021-01-01 01:01:06.000', 3, 33333, 333, 33, 3.33, 33.33, 0, "binary3", "nchar3", now()+3a ) + ( '2021-05-07 01:01:10.000', 4, 44444, 444, 44, 4.44, 44.44, 1, "binary4", "nchar4", now()+4a ) + ( '2021-07-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( '2021-09-30 01:01:16.000', 5, 55555, 555, 55, 5.55, 55.55, 0, "binary5", "nchar5", now()+5a ) + ( '2022-02-01 01:01:20.000', 6, 66666, 666, 66, 6.66, 66.66, 1, "binary6", "nchar6", now()+6a ) + ( '2022-10-28 01:01:26.000', 7, 00000, 000, 00, 0.00, 00.00, 1, "binary7", "nchar7", "1970-01-01 08:00:00.000" ) + ( '2022-12-01 01:01:30.000', 8, -88888, -888, -88, -8.88, -88.88, 0, "binary8", "nchar8", "1969-01-01 01:00:00.000" ) + ( '2022-12-31 01:01:36.000', 9, -99999999999999999, -999, -99, -9.99, -999999999999999999999.99, 1, "binary9", "nchar9", "1900-01-01 00:00:00.000" ) + ( '2023-02-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ''' + ) + + def check_avg(self ,origin_query , check_query): + avg_result = tdSql.getResult(origin_query) + origin_result = tdSql.getResult(check_query) + + check_status = True + for row_index , row in enumerate(avg_result): + for col_index , elem in enumerate(row): + if avg_result[row_index][col_index] != origin_result[row_index][col_index]: + check_status = False + if not check_status: + tdLog.notice("avg function value has not as expected , sql is \"%s\" "%origin_query ) + sys.exit(1) + else: + tdLog.info("avg value check pass , it work as expected ,sql is \"%s\" "%check_query ) + + def test_errors(self): + error_sql_lists = [ + "select avg from t1", + # "select avg(-+--+c1) from t1", + # "select +-avg(c1) from t1", + # "select ++-avg(c1) from t1", + # "select ++--avg(c1) from t1", + # "select - -avg(c1)*0 from t1", + # "select avg(tbname+1) from t1 ", + "select avg(123--123)==1 from t1", + "select avg(c1) as 'd1' from t1", + "select avg(c1 ,c2 ) from t1", + "select avg(c1 ,NULL) from t1", + "select avg(,) from t1;", + "select avg(avg(c1) ab from t1)", + "select avg(c1) as int from t1", + "select avg from stb1", + # "select avg(-+--+c1) from stb1", + # "select +-avg(c1) from stb1", + # "select ++-avg(c1) from stb1", + # "select ++--avg(c1) from stb1", + # "select - -avg(c1)*0 from stb1", + # "select avg(tbname+1) from stb1 ", + "select avg(123--123)==1 from stb1", + "select avg(c1) as 'd1' from stb1", + "select avg(c1 ,c2 ) from stb1", + "select avg(c1 ,NULL) from stb1", + "select avg(,) from stb1;", + "select avg(avg(c1) ab from stb1)", + "select avg(c1) as int from stb1" + ] + for error_sql in error_sql_lists: + tdSql.error(error_sql) + + def support_types(self): + type_error_sql_lists = [ + "select avg(ts) from t1" , + "select avg(c7) from t1", + "select avg(c8) from t1", + "select avg(c9) from t1", + "select avg(ts) from ct1" , + "select avg(c7) from ct1", + "select avg(c8) from ct1", + "select avg(c9) from ct1", + "select avg(ts) from ct3" , + "select avg(c7) from ct3", + "select avg(c8) from ct3", + "select avg(c9) from ct3", + "select avg(ts) from ct4" , + "select avg(c7) from ct4", + "select avg(c8) from ct4", + "select 
avg(c9) from ct4", + "select avg(ts) from stb1" , + "select avg(c7) from stb1", + "select avg(c8) from stb1", + "select avg(c9) from stb1" , + + "select avg(ts) from stbbb1" , + "select avg(c7) from stbbb1", + + "select avg(ts) from tbname", + "select avg(c9) from tbname" + + ] + + for type_sql in type_error_sql_lists: + tdSql.error(type_sql) + + + type_sql_lists = [ + "select avg(c1) from t1", + "select avg(c2) from t1", + "select avg(c3) from t1", + "select avg(c4) from t1", + "select avg(c5) from t1", + "select avg(c6) from t1", + + "select avg(c1) from ct1", + "select avg(c2) from ct1", + "select avg(c3) from ct1", + "select avg(c4) from ct1", + "select avg(c5) from ct1", + "select avg(c6) from ct1", + + "select avg(c1) from ct3", + "select avg(c2) from ct3", + "select avg(c3) from ct3", + "select avg(c4) from ct3", + "select avg(c5) from ct3", + "select avg(c6) from ct3", + + "select avg(c1) from stb1", + "select avg(c2) from stb1", + "select avg(c3) from stb1", + "select avg(c4) from stb1", + "select avg(c5) from stb1", + "select avg(c6) from stb1", + + "select avg(c6) as alisb from stb1", + "select avg(c6) alisb from stb1", + ] + + for type_sql in type_sql_lists: + tdSql.query(type_sql) + + def basic_avg_function(self): + + # basic query + tdSql.query("select c1 from ct3") + tdSql.checkRows(0) + tdSql.query("select c1 from t1") + tdSql.checkRows(12) + tdSql.query("select c1 from stb1") + tdSql.checkRows(25) + + # used for empty table , ct3 is empty + tdSql.query("select avg(c1) from ct3") + tdSql.checkRows(0) + tdSql.query("select avg(c2) from ct3") + tdSql.checkRows(0) + tdSql.query("select avg(c3) from ct3") + tdSql.checkRows(0) + tdSql.query("select avg(c4) from ct3") + tdSql.checkRows(0) + tdSql.query("select avg(c5) from ct3") + tdSql.checkRows(0) + tdSql.query("select avg(c6) from ct3") + + # used for regular table + tdSql.query("select avg(c1) from t1") + tdSql.checkData(0, 0, 5.000000000) + + + tdSql.query("select ts,c1, c2, c3 , c4, c5 from t1") + tdSql.checkData(1, 5, 1.11000) + tdSql.checkData(3, 4, 33) + tdSql.checkData(5, 5, None) + self.check_avg(" select avg(c1) , avg(c2) , avg(c3) from t1 " , " select sum(c1)/count(c1) , sum(c2)/count(c2) , sum(c3)/count(c3) from t1 ") + + # used for sub table + tdSql.query("select avg(c1) from ct1") + tdSql.checkData(0, 0, 4.846153846) + + tdSql.query("select avg(c1) from ct3") + tdSql.checkRows(0) + + self.check_avg(" select avg(abs(c1)) , avg(abs(c2)) , avg(abs(c3)) from t1 " , " select sum(abs(c1))/count(c1) , sum(abs(c2))/count(c2) , sum(abs(c3))/count(c3) from t1 ") + self.check_avg(" select avg(abs(c1)) , avg(abs(c2)) , avg(abs(c3)) from stb1 " , " select sum(abs(c1))/count(c1) , sum(abs(c2))/count(c2) , sum(abs(c3))/count(c3) from stb1 ") + + # used for stable table + + tdSql.query("select avg(c1) from stb1") + tdSql.checkRows(1) + + self.check_avg(" select avg(abs(ceil(c1))) , avg(abs(ceil(c2))) , avg(abs(ceil(c3))) from stb1 " , " select sum(abs(ceil(c1)))/count(c1) , sum(abs(ceil(c2)))/count(c2) , sum(abs(ceil(c3)))/count(c3) from stb1 ") + + # used for not exists table + tdSql.error("select avg(c1) from stbbb1") + tdSql.error("select avg(c1) from tbname") + tdSql.error("select avg(c1) from ct5") + + # mix with common col + tdSql.error("select c1, avg(c1) from ct1") + tdSql.error("select c1, avg(c1) from ct4") + + + # mix with common functions + tdSql.error("select c1, avg(c1),c5, floor(c5) from ct4 ") + tdSql.error("select c1, avg(c1),c5, floor(c5) from stb1 ") + + # mix with agg functions , not support + 
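+        # a plain column cannot be selected next to an aggregate without a
+        # group-by, so each mixed projection below is expected to be rejected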
tdSql.error("select c1, avg(c1),c5, count(c5) from stb1 ") + tdSql.error("select c1, avg(c1),c5, count(c5) from ct1 ") + tdSql.error("select c1, count(c5) from ct1 ") + tdSql.error("select c1, count(c5) from stb1 ") + + # agg functions mix with agg functions + + tdSql.query(" select max(c5), count(c5) , avg(c5) from stb1 ") + tdSql.checkData(0, 0, 8.88000 ) + tdSql.checkData(0, 1, 22 ) + tdSql.checkData(0, 2, 2.270454591 ) + + tdSql.query(" select max(c5), count(c5) , avg(c5) ,elapsed(ts) , spread(c1) from ct1; ") + tdSql.checkData(0, 0, 8.88000 ) + tdSql.checkData(0, 1, 13 ) + tdSql.checkData(0, 2, 0.768461603 ) + + # bug fix for count + tdSql.query("select count(c1) from ct4 ") + tdSql.checkData(0,0,9) + tdSql.query("select count(*) from ct4 ") + tdSql.checkData(0,0,12) + tdSql.query("select count(c1) from stb1 ") + tdSql.checkData(0,0,22) + tdSql.query("select count(*) from stb1 ") + tdSql.checkData(0,0,25) + + # bug fix for compute + tdSql.error("select c1, avg(c1) -0 ,ceil(c1)-0 from ct4 ") + tdSql.error(" select c1, avg(c1) -0 ,avg(ceil(c1-0.1))-0.1 from ct4") + + # mix with nest query + self.check_avg("select avg(col) from (select abs(c1) col from stb1)" , "select avg(abs(c1)) from stb1") + self.check_avg("select avg(col) from (select ceil(abs(c1)) col from stb1)" , "select avg(abs(c1)) from stb1") + + tdSql.query(" select abs(avg(abs(abs(c1)))) from stb1 ") + tdSql.checkData(0, 0, 4.500000000) + tdSql.query(" select abs(avg(abs(abs(c1)))) from t1 ") + tdSql.checkData(0, 0, 5.000000000) + + tdSql.query(" select abs(avg(abs(abs(c1)))) from stb1 ") + tdSql.checkData(0, 0, 4.500000000) + + tdSql.query(" select avg(c1) from stb1 where c1 is null ") + tdSql.checkRows(0) + + + def avg_func_filter(self): + tdSql.execute("use db") + tdSql.query(" select avg(c1), avg(c1) -0 ,avg(ceil(c1-0.1))-0 ,avg(floor(c1+0.1))-0.1 ,avg(ceil(log(c1,2)-0.5)) from ct4 where c1>5 ") + tdSql.checkRows(1) + tdSql.checkData(0,0,7.000000000) + tdSql.checkData(0,1,7.000000000) + tdSql.checkData(0,2,7.000000000) + tdSql.checkData(0,3,6.900000000) + tdSql.checkData(0,4,3.000000000) + + tdSql.query("select avg(c1), avg(c1) -0 ,avg(ceil(c1-0.1))-0 ,avg(floor(c1+0.1))-0.1 ,avg(ceil(log(c1,2)-0.5)) from ct4 where c1=5 ") + tdSql.checkRows(1) + tdSql.checkData(0,0,5.000000000) + tdSql.checkData(0,1,5.000000000) + tdSql.checkData(0,2,5.000000000) + tdSql.checkData(0,3,4.900000000) + tdSql.checkData(0,4,2.000000000) + + tdSql.query("select avg(c1) ,avg(c2) , avg(c1) -0 , avg(ceil(c1-0.1))-0 ,avg(floor(c1+0.1))-0.1 ,avg(ceil(log(c1,2))-0.5) from ct4 where c1>log(c1,2) limit 1 ") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 4.500000000) + tdSql.checkData(0, 1, 49999.500000000) + tdSql.checkData(0, 5, 1.625000000) + + def avg_Arithmetic(self): + pass + + def check_boundary_values(self): + + tdSql.execute("drop database if exists bound_test") + tdSql.execute("create database if not exists bound_test") + time.sleep(3) + tdSql.execute("use bound_test") + tdSql.execute( + "create table stb_bound (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp) tags (t1 int);" + ) + tdSql.execute(f'create table sub1_bound using stb_bound tags ( 1 )') + tdSql.execute( + f"insert into sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" + ) + tdSql.execute( + f"insert into sub1_bound values ( now()-1s, -2147483647, -9223372036854775807, -32767, -127, -3.40E+38, -1.7e+308, True, 
'binary_tb1', 'nchar_tb1', now() )" + ) + tdSql.execute( + f"insert into sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" + ) + + tdSql.execute( + f"insert into sub1_bound values ( now(), 2147483645, 9223372036854775805, 32765, 125, 3.40E+37, 1.7e+307, True, 'binary_tb1', 'nchar_tb1', now() )" + ) + + tdSql.execute( + f"insert into sub1_bound values ( now(), 2147483644, 9223372036854775804, 32764, 124, 3.40E+37, 1.7e+307, True, 'binary_tb1', 'nchar_tb1', now() )" + ) + + tdSql.execute( + f"insert into sub1_bound values ( now(), -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" + ) + tdSql.execute( + f"insert into sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" + ) + + + tdSql.error( + f"insert into sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" + ) + self.check_avg("select avg(c1), avg(c2), avg(c3) , avg(c4), avg(c5) ,avg(c6) from sub1_bound " , " select sum(c1)/count(c1), sum(c2)/count(c2) ,sum(c3)/count(c3), sum(c4)/count(c4), sum(c5)/count(c5) ,sum(c6)/count(c6) from sub1_bound ") + + + # check basic elem for table per row + tdSql.query("select avg(c1) ,avg(c2) , avg(c3) , avg(c4), avg(c5), avg(c6) from sub1_bound ") + tdSql.checkRows(1) + tdSql.checkData(0,0,920350133.571428537) + tdSql.checkData(0,1,1.3176245766935393e+18) + tdSql.checkData(0,2,14042.142857143) + tdSql.checkData(0,3,53.571428571) + tdSql.checkData(0,4,5.828571332045761e+37) + # tdSql.checkData(0,5,None) + + + # check + - * / in functions + tdSql.query(" select avg(c1+1) ,avg(c2) , avg(c3*1) , avg(c4/2), avg(c5)/2, avg(c6) from sub1_bound ") + tdSql.checkData(0,0,920350134.5714285) + tdSql.checkData(0,1,1.3176245766935393e+18) + tdSql.checkData(0,2,14042.142857143) + tdSql.checkData(0,3,26.785714286) + tdSql.checkData(0,4,2.9142856660228804e+37) + # tdSql.checkData(0,5,None) + + + + def run(self): # sourcery skip: extract-duplicate-method, remove-redundant-fstring + tdSql.prepare() + + tdLog.printNoPrefix("==========step1:create table ==============") + + self.prepare_datas() + + tdLog.printNoPrefix("==========step2:test errors ==============") + + self.test_errors() + + tdLog.printNoPrefix("==========step3:support types ============") + + self.support_types() + + tdLog.printNoPrefix("==========step4: avg basic query ============") + + self.basic_avg_function() + + tdLog.printNoPrefix("==========step5: avg boundary query ============") + + self.check_boundary_values() + + tdLog.printNoPrefix("==========step6: avg filter query ============") + + self.avg_func_filter() + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/2-query/between.py b/tests/system-test/2-query/between.py index 3b9465dd263cc6774fdf580630bb578629e4ce8b..44750abd4648260ceb68ba03239cb128e4eaf53b 100644 --- a/tests/system-test/2-query/between.py +++ b/tests/system-test/2-query/between.py @@ -175,16 +175,17 @@ class TDTestCase: tdLog.printNoPrefix("==========step10:invalid query type") - tdSql.query("select * from supt where location between 'beijing' and 'shanghai'") - tdSql.checkRows(23) - # 非0值均解析为1,因此"between 负值 and o"解析为"between 1 and 0" - tdSql.query("select * from supt where 
isused between 0 and 1")
-        tdSql.checkRows(23)
-        tdSql.query("select * from supt where isused between -1 and 0")
-        tdSql.checkRows(0)
-        tdSql.error("select * from supt where isused between false and true")
-        tdSql.query("select * from supt where family between '拖拉机' and '自行车'")
-        tdSql.checkRows(23)
+        # TODO tag is not finished
+        # tdSql.query("select * from supt where location between 'beijing' and 'shanghai'")
+        # tdSql.checkRows(23)
+        # # any non-zero value is parsed as 1, so "between <negative> and 0" is parsed as "between 1 and 0"
+        # tdSql.query("select * from supt where isused between 0 and 1")
+        # tdSql.checkRows(23)
+        # tdSql.query("select * from supt where isused between -1 and 0")
+        # tdSql.checkRows(0)
+        # tdSql.error("select * from supt where isused between false and true")
+        # tdSql.query("select * from supt where family between '拖拉机' and '自行车'")
+        # tdSql.checkRows(23)
 
         tdLog.printNoPrefix("==========step11:query HEX/OCT/BIN type")
diff --git a/tests/system-test/2-query/bottom.py b/tests/system-test/2-query/bottom.py
new file mode 100644
index 0000000000000000000000000000000000000000..a4390372dfa13ae4d6db6e545fc472b0395aed53
--- /dev/null
+++ b/tests/system-test/2-query/bottom.py
@@ -0,0 +1,106 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+from util.log import *
+from util.cases import *
+from util.sql import *
+
+
+
+class TDTestCase:
+    def init(self, conn, logSql):
+        tdLog.debug("start to execute %s" % __file__)
+        tdSql.init(conn.cursor())
+
+        self.rowNum = 10
+        self.ts = 1537146000000
+
+    def run(self):
+        tdSql.prepare()
+
+        tdSql.execute('''create table test(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double,
+                    col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned) tags(loc nchar(20))''')
+        tdSql.execute("create table test1 using test tags('beijing')")
+        for i in range(self.rowNum):
+            tdSql.execute("insert into test1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)"
+                        % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1))
+
+        # bottom verification
+        tdSql.error("select bottom(ts, 10) from test")
+        tdSql.error("select bottom(col1, 0) from test")
+        tdSql.error("select bottom(col1, 101) from test")
+        tdSql.error("select bottom(col2, 0) from test")
+        tdSql.error("select bottom(col2, 101) from test")
+        tdSql.error("select bottom(col3, 0) from test")
+        tdSql.error("select bottom(col3, 101) from test")
+        tdSql.error("select bottom(col4, 0) from test")
+        tdSql.error("select bottom(col4, 101) from test")
+        tdSql.error("select bottom(col5, 0) from test")
+        tdSql.error("select bottom(col5, 101) from test")
+        tdSql.error("select bottom(col6, 0) from test")
+        tdSql.error("select bottom(col6, 101) from test")
+        tdSql.error("select bottom(col7, 10) from test")
+        tdSql.error("select bottom(col8, 10) from test")
+        tdSql.error("select bottom(col9, 10) from test")
+
+        tdSql.query("select bottom(col1, 2) from test")
+        tdSql.checkRows(2)
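+        # bottom(col1, 2) returns the two smallest values; each column holds 1..10,
+        # so the expected result rows below are [(2,), (1,)]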
tdSql.checkEqual(tdSql.queryResult,[(2,),(1,)]) + tdSql.query("select bottom(col2, 2) from test") + tdSql.checkRows(2) + tdSql.checkEqual(tdSql.queryResult,[(2,),(1,)]) + + tdSql.query("select bottom(col3, 2) from test") + tdSql.checkRows(2) + tdSql.checkEqual(tdSql.queryResult,[(2,),(1,)]) + + tdSql.query("select bottom(col4, 2) from test") + tdSql.checkRows(2) + tdSql.checkEqual(tdSql.queryResult,[(2,),(1,)]) + + tdSql.query("select bottom(col11, 2) from test") + tdSql.checkRows(2) + tdSql.checkEqual(tdSql.queryResult,[(2,),(1,)]) + + tdSql.query("select bottom(col12, 2) from test") + tdSql.checkRows(2) + tdSql.checkEqual(tdSql.queryResult,[(2,),(1,)]) + + tdSql.query("select bottom(col13, 2) from test") + tdSql.checkRows(2) + tdSql.checkEqual(tdSql.queryResult,[(2,),(1,)]) + + tdSql.query("select bottom(col13,50) from test") + tdSql.checkRows(10) + + tdSql.query("select bottom(col14, 2) from test") + tdSql.checkRows(2) + tdSql.checkEqual(tdSql.queryResult,[(2,),(1,)]) + tdSql.query("select ts,bottom(col1, 2) from test1") + tdSql.checkRows(2) + tdSql.query("select ts,bottom(col1, 2),ts from test group by tbname") + tdSql.checkRows(2) + + tdSql.query('select bottom(col2,1) from test interval(1y) order by col2') + tdSql.checkData(0,0,1) + + + tdSql.error('select * from test where bottom(col2,1)=1') + + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/system-test/2-query/concat.py b/tests/system-test/2-query/concat.py index 1167b444d2eb6f753a5d662586afb0dfe30dff0b..59fae9b59d62599e3bca23c393ecc854aed9c186 100644 --- a/tests/system-test/2-query/concat.py +++ b/tests/system-test/2-query/concat.py @@ -36,19 +36,19 @@ class TDTestCase: concat_condition.extend( ( char_col, - f"upper( {char_col} )", + # f"upper( {char_col} )", ) ) concat_condition.extend( f"cast( {num_col} as binary(16) ) " for num_col in NUM_COL) concat_condition.extend( f"cast( {char_col} + {num_col} as binary(16) ) " for num_col in NUM_COL ) - concat_condition.extend( f"cast( {bool_col} as binary(16) )" for bool_col in BOOLEAN_COL ) - concat_condition.extend( f"cast( {char_col} + {bool_col} as binary(16) )" for bool_col in BOOLEAN_COL ) + # concat_condition.extend( f"cast( {bool_col} as binary(16) )" for bool_col in BOOLEAN_COL ) + # concat_condition.extend( f"cast( {char_col} + {bool_col} as binary(16) )" for bool_col in BOOLEAN_COL ) concat_condition.extend( f"cast( {ts_col} as binary(16) )" for ts_col in TS_TYPE_COL ) # concat_condition.extend( f"cast( {char_col} + {ts_col} as binary(16) )" for ts_col in TS_TYPE_COL ) concat_condition.extend( f"cast( {char_col} + {char_col_2} as binary(16) ) " for char_col_2 in CHAR_COL ) for num_col in NUM_COL: - concat_condition.extend( f"cast( {num_col} + {bool_col} as binary(16) )" for bool_col in BOOLEAN_COL ) + # concat_condition.extend( f"cast( {num_col} + {bool_col} as binary(16) )" for bool_col in BOOLEAN_COL ) concat_condition.extend( f"cast( {num_col} + {ts_col} as binary(16) )" for ts_col in TS_TYPE_COL if num_col is not FLOAT_COL and num_col is not DOUBLE_COL) concat_condition.extend( f"cast( {bool_col} + {ts_col} as binary(16) )" for bool_col in BOOLEAN_COL for ts_col in TS_TYPE_COL ) @@ -96,7 +96,6 @@ class TDTestCase: [ tdSql.query(f"select concat( {','.join( condition ) }) from {tbname} {where_condition} {group} ") for group in groups ] - def __concat_err_check(self,tbname): sqls = [] @@ -139,7 +138,11 @@ class TDTestCase: def 
__test_current(self):  # sourcery skip: use-itertools-product
         tdLog.printNoPrefix("==========current sql condition check , must return query ok==========")
-        tbname = ["ct1", "ct2", "ct4", "t1", "stb1"]
+        tbname = [
+            "ct1",
+            "ct2",
+            "ct4",
+        ]
         for tb in tbname:
             for i in range(2,8):
                 self.__concat_check(tb,i)
@@ -147,7 +150,10 @@ class TDTestCase:
     def __test_error(self):
         tdLog.printNoPrefix("==========err sql condition check , must return error==========")
-        tbname = ["ct1", "ct2", "ct4", "t1", "stb1"]
+        tbname = [
+            "t1",
+            "stb1",
+        ]
 
         for tb in tbname:
             for errsql in self.__concat_err_check(tb):
diff --git a/tests/system-test/2-query/concat2.py b/tests/system-test/2-query/concat2.py
new file mode 100644
index 0000000000000000000000000000000000000000..717766e7ffcaafcc164cc1519d0a3a657d5e387c
--- /dev/null
+++ b/tests/system-test/2-query/concat2.py
@@ -0,0 +1,293 @@
+from util.log import *
+from util.sql import *
+from util.cases import *
+from util.dnodes import *
+
+
+PRIMARY_COL = "ts"
+
+INT_COL = "c1"
+BINT_COL = "c2"
+SINT_COL = "c3"
+TINT_COL = "c4"
+FLOAT_COL = "c5"
+DOUBLE_COL = "c6"
+BOOL_COL = "c7"
+
+BINARY_COL = "c8"
+NCHAR_COL = "c9"
+TS_COL = "c10"
+
+NUM_COL = [ INT_COL, BINT_COL, SINT_COL, TINT_COL, FLOAT_COL, DOUBLE_COL, ]
+CHAR_COL = [ BINARY_COL, NCHAR_COL, ]
+BOOLEAN_COL = [ BOOL_COL, ]
+TS_TYPE_COL = [ TS_COL, ]
+
+
+class TDTestCase:
+
+    def init(self, conn, logSql):
+        tdLog.debug(f"start to execute {__file__}")
+        tdSql.init(conn.cursor())
+
+    def __concat_condition(self):  # sourcery skip: extract-method
+        concat_condition = []
+        for char_col in CHAR_COL:
+            concat_condition.extend(
+                (
+                    char_col,
+                    # f"upper( {char_col} )",
+                )
+            )
+            concat_condition.extend( f"cast( {num_col} as binary(16) ) " for num_col in NUM_COL)
+            concat_condition.extend( f"cast( {char_col} + {num_col} as binary(16) ) " for num_col in NUM_COL )
+            # concat_condition.extend( f"cast( {bool_col} as binary(16) )" for bool_col in BOOLEAN_COL )
+            # concat_condition.extend( f"cast( {char_col} + {bool_col} as binary(16) )" for bool_col in BOOLEAN_COL )
+            concat_condition.extend( f"cast( {ts_col} as binary(16) )" for ts_col in TS_TYPE_COL )
+            # concat_condition.extend( f"cast( {char_col} + {ts_col} as binary(16) )" for ts_col in TS_TYPE_COL )
+            concat_condition.extend( f"cast( {char_col} + {char_col_2} as binary(16) ) " for char_col_2 in CHAR_COL )
+
+        for num_col in NUM_COL:
+            # concat_condition.extend( f"cast( {num_col} + {bool_col} as binary(16) )" for bool_col in BOOLEAN_COL )
+            concat_condition.extend( f"cast( {num_col} + {ts_col} as binary(16) )" for ts_col in TS_TYPE_COL if num_col is not FLOAT_COL and num_col is not DOUBLE_COL)
+
+        concat_condition.extend( f"cast( {bool_col} + {ts_col} as binary(16) )" for bool_col in BOOLEAN_COL for ts_col in TS_TYPE_COL )
+
+        concat_condition.append('''"test1234!@#$%^&*():'><?/.,][}{"''')
+
+        return concat_condition
+
+    def __where_condition(self, col):
+        # return f" where count({col}) > 0 "
+        return ""
+
+    def __concat_num(self, concat_lists, num):
+        return [ concat_lists[i] for i in range(num) ]
+
+
+    def __group_condition(self, col, having = ""):
+        return f" group by {col} having {having}" if having else f" group by {col} "
+
+    def __concat_check(self, tbname, num):
+        concat_condition = self.__concat_condition()
+        for i in range(len(concat_condition) - num + 1 ):
+            condition = self.__concat_num(concat_condition[i:], num)
+            concat_filter = f"concat( {','.join( condition ) }) "
+            where_condition = self.__where_condition(condition[0])
+            # group_having = self.__group_condition(condition[0], having=f"{condition[0]} is not null " )
+            concat_group_having = 
self.__group_condition(concat_filter, having=f"{concat_filter} is not null " ) + # group_no_having= self.__group_condition(condition[0] ) + concat_group_no_having= self.__group_condition(concat_filter) + groups = ["", concat_group_having, concat_group_no_having] + + if num > 8 or num < 2 : + [tdSql.error(f"select concat( {','.join( condition ) }) from {tbname} {where_condition} {group} ") for group in groups ] + break + + tdSql.query(f"select {','.join(condition)} from {tbname} ") + rows = tdSql.queryRows + concat_data = [] + for m in range(rows): + concat_data.append("".join(tdSql.queryResult[m])) if tdSql.getData(m, 0) else concat_data.append(None) + tdSql.query(f"select concat( {','.join( condition ) }) from {tbname} ") + tdSql.checkRows(rows) + for j in range(tdSql.queryRows): + assert tdSql.getData(j, 0) in concat_data + + [ tdSql.query(f"select concat( {','.join( condition ) }) from {tbname} {where_condition} {group} ") for group in groups ] + + + def __concat_err_check(self,tbname): + sqls = [] + + for char_col in CHAR_COL: + sqls.extend( + ( + f"select concat( {char_col} ) from {tbname} ", + f"select concat(ceil( {char_col} )) from {tbname} ", + f"select {char_col} from {tbname} group by concat( {char_col} ) ", + ) + ) + + sqls.extend( f"select concat( {char_col} , {num_col} ) from {tbname} " for num_col in NUM_COL ) + sqls.extend( f"select concat( {char_col} , {ts_col} ) from {tbname} " for ts_col in TS_TYPE_COL ) + sqls.extend( f"select concat( {char_col} , {bool_col} ) from {tbname} " for bool_col in BOOLEAN_COL ) + + sqls.extend( f"select concat( {ts_col}, {bool_col} ) from {tbname} " for ts_col in TS_TYPE_COL for bool_col in BOOLEAN_COL ) + sqls.extend( f"select concat( {num_col} , {ts_col} ) from {tbname} " for num_col in NUM_COL for ts_col in TS_TYPE_COL) + sqls.extend( f"select concat( {num_col} , {bool_col} ) from {tbname} " for num_col in NUM_COL for bool_col in BOOLEAN_COL) + sqls.extend( f"select concat( {num_col} , {num_col} ) from {tbname} " for num_col in NUM_COL for num_col in NUM_COL) + sqls.extend( f"select concat( {ts_col}, {ts_col} ) from {tbname} " for ts_col in TS_TYPE_COL for ts_col in TS_TYPE_COL ) + sqls.extend( f"select concat( {bool_col}, {bool_col} ) from {tbname} " for bool_col in BOOLEAN_COL for bool_col in BOOLEAN_COL ) + + sqls.extend( f"select concat( {char_col} + {char_col_2} ) from {tbname} " for char_col in CHAR_COL for char_col_2 in CHAR_COL ) + sqls.extend( f"select concat({char_col}, 11) from {tbname} " for char_col in CHAR_COL ) + sqls.extend( f"select concat({num_col}, '1') from {tbname} " for num_col in NUM_COL ) + sqls.extend( f"select concat({ts_col}, '1') from {tbname} " for ts_col in TS_TYPE_COL ) + sqls.extend( f"select concat({bool_col}, '1') from {tbname} " for bool_col in BOOLEAN_COL ) + sqls.extend( f"select concat({char_col},'1') from {tbname} interval(2d) sliding(1d)" for char_col in CHAR_COL ) + sqls.extend( + ( + f"select concat() from {tbname} ", + f"select concat(*) from {tbname} ", + f"select concat(ccccccc) from {tbname} ", + f"select concat(111) from {tbname} ", + ) + ) + + return sqls + + def __test_current(self): # sourcery skip: use-itertools-product + tdLog.printNoPrefix("==========current sql condition check , must return query ok==========") + tbname = [ + "t1", + "stb1", + ] + for tb in tbname: + for i in range(2,8): + self.__concat_check(tb,i) + tdLog.printNoPrefix(f"==========current sql condition check in {tb}, col num: {i} over==========") + + def __test_error(self): + tdLog.printNoPrefix("==========err sql 
condition check , must return error==========") + tbname = [ + "ct1", + "ct4", + ] + + for tb in tbname: + for errsql in self.__concat_err_check(tb): + tdSql.error(sql=errsql) + self.__concat_check(tb,1) + self.__concat_check(tb,9) + tdLog.printNoPrefix(f"==========err sql condition check in {tb} over==========") + + + def all_test(self): + self.__test_current() + self.__test_error() + + + def __create_tb(self): + tdSql.prepare() + + tdLog.printNoPrefix("==========step1:create table") + create_stb_sql = f'''create table stb1( + ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint, + {FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool, + {BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp + ) tags (t1 int) + ''' + create_ntb_sql = f'''create table t1( + ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint, + {FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool, + {BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp + ) + ''' + tdSql.execute(create_stb_sql) + tdSql.execute(create_ntb_sql) + + for i in range(4): + tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )') + + def __insert_data(self, rows): + now_time = int(datetime.datetime.timestamp(datetime.datetime.now()) * 1000) + for i in range(rows): + tdSql.execute( + f"insert into ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" + ) + tdSql.execute( + f"insert into ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" + ) + tdSql.execute( + f"insert into ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" + ) + tdSql.execute( + f'''insert into ct1 values + ( { now_time - rows * 5 }, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar_测试_0', { now_time + 8 } ) + ( { now_time + 10000 }, { rows }, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar_测试_9', { now_time + 9 } ) + ''' + ) + + tdSql.execute( + f'''insert into ct4 values + ( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( + { now_time + 5184000000}, {pow(2,31)-pow(2,15)}, {pow(2,63)-pow(2,30)}, 32767, 127, + { 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000} + ) + ( + { now_time + 2592000000 }, {pow(2,31)-pow(2,16)}, {pow(2,63)-pow(2,31)}, 32766, 126, + { 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000} + ) + ''' + ) + + tdSql.execute( + f'''insert into ct2 values + ( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( + { now_time + 5184000000 }, { -1 * pow(2,31) + pow(2,15) }, { -1 * pow(2,63) + pow(2,30) }, -32766, -126, + { -1 * 3.2 * pow(10,38) }, { -1.2 
* pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 } + ) + ( + { now_time + 2592000000 }, { -1 * pow(2,31) + pow(2,16) }, { -1 * pow(2,63) + pow(2,31) }, -32767, -127, + { - 3.3 * pow(10,38) }, { -1.3 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 } + ) + ''' + ) + + for i in range(rows): + insert_data = f'''insert into t1 values + ( { now_time - i * 3600000 }, {i}, {i * 11111}, { i % 32767 }, { i % 127}, { i * 1.11111 }, { i * 1000.1111 }, { i % 2}, + "binary_{i}", "nchar_测试_{i}", { now_time - 1000 * i } ) + ''' + tdSql.execute(insert_data) + tdSql.execute( + f'''insert into t1 values + ( { now_time + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time - (( rows // 2 ) * 60 + 30) * 60000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time - rows * 3600000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time + 7200000 }, { pow(2,31) - pow(2,15) }, { pow(2,63) - pow(2,30) }, 32767, 127, + { 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 }, + "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 } + ) + ( + { now_time + 3600000 } , { pow(2,31) - pow(2,16) }, { pow(2,63) - pow(2,31) }, 32766, 126, + { 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 }, + "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 } + ) + ''' + ) + + def run(self): + tdSql.prepare() + + tdLog.printNoPrefix("==========step1:create table") + self.__create_tb() + + tdLog.printNoPrefix("==========step2:insert data") + self.rows = 10 + self.__insert_data(self.rows) + + tdLog.printNoPrefix("==========step3:all check") + self.all_test() + + tdDnodes.stop(1) + tdDnodes.start(1) + + tdSql.execute("use db") + + tdLog.printNoPrefix("==========step4:after wal, all check again ") + self.all_test() + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/2-query/concat_ws.py b/tests/system-test/2-query/concat_ws.py index 876a1c88055b0ab3ca3b1046d180365fc089ae0d..2c179b97ce0757670f31498c4dfa3926018854d9 100644 --- a/tests/system-test/2-query/concat_ws.py +++ b/tests/system-test/2-query/concat_ws.py @@ -36,22 +36,22 @@ class TDTestCase: concat_ws_condition.extend( ( char_col, - f"upper( {char_col} )", + # f"upper( {char_col} )", ) ) concat_ws_condition.extend( f"cast( {num_col} as binary(16) ) " for num_col in NUM_COL) concat_ws_condition.extend( f"cast( {char_col} + {num_col} as binary(16) ) " for num_col in NUM_COL ) - concat_ws_condition.extend( f"cast( {bool_col} as binary(16) )" for bool_col in BOOLEAN_COL ) - concat_ws_condition.extend( f"cast( {char_col} + {bool_col} as binary(16) )" for bool_col in BOOLEAN_COL ) + # concat_ws_condition.extend( f"cast( {bool_col} as binary(16) )" for bool_col in BOOLEAN_COL ) + # concat_ws_condition.extend( f"cast( {char_col} + {bool_col} as binary(16) )" for bool_col in BOOLEAN_COL ) concat_ws_condition.extend( f"cast( {ts_col} as binary(16) )" for ts_col in TS_TYPE_COL ) # concat_ws_condition.extend( f"cast( {char_col} + {ts_col} as binary(16) )" for ts_col in TS_TYPE_COL ) concat_ws_condition.extend( f"cast( {char_col} + {char_col_2} as binary(16) ) " for char_col_2 in CHAR_COL ) for num_col in NUM_COL: - concat_ws_condition.extend( f"cast( {num_col} + {bool_col} as binary(16) )" for bool_col in BOOLEAN_COL ) + # 
concat_ws_condition.extend( f"cast( {num_col} + {bool_col} as binary(16) )" for bool_col in BOOLEAN_COL )
             concat_ws_condition.extend( f"cast( {num_col} + {ts_col} as binary(16) )" for ts_col in TS_TYPE_COL if num_col is not FLOAT_COL and num_col is not DOUBLE_COL)
 
-        concat_ws_condition.extend( f"cast( {bool_col} + {ts_col} as binary(16) )" for bool_col in BOOLEAN_COL for ts_col in TS_TYPE_COL )
+        # concat_ws_condition.extend( f"cast( {bool_col} + {ts_col} as binary(16) )" for bool_col in BOOLEAN_COL for ts_col in TS_TYPE_COL )
 
         concat_ws_condition.append('''"test1234!@#$%^&*():'><?/.,][}{"''')
+
+        return concat_ws_condition
+
+    def __where_condition(self, col):
+        # return f" where count({col}) > 0 "
+        return ""
+
+    def __concat_ws_num(self, concat_ws_lists, num):
+        return [ concat_ws_lists[i] for i in range(num) ]
+
+
+    def __group_condition(self, col, having = ""):
+        return f" group by {col} having {having}" if having else f" group by {col} "
+
+    def __concat_ws_check(self, tbname, num):
+        concat_ws_condition = self.__concat_ws_condition()
+        for i in range(len(concat_ws_condition) - num + 1 ):
+            condition = self.__concat_ws_num(concat_ws_condition[i:], num)
+            concat_ws_filter = f"concat_ws('_', {','.join( condition ) }) "
+            where_condition = self.__where_condition(condition[0])
+            # group_having = self.__group_condition(condition[0], having=f"{condition[0]} is not null " )
+            concat_ws_group_having = self.__group_condition(concat_ws_filter, having=f"{concat_ws_filter} is not null " )
+            # group_no_having= self.__group_condition(condition[0] )
+            concat_ws_group_no_having= self.__group_condition(concat_ws_filter)
+            groups = ["", concat_ws_group_having, concat_ws_group_no_having]
+
+            if num > 8 or num < 2 :
+                [tdSql.error(f"select concat_ws('_', {','.join( condition ) }) from {tbname} {where_condition} {group} ") for group in groups ]
+                break
+
+            tdSql.query(f"select {','.join(condition)} from {tbname} ")
+            rows = tdSql.queryRows
+            concat_ws_data = []
+            for m in range(rows):
+                concat_ws_data.append("_".join(tdSql.queryResult[m])) if tdSql.getData(m, 0) else concat_ws_data.append(None)
+            tdSql.query(f"select concat_ws('_', {','.join( condition ) }) from {tbname} ")
+            tdSql.checkRows(rows)
+            for j in range(tdSql.queryRows):
+                assert tdSql.getData(j, 0) in concat_ws_data
+
+            [ tdSql.query(f"select concat_ws('_', {','.join( condition ) }) from {tbname} {where_condition} {group} ") for group in groups ]
+
+
+    def __concat_ws_err_check(self,tbname):
+        sqls = []
+
+        for char_col in CHAR_COL:
+            sqls.extend(
+                (
+                    f"select concat_ws('_', {char_col} ) from {tbname} ",
+                    f"select concat_ws('_', ceil( {char_col} )) from {tbname} ",
+                    f"select {char_col} from {tbname} group by concat_ws('_', {char_col} ) ",
+                )
+            )
+
+        sqls.extend( f"select concat_ws('_', {char_col} , {num_col} ) from {tbname} " for num_col in NUM_COL )
+        sqls.extend( f"select concat_ws('_', {char_col} , {ts_col} ) from {tbname} " for ts_col in TS_TYPE_COL )
+        sqls.extend( f"select concat_ws('_', {char_col} , {bool_col} ) from {tbname} " for bool_col in BOOLEAN_COL )
+
+        sqls.extend( f"select concat_ws('_', {ts_col}, {bool_col} ) from {tbname} " for ts_col in TS_TYPE_COL for bool_col in BOOLEAN_COL )
+        sqls.extend( f"select concat_ws('_', {num_col} , {ts_col} ) from {tbname} " for num_col in NUM_COL for ts_col in TS_TYPE_COL)
+        sqls.extend( f"select concat_ws('_', {num_col} , {bool_col} ) from {tbname} " for num_col in NUM_COL for bool_col in BOOLEAN_COL)
+        sqls.extend( f"select concat_ws('_', {num_col} , {num_col} ) from {tbname} " for num_col in NUM_COL for num_col in NUM_COL)
+        sqls.extend( f"select concat_ws('_', {ts_col}, {ts_col} ) from {tbname} " for 
ts_col in TS_TYPE_COL for ts_col in TS_TYPE_COL ) + sqls.extend( f"select concat_ws('_', {bool_col}, {bool_col} ) from {tbname} " for bool_col in BOOLEAN_COL for bool_col in BOOLEAN_COL ) + + sqls.extend( f"select concat_ws('_', {char_col} + {char_col_2} ) from {tbname} " for char_col in CHAR_COL for char_col_2 in CHAR_COL ) + sqls.extend( f"select concat_ws('_', {char_col}, 11) from {tbname} " for char_col in CHAR_COL ) + sqls.extend( f"select concat_ws('_', {num_col}, '1') from {tbname} " for num_col in NUM_COL ) + sqls.extend( f"select concat_ws('_', {ts_col}, '1') from {tbname} " for ts_col in TS_TYPE_COL ) + sqls.extend( f"select concat_ws('_', {bool_col}, '1') from {tbname} " for bool_col in BOOLEAN_COL ) + sqls.extend( f"select concat_ws('_', {char_col},'1') from {tbname} interval(2d) sliding(1d)" for char_col in CHAR_COL ) + sqls.extend( + ( + f"select concat_ws('_', ) from {tbname} ", + f"select concat_ws('_', *) from {tbname} ", + f"select concat_ws('_', ccccccc) from {tbname} ", + f"select concat_ws('_', 111) from {tbname} ", + ) + ) + + return sqls + + def __test_current(self): # sourcery skip: use-itertools-product + tdLog.printNoPrefix("==========current sql condition check , must return query ok==========") + tbname = [ + "ct1", + "ct2", + "ct4", + ] + for tb in tbname: + for i in range(2,8): + self.__concat_ws_check(tb,i) + tdLog.printNoPrefix(f"==========current sql condition check in {tb}, col num: {i} over==========") + + def __test_error(self): + tdLog.printNoPrefix("==========err sql condition check , must return error==========") + tbname = [ + "t1", + "stb1" + ] + + for tb in tbname: + for errsql in self.__concat_ws_err_check(tb): + tdSql.error(sql=errsql) + self.__concat_ws_check(tb,1) + self.__concat_ws_check(tb,9) + tdLog.printNoPrefix(f"==========err sql condition check in {tb} over==========") + + + def all_test(self): + self.__test_current() + self.__test_error() + + + def __create_tb(self): + tdSql.prepare() + + tdLog.printNoPrefix("==========step1:create table") + create_stb_sql = f'''create table stb1( + ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint, + {FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool, + {BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp + ) tags (t1 int) + ''' + create_ntb_sql = f'''create table t1( + ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint, + {FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool, + {BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp + ) + ''' + tdSql.execute(create_stb_sql) + tdSql.execute(create_ntb_sql) + + for i in range(4): + tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )') + + def __insert_data(self, rows): + now_time = int(datetime.datetime.timestamp(datetime.datetime.now()) * 1000) + for i in range(rows): + tdSql.execute( + f"insert into ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" + ) + tdSql.execute( + f"insert into ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" + ) + tdSql.execute( + f"insert into ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" + ) + tdSql.execute( + 
f'''insert into ct1 values + ( { now_time - rows * 5 }, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar_测试_0', { now_time + 8 } ) + ( { now_time + 10000 }, { rows }, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar_测试_9', { now_time + 9 } ) + ''' + ) + + tdSql.execute( + f'''insert into ct4 values + ( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( + { now_time + 5184000000}, {pow(2,31)-pow(2,15)}, {pow(2,63)-pow(2,30)}, 32767, 127, + { 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000} + ) + ( + { now_time + 2592000000 }, {pow(2,31)-pow(2,16)}, {pow(2,63)-pow(2,31)}, 32766, 126, + { 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000} + ) + ''' + ) + + tdSql.execute( + f'''insert into ct2 values + ( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( + { now_time + 5184000000 }, { -1 * pow(2,31) + pow(2,15) }, { -1 * pow(2,63) + pow(2,30) }, -32766, -126, + { -1 * 3.2 * pow(10,38) }, { -1.2 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 } + ) + ( + { now_time + 2592000000 }, { -1 * pow(2,31) + pow(2,16) }, { -1 * pow(2,63) + pow(2,31) }, -32767, -127, + { - 3.3 * pow(10,38) }, { -1.3 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 } + ) + ''' + ) + + for i in range(rows): + insert_data = f'''insert into t1 values + ( { now_time - i * 3600000 }, {i}, {i * 11111}, { i % 32767 }, { i % 127}, { i * 1.11111 }, { i * 1000.1111 }, { i % 2}, + "binary_{i}", "nchar_测试_{i}", { now_time - 1000 * i } ) + ''' + tdSql.execute(insert_data) + tdSql.execute( + f'''insert into t1 values + ( { now_time + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time - (( rows // 2 ) * 60 + 30) * 60000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time - rows * 3600000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time + 7200000 }, { pow(2,31) - pow(2,15) }, { pow(2,63) - pow(2,30) }, 32767, 127, + { 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 }, + "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 } + ) + ( + { now_time + 3600000 } , { pow(2,31) - pow(2,16) }, { pow(2,63) - pow(2,31) }, 32766, 126, + { 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 }, + "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 } + ) + ''' + ) + + def run(self): + tdSql.prepare() + + tdLog.printNoPrefix("==========step1:create table") + self.__create_tb() + + tdLog.printNoPrefix("==========step2:insert data") + self.rows = 10 + self.__insert_data(self.rows) + + tdLog.printNoPrefix("==========step3:all check") + self.all_test() + + tdDnodes.stop(1) + tdDnodes.start(1) + + tdSql.execute("use db") + + tdLog.printNoPrefix("==========step4:after wal, all check again ") + self.all_test() + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +tdCases.addLinux(__file__, 
TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/2-query/csum.py b/tests/system-test/2-query/csum.py new file mode 100644 index 0000000000000000000000000000000000000000..a331311fd2e841da5fd4f6da86ccb27834fcbc69 --- /dev/null +++ b/tests/system-test/2-query/csum.py @@ -0,0 +1,428 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import subprocess +import random +import math +import numpy as np +import inspect +import re + +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor()) + + def csum_query_form(self, col="c1", alias="", table_expr="t1", condition=""): + + ''' + csum function: + :param col: string, column name, required parameter; + :param alias: string, an alias for the result column, or an extra function appended to it; + :param table_expr: string or expression, data source (eg, table/stable name, result set), required parameter; + :param condition: expression; + :param args: other functions, like ', last(col)', or another name for the result column, like 'c2' + :return: csum query statement, default: select csum(c1) from t1 + ''' + + return f"select csum({col}) {alias} from {table_expr} {condition}" + + def checkcsum(self,col="c1", alias="", table_expr="t1", condition="" ): + line = sys._getframe().f_back.f_lineno + pre_sql = self.csum_query_form( + col=col, table_expr=table_expr, condition=condition + ).replace("csum", "count") + tdSql.query(pre_sql) + + if tdSql.queryRows == 0: + tdSql.query(self.csum_query_form( + col=col, alias=alias, table_expr=table_expr, condition=condition + )) + print(f"case in {line}: ", end='') + tdSql.checkRows(0) + return + + if "order by tbname" in condition: + tdSql.error(self.csum_query_form( + col=col, alias=alias, table_expr=table_expr, condition=condition + )) + return + + if "group" in condition: + + tb_condition = condition.split("group by")[1].split(" ")[1] + tdSql.query(f"select distinct {tb_condition} from {table_expr}") + query_result = tdSql.queryResult + query_rows = tdSql.queryRows + clear_condition = re.sub('order by [0-9a-z]*|slimit [0-9]*|soffset [0-9]*', "", condition) + + pre_row = 0 + for i in range(query_rows): + group_name = query_result[i][0] + if "where" in clear_condition: + pre_condition = re.sub('group by [0-9a-z]*', f"{tb_condition}='{group_name}'", clear_condition) + else: + pre_condition = "where " + re.sub('group by [0-9a-z]*',f"{tb_condition}='{group_name}'", clear_condition) + + tdSql.query(f"select {col} {alias} from {table_expr} {pre_condition}") + pre_data = np.array(tdSql.queryResult)[np.array(tdSql.queryResult) != None] + print("data is ", pre_data) + pre_csum = np.cumsum(pre_data) + tdSql.query(self.csum_query_form( + col=col, alias=alias, table_expr=table_expr, condition=condition + )) + for j in range(len(pre_csum)): + print(f"case in {line}:", end='') + tdSql.checkData(pre_row+j, 1, pre_csum[j]) + pre_row += len(pre_csum) + return + elif "union" in condition: +
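+            # Editor's note: the branch below verifies "... union all ..." by splitting the statement
+            # at the "union all" keyword, running each half separately, and checking that the combined
+            # result equals the two partial results concatenated in order.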
union_sql_0 = self.csum_query_form( + col=col, alias=alias, table_expr=table_expr, condition=condition + ).split("union all")[0] + + union_sql_1 = self.csum_query_form( + col=col, alias=alias, table_expr=table_expr, condition=condition + ).split("union all")[1] + + tdSql.query(union_sql_0) + union_csum_0 = tdSql.queryResult + row_union_0 = tdSql.queryRows + + tdSql.query(union_sql_1) + union_csum_1 = tdSql.queryResult + + tdSql.query(self.csum_query_form( + col=col, alias=alias, table_expr=table_expr, condition=condition + )) + for i in range(tdSql.queryRows): + print(f"case in {line}: ", end='') + if i < row_union_0: + tdSql.checkData(i, 0, union_csum_0[i][0]) + else: + tdSql.checkData(i, 0, union_csum_1[i-row_union_0][0]) + return + + else: + tdSql.query(f"select {col} from {table_expr} {re.sub('limit [0-9]*|offset [0-9]*','',condition)}") + offset_val = int(condition.split("offset")[1].split(" ")[1]) if "offset" in condition else 0 + pre_result = np.array(tdSql.queryResult)[np.array(tdSql.queryResult) != None] + pre_csum = np.cumsum(pre_result)[offset_val:] + tdSql.query(self.csum_query_form( + col=col, alias=alias, table_expr=table_expr, condition=condition + )) + + for i in range(tdSql.queryRows): + print(f"case in {line}: ", end='') + if pre_csum[i] > 1.7e+308 or pre_csum[i] < -1.7e+308: + continue + else: + tdSql.checkData(i, 0, pre_csum[i]) + + pass + + def csum_current_query(self) : + + # table schema :ts timestamp, c1 int, c2 float, c3 timestamp, c4 binary(16), c5 double, c6 bool + # c7 bigint, c8 smallint, c9 tinyint, c10 nchar(16) + + # case1~6: numeric col:int/bigint/tinyint/smallint/float/double + self.checkcsum() + case2 = {"col": "c2"} + self.checkcsum(**case2) + case3 = {"col": "c5"} + self.checkcsum(**case3) + case4 = {"col": "c7"} + self.checkcsum(**case4) + case5 = {"col": "c8"} + self.checkcsum(**case5) + case6 = {"col": "c9"} + self.checkcsum(**case6) + + # case7~8: nested query + # case7 = {"table_expr": "(select c1 from stb1)"} + # self.checkcsum(**case7) + # case8 = {"table_expr": "(select csum(c1) c1 from stb1 group by tbname)"} + # self.checkcsum(**case8) + + # case9~10: mix with tbname/ts/tag/col + # case9 = {"alias": ", tbname"} + # self.checkcsum(**case9) + # case10 = {"alias": ", _c0"} + # self.checkcsum(**case10) + # case11 = {"alias": ", st1"} + # self.checkcsum(**case11) + # case12 = {"alias": ", c1"} + # self.checkcsum(**case12) + + # case13~15: with single condition + case13 = {"condition": "where c1 <= 10"} + self.checkcsum(**case13) + case14 = {"condition": "where c6 in (0, 1)"} + self.checkcsum(**case14) + case15 = {"condition": "where c1 between 1 and 10"} + self.checkcsum(**case15) + + # case16: with multi-condition + case16 = {"condition": "where c6=1 or c6 =0"} + self.checkcsum(**case16) + + # case17: only support normal table join + case17 = { + "col": "t1.c1", + "table_expr": "t1, t2", + "condition": "where t1.ts=t2.ts" + } + self.checkcsum(**case17) + # case18~19: with group by + # case18 = { + # "table_expr": "t1", + # "condition": "group by c6" + # } + # self.checkcsum(**case18) + # case19 = { + # "table_expr": "stb1", + # "condition": "partition by tbname" # partition by tbname + # } + # self.checkcsum(**case19) + + # # case20~21: with order by + # case20 = {"condition": "order by ts"} + # self.checkcsum(**case20) + + # # case22: with union + # case22 = { + # "condition": "union all select csum(c1) from t2" + # } + # self.checkcsum(**case22) + + # case23: with limit/slimit + case23 = { + "condition": "limit 1" + } + self.checkcsum(**case23) + #
case24 = { + # "table_expr": "stb1", + # "condition": "group by tbname slimit 1 soffset 1" + # } + # self.checkcsum(**case24) + + pass + + def csum_error_query(self) -> None : + # unusual test + # + # table schema :ts timestamp, c1 int, c2 float, c3 timestamp, c4 binary(16), c5 double, c6 bool + # c7 bigint, c8 smallint, c9 tinyint, c10 nchar(16) + # + # form test + tdSql.error(self.csum_query_form(col="")) # no col + tdSql.error("csum(c1) from stb1") # no select + tdSql.error("select csum from t1") # no csum condition + tdSql.error("select csum c1 from t1") # no brackets + tdSql.error("select csum(c1) t1") # no from + tdSql.error("select csum( c1 ) from ") # no table_expr + # tdSql.error(self.csum_query_form(col="st1")) # tag col + tdSql.error(self.csum_query_form(col=1)) # col is a value + tdSql.error(self.csum_query_form(col="'c1'")) # col is a string + tdSql.error(self.csum_query_form(col=None)) # col is NULL 1 + tdSql.error(self.csum_query_form(col="NULL")) # col is NULL 2 + tdSql.error(self.csum_query_form(col='""')) # col is "" + tdSql.error(self.csum_query_form(col='c%')) # col is special char 1 + tdSql.error(self.csum_query_form(col='c_')) # col is special char 2 + tdSql.error(self.csum_query_form(col='c.')) # col is special char 3 + tdSql.error(self.csum_query_form(col='c3')) # timestamp col + tdSql.error(self.csum_query_form(col='ts')) # Primary key + tdSql.error(self.csum_query_form(col='avg(c1)')) # expr col + tdSql.error(self.csum_query_form(col='c6')) # bool col + tdSql.error(self.csum_query_form(col='c4')) # binary col + tdSql.error(self.csum_query_form(col='c10')) # nchar col + tdSql.error(self.csum_query_form(col='c10')) # not table_expr col + tdSql.error(self.csum_query_form(col='t1')) # tbname + tdSql.error(self.csum_query_form(col='stb1')) # stbname + tdSql.error(self.csum_query_form(col='db')) # database name + tdSql.error(self.csum_query_form(col=True)) # col is BOOL 1 + tdSql.error(self.csum_query_form(col='True')) # col is BOOL 2 + tdSql.error(self.csum_query_form(col='*')) # col is all col + tdSql.error("select csum[c1] from t1") # sql form error 1 + tdSql.error("select csum{c1} from t1") # sql form error 2 + tdSql.error(self.csum_query_form(col="[c1]")) # sql form error 3 + # tdSql.error(self.csum_query_form(col="c1, c2")) # sql form error 3 + # tdSql.error(self.csum_query_form(col="c1, 2")) # sql form error 3 + tdSql.error(self.csum_query_form(alias=", count(c1)")) # mix with aggregate function 1 + tdSql.error(self.csum_query_form(alias=", avg(c1)")) # mix with aggregate function 2 + tdSql.error(self.csum_query_form(alias=", min(c1)")) # mix with select function 1 + tdSql.error(self.csum_query_form(alias=", top(c1, 5)")) # mix with select function 2 + tdSql.error(self.csum_query_form(alias=", spread(c1)")) # mix with calculation function 1 + tdSql.error(self.csum_query_form(alias=", diff(c1)")) # mix with calculation function 2 + # tdSql.error(self.csum_query_form(alias=" + 2")) # mix with arithmetic 1 + tdSql.error(self.csum_query_form(alias=" + avg(c1)")) # mix with arithmetic 2 + tdSql.error(self.csum_query_form(alias=", c2")) # mix with other 1 + # tdSql.error(self.csum_query_form(table_expr="stb1")) # select stb directly + stb_join = { + "col": "stb1.c1", + "table_expr": "stb1, stb2", + "condition": "where stb1.ts=stb2.ts and stb1.st1=stb2.st2 order by stb1.ts" + } + tdSql.error(self.csum_query_form(**stb_join)) # stb join + interval_sql = { + "condition": "where ts>0 and ts < now interval(1h) fill(next)" + } +
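+        # Editor's note: a minimal, self-contained sketch of the verification model used by the
+        # checkcsum() helper in this file -- the expected csum output is just numpy's running sum
+        # over the non-NULL column values (the names below are illustrative, not part of the test):
+        #
+        #     import numpy as np
+        #     raw = [1, None, 2, 3]                          # column values returned by the plain query
+        #     vals = np.array([v for v in raw if v is not None])
+        #     expected = np.cumsum(vals)                     # -> array([1, 3, 6]); NULL rows are skipped
+        #
+        # each row i of "select csum(col) ..." is then checked against expected[i].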
tdSql.error(self.csum_query_form(**interval_sql)) # interval + group_normal_col = { + "table_expr": "t1", + "condition": "group by c6" + } + tdSql.error(self.csum_query_form(**group_normal_col)) # group by normal col + slimit_soffset_sql = { + "table_expr": "stb1", + "condition": "group by tbname slimit 1 soffset 1" + } + # tdSql.error(self.csum_query_form(**slimit_soffset_sql)) + order_by_tbname_sql = { + "table_expr": "stb1", + "condition": "group by tbname order by tbname" + } + tdSql.error(self.csum_query_form(**order_by_tbname_sql)) + + pass + + def csum_test_data(self, tbnum:int, data_row:int, basetime:int) -> None : + for i in range(tbnum): + for j in range(data_row): + tdSql.execute( + f"insert into t{i} values (" + f"{basetime + (j+1)*10}, {random.randint(-200, -1)}, {random.uniform(200, -1)}, {basetime + random.randint(-200, -1)}, " + f"'binary_{j}', {random.uniform(-200, -1)}, {random.choice([0,1])}, {random.randint(-200,-1)}, " + f"{random.randint(-200, -1)}, {random.randint(-127, -1)}, 'nchar_{j}' )" + ) + + tdSql.execute( + f"insert into t{i} values (" + f"{basetime - (j+1) * 10}, {random.randint(1, 200)}, {random.uniform(1, 200)}, {basetime - random.randint(1, 200)}, " + f"'binary_{j}_1', {random.uniform(1, 200)}, {random.choice([0, 1])}, {random.randint(1,200)}, " + f"{random.randint(1,200)}, {random.randint(1,127)}, 'nchar_{j}_1' )" + ) + tdSql.execute( + f"insert into tt{i} values ( {basetime-(j+1) * 10}, {random.randint(1, 200)} )" + ) + + pass + + def csum_test_table(self,tbnum: int) -> None : + tdSql.execute("drop database if exists db") + tdSql.execute("create database if not exists db keep 3650") + tdSql.execute("use db") + + tdSql.execute( + "create stable db.stb1 (\ + ts timestamp, c1 int, c2 float, c3 timestamp, c4 binary(16), c5 double, c6 bool, \ + c7 bigint, c8 smallint, c9 tinyint, c10 nchar(16)\ + ) \ + tags(st1 int)" + ) + tdSql.execute( + "create stable db.stb2 (ts timestamp, c1 int) tags(st2 int)" + ) + for i in range(tbnum): + tdSql.execute(f"create table t{i} using stb1 tags({i})") + tdSql.execute(f"create table tt{i} using stb2 tags({i})") + + pass + + def csum_test_run(self) : + tdLog.printNoPrefix("==========TD-10594==========") + tbnum = 10 + nowtime = int(round(time.time() * 1000)) + per_table_rows = 2 + self.csum_test_table(tbnum) + + tdLog.printNoPrefix("######## no data test:") + self.csum_current_query() + self.csum_error_query() + + tdLog.printNoPrefix("######## insert only NULL test:") + for i in range(tbnum): + tdSql.execute(f"insert into t{i}(ts) values ({nowtime - 5})") + tdSql.execute(f"insert into t{i}(ts) values ({nowtime + 5})") + self.csum_current_query() + self.csum_error_query() + + tdLog.printNoPrefix("######## insert data in the range near the max(bigint/double):") + self.csum_test_table(tbnum) + tdSql.execute(f"insert into t1(ts, c1,c2,c5,c7) values " + f"({nowtime - (per_table_rows + 1) * 10}, {2**31-1}, {3.4*10**38}, {1.7*10**308}, {2**63-1})") + tdSql.execute(f"insert into t1(ts, c1,c2,c5,c7) values " + f"({nowtime - (per_table_rows + 2) * 10}, {2**31-1}, {3.4*10**38}, {1.7*10**308}, {2**63-1})") + self.csum_current_query() + self.csum_error_query() + + tdLog.printNoPrefix("######## insert data in the range near the min(bigint/double):") + self.csum_test_table(tbnum) + tdSql.execute(f"insert into t1(ts, c1,c2,c5,c7) values " + f"({nowtime - (per_table_rows + 1) * 10}, {1-2**31}, {-3.4*10**38}, {-1.7*10**308}, {1-2**63})") + tdSql.execute(f"insert into t1(ts, c1,c2,c5,c7) values " + f"({nowtime - (per_table_rows + 2) * 10}, 
{1-2**31}, {-3.4*10**38}, {-1.7*10**308}, {512-2**63})") + self.csum_current_query() + self.csum_error_query() + + tdLog.printNoPrefix("######## insert data without NULL data test:") + self.csum_test_table(tbnum) + self.csum_test_data(tbnum, per_table_rows, nowtime) + self.csum_current_query() + self.csum_error_query() + + + tdLog.printNoPrefix("######## insert data mix with NULL test:") + for i in range(tbnum): + tdSql.execute(f"insert into t{i}(ts) values ({nowtime})") + tdSql.execute(f"insert into t{i}(ts) values ({nowtime-(per_table_rows+3)*10})") + tdSql.execute(f"insert into t{i}(ts) values ({nowtime+(per_table_rows+3)*10})") + self.csum_current_query() + self.csum_error_query() + + + + tdLog.printNoPrefix("######## check after WAL test:") + tdSql.query("show dnodes") + index = tdSql.getData(0, 0) + tdDnodes.stop(index) + tdDnodes.start(index) + self.csum_current_query() + self.csum_error_query() + + def run(self): + import traceback + try: + # run in develop branch + self.csum_test_run() + pass + except Exception as e: + traceback.print_exc() + raise e + + + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) \ No newline at end of file diff --git a/tests/system-test/2-query/diff.py b/tests/system-test/2-query/diff.py index 03b3899dc659d79ca8ae0750710fe293b5f83a3b..0d8b0de3dca8d0db11eb98e9b04defff07df741c 100644 --- a/tests/system-test/2-query/diff.py +++ b/tests/system-test/2-query/diff.py @@ -15,59 +15,51 @@ class TDTestCase: self.perfix = 'dev' self.tables = 10 - def insertData(self): - print("==============step1") - tdSql.execute( - "create table if not exists st (ts timestamp, col int) tags(dev nchar(50))") - - for i in range(self.tables): - tdSql.execute("create table %s%d using st tags(%d)" % (self.perfix, i, i)) - rows = 15 + i - for j in range(rows): - tdSql.execute("insert into %s%d values(%d, %d)" %(self.perfix, i, self.ts + i * 20 * 10000 + j * 10000, j)) def run(self): tdSql.prepare() - tdSql.execute("create table ntb(ts timestamp,c1 int,c2 double,c3 float)") - tdSql.execute("insert into ntb values(now,1,1.0,10.5)(now+1s,10,-100.0,5.1)(now+10s,-1,15.1,5.0)") + tdSql.execute( + "create table ntb(ts timestamp,c1 int,c2 double,c3 float)") + tdSql.execute( + "insert into ntb values(now,1,1.0,10.5)(now+1s,10,-100.0,5.1)(now+10s,-1,15.1,5.0)") tdSql.query("select diff(c1,0) from ntb") tdSql.checkRows(2) - tdSql.checkData(0,0,9) - tdSql.checkData(1,0,-11) + tdSql.checkData(0, 0, 9) + tdSql.checkData(1, 0, -11) tdSql.query("select diff(c1,1) from ntb") tdSql.checkRows(2) - tdSql.checkData(0,0,9) - tdSql.checkData(1,0,None) - + tdSql.checkData(0, 0, 9) + tdSql.checkData(1, 0, None) + tdSql.query("select diff(c2,0) from ntb") tdSql.checkRows(2) - tdSql.checkData(0,0,-101) - tdSql.checkData(1,0,115.1) + tdSql.checkData(0, 0, -101) + tdSql.checkData(1, 0, 115.1) tdSql.query("select diff(c2,1) from ntb") tdSql.checkRows(2) - tdSql.checkData(0,0,None) - tdSql.checkData(1,0,115.1) + tdSql.checkData(0, 0, None) + tdSql.checkData(1, 0, 115.1) tdSql.query("select diff(c3,0) from ntb") tdSql.checkRows(2) - tdSql.checkData(0,0,-5.4) - tdSql.checkData(1,0,-0.1) + tdSql.checkData(0, 0, -5.4) + tdSql.checkData(1, 0, -0.1) tdSql.query("select diff(c3,1) from ntb") tdSql.checkRows(2) - tdSql.checkData(0,0,None) - tdSql.checkData(1,0,None) - + tdSql.checkData(0, 0, None) + tdSql.checkData(1, 0, None) tdSql.execute('''create table stb(ts timestamp, col1 tinyint, col2 
smallint, col3 int, col4 bigint, col5 float, col6 double, col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned) tags(loc nchar(20))''') tdSql.execute("create table stb_1 using stb tags('beijing')") - tdSql.execute("insert into stb_1 values(%d, 0, 0, 0, 0, 0.0, 0.0, False, ' ', ' ', 0, 0, 0, 0)" % (self.ts - 1)) - - # diff verifacation + tdSql.execute( + "insert into stb_1 values(%d, 0, 0, 0, 0, 0.0, 0.0, False, ' ', ' ', 0, 0, 0, 0)" % (self.ts - 1)) + + # diff verification tdSql.query("select diff(col1) from stb_1") tdSql.checkRows(0) - + tdSql.query("select diff(col2) from stb_1") tdSql.checkRows(0) @@ -87,38 +79,23 @@ class TDTestCase: tdSql.checkRows(0) for i in range(self.rowNum): - tdSql.execute("insert into stb_1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)" - % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1)) - - # tdSql.error("select diff(ts) from stb") + tdSql.execute("insert into stb_1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)" + % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1)) + + tdSql.error("select diff(ts) from stb") tdSql.error("select diff(ts) from stb_1") - # tdSql.error("select diff(col7) from stb") - - # tdSql.error("select diff(col8) from stb") + + # tdSql.error("select diff(col7) from stb") + + tdSql.error("select diff(col8) from stb") tdSql.error("select diff(col8) from stb_1") - # tdSql.error("select diff(col9) from stb") + tdSql.error("select diff(col9) from stb") tdSql.error("select diff(col9) from stb_1") tdSql.error("select diff(col11) from stb_1") tdSql.error("select diff(col12) from stb_1") tdSql.error("select diff(col13) from stb_1") tdSql.error("select diff(col14) from stb_1") - - tdSql.query("select ts,diff(col1),ts from stb_1") - tdSql.checkRows(11) - tdSql.checkData(0, 0, "2018-09-17 09:00:00.000") - tdSql.checkData(1, 0, "2018-09-17 09:00:00.000") - tdSql.checkData(1, 2, "2018-09-17 09:00:00.000") - tdSql.checkData(9, 0, "2018-09-17 09:00:00.009") - tdSql.checkData(9, 2, "2018-09-17 09:00:00.009") - - # tdSql.query("select ts,diff(col1),ts from stb group by tbname") - # tdSql.checkRows(10) - # tdSql.checkData(0, 0, "2018-09-17 09:00:00.000") - # tdSql.checkData(0, 1, "2018-09-17 09:00:00.000") - # tdSql.checkData(0, 3, "2018-09-17 09:00:00.000") - # tdSql.checkData(9, 0, "2018-09-17 09:00:00.009") - # tdSql.checkData(9, 1, "2018-09-17 09:00:00.009") - # tdSql.checkData(9, 3, "2018-09-17 09:00:00.009") + tdSql.error("select ts,diff(col1),ts from stb_1") tdSql.query("select diff(col1) from stb_1") tdSql.checkRows(10) @@ -137,10 +114,27 @@ class TDTestCase: tdSql.query("select diff(col6) from stb_1") tdSql.checkRows(10) - + + tdSql.execute('''create table stb1(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double, + col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned) tags(loc nchar(20))''') + tdSql.execute("create table stb1_1 using stb1 tags('shanghai')") + + for i in range(self.rowNum): + tdSql.execute("insert into stb1_1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)" + % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1)) + for i in range(self.rowNum): + tdSql.execute("insert
into stb1_1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)" + % (self.ts - i-1, i-1, i-1, i-1, i-1, -i - 0.1, -i - 0.1, -i % 2, i - 1, i - 1, i + 1, i + 1, i + 1, i + 1)) + tdSql.query("select diff(col1,0) from stb1_1") + tdSql.checkRows(19) + tdSql.query("select diff(col1,1) from stb1_1") + tdSql.checkRows(19) + tdSql.checkData(0,0,None) + def stop(self): tdSql.close() tdLog.success("%s successfully executed" % __file__) + tdCases.addWindows(__file__, TDTestCase()) -tdCases.addLinux(__file__, TDTestCase()) \ No newline at end of file +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/system-test/2-query/elapsed.py b/tests/system-test/2-query/elapsed.py new file mode 100644 index 0000000000000000000000000000000000000000..017090128d40f66eb7f395c75c41cafff2934a47 --- /dev/null +++ b/tests/system-test/2-query/elapsed.py @@ -0,0 +1,1604 @@ +################################################################### +# Copyright (c) 2020 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import os + +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + self.ts = 1420041600000 # 2015-01-01 00:00:00, begin time of the first record + self.num = 10 + + def caseDescription(self): + + ''' + case1 : [TD-11804] test case for elapsed function : + + this test case is for the aggregate function elapsed; the elapsed function can only be used on the timestamp primary key column (ts), + and it has two input parameters, of which the first is required; the basic SQL is as follows: + + =================================================================================================================================== + SELECT ELAPSED(field_name[, time_unit]) FROM { tb_name | stb_name } [WHERE clause] [INTERVAL(interval [, offset]) [SLIDING sliding]]; + =================================================================================================================================== + + the elapsed function can act on ordinary tables and super tables; notice that this function is related to the timeline. + If it acts on a super table, the query must group by tbname. This function also supports nested queries.
+ + The scenarios covered by the test cases are as follows: + + ==================================================================================================================================== + + case: select * from table|stable[group by tbname]|regular_table + + case:select elapsed(ts) from table|stable where clause interval (units) [fill(LINEAR,NEXT,PREV,VALUE,NULL)] [group by tbname] order [by ts desc asc|desc]; + + case:select elapsed(ts) , elapsed(ts,unit_time1)*regular_num1 , elapsed(ts,unit_time1)+regular_num2 from table|stable where clause interval (units) [fill(LINEAR,NEXT,PREV,VALUE,NULL)] [group by tbname] order [by ts desc asc|desc]; + + //mix with all functions in a single-level query (different from the nested query below) + case:select elapsed(ts), count(*), avg(col), twa(col), irate(col), sum(col), stddev(col), leastsquares(col, 1, 1),min(col), max(col), first(col), last(col), percentile(col, 20), apercentile(col, 30), last_row(col), spread(col)from table|stable where clause interval (units) [fill(LINEAR,NEXT,PREV,VALUE,NULL)] [group by tbname] order [by ts desc asc|desc]; + + //mix with ordinary columns + case:select ts ,elapsed(ts)*10 ,col+5 from table|stable where clause interval (units) [fill(LINEAR,NEXT,PREV,VALUE,NULL)] [group by tbname] order [by ts desc asc|desc]; + + //nested query + case:select elapsed(ts) from (select elapsed(ts), count(*), avg(col), twa(col), irate(col), sum(col), stddev(col), leastsquares(col, 1, 1),min(col), max(col), first(col), last(col), percentile(col, 20), apercentile(col, 30), last_row(col), spread(col)from table|stable where clause interval (units) [fill(LINEAR,NEXT,PREV,VALUE,NULL)] [group by tbname] order [by ts desc asc|desc]) where clause interval (units) [fill(LINEAR,NEXT,PREV,VALUE,NULL)] [group by tbname] order [by ts desc asc|desc]; + + //clauses with filter conditions + case:select elapsed(ts) from table|stable[group by tbname] where [ts|col|tag >|<|=|>=|<=|=|<>|!= value] | [between ... and ...] |[in] |[is null|not null] interval (unit_time) ; + case:select elapsed(ts) from table|stable[group by tbname] where clause1 and clause 2 and clause3 interval (unit_time) ; + + //JOIN query + case:select elapsed(ts) from TABLE1 as tb1 , TABLE2 as tb2 where join_condition [TABLE1 and TABLE2 can be stable|table|sub_table|empty_table] + + //UNION ALL query + case:select elapsed(ts) from TABLE1 union all select elapsed(ts) from TABLE2 [TABLE1 and TABLE2 can be stable|table|sub_table|empty_table] + + // Window aggregation + + case:select elapsed(ts) from t1 where clause session(ts, time_units) ; + case:select elapsed(ts) from t1 where clause state_window(regular_nums); + + // Continuous query + case:create table select elapsed(ts) ,avg(col) from (select elapsed(ts) ts_inter ,avg(col) col from stable|table interval (unit_time) [fill(LINEAR,NEXT,PREV,VALUE,NULL)][group by tbname]) interval (unit_time) [fill(LINEAR,NEXT,PREV,VALUE,NULL) sliding(unit_time_windows); + + ======================================================================================================================================== + + this test case checks both successful execution and correctness of results.
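+
+        a short worked example of the expected semantics (illustrative, using the data layout
+        prepared below: 10 rows per table, primary key ts spaced 10 seconds apart, database
+        precision in milliseconds):
+
+            select elapsed(ts) from sub_table1_1;      --> 90000.0 (max(ts) - min(ts), in the default unit, i.e. the db precision)
+            select elapsed(ts, 10s) from sub_table1_1; --> 9.0 (the same span expressed in 10-second units)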
+ + ''' + return + + def prepare_data(self): + + tdLog.info (" ====================================== prepare data ==================================================") + + tdSql.execute('drop database if exists testdb ;') + tdSql.execute('create database testdb keep 36500;') + tdSql.execute('use testdb;') + + tdSql.execute('create stable stable_1(ts timestamp ,tscol timestamp, q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint, q_float float ,\ + q_double double , bin_chars binary(20)) tags(loc nchar(20) ,ind int,tstag timestamp);') + tdSql.execute('create stable stable_2(ts timestamp ,tscol timestamp, q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint, q_float float ,\ + q_double double, bin_chars binary(20) ) tags(loc nchar(20),ind int,tstag timestamp);') + # create empty stables + tdSql.execute('create stable stable_empty(ts timestamp ,tscol timestamp, q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint, q_float float ,\ + q_double double, bin_chars binary(20) ) tags(loc nchar(20),ind int,tstag timestamp);') + tdSql.execute('create stable stable_sub_empty(ts timestamp ,tscol timestamp, q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint, q_float float ,\ + q_double double, bin_chars binary(20) ) tags(loc nchar(20),ind int,tstag timestamp);') + + # create empty sub_tables and regular tables + tdSql.execute('create table sub_empty_1 using stable_sub_empty tags("sub_empty_1",3,"2015-01-01 00:02:00")') + tdSql.execute('create table sub_empty_2 using stable_sub_empty tags("sub_empty_2",3,"2015-01-01 00:02:00")') + tdSql.execute('create table regular_empty (ts timestamp , tscol timestamp ,q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint , q_float float , q_double double , bin_chars binary(20)) ;') + + tdSql.execute('create table sub_table1_1 using stable_1 tags("sub1_1",1,"2015-01-01 00:00:00")') + tdSql.execute('create table sub_table1_2 using stable_1 tags("sub1_2",2,"2015-01-01 00:01:00")') + tdSql.execute('create table sub_table1_3 using stable_1 tags("sub1_3",3,"2015-01-01 00:02:00")') + + tdSql.execute('create table sub_table2_1 using stable_2 tags("sub2_1",1,"2015-01-01 00:00:00")') + tdSql.execute('create table sub_table2_2 using stable_2 tags("sub2_2",2,"2015-01-01 00:01:00")') + tdSql.execute('create table sub_table2_3 using stable_2 tags("sub2_3",3,"2015-01-01 00:02:00")') + + tdSql.execute('create table regular_table_1 (ts timestamp , tscol timestamp ,q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint , q_float float , q_double double, bin_chars binary(20)) ;') + tdSql.execute('create table regular_table_2 (ts timestamp , tscol timestamp ,q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint , q_float float , q_double double , bin_chars binary(20)) ;') + tdSql.execute('create table regular_table_3 (ts timestamp , tscol timestamp ,q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint , q_float float , q_double double , bin_chars binary(20)) ;') + + tablenames = ["sub_table1_1","sub_table1_2","sub_table1_3","sub_table2_1","sub_table2_2","sub_table2_3","regular_table_1","regular_table_2","regular_table_3"] + + tdLog.info("insert records ") + + for tablename in tablenames: + + for i in range(self.num): + sql= 'insert into %s values(%d, %d,%d, %d, %d, %d, %f, %f, "%s")' % (tablename,self.ts + i*10000, self.ts + i*10,2147483647-i, 9223372036854775807-i, 32767-i, 127-i, i, i,("bintest"+str(i))) + print(sql) + tdSql.execute(sql) + +
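+        # Editor's note (layout summary, useful when reading the assertions below): each of the
+        # 9 non-empty tables gets self.num (10) rows; the primary key ts advances 10 seconds per
+        # row while the tscol column advances only 10 milliseconds per row, so a full table spans
+        # 90 seconds of ts and e.g. elapsed(ts,10s) over a whole table evaluates to 9.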
tdLog.info("=============================================data prepared done!=========================") + + def abnormal_common_test(self): + + tdLog.info (" ====================================== elapsed illegal params ==================================================") + + tablenames = ["sub_table1_1","sub_table1_2","sub_table1_3","sub_table2_1","sub_table2_2","sub_table2_3","regular_table_1","regular_table_2","regular_table_3"] + + abnormal_list = ["()","(NULL)","(*)","(abc)","( , )","(NULL,*)","( ,NULL)","(%)","(+)","(*,)","(*, /)","(ts,*)", "(ts,tbname*10)","(ts,tagname)", + "(ts,2d+3m-2s,NULL)","(ts+1d,10s)","(ts+10d,NULL)" ,"(ts,now -1m%1d)","(ts+10d)","(ts+10d,_c0)","(ts+10d,)","(ts,%)","(ts, , m)","(ts,abc)","(ts,/)","(ts,*)","(ts,1s,100)", + "(ts,1s,abc)","(ts,1s,_c0)","(ts,1s,*)","(ts,1s,NULL)","(ts,,_c0)","(ts,tbname,ts)","(ts,0,tbname)","('2021-11-18 00:00:10')","('2021-11-18 00:00:10', 1s)", + "('2021-11-18T00:00:10+0800', '1s')","('2021-11-18T00:00:10Z', '1s')","('2021-11-18T00:00:10+0800', 10000000d,)","('ts', ,2021-11-18T00:00:10+0800, )"] + + for tablename in tablenames: + for abnormal_param in abnormal_list: + + if tablename.startswith("stable"): + basic_sql= "select elapsed" + abnormal_param + " from " + tablename + " group by tbname ,ind order by tbname;" #stables + else: + basic_sql= "select elapsed" + abnormal_param + " from " + tablename + ";" # regular table + tdSql.error(basic_sql) + + def abnormal_use_test(self): + + tdLog.info (" ====================================== elapsed use abnormal ==================================================") + + sqls_list = ["select elapsed(ts) from regular_empty group by tbname,ind order by desc; ", + "select elapsed(ts) from regular_empty group by tbname,ind order by desc; ", + "select elapsed(ts) from regular_table_1 group by tbname,ind order by desc; ", + "select elapsed(ts) from sub_table1_1 group by tbname,ind order by desc; ", + "select elapsed(ts) from sub_table1_1 group by tbname,ind order by desc; ", + # "select elapsed(ts,10s) from stable_empty group by ts order by ts;", + "select elapsed(ts,10s) from stable_1 group by ind order by ts;", + "select elapsed(ts,10s) from stable_2 group by tstag order by ts;", + "select elapsed(ts,10s) from stable_1 group by tbname,tstag,tscol order by ts;", + "select elapsed(ts,10s),ts from stable_1 group by tbname ,ind order by ts;", + "select ts,elapsed(ts,10s),tscol*100 from stable_1 group by tbname ,ind order by ts;", + "select elapsed(ts) from stable_1 group by tstag order by ts;", + "select elapsed(ts) from sub_empty_1 group by tbname,ind ,tscol order by ts desc;", + "select tbname, tscol,elapsed(ts) from sub_table1_1 group by tbname ,ind order by ts desc;", + "select elapsed(tscol) from sub_table1_1 order by ts desc;", + "select elapsed(tstag) from sub_table1_1 order by ts desc;", + "select elapsed(ind) from sub_table1_1 order by ts desc;", + "select elapsed(tscol) from sub_empty_1 order by ts desc;", + "select elapsed(tstag) from sub_empty_1 order by ts desc;", + "select elapsed(ind) from sub_table1_1 order by ts desc;", + "select elapsed(ind,10s) from sub_table1_1 order by ts desc;", + "select elapsed(tscol,10s) from sub_table1_1 order by ts desc;", + "select elapsed(tstag,10s) from sub_table1_1 order by ts desc;", + "select elapsed(q_int,10s) from sub_table1_1 order by ts desc;", + "select elapsed(loc,10s) from sub_table1_1 order by ts desc;", + "select elapsed(q_bigint,10s) from sub_table1_1 order by ts desc;", + "select elapsed(bin_chars,10s) from sub_table1_1 order by ts
desc;"] + for sql in sqls_list : + tdSql.error(sql) + + def query_filter(self): + + tdLog.info (" ====================================== elapsed query filter ==================================================") + + for i in range(self.num): + ts_start_time = self.ts + i*10000 + ts_col_start_time = self.ts + i*10 + ts_tag_time = "2015-01-01 00:01:00" + ts_end_time = self.ts + (self.num-1-i)*10000 + ts_col_end_time = self.ts + (self.num-1-i)*10 + + filter_sql = "select elapsed(ts,10s) from stable_1 where ts >= %d group by tbname " %(ts_start_time) + tdSql.query(filter_sql) + tdSql.checkRows(3) + tdSql.checkData(0,0,float(self.num -i-1)) + tdSql.checkData(1,0,float(self.num -i-1)) + tdSql.checkData(2,0,float(self.num -i-1)) + + filter_sql = "select elapsed(ts,10s) from sub_table1_1 where ts >= %d " %(ts_start_time) + tdSql.query(filter_sql) + tdSql.checkRows(1) + tdSql.checkData(0,0,float(self.num -i-1)) + + + filter_sql = "select elapsed(ts,10s) from stable_1 where ts >= %d and tscol >= %d and tstag='2015-01-01 00:01:00'group by tbname " %(ts_start_time,ts_col_start_time) + tdSql.query(filter_sql) + tdSql.checkRows(1) + tdSql.checkData(0,0,float(self.num -i-1)) + + filter_sql = "select elapsed(ts,10s) from sub_table1_1 where ts >= %d and tscol >= %d " %(ts_start_time,ts_col_start_time) + tdSql.query(filter_sql) + tdSql.checkRows(1) + tdSql.checkData(0,0,float(self.num -i-1)) + + filter_sql = "select elapsed(ts,10s) from stable_1 where ts >= %d and tscol > %d and tstag='2015-01-01 00:01:00' group by tbname" %(ts_start_time,ts_col_start_time) + tdSql.query(filter_sql) + + if i == self.num-1: + tdSql.checkRows(0) + else: + tdSql.checkRows(1) + tdSql.checkData(0,0,float(self.num -i-2)) + + filter_sql = "select elapsed(ts,10s) from sub_table1_1 where ts >= %d and tscol > %d " %(ts_start_time,ts_col_start_time) + tdSql.query(filter_sql) + + if i == self.num-1: + tdSql.checkRows(0) + else: + tdSql.checkRows(1) + tdSql.checkData(0,0,float(self.num -i-2)) + + filter_sql = "select elapsed(ts,10s) from stable_1 where ts > %d and tscol > %d and tstag < '2015-01-01 00:01:00' group by tbname " %(ts_start_time,ts_col_start_time) + tdSql.query(filter_sql) + + if i == self.num-1: + tdSql.checkRows(0) + else: + tdSql.checkRows(1) + tdSql.checkData(0,0,float(self.num -i-2)) + + filter_sql = "select elapsed(ts,10s) from sub_table1_1 where ts > %d and tscol > %d " %(ts_start_time,ts_col_start_time) + tdSql.query(filter_sql) + + if i == self.num-1: + tdSql.checkRows(0) + else: + tdSql.checkRows(1) + tdSql.checkData(0,0,float(self.num -i-2)) + + filter_sql = "select elapsed(ts,10s) from stable_1 where ts > %d and tscol <= %d and tstag < '2015-01-01 00:01:00' group by tbname" %(ts_start_time,ts_col_start_time) + tdSql.query(filter_sql) + tdSql.checkRows(0) + + filter_sql = "select elapsed(ts,10s) from sub_table1_1 where ts > %d and tscol <= %d " %(ts_start_time,ts_col_start_time) + tdSql.query(filter_sql) + tdSql.checkRows(0) + + filter_sql = "select elapsed(ts,10s) from stable_1 where ts < %d and tscol <= %d and tstag < '2015-01-01 00:01:00' group by tbname" %(ts_end_time,ts_col_end_time) + tdSql.query(filter_sql) + + if i == self.num-1: + tdSql.checkRows(0) + else: + tdSql.checkRows(1) + tdSql.checkData(0,0,float(self.num -i-2)) + + filter_sql = "select elapsed(ts,10s) from sub_table1_1 where ts < %d and tscol <= %d " %(ts_end_time,ts_col_end_time) + tdSql.query(filter_sql) + + if i == self.num-1: + tdSql.checkRows(0) + else: + tdSql.checkRows(1) + tdSql.checkData(0,0,float(self.num -i-2)) + + filter_sql = 
"select elapsed(ts,10s) from stable_1 where ts < %d and tscol <= %d group by tbname " %(ts_end_time,ts_col_end_time) + tdSql.query(filter_sql) + + if i == self.num-1: + tdSql.checkRows(0) + else: + tdSql.checkRows(3) + tdSql.checkData(0,0,float(self.num - i - 2)) + tdSql.checkData(1,0,float(self.num - i - 2)) + tdSql.checkData(2,0,float(self.num - i - 2)) + + filter_sql = "select elapsed(ts,10s) from sub_table1_1 where ts < %d and tscol <= %d " %(ts_end_time,ts_col_end_time) + tdSql.query(filter_sql) + + if i == self.num-1: + tdSql.checkRows(0) + else: + tdSql.checkRows(1) + tdSql.checkData(0,0,float(self.num - i - 2)) + + filter_sql = "select elapsed(ts,10s) from stable_1 where ts = %d and tscol < %d group by tbname " %(ts_end_time,ts_col_end_time) + tdSql.query(filter_sql) + tdSql.checkRows(0) + + filter_sql = "select elapsed(ts,10s) from sub_table1_1 where ts = %d and tscol < %d " %(ts_end_time,ts_col_end_time) + tdSql.query(filter_sql) + tdSql.checkRows(0) + + filter_sql = "select elapsed(ts,10s) from stable_1 where q_tinyint != %d and tscol < %d group by tbname " %(i,ts_col_end_time) + tdSql.query(filter_sql) + + if i == self.num-1: + tdSql.checkRows(0) + else: + tdSql.checkRows(3) + tdSql.checkData(0,0,float(self.num -i-2)) + tdSql.checkData(1,0,float(self.num -i-2)) + tdSql.checkData(2,0,float(self.num -i-2)) + + filter_sql = "select elapsed(ts,10s) from sub_table1_1 where q_tinyint != %d and tscol < %d " %(i,ts_col_end_time) + tdSql.query(filter_sql) + + if i == self.num-1: + tdSql.checkRows(0) + else: + tdSql.checkRows(1) + tdSql.checkData(0,0,float(self.num -i-2)) + + filter_sql = "select elapsed(ts,10s) from stable_1 where q_tinyint != %d and tscol <= %d group by tbname " %(i,ts_col_end_time) + tdSql.query(filter_sql) + + if i == self.num: + tdSql.checkRows(0) + else: + tdSql.checkRows(3) + tdSql.checkData(0,0,float(self.num - i - 1)) + tdSql.checkData(1,0,float(self.num - i - 1)) + tdSql.checkData(2,0,float(self.num - i - 1)) + + filter_sql = "select elapsed(ts,10s) from sub_table1_1 where q_tinyint != %d and tscol <= %d " %(i,ts_col_end_time) + tdSql.query(filter_sql) + + if i == self.num: + tdSql.checkRows(0) + else: + tdSql.checkRows(1) + tdSql.checkData(0,0,float(self.num - i - 1)) + + filter_sql = "select elapsed(ts,10s) from stable_1 where q_tinyint <> %d and tscol < %d group by tbname " %(i,ts_col_end_time) + tdSql.query(filter_sql) + + if i == self.num-1: + tdSql.checkRows(0) + else: + tdSql.checkRows(3) + tdSql.checkData(0,0,float(self.num -i-2)) + tdSql.checkData(1,0,float(self.num -i-2)) + tdSql.checkData(2,0,float(self.num -i-2)) + + filter_sql = "select elapsed(ts,10s) from sub_table1_1 where q_tinyint <> %d and tscol < %d " %(i,ts_col_end_time) + tdSql.query(filter_sql) + + if i == self.num-1: + tdSql.checkRows(0) + else: + tdSql.checkRows(1) + tdSql.checkData(0,0,float(self.num -i-2)) + + filter_sql = "select elapsed(ts,10s) from stable_1 where q_tinyint <> %d and tscol <= %d group by tbname " %(i,ts_col_end_time) + tdSql.query(filter_sql) + + if i == self.num: + tdSql.checkRows(0) + else: + tdSql.checkRows(3) + tdSql.checkData(0,0,float(self.num - i - 1)) + tdSql.checkData(1,0,float(self.num - i - 1)) + tdSql.checkData(2,0,float(self.num - i - 1)) + + filter_sql = "select elapsed(ts,10s) from sub_table1_1 where q_tinyint <> %d and tscol <= %d " %(i,ts_col_end_time) + tdSql.query(filter_sql) + + if i == self.num: + tdSql.checkRows(0) + else: + tdSql.checkRows(1) + tdSql.checkData(0,0,float(self.num - i - 1)) + + # filter between and + tdSql.query("select 
elapsed(ts,10s) from sub_table1_1 where ts between '2015-01-01 00:00:00.000' and '2015-01-01 00:01:00.000' and q_tinyint between 125 and 127 and tscol <= '2015-01-01 00:01:00.000' ") + tdSql.checkData(0,0,2) + tdSql.query("select elapsed(ts,10s) from stable_1 where ts between '2015-01-01 00:00:00.000' and '2015-01-01 00:01:00.000' and \ + q_tinyint between 125 and 127 and tscol <= '2015-01-01 00:01:00.000' group by tbname ") + tdSql.checkData(0,0,2) + tdSql.checkData(1,0,2) + tdSql.checkData(2,0,2) + + # filter in and or + tdSql.query("select elapsed(ts,10s) from sub_table1_1 where ts between '2015-01-01 00:00:00.000' and '2015-01-01 00:01:00.000' and q_tinyint between 125 and 127 and tscol <= '2015-01-01 00:01:00.000' ") + tdSql.checkData(0,0,2) + + tdSql.query("select elapsed(ts,10s) from stable_1 where ts between '2015-01-01 00:00:00.000' and '2015-01-01 00:01:00.000' and q_tinyint between 125 and 127 and tscol <= '2015-01-01 00:01:00.000' group by tbname ") + tdSql.checkData(0,0,2) + tdSql.checkData(1,0,2) + tdSql.checkData(2,0,2) + + tdSql.query("select elapsed(ts,10s) from stable_1 where ts between '2015-01-01 00:00:00.000' and '2015-01-01 00:01:00.000' and q_tinyint in (125,126,127) and tscol <= '2015-01-01 00:01:00.000' group by tbname ") + tdSql.checkData(0,0,2) + tdSql.checkData(1,0,2) + tdSql.checkData(2,0,2) + + tdSql.query("select elapsed(ts,10s) from stable_1 where ts between '2015-01-01 00:00:00.000' and '2015-01-01 00:01:00.000' and bin_chars in ('bintest0','bintest1') and tscol <= '2015-01-01 00:01:00.000' group by tbname ") + tdSql.checkData(0,0,1) + tdSql.checkData(1,0,1) + tdSql.checkData(2,0,1) + + tdSql.query("select elapsed(ts,10s) from stable_1 where ts between '2015-01-01 00:00:00.000' and '2015-01-01 00:01:00.000' and bin_chars in ('bintest0','bintest1') and tscol <= '2015-01-01 00:01:00.000' group by tbname ") + tdSql.checkData(0,0,1) + tdSql.checkData(1,0,1) + tdSql.checkData(2,0,1) + + tdSql.query("select elapsed(ts,10s) from stable_1 where ts between '2015-01-01 00:00:00.000' and '2015-01-01 00:01:00.000' and bin_chars like 'bintest_' and tscol <= '2015-01-01 00:01:00.000' group by tbname ") + tdSql.checkData(0,0,6) + tdSql.checkData(1,0,6) + tdSql.checkData(2,0,6) + + tdSql.query("select elapsed(ts,10s) from stable_1 where ts between '2015-01-01 00:00:00.000' and '2015-01-01 00:01:00.000' and bin_chars like 'bintest_' and tscol <= '2015-01-01 00:01:00.000' group by tbname ") + tdSql.checkData(0,0,6) + tdSql.checkData(1,0,6) + tdSql.checkData(2,0,6) + + tdSql.query("select elapsed(ts,10s) from stable_1 where ts between '2015-01-01 00:00:00.000' and '2015-01-01 00:01:00.000' and bin_chars is not null and tscol <= '2015-01-01 00:01:00.000' group by tbname; ") + tdSql.checkData(0,0,6) + tdSql.checkData(1,0,6) + tdSql.checkData(2,0,6) + + tdSql.query("select elapsed(ts,10s) from stable_1 where ts between '2015-01-01 00:00:00.000' and '2015-01-01 00:01:00.000' and bin_chars is null and tscol <= '2015-01-01 00:01:00.000' group by tbname; ") + tdSql.checkRows(0) + + tdSql.query("select elapsed(ts,10s) from stable_1 where ts between '2015-01-01 00:00:00.000' and '2015-01-01 00:01:00.000' and bin_chars match '^b' and tscol <= '2015-01-01 00:01:00.000' group by tbname; ") + tdSql.checkRows(3) + tdSql.checkData(0,0,6) + tdSql.checkData(1,0,6) + tdSql.checkData(2,0,6) + + tdSql.query("select elapsed(ts,10s) from stable_1 where ts between '2015-01-01 00:00:00.000' and '2015-01-01 00:01:00.000' and bin_chars nmatch '^a' and tscol <= '2015-01-01 00:01:00.000' group by 
tbname; ") + tdSql.checkRows(3) + tdSql.checkData(0,0,6) + tdSql.checkData(1,0,6) + tdSql.checkData(2,0,6) + + tdSql.query("select elapsed(ts,10s) from stable_1 where ts between '2015-01-01 00:00:00.000' and '2015-01-01 00:01:00.000' and bin_chars ='bintest1' or bin_chars ='bintest2' and tscol <= '2015-01-01 00:01:00.000' group by tbname; ") + tdSql.checkRows(3) + tdSql.query("select elapsed(ts,10s) from stable_1 where (ts between '2015-01-01 00:00:00.000' and '2015-01-01 00:01:00.000') or (ts between '2015-01-01 00:01:00.000' and '2015-01-01 00:02:00.000') group by tbname; ") + tdSql.checkRows(3) + tdSql.checkData(0,0,9) + tdSql.checkData(1,0,9) + tdSql.checkData(2,0,9) + + def query_interval(self): + + tdLog.info (" ====================================== elapsed interval sliding fill ==================================================") + + # empty interval + tdSql.query("select max(q_int)*10 from stable_empty where ts >= '2015-01-01 00:00:00.000' and ts <'2015-01-01 00:10:00.000' interval(10s) fill(prev);") + tdSql.checkRows(0) + tdSql.query("select max(q_int)*10 from sub_empty_2 where ts >= '2015-01-01 00:00:00.000' and ts <'2015-01-01 00:10:00.000' interval(10s) fill(prev);") + tdSql.checkRows(0) + + tdSql.query("select elapsed(ts,10s)*10 from stable_empty where ts >= '2015-01-01 00:00:00.000' and ts <'2015-01-01 00:10:00.000' interval(10s) fill(prev) group by tbname;") + tdSql.checkRows(0) + tdSql.query("select elapsed(ts,10s)*10 from sub_empty_2 where ts >= '2015-01-01 00:00:00.000' and ts <'2015-01-01 00:10:00.000' interval(10s) fill(prev);") + tdSql.checkRows(0) + + for i in range(self.num): + ts_start_time = self.ts + i*10000 + ts_col_start_time = self.ts + i*10 + ts_tag_time = "2015-01-01 00:01:00" + ts_end_time = self.ts + (self.num-1-i)*10000 + ts_col_end_time = self.ts + (self.num-1-i)*10 + + + # only interval + interval_sql = "select elapsed(ts,10s) from stable_1 where ts <=%d interval(10s) group by tbname " %(ts_start_time) + tdSql.query(interval_sql) + tdSql.checkRows(3*(i+1)) + + interval_sql = "select elapsed(ts,10s) from sub_table1_1 where ts <=%d interval(10s) " %(ts_start_time) + tdSql.query(interval_sql) + tdSql.checkRows(i+1) + for x in range(i+1): + if x == i: + tdSql.checkData(x,1,0) + else : + tdSql.checkData(x,1,1) + + # interval and fill , fill_type = ["NULL","value,100","prev","next","linear"] + + # interval (10s) and time range is outer records + + tdSql.query("select elapsed(ts,10s)*10 from stable_empty where ts >= '2015-01-01 00:00:00.000' and ts <'2015-01-01 00:10:00.000' interval(10s) fill(prev) group by tbname;") + tdSql.checkRows(0) + + tdSql.query("select elapsed(ts,10s)*10 from sub_empty_2 where ts >= '2015-01-01 00:00:00.000' and ts <'2015-01-01 00:10:00.000' interval(10s) fill(prev);") + tdSql.checkRows(0) + + tdSql.query("select elapsed(ts,10s)*10 from stable_1 where ts >= '2015-01-01 00:00:00.000' and ts <'2015-01-01 00:10:00.000' interval(10s) fill(prev) group by tbname;") + tdSql.checkRows(180) + tdSql.checkData(0,1,10) + tdSql.checkData(9,1,0) + tdSql.checkData(59,1,0) + tdSql.checkData(60,1,10) + + tdSql.query("select elapsed(ts,10s)*10 from stable_1 where ts >= '2015-01-01 00:00:00.000' and ts <'2015-01-01 00:10:00.000' interval(10s) fill(next) group by tbname;") + tdSql.checkRows(180) + tdSql.checkData(0,1,10) + tdSql.checkData(9,1,0) + tdSql.checkData(10,1,None) + tdSql.checkData(59,1,None) + tdSql.checkData(60,1,10) + tdSql.checkData(61,1,10) + + tdSql.query("select elapsed(ts,10s)*10 from stable_1 where ts >= '2015-01-01 00:00:00.000' and 
ts <'2015-01-01 00:10:00.000' interval(10s) fill(linear) group by tbname;") + tdSql.checkRows(180) + tdSql.checkData(0,1,10) + tdSql.checkData(9,1,0) + tdSql.checkData(10,1,None) + tdSql.checkData(59,1,None) + tdSql.checkData(60,1,10) + tdSql.checkData(61,1,10) + + tdSql.query("select elapsed(ts,10s)*10 from stable_1 where ts >= '2015-01-01 00:00:00.000' and ts <'2015-01-01 00:10:00.000' interval(10s) fill(NULL) group by tbname;") + tdSql.checkRows(180) + tdSql.checkData(0,1,10) + tdSql.checkData(9,1,0) + tdSql.checkData(10,1,None) + tdSql.checkData(59,1,None) + tdSql.checkData(60,1,10) + tdSql.checkData(61,1,10) + + tdSql.query("select elapsed(ts,10s)*10 from stable_1 where ts >= '2015-01-01 00:00:00.000' and ts <'2015-01-01 00:10:00.000' interval(10s) fill(value ,2) group by tbname;") + tdSql.checkRows(180) + tdSql.checkData(0,1,10) + tdSql.checkData(9,1,0) + tdSql.checkData(10,1,2) + tdSql.checkData(59,1,2) + tdSql.checkData(60,1,10) + tdSql.checkData(61,1,10) + + # interval (20s) and time range is outer records + tdSql.query("select elapsed(ts,10s)*10 from stable_1 where ts >= '2015-01-01 00:00:00.000' and ts <'2015-01-01 00:10:00.000' interval(20s) fill(prev) group by tbname,ind ;") + tdSql.checkRows(90) + tdSql.checkData(0,1,20) + tdSql.checkData(4,1,10) + tdSql.checkData(5,1,10) + tdSql.checkData(29,1,10) + tdSql.checkData(30,1,20) + tdSql.checkData(31,1,20) + + tdSql.query("select elapsed(ts,10s)*10 from stable_1 where ts >= '2015-01-01 00:00:00.000' and ts <'2015-01-01 00:10:00.000' interval(20s) fill(next) group by tbname,ind ;") + tdSql.checkRows(90) + tdSql.checkData(0,1,20) + tdSql.checkData(4,1,10) + tdSql.checkData(5,1,None) + tdSql.checkData(29,1,None) + tdSql.checkData(30,1,20) + tdSql.checkData(31,1,20) + + tdSql.query("select elapsed(ts,10s)*10 from stable_1 where ts >= '2015-01-01 00:00:00.000' and ts <'2015-01-01 00:10:00.000' interval(20s) fill(linear) group by tbname,ind ;") + tdSql.checkRows(90) + tdSql.checkData(0,1,20) + tdSql.checkData(4,1,10) + tdSql.checkData(5,1,None) + tdSql.checkData(29,1,None) + tdSql.checkData(30,1,20) + tdSql.checkData(31,1,20) + + tdSql.query("select elapsed(ts,10s)*10 from stable_1 where ts >= '2015-01-01 00:00:00.000' and ts <'2015-01-01 00:10:00.000' interval(20s) fill(NULL) group by tbname,ind ;") + tdSql.checkRows(90) + tdSql.checkData(0,1,20) + tdSql.checkData(4,1,10) + tdSql.checkData(5,1,None) + tdSql.checkData(29,1,None) + tdSql.checkData(30,1,20) + tdSql.checkData(31,1,20) + + tdSql.query("select elapsed(ts,10s)*10 from stable_1 where ts >= '2015-01-01 00:00:00.000' and ts <'2015-01-01 00:10:00.000' interval(20s) fill(value ,2) group by tbname,ind ;") + tdSql.checkRows(90) + tdSql.checkData(0,1,20) + tdSql.checkData(4,1,10) + tdSql.checkData(5,1,2) + tdSql.checkData(29,1,2) + tdSql.checkData(30,1,20) + tdSql.checkData(31,1,20) + + # interval (20s) and time range is in records + + tdSql.query("select elapsed(ts,10s)*10 from stable_1 where ts >= '2015-01-01 00:00:00.000' and ts <'2015-01-01 00:01:00.000' interval(20s) fill(prev) group by tbname,ind ;") + tdSql.checkRows(9) + tdSql.checkData(0,1,20) + tdSql.checkData(2,1,10) + tdSql.checkData(3,1,20) + tdSql.checkData(5,1,10) + tdSql.checkData(7,1,20) + tdSql.checkData(8,1,10) + + tdSql.query("select elapsed(ts,10s)*10 from stable_1 where ts >= '2015-01-01 00:00:00.000' and ts <'2015-01-01 00:01:00.000' interval(20s) fill(next) group by tbname,ind ;") + tdSql.checkRows(9) + tdSql.checkData(0,1,20) + tdSql.checkData(2,1,10) + tdSql.checkData(3,1,20) + tdSql.checkData(5,1,10) + 
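+        # Editor's note on the expected values in this section: the query range is 60 s wide, so
+        # interval(20s) yields 3 windows per child table, and 3 tables x 3 windows = 9 result rows.
+        # The checks are consistent with elapsed spanning from the first covered timestamp to the
+        # window boundary, except in the trailing window, where it stops at the last actual row:
+        # hence the repeating 20, 20, 10 pattern (after the *10 scaling). The fill mode never
+        # applies here, since every window in the range contains data.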
tdSql.checkData(7,1,20) + tdSql.checkData(8,1,10) + + tdSql.query("select elapsed(ts,10s)*10 from stable_1 where ts >= '2015-01-01 00:00:00.000' and ts <'2015-01-01 00:01:00.000' interval(20s) fill(linear) group by tbname,ind ;") + tdSql.checkRows(9) + tdSql.checkData(0,1,20) + tdSql.checkData(2,1,10) + tdSql.checkData(3,1,20) + tdSql.checkData(5,1,10) + tdSql.checkData(7,1,20) + tdSql.checkData(8,1,10) + + tdSql.query("select elapsed(ts,10s)*10 from stable_1 where ts >= '2015-01-01 00:00:00.000' and ts <'2015-01-01 00:01:00.000' interval(20s) fill(NULL) group by tbname,ind ;") + tdSql.checkRows(9) + tdSql.checkData(0,1,20) + tdSql.checkData(2,1,10) + tdSql.checkData(3,1,20) + tdSql.checkData(5,1,10) + tdSql.checkData(7,1,20) + tdSql.checkData(8,1,10) + + tdSql.query("select elapsed(ts,10s)*10 from stable_1 where ts >= '2015-01-01 00:00:00.000' and ts <'2015-01-01 00:01:00.000' interval(20s) fill(value ,2 ) group by tbname,ind ;") + tdSql.checkRows(9) + tdSql.checkData(0,1,20) + tdSql.checkData(2,1,10) + tdSql.checkData(3,1,20) + tdSql.checkData(5,1,10) + tdSql.checkData(7,1,20) + tdSql.checkData(8,1,10) + + tdSql.query("select elapsed(ts,10s)*10 from stable_1 where ts >= '2015-01-01 00:00:00.000' and ts <'2015-01-01 00:01:00.000' interval(20s) group by tbname,ind ;") + tdSql.checkRows(9) + tdSql.checkData(0,1,20) + tdSql.checkData(2,1,10) + tdSql.checkData(3,1,20) + tdSql.checkData(5,1,10) + tdSql.checkData(7,1,20) + tdSql.checkData(8,1,10) + + tdSql.query("select elapsed(ts,10s)*10 from stable_1 where ts >= '2014-12-31 23:59:00.000' and ts <'2015-01-01 00:01:00.000' interval(20s) fill(NULL) group by tbname,ind ;") + tdSql.checkRows(18) + tdSql.checkData(0,1,None) + tdSql.checkData(2,1,None) + tdSql.checkData(3,1,20) + tdSql.checkData(5,1,10) + tdSql.checkData(7,1,None) + tdSql.checkData(8,1,None) + tdSql.checkData(9,1,20) + + # interval sliding + + tdSql.query("select elapsed(ts,10s)*10 from stable_1 where ts >= '2014-12-31 23:59:00.000' and ts <'2015-01-01 00:01:00.000' interval(20s) sliding(20s) fill(NULL) group by tbname,ind ;") + tdSql.checkRows(18) + tdSql.checkData(0,1,None) + tdSql.checkData(2,1,None) + tdSql.checkData(3,1,20) + tdSql.checkData(5,1,10) + tdSql.checkData(7,1,None) + tdSql.checkData(8,1,None) + tdSql.checkData(9,1,20) + + tdSql.query("select elapsed(ts,10s)*10 from stable_1 where ts >= '2014-12-31 23:59:00.000' and ts <'2015-01-01 00:01:00.000' interval(20s) sliding(10s) fill(NULL) group by tbname,ind ;") + tdSql.checkRows(39) + tdSql.checkData(0,1,None) + tdSql.checkData(2,1,None) + tdSql.checkData(6,1,10) + tdSql.checkData(7,1,20) + tdSql.checkData(12,1,0) + tdSql.checkData(13,1,None) + tdSql.checkData(15,1,None) + tdSql.checkData(19,1,10) + tdSql.checkData(20,1,20) + tdSql.checkData(25,1,0) + + def query_mix_common(self): + + tdLog.info (" ====================================== elapsed mixup with common col is not supported =======================================") + + tdSql.query("select elapsed(ts,10s) from stable_1 where ts between '2015-01-01 00:00:00.000' and '2015-01-01 00:01:00.000' and ind =1 group by tbname; ") + tdSql.checkRows(1) + tdSql.checkData(0,0,6) + + tdSql.query("select elapsed(ts,10s) from sub_table1_1 where ts between '2015-01-01 00:00:00.000' and '2015-01-01 00:01:00.000' ; ") + tdSql.checkRows(1) + tdSql.checkData(0,0,6) + + tdSql.error("select ts,elapsed(ts,10s) from sub_empty_1 where ts between '2015-01-01 00:00:00.000' and '2015-01-01 00:01:00.000' ; ") + tdSql.error("select ts,elapsed(ts,10s) from stable_empty where ts between 
'2015-01-01 00:00:00.000' and '2015-01-01 00:01:00.000' group by tbname; ") + + tdSql.error("select ts,elapsed(ts,10s) from sub_table1_1 where ts between '2015-01-01 00:00:00.000' and '2015-01-01 00:01:00.000' ; ") + tdSql.error("select ts,elapsed(ts,10s) from stable_1 where ts between '2015-01-01 00:00:00.000' and '2015-01-01 00:01:00.000' group by tbname; ") + + tdSql.error("select q_int,elapsed(ts,10s) from sub_table1_1 where ts between '2015-01-01 00:00:00.000' and '2015-01-01 00:01:00.000' ; ") + tdSql.error("select q_int,elapsed(ts,10s) from stable_1 where ts between '2015-01-01 00:00:00.000' and '2015-01-01 00:01:00.000' group by tbname; ") + + tdSql.error("select ts,q_int,elapsed(ts,10s) from sub_table1_1 where ts between '2015-01-01 00:00:00.000' and '2015-01-01 00:01:00.000' ; ") + tdSql.error("select ts,q_int,elapsed(ts,10s) from stable_1 where ts between '2015-01-01 00:00:00.000' and '2015-01-01 00:01:00.000' group by tbname; ") + + def query_mix_Aggregate(self): + + tdLog.info (" ====================================== elapsed mixup with aggregate ==================================================") + + tdSql.query("select count(*),avg(q_int) , sum(q_double),stddev(q_float),LEASTSQUARES(q_int,0,1), elapsed(ts,10s) from sub_table1_1 ; ") + + data = tdSql.getResult("select count(*),avg(q_int) , sum(q_double),stddev(q_float),LEASTSQUARES(q_int,0,1), elapsed(ts,10s) from sub_table1_1 ; ") + + querys = ["count(*)","avg(q_int)", "sum(q_double)","stddev(q_float)","LEASTSQUARES(q_int,0,1)", "elapsed(ts,10s)"] + + for index , query in enumerate(querys): + sql = "select %s from sub_table1_1 " %(query) + tdSql.query(sql) + tdSql.checkData(0,0,data[0][index]) + + tdSql.query("select count(*),avg(q_int) , sum(q_double),stddev(q_float),LEASTSQUARES(q_int,0,1), elapsed(ts,10s) from stable_1 group by tbname; ") + + # Arithmetic with elapsed for common table + + operators = ["+" ,"-" , "*" ,"/" ,"%"] + querys_oper = ["count(*)","avg(q_int)", "sum(q_double)","stddev(q_float)", "elapsed(ts,10s)"] + + for operator in operators: + + query_datas=[] + + sql_common= "select " + + for index , query in enumerate(querys_oper): + + query_data = tdSql.getResult("select %s from sub_table1_1;"%query) + + query_datas.append(query_data[0][0]) + sql_common += " %s %s " %(query,operator) + sql_common=sql_common[:-2] + " from sub_table1_1;" + + tdSql.query(sql_common) + results= query_datas[0] + if operator == "+": + for data in query_datas[1:]: + results += data + tdSql.checkData(0,0,results) + + results= query_datas[0] + if operator == "-": + for data in query_datas[1:]: + results -= data + tdSql.checkData(0,0,results) + + results= query_datas[0] + if operator == "*": + for data in query_datas[1:]: + results *= data + tdSql.checkData(0,0,results) + + results= query_datas[0] + if operator == "/": + for data in query_datas[1:]: + results /= data + tdSql.checkData(0,0,results) + + results= query_datas[0] + if operator == "%": + for data in query_datas[1:]: + results %= data + tdSql.checkData(0,0,results) + + + # Arithmetic with elapsed for super table + + operators = ["+" ,"-" , "*" ,"/" ,"%"] + querys_oper = ["count(*)","avg(q_int)", "sum(q_double)","stddev(q_float)", "elapsed(ts,10s)"] + + for operator in operators: + + query_datas=[] + + sql_common= "select " + + for index , query in enumerate(querys_oper): + + query_data = tdSql.getResult("select %s from stable_1 group by tbname;"%query) + + query_datas.append(query_data[0][0]) + sql_common += " %s %s " %(query,operator) + sql_common=sql_common[:-2] + " from 
stable_1 group by tbname;" + + tdSql.query(sql_common) + results= query_datas[0] + if operator == "+": + for data in query_datas[1:]: + results += data + tdSql.checkData(0,0,results) + tdSql.checkData(1,0,results) + tdSql.checkData(2,0,results) + + + results= query_datas[0] + if operator == "-": + for data in query_datas[1:]: + results -= data + tdSql.checkData(0,0,results) + tdSql.checkData(1,0,results) + tdSql.checkData(2,0,results) + + results= query_datas[0] + if operator == "*": + for data in query_datas[1:]: + results *= data + tdSql.checkData(0,0,results) + tdSql.checkData(1,0,results) + tdSql.checkData(2,0,results) + + results= query_datas[0] + if operator == "/": + for data in query_datas[1:]: + results /= data + tdSql.checkData(0,0,results) + tdSql.checkData(1,0,results) + tdSql.checkData(2,0,results) + + results= query_datas[0] + if operator == "%": + for data in query_datas[1:]: + results %= data + tdSql.checkData(0,0,results) + tdSql.checkData(1,0,results) + tdSql.checkData(2,0,results) + + def query_mix_select(self): + + tdLog.info (" ====================================== elapsed mixup with select function =================================================") + + querys = ["max(q_int)","min(q_int)" , "first(q_tinyint)", "first(*)","last(q_int)","last(*)","PERCENTILE(q_int,10)","APERCENTILE(q_int,10)","elapsed(ts,10s)"] + + + querys_mix = ["max(q_int)","min(q_int)" , "first(q_tinyint)", "first(q_int)","last(q_int)","PERCENTILE(q_int,10)","APERCENTILE(q_int,10)","elapsed(ts,10s)"] + + tdSql.query("select max(q_int),min(q_int) , first(q_tinyint), first(q_int),last(q_int),PERCENTILE(q_int,10),APERCENTILE(q_int,10) ,elapsed(ts,10s) from sub_table1_1 ; ") + + data = tdSql.getResult("select max(q_int),min(q_int) , first(q_tinyint), first(q_int),last(q_int),PERCENTILE(q_int,10),APERCENTILE(q_int,10) ,elapsed(ts,10s) from sub_table1_1 ; ") + + for index , query in enumerate(querys_mix): + sql = "select %s from sub_table1_1 " %(query) + tdSql.query(sql) + tdSql.checkData(0,0,data[0][index]) + + tdSql.query("select max(q_int),min(q_int) , first(q_tinyint), first(q_int),last(q_int),APERCENTILE(q_int,10) ,elapsed(ts,10s) from stable_1 group by tbname ; ") + + data = tdSql.getResult("select max(q_int),min(q_int) , first(q_tinyint), first(q_int),last(q_int),APERCENTILE(q_int,10) ,elapsed(ts,10s) from stable_1 group by tbname ; ") + + querys_mix = ["max(q_int)","min(q_int)" , "first(q_tinyint)", "first(q_int)","last(q_int)","APERCENTILE(q_int,10)","elapsed(ts,10s)"] + + for index , query in enumerate(querys_mix): + sql = "select %s from stable_1 group by tbname " %(query) + tdSql.query(sql) + tdSql.checkData(0,0,data[0][index]) + tdSql.checkData(1,0,data[0][index]) + tdSql.checkData(2,0,data[0][index]) + + operators = ["+" ,"-" , "*" ,"/" ,"%"] + querys_oper = querys_mix + + for operator in operators: + + query_datas=[] + + sql_common= "select " + + for index , query in enumerate(querys_oper): + + query_data = tdSql.getResult("select %s from sub_table1_1;"%query) + + query_datas.append(query_data[0][0]) + sql_common += " %s %s " %(query,operator) + sql_common=sql_common[:-2] + " from sub_table1_1;" + + tdSql.query(sql_common) + results= query_datas[0] + if operator == "+": + for data in query_datas[1:]: + results += data + tdSql.checkData(0,0,results) + + results= query_datas[0] + if operator == "-": + for data in query_datas[1:]: + results -= data + tdSql.checkData(0,0,results) + + results= query_datas[0] + if operator == "*": + for data in query_datas[1:]: + results *= data + 
tdSql.checkData(0,0,results) + + results= query_datas[0] + if operator == "/": + for data in query_datas[1:]: + results /= data + tdSql.checkData(0,0,results) + + results= query_datas[0] + if operator == "%": + for data in query_datas[1:]: + results %= data + tdSql.checkData(0,0,results) + + + # Arithmetic with elapsed for super table + + operators = ["+" ,"-" , "*" ,"/" ,"%"] + querys_oper = querys_mix + + for operator in operators: + + query_datas=[] + + sql_common= "select " + + for index , query in enumerate(querys_oper): + + query_data = tdSql.getResult("select %s from stable_1 group by tbname;"%query) + + query_datas.append(query_data[0][0]) + sql_common += " %s %s " %(query,operator) + sql_common=sql_common[:-2] + " from stable_1 group by tbname;" + + tdSql.query(sql_common) + results= query_datas[0] + if operator == "+": + for data in query_datas[1:]: + results += data + tdSql.checkData(0,0,results) + tdSql.checkData(1,0,results) + tdSql.checkData(2,0,results) + + + results= query_datas[0] + if operator == "-": + for data in query_datas[1:]: + results -= data + tdSql.checkData(0,0,results) + tdSql.checkData(1,0,results) + tdSql.checkData(2,0,results) + + results= query_datas[0] + if operator == "*": + for data in query_datas[1:]: + results *= data + tdSql.checkData(0,0,results) + tdSql.checkData(1,0,results) + tdSql.checkData(2,0,results) + + results= query_datas[0] + if operator == "/": + for data in query_datas[1:]: + results /= data + tdSql.checkData(0,0,results) + tdSql.checkData(1,0,results) + tdSql.checkData(2,0,results) + + results= query_datas[0] + if operator == "%": + for data in query_datas[1:]: + results %= data + tdSql.checkData(0,0,results) + tdSql.checkData(1,0,results) + tdSql.checkData(2,0,results) + + def query_mix_compute(self): + + tdLog.info (" ====================================== elapsed mixup with compute function =================================================") + + querys = ["diff(q_int)","DERIVATIVE(q_int,1s,1)","spread(ts)","spread(q_tinyint)","ceil(q_float)","floor(q_float)","round(q_float)"] + + for index , query in enumerate(querys): + + sql1 = "select elapsed(ts,10s),%s from sub_table1_1 " %(query) + sql2 = "select elapsed(ts,10s),%s from stable_1 group by tbname" %(query) + if query in ["diff(q_int)","DERIVATIVE(q_int,1s,1)","ceil(q_float)","floor(q_float)","round(q_float)"]: + tdSql.error(sql1) + tdSql.error(sql2) + continue + tdSql.query(sql1) + tdSql.query(sql2) + + # only support mixup with spread + + sql = "select spread(ts)*10,spread(q_tinyint)-10,elapsed(ts,10s) from sub_table1_1 where ts>=\"2015-01-01 00:00:00.000\" and ts < \"2015-01-01 00:10:00.000\" ;" + tdSql.execute(sql) + + data = tdSql.getResult(sql) + + sql = "select spread(ts)*10,spread(q_tinyint)-10,elapsed(ts,10s) from stable_1 where ts>=\"2015-01-01 00:00:00.000\" and ts < \"2015-01-01 00:10:00.000\" ;" + tdSql.execute(sql) + + querys_mix = ["spread(ts)","spread(q_tinyint)-10","elapsed(ts,10s)"] + + for index , query in enumerate(querys_mix): + sql = "select %s from sub_table1_1 where ts>=\"2015-01-01 00:00:00.000\" and ts < \"2015-01-01 00:10:00.000\" ; " %(query) + tdSql.query(sql) + + operators = ["+" ,"-" , "*" ,"/" ,"%"] + querys_oper = querys_mix + + for operator in operators: + + sql_common= "select " + + for index , query in enumerate(querys_oper): + + sql_common += " %s %s " %(query,operator) + sql_common=sql_common[:-2] + " from stable_1 where ts>=\"2015-01-01 00:00:00.000\" and ts < \"2015-01-01 00:10:00.000\" ;" + + tdSql.query(sql_common) + + for index , query in 
enumerate(querys_mix): + sql = "select %s from stable_1 where ts>=\"2015-01-01 00:00:00.000\" and ts < \"2015-01-01 00:10:00.000\" ; " %(query) + tdSql.query(sql) + + operators = ["+" ,"-" , "*" ,"/" ,"%"] + querys_oper = querys_mix + + for operator in operators: + + sql_common= "select " + + for index , query in enumerate(querys_oper): + + sql_common += " %s %s " %(query,operator) + sql_common=sql_common[:-2] + " from stable_1 where ts>=\"2015-01-01 00:00:00.000\" and ts < \"2015-01-01 00:10:00.000\" ;" + + tdSql.query(sql_common) + + def query_mix_arithmetic(self): + + tdLog.info (" ====================================== elapsed mixup with arithmetic =================================================") + + tdSql.execute("select elapsed(ts,10s)+1 ,elapsed(ts,10s)-2,elapsed(ts,10s)*3,elapsed(ts,10s)/4,elapsed(ts,10s)%5 from sub_table1_1 where ts>=\"2015-01-01 00:00:00.000\" and ts < \"2015-01-01 00:10:00.000\" ; ") + tdSql.execute("select elapsed(ts,10s)+1 ,elapsed(ts,10s)-2,elapsed(ts,10s)*3,elapsed(ts,10s)/4,elapsed(ts,10s)%5 from stable_1 where ts>=\"2015-01-01 00:00:00.000\" and ts < \"2015-01-01 00:10:00.000\" ; ") + + # queries = ["elapsed(ts,10s)+1" ,"elapsed(ts,10s)-2","elapsed(ts,10s)*3","elapsed(ts,10s)/4","elapsed(ts,10s)%5" ] + + # for index ,query in enumerate(queries): + # sql = "select %s from sub_table1_1 where ts>=\"2015-01-01 00:00:00.000\" and ts < \"2015-01-01 00:10:00.000\" interval(1s) fill(prev) ;" % (query) + # data = tdSql.getResult(sql) + # tdSql.query("select elapsed(ts,10s)+1 ,elapsed(ts,10s)-2,elapsed(ts,10s)*3,elapsed(ts,10s)/4,elapsed(ts,10s)%5 from sub_table1_1 where ts>=\"2015-01-01 00:00:00.000\" and ts < \"2015-01-01 00:10:00.000\" interval(1s) fill(prev) ; ") + # tdSql.checkData(0,index+1,data[0][1]) + + def query_with_join(self): + + tdLog.info (" ====================================== elapsed mixup with join =================================================") + + tdSql.error("select elapsed(ts,10s) from stable_empty TABLE1 , stable_empty TABLE2 where TABLE1.ts =TABLE2.ts; ") + tdSql.error("select elapsed(ts,10s) from stable_empty TABLE1 , stable_empty TABLE2 where TABLE1.ts =TABLE2.ts group by tbname; ") + + tdSql.execute("select elapsed(ts,10s) from sub_empty_1 TABLE1 , sub_empty_2 TABLE2 where TABLE1.ts =TABLE2.ts; ") + tdSql.error("select elapsed(ts,10s) from stable_1 TABLE1 , stable_2 TABLE2 where TABLE1.ts =TABLE2.ts and TABLE1.ind =TABLE2.ind; ") + tdSql.error("select elapsed(ts,10s) from stable_1 TABLE1 , stable_2 TABLE2 where TABLE1.ts =TABLE2.ts and TABLE1.ind =TABLE2.ind group by tbname,ind; ") # join does not support group by + + tdSql.error("select elapsed(ts,10s) from sub_empty_1 TABLE1 , stable_2 TABLE2 where TABLE1.ts =TABLE2.ts and TABLE1.ind =TABLE2.ind ; ") + tdSql.execute("select elapsed(ts,10s) from sub_empty_1 TABLE1 , sub_empty_2 TABLE2 where TABLE1.ts =TABLE2.ts ; ") + + tdSql.query("select elapsed(ts,10s) from sub_table1_1 TABLE1 , sub_table1_2 TABLE2 where TABLE1.ts =TABLE2.ts ; ") + tdSql.checkData(0,0,9) + + tdSql.query("select elapsed(ts,10s) from sub_empty_1 TABLE1 , sub_table1_2 TABLE2 where TABLE1.ts =TABLE2.ts ; ") + tdSql.checkRows(0) + + tdSql.query("select elapsed(ts,10s) from sub_empty_1 TABLE1 , regular_empty TABLE2 where TABLE1.ts =TABLE2.ts ; ") + tdSql.checkRows(0) + + tdSql.query("select elapsed(ts,10s) from sub_empty_1 TABLE1 , regular_table_1 TABLE2 where TABLE1.ts =TABLE2.ts ; ") + tdSql.checkRows(0) + + tdSql.query("select elapsed(ts,10s) from sub_table1_3 TABLE1 , regular_table_1 TABLE2 where TABLE1.ts =TABLE2.ts 
; ") + tdSql.checkRows(1) + tdSql.checkData(0,0,9) + + tdSql.query("select elapsed(ts,10s) from regular_table_1 ; ") + tdSql.checkRows(1) + tdSql.checkData(0,0,9) + + def query_with_union(self): + + tdLog.info (" ====================================== elapsed mixup with union all =================================================") + + # union all with empty + + tdSql.query("select elapsed(ts,10s) from regular_table_1 union all select elapsed(ts,10s) from regular_table_2;") + + tdSql.query("select elapsed(ts,10s) from regular_table_1 where ts>=\"2015-01-01 00:00:00.000\" and ts < \"2015-01-01 00:10:00.000\" interval(1s) fill(prev) union all \ + select elapsed(ts,10s) from regular_table_2 where ts>=\"2015-01-01 00:00:00.000\" and ts < \"2015-01-01 00:10:00.000\" interval(1s) fill(prev);") + tdSql.checkRows(1200) + tdSql.checkData(0,1,0.1) + tdSql.checkData(500,1,0) + + tdSql.query("select elapsed(ts,10s) from sub_empty_1 where ts>=\"2015-01-01 00:00:00.000\" and ts < \"2015-01-01 00:10:00.000\" interval(1s) fill(prev) union all \ + select elapsed(ts,10s) from regular_table_2 where ts>=\"2015-01-01 00:00:00.000\" and ts < \"2015-01-01 00:10:00.000\" interval(1s) fill(prev);") + tdSql.checkRows(600) + tdSql.checkData(0,1,0.1) + tdSql.checkData(500,0,0) + + tdSql.query('select elapsed(ts,10s) from sub_empty_1 union all select elapsed(ts,10s) from sub_empty_2;') + tdSql.checkRows(0) + + tdSql.query('select elapsed(ts,10s) from regular_table_1 union all select elapsed(ts,10s) from sub_empty_1;') + tdSql.checkRows(1) + tdSql.checkData(0,0,9) + + tdSql.query('select elapsed(ts,10s) from sub_empty_1 union all select elapsed(ts,10s) from regular_table_1;') + tdSql.checkRows(1) + tdSql.checkData(0,0,9) + + tdSql.query('select elapsed(ts,10s) from sub_empty_1 union all select elapsed(ts,10s) from sub_table1_1;') + tdSql.checkRows(1) + tdSql.checkData(0,0,9) + + tdSql.query('select elapsed(ts,10s) from sub_table1_1 union all select elapsed(ts,10s) from sub_empty_1;') + tdSql.checkRows(1) + tdSql.checkData(0,0,9) + + tdSql.query('select elapsed(ts,10s) from sub_empty_1 union all select elapsed(ts,10s) from regular_table_1;') + tdSql.checkRows(1) + tdSql.checkData(0,0,9) + + tdSql.error('select elapsed(ts,10s) from sub_empty_1 union all select elapsed(ts,10s) from stable_sub_empty group by tbname;') + + tdSql.error('select elapsed(ts,10s) from regular_table_1 union all select elapsed(ts,10s) from stable_sub_empty group by tbname;') + + tdSql.query('select elapsed(ts,10s) from sub_empty_1 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" interval(1s) fill(prev) union all select elapsed(ts,10s) from sub_empty_2 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" interval(1s) fill(prev);') + tdSql.checkRows(0) + + tdSql.error('select elapsed(ts,10s) from sub_empty_1 union all select elapsed(ts,10s) from stable_empty group by tbname;') + + tdSql.error('select elapsed(ts,10s) from sub_empty_1 interval(1s) union all select elapsed(ts,10s) from stable_empty interval(1s) group by tbname;') + + # tdSql.error('select elapsed(ts,10s) from sub_empty_1 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" interval(1s) fill(prev) union all select elapsed(ts,10s) from stable_empty where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" interval(1s) fill(prev) group by tbname;') + + tdSql.query("select elapsed(ts,10s) from stable_empty group by tbname union all select elapsed(ts,10s) from stable_empty group by tbname ;") + tdSql.checkRows(0) + + # 
case : TD-12229 + tdSql.query("select elapsed(ts,10s) from stable_empty group by tbname union all select elapsed(ts,10s) from stable_1 group by tbname ;") + tdSql.checkRows(3) + + tdSql.query("select elapsed(ts,10s) from stable_1 group by tbname union all select elapsed(ts,10s) from stable_1 group by tbname ;") + tdSql.checkRows(6) + tdSql.checkData(0,0,9) + tdSql.checkData(5,0,9) + + tdSql.query("select elapsed(ts,10s) from stable_1 group by tbname union all select elapsed(ts,10s) from stable_2 group by tbname ;") + tdSql.checkRows(6) + tdSql.checkData(0,0,9) + tdSql.checkData(5,0,9) + + tdSql.query('select elapsed(ts,10s) from stable_1 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" interval(10s) fill(prev) group by tbname union all\ + select elapsed(ts,10s) from stable_2 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" interval(10s) fill(prev) group by tbname ;') + tdSql.checkRows(360) + tdSql.checkData(0,1,1) + tdSql.checkData(50,1,0) + + #case : TD-12229 + tdSql.query('select elapsed(ts,10s) from stable_empty group by tbname union all select elapsed(ts,10s) from stable_2 group by tbname ;') + tdSql.checkRows(3) + + tdSql.query('select elapsed(ts,10s) from stable_1 group by tbname union all select elapsed(ts,10s) from stable_empty group by tbname ;') + tdSql.checkRows(3) + + + tdSql.query('select elapsed(ts,10s) from stable_empty where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" interval(10s) fill(prev) group by tbname union all\ + select elapsed(ts,10s) from stable_2 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" interval(10s) fill(prev) group by tbname ;') + tdSql.checkRows(180) + + tdSql.query('select elapsed(ts,10s) from stable_1 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" interval(10s) fill(prev) group by tbname union all\ + select elapsed(ts,10s) from stable_empty where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" interval(10s) fill(prev) group by tbname ;') + tdSql.checkRows(180) + + # union all with sub table and regular table + + # sub_table with sub_table + + tdSql.query('select elapsed(ts,10s) from sub_table1_1 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" interval(10s) fill(prev) union all\ + select elapsed(ts,10s) from sub_table2_2 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" interval(10s) fill(prev) ;') + tdSql.checkRows(120) + tdSql.checkData(0,1,1) + tdSql.checkData(12,1,0) + + tdSql.query('select elapsed(ts,10s) from sub_table1_1 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" interval(10s) fill(prev) union all\ + select elapsed(ts,10s) from sub_table1_1 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" interval(10s) fill(prev) ;') + tdSql.checkRows(120) + tdSql.checkData(0,1,1) + tdSql.checkData(12,1,0) + + tdSql.query('select elapsed(ts,10s) from regular_table_1 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" interval(10s) fill(prev) union all\ + select elapsed(ts,10s) from sub_table1_1 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" interval(10s) fill(prev) ;') + tdSql.checkRows(120) + tdSql.checkData(0,1,1) + tdSql.checkData(12,1,0) + + tdSql.query('select elapsed(ts,10s) from regular_table_1 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" interval(10s) fill(prev) union all\ + select elapsed(ts,10s) from regular_table_1 where ts>="2015-01-01 00:00:00.000" and ts < 
"2015-01-01 00:10:00.000" interval(10s) fill(prev) ;') + tdSql.checkRows(120) + tdSql.checkData(0,1,1) + tdSql.checkData(12,1,0) + + tdSql.query('select elapsed(ts,10s) from regular_table_1 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" interval(10s) fill(prev) union all\ + select elapsed(ts,10s) from regular_table_2 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" interval(10s) fill(prev) ;') + tdSql.checkRows(120) + tdSql.checkData(0,1,1) + tdSql.checkData(12,1,0) + + tdSql.query('select elapsed(ts,10s) from regular_table_1 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" interval(10s) fill(prev) union all\ + select elapsed(ts,10s) from regular_table_2 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" interval(10s) fill(prev) ;') + tdSql.checkRows(120) + tdSql.checkData(0,1,1) + tdSql.checkData(12,1,0) + + tdSql.query('select elapsed(ts,10s) from sub_empty_1 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" interval(10s) fill(prev) union all\ + select elapsed(ts,10s) from regular_table_2 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" interval(10s) fill(prev) ;') + tdSql.checkRows(60) + tdSql.checkData(0,1,1) + tdSql.checkData(12,1,0) + + tdSql.query('select elapsed(ts,10s) from regular_table_2 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" interval(10s) fill(prev) union all\ + select elapsed(ts,10s) from sub_empty_1 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" interval(10s) fill(prev) ;') + tdSql.checkRows(60) + tdSql.checkData(0,1,1) + tdSql.checkData(12,1,0) + + # stable with stable + + tdSql.query('select elapsed(ts,10s) from stable_1 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" interval(10s) fill(prev) group by tbname union all\ + select elapsed(ts,10s) from stable_1 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" interval(10s) fill(prev) group by tbname;') + tdSql.checkRows(360) + tdSql.checkData(0,1,1) + tdSql.checkData(12,1,0) + + tdSql.query('select elapsed(ts,10s) from regular_table_2 interval(10s) union all select elapsed(ts,10s) from sub_empty_1 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" interval(10s) fill(prev);') + tdSql.checkRows(10) + tdSql.checkData(0,1,1) + tdSql.checkData(9,1,0) + + tdSql.query('select elapsed(ts,10s) from regular_table_2 interval(10s) union all select elapsed(ts,10s) from regular_table_1 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" interval(10s) fill(prev) ;') + tdSql.checkRows(70) + tdSql.checkData(0,1,1) + tdSql.checkData(9,1,0) + + tdSql.query('select elapsed(ts,10s) from regular_table_2 interval(10s) order by ts desc union all select elapsed(ts,10s) from regular_table_1 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" interval(10s) fill(prev) order by ts asc;') + tdSql.checkRows(70) + tdSql.checkData(0,1,0) + tdSql.checkData(1,1,1) + tdSql.checkData(9,1,1) + + tdSql.query('select elapsed(ts,10s) from stable_1 group by tbname, ind order by ts desc union all select elapsed(ts,10s) from stable_2 group by tbname, ind order by ts asc ;') + tdSql.checkRows(6) + tdSql.checkData(0,0,9) + + tdSql.query('select elapsed(ts,10s) from stable_1 group by tbname, ind order by ts desc union all select elapsed(ts,10s) from stable_1 group by tbname, ind order by ts asc ;') + tdSql.checkRows(6) + tdSql.checkData(0,0,9) + + tdSql.query('select elapsed(ts,10s) from 
stable_1 interval(10s) group by tbname,ind order by ts desc union all select elapsed(ts,10s) from stable_2 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" interval(10s) fill(prev) group by tbname,ind order by ts asc ;') + tdSql.checkRows(210) + tdSql.checkData(0,1,0) + tdSql.checkData(1,1,1) + tdSql.checkData(9,1,1) + + tdSql.query('select elapsed(ts,10s) from stable_2 interval(10s) group by tbname,ind order by ts desc union all select elapsed(ts,10s) from stable_1 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" interval(10s) fill(prev) group by tbname,ind order by ts asc ;') + tdSql.checkRows(210) + tdSql.checkData(0,1,0) + tdSql.checkData(1,1,1) + tdSql.checkData(9,1,1) + + tdSql.query('select elapsed(ts,10s) from stable_1 interval(10s) group by tbname,ind order by ts desc union all select elapsed(ts,10s) from stable_1 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" interval(10s) fill(prev) group by tbname,ind order by ts asc ;') + tdSql.checkRows(210) + tdSql.checkData(0,1,0) + tdSql.checkData(1,1,1) + tdSql.checkData(9,1,1) + + def query_nest(self): + + tdLog.info (" ====================================== elapsed query for nest =================================================") + + # ===============================================outer nest============================================ + + # regular table + + # ts can't be used at outer query + + tdSql.query("select elapsed(ts,10s) from (select ts from regular_table_1 );") + + # case : TD-12164 + + tdSql.error("select elapsed(ts,10s) from (select qint ts from regular_table_1 );") + tdSql.error("select elapsed(tbname ,10s) from (select qint tbname from regular_table_1 );") + tdSql.error("select elapsed(tsc ,1s) from (select q_int tsc from regular_table_1) ;") + tdSql.error("select elapsed(tsv ,1s) from (select elapsed(ts,1s) tsv from regular_table_1);") + tdSql.error("select elapsed(ts ,1s) from (select elapsed(ts,1s) ts from regular_table_1);") + # # bug fix + # tdSql.error("select elapsed(tsc ,1s) from (select tscol tsc from regular_table_1) ;") + + # case TD-12276 + # tdSql.error("select elapsed(ts,10s) from (select ts,tbname from regular_table_1 order by ts asc );") + + # tdSql.error("select elapsed(ts,10s) from (select ts,tbname from regular_table_1 order by ts desc );") + + # tdSql.error("select elapsed(ts,10s) from (select ts ,max(q_int),tbname from regular_table_1 order by ts ) interval(1s);") + + # tdSql.error("select elapsed(ts,10s) from (select ts ,q_int,tbname from regular_table_1 order by ts ) interval(1s);") + + # sub table + + tdSql.query("select elapsed(ts,10s) from (select ts from sub_table1_1 );") + + # tdSql.error("select elapsed(ts,10s) from (select ts ,max(q_int),tbname from sub_table1_1 order by ts ) interval(1s);") + + # tdSql.error("select elapsed(ts,10s) from (select ts ,q_int,tbname from sub_table1_1 order by ts ) interval(1s);") + + tdSql.query("select elapsed(ts,10s) from (select ts ,tbname,top(q_int,3) from sub_table1_1 ) interval(10s);") + + tdSql.query("select elapsed(ts,10s) from (select ts ,tbname,bottom(q_int,3) from sub_table1_1 ) interval(10s);") + + tdSql.query("select elapsed(ts,10s) from (select ts ,tbname from sub_table1_1 ) interval(10s);") + + tdSql.query("select elapsed(ts,10s) from (select ts ,tbname from sub_table1_1 ) interval(10s);") + + # tdSql.error("select elapsed(ts,10s) from (select ts ,count(*),tbname from sub_table1_1 order by ts ) interval(1s);") + + querys = ["count(*)","avg(q_int)", 
"sum(q_double)","stddev(q_float)","LEASTSQUARES(q_int,0,1)","elapsed(ts,10s)"] + + for query in querys: + sql1 = "select elapsed(ts,10s) from (select %s from regular_table_1 order by ts ) interval(1s); " % query + sql2 = "select elapsed(ts,10s) from (select ts , tbname ,%s from regular_table_1 order by ts ) interval(1s); " % query + sql3 = "select elapsed(ts,10s) from (select ts , tbname ,%s from stable_1 group by tbname, ind order by ts ) interval(1s); " % query + sql4 = "select elapsed(ts,10s) from (select %s from sub_table2_1 order by ts ) interval(1s); " % query + sql5 = "select elapsed(ts,10s) from (select ts , tbname ,%s from sub_table2_1 order by ts ) interval(1s); " % query + + tdSql.error(sql1) + tdSql.error(sql2) + tdSql.error(sql3) + tdSql.error(sql4) + tdSql.error(sql5) + + + # case TD-12164 + tdSql.error( "select elapsed(ts00 ,1s) from (select elapsed(ts,1s) ts00 from regular_table_1) ; " ) + tdSql.error( "select elapsed(ts ,1s) from (select elapsed(ts,1s) ts from regular_table_1) ; " ) + + tdSql.error( "select elapsed(ts00 ,1s) from (select elapsed(ts,1s) ts00 from stable_1 group by tbname ) ; " ) + tdSql.error( "select elapsed(ts ,1s) from (select elapsed(ts,1s) ts from stable_1 group by tbname) ; " ) + + + # stable + + tdSql.error("select elapsed(ts,10s) from (select ts from stable_1 ) group by tbname ;") + + tdSql.error("select elapsed(ts,10s) from (select ts ,max(q_int),tbname from stable_1 group by tbname order by ts ) interval(1s) group by tbname;") + + tdSql.error("select elapsed(ts,10s) from (select ts ,q_int,tbname from stable_1 order by ts ) interval(1s) group by tbname;") + + # mixup with aggregate + + querys = ["max(q_int)","min(q_int)" , "first(q_tinyint)", "first(*)","last(q_int)","last(*)","top(q_double,1)", + "bottom(q_float,1)","PERCENTILE(q_int,10)","APERCENTILE(q_int,10)" ,"elapsed(ts,10s)"] + + for index , query in enumerate(querys): + + sql1 = "select elapsed(ts,10s) from (select %s from sub_table1_1) where ts>=\"2015-01-01 00:00:00.000\" and ts < \"2015-01-01 00:10:00.000\" interval(10s) fill(prev) ; " %(query) + sql2 = "select elapsed(ts,10s) from (select %s from stable_1 ) where ts>=\"2015-01-01 00:00:00.000\" and ts < \"2015-01-01 00:10:00.000\" interval(10s) fill(prev) group by tbname; " %(query) + sql3 = "select elapsed(ts,10s) from (select %s from stable_1 group by tbname) where ts>=\"2015-01-01 00:00:00.000\" and ts < \"2015-01-01 00:10:00.000\" interval(10s) fill(prev) group by tbname; " %(query) + + if query in ["interp(q_int)" ]: + # print(sql1 ) + # print(sql2) + tdSql.query(sql1) + tdSql.error(sql2) + else: + tdSql.error(sql1) + tdSql.error(sql2) + tdSql.error(sql3) + + tdSql.error("select elapsed(ts,10s) from (select ts,tbname from regular_table_1 order by ts ) where ts>=\"2015-01-01 00:00:00.000\" and ts < \"2015-01-01 00:10:00.000\" interval(1s) fill(prev);") + + tdSql.error("select elapsed(ts,10s) from (select ts ,max(q_int),tbname from regular_table_1 order by ts ) where ts>=\"2015-01-01 00:00:00.000\" and ts < \"2015-01-01 00:10:00.000\" interval(1s) fill(prev);") + + # ===============================================inner nest============================================ + + # sub table + + tdSql.query("select data from (select count(*),avg(q_int) , sum(q_double),stddev(q_float),LEASTSQUARES(q_int,0,1), elapsed(ts,10s) data from sub_table1_1 ); ") + tdSql.checkData(0,0,9) + + # tdSql.query("select data from (select count(*),avg(q_int) , sum(q_double),stddev(q_float),LEASTSQUARES(q_int,0,1), elapsed(ts,10s) data from sub_table1_1 \ + # 
where ts>=\"2015-01-01 00:00:00.000\" and ts < \"2015-01-01 00:10:00.000\" interval(1s) fill(prev)); ") + # tdSql.checkRows(600) + # tdSql.checkData(0,0,0.1) + + tdSql.query("select * from (select count(*),avg(q_int) , sum(q_double),stddev(q_float),LEASTSQUARES(q_int,0,1), elapsed(ts,10s) data from regular_table_3 ); ") + tdSql.checkData(0,5,9) + + # tdSql.query("select * from (select count(*),avg(q_int) , sum(q_double),stddev(q_float),LEASTSQUARES(q_int,0,1), elapsed(ts,10s) data from regular_table_3 \ + # where ts>=\"2015-01-01 00:00:00.000\" and ts < \"2015-01-01 00:10:00.000\" interval(1s) fill(prev)); ") + # tdSql.checkRows(600) + # tdSql.checkData(0,0,0.1) + + tdSql.query("select max(data) from (select count(*),avg(q_int) , sum(q_double),stddev(q_float),LEASTSQUARES(q_int,0,1), elapsed(ts,10s) data from regular_table_3 ); ") + tdSql.checkData(0,0,9) + + # tdSql.query("select max(data) from (select count(*),avg(q_int) , sum(q_double),stddev(q_float),LEASTSQUARES(q_int,0,1), elapsed(ts,10s) data from regular_table_3 \ + # where ts>=\"2015-01-01 00:00:00.000\" and ts < \"2015-01-01 00:10:00.000\" interval(1s) fill(prev)); ") + # tdSql.checkRows(1) + # tdSql.checkData(0,0,0.1) + + # tdSql.query("select max(data) from (select count(*),avg(q_int) , sum(q_double),stddev(q_float),LEASTSQUARES(q_int,0,1), elapsed(ts,10s) data from sub_empty_2 \ + # where ts>=\"2015-01-01 00:00:00.000\" and ts < \"2015-01-01 00:10:00.000\" interval(1s) fill(prev)); ") + # tdSql.checkRows(0) + + # tdSql.query("select max(data),min(data),avg(data) from (select count(*),avg(q_int) , sum(q_double),stddev(q_float),LEASTSQUARES(q_int,0,1), elapsed(ts,10s) data from regular_table_3 \ + # where ts>=\"2015-01-01 00:00:00.000\" and ts < \"2015-01-01 00:10:00.000\" interval(1s) fill(prev)); ") + # tdSql.checkRows(1) + + # tdSql.query("select ceil(data),floor(data),round(data) from (select count(*),avg(q_int) , sum(q_double),stddev(q_float),LEASTSQUARES(q_int,0,1), elapsed(ts,10s) data from regular_table_3 \ + # where ts>=\"2015-01-01 00:00:00.000\" and ts < \"2015-01-01 00:10:00.000\" interval(1s) fill(prev)); ") + # tdSql.checkRows(600) + + # tdSql.query("select spread(data) from (select count(*),avg(q_int) , sum(q_double),stddev(q_float),LEASTSQUARES(q_int,0,1), elapsed(ts,10s) data from regular_table_3 \ + # where ts>=\"2015-01-01 00:00:00.000\" and ts < \"2015-01-01 00:10:00.000\" interval(1s) fill(prev)); ") + # tdSql.checkRows(1) + + # tdSql.query("select diff(data) from (select count(*),avg(q_int) , sum(q_double),stddev(q_float),LEASTSQUARES(q_int,0,1), elapsed(ts,10s) data from regular_table_3 \ + # where ts>=\"2015-01-01 00:00:00.000\" and ts < \"2015-01-01 00:10:00.000\" interval(1s) fill(prev)); ") + # tdSql.checkRows(599) + + # tdSql.query("select DERIVATIVE(data ,1s ,1) from (select count(*),avg(q_int) , sum(q_double),stddev(q_float),LEASTSQUARES(q_int,0,1), elapsed(ts,10s) data from regular_table_3 \ + # where ts>=\"2015-01-01 00:00:00.000\" and ts < \"2015-01-01 00:10:00.000\" interval(1s) fill(prev)); ") + # tdSql.checkRows(598) + + # tdSql.query("select ceil(data)from (select count(*),avg(q_int) , sum(q_double),stddev(q_float),LEASTSQUARES(q_int,0,1), elapsed(ts,10s) data from regular_table_3 \ + # where ts>=\"2015-01-01 00:00:00.000\" and ts < \"2015-01-01 00:10:00.000\" interval(1s) fill(prev)); ") + # tdSql.checkRows(600) + + # tdSql.query("select floor(data)from (select count(*),avg(q_int) , sum(q_double),stddev(q_float),LEASTSQUARES(q_int,0,1), elapsed(ts,10s) data from regular_table_3 \ + # where 
ts>=\"2015-01-01 00:00:00.000\" and ts < \"2015-01-01 00:10:00.000\" interval(1s) fill(prev)); ") + # tdSql.checkRows(600) + + # tdSql.query("select round(data)from (select count(*),avg(q_int) , sum(q_double),stddev(q_float),LEASTSQUARES(q_int,0,1), elapsed(ts,10s) data from regular_table_3 \ + # where ts>=\"2015-01-01 00:00:00.000\" and ts < \"2015-01-01 00:10:00.000\" interval(1s) fill(prev)); ") + # tdSql.checkRows(600) + + # tdSql.query("select data*10+2 from (select count(*),avg(q_int) , sum(q_double),stddev(q_float),LEASTSQUARES(q_int,0,1), elapsed(ts,10s) data from regular_table_3 \ + # where ts>=\"2015-01-01 00:00:00.000\" and ts < \"2015-01-01 00:10:00.000\" interval(1s) fill(prev)); ") + # tdSql.checkRows(600) + + # tdSql.query("select data*10+2 from (select count(*),avg(q_int) , sum(q_double),stddev(q_float),LEASTSQUARES(q_int,0,1), elapsed(ts,10s) data from regular_table_3 \ + # where ts>=\"2015-01-01 00:00:00.000\" and ts < \"2015-01-01 00:10:00.000\" interval(1s) fill(prev)); ") + # tdSql.checkRows(600) + + def query_session_windows(self): + + # case TD-12344 + # session not support stable + tdSql.error('select elapsed(ts,10s) from stable_1 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" session(ts ,10s) group by tbname,ind order by ts asc ') + + tdSql.query('select elapsed(ts,10s) from sub_table1_1 session(ts,1w) ; ') + tdSql.checkRows(1) + tdSql.checkData(0,0,9) + tdSql.query('select elapsed(ts,10s) from sub_table1_1 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" session(ts,1w) ; ') + tdSql.checkRows(1) + tdSql.checkData(0,0,9) + + tdSql.error('select elapsed(ts,10s) from ( select * from sub_table1_1 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000") session(ts,1w) ; ') + + tdSql.error('select elapsed(ts,10s) from ( select ts ,q_int from sub_table1_1 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000") session(ts,1w) ; ') + + tdSql.error('select elapsed(ts,10s) from sub_table1_1 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" interval(20s) fill (next) session(ts,1w) ; ') + + tdSql.query('select elapsed(ts,10s) from sub_empty_1 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" session(ts,1w) ; ') + tdSql.checkRows(0) + + # windows state + # not support stable + + tdSql.error('select elapsed(ts,10s) from stable_1 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" state_window(q_int) group by tbname,ind order by ts asc ') + + tdSql.query('select elapsed(ts,10s) from sub_table1_1 state_window(q_int) ; ') + tdSql.checkRows(10) + tdSql.checkData(0,0,0) + tdSql.query('select elapsed(ts,10s) from sub_table1_1 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" state_window(q_int) ; ') + tdSql.checkRows(10) + tdSql.checkData(0,0,0) + + # tdSql.error('select elapsed(ts,10s) from ( select * from sub_table1_1 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000") state_window(q_int) ; ') + + # tdSql.error('select elapsed(ts,10s) from ( select ts ,q_int from sub_table1_1 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000") state_window(q_int) ; ') + + # tdSql.error('select elapsed(ts,10s) from sub_table1_1 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" interval(20s) fill (next) state_window(q_int) ; ') + + # tdSql.query('select elapsed(ts,10s) from sub_empty_1 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" state_window(q_int); ') + # 
tdSql.checkRows(0) + + + def continuous_query(self): + tdSql.error('create table elapsed_t as select elapsed(ts) from sub_table1_1 interval(1m) sliding(30s);') + tdSql.error('create table elapsed_tb as select elapsed(ts) from stable_1 interval(1m) sliding(30s) group by tbname;') + tdSql.error('create table elapsed_tc as select elapsed(ts) from stable_1 interval(10s) sliding(5s) interval(1m) sliding(30s) group by tbname;') + + def query_precision(self): + def generate_data(precision="ms"): + + tdSql.execute("create database if not exists db_%s precision '%s';" %(precision, precision)) + tdSql.execute("use db_%s;" %precision) + tdSql.execute("create stable db_%s.st (ts timestamp , id int) tags(ind int);"%precision) + tdSql.execute("create table db_%s.tb1 using st tags(1);"%precision) + tdSql.execute("create table db_%s.tb2 using st tags(2);"%precision) + + if precision == "ms": + start_ts = self.ts + step = 10000 + elif precision == "us": + start_ts = self.ts*1000 + step = 10000000 + elif precision == "ns": + start_ts = self.ts*1000000 + step = 10000000000 + else: + pass + + for i in range(10): + + sql1 = "insert into db_%s.tb1 values (%d,%d)"%(precision ,start_ts+i*step,i) + sql2 = "insert into db_%s.tb1 values (%d,%d)"%(precision, start_ts+i*step,i) + tdSql.execute(sql1) + tdSql.execute(sql2) + + time_units = ["10s","10a","10u","10b"] + + precision_list = ["ms","us","ns"] + for pres in precision_list: + generate_data(pres) + + for index,unit in enumerate(time_units): + + if pres == "ms": + if unit in ["10u","10b"]: + tdSql.error("select elapsed(ts,%s) from db_%s.st group by tbname "%(unit,pres)) + pass + else: + tdSql.query("select elapsed(ts,%s) from db_%s.st group by tbname "%(unit,pres)) + elif pres == "us" and unit in ["10b"]: + if unit in ["10b"]: + tdSql.error("select elapsed(ts,%s) from db_%s.st group by tbname "%(unit,pres)) + pass + else: + tdSql.query("select elapsed(ts,%s) from db_%s.st group by tbname "%(unit,pres)) + else: + + tdSql.query("select elapsed(ts,%s) from db_%s.st group by tbname "%(unit,pres)) + basic_result = 9 + tdSql.checkData(0,0,basic_result*pow(1000,index)) + + def run(self): + tdSql.prepare() + self.prepare_data() + self.abnormal_common_test() + self.abnormal_use_test() + self.query_filter() + # self.query_interval() + self.query_mix_common() + self.query_mix_Aggregate() + self.query_mix_select() + self.query_mix_compute() + self.query_mix_arithmetic() + # self.query_with_join() + # self.query_with_union() + self.query_nest() + self.query_session_windows() + self.continuous_query() + self.query_precision() + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) + diff --git a/tests/system-test/2-query/first.py b/tests/system-test/2-query/first.py new file mode 100644 index 0000000000000000000000000000000000000000..7227d1afb5e22f68af90fb9d2192eb7a4a088c96 --- /dev/null +++ b/tests/system-test/2-query/first.py @@ -0,0 +1,152 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import taos +from util.log import * +from util.cases import * +from util.sql import * +import numpy as np + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor()) + + self.rowNum = 10 + self.ts = 1537146000000 + + def run(self): + tdSql.prepare() + + tdSql.execute('''create table test(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double, + col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned) tags(loc nchar(20))''') + tdSql.execute("create table test1 using test tags('beijing')") + tdSql.execute("insert into test1(ts) values(%d)" % (self.ts - 1)) + + # first verification + # bug TD-15957 + tdSql.query("select first(*) from test1") + tdSql.checkRows(1) + tdSql.checkData(0, 1, None) + + tdSql.query("select first(col1) from test1") + tdSql.checkRows(0) + + tdSql.query("select first(col2) from test1") + tdSql.checkRows(0) + + tdSql.query("select first(col3) from test1") + tdSql.checkRows(0) + + tdSql.query("select first(col4) from test1") + tdSql.checkRows(0) + + tdSql.query("select first(col11) from test1") + tdSql.checkRows(0) + + tdSql.query("select first(col12) from test1") + tdSql.checkRows(0) + + tdSql.query("select first(col13) from test1") + tdSql.checkRows(0) + + tdSql.query("select first(col14) from test1") + tdSql.checkRows(0) + + tdSql.query("select first(col5) from test1") + tdSql.checkRows(0) + + tdSql.query("select first(col6) from test1") + tdSql.checkRows(0) + + tdSql.query("select first(col7) from test1") + tdSql.checkRows(0) + + tdSql.query("select first(col8) from test1") + tdSql.checkRows(0) + + tdSql.query("select first(col9) from test1") + tdSql.checkRows(0) + + for i in range(self.rowNum): + tdSql.execute("insert into test1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)" + % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1)) + + tdSql.query("select first(*) from test1") + tdSql.checkRows(1) + tdSql.checkData(0, 1, 1) + + tdSql.query("select first(col1) from test1") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query("select first(col2) from test1") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query("select first(col3) from test1") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query("select first(col4) from test1") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query("select first(col11) from test1") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query("select first(col12) from test1") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query("select first(col13) from test1") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query("select first(col14) from test1") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query("select first(col5) from test1") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0.1) + + tdSql.query("select first(col6) from test1") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0.1) + + tdSql.query("select first(col7) from test1") + tdSql.checkRows(1) + tdSql.checkData(0, 0, False) + + 
tdSql.query("select first(col8) from test1") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 'taosdata1') + + tdSql.query("select first(col9) from test1") + tdSql.checkRows(1) + tdSql.checkData(0, 0, '涛思数据1') + + + tdSql.query("select first(*),last(*) from test1 where ts < 23 interval(1s)") + tdSql.checkRows(0) + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/system-test/2-query/function_diff.py b/tests/system-test/2-query/function_diff.py new file mode 100644 index 0000000000000000000000000000000000000000..325bd2bc8ebd79f3e58daf6690492dc8ca329dda --- /dev/null +++ b/tests/system-test/2-query/function_diff.py @@ -0,0 +1,432 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import subprocess +import random +import math +import numpy as np +import inspect +import re + +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor()) + + def diff_query_form(self, col="c1", alias="", table_expr="t1", condition=""): + + ''' + diff function: + :param col: string, column name, required parameter; + :param alias: string, another name for the result column, or add another function; + :param table_expr: string or expression, data source (e.g. table/stable name, result set), required parameter; + :param condition: expression; + :param args: other functions, like: ', last(col)', or give the result column another name, like 'c2' + :return: diff query statement, default: select diff(c1) from t1 + ''' + + return f"select diff({col}) {alias} from {table_expr} {condition}" + + def checkdiff(self,col="c1", alias="", table_expr="t1", condition="" ): + line = sys._getframe().f_back.f_lineno + pre_sql = self.diff_query_form( + col=col, table_expr=table_expr, condition=condition + ).replace("diff", "count") + tdSql.query(pre_sql) + + if tdSql.queryRows == 0: + tdSql.query(self.diff_query_form( + col=col, alias=alias, table_expr=table_expr, condition=condition + )) + print(f"case in {line}: ", end='') + tdSql.checkRows(0) + return + + if "order by tbname" in condition: + tdSql.error(self.diff_query_form( + col=col, alias=alias, table_expr=table_expr, condition=condition + )) + return + + if "group" in condition: + + tb_condition = condition.split("group by")[1].split(" ")[1] + tdSql.query(f"select distinct {tb_condition} from {table_expr}") + query_result = tdSql.queryResult + query_rows = tdSql.queryRows + clear_condition = re.sub('order by [0-9a-z]*|slimit [0-9]*|soffset [0-9]*', "", condition) + + pre_row = 0 + for i in range(query_rows): + group_name = query_result[i][0] + if "where" in clear_condition: + pre_condition = re.sub('group by [0-9a-z]*', f"{tb_condition}='{group_name}'", clear_condition) + else: + pre_condition = "where " + re.sub('group by [0-9a-z]*',f"{tb_condition}='{group_name}'", clear_condition) + + tdSql.query(f"select {col} {alias} from 
{table_expr} {pre_condition}") + pre_data = np.array(tdSql.queryResult)[np.array(tdSql.queryResult) != None] + pre_diff = np.diff(pre_data) + # trans precision for data + tdSql.query(self.diff_query_form( + col=col, alias=alias, table_expr=table_expr, condition=condition + )) + for j in range(len(pre_diff)): + print(f"case in {line}:", end='') + if isinstance(pre_diff[j] , float) : + pass + else: + tdSql.checkData(pre_row+j, 1, pre_diff[j] ) + pre_row += len(pre_diff) + return + elif "union" in condition: + union_sql_0 = self.diff_query_form( + col=col, alias=alias, table_expr=table_expr, condition=condition + ).split("union all")[0] + + union_sql_1 = self.diff_query_form( + col=col, alias=alias, table_expr=table_expr, condition=condition + ).split("union all")[1] + + tdSql.query(union_sql_0) + union_diff_0 = tdSql.queryResult + row_union_0 = tdSql.queryRows + + tdSql.query(union_sql_1) + union_diff_1 = tdSql.queryResult + + tdSql.query(self.diff_query_form( + col=col, alias=alias, table_expr=table_expr, condition=condition + )) + for i in range(tdSql.queryRows): + print(f"case in {line}: ", end='') + if i < row_union_0: + tdSql.checkData(i, 0, union_diff_0[i][0]) + else: + tdSql.checkData(i, 0, union_diff_1[i-row_union_0][0]) + return + + else: + tdSql.query(f"select {col} from {table_expr} {re.sub('limit [0-9]*|offset [0-9]*','',condition)}") + offset_val = condition.split("offset")[1].split(" ")[1] if "offset" in condition else 0 + pre_result = np.array(tdSql.queryResult)[np.array(tdSql.queryResult) != None] + pre_diff = np.diff(pre_result)[offset_val:] + tdSql.query(self.diff_query_form( + col=col, alias=alias, table_expr=table_expr, condition=condition + )) + + for i in range(tdSql.queryRows): + print(f"case in {line}: ", end='') + if isinstance(pre_diff[i] , float ): + pass + else: + tdSql.checkData(i, 0, pre_diff[i]) + + pass + + def diff_current_query(self) : + + # table schema :ts timestamp, c1 int, c2 float, c3 timestamp, c4 binary(16), c5 double, c6 bool + # c7 bigint, c8 smallint, c9 tinyint, c10 nchar(16) + + # case1~6: numeric col:int/bigint/tinyint/smallint/float/double + self.checkdiff() + case2 = {"col": "c2"} + self.checkdiff(**case2) + case3 = {"col": "c5"} + self.checkdiff(**case3) + case4 = {"col": "c7"} + self.checkdiff(**case4) + case5 = {"col": "c8"} + self.checkdiff(**case5) + case6 = {"col": "c9"} + self.checkdiff(**case6) + + # case7~8: nested query + # case7 = {"table_expr": "(select c1 from stb1)"} + # self.checkdiff(**case7) + # case8 = {"table_expr": "(select diff(c1) c1 from stb1 group by tbname)"} + # self.checkdiff(**case8) + + # case9~10: mix with tbname/ts/tag/col + # case9 = {"alias": ", tbname"} + # self.checkdiff(**case9) + # case10 = {"alias": ", _c0"} + # self.checkdiff(**case10) + # case11 = {"alias": ", st1"} + # self.checkdiff(**case11) + # case12 = {"alias": ", c1"} + # self.checkdiff(**case12) + + # case13~15: with single condition + case13 = {"condition": "where c1 <= 10"} + self.checkdiff(**case13) + case14 = {"condition": "where c6 in (0, 1)"} + self.checkdiff(**case14) + case15 = {"condition": "where c1 between 1 and 10"} + self.checkdiff(**case15) + + # case16: with multi-condition + case16 = {"condition": "where c6=1 or c6 =0"} + self.checkdiff(**case16) + + # case17: only support normal table join + case17 = { + "col": "t1.c1", + "table_expr": "t1, t2", + "condition": "where t1.ts=t2.ts" + } + self.checkdiff(**case17) + # case18~19: with group by + # case18 = { + # "table_expr": "t1", + # "condition": "group by c6" + # } + # 
self.checkdiff(**case18) + # case19 = { + # "table_expr": "stb1", + # "condition": "partition by tbname" # partition by tbname + # } + # self.checkdiff(**case19) + + # # case20~21: with order by + # case20 = {"condition": "order by ts"} + # self.checkdiff(**case20) + + # # case22: with union + # case22 = { + # "condition": "union all select diff(c1) from t2" + # } + # self.checkdiff(**case22) + + # case23: with limit/slimit + case23 = { + "condition": "limit 1" + } + self.checkdiff(**case23) + # case24 = { + # "table_expr": "stb1", + # "condition": "group by tbname slimit 1 soffset 1" + # } + # self.checkdiff(**case24) + + pass + + def diff_error_query(self) -> None : + # unusual test + # + # table schema :ts timestamp, c1 int, c2 float, c3 timestamp, c4 binary(16), c5 double, c6 bool + # c7 bigint, c8 smallint, c9 tinyint, c10 nchar(16) + # + # form test + tdSql.error(self.diff_query_form(col="")) # no col + tdSql.error("diff(c1) from stb1") # no select + tdSql.error("select diff from t1") # no diff condition + tdSql.error("select diff c1 from t1") # no brackets + tdSql.error("select diff(c1) t1") # no from + tdSql.error("select diff( c1 ) from ") # no table_expr + # tdSql.error(self.diff_query_form(col="st1")) # tag col + tdSql.query("select diff(st1) from t1 ") + # tdSql.error(self.diff_query_form(col=1)) # col is a value + tdSql.error(self.diff_query_form(col="'c1'")) # col is a string + tdSql.error(self.diff_query_form(col=None)) # col is NULL 1 + tdSql.error(self.diff_query_form(col="NULL")) # col is NULL 2 + tdSql.error(self.diff_query_form(col='""')) # col is "" + tdSql.error(self.diff_query_form(col='c%')) # col is special char 1 + tdSql.error(self.diff_query_form(col='c_')) # col is special char 2 + tdSql.error(self.diff_query_form(col='c.')) # col is special char 3 + tdSql.error(self.diff_query_form(col='c3')) # timestamp col + tdSql.error(self.diff_query_form(col='ts')) # Primary key + tdSql.error(self.diff_query_form(col='avg(c1)')) # expr col + # tdSql.error(self.diff_query_form(col='c6')) # bool col + tdSql.query("select diff(c6) from t1") + tdSql.error(self.diff_query_form(col='c4')) # binary col + tdSql.error(self.diff_query_form(col='c10')) # nchar col + tdSql.error(self.diff_query_form(col='c10')) # not table_expr col + tdSql.error(self.diff_query_form(col='t1')) # tbname + tdSql.error(self.diff_query_form(col='stb1')) # stbname + tdSql.error(self.diff_query_form(col='db')) # database name + # tdSql.error(self.diff_query_form(col=True)) # col is BOOL 1 + # tdSql.error(self.diff_query_form(col='True')) # col is BOOL 2 + tdSql.error(self.diff_query_form(col='*')) # col is all col + tdSql.error("select diff[c1] from t1") # sql form error 1 + tdSql.error("select diff{c1} from t1") # sql form error 2 + tdSql.error(self.diff_query_form(col="[c1]")) # sql form error 3 + # tdSql.error(self.diff_query_form(col="c1, c2")) # sql form error 4 + # tdSql.error(self.diff_query_form(col="c1, 2")) # sql form error 5 + tdSql.error(self.diff_query_form(alias=", count(c1)")) # mix with aggregate function 1 + tdSql.error(self.diff_query_form(alias=", avg(c1)")) # mix with aggregate function 2 + tdSql.error(self.diff_query_form(alias=", min(c1)")) # mix with select function 1 + tdSql.error(self.diff_query_form(alias=", top(c1, 5)")) # mix with select function 2 + tdSql.error(self.diff_query_form(alias=", spread(c1)")) # mix with calculation function 1 + tdSql.error(self.diff_query_form(alias=", diff(c1)")) # mix with calculation function 2 + # tdSql.error(self.diff_query_form(alias=" + 2")) 
# mix with arithmetic 1 + tdSql.error(self.diff_query_form(alias=" + avg(c1)")) # mix with arithmetic 2 + tdSql.error(self.diff_query_form(alias=", c2")) # mix with other 1 + # tdSql.error(self.diff_query_form(table_expr="stb1")) # select stb directly + stb_join = { + "col": "stb1.c1", + "table_expr": "stb1, stb2", + "condition": "where stb1.ts=stb2.ts and stb1.st1=stb2.st2 order by stb1.ts" + } + tdSql.error(self.diff_query_form(**stb_join)) # stb join + interval_sql = { + "condition": "where ts>0 and ts < now interval(1h) fill(next)" + } + tdSql.error(self.diff_query_form(**interval_sql)) # interval + group_normal_col = { + "table_expr": "t1", + "condition": "group by c6" + } + tdSql.error(self.diff_query_form(**group_normal_col)) # group by normal col + slimit_soffset_sql = { + "table_expr": "stb1", + "condition": "group by tbname slimit 1 soffset 1" + } + # tdSql.error(self.diff_query_form(**slimit_soffset_sql)) + order_by_tbname_sql = { + "table_expr": "stb1", + "condition": "group by tbname order by tbname" + } + tdSql.error(self.diff_query_form(**order_by_tbname_sql)) + + pass + + def diff_test_data(self, tbnum:int, data_row:int, basetime:int) -> None : + for i in range(tbnum): + for j in range(data_row): + tdSql.execute( + f"insert into t{i} values (" + f"{basetime + (j+1)*10}, {random.randint(-200, -1)}, {random.uniform(-200, -1)}, {basetime + random.randint(-200, -1)}, " + f"'binary_{j}', {random.uniform(-200, -1)}, {random.choice([0,1])}, {random.randint(-200,-1)}, " + f"{random.randint(-200, -1)}, {random.randint(-127, -1)}, 'nchar_{j}' )" + ) + + tdSql.execute( + f"insert into t{i} values (" + f"{basetime - (j+1) * 10}, {random.randint(1, 200)}, {random.uniform(1, 200)}, {basetime - random.randint(1, 200)}, " + f"'binary_{j}_1', {random.uniform(1, 200)}, {random.choice([0, 1])}, {random.randint(1,200)}, " + f"{random.randint(1,200)}, {random.randint(1,127)}, 'nchar_{j}_1' )" + ) + tdSql.execute( + f"insert into tt{i} values ( {basetime-(j+1) * 10}, {random.randint(1, 200)} )" + ) + + pass + + def diff_test_table(self,tbnum: int) -> None : + tdSql.execute("drop database if exists db") + tdSql.execute("create database if not exists db keep 3650") + tdSql.execute("use db") + + tdSql.execute( + "create stable db.stb1 (\ + ts timestamp, c1 int, c2 float, c3 timestamp, c4 binary(16), c5 double, c6 bool, \ + c7 bigint, c8 smallint, c9 tinyint, c10 nchar(16)\ + ) \ + tags(st1 int)" + ) + tdSql.execute( + "create stable db.stb2 (ts timestamp, c1 int) tags(st2 int)" + ) + for i in range(tbnum): + tdSql.execute(f"create table t{i} using stb1 tags({i})") + tdSql.execute(f"create table tt{i} using stb2 tags({i})") + + pass + + def diff_test_run(self) : + tdLog.printNoPrefix("==========TD-10594==========") + tbnum = 10 + nowtime = int(round(time.time() * 1000)) + per_table_rows = 10 + self.diff_test_table(tbnum) + + tdLog.printNoPrefix("######## no data test:") + self.diff_current_query() + self.diff_error_query() + + tdLog.printNoPrefix("######## insert only NULL test:") + for i in range(tbnum): + tdSql.execute(f"insert into t{i}(ts) values ({nowtime - 5})") + tdSql.execute(f"insert into t{i}(ts) values ({nowtime + 5})") + self.diff_current_query() + self.diff_error_query() + + tdLog.printNoPrefix("######## insert data in the range near the max(bigint/double):") + self.diff_test_table(tbnum) + tdSql.execute(f"insert into t1(ts, c1,c2,c5,c7) values " + f"({nowtime - (per_table_rows + 1) * 10}, {2**31-1}, {3.4*10**38}, {1.7*10**308}, {2**63-1})") + tdSql.execute(f"insert into t1(ts, 
c1,c2,c5,c7) values " + f"({nowtime - (per_table_rows + 2) * 10}, {2**31-1}, {3.4*10**38}, {1.7*10**308}, {2**63-1})") + self.diff_current_query() + self.diff_error_query() + + tdLog.printNoPrefix("######## insert data in the range near the min(bigint/double):") + self.diff_test_table(tbnum) + tdSql.execute(f"insert into t1(ts, c1,c2,c5,c7) values " + f"({nowtime - (per_table_rows + 1) * 10}, {1-2**31}, {-3.4*10**38}, {-1.7*10**308}, {1-2**63})") + tdSql.execute(f"insert into t1(ts, c1,c2,c5,c7) values " + f"({nowtime - (per_table_rows + 2) * 10}, {1-2**31}, {-3.4*10**38}, {-1.7*10**308}, {512-2**63})") + self.diff_current_query() + self.diff_error_query() + + tdLog.printNoPrefix("######## insert data without NULL data test:") + self.diff_test_table(tbnum) + self.diff_test_data(tbnum, per_table_rows, nowtime) + self.diff_current_query() + self.diff_error_query() + + + tdLog.printNoPrefix("######## insert data mix with NULL test:") + for i in range(tbnum): + tdSql.execute(f"insert into t{i}(ts) values ({nowtime})") + tdSql.execute(f"insert into t{i}(ts) values ({nowtime-(per_table_rows+3)*10})") + tdSql.execute(f"insert into t{i}(ts) values ({nowtime+(per_table_rows+3)*10})") + self.diff_current_query() + self.diff_error_query() + + + + tdLog.printNoPrefix("######## check after WAL test:") + tdSql.query("show dnodes") + index = tdSql.getData(0, 0) + tdDnodes.stop(index) + tdDnodes.start(index) + self.diff_current_query() + self.diff_error_query() + + def run(self): + import traceback + try: + # run in develop branch + self.diff_test_run() + pass + except Exception as e: + traceback.print_exc() + raise e + + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) \ No newline at end of file diff --git a/tests/system-test/2-query/function_stateduration.py b/tests/system-test/2-query/function_stateduration.py new file mode 100644 index 0000000000000000000000000000000000000000..b25a658469a7b4041d678ba2f7946c4ac22156bb --- /dev/null +++ b/tests/system-test/2-query/function_stateduration.py @@ -0,0 +1,431 @@ +from math import floor +from random import randint, random +from numpy import equal +import taos +import sys +import datetime +import inspect + +from util.log import * +from util.sql import * +from util.cases import * + +class TDTestCase: + updatecfgDict = {'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 , + "jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143, + "wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"fnDebugFlag":143} + + def init(self, conn, logSql): + tdLog.debug(f"start to execute {__file__}") + tdSql.init(conn.cursor()) + + def prepare_datas(self): + tdSql.execute( + '''create table stb1 + (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) + tags (t1 int) + ''' + ) + + tdSql.execute( + ''' + create table t1 + (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) + ''' + ) + for i in range(4): + tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )') + + for i in range(9): + tdSql.execute( + f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )" + 
) + tdSql.execute( + f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )" + ) + tdSql.execute("insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )") + tdSql.execute("insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )") + tdSql.execute("insert into ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )") + tdSql.execute("insert into ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )") + + tdSql.execute("insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + tdSql.execute("insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + tdSql.execute("insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + + tdSql.execute( + f'''insert into t1 values + ( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a ) + ( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a ) + ( '2021-01-01 01:01:06.000', 3, 33333, 333, 33, 3.33, 33.33, 0, "binary3", "nchar3", now()+3a ) + ( '2021-05-07 01:01:10.000', 4, 44444, 444, 44, 4.44, 44.44, 1, "binary4", "nchar4", now()+4a ) + ( '2021-07-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( '2021-09-30 01:01:16.000', 5, 55555, 555, 55, 5.55, 55.55, 0, "binary5", "nchar5", now()+5a ) + ( '2022-02-01 01:01:20.000', 6, 66666, 666, 66, 6.66, 66.66, 1, "binary6", "nchar6", now()+6a ) + ( '2022-10-28 01:01:26.000', 7, 00000, 000, 00, 0.00, 00.00, 1, "binary7", "nchar7", "1970-01-01 08:00:00.000" ) + ( '2022-12-01 01:01:30.000', 8, -88888, -888, -88, -8.88, -88.88, 0, "binary8", "nchar8", "1969-01-01 01:00:00.000" ) + ( '2022-12-31 01:01:36.000', 9, -99999999999999999, -999, -99, -9.99, -999999999999999999999.99, 1, "binary9", "nchar9", "1900-01-01 00:00:00.000" ) + ( '2023-02-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ''' + ) + + def test_errors(self): + error_sql_lists = [ + # "select stateduration(c1,'GT',5,1s) from t1" + "select stateduration from t1", + "select stateduration(123--123)==1 from t1", + "select stateduration(123,123) from t1", + "select stateduration(c1,ts) from t1", + "select stateduration(c1,c1,ts) from t1", + "select stateduration(c1 ,c2 ) from t1", + "select stateduration(c1 ,NULL) from t1", + #"select stateduration(c1 ,'NULL',1.0,1s) from t1", + "select stateduration(c1 ,'GT','1',1s) from t1", + "select stateduration(c1 ,'GT','tbname',1s) from t1", + "select stateduration(c1 ,'GT','*',1s) from t1", + "select stateduration(c1 ,'GT',ts,1s) from t1", + "select stateduration(c1 ,'GT',max(c1),1s) from t1", + "select stateduration(abs(c1) ,'GT',1,1s) from t1", + "select stateduration(c1+2 ,'GT',1,1s) from t1", + "select stateduration(c1 ,'GT',1,1u) from t1", + "select stateduration(c1 ,'GT',1,now) from t1", + "select stateduration(c1 ,'GT','1',1s) from t1", + "select stateduration(c1 ,'GT','1',True) from t1", + "select stateduration(stateduration(c1) ab from t1)", + "select stateduration(c1 ,'GT',1,,)int from t1", + "select stateduration('c1','GT',1) from t1", + "select stateduration('c1','GT', 1 , NULL) from t1", + "select 
stateduration('c1','GT', 1 , '') from t1", + "select stateduration('c1','GT', 1 ,c%) from t1", + "select stateduration(c1 ,'GT',1,t1) from t1", + "select stateduration(c1 ,'GT',1,True) from t1", + "select stateduration(c1 ,'GT',1,1s) , count(c1) from t1", + "select stateduration(c1 ,'GT',1,1s) , avg(c1) from t1", + "select stateduration(c1 ,'GT',1,1s) , min(c1) from t1", + "select stateduration(c1 ,'GT',1,1s) , spread(c1) from t1", + "select stateduration(c1 ,'GT',1,1s) , diff(c1) from t1", + "select stateduration(c1 ,'GT',1,1s) , abs(c1) from t1", + "select stateduration(c1 ,'GT',1,1s) , c1 from t1", + ] + for error_sql in error_sql_lists: + tdSql.error(error_sql) + pass + + def support_types(self): + other_no_value_types = [ + "select stateduration(ts,'GT',1,1s) from t1" , + "select stateduration(c7,'GT',1,1s) from t1", + "select stateduration(c8,'GT',1,1s) from t1", + "select stateduration(c9,'GT',1,1s) from t1", + "select stateduration(ts,'GT',1,1s) from ct1" , + "select stateduration(c7,'GT',1,1s) from ct1", + "select stateduration(c8,'GT',1,1s) from ct1", + "select stateduration(c9,'GT',1,1s) from ct1", + "select stateduration(ts,'GT',1,1s) from ct3" , + "select stateduration(c7,'GT',1,1s) from ct3", + "select stateduration(c8,'GT',1,1s) from ct3", + "select stateduration(c9,'GT',1,1s) from ct3", + "select stateduration(ts,'GT',1,1s) from ct4" , + "select stateduration(c7,'GT',1,1s) from ct4", + "select stateduration(c8,'GT',1,1s) from ct4", + "select stateduration(c9,'GT',1,1s) from ct4", + "select stateduration(ts,'GT',1,1s) from stb1 partition by tbname" , + "select stateduration(c7,'GT',1,1s) from stb1 partition by tbname", + "select stateduration(c8,'GT',1,1s) from stb1 partition by tbname", + "select stateduration(c9,'GT',1,1s) from stb1 partition by tbname" + ] + + for type_sql in other_no_value_types: + tdSql.error(type_sql) + tdLog.info("unsupported type rejected as expected , sql is : %s"%type_sql) + + type_sql_lists = [ + "select stateduration(c1,'GT',1,1s) from t1", + "select stateduration(c2,'GT',1,1s) from t1", + "select stateduration(c3,'GT',1,1s) from t1", + "select stateduration(c4,'GT',1,1s) from t1", + "select stateduration(c5,'GT',1,1s) from t1", + "select stateduration(c6,'GT',1,1s) from t1", + + "select stateduration(c1,'GT',1,1s) from ct1", + "select stateduration(c2,'GT',1,1s) from ct1", + "select stateduration(c3,'GT',1,1s) from ct1", + "select stateduration(c4,'GT',1,1s) from ct1", + "select stateduration(c5,'GT',1,1s) from ct1", + "select stateduration(c6,'GT',1,1s) from ct1", + + "select stateduration(c1,'GT',1,1s) from ct3", + "select stateduration(c2,'GT',1,1s) from ct3", + "select stateduration(c3,'GT',1,1s) from ct3", + "select stateduration(c4,'GT',1,1s) from ct3", + "select stateduration(c5,'GT',1,1s) from ct3", + "select stateduration(c6,'GT',1,1s) from ct3", + + "select stateduration(c1,'GT',1,1s) from stb1 partition by tbname", + "select stateduration(c2,'GT',1,1s) from stb1 partition by tbname", + "select stateduration(c3,'GT',1,1s) from stb1 partition by tbname", + "select stateduration(c4,'GT',1,1s) from stb1 partition by tbname", + "select stateduration(c5,'GT',1,1s) from stb1 partition by tbname", + "select stateduration(c6,'GT',1,1s) from stb1 partition by tbname", + + "select stateduration(c6,'GT',1,1s) as alisb from stb1 partition by tbname", + "select stateduration(c6,'GT',1,1s) alisb from stb1 partition by tbname", + ] + + for type_sql in type_sql_lists: + tdSql.query(type_sql) + + def support_opers(self): + oper_lists = 
['LT','lt','Lt','lT','GT','gt','Gt','gT','LE','le','Le','lE','GE','ge','Ge','gE','NE','ne','Ne','nE','EQ','eq','Eq','eQ'] + + oper_errors = [",","*","NULL","tbname","ts","sum","_c0"] + + for oper in oper_lists: + tdSql.query(f"select stateduration(c1 ,'{oper}',1,1s) as col from t1") + tdSql.checkRows(12) + + for oper in oper_errors: + tdSql.error(f"select stateduration(c1 ,'{oper}',1,1s) as col from t1") + + + def basic_stateduration_function(self): + + # basic query + tdSql.query("select c1 from ct3") + tdSql.checkRows(0) + tdSql.query("select c1 from t1") + tdSql.checkRows(12) + tdSql.query("select c1 from stb1") + tdSql.checkRows(25) + + # used for empty table, ct3 is empty + tdSql.query("select stateduration(c6,'GT',1,1s) from ct3") + tdSql.checkRows(0) + tdSql.query("select stateduration(c6,'GT',1,1s) from ct3") + tdSql.checkRows(0) + tdSql.query("select stateduration(c6,'GT',1,1s) from ct3") + tdSql.checkRows(0) + tdSql.query("select stateduration(c6,'GT',1,1s) from ct3") + tdSql.checkRows(0) + tdSql.query("select stateduration(c6,'GT',1,1s) from ct3") + tdSql.checkRows(0) + tdSql.query("select stateduration(c6,'GT',1,1s) from ct3") + tdSql.checkRows(0) + + # mixing with _rowts may be supported later + # tdSql.query("select (c6,'GT',1,1s),_rowts from ct3") + + # auto check for t1 table + # used for regular table + tdSql.query("select stateduration(c6,'GT',1,1s) from t1") + + # stateduration with super tags + + tdSql.query("select stateduration(c6,'GT',1,1s) from ct1") + tdSql.checkRows(13) + + tdSql.query("select stateduration(c6,'GT',1,1s) from ct4") + tdSql.checkRows(12) + + tdSql.error("select stateduration(c6,'GT',1,1s),tbname from ct1") + tdSql.error("select stateduration(c6,'GT',1,1s),t1 from ct1") + + # stateduration with common col + tdSql.error("select stateduration(c6,'GT',1,1s) ,ts from ct1") + tdSql.error("select stateduration(c6,'GT',1,1s) ,c1 from ct1") + + # stateduration with scalar function + tdSql.error("select stateduration(c6,'GT',1,1s) ,abs(c1) from ct1") + tdSql.error("select stateduration(c6,'GT',1,1s) , unique(c2) from ct1") + tdSql.error("select stateduration(c6,'GT',1,1s) , abs(c2)+2 from ct1") + + + # stateduration with aggregate function + tdSql.error("select stateduration(c6,'GT',1,1s) ,sum(c1) from ct1") + tdSql.error("select stateduration(c6,'GT',1,1s) ,max(c1) from ct1") + tdSql.error("select stateduration(c6,'GT',1,1s) ,csum(c1) from ct1") + tdSql.error("select stateduration(c6,'GT',1,1s) ,count(c1) from ct1") + + # stateduration with filter where + tdSql.query("select stateduration(c6,'GT',1,1s) from ct4 where c1 is null") + tdSql.checkData(0, 0, None) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + + tdSql.query("select stateduration(c1,'GT',1,1s) from t1 where c1 >2 ") + tdSql.checkData(0, 0, 0) + tdSql.checkData(1, 0, 10886404) + tdSql.checkData(2, 0, 23500810) + tdSql.checkData(4, 0, 57456020) + tdSql.checkData(5, 0, 60393624) + + tdSql.query("select stateduration(c2,'GT',1,1s) from t1 where c2 between 0 and 99999") + tdSql.checkData(0, 0, 0) + tdSql.checkData(1, 0, 6134400) + tdSql.checkData(6, 0, -1) + + + # stateduration with union all + tdSql.query("select stateduration(c1,'GT',1,1s) from ct4 union all select stateduration(c1,'GT',1,1s) from ct1") + tdSql.checkRows(25) + tdSql.query("select stateduration(c1,'GT',1,1s) from ct4 union all select distinct(c1) from ct4") + tdSql.checkRows(22) + + # stateduration with join + # prepare join data with the same ts + + tdSql.execute(" use db ") + tdSql.execute(" create stable st1 (ts timestamp , num int) tags(ind int)") + tdSql.execute(" create table tb1 using st1 
tags(1)") + tdSql.execute(" create table tb2 using st1 tags(2)") + + tdSql.execute(" create stable st2 (ts timestamp , num int) tags(ind int)") + tdSql.execute(" create table ttb1 using st2 tags(1)") + tdSql.execute(" create table ttb2 using st2 tags(2)") + + start_ts = 1622369635000 # 2021-05-30 18:13:55 + + for i in range(10): + ts_value = start_ts+i*1000 + tdSql.execute(f" insert into tb1 values({ts_value} , {i})") + tdSql.execute(f" insert into tb2 values({ts_value} , {i})") + + tdSql.execute(f" insert into ttb1 values({ts_value} , {i})") + tdSql.execute(f" insert into ttb2 values({ts_value} , {i})") + + tdSql.query("select stateduration(tb1.num,'GT',1,1s) from tb1, tb2 where tb1.ts=tb2.ts ") + tdSql.checkRows(10) + tdSql.checkData(0,0,-1) + tdSql.checkData(1,0,-1) + tdSql.checkData(2,0,0) + tdSql.checkData(9,0,7) + + tdSql.query("select stateduration(tb1.num,'GT',1,1s) from tb1, tb2 where tb1.ts=tb2.ts union all select stateduration(tb2.num,'GT',1,1s) from tb1, tb2 where tb1.ts=tb2.ts ") + tdSql.checkRows(20) + + # nest query + # tdSql.query("select unique(c1) from (select c1 from ct1)") + tdSql.query("select c1 from (select stateduration(c1,'GT',1,1s) c1 from t1)") + tdSql.checkRows(12) + tdSql.checkData(0, 0, None) + tdSql.checkData(1, 0, -1) + tdSql.checkData(2, 0, 0) + tdSql.checkData(10, 0, 63072035) + + tdSql.query("select sum(c1) from (select stateduration(c1,'GT',1,1d) c1 from t1)") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 2893) + + tdSql.query("select sum(c1) from (select distinct(c1) c1 from ct1) union all select sum(c1) from (select stateduration(c1,'GT',1,1s) c1 from ct1)") + tdSql.checkRows(2) + + tdSql.query("select 1-abs(c1) from (select stateduration(c1,'GT',1,1s) c1 from t1)") + tdSql.checkRows(12) + tdSql.checkData(0, 0, None) + tdSql.checkData(1, 0, 0.000000000) + tdSql.checkData(3, 0, -86404.000000000) + + + # bug for stable + # partition by tbname + # tdSql.query(" select unique(c1) from stb1 partition by tbname ") + # tdSql.checkRows(21) + + # tdSql.query(" select unique(c1) from stb1 partition by tbname ") + # tdSql.checkRows(21) + + # group by + tdSql.error("select stateduration(c1,'GT',1,1s) from ct1 group by c1") + tdSql.error("select stateduration(c1,'GT',1,1s) from ct1 group by tbname") + + # super table + + def check_unit_time(self): + tdSql.execute(" use db ") + tdSql.error("select stateduration(c1,'GT',1,1b) from ct1") + tdSql.error("select stateduration(c1,'GT',1,1u) from ct1") + tdSql.query("select stateduration(c1,'GT',1,1s) from t1") + tdSql.checkData(10,0,63072035) + tdSql.query("select stateduration(c1,'GT',1,1000s) from t1") + tdSql.checkData(10,0,int(63072035/1000)) + tdSql.query("select stateduration(c1,'GT',1,1m) from t1") + tdSql.checkData(10,0,int(63072035/60)) + tdSql.query("select stateduration(c1,'GT',1,1h) from t1") + tdSql.checkData(10,0,int(63072035/60/60)) + tdSql.query("select stateduration(c1,'GT',1,1d) from t1") + tdSql.checkData(10,0,int(63072035/60/60/24)) + tdSql.query("select stateduration(c1,'GT',1,1w) from t1") + tdSql.checkData(10,0,int(63072035/60/60/24/7)) + + + def check_boundary_values(self): + + tdSql.execute("drop database if exists bound_test") + tdSql.execute("create database if not exists bound_test") + tdSql.execute("use bound_test") + tdSql.execute( + "create table stb_bound (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp) tags (t1 int);" + ) + tdSql.execute(f'create table sub1_bound using stb_bound tags ( 1 )') + tdSql.execute( + 
f"insert into sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" + ) + tdSql.execute( + f"insert into sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" + ) + + tdSql.execute( + f"insert into sub1_bound values ( now(), -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" + ) + + tdSql.execute( + f"insert into sub1_bound values ( now(), 2147483643, 9223372036854775803, 32763, 123, 3.39E+38, 1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )" + ) + + tdSql.execute( + f"insert into sub1_bound values ( now(), -2147483643, -9223372036854775803, -32763, -123, -3.39E+38, -1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )" + ) + + tdSql.error( + f"insert into sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" + ) + + tdSql.query("select stateduration(c1,'GT',1,1s) from sub1_bound") + tdSql.checkRows(5) + + def run(self): # sourcery skip: extract-duplicate-method, remove-redundant-fstring + tdSql.prepare() + + tdLog.printNoPrefix("==========step1:create table ==============") + + self.prepare_datas() + + tdLog.printNoPrefix("==========step2:test errors ==============") + + self.test_errors() + + tdLog.printNoPrefix("==========step3:support types ============") + + self.support_types() + + tdLog.printNoPrefix("==========step4:support opers ============") + self.support_opers() + + tdLog.printNoPrefix("==========step5: stateduration basic query ============") + + self.basic_stateduration_function() + + tdLog.printNoPrefix("==========step6: stateduration boundary query ============") + + self.check_boundary_values() + + tdLog.printNoPrefix("==========step7: stateduration unit time test ============") + + self.check_unit_time() + + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/2-query/histogram.py b/tests/system-test/2-query/histogram.py new file mode 100644 index 0000000000000000000000000000000000000000..2c203bdceb1c6f180fc3e653aa1dd6c62512d0e2 --- /dev/null +++ b/tests/system-test/2-query/histogram.py @@ -0,0 +1,3554 @@ +import datetime + +from util.log import * +from util.sql import * +from util.cases import * +from util.dnodes import * + +PRIMARY_COL = "ts" + +INT_COL = "c1" +BINT_COL = "c2" +SINT_COL = "c3" +TINT_COL = "c4" +FLOAT_COL = "c5" +DOUBLE_COL = "c6" +BOOL_COL = "c7" + +BINARY_COL = "c8" +NCHAR_COL = "c9" +TS_COL = "c10" + +NUM_COL = [ INT_COL, BINT_COL, SINT_COL, TINT_COL, FLOAT_COL, DOUBLE_COL, ] +CHAR_COL = [ BINARY_COL, NCHAR_COL, ] +BOOLEAN_COL = [ BOOL_COL, ] +TS_TYPE_COL = [ TS_COL, ] + +ALL_COL = [ INT_COL, BINT_COL, SINT_COL, TINT_COL, FLOAT_COL, DOUBLE_COL, BOOL_COL, BINARY_COL, NCHAR_COL, TS_COL ] + +class TDTestCase: + + def init(self, conn, logSql): + tdLog.debug(f"start to execute {__file__}") + tdSql.init(conn.cursor()) + + def __query_condition(self,tbname): + query_condition = [f"cast({col} as bigint)" for col in ALL_COL] + for num_col in NUM_COL: + query_condition.extend( + ( + f"{tbname}.{num_col}", + f"abs( {tbname}.{num_col} )", + f"acos( {tbname}.{num_col} )", + f"asin( {tbname}.{num_col} )", + f"atan( {tbname}.{num_col} )", + f"avg( {tbname}.{num_col} )", + f"ceil( {tbname}.{num_col} )", + f"cos( 
{tbname}.{num_col} )", + f"count( {tbname}.{num_col} )", + f"floor( {tbname}.{num_col} )", + f"log( {tbname}.{num_col}, {tbname}.{num_col})", + f"max( {tbname}.{num_col} )", + f"min( {tbname}.{num_col} )", + f"pow( {tbname}.{num_col}, 2)", + f"round( {tbname}.{num_col} )", + f"sum( {tbname}.{num_col} )", + f"sin( {tbname}.{num_col} )", + f"sqrt( {tbname}.{num_col} )", + f"tan( {tbname}.{num_col} )", + f"cast( {tbname}.{num_col} as timestamp)", + ) + ) + for any_col in ALL_COL: + query_condition.append(f"{num_col} + {any_col}") + for char_col in CHAR_COL: + query_condition.extend( + ( + f"count({tbname}.{char_col})", + f"sum(cast({tbname}.{char_col} as bigint))", + f"max(cast({tbname}.{char_col} as bigint))", + f"min(cast({tbname}.{char_col} as bigint))", + f"avg(cast({tbname}.{char_col} as bigint))", + ) + ) + query_condition.extend( + ( + 1010, + ) + ) + + return query_condition + + def __join_condition(self, tb_list, filter=PRIMARY_COL, INNER=False): + table_reference = tb_list[0] + join_condition = table_reference + join = "inner join" if INNER else "join" + for i in range(len(tb_list[1:])): + join_condition += f" {join} {tb_list[i+1]} on {table_reference}.{filter}={tb_list[i+1]}.{filter}" + + return join_condition + + def __where_condition(self, col=None, tbname=None, query_condition=None): + if query_condition and isinstance(query_condition, str): + if query_condition.startswith("count"): + query_condition = query_condition[6:-1] + elif query_condition.startswith("max"): + query_condition = query_condition[4:-1] + elif query_condition.startswith("sum"): + query_condition = query_condition[4:-1] + elif query_condition.startswith("min"): + query_condition = query_condition[4:-1] + + if query_condition: + return f" where {query_condition} is not null" + if col in NUM_COL: + return f" where abs( {tbname}.{col} ) >= 0" + if col in CHAR_COL: + return f" where lower( {tbname}.{col} ) like 'bina%' or lower( {tbname}.{col} ) like '_cha%' " + if col in BOOLEAN_COL: + return f" where {tbname}.{col} in (false, true) " + if col in TS_TYPE_COL or col in PRIMARY_COL: + return f" where cast( {tbname}.{col} as binary(16) ) is not null " + + return "" + + def __group_condition(self, col, having = None): + if isinstance(col, str): + if col.startswith("count"): + col = col[6:-1] + elif col.startswith("max"): + col = col[4:-1] + elif col.startswith("sum"): + col = col[4:-1] + elif col.startswith("min"): + col = col[4:-1] + return f" group by {col} having {having}" if having else f" group by {col} " + + def __single_sql(self, select_clause, from_clause, where_condition="", group_condition=""): + if isinstance(select_clause, str) and "on" not in from_clause and select_clause.split(".")[0] != from_clause.split(".")[0]: + return + return f"select spread({select_clause}) from {from_clause} {where_condition} {group_condition}" + + @property + def __tb_list(self): + return [ + "ct1", + "ct4", + "t1", + "ct2", + "stb1", + ] + + def sql_list(self): + sqls = [] + __no_join_tblist = self.__tb_list + for tb in __no_join_tblist: + select_claus_list = self.__query_condition(tb) + for select_claus in select_claus_list: + group_claus = self.__group_condition(col=select_claus) + where_claus = self.__where_condition(query_condition=select_claus) + having_claus = self.__group_condition(col=select_claus, having=f"{select_claus} is not null") + sqls.extend( + ( + self.__single_sql(select_claus, tb, where_claus, having_claus), + self.__single_sql(select_claus, tb,), + self.__single_sql(select_claus, tb, where_condition=where_claus), + 
self.__single_sql(select_claus, tb, group_condition=group_claus), + ) + ) + + # return filter(None, sqls) + return list(filter(None, sqls)) + + def __get_type(self, col): + if tdSql.cursor.istype(col, "BOOL"): + return "BOOL" + if tdSql.cursor.istype(col, "INT"): + return "INT" + if tdSql.cursor.istype(col, "BIGINT"): + return "BIGINT" + if tdSql.cursor.istype(col, "TINYINT"): + return "TINYINT" + if tdSql.cursor.istype(col, "SMALLINT"): + return "SMALLINT" + if tdSql.cursor.istype(col, "FLOAT"): + return "FLOAT" + if tdSql.cursor.istype(col, "DOUBLE"): + return "DOUBLE" + if tdSql.cursor.istype(col, "BINARY"): + return "BINARY" + if tdSql.cursor.istype(col, "NCHAR"): + return "NCHAR" + if tdSql.cursor.istype(col, "TIMESTAMP"): + return "TIMESTAMP" + if tdSql.cursor.istype(col, "JSON"): + return "JSON" + if tdSql.cursor.istype(col, "TINYINT UNSIGNED"): + return "TINYINT UNSIGNED" + if tdSql.cursor.istype(col, "SMALLINT UNSIGNED"): + return "SMALLINT UNSIGNED" + if tdSql.cursor.istype(col, "INT UNSIGNED"): + return "INT UNSIGNED" + if tdSql.cursor.istype(col, "BIGINT UNSIGNED"): + return "BIGINT UNSIGNED" + + def spread_check(self): + sqls = self.sql_list() + tdLog.printNoPrefix("===step 1: current case, must return query OK") + for i in range(len(sqls)): + tdLog.info(f"sql: {sqls[i]}") + tdSql.query(sqls[i]) + + def __test_current(self): + tdSql.query("select spread(ts) from ct1") + tdSql.checkRows(1) + tdSql.query("select spread(c1) from ct2") + tdSql.checkRows(1) + tdSql.query("select spread(c1) from ct4 group by c1") + tdSql.checkRows(self.rows + 3) + tdSql.query("select spread(c1) from ct4 group by c7") + tdSql.checkRows(3) + tdSql.query("select spread(ct2.c1) from ct4 join ct2 on ct4.ts=ct2.ts") + tdSql.checkRows(1) + + self.spread_check() + + def __test_error(self): + + tdLog.printNoPrefix("===step 0: err case, must return err") + tdSql.error( "select spread() from ct1" ) + tdSql.error( "select spread(1, 2) from ct2" ) + tdSql.error( f"select spread({NUM_COL[0]}, {NUM_COL[1]}) from ct4" ) + tdSql.error( f"select spread({BOOLEAN_COL[0]}) from t1" ) + tdSql.error( f"select spread({CHAR_COL[0]}) from stb1" ) + + # tdSql.error( ''' select spread(['c1 + c1', 'c1 + c2', 'c1 + c3', 'c1 + c4', 'c1 + c5', 'c1 + c6', 'c1 + c7', 'c1 + c8', 'c1 + c9', 'c1 + c10']) + # from ct1 + # where ['c1 + c1', 'c1 + c2', 'c1 + c3', 'c1 + c4', 'c1 + c5', 'c1 + c6', 'c1 + c7', 'c1 + c8', 'c1 + c9', 'c1 + c10'] is not null + # group by ['c1 + c1', 'c1 + c2', 'c1 + c3', 'c1 + c4', 'c1 + c5', 'c1 + c6', 'c1 + c7', 'c1 + c8', 'c1 + c9', 'c1 + c10'] + # having ['c1 + c1', 'c1 + c2', 'c1 + c3', 'c1 + c4', 'c1 + c5', 'c1 + c6', 'c1 + c7', 'c1 + c8', 'c1 + c9', 'c1 + c10'] is not null ''' ) + # tdSql.error( "select c1 from ct1 union select c1 from ct2 union select c1 from ct4 ") + + def all_test(self): + self.__test_error() + self.__test_current() + + def __create_tb(self): + + tdLog.printNoPrefix("==========step1:create table") + create_stb_sql = f'''create table stb1( + ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint, + {FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool, + {BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp + ) tags (t1 int) + ''' + create_ntb_sql = f'''create table t1( + ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint, + {FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool, + {BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp + ) + ''' + tdSql.execute(create_stb_sql) + 
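+        # stb1 is a super table (tagged by t1) while t1 is a plain table with the
+        # same column layout; both are created so the spread() checks above can run
+        # against super tables, child tables and normal tables alike.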
tdSql.execute(create_ntb_sql) + + for i in range(4): + tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )') + + def __insert_data(self, rows): + now_time = int(datetime.datetime.timestamp(datetime.datetime.now()) * 1000) + for i in range(rows): + tdSql.execute( + f"insert into ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" + ) + tdSql.execute( + f"insert into ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" + ) + tdSql.execute( + f"insert into ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" + ) + tdSql.execute( + f'''insert into ct1 values + ( { now_time - rows * 5 }, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar_测试_0', { now_time + 8 } ) + ( { now_time + 10000 }, { rows }, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar_测试_9', { now_time + 9 } ) + ''' + ) + + tdSql.execute( + f'''insert into ct4 values + ( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( + { now_time + 5184000000}, {pow(2,31)-pow(2,15)}, {pow(2,63)-pow(2,30)}, 32767, 127, + { 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000} + ) + ( + { now_time + 2592000000 }, {pow(2,31)-pow(2,16)}, {pow(2,63)-pow(2,31)}, 32766, 126, + { 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000} + ) + ''' + ) + + tdSql.execute( + f'''insert into ct2 values + ( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( + { now_time + 5184000000 }, { -1 * pow(2,31) + pow(2,15) }, { -1 * pow(2,63) + pow(2,30) }, -32766, -126, + { -1 * 3.2 * pow(10,38) }, { -1.2 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 } + ) + ( + { now_time + 2592000000 }, { -1 * pow(2,31) + pow(2,16) }, { -1 * pow(2,63) + pow(2,31) }, -32767, -127, + { - 3.3 * pow(10,38) }, { -1.3 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 } + ) + ''' + ) + + for i in range(rows): + insert_data = f'''insert into t1 values + ( { now_time - i * 3600000 }, {i}, {i * 11111}, { i % 32767 }, { i % 127}, { i * 1.11111 }, { i * 1000.1111 }, { i % 2}, + "binary_{i}", "nchar_测试_{i}", { now_time - 1000 * i } ) + ''' + tdSql.execute(insert_data) + tdSql.execute( + f'''insert into t1 values + ( { now_time + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time - (( rows // 2 ) * 60 + 30) * 60000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time - rows * 3600000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time + 
7200000 }, { pow(2,31) - pow(2,15) }, { pow(2,63) - pow(2,30) }, 32767, 127, + { 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 }, + "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 } + ) + ( + { now_time + 3600000 } , { pow(2,31) - pow(2,16) }, { pow(2,63) - pow(2,31) }, 32766, 126, + { 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 }, + "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 } + ) + ''' + ) + + + def run(self): + tdSql.prepare() + + tdLog.printNoPrefix("==========step1:create table") + self.__create_tb() + + tdLog.printNoPrefix("==========step2:insert data") + self.rows = 10 + self.__insert_data(self.rows) + + tdLog.printNoPrefix("==========step3:all check") + self.all_test() + + tdDnodes.stop(1) + tdDnodes.start(1) + + tdSql.execute("use db") + + tdLog.printNoPrefix("==========step4:after wal, all check again ") + self.all_test() + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) + + + + + +################################################################### +# Copyright (c) 2021 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import os +import sys +from util.log import * +from util.cases import * +from util.sql import * + + +class TDTestCase: + def caseDescription(self): + ''' + case1: [TD-11222]: Histogram function + ''' + return + + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosd" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root) - len("/build/bin")] + break + return buildPath + + def run(self): + print("running {}".format(__file__)) + tdSql.execute("drop database if exists db") + tdSql.execute("create database if not exists db") + tdSql.execute('use db') + + # Prepare data + tdSql.execute("create stable stb (col_timestamp timestamp, col_tinyint tinyint, col_smallint smallint, col_int int, col_bigint bigint, col_float float, col_double double, col_bool bool, col_binary binary(10), col_nchar nchar(10)) \ + tags(tag_timestamp timestamp, tag_tinyint tinyint, tag_smallint smallint, tag_int int, tag_bigint bigint, tag_float float, tag_double double, tag_bool bool, tag_binary binary(10), tag_nchar nchar(10));") + tdSql.execute("create table ctb using stb tags (now, 1, 1, 1, 1, 1.0, 1.0, true, 'abc', 'abc');") + tdSql.execute("create table tb (col_timestamp timestamp, col_tinyint tinyint, col_smallint smallint, col_int int, col_bigint bigint, col_float float, col_double double, col_bool bool, col_binary binary(10), col_nchar nchar(10));") + + tdSql.execute("insert into ctb values (now, -9, -9, -9, -9, -9.5, -9.5, true, 'abc', 'abc');") + tdSql.execute("insert into ctb values (now + 1s, -1, -1, -1, -1, -1.5, -1.5, true, 'abc', 'abc');") + 
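+        # The fixed rows inserted here (-9, -1, 1..10, 15, 20, 99 plus one all-NULL
+        # row, mirrored into both ctb and tb) keep every histogram bin count in the
+        # checks below deterministic.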
tdSql.execute("insert into ctb values (now + 2s, 1, 1, 1, 1, 1.5, 1.5, true, 'abc', 'abc');") + tdSql.execute("insert into ctb values (now + 3s, 2, 2, 2, 2, 2.5, 2.5, true, 'abc', 'abc');") + tdSql.execute("insert into ctb values (now + 4s, 3, 3, 3, 3, 3.5, 3.5, true, 'abc', 'abc');") + tdSql.execute("insert into ctb values (now + 5s, 4, 4, 4, 4, 4.5, 4.5, true, 'abc', 'abc');") + tdSql.execute("insert into ctb values (now + 6s, 5, 5, 5, 5, 5.5, 5.5, true, 'abc', 'abc');") + tdSql.execute("insert into ctb values (now + 7s, 6, 6, 6, 6, 6.5, 6.5, true, 'abc', 'abc');") + tdSql.execute("insert into ctb values (now + 8s, 7, 7, 7, 7, 7.5, 7.5, false, 'abc', 'abc');") + tdSql.execute("insert into ctb values (now + 9s, 8, 8, 8, 8, 8.5, 8.5, false, 'abc', 'abc');") + tdSql.execute("insert into ctb values (now + 10s, 9, 9, 9, 9, 9.5, 9.5, false, 'abc', 'abc');") + tdSql.execute("insert into ctb values (now + 11s, 10, 10, 10, 10, 10.5, 10.5, false, 'abc', 'abc');") + tdSql.execute("insert into ctb values (now + 12s, 15, 15, 15, 15, 15.5, 15.5, false, 'abc', 'abc');") + tdSql.execute("insert into ctb values (now + 13s, 20, 20, 20, 20, 20.5, 20.5, false, 'abc', 'abc');") + tdSql.execute("insert into ctb values (now + 14s, 99, 99, 99, 99, 99.5, 99.5, false, 'abc', 'abc');") + tdSql.execute("insert into ctb values (now + 15s, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL);") + + tdSql.execute("insert into tb values (now, -9, -9, -9, -9, -9.5, -9.5, true, 'abc', 'abc');") + tdSql.execute("insert into tb values (now + 1s, -1, -1, -1, -1, -1.5, -1.5, true, 'abc', 'abc');") + tdSql.execute("insert into tb values (now + 2s, 1, 1, 1, 1, 1.5, 1.5, true, 'abc', 'abc');") + tdSql.execute("insert into tb values (now + 3s, 2, 2, 2, 2, 2.5, 2.5, true, 'abc', 'abc');") + tdSql.execute("insert into tb values (now + 4s, 3, 3, 3, 3, 3.5, 3.5, true, 'abc', 'abc');") + tdSql.execute("insert into tb values (now + 5s, 4, 4, 4, 4, 4.5, 4.5, true, 'abc', 'abc');") + tdSql.execute("insert into tb values (now + 6s, 5, 5, 5, 5, 5.5, 5.5, true, 'abc', 'abc');") + tdSql.execute("insert into tb values (now + 7s, 6, 6, 6, 6, 6.5, 6.5, true, 'abc', 'abc');") + tdSql.execute("insert into tb values (now + 8s, 7, 7, 7, 7, 7.5, 7.5, false, 'abc', 'abc');") + tdSql.execute("insert into tb values (now + 9s, 8, 8, 8, 8, 8.5, 8.5, false, 'abc', 'abc');") + tdSql.execute("insert into tb values (now + 10s, 9, 9, 9, 9, 9.5, 9.5, false, 'abc', 'abc');") + tdSql.execute("insert into tb values (now + 11s, 10, 10, 10, 10, 10.5, 10.5, false, 'abc', 'abc');") + tdSql.execute("insert into tb values (now + 12s, 15, 15, 15, 15, 15.5, 15.5, false, 'abc', 'abc');") + tdSql.execute("insert into tb values (now + 13s, 20, 20, 20, 20, 20.5, 20.5, false, 'abc', 'abc');") + tdSql.execute("insert into tb values (now + 14s, 99, 99, 99, 99, 99.5, 99.5, false, 'abc', 'abc');") + tdSql.execute("insert into tb values (now + 15s, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL);") + + #execute query + print("============== STEP 1: column types ================== ") + #Supported column types + tdSql.query('select histogram(col_tinyint, "user_input", "[1,3,5,7]", 0) from stb;') + tdSql.checkRows(3); + tdSql.query('select histogram(col_tinyint, "user_input", "[1,3,5,7]", 0) from ctb;') + tdSql.checkRows(3); + tdSql.query('select histogram(col_tinyint, "user_input", "[1,3,5,7]", 0) from tb;') + tdSql.checkRows(3); + + tdSql.query('select histogram(col_smallint, "user_input", "[1,3,5,7]", 0) from stb;') + tdSql.checkRows(3); + tdSql.query('select 
histogram(col_smallint, "user_input", "[1,3,5,7]", 0) from ctb;') + tdSql.checkRows(3); + tdSql.query('select histogram(col_smallint, "user_input", "[1,3,5,7]", 0) from tb;') + tdSql.checkRows(3); + + tdSql.query('select histogram(col_int, "user_input", "[1,3,5,7]", 0) from stb;') + tdSql.checkRows(3); + tdSql.query('select histogram(col_int, "user_input", "[1,3,5,7]", 0) from ctb;') + tdSql.checkRows(3); + tdSql.query('select histogram(col_int, "user_input", "[1,3,5,7]", 0) from tb;') + tdSql.checkRows(3); + + tdSql.query('select histogram(col_bigint, "user_input", "[1,3,5,7]", 0) from stb;') + tdSql.checkRows(3); + tdSql.query('select histogram(col_bigint, "user_input", "[1,3,5,7]", 0) from ctb;') + tdSql.checkRows(3); + tdSql.query('select histogram(col_bigint, "user_input", "[1,3,5,7]", 0) from tb;') + tdSql.checkRows(3); + + tdSql.query('select histogram(col_float, "user_input", "[1,3,5,7]", 0) from stb;') + tdSql.checkRows(3); + tdSql.query('select histogram(col_float, "user_input", "[1,3,5,7]", 0) from ctb;') + tdSql.checkRows(3); + tdSql.query('select histogram(col_float, "user_input", "[1,3,5,7]", 0) from tb;') + tdSql.checkRows(3); + + tdSql.query('select histogram(col_double, "user_input", "[1,3,5,7]", 0) from stb;') + tdSql.checkRows(3); + tdSql.query('select histogram(col_double, "user_input", "[1,3,5,7]", 0) from ctb;') + tdSql.checkRows(3); + tdSql.query('select histogram(col_double, "user_input", "[1,3,5,7]", 0) from tb;') + tdSql.checkRows(3); + + #Unsupported column types + tdSql.error('select histogram(col_timestamp, "user_input", "[1,3,5,7]", 0) from stb;') + tdSql.error('select histogram(col_timestamp, "user_input", "[1,3,5,7]", 0) from ctb;') + tdSql.error('select histogram(col_timestamp, "user_input", "[1,3,5,7]", 0) from tb;') + + tdSql.error('select histogram(col_bool, "user_input", "[1,3,5,7]", 0) from stb;') + tdSql.error('select histogram(col_bool, "user_input", "[1,3,5,7]", 0) from ctb;') + tdSql.error('select histogram(col_bool, "user_input", "[1,3,5,7]", 0) from tb;') + + tdSql.error('select histogram(col_binary, "user_input", "[1,3,5,7]", 0) from stb;') + tdSql.error('select histogram(col_binary, "user_input", "[1,3,5,7]", 0) from ctb;') + tdSql.error('select histogram(col_binary, "user_input", "[1,3,5,7]", 0) from tb;') + + tdSql.error('select histogram(col_nchar, "user_input", "[1,3,5,7]", 0) from stb;') + tdSql.error('select histogram(col_nchar, "user_input", "[1,3,5,7]", 0) from ctb;') + tdSql.error('select histogram(col_nchar, "user_input", "[1,3,5,7]", 0) from tb;') + + tdSql.error('select histogram(col, "user_input", "[1,3,5,7]", 0) from stb;') + tdSql.error('select histogram(col, "user_input", "[1,3,5,7]", 0) from ctb;') + tdSql.error('select histogram(col, "user_input", "[1,3,5,7]", 0) from tb;') + + #Unsupported tags + tdSql.error('select histogram(tag_timestamp, "user_input", "[1,3,5,7]", 0) from stb;') + tdSql.error('select histogram(tag_timestamp, "user_input", "[1,3,5,7]", 0) from ctb;') + tdSql.error('select histogram(tag_timestamp, "user_input", "[1,3,5,7]", 0) from tb;') + + tdSql.error('select histogram(tag_tinyint, "user_input", "[1,3,5,7]", 0) from stb;') + tdSql.error('select histogram(tag_tinyint, "user_input", "[1,3,5,7]", 0) from ctb;') + tdSql.error('select histogram(tag_tinyint, "user_input", "[1,3,5,7]", 0) from tb;') + + tdSql.error('select histogram(tag_smallint, "user_input", "[1,3,5,7]", 0) from stb;') + tdSql.error('select histogram(tag_smallint, "user_input", "[1,3,5,7]", 0) from ctb;') + tdSql.error('select 
histogram(tag_smallint, "user_input", "[1,3,5,7]", 0) from tb;') + + tdSql.error('select histogram(tag_int, "user_input", "[1,3,5,7]", 0) from stb;') + tdSql.error('select histogram(tag_int, "user_input", "[1,3,5,7]", 0) from ctb;') + tdSql.error('select histogram(tag_int, "user_input", "[1,3,5,7]", 0) from tb;') + + tdSql.error('select histogram(tag_bigint, "user_input", "[1,3,5,7]", 0) from stb;') + tdSql.error('select histogram(tag_bigint, "user_input", "[1,3,5,7]", 0) from ctb;') + tdSql.error('select histogram(tag_bigint, "user_input", "[1,3,5,7]", 0) from tb;') + + tdSql.error('select histogram(tag_float, "user_input", "[1,3,5,7]", 0) from stb;') + tdSql.error('select histogram(tag_float, "user_input", "[1,3,5,7]", 0) from ctb;') + tdSql.error('select histogram(tag_float, "user_input", "[1,3,5,7]", 0) from tb;') + + tdSql.error('select histogram(tag_double, "user_input", "[1,3,5,7]", 0) from stb;') + tdSql.error('select histogram(tag_double, "user_input", "[1,3,5,7]", 0) from ctb;') + tdSql.error('select histogram(tag_double, "user_input", "[1,3,5,7]", 0) from tb;') + + tdSql.error('select histogram(tag_bool, "user_input", "[1,3,5,7]", 0) from stb;') + tdSql.error('select histogram(tag_bool, "user_input", "[1,3,5,7]", 0) from ctb;') + tdSql.error('select histogram(tag_bool, "user_input", "[1,3,5,7]", 0) from tb;') + + tdSql.error('select histogram(tag_binary, "user_input", "[1,3,5,7]", 0) from stb;') + tdSql.error('select histogram(tag_binary, "user_input", "[1,3,5,7]", 0) from ctb;') + tdSql.error('select histogram(tag_binary, "user_input", "[1,3,5,7]", 0) from tb;') + + tdSql.error('select histogram(tag_nchar, "user_input", "[1,3,5,7]", 0) from stb;') + tdSql.error('select histogram(tag_nchar, "user_input", "[1,3,5,7]", 0) from ctb;') + tdSql.error('select histogram(tag_nchar, "user_input", "[1,3,5,7]", 0) from tb;') + + + print("============== STEP 2: bin types ================== ") + ## user_input ## + #TINYINT + tdSql.query('select histogram(col_tinyint, "user_input", "[1,3,5]", 0) from stb;') + tdSql.checkRows(2); + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":2}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.query('select histogram(col_tinyint, "user_input", "[1,3,5]", 0) from ctb;') + tdSql.checkRows(2); + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":2}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.query('select histogram(col_tinyint, "user_input", "[1,3,5]", 0) from tb;') + tdSql.checkRows(2); + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":2}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + + + tdSql.query('select histogram(col_tinyint, "user_input", "[1,3,5,7]", 0) from stb;') + tdSql.checkRows(3); + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":2}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}'); + tdSql.query('select histogram(col_tinyint, "user_input", "[1,3,5,7]", 0) from ctb;') + tdSql.checkRows(3); + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":2}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}'); + tdSql.query('select histogram(col_tinyint, "user_input", "[1,3,5,7]", 0) from tb;') + tdSql.checkRows(3); + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":2}'); + tdSql.checkData(1, 0, 
'{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}'); + + tdSql.query('select histogram(col_tinyint, "user_input", "[0,10,20,100]", 0) from stb;') + tdSql.checkRows(3); + tdSql.checkData(0, 0, '{"lower_bin":0, "upper_bin":10, "count":10}'); + tdSql.checkData(1, 0, '{"lower_bin":10, "upper_bin":20, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":20, "upper_bin":100, "count":1}'); + tdSql.query('select histogram(col_tinyint, "user_input", "[0,10,20,100]", 0) from ctb;') + tdSql.checkRows(3); + tdSql.checkData(0, 0, '{"lower_bin":0, "upper_bin":10, "count":10}'); + tdSql.checkData(1, 0, '{"lower_bin":10, "upper_bin":20, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":20, "upper_bin":100, "count":1}'); + tdSql.query('select histogram(col_tinyint, "user_input", "[0,10,20,100]", 0) from tb;') + tdSql.checkRows(3); + tdSql.checkData(0, 0, '{"lower_bin":0, "upper_bin":10, "count":10}'); + tdSql.checkData(1, 0, '{"lower_bin":10, "upper_bin":20, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":20, "upper_bin":100, "count":1}'); + + tdSql.query('select histogram(col_tinyint, "user_input", "[-10,10,20,100]", 0) from stb;') + tdSql.checkRows(3); + tdSql.checkData(0, 0, '{"lower_bin":-10, "upper_bin":10, "count":12}'); + tdSql.checkData(1, 0, '{"lower_bin":10, "upper_bin":20, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":20, "upper_bin":100, "count":1}'); + tdSql.query('select histogram(col_tinyint, "user_input", "[-10,10,20,100]", 0) from ctb;') + tdSql.checkRows(3); + tdSql.checkData(0, 0, '{"lower_bin":-10, "upper_bin":10, "count":12}'); + tdSql.checkData(1, 0, '{"lower_bin":10, "upper_bin":20, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":20, "upper_bin":100, "count":1}'); + tdSql.query('select histogram(col_tinyint, "user_input", "[-10,10,20,100]", 0) from tb;') + tdSql.checkRows(3); + tdSql.checkData(0, 0, '{"lower_bin":-10, "upper_bin":10, "count":12}'); + tdSql.checkData(1, 0, '{"lower_bin":10, "upper_bin":20, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":20, "upper_bin":100, "count":1}'); + + tdSql.query('select histogram(col_tinyint, "user_input", "[-8.9,9.9,19.9,99.9]", 0) from stb;') + tdSql.checkRows(3); + tdSql.checkData(0, 0, '{"lower_bin":-8.9, "upper_bin":9.9, "count":10}'); + tdSql.checkData(1, 0, '{"lower_bin":9.9, "upper_bin":19.9, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":19.9, "upper_bin":99.9, "count":2}'); + tdSql.query('select histogram(col_tinyint, "user_input", "[-8.9,9.9,19.9,99.9]", 0) from ctb;') + tdSql.checkRows(3); + tdSql.checkData(0, 0, '{"lower_bin":-8.9, "upper_bin":9.9, "count":10}'); + tdSql.checkData(1, 0, '{"lower_bin":9.9, "upper_bin":19.9, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":19.9, "upper_bin":99.9, "count":2}'); + tdSql.query('select histogram(col_tinyint, "user_input", "[-8.9,9.9,19.9,99.9]", 0) from tb;') + tdSql.checkRows(3); + tdSql.checkData(0, 0, '{"lower_bin":-8.9, "upper_bin":9.9, "count":10}'); + tdSql.checkData(1, 0, '{"lower_bin":9.9, "upper_bin":19.9, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":19.9, "upper_bin":99.9, "count":2}'); + + tdSql.query('select histogram(col_tinyint, "user_input", "[-99999999999999,9.9,19.9,99999999999999]", 0) from stb;') + tdSql.checkRows(3); + tdSql.checkData(0, 0, '{"lower_bin":-1e+14, "upper_bin":9.9, "count":11}'); + tdSql.checkData(1, 0, '{"lower_bin":9.9, "upper_bin":19.9, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":19.9, "upper_bin":1e+14, "count":2}'); + tdSql.query('select 
histogram(col_tinyint, "user_input", "[-99999999999999,9.9,19.9,99999999999999]", 0) from ctb;') + tdSql.checkRows(3); + tdSql.checkData(0, 0, '{"lower_bin":-1e+14, "upper_bin":9.9, "count":11}'); + tdSql.checkData(1, 0, '{"lower_bin":9.9, "upper_bin":19.9, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":19.9, "upper_bin":1e+14, "count":2}'); + tdSql.query('select histogram(col_tinyint, "user_input", "[-99999999999999,9.9,19.9,99999999999999]", 0) from tb;') + tdSql.checkRows(3); + tdSql.checkData(0, 0, '{"lower_bin":-1e+14, "upper_bin":9.9, "count":11}'); + tdSql.checkData(1, 0, '{"lower_bin":9.9, "upper_bin":19.9, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":19.9, "upper_bin":1e+14, "count":2}'); + + #SMALLINT + tdSql.query('select histogram(col_smallint, "user_input", "[1,3,5]", 0) from stb;') + tdSql.checkRows(2); + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":2}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.query('select histogram(col_smallint, "user_input", "[1,3,5]", 0) from ctb;') + tdSql.checkRows(2); + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":2}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.query('select histogram(col_smallint, "user_input", "[1,3,5]", 0) from tb;') + tdSql.checkRows(2); + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":2}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + + tdSql.query('select histogram(col_smallint, "user_input", "[1,3,5,7]", 0) from stb;') + tdSql.checkRows(3); + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":2}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}'); + tdSql.query('select histogram(col_smallint, "user_input", "[1,3,5,7]", 0) from ctb;') + tdSql.checkRows(3); + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":2}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}'); + tdSql.query('select histogram(col_smallint, "user_input", "[1,3,5,7]", 0) from tb;') + tdSql.checkRows(3); + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":2}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}'); + + tdSql.query('select histogram(col_smallint, "user_input", "[0,10,20,100]", 0) from stb;') + tdSql.checkRows(3); + tdSql.checkData(0, 0, '{"lower_bin":0, "upper_bin":10, "count":10}'); + tdSql.checkData(1, 0, '{"lower_bin":10, "upper_bin":20, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":20, "upper_bin":100, "count":1}'); + tdSql.query('select histogram(col_smallint, "user_input", "[0,10,20,100]", 0) from ctb;') + tdSql.checkRows(3); + tdSql.checkData(0, 0, '{"lower_bin":0, "upper_bin":10, "count":10}'); + tdSql.checkData(1, 0, '{"lower_bin":10, "upper_bin":20, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":20, "upper_bin":100, "count":1}'); + tdSql.query('select histogram(col_smallint, "user_input", "[0,10,20,100]", 0) from tb;') + tdSql.checkRows(3); + tdSql.checkData(0, 0, '{"lower_bin":0, "upper_bin":10, "count":10}'); + tdSql.checkData(1, 0, '{"lower_bin":10, "upper_bin":20, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":20, "upper_bin":100, "count":1}'); + + tdSql.query('select histogram(col_smallint, "user_input", "[-10,10,20,100]", 0) from stb;') + tdSql.checkRows(3); + 
tdSql.checkData(0, 0, '{"lower_bin":-10, "upper_bin":10, "count":12}'); + tdSql.checkData(1, 0, '{"lower_bin":10, "upper_bin":20, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":20, "upper_bin":100, "count":1}'); + tdSql.query('select histogram(col_smallint, "user_input", "[-10,10,20,100]", 0) from ctb;') + tdSql.checkRows(3); + tdSql.checkData(0, 0, '{"lower_bin":-10, "upper_bin":10, "count":12}'); + tdSql.checkData(1, 0, '{"lower_bin":10, "upper_bin":20, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":20, "upper_bin":100, "count":1}'); + tdSql.query('select histogram(col_smallint, "user_input", "[-10,10,20,100]", 0) from tb;') + tdSql.checkRows(3); + tdSql.checkData(0, 0, '{"lower_bin":-10, "upper_bin":10, "count":12}'); + tdSql.checkData(1, 0, '{"lower_bin":10, "upper_bin":20, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":20, "upper_bin":100, "count":1}'); + + tdSql.query('select histogram(col_smallint, "user_input", "[-8.9,9.9,19.9,99.9]", 0) from stb;') + tdSql.checkRows(3); + tdSql.checkData(0, 0, '{"lower_bin":-8.9, "upper_bin":9.9, "count":10}'); + tdSql.checkData(1, 0, '{"lower_bin":9.9, "upper_bin":19.9, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":19.9, "upper_bin":99.9, "count":2}'); + tdSql.query('select histogram(col_smallint, "user_input", "[-8.9,9.9,19.9,99.9]", 0) from ctb;') + tdSql.checkRows(3); + tdSql.checkData(0, 0, '{"lower_bin":-8.9, "upper_bin":9.9, "count":10}'); + tdSql.checkData(1, 0, '{"lower_bin":9.9, "upper_bin":19.9, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":19.9, "upper_bin":99.9, "count":2}'); + tdSql.query('select histogram(col_smallint, "user_input", "[-8.9,9.9,19.9,99.9]", 0) from tb;') + tdSql.checkRows(3); + tdSql.checkData(0, 0, '{"lower_bin":-8.9, "upper_bin":9.9, "count":10}'); + tdSql.checkData(1, 0, '{"lower_bin":9.9, "upper_bin":19.9, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":19.9, "upper_bin":99.9, "count":2}'); + + tdSql.query('select histogram(col_smallint, "user_input", "[-99999999999999,9.9,19.9,99999999999999]", 0) from stb;') + tdSql.checkRows(3); + tdSql.checkData(0, 0, '{"lower_bin":-1e+14, "upper_bin":9.9, "count":11}'); + tdSql.checkData(1, 0, '{"lower_bin":9.9, "upper_bin":19.9, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":19.9, "upper_bin":1e+14, "count":2}'); + tdSql.query('select histogram(col_smallint, "user_input", "[-99999999999999,9.9,19.9,99999999999999]", 0) from ctb;') + tdSql.checkRows(3); + tdSql.checkData(0, 0, '{"lower_bin":-1e+14, "upper_bin":9.9, "count":11}'); + tdSql.checkData(1, 0, '{"lower_bin":9.9, "upper_bin":19.9, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":19.9, "upper_bin":1e+14, "count":2}'); + tdSql.query('select histogram(col_smallint, "user_input", "[-99999999999999,9.9,19.9,99999999999999]", 0) from tb;') + tdSql.checkRows(3); + tdSql.checkData(0, 0, '{"lower_bin":-1e+14, "upper_bin":9.9, "count":11}'); + tdSql.checkData(1, 0, '{"lower_bin":9.9, "upper_bin":19.9, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":19.9, "upper_bin":1e+14, "count":2}'); + + #INT + tdSql.query('select histogram(col_int, "user_input", "[1,3,5]", 0) from stb;') + tdSql.checkRows(2); + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":2}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.query('select histogram(col_int, "user_input", "[1,3,5]", 0) from ctb;') + tdSql.checkRows(2); + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":2}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + 
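+        # Observed from the counts above: with boundaries "[1,3,5]" the value 1 is
+        # excluded while 3 is included, so "user_input" bins appear to behave as
+        # left-open/right-closed intervals (1,3] and (3,5], i.e. N boundaries
+        # produce N-1 bins.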
+        tdSql.query('select histogram(col_int, "user_input", "[1,3,5]", 0) from tb;')
+        tdSql.checkRows(2);
+        tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":2}');
+        tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}');
+
+        tdSql.query('select histogram(col_int, "user_input", "[1,3,5,7]", 0) from stb;')
+        tdSql.checkRows(3);
+        tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":2}');
+        tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}');
+        tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}');
+        tdSql.query('select histogram(col_int, "user_input", "[1,3,5,7]", 0) from ctb;')
+        tdSql.checkRows(3);
+        tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":2}');
+        tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}');
+        tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}');
+        tdSql.query('select histogram(col_int, "user_input", "[1,3,5,7]", 0) from tb;')
+        tdSql.checkRows(3);
+        tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":2}');
+        tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}');
+        tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}');
+
+        tdSql.query('select histogram(col_int, "user_input", "[0,10,20,100]", 0) from stb;')
+        tdSql.checkRows(3);
+        tdSql.checkData(0, 0, '{"lower_bin":0, "upper_bin":10, "count":10}');
+        tdSql.checkData(1, 0, '{"lower_bin":10, "upper_bin":20, "count":2}');
+        tdSql.checkData(2, 0, '{"lower_bin":20, "upper_bin":100, "count":1}');
+        tdSql.query('select histogram(col_int, "user_input", "[0,10,20,100]", 0) from ctb;')
+        tdSql.checkRows(3);
+        tdSql.checkData(0, 0, '{"lower_bin":0, "upper_bin":10, "count":10}');
+        tdSql.checkData(1, 0, '{"lower_bin":10, "upper_bin":20, "count":2}');
+        tdSql.checkData(2, 0, '{"lower_bin":20, "upper_bin":100, "count":1}');
+        tdSql.query('select histogram(col_int, "user_input", "[0,10,20,100]", 0) from tb;')
+        tdSql.checkRows(3);
+        tdSql.checkData(0, 0, '{"lower_bin":0, "upper_bin":10, "count":10}');
+        tdSql.checkData(1, 0, '{"lower_bin":10, "upper_bin":20, "count":2}');
+        tdSql.checkData(2, 0, '{"lower_bin":20, "upper_bin":100, "count":1}');
+
+        tdSql.query('select histogram(col_int, "user_input", "[-10,10,20,100]", 0) from stb;')
+        tdSql.checkRows(3);
+        tdSql.checkData(0, 0, '{"lower_bin":-10, "upper_bin":10, "count":12}');
+        tdSql.checkData(1, 0, '{"lower_bin":10, "upper_bin":20, "count":2}');
+        tdSql.checkData(2, 0, '{"lower_bin":20, "upper_bin":100, "count":1}');
+        tdSql.query('select histogram(col_int, "user_input", "[-10,10,20,100]", 0) from ctb;')
+        tdSql.checkRows(3);
+        tdSql.checkData(0, 0, '{"lower_bin":-10, "upper_bin":10, "count":12}');
+        tdSql.checkData(1, 0, '{"lower_bin":10, "upper_bin":20, "count":2}');
+        tdSql.checkData(2, 0, '{"lower_bin":20, "upper_bin":100, "count":1}');
+        tdSql.query('select histogram(col_int, "user_input", "[-10,10,20,100]", 0) from tb;')
+        tdSql.checkRows(3);
+        tdSql.checkData(0, 0, '{"lower_bin":-10, "upper_bin":10, "count":12}');
+        tdSql.checkData(1, 0, '{"lower_bin":10, "upper_bin":20, "count":2}');
+        tdSql.checkData(2, 0, '{"lower_bin":20, "upper_bin":100, "count":1}');
+
+        tdSql.query('select histogram(col_int, "user_input", "[-8.9,9.9,19.9,99.9]", 0) from stb;')
+        tdSql.checkRows(3);
+        tdSql.checkData(0, 0, '{"lower_bin":-8.9, "upper_bin":9.9, "count":10}');
+        tdSql.checkData(1, 0, '{"lower_bin":9.9, "upper_bin":19.9, "count":2}');
+        tdSql.checkData(2, 0, '{"lower_bin":19.9, "upper_bin":99.9, "count":2}');
+        tdSql.query('select histogram(col_int, "user_input", "[-8.9,9.9,19.9,99.9]", 0) from ctb;')
+        tdSql.checkRows(3);
+        tdSql.checkData(0, 0, '{"lower_bin":-8.9, "upper_bin":9.9, "count":10}');
+        tdSql.checkData(1, 0, '{"lower_bin":9.9, "upper_bin":19.9, "count":2}');
+        tdSql.checkData(2, 0, '{"lower_bin":19.9, "upper_bin":99.9, "count":2}');
+        tdSql.query('select histogram(col_int, "user_input", "[-8.9,9.9,19.9,99.9]", 0) from tb;')
+        tdSql.checkRows(3);
+        tdSql.checkData(0, 0, '{"lower_bin":-8.9, "upper_bin":9.9, "count":10}');
+        tdSql.checkData(1, 0, '{"lower_bin":9.9, "upper_bin":19.9, "count":2}');
+        tdSql.checkData(2, 0, '{"lower_bin":19.9, "upper_bin":99.9, "count":2}');
+
+        tdSql.query('select histogram(col_int, "user_input", "[-99999999999999,9.9,19.9,99999999999999]", 0) from stb;')
+        tdSql.checkRows(3);
+        tdSql.checkData(0, 0, '{"lower_bin":-1e+14, "upper_bin":9.9, "count":11}');
+        tdSql.checkData(1, 0, '{"lower_bin":9.9, "upper_bin":19.9, "count":2}');
+        tdSql.checkData(2, 0, '{"lower_bin":19.9, "upper_bin":1e+14, "count":2}');
+        tdSql.query('select histogram(col_int, "user_input", "[-99999999999999,9.9,19.9,99999999999999]", 0) from ctb;')
+        tdSql.checkRows(3);
+        tdSql.checkData(0, 0, '{"lower_bin":-1e+14, "upper_bin":9.9, "count":11}');
+        tdSql.checkData(1, 0, '{"lower_bin":9.9, "upper_bin":19.9, "count":2}');
+        tdSql.checkData(2, 0, '{"lower_bin":19.9, "upper_bin":1e+14, "count":2}');
+        tdSql.query('select histogram(col_int, "user_input", "[-99999999999999,9.9,19.9,99999999999999]", 0) from tb;')
+        tdSql.checkRows(3);
+        tdSql.checkData(0, 0, '{"lower_bin":-1e+14, "upper_bin":9.9, "count":11}');
+        tdSql.checkData(1, 0, '{"lower_bin":9.9, "upper_bin":19.9, "count":2}');
+        tdSql.checkData(2, 0, '{"lower_bin":19.9, "upper_bin":1e+14, "count":2}');
+
+        #BIGINT
+        tdSql.query('select histogram(col_bigint, "user_input", "[1,3,5]", 0) from stb;')
+        tdSql.checkRows(2);
+        tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":2}');
+        tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}');
+        tdSql.query('select histogram(col_bigint, "user_input", "[1,3,5]", 0) from ctb;')
+        tdSql.checkRows(2);
+        tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":2}');
+        tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}');
+        tdSql.query('select histogram(col_bigint, "user_input", "[1,3,5]", 0) from tb;')
+        tdSql.checkRows(2);
+        tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":2}');
+        tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}');
+
+        tdSql.query('select histogram(col_bigint, "user_input", "[1,3,5,7]", 0) from stb;')
+        tdSql.checkRows(3);
+        tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":2}');
+        tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}');
+        tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}');
+        tdSql.query('select histogram(col_bigint, "user_input", "[1,3,5,7]", 0) from ctb;')
+        tdSql.checkRows(3);
+        tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":2}');
+        tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}');
+        tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}');
+        tdSql.query('select histogram(col_bigint, "user_input", "[1,3,5,7]", 0) from tb;')
+        tdSql.checkRows(3);
+        tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":2}');
+        tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}');
+        tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}');
+
+        tdSql.query('select histogram(col_bigint, "user_input", "[0,10,20,100]", 0) from stb;')
+        tdSql.checkRows(3);
+        tdSql.checkData(0, 0, '{"lower_bin":0, "upper_bin":10, "count":10}');
+        tdSql.checkData(1, 0, '{"lower_bin":10, "upper_bin":20, "count":2}');
+        tdSql.checkData(2, 0, '{"lower_bin":20, "upper_bin":100, "count":1}');
+        tdSql.query('select histogram(col_bigint, "user_input", "[0,10,20,100]", 0) from ctb;')
+        tdSql.checkRows(3);
+        tdSql.checkData(0, 0, '{"lower_bin":0, "upper_bin":10, "count":10}');
+        tdSql.checkData(1, 0, '{"lower_bin":10, "upper_bin":20, "count":2}');
+        tdSql.checkData(2, 0, '{"lower_bin":20, "upper_bin":100, "count":1}');
+        tdSql.query('select histogram(col_bigint, "user_input", "[0,10,20,100]", 0) from tb;')
+        tdSql.checkRows(3);
+        tdSql.checkData(0, 0, '{"lower_bin":0, "upper_bin":10, "count":10}');
+        tdSql.checkData(1, 0, '{"lower_bin":10, "upper_bin":20, "count":2}');
+        tdSql.checkData(2, 0, '{"lower_bin":20, "upper_bin":100, "count":1}');
+
+        tdSql.query('select histogram(col_bigint, "user_input", "[-10,10,20,100]", 0) from stb;')
+        tdSql.checkRows(3);
+        tdSql.checkData(0, 0, '{"lower_bin":-10, "upper_bin":10, "count":12}');
+        tdSql.checkData(1, 0, '{"lower_bin":10, "upper_bin":20, "count":2}');
+        tdSql.checkData(2, 0, '{"lower_bin":20, "upper_bin":100, "count":1}');
+        tdSql.query('select histogram(col_bigint, "user_input", "[-10,10,20,100]", 0) from ctb;')
+        tdSql.checkRows(3);
+        tdSql.checkData(0, 0, '{"lower_bin":-10, "upper_bin":10, "count":12}');
+        tdSql.checkData(1, 0, '{"lower_bin":10, "upper_bin":20, "count":2}');
+        tdSql.checkData(2, 0, '{"lower_bin":20, "upper_bin":100, "count":1}');
+        tdSql.query('select histogram(col_bigint, "user_input", "[-10,10,20,100]", 0) from tb;')
+        tdSql.checkRows(3);
+        tdSql.checkData(0, 0, '{"lower_bin":-10, "upper_bin":10, "count":12}');
+        tdSql.checkData(1, 0, '{"lower_bin":10, "upper_bin":20, "count":2}');
+        tdSql.checkData(2, 0, '{"lower_bin":20, "upper_bin":100, "count":1}');
+
+        tdSql.query('select histogram(col_bigint, "user_input", "[-8.9,9.9,19.9,99.9]", 0) from stb;')
+        tdSql.checkRows(3);
+        tdSql.checkData(0, 0, '{"lower_bin":-8.9, "upper_bin":9.9, "count":10}');
+        tdSql.checkData(1, 0, '{"lower_bin":9.9, "upper_bin":19.9, "count":2}');
+        tdSql.checkData(2, 0, '{"lower_bin":19.9, "upper_bin":99.9, "count":2}');
+        tdSql.query('select histogram(col_bigint, "user_input", "[-8.9,9.9,19.9,99.9]", 0) from ctb;')
+        tdSql.checkRows(3);
+        tdSql.checkData(0, 0, '{"lower_bin":-8.9, "upper_bin":9.9, "count":10}');
+        tdSql.checkData(1, 0, '{"lower_bin":9.9, "upper_bin":19.9, "count":2}');
+        tdSql.checkData(2, 0, '{"lower_bin":19.9, "upper_bin":99.9, "count":2}');
+        tdSql.query('select histogram(col_bigint, "user_input", "[-8.9,9.9,19.9,99.9]", 0) from tb;')
+        tdSql.checkRows(3);
+        tdSql.checkData(0, 0, '{"lower_bin":-8.9, "upper_bin":9.9, "count":10}');
+        tdSql.checkData(1, 0, '{"lower_bin":9.9, "upper_bin":19.9, "count":2}');
+        tdSql.checkData(2, 0, '{"lower_bin":19.9, "upper_bin":99.9, "count":2}');
+
+        tdSql.query('select histogram(col_bigint, "user_input", "[-99999999999999,9.9,19.9,99999999999999]", 0) from stb;')
+        tdSql.checkRows(3);
+        tdSql.checkData(0, 0, '{"lower_bin":-1e+14, "upper_bin":9.9, "count":11}');
+        tdSql.checkData(1, 0, '{"lower_bin":9.9, "upper_bin":19.9, "count":2}');
+        tdSql.checkData(2, 0, '{"lower_bin":19.9, "upper_bin":1e+14, "count":2}');
+        tdSql.query('select histogram(col_bigint, "user_input", "[-99999999999999,9.9,19.9,99999999999999]", 0) from ctb;')
+        tdSql.checkRows(3);
+        tdSql.checkData(0, 0, '{"lower_bin":-1e+14, "upper_bin":9.9, "count":11}');
+        tdSql.checkData(1, 0, '{"lower_bin":9.9, "upper_bin":19.9, "count":2}');
+        tdSql.checkData(2, 0, '{"lower_bin":19.9, "upper_bin":1e+14, "count":2}');
+        tdSql.query('select histogram(col_bigint, "user_input", "[-99999999999999,9.9,19.9,99999999999999]", 0) from tb;')
+        tdSql.checkRows(3);
+        tdSql.checkData(0, 0, '{"lower_bin":-1e+14, "upper_bin":9.9, "count":11}');
+        tdSql.checkData(1, 0, '{"lower_bin":9.9, "upper_bin":19.9, "count":2}');
+        tdSql.checkData(2, 0, '{"lower_bin":19.9, "upper_bin":1e+14, "count":2}');
+
+        #FLOAT
+        tdSql.query('select histogram(col_float, "user_input", "[1,3,5]", 0) from stb;')
+        tdSql.checkRows(2);
+        tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":2}');
+        tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}');
+        tdSql.query('select histogram(col_float, "user_input", "[1,3,5]", 0) from ctb;')
+        tdSql.checkRows(2);
+        tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":2}');
+        tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}');
+        tdSql.query('select histogram(col_float, "user_input", "[1,3,5]", 0) from tb;')
+        tdSql.checkRows(2);
+        tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":2}');
+        tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}');
+
+        tdSql.query('select histogram(col_float, "user_input", "[1,3,5,7]", 0) from stb;')
+        tdSql.checkRows(3);
+        tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":2}');
+        tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}');
+        tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}');
+        tdSql.query('select histogram(col_float, "user_input", "[1,3,5,7]", 0) from ctb;')
+        tdSql.checkRows(3);
+        tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":2}');
+        tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}');
+        tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}');
+        tdSql.query('select histogram(col_float, "user_input", "[1,3,5,7]", 0) from tb;')
+        tdSql.checkRows(3);
+        tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":2}');
+        tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}');
+        tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}');
+
+        tdSql.query('select histogram(col_float, "user_input", "[0,10,20,100]", 0) from stb;')
+        tdSql.checkRows(3);
+        tdSql.checkData(0, 0, '{"lower_bin":0, "upper_bin":10, "count":9}');
+        tdSql.checkData(1, 0, '{"lower_bin":10, "upper_bin":20, "count":2}');
+        tdSql.checkData(2, 0, '{"lower_bin":20, "upper_bin":100, "count":2}');
+        tdSql.query('select histogram(col_float, "user_input", "[0,10,20,100]", 0) from ctb;')
+        tdSql.checkRows(3);
+        tdSql.checkData(0, 0, '{"lower_bin":0, "upper_bin":10, "count":9}');
+        tdSql.checkData(1, 0, '{"lower_bin":10, "upper_bin":20, "count":2}');
+        tdSql.checkData(2, 0, '{"lower_bin":20, "upper_bin":100, "count":2}');
+        tdSql.query('select histogram(col_float, "user_input", "[0,10,20,100]", 0) from tb;')
+        tdSql.checkRows(3);
+        tdSql.checkData(0, 0, '{"lower_bin":0, "upper_bin":10, "count":9}');
+        tdSql.checkData(1, 0, '{"lower_bin":10, "upper_bin":20, "count":2}');
+        tdSql.checkData(2, 0, '{"lower_bin":20, "upper_bin":100, "count":2}');
+
+        tdSql.query('select histogram(col_float, "user_input", "[-10,10,20,100]", 0) from stb;')
+        tdSql.checkRows(3);
+        tdSql.checkData(0, 0, '{"lower_bin":-10, "upper_bin":10, "count":11}');
+        tdSql.checkData(1, 0, '{"lower_bin":10, "upper_bin":20, "count":2}');
+        tdSql.checkData(2, 0, '{"lower_bin":20, "upper_bin":100, "count":2}');
+        tdSql.query('select histogram(col_float, "user_input", "[-10,10,20,100]", 0) from ctb;')
+        tdSql.checkRows(3);
+        tdSql.checkData(0, 0, '{"lower_bin":-10, "upper_bin":10, "count":11}');
+        tdSql.checkData(1, 0, '{"lower_bin":10, "upper_bin":20, "count":2}');
+        tdSql.checkData(2, 0, '{"lower_bin":20, "upper_bin":100, "count":2}');
+        tdSql.query('select histogram(col_float, "user_input", "[-10,10,20,100]", 0) from tb;')
+        tdSql.checkRows(3);
+        tdSql.checkData(0, 0, '{"lower_bin":-10, "upper_bin":10, "count":11}');
+        tdSql.checkData(1, 0, '{"lower_bin":10, "upper_bin":20, "count":2}');
+        tdSql.checkData(2, 0, '{"lower_bin":20, "upper_bin":100, "count":2}');
+
+        tdSql.query('select histogram(col_float, "user_input", "[-9.4,9.6,20.4,99.9]", 0) from stb;')
+        tdSql.checkRows(3);
+        tdSql.checkData(0, 0, '{"lower_bin":-9.4, "upper_bin":9.6, "count":10}');
+        tdSql.checkData(1, 0, '{"lower_bin":9.6, "upper_bin":20.4, "count":2}');
+        tdSql.checkData(2, 0, '{"lower_bin":20.4, "upper_bin":99.9, "count":2}');
+        tdSql.query('select histogram(col_float, "user_input", "[-9.4,9.6,20.4,99.9]", 0) from ctb;')
+        tdSql.checkRows(3);
+        tdSql.checkData(0, 0, '{"lower_bin":-9.4, "upper_bin":9.6, "count":10}');
+        tdSql.checkData(1, 0, '{"lower_bin":9.6, "upper_bin":20.4, "count":2}');
+        tdSql.checkData(2, 0, '{"lower_bin":20.4, "upper_bin":99.9, "count":2}');
+        tdSql.query('select histogram(col_float, "user_input", "[-9.4,9.6,20.4,99.9]", 0) from tb;')
+        tdSql.checkRows(3);
+        tdSql.checkData(0, 0, '{"lower_bin":-9.4, "upper_bin":9.6, "count":10}');
+        tdSql.checkData(1, 0, '{"lower_bin":9.6, "upper_bin":20.4, "count":2}');
+        tdSql.checkData(2, 0, '{"lower_bin":20.4, "upper_bin":99.9, "count":2}');
+
+        tdSql.query('select histogram(col_float, "user_input", "[-99999999999999,9.9,19.9,99999999999999]", 0) from stb;')
+        tdSql.checkRows(3);
+        tdSql.checkData(0, 0, '{"lower_bin":-1e+14, "upper_bin":9.9, "count":11}');
+        tdSql.checkData(1, 0, '{"lower_bin":9.9, "upper_bin":19.9, "count":2}');
+        tdSql.checkData(2, 0, '{"lower_bin":19.9, "upper_bin":1e+14, "count":2}');
+        tdSql.query('select histogram(col_float, "user_input", "[-99999999999999,9.9,19.9,99999999999999]", 0) from ctb;')
+        tdSql.checkRows(3);
+        tdSql.checkData(0, 0, '{"lower_bin":-1e+14, "upper_bin":9.9, "count":11}');
+        tdSql.checkData(1, 0, '{"lower_bin":9.9, "upper_bin":19.9, "count":2}');
+        tdSql.checkData(2, 0, '{"lower_bin":19.9, "upper_bin":1e+14, "count":2}');
+        tdSql.query('select histogram(col_float, "user_input", "[-99999999999999,9.9,19.9,99999999999999]", 0) from tb;')
+        tdSql.checkRows(3);
+        tdSql.checkData(0, 0, '{"lower_bin":-1e+14, "upper_bin":9.9, "count":11}');
+        tdSql.checkData(1, 0, '{"lower_bin":9.9, "upper_bin":19.9, "count":2}');
+        tdSql.checkData(2, 0, '{"lower_bin":19.9, "upper_bin":1e+14, "count":2}');
+
+        #DOUBLE
+        tdSql.query('select histogram(col_double, "user_input", "[1,3,5]", 0) from stb;')
+        tdSql.checkRows(2);
+        tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":2}');
+        tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}');
+        tdSql.query('select histogram(col_double, "user_input", "[1,3,5]", 0) from ctb;')
+        tdSql.checkRows(2);
+        tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":2}');
+        tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}');
+        tdSql.query('select histogram(col_double, "user_input", "[1,3,5]", 0) from tb;')
+        tdSql.checkRows(2);
+        tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":2}');
+        tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}');
+
+        tdSql.query('select histogram(col_double, "user_input", "[1,3,5,7]", 0) from stb;')
+        tdSql.checkRows(3);
+        tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":2}');
+        tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}');
+        tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}');
+        tdSql.query('select histogram(col_double, "user_input", "[1,3,5,7]", 0) from ctb;')
+        tdSql.checkRows(3);
+        tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":2}');
+        tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}');
+        tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}');
+        tdSql.query('select histogram(col_double, "user_input", "[1,3,5,7]", 0) from tb;')
+        tdSql.checkRows(3);
+        tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":2}');
+        tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}');
+        tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}');
+
+        tdSql.query('select histogram(col_double, "user_input", "[0,10,20,100]", 0) from stb;')
+        tdSql.checkRows(3);
+        tdSql.checkData(0, 0, '{"lower_bin":0, "upper_bin":10, "count":9}');
+        tdSql.checkData(1, 0, '{"lower_bin":10, "upper_bin":20, "count":2}');
+        tdSql.checkData(2, 0, '{"lower_bin":20, "upper_bin":100, "count":2}');
+        tdSql.query('select histogram(col_double, "user_input", "[0,10,20,100]", 0) from ctb;')
+        tdSql.checkRows(3);
+        tdSql.checkData(0, 0, '{"lower_bin":0, "upper_bin":10, "count":9}');
+        tdSql.checkData(1, 0, '{"lower_bin":10, "upper_bin":20, "count":2}');
+        tdSql.checkData(2, 0, '{"lower_bin":20, "upper_bin":100, "count":2}');
+        tdSql.query('select histogram(col_double, "user_input", "[0,10,20,100]", 0) from tb;')
+        tdSql.checkRows(3);
+        tdSql.checkData(0, 0, '{"lower_bin":0, "upper_bin":10, "count":9}');
+        tdSql.checkData(1, 0, '{"lower_bin":10, "upper_bin":20, "count":2}');
+        tdSql.checkData(2, 0, '{"lower_bin":20, "upper_bin":100, "count":2}');
+
+        tdSql.query('select histogram(col_double, "user_input", "[-10,10,20,100]", 0) from stb;')
+        tdSql.checkRows(3);
+        tdSql.checkData(0, 0, '{"lower_bin":-10, "upper_bin":10, "count":11}');
+        tdSql.checkData(1, 0, '{"lower_bin":10, "upper_bin":20, "count":2}');
+        tdSql.checkData(2, 0, '{"lower_bin":20, "upper_bin":100, "count":2}');
+        tdSql.query('select histogram(col_double, "user_input", "[-10,10,20,100]", 0) from ctb;')
+        tdSql.checkRows(3);
+        tdSql.checkData(0, 0, '{"lower_bin":-10, "upper_bin":10, "count":11}');
+        tdSql.checkData(1, 0, '{"lower_bin":10, "upper_bin":20, "count":2}');
+        tdSql.checkData(2, 0, '{"lower_bin":20, "upper_bin":100, "count":2}');
+        tdSql.query('select histogram(col_double, "user_input", "[-10,10,20,100]", 0) from tb;')
+        tdSql.checkRows(3);
+        tdSql.checkData(0, 0, '{"lower_bin":-10, "upper_bin":10, "count":11}');
+        tdSql.checkData(1, 0, '{"lower_bin":10, "upper_bin":20, "count":2}');
+        tdSql.checkData(2, 0, '{"lower_bin":20, "upper_bin":100, "count":2}');
+
+        tdSql.query('select histogram(col_double, "user_input", "[-9.4,9.6,20.4,99.9]", 0) from stb;')
+        tdSql.checkRows(3);
+        tdSql.checkData(0, 0, '{"lower_bin":-9.4, "upper_bin":9.6, "count":10}');
+        tdSql.checkData(1, 0, '{"lower_bin":9.6, "upper_bin":20.4, "count":2}');
+        tdSql.checkData(2, 0, '{"lower_bin":20.4, "upper_bin":99.9, "count":2}');
+        tdSql.query('select histogram(col_double, "user_input", "[-9.4,9.6,20.4,99.9]", 0) from ctb;')
+        tdSql.checkRows(3);
+        tdSql.checkData(0, 0, '{"lower_bin":-9.4, "upper_bin":9.6, "count":10}');
+        tdSql.checkData(1, 0, '{"lower_bin":9.6, "upper_bin":20.4, "count":2}');
+        tdSql.checkData(2, 0, '{"lower_bin":20.4, "upper_bin":99.9, "count":2}');
+        tdSql.query('select histogram(col_double, "user_input", "[-9.4,9.6,20.4,99.9]", 0) from tb;')
+        tdSql.checkRows(3);
+        tdSql.checkData(0, 0, '{"lower_bin":-9.4, "upper_bin":9.6, "count":10}');
+        tdSql.checkData(1, 0, '{"lower_bin":9.6, "upper_bin":20.4, "count":2}');
+        tdSql.checkData(2, 0, '{"lower_bin":20.4, "upper_bin":99.9, "count":2}');
+
+        tdSql.query('select histogram(col_double, "user_input", "[-99999999999999,9.9,19.9,99999999999999]", 0) from stb;')
+        tdSql.checkRows(3);
+        tdSql.checkData(0, 0, '{"lower_bin":-1e+14, "upper_bin":9.9, "count":11}');
+        tdSql.checkData(1, 0, '{"lower_bin":9.9, "upper_bin":19.9, "count":2}');
+        tdSql.checkData(2, 0, '{"lower_bin":19.9, "upper_bin":1e+14, "count":2}');
+        tdSql.query('select histogram(col_double, "user_input", "[-99999999999999,9.9,19.9,99999999999999]", 0) from ctb;')
+        tdSql.checkRows(3);
+        tdSql.checkData(0, 0, '{"lower_bin":-1e+14, "upper_bin":9.9, "count":11}');
+        tdSql.checkData(1, 0, '{"lower_bin":9.9, "upper_bin":19.9, "count":2}');
+        tdSql.checkData(2, 0, '{"lower_bin":19.9, "upper_bin":1e+14, "count":2}');
+        tdSql.query('select histogram(col_double, "user_input", "[-99999999999999,9.9,19.9,99999999999999]", 0) from tb;')
+        tdSql.checkRows(3);
+        tdSql.checkData(0, 0, '{"lower_bin":-1e+14, "upper_bin":9.9, "count":11}');
+        tdSql.checkData(1, 0, '{"lower_bin":9.9, "upper_bin":19.9, "count":2}');
+        tdSql.checkData(2, 0, '{"lower_bin":19.9, "upper_bin":1e+14, "count":2}');
+
+        #ERROR CASE
+        tdSql.error('select histogram(col_double, 1, "[1,5,3,7]", 0) from stb;')
+        tdSql.error('select histogram(col_double, 1, "[1,5,3,7]", 0) from ctb;')
+        tdSql.error('select histogram(col_double, 1, "[1,5,3,7]", 0) from tb;')
+        tdSql.error('select histogram(col_double, -1.0, "[1,5,3,7]", 0) from stb;')
+        tdSql.error('select histogram(col_double, -1.0, "[1,5,3,7]", 0) from ctb;')
+        tdSql.error('select histogram(col_double, -1.0, "[1,5,3,7]", 0) from tb;')
+        tdSql.error('select histogram(col_double, true, "[1,5,3,7]", 0) from stb;')
+        tdSql.error('select histogram(col_double, false, "[1,5,3,7]", 0) from ctb;')
+        tdSql.error('select histogram(col_double, true, "[1,5,3,7]", 0) from tb;')
+        tdSql.error('select histogram(col_double, "user", "[1,5,3,7]", 0) from stb;')
+        tdSql.error('select histogram(col_double, "user", "[1,5,3,7]", 0) from ctb;')
+        tdSql.error('select histogram(col_double, "user", "[1,5,3,7]", 0) from tb;')
+        tdSql.error('select histogram(col_double, "user_input", "[1,5,3,7]", 0) from stb;')
+        tdSql.error('select histogram(col_double, "user_input", "[1,5,3,7]", 0) from ctb;')
+        tdSql.error('select histogram(col_double, "user_input", "[1,5,3,7]", 0) from tb;')
+        tdSql.error('select histogram(col_double, "user_input", "[1,-1,3,-3]", 0) from stb;')
+        tdSql.error('select histogram(col_double, "user_input", "[1,-1,3,-3]", 0) from ctb;')
+        tdSql.error('select histogram(col_double, "user_input", "[1,-1,3,-3]", 0) from tb;')
+        tdSql.error('select histogram(col_double, "user_input", "[1.0,5.5,3.3,7.7]", 0) from stb;')
+        tdSql.error('select histogram(col_double, "user_input", "[1.0,5.5,3.3,7.7]", 0) from ctb;')
+        tdSql.error('select histogram(col_double, "user_input", "[1.0,5.5,3.3,7.7]", 0) from tb;')
+        tdSql.error('select histogram(col_double, "user_input", "[1,1,1]", 0) from stb;')
+        tdSql.error('select histogram(col_double, "user_input", "[1,1,1]", 0) from ctb;')
+        tdSql.error('select histogram(col_double, "user_input", "[1,1,1]", 0) from tb;')
+        tdSql.error('select histogram(col_double, "user_input", "[-1,-1,1]", 0) from stb;')
+        tdSql.error('select histogram(col_double, "user_input", "[-1,-1,1]", 0) from ctb;')
+        tdSql.error('select histogram(col_double, "user_input", "[-1,-1,1]", 0) from tb;')
+        tdSql.error('select histogram(col_double, "user_input", "[false,3,5]", 0) from stb;')
+        tdSql.error('select histogram(col_double, "user_input", "[false,3,5]", 0) from ctb;')
+        tdSql.error('select histogram(col_double, "user_input", "[false,3,5]", 0) from tb;')
+        tdSql.error('select histogram(col_double, "user_input", "[1,true,5]", 0) from stb;')
+        tdSql.error('select histogram(col_double, "user_input", "[1,true,5]", 0) from ctb;')
+        tdSql.error('select histogram(col_double, "user_input", "[1,true,5]", 0) from tb;')
+        tdSql.error('select histogram(col_double, "user_input", "[1.0,"abc",5]", 0) from stb;')
+        tdSql.error('select histogram(col_double, "user_input", "[1.0,"abc",5]", 0) from ctb;')
+        tdSql.error('select histogram(col_double, "user_input", "[1.0,"abc",5]", 0) from tb;')
+        tdSql.error('select histogram(col_double, "user_input", "[1.0, 5, "中文"]", 0) from stb;')
+        tdSql.error('select histogram(col_double, "user_input", "[1.0, 5, "中文"]", 0) from ctb;')
+        tdSql.error('select histogram(col_double, "user_input", "[1.0, 5, "中文"]", 0) from tb;')
+        tdSql.error('select histogram(col_double, "user_input", "{1.0, 3.0, 5.0}", 0) from stb;')
+        tdSql.error('select histogram(col_double, "user_input", "{1.0, 3.0, 5.0}", 0) from ctb;')
+        tdSql.error('select histogram(col_double, "user_input", "{1.0, 3.0, 5.0}", 0) from tb;')
+        tdSql.error('select histogram(col_double, \'user_input\', \'{"start": 1.0, "width": 3.0, "count": 5, "infinity": true}\', 0) from stb;')
+        tdSql.error('select histogram(col_double, \'user_input\', \'{"start": 1.0, "width": 3.0, "count": 5, "infinity": true}\', 0) from ctb;')
+        tdSql.error('select histogram(col_double, \'user_input\', \'{"start": 1.0, "width": 3.0, "count": 5, "infinity": true}\', 0) from tb;')
+        tdSql.error('select histogram(col_double, \'user_input\', \'{"start": 1.0, "factor": 3.0, "count": 5, "infinity": true}\', 0) from stb;')
+        tdSql.error('select histogram(col_double, \'user_input\', \'{"start": 1.0, "factor": 3.0, "count": 5, "infinity": true}\', 0) from ctb;')
+        tdSql.error('select histogram(col_double, \'user_input\', \'{"start": 1.0, "factor": 3.0, "count": 5, "infinity": true}\', 0) from tb;')
+
+
+        ## linear_bins ##
+        #INTEGER
+        tdSql.query('select histogram(col_tinyint, \'linear_bin\', \'{"start": 1, "width": 3, "count": 8, "infinity": false}\', 0) from stb;')
+        tdSql.checkRows(8);
+        tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":4, "count":3}');
+        tdSql.checkData(1, 0, '{"lower_bin":4, "upper_bin":7, "count":3}');
+        tdSql.checkData(2, 0, '{"lower_bin":7, "upper_bin":10, "count":3}');
+        tdSql.checkData(3, 0, '{"lower_bin":10, "upper_bin":13, "count":0}');
+        tdSql.checkData(4, 0, '{"lower_bin":13, "upper_bin":16, "count":1}');
+        tdSql.checkData(5, 0, '{"lower_bin":16, "upper_bin":19, "count":0}');
+        tdSql.checkData(6, 0, '{"lower_bin":19, "upper_bin":22, "count":1}');
+        tdSql.checkData(7, 0, '{"lower_bin":22, "upper_bin":25, "count":0}');
+        tdSql.query('select histogram(col_tinyint, \'linear_bin\', \'{"start": 1, "width": 3, "count": 8, "infinity": false}\', 0) from ctb;')
+        tdSql.checkRows(8);
+        tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":4, "count":3}');
+        tdSql.checkData(1, 0, '{"lower_bin":4, "upper_bin":7, "count":3}');
+        tdSql.checkData(2, 0, '{"lower_bin":7, "upper_bin":10, "count":3}');
+        tdSql.checkData(3, 0, '{"lower_bin":10, "upper_bin":13, "count":0}');
+        tdSql.checkData(4, 0, '{"lower_bin":13, "upper_bin":16, "count":1}');
+        tdSql.checkData(5, 0, '{"lower_bin":16, "upper_bin":19, "count":0}');
+        tdSql.checkData(6, 0, '{"lower_bin":19, "upper_bin":22, "count":1}');
+        tdSql.checkData(7, 0, '{"lower_bin":22, "upper_bin":25, "count":0}');
+        tdSql.query('select histogram(col_tinyint, \'linear_bin\', \'{"start": 1, "width": 3, "count": 8, "infinity": false}\', 0) from tb;')
+        tdSql.checkRows(8);
+        tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":4, "count":3}');
+        tdSql.checkData(1, 0, '{"lower_bin":4, "upper_bin":7, "count":3}');
+        tdSql.checkData(2, 0, '{"lower_bin":7, "upper_bin":10, "count":3}');
+        tdSql.checkData(3, 0, '{"lower_bin":10, "upper_bin":13, "count":0}');
+        tdSql.checkData(4, 0, '{"lower_bin":13, "upper_bin":16, "count":1}');
+        tdSql.checkData(5, 0, '{"lower_bin":16, "upper_bin":19, "count":0}');
+        tdSql.checkData(6, 0, '{"lower_bin":19, "upper_bin":22, "count":1}');
+        tdSql.checkData(7, 0, '{"lower_bin":22, "upper_bin":25, "count":0}');
+
+        tdSql.query('select histogram(col_tinyint, \'linear_bin\', \'{"start": -10.0, "width": 3.0, "count": 8, "infinity": false}\', 0) from stb;')
+        tdSql.checkRows(8);
+        tdSql.checkData(0, 0, '{"lower_bin":-10, "upper_bin":-7, "count":1}');
+        tdSql.checkData(1, 0, '{"lower_bin":-7, "upper_bin":-4, "count":0}');
+        tdSql.checkData(2, 0, '{"lower_bin":-4, "upper_bin":-1, "count":1}');
+        tdSql.checkData(3, 0, '{"lower_bin":-1, "upper_bin":2, "count":2}');
+        tdSql.checkData(4, 0, '{"lower_bin":2, "upper_bin":5, "count":3}');
+        tdSql.checkData(5, 0, '{"lower_bin":5, "upper_bin":8, "count":3}');
+        tdSql.checkData(6, 0, '{"lower_bin":8, "upper_bin":11, "count":2}');
+        tdSql.checkData(7, 0, '{"lower_bin":11, "upper_bin":14, "count":0}');
+        tdSql.query('select histogram(col_tinyint, \'linear_bin\', \'{"start": -10.0, "width": 3.0, "count": 8, "infinity": false}\', 0) from ctb;')
+        tdSql.checkRows(8);
+        tdSql.checkData(0, 0, '{"lower_bin":-10, "upper_bin":-7, "count":1}');
+        tdSql.checkData(1, 0, '{"lower_bin":-7, "upper_bin":-4, "count":0}');
+        tdSql.checkData(2, 0, '{"lower_bin":-4, "upper_bin":-1, "count":1}');
+        tdSql.checkData(3, 0, '{"lower_bin":-1, "upper_bin":2, "count":2}');
+        tdSql.checkData(4, 0, '{"lower_bin":2, "upper_bin":5, "count":3}');
+        tdSql.checkData(5, 0, '{"lower_bin":5, "upper_bin":8, "count":3}');
+        tdSql.checkData(6, 0, '{"lower_bin":8, "upper_bin":11, "count":2}');
+        tdSql.checkData(7, 0, '{"lower_bin":11, "upper_bin":14, "count":0}');
+        tdSql.query('select histogram(col_tinyint, \'linear_bin\', \'{"start": -10.0, "width": 3.0, "count": 8, "infinity": false}\', 0) from tb;')
+        tdSql.checkRows(8);
+        tdSql.checkData(0, 0, '{"lower_bin":-10, "upper_bin":-7, "count":1}');
+        tdSql.checkData(1, 0, '{"lower_bin":-7, "upper_bin":-4, "count":0}');
+        tdSql.checkData(2, 0, '{"lower_bin":-4, "upper_bin":-1, "count":1}');
+        tdSql.checkData(3, 0, '{"lower_bin":-1, "upper_bin":2, "count":2}');
+        tdSql.checkData(4, 0, '{"lower_bin":2, "upper_bin":5, "count":3}');
+        tdSql.checkData(5, 0, '{"lower_bin":5, "upper_bin":8, "count":3}');
+        tdSql.checkData(6, 0, '{"lower_bin":8, "upper_bin":11, "count":2}');
+        tdSql.checkData(7, 0, '{"lower_bin":11, "upper_bin":14, "count":0}');
+
+        tdSql.query('select histogram(col_tinyint, \'linear_bin\', \'{"start": -2.5, "width": 0.5, "count": 8, "infinity": false}\', 0) from stb;')
+        tdSql.checkRows(8);
+        tdSql.checkData(0, 0, '{"lower_bin":-2.5, "upper_bin":-2, "count":0}');
+        tdSql.checkData(1, 0, '{"lower_bin":-2, "upper_bin":-1.5, "count":0}');
+        tdSql.checkData(2, 0, '{"lower_bin":-1.5, "upper_bin":-1, "count":1}');
+        tdSql.checkData(3, 0, '{"lower_bin":-1, "upper_bin":-0.5, "count":0}');
+        tdSql.checkData(4, 0, '{"lower_bin":-0.5, "upper_bin":0, "count":0}');
+        tdSql.checkData(5, 0, '{"lower_bin":0, "upper_bin":0.5, "count":0}');
+        tdSql.checkData(6, 0, '{"lower_bin":0.5, "upper_bin":1, "count":1}');
+        tdSql.checkData(7, 0, '{"lower_bin":1, "upper_bin":1.5, "count":0}');
+        tdSql.query('select histogram(col_tinyint, \'linear_bin\', \'{"start": -2.5, "width": 0.5, "count": 8, "infinity": false}\', 0) from ctb;')
+        tdSql.checkRows(8);
+        tdSql.checkData(0, 0, '{"lower_bin":-2.5, "upper_bin":-2, "count":0}');
+        tdSql.checkData(1, 0, '{"lower_bin":-2, "upper_bin":-1.5, "count":0}');
+        tdSql.checkData(2, 0, '{"lower_bin":-1.5, "upper_bin":-1, "count":1}');
+        tdSql.checkData(3, 0, '{"lower_bin":-1, "upper_bin":-0.5, "count":0}');
+        tdSql.checkData(4, 0, '{"lower_bin":-0.5, "upper_bin":0, "count":0}');
+        tdSql.checkData(5, 0, '{"lower_bin":0, "upper_bin":0.5, "count":0}');
+        tdSql.checkData(6, 0, '{"lower_bin":0.5, "upper_bin":1, "count":1}');
+        tdSql.checkData(7, 0, '{"lower_bin":1, "upper_bin":1.5, "count":0}');
+        tdSql.query('select histogram(col_tinyint, \'linear_bin\', \'{"start": -2.5, "width": 0.5, "count": 8, "infinity": false}\', 0) from tb;')
+        tdSql.checkRows(8);
+        tdSql.checkData(0, 0, '{"lower_bin":-2.5, "upper_bin":-2, "count":0}');
+        tdSql.checkData(1, 0, '{"lower_bin":-2, "upper_bin":-1.5, "count":0}');
+        tdSql.checkData(2, 0, '{"lower_bin":-1.5, "upper_bin":-1, "count":1}');
+        tdSql.checkData(3, 0, '{"lower_bin":-1, "upper_bin":-0.5, "count":0}');
+        tdSql.checkData(4, 0, '{"lower_bin":-0.5, "upper_bin":0, "count":0}');
+        tdSql.checkData(5, 0, '{"lower_bin":0, "upper_bin":0.5, "count":0}');
+        tdSql.checkData(6, 0, '{"lower_bin":0.5, "upper_bin":1, "count":1}');
+        tdSql.checkData(7, 0, '{"lower_bin":1, "upper_bin":1.5, "count":0}');
+
+        tdSql.query('select histogram(col_tinyint, \'linear_bin\', \'{"start": 4, "width": -0.5, "count": 10, "infinity": false}\', 0) from stb;')
+        tdSql.checkRows(10);
+        tdSql.checkData(0, 0, '{"lower_bin":3.5, "upper_bin":4, "count":1}');
+        tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":3.5, "count":0}');
+        tdSql.checkData(2, 0, '{"lower_bin":2.5, "upper_bin":3, "count":1}');
+        tdSql.checkData(3, 0, '{"lower_bin":2, "upper_bin":2.5, "count":0}');
+        tdSql.checkData(4, 0, '{"lower_bin":1.5, "upper_bin":2, "count":1}');
+        tdSql.checkData(5, 0, '{"lower_bin":1, "upper_bin":1.5, "count":0}');
+        tdSql.checkData(6, 0, '{"lower_bin":0.5, "upper_bin":1, "count":1}');
+        tdSql.checkData(7, 0, '{"lower_bin":0, "upper_bin":0.5, "count":0}');
+        tdSql.checkData(8, 0, '{"lower_bin":-0.5, "upper_bin":0, "count":0}');
+        tdSql.checkData(9, 0, '{"lower_bin":-1, "upper_bin":-0.5, "count":0}');
+        tdSql.query('select histogram(col_tinyint, \'linear_bin\', \'{"start": 4, "width": -0.5, "count": 10, "infinity": false}\', 0) from ctb;')
+        tdSql.checkRows(10);
+        tdSql.checkData(0, 0, '{"lower_bin":3.5, "upper_bin":4, "count":1}');
+        tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":3.5, "count":0}');
+        tdSql.checkData(2, 0, '{"lower_bin":2.5, "upper_bin":3, "count":1}');
+        tdSql.checkData(3, 0, '{"lower_bin":2, "upper_bin":2.5, "count":0}');
+        tdSql.checkData(4, 0, '{"lower_bin":1.5, "upper_bin":2, "count":1}');
+        tdSql.checkData(5, 0, '{"lower_bin":1, "upper_bin":1.5, "count":0}');
+        tdSql.checkData(6, 0, '{"lower_bin":0.5, "upper_bin":1, "count":1}');
+        tdSql.checkData(7, 0, '{"lower_bin":0, "upper_bin":0.5, "count":0}');
+        tdSql.checkData(8, 0, '{"lower_bin":-0.5, "upper_bin":0, "count":0}');
+        tdSql.checkData(9, 0, '{"lower_bin":-1, "upper_bin":-0.5, "count":0}');
+        tdSql.query('select histogram(col_tinyint, \'linear_bin\', \'{"start": 4, "width": -0.5, "count": 10, "infinity": false}\', 0) from tb;')
+        tdSql.checkRows(10);
+        tdSql.checkData(0, 0, '{"lower_bin":3.5, "upper_bin":4, "count":1}');
+        tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":3.5, "count":0}');
+        tdSql.checkData(2, 0, '{"lower_bin":2.5, "upper_bin":3, "count":1}');
+        tdSql.checkData(3, 0, '{"lower_bin":2, "upper_bin":2.5, "count":0}');
+        tdSql.checkData(4, 0, '{"lower_bin":1.5, "upper_bin":2, "count":1}');
+        tdSql.checkData(5, 0, '{"lower_bin":1, "upper_bin":1.5, "count":0}');
+        tdSql.checkData(6, 0, '{"lower_bin":0.5, "upper_bin":1, "count":1}');
+        tdSql.checkData(7, 0, '{"lower_bin":0, "upper_bin":0.5, "count":0}');
+        tdSql.checkData(8, 0, '{"lower_bin":-0.5, "upper_bin":0, "count":0}');
+        tdSql.checkData(9, 0, '{"lower_bin":-1, "upper_bin":-0.5, "count":0}');
+
+        tdSql.query('select histogram(col_tinyint, \'linear_bin\', \'{"start": 1, "width": 0.5, "count": 1.9999, "infinity": false}\', 0) from stb;')
+        tdSql.checkRows(1);
+        tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":1.5, "count":0}');
+        tdSql.query('select histogram(col_tinyint, \'linear_bin\', \'{"start": 1, "width": 0.5, "count": 1.9999, "infinity": false}\', 0) from ctb;')
+        tdSql.checkRows(1);
+        tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":1.5, "count":0}');
+        tdSql.query('select histogram(col_tinyint, \'linear_bin\', \'{"start": 1, "width": 0.5, "count": 1.9999, "infinity": false}\', 0) from tb;')
+        tdSql.checkRows(1);
+        tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":1.5, "count":0}');
+
+        tdSql.query('select histogram(col_tinyint, \'linear_bin\', \'{"start": 1, "width": 0.5, "count": 1.99999999999999999, "infinity": false}\', 0) from stb;')
+        tdSql.checkRows(2);
+        tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":1.5, "count":0}');
+        tdSql.checkData(1, 0, '{"lower_bin":1.5, "upper_bin":2, "count":1}');
+        tdSql.query('select histogram(col_tinyint, \'linear_bin\', \'{"start": 1, "width": 0.5, "count": 1.99999999999999999, "infinity": false}\', 0) from ctb;')
+        tdSql.checkRows(2);
+        tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":1.5, "count":0}');
+        tdSql.checkData(1, 0, '{"lower_bin":1.5, "upper_bin":2, "count":1}');
+        tdSql.query('select histogram(col_tinyint, \'linear_bin\', \'{"start": 1, "width": 0.5, "count": 1.99999999999999999, "infinity": false}\', 0) from tb;')
+        tdSql.checkRows(2);
+        tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":1.5, "count":0}');
+        tdSql.checkData(1, 0, '{"lower_bin":1.5, "upper_bin":2, "count":1}');
+
+        tdSql.query('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": 5, "count": 5, "infinity": true}\', 0) from stb;')
+        tdSql.checkRows(7);
+        tdSql.checkData(0, 0, '{"lower_bin":-inf, "upper_bin":0, "count":2}');
+        tdSql.checkData(1, 0, '{"lower_bin":0, "upper_bin":5, "count":5}');
+        tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":10, "count":5}');
+        tdSql.checkData(3, 0, '{"lower_bin":10, "upper_bin":15, "count":1}');
+        tdSql.checkData(4, 0, '{"lower_bin":15, "upper_bin":20, "count":1}');
+        tdSql.checkData(5, 0, '{"lower_bin":20, "upper_bin":25, "count":0}');
+        tdSql.checkData(6, 0, '{"lower_bin":25, "upper_bin":inf, "count":1}');
+        tdSql.query('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": 5, "count": 5, "infinity": true}\', 0) from ctb;')
+        tdSql.checkRows(7);
+        tdSql.checkData(0, 0, '{"lower_bin":-inf, "upper_bin":0, "count":2}');
+        tdSql.checkData(1, 0, '{"lower_bin":0, "upper_bin":5, "count":5}');
+        tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":10, "count":5}');
+        tdSql.checkData(3, 0, '{"lower_bin":10, "upper_bin":15, "count":1}');
+        tdSql.checkData(4, 0, '{"lower_bin":15, "upper_bin":20, "count":1}');
+        tdSql.checkData(5, 0, '{"lower_bin":20, "upper_bin":25, "count":0}');
+        tdSql.checkData(6, 0, '{"lower_bin":25, "upper_bin":inf, "count":1}');
+        tdSql.query('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": 5, "count": 5, "infinity": true}\', 0) from tb;')
+        tdSql.checkRows(7);
+        tdSql.checkData(0, 0, '{"lower_bin":-inf, "upper_bin":0, "count":2}');
+        tdSql.checkData(1, 0, '{"lower_bin":0, "upper_bin":5, "count":5}');
+        tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":10, "count":5}');
+        tdSql.checkData(3, 0, '{"lower_bin":10, "upper_bin":15, "count":1}');
+        tdSql.checkData(4, 0, '{"lower_bin":15, "upper_bin":20, "count":1}');
+        tdSql.checkData(5, 0, '{"lower_bin":20, "upper_bin":25, "count":0}');
+        tdSql.checkData(6, 0, '{"lower_bin":25, "upper_bin":inf, "count":1}');
+
+        tdSql.query('select histogram(col_tinyint, \'linear_bin\', \'{"start": -1.76e+308, "width": 5, "count": 1, "infinity": true}\', 0) from stb;')
+        tdSql.checkRows(3);
+        tdSql.checkData(0, 0, '{"lower_bin":-inf, "upper_bin":-1.76e+308, "count":0}');
+        tdSql.checkData(1, 0, '{"lower_bin":-1.76e+308, "upper_bin":-1.76e+308, "count":0}');
+        tdSql.checkData(2, 0, '{"lower_bin":-1.76e+308, "upper_bin":inf, "count":15}');
+        tdSql.query('select histogram(col_tinyint, \'linear_bin\', \'{"start": -1.76e+308, "width": 5, "count": 1, "infinity": true}\', 0) from ctb;')
+        tdSql.checkRows(3);
+        tdSql.checkData(0, 0, '{"lower_bin":-inf, "upper_bin":-1.76e+308, "count":0}');
+        tdSql.checkData(1, 0, '{"lower_bin":-1.76e+308, "upper_bin":-1.76e+308, "count":0}');
+        tdSql.checkData(2, 0, '{"lower_bin":-1.76e+308, "upper_bin":inf, "count":15}');
+        tdSql.query('select histogram(col_tinyint, \'linear_bin\', \'{"start": -1.76e+308, "width": 5, "count": 1, "infinity": true}\', 0) from tb;')
+        tdSql.checkRows(3);
+        tdSql.checkData(0, 0, '{"lower_bin":-inf, "upper_bin":-1.76e+308, "count":0}');
+        tdSql.checkData(1, 0, '{"lower_bin":-1.76e+308, "upper_bin":-1.76e+308, "count":0}');
+        tdSql.checkData(2, 0, '{"lower_bin":-1.76e+308, "upper_bin":inf, "count":15}');
+
+        tdSql.query('select histogram(col_tinyint, \'linear_bin\', \'{"start": 10, "width": -5, "count": 3, "infinity": true}\', 0) from stb;')
+        tdSql.checkRows(5);
+        tdSql.checkData(0, 0, '{"lower_bin":10, "upper_bin":inf, "count":3}');
+        tdSql.checkData(1, 0, '{"lower_bin":5, "upper_bin":10, "count":5}');
+        tdSql.checkData(2, 0, '{"lower_bin":0, "upper_bin":5, "count":5}');
+        tdSql.checkData(3, 0, '{"lower_bin":-5, "upper_bin":0, "count":1}');
+        tdSql.checkData(4, 0, '{"lower_bin":-inf, "upper_bin":-5, "count":1}');
+        tdSql.query('select histogram(col_tinyint, \'linear_bin\', \'{"start": 10, "width": -5, "count": 3, "infinity": true}\', 0) from ctb;')
+        tdSql.checkRows(5);
+        tdSql.checkData(0, 0, '{"lower_bin":10, "upper_bin":inf, "count":3}');
+        tdSql.checkData(1, 0, '{"lower_bin":5, "upper_bin":10, "count":5}');
+        tdSql.checkData(2, 0, '{"lower_bin":0, "upper_bin":5, "count":5}');
+        tdSql.checkData(3, 0, '{"lower_bin":-5, "upper_bin":0, "count":1}');
+        tdSql.checkData(4, 0, '{"lower_bin":-inf, "upper_bin":-5, "count":1}');
+        tdSql.query('select histogram(col_tinyint, \'linear_bin\', \'{"start": 10, "width": -5, "count": 3, "infinity": true}\', 0) from tb;')
+        tdSql.checkRows(5);
+        tdSql.checkData(0, 0, '{"lower_bin":10, "upper_bin":inf, "count":3}');
+        tdSql.checkData(1, 0, '{"lower_bin":5, "upper_bin":10, "count":5}');
+        tdSql.checkData(2, 0, '{"lower_bin":0, "upper_bin":5, "count":5}');
+        tdSql.checkData(3, 0, '{"lower_bin":-5, "upper_bin":0, "count":1}');
+        tdSql.checkData(4, 0, '{"lower_bin":-inf, "upper_bin":-5, "count":1}');
+
+        tdSql.query('select histogram(col_tinyint, \'linear_bin\', \'{"start": -0.7e+308, "width": 0.7e+308, "count": 2, "infinity": false}\', 0) from stb;')
+        tdSql.checkRows(2);
+        tdSql.checkData(0, 0, '{"lower_bin":-7e+307, "upper_bin":0, "count":2}');
+        tdSql.checkData(1, 0, '{"lower_bin":0, "upper_bin":7e+307, "count":13}');
+        tdSql.query('select histogram(col_tinyint, \'linear_bin\', \'{"start": -0.7e+308, "width": 0.7e+308, "count": 2, "infinity": false}\', 0) from ctb;')
+        tdSql.checkRows(2);
+        tdSql.checkData(0, 0, '{"lower_bin":-7e+307, "upper_bin":0, "count":2}');
+        tdSql.checkData(1, 0, '{"lower_bin":0, "upper_bin":7e+307, "count":13}');
+        tdSql.query('select histogram(col_tinyint, \'linear_bin\', \'{"start": -0.7e+308, "width": 0.7e+308, "count": 2, "infinity": false}\', 0) from tb;')
+        tdSql.checkRows(2);
+        tdSql.checkData(0, 0, '{"lower_bin":-7e+307, "upper_bin":0, "count":2}');
+        tdSql.checkData(1, 0, '{"lower_bin":0, "upper_bin":7e+307, "count":13}');
+
+        tdSql.query('select histogram(col_tinyint, \'linear_bin\', \'{"start": -0.7e+308, "width": 0.7e+308, "count": 2, "infinity": true}\', 0) from stb;')
+        tdSql.checkRows(4);
+        tdSql.checkData(0, 0, '{"lower_bin":-inf, "upper_bin":-7e+307, "count":0}');
+        tdSql.checkData(1, 0, '{"lower_bin":-7e+307, "upper_bin":0, "count":2}');
+        tdSql.checkData(2, 0, '{"lower_bin":0, "upper_bin":7e+307, "count":13}');
+        tdSql.checkData(3, 0, '{"lower_bin":7e+307, "upper_bin":inf, "count":0}');
+        tdSql.query('select histogram(col_tinyint, \'linear_bin\', \'{"start": -0.7e+308, "width": 0.7e+308, "count": 2, "infinity": true}\', 0) from ctb;')
+        tdSql.checkRows(4);
+        tdSql.checkData(0, 0, '{"lower_bin":-inf, "upper_bin":-7e+307, "count":0}');
+        tdSql.checkData(1, 0, '{"lower_bin":-7e+307, "upper_bin":0, "count":2}');
+        tdSql.checkData(2, 0, '{"lower_bin":0, "upper_bin":7e+307, "count":13}');
+        tdSql.checkData(3, 0, '{"lower_bin":7e+307, "upper_bin":inf, "count":0}');
+        tdSql.query('select histogram(col_tinyint, \'linear_bin\', \'{"start": -0.7e+308, "width": 0.7e+308, "count": 2, "infinity": true}\', 0) from tb;')
+        tdSql.checkRows(4);
+        tdSql.checkData(0, 0, '{"lower_bin":-inf, "upper_bin":-7e+307, "count":0}');
+        tdSql.checkData(1, 0, '{"lower_bin":-7e+307, "upper_bin":0, "count":2}');
+        tdSql.checkData(2, 0, '{"lower_bin":0, "upper_bin":7e+307, "count":13}');
+        tdSql.checkData(3, 0, '{"lower_bin":7e+307, "upper_bin":inf, "count":0}');
+
+        #FLOATING NUMBER
+        tdSql.query('select histogram(col_float, \'linear_bin\', \'{"start": 1, "width": 3, "count": 8, "infinity": false}\', 0) from stb;')
+        tdSql.checkRows(8);
+        tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":4, "count":3}');
+        tdSql.checkData(1, 0, '{"lower_bin":4, "upper_bin":7, "count":3}');
+        tdSql.checkData(2, 0, '{"lower_bin":7, "upper_bin":10, "count":3}');
+        tdSql.checkData(3, 0, '{"lower_bin":10, "upper_bin":13, "count":1}');
+        tdSql.checkData(4, 0, '{"lower_bin":13, "upper_bin":16, "count":1}');
+        tdSql.checkData(5, 0, '{"lower_bin":16, "upper_bin":19, "count":0}');
+        tdSql.checkData(6, 0, '{"lower_bin":19, "upper_bin":22, "count":1}');
+        tdSql.checkData(7, 0, '{"lower_bin":22, "upper_bin":25, "count":0}');
+        tdSql.query('select histogram(col_float, \'linear_bin\', \'{"start": 1, "width": 3, "count": 8, "infinity": false}\', 0) from ctb;')
+        tdSql.checkRows(8);
+        tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":4, "count":3}');
+        tdSql.checkData(1, 0, '{"lower_bin":4, "upper_bin":7, "count":3}');
+        tdSql.checkData(2, 0, '{"lower_bin":7, "upper_bin":10, "count":3}');
+        tdSql.checkData(3, 0, '{"lower_bin":10, "upper_bin":13, "count":1}');
+        tdSql.checkData(4, 0, '{"lower_bin":13, "upper_bin":16, "count":1}');
+        tdSql.checkData(5, 0, '{"lower_bin":16, "upper_bin":19, "count":0}');
+        tdSql.checkData(6, 0, '{"lower_bin":19, "upper_bin":22, "count":1}');
+        tdSql.checkData(7, 0, '{"lower_bin":22, "upper_bin":25, "count":0}');
+        tdSql.query('select histogram(col_float, \'linear_bin\', \'{"start": 1, "width": 3, "count": 8, "infinity": false}\', 0) from tb;')
+        tdSql.checkRows(8);
+        tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":4, "count":3}');
+        tdSql.checkData(1, 0, '{"lower_bin":4, "upper_bin":7, "count":3}');
+        tdSql.checkData(2, 0, '{"lower_bin":7, "upper_bin":10, "count":3}');
+        tdSql.checkData(3, 0, '{"lower_bin":10, "upper_bin":13, "count":1}');
+        tdSql.checkData(4, 0, '{"lower_bin":13, "upper_bin":16, "count":1}');
+        tdSql.checkData(5, 0, '{"lower_bin":16, "upper_bin":19, "count":0}');
+        tdSql.checkData(6, 0, '{"lower_bin":19, "upper_bin":22, "count":1}');
+        tdSql.checkData(7, 0, '{"lower_bin":22, "upper_bin":25, "count":0}');
+
+        tdSql.query('select histogram(col_float, \'linear_bin\', \'{"start": -10.0, "width": 3.0, "count": 8, "infinity": false}\', 0) from stb;')
+        tdSql.checkRows(8);
+        tdSql.checkData(0, 0, '{"lower_bin":-10, "upper_bin":-7, "count":1}');
+        tdSql.checkData(1, 0, '{"lower_bin":-7, "upper_bin":-4, "count":0}');
+        tdSql.checkData(2, 0, '{"lower_bin":-4, "upper_bin":-1, "count":1}');
+        tdSql.checkData(3, 0, '{"lower_bin":-1, "upper_bin":2, "count":1}');
+        tdSql.checkData(4, 0, '{"lower_bin":2, "upper_bin":5, "count":3}');
+        tdSql.checkData(5, 0, '{"lower_bin":5, "upper_bin":8, "count":3}');
+        tdSql.checkData(6, 0, '{"lower_bin":8, "upper_bin":11, "count":3}');
+        tdSql.checkData(7, 0, '{"lower_bin":11, "upper_bin":14, "count":0}');
+        tdSql.query('select histogram(col_float, \'linear_bin\', \'{"start": -10.0, "width": 3.0, "count": 8, "infinity": false}\', 0) from ctb;')
+        tdSql.checkRows(8);
+        tdSql.checkData(0, 0, '{"lower_bin":-10, "upper_bin":-7, "count":1}');
+        tdSql.checkData(1, 0, '{"lower_bin":-7, "upper_bin":-4, "count":0}');
+        tdSql.checkData(2, 0, '{"lower_bin":-4, "upper_bin":-1, "count":1}');
+        tdSql.checkData(3, 0, '{"lower_bin":-1, "upper_bin":2, "count":1}');
+        tdSql.checkData(4, 0, '{"lower_bin":2, "upper_bin":5, "count":3}');
+        tdSql.checkData(5, 0, '{"lower_bin":5, "upper_bin":8, "count":3}');
+        tdSql.checkData(6, 0, '{"lower_bin":8, "upper_bin":11, "count":3}');
+        tdSql.checkData(7, 0, '{"lower_bin":11, "upper_bin":14, "count":0}');
+        tdSql.query('select histogram(col_float, \'linear_bin\', \'{"start": -10.0, "width": 3.0, "count": 8, "infinity": false}\', 0) from tb;')
+        tdSql.checkRows(8);
+        tdSql.checkData(0, 0, '{"lower_bin":-10, "upper_bin":-7, "count":1}');
+        tdSql.checkData(1, 0, '{"lower_bin":-7, "upper_bin":-4, "count":0}');
+        tdSql.checkData(2, 0, '{"lower_bin":-4, "upper_bin":-1, "count":1}');
+        tdSql.checkData(3, 0, '{"lower_bin":-1, "upper_bin":2, "count":1}');
+        tdSql.checkData(4, 0, '{"lower_bin":2, "upper_bin":5, "count":3}');
+        tdSql.checkData(5, 0, '{"lower_bin":5, "upper_bin":8, "count":3}');
+        tdSql.checkData(6, 0, '{"lower_bin":8, "upper_bin":11, "count":3}');
+        tdSql.checkData(7, 0, '{"lower_bin":11, "upper_bin":14, "count":0}');
+
+        tdSql.query('select histogram(col_float, \'linear_bin\', \'{"start": -2.5, "width": 0.5, "count": 8, "infinity": false}\', 0) from stb;')
+        tdSql.checkRows(8);
+        tdSql.checkData(0, 0, '{"lower_bin":-2.5, "upper_bin":-2, "count":0}');
+        tdSql.checkData(1, 0, '{"lower_bin":-2, "upper_bin":-1.5, "count":1}');
+        tdSql.checkData(2, 0, '{"lower_bin":-1.5, "upper_bin":-1, "count":0}');
+        tdSql.checkData(3, 0, '{"lower_bin":-1, "upper_bin":-0.5, "count":0}');
+        tdSql.checkData(4, 0, '{"lower_bin":-0.5, "upper_bin":0, "count":0}');
+        tdSql.checkData(5, 0, '{"lower_bin":0, "upper_bin":0.5, "count":0}');
+        tdSql.checkData(6, 0, '{"lower_bin":0.5, "upper_bin":1, "count":0}');
+        tdSql.checkData(7, 0, '{"lower_bin":1, "upper_bin":1.5, "count":1}');
+        tdSql.query('select histogram(col_float, \'linear_bin\', \'{"start": -2.5, "width": 0.5, "count": 8, "infinity": false}\', 0) from ctb;')
+        tdSql.checkRows(8);
+        tdSql.checkData(0, 0, '{"lower_bin":-2.5, "upper_bin":-2, "count":0}');
+        tdSql.checkData(1, 0, '{"lower_bin":-2, "upper_bin":-1.5, "count":1}');
+        tdSql.checkData(2, 0, '{"lower_bin":-1.5, "upper_bin":-1, "count":0}');
+        tdSql.checkData(3, 0, '{"lower_bin":-1, "upper_bin":-0.5, "count":0}');
+        tdSql.checkData(4, 0, '{"lower_bin":-0.5, "upper_bin":0, "count":0}');
+        tdSql.checkData(5, 0, '{"lower_bin":0, "upper_bin":0.5, "count":0}');
+        tdSql.checkData(6, 0, '{"lower_bin":0.5, "upper_bin":1, "count":0}');
+        tdSql.checkData(7, 0, '{"lower_bin":1, "upper_bin":1.5, "count":1}');
+        tdSql.query('select histogram(col_float, \'linear_bin\', \'{"start": -2.5, "width": 0.5, "count": 8, "infinity": false}\', 0) from tb;')
+        tdSql.checkRows(8);
+        tdSql.checkData(0, 0, '{"lower_bin":-2.5, "upper_bin":-2, "count":0}');
+        tdSql.checkData(1, 0, '{"lower_bin":-2, "upper_bin":-1.5, "count":1}');
+        tdSql.checkData(2, 0, '{"lower_bin":-1.5, "upper_bin":-1, "count":0}');
+        tdSql.checkData(3, 0, '{"lower_bin":-1, "upper_bin":-0.5, "count":0}');
+        tdSql.checkData(4, 0, '{"lower_bin":-0.5, "upper_bin":0, "count":0}');
+        tdSql.checkData(5, 0, '{"lower_bin":0, "upper_bin":0.5, "count":0}');
+        tdSql.checkData(6, 0, '{"lower_bin":0.5, "upper_bin":1, "count":0}');
+        tdSql.checkData(7, 0, '{"lower_bin":1, "upper_bin":1.5, "count":1}');
+
+        tdSql.query('select histogram(col_float, \'linear_bin\', \'{"start": 4, "width": -0.5, "count": 10, "infinity": false}\', 0) from stb;')
+        tdSql.checkRows(10);
+        tdSql.checkData(0, 0, '{"lower_bin":3.5, "upper_bin":4, "count":0}');
+        tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":3.5, "count":1}');
+        tdSql.checkData(2, 0, '{"lower_bin":2.5, "upper_bin":3, "count":0}');
+        tdSql.checkData(3, 0, '{"lower_bin":2, "upper_bin":2.5, "count":1}');
+        tdSql.checkData(4, 0, '{"lower_bin":1.5, "upper_bin":2, "count":0}');
+        tdSql.checkData(5, 0, '{"lower_bin":1, "upper_bin":1.5, "count":1}');
+        tdSql.checkData(6, 0, '{"lower_bin":0.5, "upper_bin":1, "count":0}');
+        tdSql.checkData(7, 0, '{"lower_bin":0, "upper_bin":0.5, "count":0}');
+        tdSql.checkData(8, 0, '{"lower_bin":-0.5, "upper_bin":0, "count":0}');
+        tdSql.checkData(9, 0, '{"lower_bin":-1, "upper_bin":-0.5, "count":0}');
+        tdSql.query('select histogram(col_float, \'linear_bin\', \'{"start": 4, "width": -0.5, "count": 10, "infinity": false}\', 0) from ctb;')
+        tdSql.checkRows(10);
+        tdSql.checkData(0, 0, '{"lower_bin":3.5, "upper_bin":4, "count":0}');
+        tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":3.5, "count":1}');
+        tdSql.checkData(2, 0, '{"lower_bin":2.5, "upper_bin":3, "count":0}');
+        tdSql.checkData(3, 0, '{"lower_bin":2, "upper_bin":2.5, "count":1}');
+        tdSql.checkData(4, 0, '{"lower_bin":1.5, "upper_bin":2, "count":0}');
+        tdSql.checkData(5, 0, '{"lower_bin":1, "upper_bin":1.5, "count":1}');
+        tdSql.checkData(6, 0, '{"lower_bin":0.5, "upper_bin":1, "count":0}');
+        tdSql.checkData(7, 0, '{"lower_bin":0, "upper_bin":0.5, "count":0}');
+        tdSql.checkData(8, 0, '{"lower_bin":-0.5, "upper_bin":0, "count":0}');
+        tdSql.checkData(9, 0, '{"lower_bin":-1, "upper_bin":-0.5, "count":0}');
+        tdSql.query('select histogram(col_float, \'linear_bin\', \'{"start": 4, "width": -0.5, "count": 10, "infinity": false}\', 0) from tb;')
+        tdSql.checkRows(10);
+        tdSql.checkData(0, 0, '{"lower_bin":3.5, "upper_bin":4, "count":0}');
+        tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":3.5, "count":1}');
+        tdSql.checkData(2, 0, '{"lower_bin":2.5, "upper_bin":3, "count":0}');
+        tdSql.checkData(3, 0, '{"lower_bin":2, "upper_bin":2.5, "count":1}');
+        tdSql.checkData(4, 0, '{"lower_bin":1.5, "upper_bin":2, "count":0}');
+        tdSql.checkData(5, 0, '{"lower_bin":1, "upper_bin":1.5, "count":1}');
+        tdSql.checkData(6, 0, '{"lower_bin":0.5, "upper_bin":1, "count":0}');
+        tdSql.checkData(7, 0, '{"lower_bin":0, "upper_bin":0.5, "count":0}');
+        tdSql.checkData(8, 0, '{"lower_bin":-0.5, "upper_bin":0, "count":0}');
+        tdSql.checkData(9, 0, '{"lower_bin":-1, "upper_bin":-0.5, "count":0}');
+
+        tdSql.query('select histogram(col_float, \'linear_bin\', \'{"start": 1, "width": 0.5, "count": 1.9999, "infinity": false}\', 0) from stb;')
+        tdSql.checkRows(1);
+        tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":1.5, "count":1}');
+        tdSql.query('select histogram(col_float, \'linear_bin\', \'{"start": 1, "width": 0.5, "count": 1.9999, "infinity": false}\', 0) from ctb;')
+        tdSql.checkRows(1);
+        tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":1.5, "count":1}');
+        tdSql.query('select histogram(col_float, \'linear_bin\', \'{"start": 1, "width": 0.5, "count": 1.9999, "infinity": false}\', 0) from tb;')
+        tdSql.checkRows(1);
+        tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":1.5, "count":1}');
+
+        tdSql.query('select histogram(col_float, \'linear_bin\', \'{"start": 1, "width": 0.5, "count": 1.99999999999999999, "infinity": false}\', 0) from stb;')
+        tdSql.checkRows(2);
+        tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":1.5, "count":1}');
+        tdSql.checkData(1, 0, '{"lower_bin":1.5, "upper_bin":2, "count":0}');
+        tdSql.query('select histogram(col_float, \'linear_bin\', \'{"start": 1, "width": 0.5, "count": 1.99999999999999999, "infinity": false}\', 0) from ctb;')
+        tdSql.checkRows(2);
+        tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":1.5, "count":1}');
+        tdSql.checkData(1, 0, '{"lower_bin":1.5, "upper_bin":2, "count":0}');
+        tdSql.query('select histogram(col_float, \'linear_bin\', \'{"start": 1, "width": 0.5, "count": 1.99999999999999999, "infinity": false}\', 0) from tb;')
+        tdSql.checkRows(2);
+        tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":1.5, "count":1}');
+        tdSql.checkData(1, 0, '{"lower_bin":1.5, "upper_bin":2, "count":0}');
+
+        tdSql.query('select histogram(col_float, \'linear_bin\', \'{"start": 0, "width": 5, "count": 5, "infinity": true}\', 0) from stb;')
+        tdSql.checkRows(7);
+        tdSql.checkData(0, 0, '{"lower_bin":-inf, "upper_bin":0, "count":2}');
+        tdSql.checkData(1, 0, '{"lower_bin":0, "upper_bin":5, "count":4}');
+        tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":10, "count":5}');
+        tdSql.checkData(3, 0, '{"lower_bin":10, "upper_bin":15, "count":1}');
+        tdSql.checkData(4, 0, '{"lower_bin":15, "upper_bin":20, "count":1}');
+        tdSql.checkData(5, 0, '{"lower_bin":20, "upper_bin":25, "count":1}');
+        tdSql.checkData(6, 0, '{"lower_bin":25, "upper_bin":inf, "count":1}');
+        tdSql.query('select histogram(col_float, \'linear_bin\', \'{"start": 0, "width": 5, "count": 5, "infinity": true}\', 0) from ctb;')
+        tdSql.checkRows(7);
+        tdSql.checkData(0, 0, '{"lower_bin":-inf, "upper_bin":0, "count":2}');
+        tdSql.checkData(1, 0, '{"lower_bin":0, "upper_bin":5, "count":4}');
+        tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":10, "count":5}');
+        tdSql.checkData(3, 0, '{"lower_bin":10, "upper_bin":15, "count":1}');
+        tdSql.checkData(4, 0, '{"lower_bin":15, "upper_bin":20, "count":1}');
+        tdSql.checkData(5, 0, '{"lower_bin":20, "upper_bin":25, "count":1}');
+        tdSql.checkData(6, 0, '{"lower_bin":25, "upper_bin":inf, "count":1}');
+        tdSql.query('select histogram(col_float, \'linear_bin\', \'{"start": 0, "width": 5, "count": 5, "infinity": true}\', 0) from tb;')
+        tdSql.checkRows(7);
+        tdSql.checkData(0, 0, '{"lower_bin":-inf, "upper_bin":0, "count":2}');
+        tdSql.checkData(1, 0, '{"lower_bin":0, "upper_bin":5, "count":4}');
+        tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":10, "count":5}');
+        tdSql.checkData(3, 0, '{"lower_bin":10, "upper_bin":15, "count":1}');
+        tdSql.checkData(4, 0, '{"lower_bin":15, "upper_bin":20, "count":1}');
+        tdSql.checkData(5, 0, '{"lower_bin":20, "upper_bin":25, "count":1}');
+        tdSql.checkData(6, 0, '{"lower_bin":25, "upper_bin":inf, "count":1}');
+
+        tdSql.query('select histogram(col_float, \'linear_bin\', \'{"start": 10, "width": -5, "count": 3, "infinity": true}\', 0) from stb;')
+        tdSql.checkRows(5);
+        tdSql.checkData(0, 0, '{"lower_bin":10, "upper_bin":inf, "count":4}');
+        tdSql.checkData(1, 0, '{"lower_bin":5, "upper_bin":10, "count":5}');
+        tdSql.checkData(2, 0, '{"lower_bin":0, "upper_bin":5, "count":4}');
+        tdSql.checkData(3, 0, '{"lower_bin":-5, "upper_bin":0, "count":1}');
+        tdSql.checkData(4, 0, '{"lower_bin":-inf, "upper_bin":-5, "count":1}');
+        tdSql.query('select histogram(col_float, \'linear_bin\', \'{"start": 10, "width": -5, "count": 3, "infinity": true}\', 0) from ctb;')
+        tdSql.checkRows(5);
+        tdSql.checkData(0, 0, '{"lower_bin":10, "upper_bin":inf, "count":4}');
+        tdSql.checkData(1, 0, '{"lower_bin":5, "upper_bin":10, "count":5}');
+        tdSql.checkData(2, 0, '{"lower_bin":0, "upper_bin":5, "count":4}');
+        tdSql.checkData(3, 0, '{"lower_bin":-5, "upper_bin":0, "count":1}');
+        tdSql.checkData(4, 0, '{"lower_bin":-inf, "upper_bin":-5, "count":1}');
+        tdSql.query('select histogram(col_float, \'linear_bin\', \'{"start": 10, "width": -5, "count": 3, "infinity": true}\', 0) from tb;')
+        tdSql.checkRows(5);
+        tdSql.checkData(0, 0, '{"lower_bin":10, "upper_bin":inf, "count":4}');
+        tdSql.checkData(1, 0, '{"lower_bin":5, "upper_bin":10, "count":5}');
+        tdSql.checkData(2, 0, '{"lower_bin":0, "upper_bin":5, "count":4}');
+        tdSql.checkData(3, 0, '{"lower_bin":-5, "upper_bin":0, "count":1}');
+        tdSql.checkData(4, 0, '{"lower_bin":-inf, "upper_bin":-5, "count":1}');
+
+        tdSql.query('select histogram(col_float, \'linear_bin\', \'{"start": -1.76e+308, "width": 5, "count": 1, "infinity": true}\', 0) from stb;')
+        tdSql.checkRows(3);
+        tdSql.checkData(0, 0, '{"lower_bin":-inf, "upper_bin":-1.76e+308, "count":0}');
+        tdSql.checkData(1, 0, '{"lower_bin":-1.76e+308, "upper_bin":-1.76e+308, "count":0}');
+        tdSql.checkData(2, 0, '{"lower_bin":-1.76e+308, "upper_bin":inf, "count":15}');
+        tdSql.query('select histogram(col_float, \'linear_bin\', \'{"start": -1.76e+308, "width": 5, "count": 1, "infinity": true}\', 0) from ctb;')
+        tdSql.checkRows(3);
+        tdSql.checkData(0, 0, '{"lower_bin":-inf, "upper_bin":-1.76e+308, "count":0}');
+        tdSql.checkData(1, 0, '{"lower_bin":-1.76e+308, "upper_bin":-1.76e+308, "count":0}');
+        tdSql.checkData(2, 0, '{"lower_bin":-1.76e+308, "upper_bin":inf, "count":15}');
+        tdSql.query('select histogram(col_float, \'linear_bin\', \'{"start": -1.76e+308, "width": 5, "count": 1, "infinity": true}\', 0) from tb;')
+        tdSql.checkRows(3);
+        tdSql.checkData(0, 0, '{"lower_bin":-inf, "upper_bin":-1.76e+308, "count":0}');
+        tdSql.checkData(1, 0, '{"lower_bin":-1.76e+308, "upper_bin":-1.76e+308, "count":0}');
+        tdSql.checkData(2, 0, '{"lower_bin":-1.76e+308, "upper_bin":inf, "count":15}');
+
+        tdSql.query('select histogram(col_float, \'linear_bin\', \'{"start": -0.7e+308, "width": 0.7e+308, "count": 2, "infinity": false}\', 0) from stb;')
+        tdSql.checkRows(2);
+        tdSql.checkData(0, 0, '{"lower_bin":-7e+307, "upper_bin":0, "count":2}');
+        tdSql.checkData(1, 0, '{"lower_bin":0, "upper_bin":7e+307, "count":13}');
+        tdSql.query('select histogram(col_float, \'linear_bin\', \'{"start": -0.7e+308, "width": 0.7e+308, "count": 2, "infinity": false}\', 0) from ctb;')
+        tdSql.checkRows(2);
+        tdSql.checkData(0, 0, '{"lower_bin":-7e+307, "upper_bin":0, "count":2}');
+        tdSql.checkData(1, 0, '{"lower_bin":0, "upper_bin":7e+307, "count":13}');
+        tdSql.query('select histogram(col_float, \'linear_bin\', \'{"start": -0.7e+308, "width": 0.7e+308, "count": 2, "infinity": false}\', 0) from tb;')
+        tdSql.checkRows(2);
+        tdSql.checkData(0, 0, '{"lower_bin":-7e+307, "upper_bin":0, "count":2}');
+        tdSql.checkData(1, 0, '{"lower_bin":0, "upper_bin":7e+307, "count":13}');
+
+        tdSql.query('select histogram(col_float, \'linear_bin\', \'{"start": -0.7e+308, "width": 0.7e+308, "count": 2, "infinity": true}\', 0) from stb;')
+        tdSql.checkRows(4);
+        tdSql.checkData(0, 0, '{"lower_bin":-inf, "upper_bin":-7e+307, "count":0}');
+        tdSql.checkData(1, 0, '{"lower_bin":-7e+307, "upper_bin":0, "count":2}');
+        tdSql.checkData(2, 0, '{"lower_bin":0, "upper_bin":7e+307, "count":13}');
+        tdSql.checkData(3, 0, '{"lower_bin":7e+307, "upper_bin":inf, "count":0}');
+        tdSql.query('select histogram(col_float, \'linear_bin\', \'{"start": -0.7e+308, "width": 0.7e+308, "count": 2, "infinity": true}\', 0) from ctb;')
+        tdSql.checkRows(4);
+        tdSql.checkData(0, 0, '{"lower_bin":-inf, "upper_bin":-7e+307, "count":0}');
+        tdSql.checkData(1, 0, '{"lower_bin":-7e+307, "upper_bin":0, "count":2}');
+        tdSql.checkData(2, 0, '{"lower_bin":0, "upper_bin":7e+307, "count":13}');
+        tdSql.checkData(3, 0, '{"lower_bin":7e+307, "upper_bin":inf, "count":0}');
+        tdSql.query('select histogram(col_float, \'linear_bin\', \'{"start": -0.7e+308, "width": 0.7e+308, "count": 2, "infinity": true}\', 0) from tb;')
+        tdSql.checkRows(4);
+        tdSql.checkData(0, 0, '{"lower_bin":-inf, "upper_bin":-7e+307, "count":0}');
+        tdSql.checkData(1, 0, '{"lower_bin":-7e+307, "upper_bin":0, "count":2}');
+        tdSql.checkData(2, 0, '{"lower_bin":0, "upper_bin":7e+307, "count":13}');
+        tdSql.checkData(3, 0, '{"lower_bin":7e+307, "upper_bin":inf, "count":0}');
+
+        tdSql.query('select histogram(col_float, \'linear_bin\', \'{"width":2, "start": 0, "count": 4, "infinity": false}\', 0) from stb;')
+        tdSql.checkRows(4);
+        tdSql.checkData(0, 0, '{"lower_bin":0, "upper_bin":2, "count":1}');
+        tdSql.checkData(1, 0, '{"lower_bin":2, "upper_bin":4, "count":2}');
+        tdSql.checkData(2, 0, '{"lower_bin":4, "upper_bin":6, "count":2}');
+        tdSql.checkData(3, 0, '{"lower_bin":6, "upper_bin":8, "count":2}');
+        tdSql.query('select histogram(col_float, \'linear_bin\', \'{"width":2, "start": 0, "count": 4, "infinity": false}\', 0) from ctb;')
+        tdSql.checkRows(4);
+        tdSql.checkData(0, 0, '{"lower_bin":0, "upper_bin":2, "count":1}');
+        tdSql.checkData(1, 0, '{"lower_bin":2, "upper_bin":4, "count":2}');
+        tdSql.checkData(2, 0, '{"lower_bin":4, "upper_bin":6, "count":2}');
+        tdSql.checkData(3, 0, '{"lower_bin":6, "upper_bin":8, "count":2}');
+        tdSql.query('select histogram(col_float, \'linear_bin\', \'{"width":2, "start": 0, "count": 4, "infinity": false}\', 0) from tb;')
+        tdSql.checkRows(4);
+        tdSql.checkData(0, 0, '{"lower_bin":0, "upper_bin":2, "count":1}');
+        tdSql.checkData(1, 0, '{"lower_bin":2, "upper_bin":4, "count":2}');
+        tdSql.checkData(2, 0, '{"lower_bin":4, "upper_bin":6, "count":2}');
+        tdSql.checkData(3, 0, '{"lower_bin":6, "upper_bin":8, "count":2}');
+
+        tdSql.query('select histogram(col_float, \'linear_bin\', \'{"width":2, "start": 0, "count": 4, "infinity": false}\', 0) from stb;')
+        tdSql.checkRows(4);
+        tdSql.checkData(0, 0, '{"lower_bin":0, "upper_bin":2, "count":1}');
+        tdSql.checkData(1, 0, '{"lower_bin":2, "upper_bin":4, "count":2}');
+        tdSql.checkData(2, 0, '{"lower_bin":4, "upper_bin":6, "count":2}');
+        tdSql.checkData(3, 0, '{"lower_bin":6, "upper_bin":8, "count":2}');
+        tdSql.query('select histogram(col_float, \'linear_bin\', \'{"width":2, "start": 0, "count": 4, "infinity": false}\', 0) from ctb;')
+        tdSql.checkRows(4);
+        tdSql.checkData(0, 0, '{"lower_bin":0, "upper_bin":2, "count":1}');
+        tdSql.checkData(1, 0, '{"lower_bin":2, "upper_bin":4, "count":2}');
+        tdSql.checkData(2, 0, '{"lower_bin":4, "upper_bin":6, "count":2}');
+        tdSql.checkData(3, 0, '{"lower_bin":6, "upper_bin":8, "count":2}');
+        tdSql.query('select histogram(col_float, \'linear_bin\', \'{"width":2, "start": 0, "count": 4, "infinity": false}\', 0) from tb;')
+        tdSql.checkRows(4);
+        tdSql.checkData(0, 0, '{"lower_bin":0, "upper_bin":2, "count":1}');
+        tdSql.checkData(1, 0, '{"lower_bin":2, "upper_bin":4, "count":2}');
+        tdSql.checkData(2, 0, '{"lower_bin":4, "upper_bin":6, "count":2}');
+        tdSql.checkData(3, 0, '{"lower_bin":6, "upper_bin":8, "count":2}');
+
+        tdSql.query('select histogram(col_float, \'linear_bin\', \'{"count": 4, "width":2, "start": 0, "infinity": false}\', 0) from stb;')
+        tdSql.checkRows(4);
+        tdSql.checkData(0, 0, '{"lower_bin":0, "upper_bin":2, "count":1}');
+        tdSql.checkData(1, 0, '{"lower_bin":2, "upper_bin":4, "count":2}');
+        tdSql.checkData(2, 0, '{"lower_bin":4, "upper_bin":6, "count":2}');
+        tdSql.checkData(3, 0, '{"lower_bin":6, "upper_bin":8, "count":2}');
+        tdSql.query('select histogram(col_float, \'linear_bin\', \'{"count": 4, "width":2, "start": 0, "infinity": false}\', 0) from ctb;')
+        tdSql.checkRows(4);
+        tdSql.checkData(0, 0, '{"lower_bin":0, "upper_bin":2, "count":1}');
+        tdSql.checkData(1, 0, '{"lower_bin":2, "upper_bin":4, "count":2}');
+        tdSql.checkData(2, 0, '{"lower_bin":4, "upper_bin":6, "count":2}');
+        tdSql.checkData(3, 0, '{"lower_bin":6, "upper_bin":8, "count":2}');
+        tdSql.query('select histogram(col_float, \'linear_bin\', \'{"count": 4, "width":2, "start": 0, "infinity": false}\', 0) from tb;')
+        tdSql.checkRows(4);
+        tdSql.checkData(0, 0, '{"lower_bin":0, "upper_bin":2, "count":1}');
tdSql.checkData(1, 0, '{"lower_bin":2, "upper_bin":4, "count":2}');
+ tdSql.checkData(2, 0, '{"lower_bin":4, "upper_bin":6, "count":2}');
+ tdSql.checkData(3, 0, '{"lower_bin":6, "upper_bin":8, "count":2}');
+
+ tdSql.query('select histogram(col_float, \'linear_bin\', \'{"infinity": false, "width":2, "start": 0, "count": 4}\', 0) from stb;')
+ tdSql.checkRows(4);
+ tdSql.checkData(0, 0, '{"lower_bin":0, "upper_bin":2, "count":1}');
+ tdSql.checkData(1, 0, '{"lower_bin":2, "upper_bin":4, "count":2}');
+ tdSql.checkData(2, 0, '{"lower_bin":4, "upper_bin":6, "count":2}');
+ tdSql.checkData(3, 0, '{"lower_bin":6, "upper_bin":8, "count":2}');
+ tdSql.query('select histogram(col_float, \'linear_bin\', \'{"infinity": false, "width":2, "start": 0, "count": 4}\', 0) from ctb;')
+ tdSql.checkRows(4);
+ tdSql.checkData(0, 0, '{"lower_bin":0, "upper_bin":2, "count":1}');
+ tdSql.checkData(1, 0, '{"lower_bin":2, "upper_bin":4, "count":2}');
+ tdSql.checkData(2, 0, '{"lower_bin":4, "upper_bin":6, "count":2}');
+ tdSql.checkData(3, 0, '{"lower_bin":6, "upper_bin":8, "count":2}');
+ tdSql.query('select histogram(col_float, \'linear_bin\', \'{"infinity": false, "width":2, "start": 0, "count": 4}\', 0) from tb;')
+ tdSql.checkRows(4);
+ tdSql.checkData(0, 0, '{"lower_bin":0, "upper_bin":2, "count":1}');
+ tdSql.checkData(1, 0, '{"lower_bin":2, "upper_bin":4, "count":2}');
+ tdSql.checkData(2, 0, '{"lower_bin":4, "upper_bin":6, "count":2}');
+ tdSql.checkData(3, 0, '{"lower_bin":6, "upper_bin":8, "count":2}');
+
+ # Negative cases for linear_bin, as the statements below show: "start" and
+ # "width" must be finite JSON numbers (booleans, strings, bare identifiers
+ # and out-of-double-range values are rejected), "width" must be non-zero,
+ # bin edges must not overflow the double range, "count" must be a number in
+ # (0, 1000], "infinity" must be a JSON boolean, and unknown keys, unquoted
+ # keys or a non-object bin description are rejected as well.
+ #ERROR CASE
+ tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": true, "width": 5, "count": 5, "infinity": false}\', 0) from stb;')
+ tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": true, "width": 5, "count": 5, "infinity": false}\', 0) from ctb;')
+ tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": true, "width": 5, "count": 5, "infinity": false}\', 0) from tb;')
+ tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": false, "width": 5, "count": 5, "infinity": false}\', 0) from stb;')
+ tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": false, "width": 5, "count": 5, "infinity": false}\', 0) from ctb;')
+ tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": false, "width": 5, "count": 5, "infinity": false}\', 0) from tb;')
+ tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": "abc", "width": 5, "count": 5, "infinity": false}\', 0) from stb;')
+ tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": "abc", "width": 5, "count": 5, "infinity": false}\', 0) from ctb;')
+ tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": "abc", "width": 5, "count": 5, "infinity": false}\', 0) from tb;')
+ tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": "中文", "width": 5, "count": 5, "infinity": false}\', 0) from stb;')
+ tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": "中文", "width": 5, "count": 5, "infinity": false}\', 0) from ctb;')
+ tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": "中文", "width": 5, "count": 5, "infinity": false}\', 0) from tb;')
+ tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": abc, "width": 5, "count": 5, "infinity": false}\', 0) from stb;')
+ tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": abc, "width": 5, "count": 5, "infinity": false}\', 0) from ctb;')
+ tdSql.error('select histogram(col_tinyint, \'linear_bin\', 
\'{"start": abc, "width": 5, "count": 5, "infinity": false}\', 0) from tb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": -1.80e+308, "width": 5, "count": 5, "infinity": false}\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": -1.80e+308, "width": 5, "count": 5, "infinity": false}\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": -1.80e+308, "width": 5, "count": 5, "infinity": false}\', 0) from tb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 1.80e+308, "width": 5, "count": 5, "infinity": false}\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 1.80e+308, "width": 5, "count": 5, "infinity": false}\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 1.80e+308, "width": 5, "count": 5, "infinity": false}\', 0) from tb;') + + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": true, "count": 5, "infinity": false}\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": true, "count": 5, "infinity": false}\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": true, "count": 5, "infinity": false}\', 0) from tb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": false, "count": 5, "infinity": false}\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": false, "count": 5, "infinity": false}\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": false, "count": 5, "infinity": false}\', 0) from tb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": "abc", "count": 5, "infinity": false}\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": "abc", "count": 5, "infinity": false}\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": "abc", "count": 5, "infinity": false}\', 0) from tb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": "中文", "count": 5, "infinity": false}\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": "中文", "count": 5, "infinity": false}\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": "中文", "count": 5, "infinity": false}\', 0) from tb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": abc, "count": 5, "infinity": false}\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": abc, "count": 5, "infinity": false}\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": abc, "count": 5, "infinity": false}\', 0) from tb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": 0, "count": 5, "infinity": false}\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": 0, "count": 5, "infinity": false}\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": 0, "count": 5, "infinity": false}\', 0) from tb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": -1.80e+308, 
"count": 5, "infinity": false}\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": -1.80e+308, "count": 5, "infinity": false}\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": -1.80e+308, "count": 5, "infinity": false}\', 0) from tb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": 1.80e+308, "count": 5, "infinity": false}\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": 1.80e+308, "count": 5, "infinity": false}\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": 1.80e+308, "count": 5, "infinity": false}\', 0) from tb;') + + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": -1.4e+308, "width": 1.4e+308, "count": 3, "infinity": false}\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": -1.4e+308, "width": 1.4e+308, "count": 3, "infinity": false}\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": -1.4e+308, "width": 1.4e+308, "count": 3, "infinity": false}\', 0) from tb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": -1.4e+308, "width": 1.4e+308, "count": 3, "infinity": true}\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": -1.4e+308, "width": 1.4e+308, "count": 3, "infinity": true}\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": -1.4e+308, "width": 1.4e+308, "count": 3, "infinity": true}\', 0) from tb;') + + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": 1, "count": -1, "infinity": false}\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": 1, "count": -1, "infinity": false}\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": 1, "count": -1, "infinity": false}\', 0) from tb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": 1, "count": 0, "infinity": true}\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": 1, "count": 0, "infinity": true}\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": 1, "count": 0, "infinity": true}\', 0) from tb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": 1, "count": 1001, "infinity": true}\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": 1, "count": 1001, "infinity": true}\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": 1, "count": 1001, "infinity": true}\', 0) from tb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": 1, "count": true, "infinity": true}\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": 1, "count": true, "infinity": true}\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": 1, "count": true, "infinity": true}\', 0) from tb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": 1, "count": false, "infinity": true}\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, 
"width": 1, "count": false, "infinity": true}\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": 1, "count": false, "infinity": true}\', 0) from tb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": 1, "count": "abc", "infinity": true}\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": 1, "count": "abc", "infinity": true}\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": 1, "count": "abc", "infinity": true}\', 0) from tb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": 1, "count": "中文", "infinity": true}\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": 1, "count": "中文", "infinity": true}\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": 1, "count": "中文", "infinity": true}\', 0) from tb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": 1, "count": abc, "infinity": true}\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": 1, "count": abc, "infinity": true}\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": 1, "count": abc, "infinity": true}\', 0) from tb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": 1, "count": 1.8e+308, "infinity": true}\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": 1, "count": 1.8e+308, "infinity": true}\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": 1, "count": 1.8e+308, "infinity": true}\', 0) from tb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": 1, "count": -1.8e+308, "infinity": true}\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": 1, "count": -1.8e+308, "infinity": true}\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": 1, "count": -1.8e+308, "infinity": true}\', 0) from tb;') + + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": 1, "count": 1, "infinity": 1}\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": 1, "count": 1, "infinity": 1}\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": 1, "count": 1, "infinity": 1}\', 0) from tb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": 1, "count": 1, "infinity": 0}\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": 1, "count": 1, "infinity": 0}\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": 1, "count": 1, "infinity": 0}\', 0) from tb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": 1, "count": 1, "infinity": -1.5}\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": 1, "count": 1, "infinity": -1.5}\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": 1, "count": 1, "infinity": -1.5}\', 0) from tb;') + tdSql.error('select 
histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": 1, "count": 1, "infinity": 1.8e+308}\', 0) from stb;')
+ tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": 1, "count": 1, "infinity": 1.8e+308}\', 0) from ctb;')
+ tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": 1, "count": 1, "infinity": 1.8e+308}\', 0) from tb;')
+ tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": 1, "count": 1, "infinity": "abc"}\', 0) from stb;')
+ tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": 1, "count": 1, "infinity": "abc"}\', 0) from ctb;')
+ tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": 1, "count": 1, "infinity": "abc"}\', 0) from tb;')
+ tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": 1, "count": 1, "infinity": "中文"}\', 0) from stb;')
+ tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": 1, "count": 1, "infinity": "中文"}\', 0) from ctb;')
+ tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": 1, "count": 1, "infinity": "中文"}\', 0) from tb;')
+ tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": 1, "count": 1, "infinity": abc}\', 0) from stb;')
+ tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": 1, "count": 1, "infinity": abc}\', 0) from ctb;')
+ tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": 1, "count": 1, "infinity": abc}\', 0) from tb;')
+
+ tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"begin": 0, "width": 1, "count": 1, "infinity": true}\', 0) from stb;')
+ tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"begin": 0, "width": 1, "count": 1, "infinity": true}\', 0) from ctb;')
+ tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"begin": 0, "width": 1, "count": 1, "infinity": true}\', 0) from tb;')
+ tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "factor": 1, "count": 1, "infinity": true}\', 0) from stb;')
+ tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "factor": 1, "count": 1, "infinity": true}\', 0) from ctb;')
+ tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "factor": 1, "count": 1, "infinity": true}\', 0) from tb;')
+ tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "factor": 1, "cnt": 1, "infinity": true}\', 0) from stb;')
+ tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "factor": 1, "cnt": 1, "infinity": true}\', 0) from ctb;')
+ tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "factor": 1, "cnt": 1, "infinity": true}\', 0) from tb;')
+ tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": 1, "count": 1, "inf": true}\', 0) from stb;')
+ tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": 1, "count": 1, "inf": true}\', 0) from ctb;')
+ tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{"start": 0, "width": 1, "count": 1, "inf": true}\', 0) from tb;')
+ tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{start: 0, width: 1, count: 1, infinity: true}\', 0) from stb;')
+ tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{start: 0, width: 1, count: 1, infinity: true}\', 0) from ctb;')
+ tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'{start: 0, width: 1, count: 1, infinity: true}\', 0) from tb;')
+ tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'[ 0, 1, 1, true]\', 0) from stb;')
+ tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'[ 0, 1, 1, true]\', 0) from ctb;')
+ tdSql.error('select histogram(col_tinyint, \'linear_bin\', \'[ 0, 1, 1, true]\', 0) from tb;')
+
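+ # log_bin works like linear_bin except that each successive bin edge is the
+ # previous edge multiplied by "factor" instead of shifted by "width": with
+ # start 1 and factor 3 the edges run 1, 3, 9, 27, ... A factor in (0, 1)
+ # makes the bins descend from "start", a negative "start" mirrors the bins
+ # onto the negative axis, and start = 0, factor <= 0 or factor = 1 are all
+ # rejected (see the error cases further down).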
+ ## log_bin ##
+ #INTEGER
+ tdSql.query('select histogram(col_tinyint, \'log_bin\', \'{"start": 1, "factor": 3, "count": 6, "infinity": false}\', 0) from stb;')
+ tdSql.checkRows(6);
+ tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":2}');
+ tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":9, "count":6}');
+ tdSql.checkData(2, 0, '{"lower_bin":9, "upper_bin":27, "count":3}');
+ tdSql.checkData(3, 0, '{"lower_bin":27, "upper_bin":81, "count":0}');
+ tdSql.checkData(4, 0, '{"lower_bin":81, "upper_bin":243, "count":1}');
+ tdSql.checkData(5, 0, '{"lower_bin":243, "upper_bin":729, "count":0}');
+ tdSql.query('select histogram(col_tinyint, \'log_bin\', \'{"start": 1, "factor": 3, "count": 6, "infinity": false}\', 0) from ctb;')
+ tdSql.checkRows(6);
+ tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":2}');
+ tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":9, "count":6}');
+ tdSql.checkData(2, 0, '{"lower_bin":9, "upper_bin":27, "count":3}');
+ tdSql.checkData(3, 0, '{"lower_bin":27, "upper_bin":81, "count":0}');
+ tdSql.checkData(4, 0, '{"lower_bin":81, "upper_bin":243, "count":1}');
+ tdSql.checkData(5, 0, '{"lower_bin":243, "upper_bin":729, "count":0}');
+ tdSql.query('select histogram(col_tinyint, \'log_bin\', \'{"start": 1, "factor": 3, "count": 6, "infinity": false}\', 0) from tb;')
+ tdSql.checkRows(6);
+ tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":2}');
+ tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":9, "count":6}');
+ tdSql.checkData(2, 0, '{"lower_bin":9, "upper_bin":27, "count":3}');
+ tdSql.checkData(3, 0, '{"lower_bin":27, "upper_bin":81, "count":0}');
+ tdSql.checkData(4, 0, '{"lower_bin":81, "upper_bin":243, "count":1}');
+ tdSql.checkData(5, 0, '{"lower_bin":243, "upper_bin":729, "count":0}');
+
+ tdSql.query('select histogram(col_tinyint, \'log_bin\', \'{"start": -1.0, "factor": 3.0, "count": 6, "infinity": false}\', 0) from stb;')
+ tdSql.checkRows(6);
+ tdSql.checkData(0, 0, '{"lower_bin":-3, "upper_bin":-1, "count":1}');
+ tdSql.checkData(1, 0, '{"lower_bin":-9, "upper_bin":-3, "count":0}');
+ tdSql.checkData(2, 0, '{"lower_bin":-27, "upper_bin":-9, "count":1}');
+ tdSql.checkData(3, 0, '{"lower_bin":-81, "upper_bin":-27, "count":0}');
+ tdSql.checkData(4, 0, '{"lower_bin":-243, "upper_bin":-81, "count":0}');
+ tdSql.checkData(5, 0, '{"lower_bin":-729, "upper_bin":-243, "count":0}');
+ tdSql.query('select histogram(col_tinyint, \'log_bin\', \'{"start": -1.0, "factor": 3.0, "count": 6, "infinity": false}\', 0) from ctb;')
+ tdSql.checkRows(6);
+ tdSql.checkData(0, 0, '{"lower_bin":-3, "upper_bin":-1, "count":1}');
+ tdSql.checkData(1, 0, '{"lower_bin":-9, "upper_bin":-3, "count":0}');
+ tdSql.checkData(2, 0, '{"lower_bin":-27, "upper_bin":-9, "count":1}');
+ tdSql.checkData(3, 0, '{"lower_bin":-81, "upper_bin":-27, "count":0}');
+ tdSql.checkData(4, 0, '{"lower_bin":-243, "upper_bin":-81, "count":0}');
+ tdSql.checkData(5, 0, '{"lower_bin":-729, "upper_bin":-243, "count":0}');
+ tdSql.query('select histogram(col_tinyint, \'log_bin\', \'{"start": -1.0, "factor": 3.0, "count": 6, "infinity": false}\', 0) from tb;')
+ tdSql.checkRows(6);
+ tdSql.checkData(0, 0, 
'{"lower_bin":-3, "upper_bin":-1, "count":1}'); + tdSql.checkData(1, 0, '{"lower_bin":-9, "upper_bin":-3, "count":0}'); + tdSql.checkData(2, 0, '{"lower_bin":-27, "upper_bin":-9, "count":1}'); + tdSql.checkData(3, 0, '{"lower_bin":-81, "upper_bin":-27, "count":0}'); + tdSql.checkData(4, 0, '{"lower_bin":-243, "upper_bin":-81, "count":0}'); + tdSql.checkData(5, 0, '{"lower_bin":-729, "upper_bin":-243, "count":0}'); + + tdSql.query('select histogram(col_tinyint, \'log_bin\', \'{"start": 10, "factor": 0.5, "count": 6, "infinity": false}\', 0) from stb;') + tdSql.checkRows(6); + tdSql.checkData(0, 0, '{"lower_bin":5, "upper_bin":10, "count":5}'); + tdSql.checkData(1, 0, '{"lower_bin":2.5, "upper_bin":5, "count":3}'); + tdSql.checkData(2, 0, '{"lower_bin":1.25, "upper_bin":2.5, "count":1}'); + tdSql.checkData(3, 0, '{"lower_bin":0.625, "upper_bin":1.25, "count":1}'); + tdSql.checkData(4, 0, '{"lower_bin":0.3125, "upper_bin":0.625, "count":0}'); + tdSql.checkData(5, 0, '{"lower_bin":0.15625, "upper_bin":0.3125, "count":0}'); + tdSql.query('select histogram(col_tinyint, \'log_bin\', \'{"start": 10, "factor": 0.5, "count": 6, "infinity": false}\', 0) from ctb;') + tdSql.checkRows(6); + tdSql.checkData(0, 0, '{"lower_bin":5, "upper_bin":10, "count":5}'); + tdSql.checkData(1, 0, '{"lower_bin":2.5, "upper_bin":5, "count":3}'); + tdSql.checkData(2, 0, '{"lower_bin":1.25, "upper_bin":2.5, "count":1}'); + tdSql.checkData(3, 0, '{"lower_bin":0.625, "upper_bin":1.25, "count":1}'); + tdSql.checkData(4, 0, '{"lower_bin":0.3125, "upper_bin":0.625, "count":0}'); + tdSql.checkData(5, 0, '{"lower_bin":0.15625, "upper_bin":0.3125, "count":0}'); + tdSql.query('select histogram(col_tinyint, \'log_bin\', \'{"start": 10, "factor": 0.5, "count": 6, "infinity": false}\', 0) from tb;') + tdSql.checkRows(6); + tdSql.checkData(0, 0, '{"lower_bin":5, "upper_bin":10, "count":5}'); + tdSql.checkData(1, 0, '{"lower_bin":2.5, "upper_bin":5, "count":3}'); + tdSql.checkData(2, 0, '{"lower_bin":1.25, "upper_bin":2.5, "count":1}'); + tdSql.checkData(3, 0, '{"lower_bin":0.625, "upper_bin":1.25, "count":1}'); + tdSql.checkData(4, 0, '{"lower_bin":0.3125, "upper_bin":0.625, "count":0}'); + tdSql.checkData(5, 0, '{"lower_bin":0.15625, "upper_bin":0.3125, "count":0}'); + + tdSql.query('select histogram(col_tinyint, \'log_bin\', \'{"start": -10, "factor": 0.5, "count": 6, "infinity": false}\', 0) from stb;') + tdSql.checkRows(6); + tdSql.checkData(0, 0, '{"lower_bin":-10, "upper_bin":-5, "count":1}'); + tdSql.checkData(1, 0, '{"lower_bin":-5, "upper_bin":-2.5, "count":0}'); + tdSql.checkData(2, 0, '{"lower_bin":-2.5, "upper_bin":-1.25, "count":0}'); + tdSql.checkData(3, 0, '{"lower_bin":-1.25, "upper_bin":-0.625, "count":1}'); + tdSql.checkData(4, 0, '{"lower_bin":-0.625, "upper_bin":-0.3125, "count":0}'); + tdSql.checkData(5, 0, '{"lower_bin":-0.3125, "upper_bin":-0.15625, "count":0}'); + tdSql.query('select histogram(col_tinyint, \'log_bin\', \'{"start": -10, "factor": 0.5, "count": 6, "infinity": false}\', 0) from ctb;') + tdSql.checkRows(6); + tdSql.checkData(0, 0, '{"lower_bin":-10, "upper_bin":-5, "count":1}'); + tdSql.checkData(1, 0, '{"lower_bin":-5, "upper_bin":-2.5, "count":0}'); + tdSql.checkData(2, 0, '{"lower_bin":-2.5, "upper_bin":-1.25, "count":0}'); + tdSql.checkData(3, 0, '{"lower_bin":-1.25, "upper_bin":-0.625, "count":1}'); + tdSql.checkData(4, 0, '{"lower_bin":-0.625, "upper_bin":-0.3125, "count":0}'); + tdSql.checkData(5, 0, '{"lower_bin":-0.3125, "upper_bin":-0.15625, "count":0}'); + tdSql.query('select 
histogram(col_tinyint, \'log_bin\', \'{"start": -10, "factor": 0.5, "count": 6, "infinity": false}\', 0) from tb;') + tdSql.checkRows(6); + tdSql.checkData(0, 0, '{"lower_bin":-10, "upper_bin":-5, "count":1}'); + tdSql.checkData(1, 0, '{"lower_bin":-5, "upper_bin":-2.5, "count":0}'); + tdSql.checkData(2, 0, '{"lower_bin":-2.5, "upper_bin":-1.25, "count":0}'); + tdSql.checkData(3, 0, '{"lower_bin":-1.25, "upper_bin":-0.625, "count":1}'); + tdSql.checkData(4, 0, '{"lower_bin":-0.625, "upper_bin":-0.3125, "count":0}'); + tdSql.checkData(5, 0, '{"lower_bin":-0.3125, "upper_bin":-0.15625, "count":0}'); + + tdSql.query('select histogram(col_tinyint, \'log_bin\', \'{"start": 2, "factor": 1.5, "count": 6, "infinity": false}\', 0) from stb;') + tdSql.checkRows(6); + tdSql.checkData(0, 0, '{"lower_bin":2, "upper_bin":3, "count":1}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":4.5, "count":1}'); + tdSql.checkData(2, 0, '{"lower_bin":4.5, "upper_bin":6.75, "count":2}'); + tdSql.checkData(3, 0, '{"lower_bin":6.75, "upper_bin":10.125, "count":4}'); + tdSql.checkData(4, 0, '{"lower_bin":10.125, "upper_bin":15.1875, "count":1}'); + tdSql.checkData(5, 0, '{"lower_bin":15.1875, "upper_bin":22.7812, "count":1}'); + tdSql.query('select histogram(col_tinyint, \'log_bin\', \'{"start": 2, "factor": 1.5, "count": 6, "infinity": false}\', 0) from ctb;') + tdSql.checkRows(6); + tdSql.checkData(0, 0, '{"lower_bin":2, "upper_bin":3, "count":1}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":4.5, "count":1}'); + tdSql.checkData(2, 0, '{"lower_bin":4.5, "upper_bin":6.75, "count":2}'); + tdSql.checkData(3, 0, '{"lower_bin":6.75, "upper_bin":10.125, "count":4}'); + tdSql.checkData(4, 0, '{"lower_bin":10.125, "upper_bin":15.1875, "count":1}'); + tdSql.checkData(5, 0, '{"lower_bin":15.1875, "upper_bin":22.7812, "count":1}'); + tdSql.query('select histogram(col_tinyint, \'log_bin\', \'{"start": 2, "factor": 1.5, "count": 6, "infinity": false}\', 0) from tb;') + tdSql.checkRows(6); + tdSql.checkData(0, 0, '{"lower_bin":2, "upper_bin":3, "count":1}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":4.5, "count":1}'); + tdSql.checkData(2, 0, '{"lower_bin":4.5, "upper_bin":6.75, "count":2}'); + tdSql.checkData(3, 0, '{"lower_bin":6.75, "upper_bin":10.125, "count":4}'); + tdSql.checkData(4, 0, '{"lower_bin":10.125, "upper_bin":15.1875, "count":1}'); + tdSql.checkData(5, 0, '{"lower_bin":15.1875, "upper_bin":22.7812, "count":1}'); + + tdSql.query('select histogram(col_tinyint, \'log_bin\', \'{"start": 3.2, "factor": 0.5, "count": 1.9999, "infinity": false}\', 0) from stb;') + tdSql.checkRows(1); + tdSql.checkData(0, 0, '{"lower_bin":1.6, "upper_bin":3.2, "count":2}'); + tdSql.query('select histogram(col_tinyint, \'log_bin\', \'{"start": 3.2, "factor": 0.5, "count": 1.9999, "infinity": false}\', 0) from ctb;') + tdSql.checkRows(1); + tdSql.checkData(0, 0, '{"lower_bin":1.6, "upper_bin":3.2, "count":2}'); + tdSql.query('select histogram(col_tinyint, \'log_bin\', \'{"start": 3.2, "factor": 0.5, "count": 1.9999, "infinity": false}\', 0) from tb;') + tdSql.checkRows(1); + tdSql.checkData(0, 0, '{"lower_bin":1.6, "upper_bin":3.2, "count":2}'); + + tdSql.query('select histogram(col_tinyint, \'log_bin\', \'{"start": 3.2, "factor": 0.5, "count": 1.99999999999999999, "infinity": false}\', 0) from stb;') + tdSql.checkRows(2); + tdSql.checkData(0, 0, '{"lower_bin":1.6, "upper_bin":3.2, "count":2}'); + tdSql.checkData(1, 0, '{"lower_bin":0.8, "upper_bin":1.6, "count":1}'); + tdSql.query('select histogram(col_tinyint, 
\'log_bin\', \'{"start": 3.2, "factor": 0.5, "count": 1.99999999999999999, "infinity": false}\', 0) from ctb;') + tdSql.checkRows(2); + tdSql.checkData(0, 0, '{"lower_bin":1.6, "upper_bin":3.2, "count":2}'); + tdSql.checkData(1, 0, '{"lower_bin":0.8, "upper_bin":1.6, "count":1}'); + tdSql.query('select histogram(col_tinyint, \'log_bin\', \'{"start": 3.2, "factor": 0.5, "count": 1.99999999999999999, "infinity": false}\', 0) from tb;') + tdSql.checkRows(2); + tdSql.checkData(0, 0, '{"lower_bin":1.6, "upper_bin":3.2, "count":2}'); + tdSql.checkData(1, 0, '{"lower_bin":0.8, "upper_bin":1.6, "count":1}'); + + tdSql.query('select histogram(col_tinyint, \'log_bin\', \'{"start": 1, "factor": 5, "count": 3, "infinity": true}\', 0) from stb;') + tdSql.checkRows(5); + tdSql.checkData(0, 0, '{"lower_bin":-inf, "upper_bin":1, "count":3}'); + tdSql.checkData(1, 0, '{"lower_bin":1, "upper_bin":5, "count":4}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":25, "count":7}'); + tdSql.checkData(3, 0, '{"lower_bin":25, "upper_bin":125, "count":1}'); + tdSql.checkData(4, 0, '{"lower_bin":125, "upper_bin":inf, "count":0}'); + tdSql.query('select histogram(col_tinyint, \'log_bin\', \'{"start": 1, "factor": 5, "count": 3, "infinity": true}\', 0) from ctb;') + tdSql.checkRows(5); + tdSql.checkData(0, 0, '{"lower_bin":-inf, "upper_bin":1, "count":3}'); + tdSql.checkData(1, 0, '{"lower_bin":1, "upper_bin":5, "count":4}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":25, "count":7}'); + tdSql.checkData(3, 0, '{"lower_bin":25, "upper_bin":125, "count":1}'); + tdSql.checkData(4, 0, '{"lower_bin":125, "upper_bin":inf, "count":0}'); + tdSql.query('select histogram(col_tinyint, \'log_bin\', \'{"start": 1, "factor": 5, "count": 3, "infinity": true}\', 0) from tb;') + tdSql.checkRows(5); + tdSql.checkData(0, 0, '{"lower_bin":-inf, "upper_bin":1, "count":3}'); + tdSql.checkData(1, 0, '{"lower_bin":1, "upper_bin":5, "count":4}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":25, "count":7}'); + tdSql.checkData(3, 0, '{"lower_bin":25, "upper_bin":125, "count":1}'); + tdSql.checkData(4, 0, '{"lower_bin":125, "upper_bin":inf, "count":0}'); + + tdSql.query('select histogram(col_tinyint, \'log_bin\', \'{"start": 0.2e+308, "factor": 3.14, "count": 1, "infinity": true}\', 0) from stb;') + tdSql.checkRows(3); + tdSql.checkData(0, 0, '{"lower_bin":-inf, "upper_bin":2e+307, "count":15}'); + tdSql.checkData(1, 0, '{"lower_bin":2e+307, "upper_bin":6.28e+307, "count":0}'); + tdSql.checkData(2, 0, '{"lower_bin":6.28e+307, "upper_bin":inf, "count":0}'); + tdSql.query('select histogram(col_tinyint, \'log_bin\', \'{"start": 0.2e+308, "factor": 3.14, "count": 1, "infinity": true}\', 0) from ctb;') + tdSql.checkRows(3); + tdSql.checkData(0, 0, '{"lower_bin":-inf, "upper_bin":2e+307, "count":15}'); + tdSql.checkData(1, 0, '{"lower_bin":2e+307, "upper_bin":6.28e+307, "count":0}'); + tdSql.checkData(2, 0, '{"lower_bin":6.28e+307, "upper_bin":inf, "count":0}'); + tdSql.query('select histogram(col_tinyint, \'log_bin\', \'{"start": 0.2e+308, "factor": 3.14, "count": 1, "infinity": true}\', 0) from tb;') + tdSql.checkRows(3); + tdSql.checkData(0, 0, '{"lower_bin":-inf, "upper_bin":2e+307, "count":15}'); + tdSql.checkData(1, 0, '{"lower_bin":2e+307, "upper_bin":6.28e+307, "count":0}'); + tdSql.checkData(2, 0, '{"lower_bin":6.28e+307, "upper_bin":inf, "count":0}'); + + tdSql.query('select histogram(col_tinyint, \'log_bin\', \'{"start": -2, "factor": 3, "count": 3, "infinity": true}\', 0) from stb;') + tdSql.checkRows(5); + 
tdSql.checkData(0, 0, '{"lower_bin":-2, "upper_bin":inf, "count":14}'); + tdSql.checkData(1, 0, '{"lower_bin":-6, "upper_bin":-2, "count":0}'); + tdSql.checkData(2, 0, '{"lower_bin":-18, "upper_bin":-6, "count":1}'); + tdSql.checkData(3, 0, '{"lower_bin":-54, "upper_bin":-18, "count":0}'); + tdSql.checkData(4, 0, '{"lower_bin":-inf, "upper_bin":-54, "count":0}'); + tdSql.query('select histogram(col_tinyint, \'log_bin\', \'{"start": -2, "factor": 3, "count": 3, "infinity": true}\', 0) from ctb;') + tdSql.checkRows(5); + tdSql.checkData(0, 0, '{"lower_bin":-2, "upper_bin":inf, "count":14}'); + tdSql.checkData(1, 0, '{"lower_bin":-6, "upper_bin":-2, "count":0}'); + tdSql.checkData(2, 0, '{"lower_bin":-18, "upper_bin":-6, "count":1}'); + tdSql.checkData(3, 0, '{"lower_bin":-54, "upper_bin":-18, "count":0}'); + tdSql.checkData(4, 0, '{"lower_bin":-inf, "upper_bin":-54, "count":0}'); + tdSql.query('select histogram(col_tinyint, \'log_bin\', \'{"start": -2, "factor": 3, "count": 3, "infinity": true}\', 0) from tb;') + tdSql.checkRows(5); + tdSql.checkData(0, 0, '{"lower_bin":-2, "upper_bin":inf, "count":14}'); + tdSql.checkData(1, 0, '{"lower_bin":-6, "upper_bin":-2, "count":0}'); + tdSql.checkData(2, 0, '{"lower_bin":-18, "upper_bin":-6, "count":1}'); + tdSql.checkData(3, 0, '{"lower_bin":-54, "upper_bin":-18, "count":0}'); + tdSql.checkData(4, 0, '{"lower_bin":-inf, "upper_bin":-54, "count":0}'); + + tdSql.query('select histogram(col_tinyint, \'log_bin\', \'{"start": 10, "factor": 0.5, "count": 3, "infinity": true}\', 0) from stb;') + tdSql.checkRows(5); + tdSql.checkData(0, 0, '{"lower_bin":10, "upper_bin":inf, "count":3}'); + tdSql.checkData(1, 0, '{"lower_bin":5, "upper_bin":10, "count":5}'); + tdSql.checkData(2, 0, '{"lower_bin":2.5, "upper_bin":5, "count":3}'); + tdSql.checkData(3, 0, '{"lower_bin":1.25, "upper_bin":2.5, "count":1}'); + tdSql.checkData(4, 0, '{"lower_bin":-inf, "upper_bin":1.25, "count":3}'); + tdSql.query('select histogram(col_tinyint, \'log_bin\', \'{"start": 10, "factor": 0.5, "count": 3, "infinity": true}\', 0) from ctb;') + tdSql.checkRows(5); + tdSql.checkData(0, 0, '{"lower_bin":10, "upper_bin":inf, "count":3}'); + tdSql.checkData(1, 0, '{"lower_bin":5, "upper_bin":10, "count":5}'); + tdSql.checkData(2, 0, '{"lower_bin":2.5, "upper_bin":5, "count":3}'); + tdSql.checkData(3, 0, '{"lower_bin":1.25, "upper_bin":2.5, "count":1}'); + tdSql.checkData(4, 0, '{"lower_bin":-inf, "upper_bin":1.25, "count":3}'); + tdSql.query('select histogram(col_tinyint, \'log_bin\', \'{"start": 10, "factor": 0.5, "count": 3, "infinity": true}\', 0) from tb;') + tdSql.checkRows(5); + tdSql.checkData(0, 0, '{"lower_bin":10, "upper_bin":inf, "count":3}'); + tdSql.checkData(1, 0, '{"lower_bin":5, "upper_bin":10, "count":5}'); + tdSql.checkData(2, 0, '{"lower_bin":2.5, "upper_bin":5, "count":3}'); + tdSql.checkData(3, 0, '{"lower_bin":1.25, "upper_bin":2.5, "count":1}'); + tdSql.checkData(4, 0, '{"lower_bin":-inf, "upper_bin":1.25, "count":3}'); + + #FLOAT + tdSql.query('select histogram(col_float, \'log_bin\', \'{"factor":2, "start": 1, "count": 4, "infinity": false}\', 0) from stb;') + tdSql.checkRows(4); + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":2, "count":1}'); + tdSql.checkData(1, 0, '{"lower_bin":2, "upper_bin":4, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":4, "upper_bin":8, "count":4}'); + tdSql.checkData(3, 0, '{"lower_bin":8, "upper_bin":16, "count":4}'); + tdSql.query('select histogram(col_float, \'log_bin\', \'{"factor":2, "start": 1, "count": 4, "infinity": 
false}\', 0) from ctb;') + tdSql.checkRows(4); + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":2, "count":1}'); + tdSql.checkData(1, 0, '{"lower_bin":2, "upper_bin":4, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":4, "upper_bin":8, "count":4}'); + tdSql.checkData(3, 0, '{"lower_bin":8, "upper_bin":16, "count":4}'); + tdSql.query('select histogram(col_float, \'log_bin\', \'{"factor":2, "start": 1, "count": 4, "infinity": false}\', 0) from tb;') + tdSql.checkRows(4); + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":2, "count":1}'); + tdSql.checkData(1, 0, '{"lower_bin":2, "upper_bin":4, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":4, "upper_bin":8, "count":4}'); + tdSql.checkData(3, 0, '{"lower_bin":8, "upper_bin":16, "count":4}'); + + tdSql.query('select histogram(col_float, \'log_bin\', \'{"count": 4, "factor":2, "start": 1, "infinity": false}\', 0) from stb;') + tdSql.checkRows(4); + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":2, "count":1}'); + tdSql.checkData(1, 0, '{"lower_bin":2, "upper_bin":4, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":4, "upper_bin":8, "count":4}'); + tdSql.checkData(3, 0, '{"lower_bin":8, "upper_bin":16, "count":4}'); + tdSql.query('select histogram(col_float, \'log_bin\', \'{"count": 4, "factor":2, "start": 1, "infinity": false}\', 0) from ctb;') + tdSql.checkRows(4); + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":2, "count":1}'); + tdSql.checkData(1, 0, '{"lower_bin":2, "upper_bin":4, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":4, "upper_bin":8, "count":4}'); + tdSql.checkData(3, 0, '{"lower_bin":8, "upper_bin":16, "count":4}'); + tdSql.query('select histogram(col_float, \'log_bin\', \'{"count": 4, "factor":2, "start": 1, "infinity": false}\', 0) from tb;') + tdSql.checkRows(4); + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":2, "count":1}'); + tdSql.checkData(1, 0, '{"lower_bin":2, "upper_bin":4, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":4, "upper_bin":8, "count":4}'); + tdSql.checkData(3, 0, '{"lower_bin":8, "upper_bin":16, "count":4}'); + + tdSql.query('select histogram(col_float, \'log_bin\', \'{"infinity": false, "count": 4, "factor":2, "start": 1}\', 0) from stb;') + tdSql.checkRows(4); + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":2, "count":1}'); + tdSql.checkData(1, 0, '{"lower_bin":2, "upper_bin":4, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":4, "upper_bin":8, "count":4}'); + tdSql.checkData(3, 0, '{"lower_bin":8, "upper_bin":16, "count":4}'); + tdSql.query('select histogram(col_float, \'log_bin\', \'{"infinity": false, "count": 4, "factor":2, "start": 1}\', 0) from ctb;') + tdSql.checkRows(4); + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":2, "count":1}'); + tdSql.checkData(1, 0, '{"lower_bin":2, "upper_bin":4, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":4, "upper_bin":8, "count":4}'); + tdSql.checkData(3, 0, '{"lower_bin":8, "upper_bin":16, "count":4}'); + tdSql.query('select histogram(col_float, \'log_bin\', \'{"infinity": false, "count": 4, "factor":2, "start": 1}\', 0) from tb;') + tdSql.checkRows(4); + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":2, "count":1}'); + tdSql.checkData(1, 0, '{"lower_bin":2, "upper_bin":4, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":4, "upper_bin":8, "count":4}'); + tdSql.checkData(3, 0, '{"lower_bin":8, "upper_bin":16, "count":4}'); + + #ERROR CASE + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": true, "factor": 5, "count": 5, "infinity": false}\', 0) from stb;') + tdSql.error('select 
histogram(col_tinyint, \'log_bin\', \'{"start": true, "factor": 5, "count": 5, "infinity": false}\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": true, "factor": 5, "count": 5, "infinity": false}\', 0) from tb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": false, "factor": 5, "count": 5, "infinity": false}\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": false, "factor": 5, "count": 5, "infinity": false}\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": false, "factor": 5, "count": 5, "infinity": false}\', 0) from tb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": "abc", "factor": 5, "count": 5, "infinity": false}\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": "abc", "factor": 5, "count": 5, "infinity": false}\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": "abc", "factor": 5, "count": 5, "infinity": false}\', 0) from tb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": "中文", "factor": 5, "count": 5, "infinity": false}\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": "中文", "factor": 5, "count": 5, "infinity": false}\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": "中文", "factor": 5, "count": 5, "infinity": false}\', 0) from tb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": abc, "factor": 5, "count": 5, "infinity": false}\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": abc, "factor": 5, "count": 5, "infinity": false}\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": abc, "factor": 5, "count": 5, "infinity": false}\', 0) from tb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": -1.80e+308, "factor": 5, "count": 5, "infinity": false}\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": -1.80e+308, "factor": 5, "count": 5, "infinity": false}\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": -1.80e+308, "factor": 5, "count": 5, "infinity": false}\', 0) from tb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 1.80e+308, "factor": 5, "count": 5, "infinity": false}\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 1.80e+308, "factor": 5, "count": 5, "infinity": false}\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 1.80e+308, "factor": 5, "count": 5, "infinity": false}\', 0) from tb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 0, "factor": 5, "count": 5, "infinity": false}\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 0, "factor": 5, "count": 5, "infinity": false}\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 0, "factor": 5, "count": 5, "infinity": false}\', 0) from tb;') + + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 1, "factor": true, "count": 5, "infinity": false}\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 1, "factor": true, "count": 5, "infinity": false}\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 1, "factor": true, "count": 5, 
"infinity": false}\', 0) from tb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 1, "factor": false, "count": 5, "infinity": false}\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 1, "factor": false, "count": 5, "infinity": false}\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 1, "factor": false, "count": 5, "infinity": false}\', 0) from tb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 1, "factor": "abc", "count": 5, "infinity": false}\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 1, "factor": "abc", "count": 5, "infinity": false}\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 1, "factor": "abc", "count": 5, "infinity": false}\', 0) from tb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 1, "factor": "中文", "count": 5, "infinity": false}\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 1, "factor": "中文", "count": 5, "infinity": false}\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 1, "factor": "中文", "count": 5, "infinity": false}\', 0) from tb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 1, "factor": abc, "count": 5, "infinity": false}\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 1, "factor": abc, "count": 5, "infinity": false}\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 1, "factor": abc, "count": 5, "infinity": false}\', 0) from tb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 1, "factor": 1.80e+308, "count": 5, "infinity": false}\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 1, "factor": 1.80e+308, "count": 5, "infinity": false}\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 1, "factor": 1.80e+308, "count": 5, "infinity": false}\', 0) from tb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 1, "factor": 0, "count": 5, "infinity": false}\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 1, "factor": 0, "count": 5, "infinity": false}\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 1, "factor": 0, "count": 5, "infinity": false}\', 0) from tb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 1, "factor": -5, "count": 5, "infinity": false}\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 1, "factor": -5, "count": 5, "infinity": false}\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 1, "factor": -5, "count": 5, "infinity": false}\', 0) from tb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 1, "factor": 1, "count": 5, "infinity": false}\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 1, "factor": 1, "count": 5, "infinity": false}\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 1, "factor": 1, "count": 5, "infinity": false}\', 0) from tb;') + + #out of range + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": -1.4e+308, "factor": 1.5, "count": 3, "infinity": false}\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', 
\'{"start": -1.4e+308, "factor": 1.5, "count": 3, "infinity": false}\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": -1.4e+308, "factor": 1.5, "count": 3, "infinity": false}\', 0) from tb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": -1.4e+308, "factor": 1.5, "count": 3, "infinity": true}\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": -1.4e+308, "factor": 1.5, "count": 3, "infinity": true}\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": -1.4e+308, "factor": 1.5, "count": 3, "infinity": true}\', 0) from tb;') + + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 0, "factor": 1, "count": -1, "infinity": false}\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 0, "factor": 1, "count": -1, "infinity": false}\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 0, "factor": 1, "count": -1, "infinity": false}\', 0) from tb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 0, "factor": 1, "count": 0, "infinity": true}\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 0, "factor": 1, "count": 0, "infinity": true}\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 0, "factor": 1, "count": 0, "infinity": true}\', 0) from tb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 0, "factor": 1, "count": 1001, "infinity": true}\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 0, "factor": 1, "count": 1001, "infinity": true}\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 0, "factor": 1, "count": 1001, "infinity": true}\', 0) from tb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 0, "factor": 1, "count": true, "infinity": true}\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 0, "factor": 1, "count": true, "infinity": true}\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 0, "factor": 1, "count": true, "infinity": true}\', 0) from tb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 0, "factor": 1, "count": false, "infinity": true}\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 0, "factor": 1, "count": false, "infinity": true}\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 0, "factor": 1, "count": false, "infinity": true}\', 0) from tb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 0, "factor": 1, "count": "abc", "infinity": true}\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 0, "factor": 1, "count": "abc", "infinity": true}\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 0, "factor": 1, "count": "abc", "infinity": true}\', 0) from tb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 0, "factor": 1, "count": "中文", "infinity": true}\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 0, "factor": 1, "count": "中文", "infinity": true}\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 0, "factor": 1, "count": "中文", "infinity": true}\', 0) from tb;') + tdSql.error('select 
histogram(col_tinyint, \'log_bin\', \'{"start": 0, "factor": 1, "count": abc, "infinity": true}\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 0, "factor": 1, "count": abc, "infinity": true}\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 0, "factor": 1, "count": abc, "infinity": true}\', 0) from tb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 0, "factor": 1, "count": 1.8e+308, "infinity": true}\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 0, "factor": 1, "count": 1.8e+308, "infinity": true}\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 0, "factor": 1, "count": 1.8e+308, "infinity": true}\', 0) from tb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 0, "factor": 1, "count": -1.8e+308, "infinity": true}\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 0, "factor": 1, "count": -1.8e+308, "infinity": true}\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 0, "factor": 1, "count": -1.8e+308, "infinity": true}\', 0) from tb;') + + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 0, "factor": 1, "count": 1, "infinity": 1}\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 0, "factor": 1, "count": 1, "infinity": 1}\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 0, "factor": 1, "count": 1, "infinity": 1}\', 0) from tb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 0, "factor": 1, "count": 1, "infinity": 0}\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 0, "factor": 1, "count": 1, "infinity": 0}\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 0, "factor": 1, "count": 1, "infinity": 0}\', 0) from tb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 0, "factor": 1, "count": 1, "infinity": -1.5}\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 0, "factor": 1, "count": 1, "infinity": -1.5}\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 0, "factor": 1, "count": 1, "infinity": -1.5}\', 0) from tb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 0, "factor": 1, "count": 1, "infinity": 1.8e+308}\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 0, "factor": 1, "count": 1, "infinity": 1.8e+308}\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 0, "factor": 1, "count": 1, "infinity": 1.8e+308}\', 0) from tb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 0, "factor": 1, "count": 1, "infinity": "abc"}\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 0, "factor": 1, "count": 1, "infinity": "abc"}\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 0, "factor": 1, "count": 1, "infinity": "abc"}\', 0) from tb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 0, "factor": 1, "count": 1, "infinity": "中文"}\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 0, "factor": 1, "count": 1, "infinity": "中文"}\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, 
\'log_bin\', \'{"start": 0, "factor": 1, "count": 1, "infinity": "中文"}\', 0) from tb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 0, "factor": 1, "count": 1, "infinity": abc}\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 0, "factor": 1, "count": 1, "infinity": abc}\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 0, "factor": 1, "count": 1, "infinity": abc}\', 0) from tb;') + + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"begin": 0, "factor": 1, "count": 1, "infinity": true}\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"begin": 0, "factor": 1, "count": 1, "infinity": true}\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"begin": 0, "factor": 1, "count": 1, "infinity": true}\', 0) from tb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 0, "width": 1, "count": 1, "infinity": true}\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 0, "width": 1, "count": 1, "infinity": true}\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 0, "width": 1, "count": 1, "infinity": true}\', 0) from tb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 0, "factor": 1, "cnt": 1, "infinity": true}\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 0, "factor": 1, "cnt": 1, "infinity": true}\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 0, "factor": 1, "cnt": 1, "infinity": true}\', 0) from tb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 0, "factor": 1, "count": 1, "inf": true}\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 0, "factor": 1, "count": 1, "inf": true}\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{"start": 0, "factor": 1, "count": 1, "inf": true}\', 0) from tb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{start: 0, factor: 1, count: 1, infinity: true}\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{start: 0, factor: 1, count: 1, infinity: true}\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'{start: 0, factor: 1, count: 1, infinity: true}\', 0) from tb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'[ 0, 1, 1, true]\', 0) from stb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'[ 0, 1, 1, true]\', 0) from ctb;') + tdSql.error('select histogram(col_tinyint, \'log_bin\', \'[ 0, 1, 1, true]\', 0) from tb;') + + print("============== STEP 3: normalization ================== ") + ## Normalization ## + tdSql.query('select histogram(col_smallint, "user_input", "[1,3,5,7]", 0) from stb;') + tdSql.checkRows(3) + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":2}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}'); + tdSql.query('select histogram(col_smallint, "user_input", "[1,3,5,7]", 1) from ctb;') + tdSql.checkRows(3) + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":0.333333}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":0.333333}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":0.333333}'); + tdSql.query('select histogram(col_smallint, "user_input", "[1,3,5,7]", 
+
+ tdSql.query('select histogram(col_int, "user_input", "[1,5,10]", 0) from stb;')
+ tdSql.checkRows(2)
+ tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":5, "count":4}');
+ tdSql.checkData(1, 0, '{"lower_bin":5, "upper_bin":10, "count":5}');
+ tdSql.query('select histogram(col_int, "user_input", "[1,5,10]", 1) from ctb;')
+ tdSql.checkRows(2)
+ tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":5, "count":0.444444}');
+ tdSql.checkData(1, 0, '{"lower_bin":5, "upper_bin":10, "count":0.555556}');
+ tdSql.query('select histogram(col_int, "user_input", "[1,5,10]", 1) from tb;')
+ tdSql.checkRows(2)
+ tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":5, "count":0.444444}');
+ tdSql.checkData(1, 0, '{"lower_bin":5, "upper_bin":10, "count":0.555556}');
+
+ tdSql.query('select histogram(col_double, "user_input", "[0,5,11]", 0) from stb;')
+ tdSql.checkRows(2)
+ tdSql.checkData(0, 0, '{"lower_bin":0, "upper_bin":5, "count":4}');
+ tdSql.checkData(1, 0, '{"lower_bin":5, "upper_bin":11, "count":6}');
+ tdSql.query('select histogram(col_double, "user_input", "[0,5,11]", 1) from ctb;')
+ tdSql.checkRows(2)
+ tdSql.checkData(0, 0, '{"lower_bin":0, "upper_bin":5, "count":0.400000}');
+ tdSql.checkData(1, 0, '{"lower_bin":5, "upper_bin":11, "count":0.600000}');
+ tdSql.query('select histogram(col_double, "user_input", "[0,5,11]", 1) from tb;')
+ tdSql.checkRows(2)
+ tdSql.checkData(0, 0, '{"lower_bin":0, "upper_bin":5, "count":0.400000}');
+ tdSql.checkData(1, 0, '{"lower_bin":5, "upper_bin":11, "count":0.600000}');
+
+ tdSql.query('select histogram(col_bigint, \'linear_bin\', \'{"start": 1, "width": 5, "count": 2, "infinity": false}\', 0) from stb;')
+ tdSql.checkRows(2)
+ tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":6, "count":5}');
+ tdSql.checkData(1, 0, '{"lower_bin":6, "upper_bin":11, "count":4}');
+ tdSql.query('select histogram(col_bigint, \'linear_bin\', \'{"start": 1, "width": 5, "count": 2, "infinity": false}\', 1) from ctb;')
+ tdSql.checkRows(2)
+ tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":6, "count":0.555556}');
+ tdSql.checkData(1, 0, '{"lower_bin":6, "upper_bin":11, "count":0.444444}');
+ tdSql.query('select histogram(col_bigint, \'linear_bin\', \'{"start": 1, "width": 5, "count": 2, "infinity": false}\', 1) from tb;')
+ tdSql.checkRows(2)
+ tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":6, "count":0.555556}');
+ tdSql.checkData(1, 0, '{"lower_bin":6, "upper_bin":11, "count":0.444444}');
+
+ tdSql.query('select histogram(col_int, \'linear_bin\', \'{"start": -10, "width": 5, "count": 3, "infinity": true}\', 0) from stb;')
+ tdSql.checkRows(5)
+ tdSql.checkData(0, 0, '{"lower_bin":-inf, "upper_bin":-10, "count":0}');
+ tdSql.checkData(1, 0, '{"lower_bin":-10, "upper_bin":-5, "count":1}');
+ tdSql.checkData(2, 0, '{"lower_bin":-5, "upper_bin":0, "count":1}');
+ tdSql.checkData(3, 0, '{"lower_bin":0, "upper_bin":5, "count":5}');
+ tdSql.checkData(4, 0, '{"lower_bin":5, "upper_bin":inf, "count":8}');
+ tdSql.query('select histogram(col_int, \'linear_bin\', \'{"start": -10, "width": 5, "count": 3, "infinity": true}\', 1) from ctb;')
+ tdSql.checkRows(5)
+ tdSql.checkData(0, 0, '{"lower_bin":-inf, "upper_bin":-10, "count":0.000000}');
+ tdSql.checkData(1, 0, '{"lower_bin":-10, "upper_bin":-5, "count":0.066667}');
+ tdSql.checkData(2, 0, '{"lower_bin":-5, "upper_bin":0, "count":0.066667}');
+ tdSql.checkData(3, 0, '{"lower_bin":0, "upper_bin":5, "count":0.333333}');
+ tdSql.checkData(4, 0, '{"lower_bin":5, "upper_bin":inf, "count":0.533333}');
+ tdSql.query('select histogram(col_int, \'linear_bin\', \'{"start": -10, "width": 5, "count": 3, "infinity": true}\', 1) from tb;')
+ tdSql.checkRows(5)
+ tdSql.checkData(0, 0, '{"lower_bin":-inf, "upper_bin":-10, "count":0.000000}');
+ tdSql.checkData(1, 0, '{"lower_bin":-10, "upper_bin":-5, "count":0.066667}');
+ tdSql.checkData(2, 0, '{"lower_bin":-5, "upper_bin":0, "count":0.066667}');
+ tdSql.checkData(3, 0, '{"lower_bin":0, "upper_bin":5, "count":0.333333}');
+ tdSql.checkData(4, 0, '{"lower_bin":5, "upper_bin":inf, "count":0.533333}');
+
+ tdSql.query('select histogram(col_float, \'log_bin\', \'{"start": 1, "factor": 5, "count": 3, "infinity": false}\', 0) from stb;')
+ tdSql.checkRows(3)
+ tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":5, "count":4}');
+ tdSql.checkData(1, 0, '{"lower_bin":5, "upper_bin":25, "count":8}');
+ tdSql.checkData(2, 0, '{"lower_bin":25, "upper_bin":125, "count":1}');
+ tdSql.query('select histogram(col_float, \'log_bin\', \'{"start": 1, "factor": 5, "count": 3, "infinity": false}\', 1) from ctb;')
+ tdSql.checkRows(3)
+ tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":5, "count":0.307692}');
+ tdSql.checkData(1, 0, '{"lower_bin":5, "upper_bin":25, "count":0.615385}');
+ tdSql.checkData(2, 0, '{"lower_bin":25, "upper_bin":125, "count":0.076923}');
+ tdSql.query('select histogram(col_float, \'log_bin\', \'{"start": 1, "factor": 5, "count": 3, "infinity": false}\', 1) from tb;')
+ tdSql.checkRows(3)
+ tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":5, "count":0.307692}');
+ tdSql.checkData(1, 0, '{"lower_bin":5, "upper_bin":25, "count":0.615385}');
+ tdSql.checkData(2, 0, '{"lower_bin":25, "upper_bin":125, "count":0.076923}');
+
+ tdSql.query('select histogram(col_double, \'log_bin\', \'{"start": -0.5, "factor": 0.5, "count": 2, "infinity": false}\', 0) from stb;')
+ tdSql.checkRows(2)
+ tdSql.checkData(0, 0, '{"lower_bin":-0.5, "upper_bin":-0.25, "count":0}');
+ tdSql.checkData(1, 0, '{"lower_bin":-0.25, "upper_bin":-0.125, "count":0}');
+ tdSql.query('select histogram(col_double, \'log_bin\', \'{"start": -0.5, "factor": 0.5, "count": 2, "infinity": false}\', 1) from ctb;')
+ tdSql.checkRows(2)
+ tdSql.checkData(0, 0, '{"lower_bin":-0.5, "upper_bin":-0.25, "count":0.000000}');
+ tdSql.checkData(1, 0, '{"lower_bin":-0.25, "upper_bin":-0.125, "count":0.000000}');
+ tdSql.query('select histogram(col_double, \'log_bin\', \'{"start": -0.5, "factor": 0.5, "count": 2, "infinity": false}\', 1) from tb;')
+ tdSql.checkRows(2)
+ tdSql.checkData(0, 0, '{"lower_bin":-0.5, "upper_bin":-0.25, "count":0.000000}');
+ tdSql.checkData(1, 0, '{"lower_bin":-0.25, "upper_bin":-0.125, "count":0.000000}');
+
+ tdSql.query('select histogram(col_double, \'log_bin\', \'{"start": -0.5, "factor": 0.5, "count": 2, "infinity": true}\', 0) from stb;')
+ tdSql.checkRows(4)
+ tdSql.checkData(0, 0, '{"lower_bin":-inf, "upper_bin":-0.5, "count":2}');
+ tdSql.checkData(1, 0, '{"lower_bin":-0.5, "upper_bin":-0.25, "count":0}');
+ tdSql.checkData(2, 0, '{"lower_bin":-0.25, "upper_bin":-0.125, "count":0}');
+ tdSql.checkData(3, 0, '{"lower_bin":-0.125, "upper_bin":inf, "count":13}');
+ tdSql.query('select histogram(col_double, \'log_bin\', \'{"start": -0.5, "factor": 0.5, "count": 2, "infinity": true}\', 1) from ctb;')
+ tdSql.checkRows(4)
+ tdSql.checkData(0, 0, '{"lower_bin":-inf, "upper_bin":-0.5, "count":0.133333}');
+ tdSql.checkData(1, 0, '{"lower_bin":-0.5, "upper_bin":-0.25, "count":0.000000}');
+ tdSql.checkData(2, 0, '{"lower_bin":-0.25, "upper_bin":-0.125, "count":0.000000}');
+ tdSql.checkData(3, 0, '{"lower_bin":-0.125, "upper_bin":inf, "count":0.866667}');
+ tdSql.query('select histogram(col_double, \'log_bin\', \'{"start": -0.5, "factor": 0.5, "count": 2, "infinity": true}\', 1) from tb;')
+ tdSql.checkRows(4)
+ tdSql.checkData(0, 0, '{"lower_bin":-inf, "upper_bin":-0.5, "count":0.133333}');
+ tdSql.checkData(1, 0, '{"lower_bin":-0.5, "upper_bin":-0.25, "count":0.000000}');
+ tdSql.checkData(2, 0, '{"lower_bin":-0.25, "upper_bin":-0.125, "count":0.000000}');
+ tdSql.checkData(3, 0, '{"lower_bin":-0.125, "upper_bin":inf, "count":0.866667}');
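The log_bin boundaries checked above follow a simple pattern: edge i is start * factor^i, so start=1, factor=5, count=3 gives (1,5], (5,25], (25,125], and a factor below 1 yields edges that shrink toward zero (-0.5, -0.25, -0.125); "infinity": true adds a (-inf, first] bin in front and a (last, inf) bin at the end. A hedged sketch of that derivation:

```python
# How the bin edges in the expected results above can be reproduced
# (an illustrative reconstruction, not TDengine's internal code).
import math

def log_bin_edges(start, factor, count, infinity):
    edges = [start * factor**i for i in range(count + 1)]
    if infinity:
        edges = [-math.inf] + edges + [math.inf]
    return edges

print(log_bin_edges(1, 5, 3, False))      # [1, 5, 25, 125]
print(log_bin_edges(-0.5, 0.5, 2, True))  # [-inf, -0.5, -0.25, -0.125, inf]
```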
+
+ #ERROR CASE
+ tdSql.error('select histogram(col_smallint, "user_input", "[1,3,5,7]", -10) from stb;')
+ tdSql.error('select histogram(col_int, "user_input", "[1,3,5,7]", 2) from ctb;')
+ tdSql.error('select histogram(col_int, "user_input", "[1,3,5,7]", 3.14) from tb;')
+
+ tdSql.error('select histogram(col_bigint, \'linear_bin\', \'{"start": 1, "width": 5, "count": 2, "infinity": false}\', true) from stb;')
+ tdSql.error('select histogram(col_bigint, \'linear_bin\', \'{"start": 1, "width": 5, "count": 2, "infinity": false}\', false) from ctb;')
+
+ tdSql.error('select histogram(col_double, \'log_bin\', \'{"start": -0.5, "factor": 0.5, "count": 2, "infinity": true}\', "abc") from tb;')
+ tdSql.error('select histogram(col_double, \'log_bin\', \'{"start": -0.5, "factor": 0.5, "count": 2, "infinity": true}\', abc) from tb;')
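The error block above indicates the fourth (normalized) argument accepts only the literal integers 0 and 1; negatives, larger integers, floats, booleans, and strings are all rejected. A hypothetical mirror of that rule:

```python
# Assumed validation of the normalized flag, inferred from the error cases.
def validate_normalized(value) -> None:
    if isinstance(value, bool) or value not in (0, 1):
        raise ValueError("normalized flag must be 0 or 1")
```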
"user_input", "[1,3,5,7]", 0) from stb;') + tdSql.error('select distinct histogram(tag_binary, "user_input", "[1,3,5,7]", 0) from stb;') + + tdSql.error('select histogram(*, "user_input", "[1,3,5,7]", 0) from stb;') + + #select func(col_name arith_oper xxx) + tdSql.error('select histogram(col_int + 1, "user_input", "[1,3,5,7]", 0) from stb;') + tdSql.error('select histogram(col_int - 1, "user_input", "[1,3,5,7]", 0) from stb;') + tdSql.error('select histogram(col_int * 2.0, "user_input", "[1,3,5,7]", 0) from stb;') + tdSql.error('select histogram(col_int / 2.0, "user_input", "[1,3,5,7]", 0) from stb;') + tdSql.error('select histogram(col_int % 2.0, "user_input", "[1,3,5,7]", 0) from stb;') + tdSql.error('select histogram(col_timestamp + now, "user_input", "[1,3,5,7]", 0) from stb;') + tdSql.error('select histogram(col_int + col_bigint, "user_input", "[1,3,5,7]", 0) from stb;') + tdSql.error('select histogram(col_int - col_bigint, "user_input", "[1,3,5,7]", 0) from stb;') + tdSql.error('select histogram(col_int * col_bigint, "user_input", "[1,3,5,7]", 0) from stb;') + tdSql.error('select histogram(col_int / col_bigint, "user_input", "[1,3,5,7]", 0) from stb;') + tdSql.error('select histogram(col_int % col_bigint, "user_input", "[1,3,5,7]", 0) from stb;') + tdSql.error('select histogram(col_int + pow(1,2), "user_input", "[1,3,5,7]", 0) from stb;') + tdSql.error('select histogram(col_int - abs(-100), "user_input", "[1,3,5,7]", 0) from stb;') + tdSql.error('select histogram(col_int * round(col_float), "user_input", "[1,3,5,7]", 0) from stb;') + tdSql.error('select histogram(col_int / ceil(1.5), "user_input", "[1,3,5,7]", 0) from stb;') + tdSql.error('select histogram(col_int % floor(col_double), "user_input", "[1,3,5,7]", 0) from stb;') + + #select func() arith_oper xxx + tdSql.error('select histogram(col_int, "user_input", "[1,3,5,7]", 0) + 1 from stb;') + tdSql.error('select histogram(col_int, "user_input", "[1,3,5,7]", 0) - 1 from stb;') + tdSql.error('select histogram(col_int, "user_input", "[1,3,5,7]", 0) * 1 from stb;') + tdSql.error('select histogram(col_int, "user_input", "[1,3,5,7]", 0) / 1 from stb;') + tdSql.error('select histogram(col_int, "user_input", "[1,3,5,7]", 0) % 1 from stb;') + tdSql.error('select histogram(col_int, "user_input", "[1,3,5,7]", 0) + col_double from stb;') + tdSql.error('select histogram(col_int, "user_input", "[1,3,5,7]", 0) - col_double from stb;') + tdSql.error('select histogram(col_int, "user_input", "[1,3,5,7]", 0) * col_double from stb;') + tdSql.error('select histogram(col_int, "user_input", "[1,3,5,7]", 0) / col_double from stb;') + tdSql.error('select histogram(col_int, "user_input", "[1,3,5,7]", 0) % col_double from stb;') + + tdSql.error('select histogram(col_int, "user_input", "[1,3,5,7]", 0) + abs(col_double) from stb;') + tdSql.error('select histogram(col_int, "user_input", "[1,3,5,7]", 0) - ceil(col_double) from stb;') + tdSql.error('select histogram(col_int, "user_input", "[1,3,5,7]", 0) * floor(col_double) from stb;') + tdSql.error('select histogram(col_int, "user_input", "[1,3,5,7]", 0) / round(col_double) from stb;') + tdSql.error('select histogram(col_int, "user_input", "[1,3,5,7]", 0) % acos(col_double) from stb;') + + tdSql.error('select histogram(col_int, "user_input", "[1,3,5,7]", 0) + max(col_double) from stb;') + tdSql.error('select histogram(col_int, "user_input", "[1,3,5,7]", 0) - min(col_double) from stb;') + tdSql.error('select histogram(col_int, "user_input", "[1,3,5,7]", 0) * first(col_double) from stb;') + tdSql.error('select 
histogram(col_int, "user_input", "[1,3,5,7]", 0) / last(col_double) from stb;') + tdSql.error('select histogram(col_int, "user_input", "[1,3,5,7]", 0) % top(col_double, 1) from stb;') + + tdSql.error('select histogram(col_int, "user_input", "[1,3,5,7]", 0) + sum(col_double) from stb;') + tdSql.error('select histogram(col_int, "user_input", "[1,3,5,7]", 0) - avg(col_double) from stb;') + tdSql.error('select histogram(col_int, "user_input", "[1,3,5,7]", 0) * count(col_double) from stb;') + tdSql.error('select histogram(col_int, "user_input", "[1,3,5,7]", 0) / stddev(col_double) from stb;') + tdSql.error('select histogram(col_int, "user_input", "[1,3,5,7]", 0) % twa(col_double) from stb;') + + #select func(),xxx + tdSql.error('select histogram(col_int, "user_input", "[1,3,5,7]", 0),col_tinyint from stb;') + tdSql.error('select histogram(col_int, "user_input", "[1,3,5,7]", 0),col_smallint from stb;') + tdSql.error('select histogram(col_int, "user_input", "[1,3,5,7]", 0),col_int from stb;') + tdSql.error('select histogram(col_int, "user_input", "[1,3,5,7]", 0),col_bigint from stb;') + tdSql.error('select histogram(col_int, "user_input", "[1,3,5,7]", 0),col_timstamp from stb;') + tdSql.error('select histogram(col_int, "user_input", "[1,3,5,7]", 0),col_bool from stb;') + tdSql.error('select histogram(col_int, "user_input", "[1,3,5,7]", 0),col_float from stb;') + tdSql.error('select histogram(col_int, "user_input", "[1,3,5,7]", 0),col_double from stb;') + tdSql.error('select histogram(col_int, "user_input", "[1,3,5,7]", 0),col_binary from stb;') + tdSql.error('select histogram(col_int, "user_input", "[1,3,5,7]", 0),col_nchar from stb;') + + tdSql.error('select histogram(tag_int, "user_input", "[1,3,5,7]", 0),tag_tinyint from stb;') + tdSql.error('select histogram(tag_int, "user_input", "[1,3,5,7]", 0),tag_smallint from stb;') + tdSql.error('select histogram(tag_int, "user_input", "[1,3,5,7]", 0),tag_int from stb;') + tdSql.error('select histogram(tag_int, "user_input", "[1,3,5,7]", 0),tag_bigint from stb;') + tdSql.error('select histogram(tag_int, "user_input", "[1,3,5,7]", 0),tag_timstamp from stb;') + tdSql.error('select histogram(tag_int, "user_input", "[1,3,5,7]", 0),tag_bool from stb;') + tdSql.error('select histogram(tag_int, "user_input", "[1,3,5,7]", 0),tag_float from stb;') + tdSql.error('select histogram(tag_int, "user_input", "[1,3,5,7]", 0),tag_double from stb;') + tdSql.error('select histogram(tag_int, "user_input", "[1,3,5,7]", 0),tag_binary from stb;') + tdSql.error('select histogram(tag_int, "user_input", "[1,3,5,7]", 0),tag_nchar from stb;') + + tdSql.error('select histogram(tag_int, "user_input", "[1,3,5,7]", 0),tag_tinyint from stb;') + tdSql.error('select histogram(tag_int, "user_input", "[1,3,5,7]", 0),tag_smallint from stb;') + tdSql.error('select histogram(tag_int, "user_input", "[1,3,5,7]", 0),tag_int from stb;') + tdSql.error('select histogram(tag_int, "user_input", "[1,3,5,7]", 0),tag_bigint from stb;') + tdSql.error('select histogram(tag_int, "user_input", "[1,3,5,7]", 0),tag_timstamp from stb;') + tdSql.error('select histogram(tag_int, "user_input", "[1,3,5,7]", 0),tag_bool from stb;') + tdSql.error('select histogram(tag_int, "user_input", "[1,3,5,7]", 0),tag_float from stb;') + tdSql.error('select histogram(tag_int, "user_input", "[1,3,5,7]", 0),tag_double from stb;') + tdSql.error('select histogram(tag_int, "user_input", "[1,3,5,7]", 0),tag_binary from stb;') + tdSql.error('select histogram(tag_int, "user_input", "[1,3,5,7]", 0),tag_nchar from stb;') + + 
+ tdSql.error('select histogram(tag_int, "user_input", "[1,3,5,7]", 0),ts from stb;')
+ tdSql.error('select histogram(tag_int, "user_input", "[1,3,5,7]", 0),tbname from stb;')
+ tdSql.error('select histogram(tag_int, "user_input", "[1,3,5,7]", 0),_c0 from stb;')
+ tdSql.error('select histogram(tag_int, "user_input", "[1,3,5,7]", 0),_C0 from stb;')
+
+ tdSql.error('select histogram(col_int, "user_input", "[1,3,5,7]", 0),abs(col_double) from stb;')
+ tdSql.error('select histogram(col_int, "user_input", "[1,3,5,7]", 0),ceil(col_double) from stb;')
+ tdSql.error('select histogram(col_int, "user_input", "[1,3,5,7]", 0),floor(col_double) from stb;')
+ tdSql.error('select histogram(col_int, "user_input", "[1,3,5,7]", 0),round(col_double) from stb;')
+ tdSql.error('select histogram(col_int, "user_input", "[1,3,5,7]", 0),acos(col_double) from stb;')
+
+ tdSql.error('select histogram(col_int, "user_input", "[1,3,5,7]", 0),max(col_double) from stb;')
+ tdSql.error('select histogram(col_int, "user_input", "[1,3,5,7]", 0),min(col_double) from stb;')
+ tdSql.error('select histogram(col_int, "user_input", "[1,3,5,7]", 0),first(col_double) from stb;')
+ tdSql.error('select histogram(col_int, "user_input", "[1,3,5,7]", 0),last(col_double) from stb;')
+ tdSql.error('select histogram(col_int, "user_input", "[1,3,5,7]", 0),top(col_double, 1) from stb;')
+
+ tdSql.error('select histogram(col_int, "user_input", "[1,3,5,7]", 0),sum(col_double) from stb;')
+ tdSql.error('select histogram(col_int, "user_input", "[1,3,5,7]", 0),avg(col_double) from stb;')
+ tdSql.error('select histogram(col_int, "user_input", "[1,3,5,7]", 0),count(col_double) from stb;')
+ tdSql.error('select histogram(col_int, "user_input", "[1,3,5,7]", 0),stddev(col_double) from stb;')
+ tdSql.error('select histogram(col_int, "user_input", "[1,3,5,7]", 0),twa(col_double) from stb;')
+
+ tdSql.error('select histogram(col_int, "user_input", "[1,3,5,7]", 0),histogram(col_int, "user_input", "[1,3,5,7]", 0) from stb;')
+ tdSql.error('select histogram(col_int, "user_input", "[1,3,5,7]", 0),histogram(col_int, "linear_bin", \'{"start": -1, "width":5, "count":5, "infinity":false}\', 0) from stb;')
+ tdSql.error('select histogram(col_int, "user_input", "[1,3,5,7]", 0),histogram(col_int, "log_bin", \'{"start": 10, "factor":0.5, "count":5, "infinity":false}\', 0) from stb;')
+
+ #select where condition
+ tdSql.query('select histogram(col_int, "user_input", "[1,3,5,7,9,15]", 0) from tb;')
+ tdSql.checkRows(5)
+ tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":2}');
+ tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}');
+ tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}');
+ tdSql.checkData(3, 0, '{"lower_bin":7, "upper_bin":9, "count":2}');
+ tdSql.checkData(4, 0, '{"lower_bin":9, "upper_bin":15, "count":2}');
+ tdSql.query('select histogram(col_int, "user_input", "[1,3,5,7,9,15]", 0) from tb where col_int > 3;')
+ tdSql.checkRows(5)
+ tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":0}');
+ tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}');
+ tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}');
+ tdSql.checkData(3, 0, '{"lower_bin":7, "upper_bin":9, "count":2}');
+ tdSql.checkData(4, 0, '{"lower_bin":9, "upper_bin":15, "count":2}');
+ tdSql.query('select histogram(col_int, "user_input", "[1,3,5,7,9,15]", 0) from tb where col_int < 7;')
+ tdSql.checkRows(5)
+ tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":2}');
+ tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}');
"upper_bin":5, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":1}'); + tdSql.checkData(3, 0, '{"lower_bin":7, "upper_bin":9, "count":0}'); + tdSql.checkData(4, 0, '{"lower_bin":9, "upper_bin":15, "count":0}'); + tdSql.query('select histogram(col_int, "user_input", "[1,3,5,7,9,15]", 0) from tb where col_int >= 3;') + tdSql.checkRows(5) + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":1}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}'); + tdSql.checkData(3, 0, '{"lower_bin":7, "upper_bin":9, "count":2}'); + tdSql.checkData(4, 0, '{"lower_bin":9, "upper_bin":15, "count":2}'); + tdSql.query('select histogram(col_int, "user_input", "[1,3,5,7,9,15]", 0) from tb where col_int <= 7;') + tdSql.checkRows(5) + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":2}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}'); + tdSql.checkData(3, 0, '{"lower_bin":7, "upper_bin":9, "count":0}'); + tdSql.checkData(4, 0, '{"lower_bin":9, "upper_bin":15, "count":0}'); + tdSql.query('select histogram(col_int, "user_input", "[1,3,5,7,9,15]", 0) from tb where col_int = 7;') + tdSql.checkRows(5) + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":0}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":0}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":1}'); + tdSql.checkData(3, 0, '{"lower_bin":7, "upper_bin":9, "count":0}'); + tdSql.checkData(4, 0, '{"lower_bin":9, "upper_bin":15, "count":0}'); + tdSql.query('select histogram(col_int, "user_input", "[1,3,5,7,9,15]", 0) from tb where col_int != 7;') + tdSql.checkRows(5) + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":2}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":1}'); + tdSql.checkData(3, 0, '{"lower_bin":7, "upper_bin":9, "count":2}'); + tdSql.checkData(4, 0, '{"lower_bin":9, "upper_bin":15, "count":2}'); + tdSql.query('select histogram(col_int, "user_input", "[1,3,5,7,9,15]", 0) from tb where col_int <> 7;') + tdSql.checkRows(5) + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":2}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":1}'); + tdSql.checkData(3, 0, '{"lower_bin":7, "upper_bin":9, "count":2}'); + tdSql.checkData(4, 0, '{"lower_bin":9, "upper_bin":15, "count":2}'); + tdSql.query('select histogram(col_int, "user_input", "[1,3,5,7,9,15]", 0) from tb where col_int > 5 and col_int <7;') + tdSql.checkRows(5) + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":0}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":0}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":1}'); + tdSql.checkData(3, 0, '{"lower_bin":7, "upper_bin":9, "count":0}'); + tdSql.checkData(4, 0, '{"lower_bin":9, "upper_bin":15, "count":0}'); + tdSql.query('select histogram(col_int, "user_input", "[1,3,5,7,9,15]", 0) from tb where col_int >= 5 and col_int <=7;') + tdSql.checkRows(5) + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":0}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":1}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}'); + tdSql.checkData(3, 0, '{"lower_bin":7, "upper_bin":9, "count":0}'); + 
+
+ tdSql.query('select histogram(col_tinyint, "user_input", "[1,3,5,7,9,15]", 0) from ctb;')
+ tdSql.checkRows(5)
+ tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":2}');
+ tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}');
+ tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}');
+ tdSql.checkData(3, 0, '{"lower_bin":7, "upper_bin":9, "count":2}');
+ tdSql.checkData(4, 0, '{"lower_bin":9, "upper_bin":15, "count":2}');
+ tdSql.query('select histogram(col_tinyint, "user_input", "[1,3,5,7,9,15]", 0) from ctb where col_tinyint > 3;')
+ tdSql.checkRows(5)
+ tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":0}');
+ tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}');
+ tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}');
+ tdSql.checkData(3, 0, '{"lower_bin":7, "upper_bin":9, "count":2}');
+ tdSql.checkData(4, 0, '{"lower_bin":9, "upper_bin":15, "count":2}');
+ tdSql.query('select histogram(col_tinyint, "user_input", "[1,3,5,7,9,15]", 0) from ctb where col_tinyint < 7;')
+ tdSql.checkRows(5)
+ tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":2}');
+ tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}');
+ tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":1}');
+ tdSql.checkData(3, 0, '{"lower_bin":7, "upper_bin":9, "count":0}');
+ tdSql.checkData(4, 0, '{"lower_bin":9, "upper_bin":15, "count":0}');
+ tdSql.query('select histogram(col_tinyint, "user_input", "[1,3,5,7,9,15]", 0) from ctb where col_tinyint >= 3;')
+ tdSql.checkRows(5)
+ tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":1}');
+ tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}');
+ tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}');
+ tdSql.checkData(3, 0, '{"lower_bin":7, "upper_bin":9, "count":2}');
+ tdSql.checkData(4, 0, '{"lower_bin":9, "upper_bin":15, "count":2}');
+ tdSql.query('select histogram(col_tinyint, "user_input", "[1,3,5,7,9,15]", 0) from ctb where col_tinyint <= 7;')
+ tdSql.checkRows(5)
+ tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":2}');
+ tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}');
+ tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}');
+ tdSql.checkData(3, 0, '{"lower_bin":7, "upper_bin":9, "count":0}');
+ tdSql.checkData(4, 0, '{"lower_bin":9, "upper_bin":15, "count":0}');
+ tdSql.query('select histogram(col_tinyint, "user_input", "[1,3,5,7,9,15]", 0) from ctb where col_tinyint = 7;')
+ tdSql.checkRows(5)
+ tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":0}');
+ tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":0}');
+ tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":1}');
+ tdSql.checkData(3, 0, '{"lower_bin":7, "upper_bin":9, "count":0}');
+ tdSql.checkData(4, 0, '{"lower_bin":9, "upper_bin":15, "count":0}');
+ tdSql.query('select histogram(col_tinyint, "user_input", "[1,3,5,7,9,15]", 0) from ctb where col_tinyint != 7;')
+ tdSql.checkRows(5)
+ tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":2}');
+ tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}');
+ tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":1}');
+ tdSql.checkData(3, 0, '{"lower_bin":7, "upper_bin":9, "count":2}');
+ tdSql.checkData(4, 0, '{"lower_bin":9, "upper_bin":15, "count":2}');
+ tdSql.query('select histogram(col_tinyint, "user_input", "[1,3,5,7,9,15]", 0) from ctb where col_tinyint <> 7;')
+ tdSql.checkRows(5)
+ tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":2}');
+ tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}');
+ tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":1}');
+ tdSql.checkData(3, 0, '{"lower_bin":7, "upper_bin":9, "count":2}');
+ tdSql.checkData(4, 0, '{"lower_bin":9, "upper_bin":15, "count":2}');
+ tdSql.query('select histogram(col_tinyint, "user_input", "[1,3,5,7,9,15]", 0) from ctb where col_tinyint > 5 and col_tinyint <7;')
+ tdSql.checkRows(5)
+ tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":0}');
+ tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":0}');
+ tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":1}');
+ tdSql.checkData(3, 0, '{"lower_bin":7, "upper_bin":9, "count":0}');
+ tdSql.checkData(4, 0, '{"lower_bin":9, "upper_bin":15, "count":0}');
+ tdSql.query('select histogram(col_tinyint, "user_input", "[1,3,5,7,9,15]", 0) from ctb where col_tinyint >= 5 and col_tinyint <=7;')
+ tdSql.checkRows(5)
+ tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":0}');
+ tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":1}');
+ tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}');
+ tdSql.checkData(3, 0, '{"lower_bin":7, "upper_bin":9, "count":0}');
+ tdSql.checkData(4, 0, '{"lower_bin":9, "upper_bin":15, "count":0}');
+ tdSql.query('select histogram(col_tinyint, "user_input", "[1,3,5,7,9,15]", 0) from ctb where col_tinyint between 5 and 7;')
+ tdSql.checkRows(5)
+ tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":0}');
+ tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":1}');
+ tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}');
+ tdSql.checkData(3, 0, '{"lower_bin":7, "upper_bin":9, "count":0}');
+ tdSql.checkData(4, 0, '{"lower_bin":9, "upper_bin":15, "count":0}');
+
+ tdSql.query('select histogram(col_bigint, "user_input", "[1,3,5,7,9,15]", 0) from stb;')
+ tdSql.checkRows(5)
+ tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":2}');
+ tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}');
+ tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}');
+ tdSql.checkData(3, 0, '{"lower_bin":7, "upper_bin":9, "count":2}');
+ tdSql.checkData(4, 0, '{"lower_bin":9, "upper_bin":15, "count":2}');
+ tdSql.query('select histogram(col_bigint, "user_input", "[1,3,5,7,9,15]", 0) from stb where col_bigint > 3;')
+ tdSql.checkRows(5)
+ tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":0}');
+ tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}');
+ tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}');
+ tdSql.checkData(3, 0, '{"lower_bin":7, "upper_bin":9, "count":2}');
+ tdSql.checkData(4, 0, '{"lower_bin":9, "upper_bin":15, "count":2}');
+ tdSql.query('select histogram(col_bigint, "user_input", "[1,3,5,7,9,15]", 0) from stb where col_bigint < 7;')
+ tdSql.checkRows(5)
+ tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":2}');
+ tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}');
+ tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":1}');
+ tdSql.checkData(3, 0, '{"lower_bin":7, "upper_bin":9, "count":0}');
+ tdSql.checkData(4, 0, '{"lower_bin":9, "upper_bin":15, "count":0}');
+ tdSql.query('select histogram(col_bigint, "user_input", "[1,3,5,7,9,15]", 0) from stb where col_bigint >= 3;')
+ tdSql.checkRows(5)
+ tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":1}');
+ tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}');
+ tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}');
+ tdSql.checkData(3, 0, '{"lower_bin":7, "upper_bin":9, "count":2}');
+ tdSql.checkData(4, 0, '{"lower_bin":9, "upper_bin":15, "count":2}');
+ tdSql.query('select histogram(col_bigint, "user_input", "[1,3,5,7,9,15]", 0) from stb where col_bigint <= 7;')
+ tdSql.checkRows(5)
+ tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":2}');
+ tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}');
+ tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}');
+ tdSql.checkData(3, 0, '{"lower_bin":7, "upper_bin":9, "count":0}');
+ tdSql.checkData(4, 0, '{"lower_bin":9, "upper_bin":15, "count":0}');
+ tdSql.query('select histogram(col_bigint, "user_input", "[1,3,5,7,9,15]", 0) from stb where col_bigint = 7;')
+ tdSql.checkRows(5)
+ tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":0}');
+ tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":0}');
+ tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":1}');
+ tdSql.checkData(3, 0, '{"lower_bin":7, "upper_bin":9, "count":0}');
+ tdSql.checkData(4, 0, '{"lower_bin":9, "upper_bin":15, "count":0}');
+ tdSql.query('select histogram(col_bigint, "user_input", "[1,3,5,7,9,15]", 0) from stb where col_bigint != 7;')
+ tdSql.checkRows(5)
+ tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":2}');
+ tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}');
+ tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":1}');
+ tdSql.checkData(3, 0, '{"lower_bin":7, "upper_bin":9, "count":2}');
+ tdSql.checkData(4, 0, '{"lower_bin":9, "upper_bin":15, "count":2}');
+ tdSql.query('select histogram(col_bigint, "user_input", "[1,3,5,7,9,15]", 0) from stb where col_bigint <> 7;')
+ tdSql.checkRows(5)
+ tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":2}');
+ tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}');
+ tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":1}');
+ tdSql.checkData(3, 0, '{"lower_bin":7, "upper_bin":9, "count":2}');
+ tdSql.checkData(4, 0, '{"lower_bin":9, "upper_bin":15, "count":2}');
+ tdSql.query('select histogram(col_bigint, "user_input", "[1,3,5,7,9,15]", 0) from stb where col_bigint > 5 and col_bigint <7;')
+ tdSql.checkRows(5)
+ tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":0}');
+ tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":0}');
+ tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":1}');
+ tdSql.checkData(3, 0, '{"lower_bin":7, "upper_bin":9, "count":0}');
+ tdSql.checkData(4, 0, '{"lower_bin":9, "upper_bin":15, "count":0}');
+ tdSql.query('select histogram(col_bigint, "user_input", "[1,3,5,7,9,15]", 0) from stb where col_bigint >= 5 and col_bigint <=7;')
+ tdSql.checkRows(5)
+ tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":0}');
+ tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":1}');
+ tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}');
"count":2}'); + tdSql.checkData(3, 0, '{"lower_bin":7, "upper_bin":9, "count":0}'); + tdSql.checkData(4, 0, '{"lower_bin":9, "upper_bin":15, "count":0}'); + tdSql.query('select histogram(col_bigint, "user_input", "[1,3,5,7,9,15]", 0) from stb where col_bigint between 5 and 7;') + tdSql.checkRows(5) + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":0}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":1}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}'); + tdSql.checkData(3, 0, '{"lower_bin":7, "upper_bin":9, "count":0}'); + tdSql.checkData(4, 0, '{"lower_bin":9, "upper_bin":15, "count":0}'); + + tdSql.query('select histogram(col_bigint, "user_input", "[1,3,5,7,9,15]", 0) from stb;') + tdSql.checkRows(5) + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":2}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}'); + tdSql.checkData(3, 0, '{"lower_bin":7, "upper_bin":9, "count":2}'); + tdSql.checkData(4, 0, '{"lower_bin":9, "upper_bin":15, "count":2}'); + tdSql.query('select histogram(col_bigint, "user_input", "[1,3,5,7,9,15]", 0) from stb where tag_bigint > 0;') + tdSql.checkRows(5) + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":2}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}'); + tdSql.checkData(3, 0, '{"lower_bin":7, "upper_bin":9, "count":2}'); + tdSql.checkData(4, 0, '{"lower_bin":9, "upper_bin":15, "count":2}'); + tdSql.query('select histogram(col_bigint, "user_input", "[1,3,5,7,9,15]", 0) from stb where tag_bigint < 2;') + tdSql.checkRows(5) + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":2}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}'); + tdSql.checkData(3, 0, '{"lower_bin":7, "upper_bin":9, "count":2}'); + tdSql.checkData(4, 0, '{"lower_bin":9, "upper_bin":15, "count":2}'); + tdSql.query('select histogram(col_bigint, "user_input", "[1,3,5,7,9,15]", 0) from stb where tag_bigint >= 1;') + tdSql.checkRows(5) + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":2}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}'); + tdSql.checkData(3, 0, '{"lower_bin":7, "upper_bin":9, "count":2}'); + tdSql.checkData(4, 0, '{"lower_bin":9, "upper_bin":15, "count":2}'); + tdSql.query('select histogram(col_bigint, "user_input", "[1,3,5,7,9,15]", 0) from stb where tag_bigint <= 1;') + tdSql.checkRows(5) + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":2}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}'); + tdSql.checkData(3, 0, '{"lower_bin":7, "upper_bin":9, "count":2}'); + tdSql.checkData(4, 0, '{"lower_bin":9, "upper_bin":15, "count":2}'); + tdSql.query('select histogram(col_bigint, "user_input", "[1,3,5,7,9,15]", 0) from stb where tag_bigint = 1;') + tdSql.checkRows(5) + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":2}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}'); + tdSql.checkData(3, 0, '{"lower_bin":7, "upper_bin":9, "count":2}'); + tdSql.checkData(4, 0, '{"lower_bin":9, "upper_bin":15, "count":2}'); + 
+ tdSql.query('select histogram(col_bigint, "user_input", "[1,3,5,7,9,15]", 0) from stb where tag_bigint != 2;')
+ tdSql.checkRows(5)
+ tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":2}');
+ tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}');
+ tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}');
+ tdSql.checkData(3, 0, '{"lower_bin":7, "upper_bin":9, "count":2}');
+ tdSql.checkData(4, 0, '{"lower_bin":9, "upper_bin":15, "count":2}');
+ tdSql.query('select histogram(col_bigint, "user_input", "[1,3,5,7,9,15]", 0) from stb where tag_bigint <> 2;')
+ tdSql.checkRows(5)
+ tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":2}');
+ tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}');
+ tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}');
+ tdSql.checkData(3, 0, '{"lower_bin":7, "upper_bin":9, "count":2}');
+ tdSql.checkData(4, 0, '{"lower_bin":9, "upper_bin":15, "count":2}');
+ tdSql.query('select histogram(col_bigint, "user_input", "[1,3,5,7,9,15]", 0) from stb where tag_bigint > 0 and tag_bigint < 2;')
+ tdSql.checkRows(5)
+ tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":2}');
+ tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}');
+ tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}');
+ tdSql.checkData(3, 0, '{"lower_bin":7, "upper_bin":9, "count":2}');
+ tdSql.checkData(4, 0, '{"lower_bin":9, "upper_bin":15, "count":2}');
+ tdSql.query('select histogram(col_bigint, "user_input", "[1,3,5,7,9,15]", 0) from stb where tag_bigint >= 1 and tag_bigint <= 1;')
+ tdSql.checkRows(5)
+ tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":2}');
+ tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}');
+ tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}');
+ tdSql.checkData(3, 0, '{"lower_bin":7, "upper_bin":9, "count":2}');
+ tdSql.checkData(4, 0, '{"lower_bin":9, "upper_bin":15, "count":2}');
+ tdSql.query('select histogram(col_bigint, "user_input", "[1,3,5,7,9,15]", 0) from stb where tag_bigint between 0 and 2;')
+ tdSql.checkRows(5)
+ tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":2}');
+ tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}');
+ tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}');
+ tdSql.checkData(3, 0, '{"lower_bin":7, "upper_bin":9, "count":2}');
+ tdSql.checkData(4, 0, '{"lower_bin":9, "upper_bin":15, "count":2}');
+
+ #select session
+ tdSql.query('select histogram(col_int, "user_input", "[0,10]", 0) from tb session (col_timestamp, 1w);')
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 1, '{"lower_bin":0, "upper_bin":10, "count":10}');
+
+ tdSql.query('select histogram(col_int, "user_input", "[0,10]", 0) from tb session (col_timestamp, 1d);')
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 1, '{"lower_bin":0, "upper_bin":10, "count":10}');
+
+ tdSql.query('select histogram(col_int, "user_input", "[0,10]", 0) from tb session (col_timestamp, 1h);')
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 1, '{"lower_bin":0, "upper_bin":10, "count":10}');
+
+ tdSql.query('select histogram(col_int, "user_input", "[0,10]", 0) from tb session (col_timestamp, 1m);')
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 1, '{"lower_bin":0, "upper_bin":10, "count":10}');
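With a session window, each group of rows whose timestamp gaps stay within the tolerance yields its own histogram row, and the window's timestamp occupies column 0, which is why these checks read column 1. A sketch of the session split with illustrative timestamps (not the test data):

```python
# Rows whose gap to the previous row is <= the tolerance share a window.
def sessions(timestamps, gap):
    windows, current = [], [timestamps[0]]
    for prev, ts in zip(timestamps, timestamps[1:]):
        if ts - prev <= gap:
            current.append(ts)
        else:
            windows.append(current)
            current = [ts]
    windows.append(current)
    return windows

print(sessions([0, 1, 2, 10, 11], gap=1))   # [[0, 1, 2], [10, 11]]
```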
+
+ #tdSql.query('select histogram(col_int, "user_input", "[0,10]", 0) from tb session (col_timestamp, 1s);')
+ #tdSql.checkRows(16)
+ #tdSql.checkData(0, 1, "(0:10]:0");
+ #tdSql.checkData(1, 1, "(0:10]:0");
+ #tdSql.checkData(2, 1, "(0:10]:1");
+ #tdSql.checkData(3, 1, "(0:10]:1");
+ #tdSql.checkData(4, 1, "(0:10]:1");
+ #tdSql.checkData(5, 1, "(0:10]:1");
+ #tdSql.checkData(6, 1, "(0:10]:1");
+ #tdSql.checkData(7, 1, "(0:10]:1");
+ #tdSql.checkData(8, 1, "(0:10]:1");
+ #tdSql.checkData(9, 1, "(0:10]:1");
+ #tdSql.checkData(10, 1, "(0:10]:1");
+ #tdSql.checkData(11, 1, "(0:10]:1");
+ #tdSql.checkData(12, 1, "(0:10]:0");
+ #tdSql.checkData(13, 1, "(0:10]:0");
+ #tdSql.checkData(14, 1, "(0:10]:0");
+ #tdSql.checkData(15, 1, "(0:10]:0");
+
+ #tdSql.query('select histogram(col_int, "user_input", "[0,10]", 0) from tb session (col_timestamp, 1a);')
+ #tdSql.checkRows(16)
+ #tdSql.checkData(0, 1, "(0:10]:0");
+ #tdSql.checkData(1, 1, "(0:10]:0");
+ #tdSql.checkData(2, 1, "(0:10]:1");
+ #tdSql.checkData(3, 1, "(0:10]:1");
+ #tdSql.checkData(4, 1, "(0:10]:1");
+ #tdSql.checkData(5, 1, "(0:10]:1");
+ #tdSql.checkData(6, 1, "(0:10]:1");
+ #tdSql.checkData(7, 1, "(0:10]:1");
+ #tdSql.checkData(8, 1, "(0:10]:1");
+ #tdSql.checkData(9, 1, "(0:10]:1");
+ #tdSql.checkData(10, 1, "(0:10]:1");
+ #tdSql.checkData(11, 1, "(0:10]:1");
+ #tdSql.checkData(12, 1, "(0:10]:0");
+ #tdSql.checkData(13, 1, "(0:10]:0");
+ #tdSql.checkData(14, 1, "(0:10]:0");
+ #tdSql.checkData(15, 1, "(0:10]:0");
+
+ #select state_window
+ tdSql.error('select histogram(col_int, "user_input", "[0,10]", 0) from tb state_window(col_timestamp);')
+ tdSql.error('select histogram(col_int, "user_input", "[0,10]", 0) from tb state_window(col_tinyint);')
+ tdSql.error('select histogram(col_int, "user_input", "[0,10]", 0) from tb state_window(col_smallint);')
+ tdSql.error('select histogram(col_int, "user_input", "[0,10]", 0) from tb state_window(col_int);')
+ tdSql.error('select histogram(col_int, "user_input", "[0,10]", 0) from tb state_window(col_bigint);')
+ tdSql.error('select histogram(col_int, "user_input", "[0,10]", 0) from tb state_window(col_bool);')
+ tdSql.error('select histogram(col_int, "user_input", "[0,10]", 0) from tb state_window(col_float);')
+ tdSql.error('select histogram(col_int, "user_input", "[0,10]", 0) from tb state_window(col_double);')
+ tdSql.error('select histogram(col_int, "user_input", "[0,10]", 0) from tb state_window(col_binary);')
+ tdSql.error('select histogram(col_int, "user_input", "[0,10]", 0) from tb state_window(col_nchar);')
+
+ #select interval/sliding/fill
+ tdSql.query('select histogram(col_int, "user_input", "[0,10]", 0) from tb interval(1y);')
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 1, '{"lower_bin":0, "upper_bin":10, "count":10}');
+
+ tdSql.query('select histogram(col_int, "user_input", "[0,10]", 0) from tb interval(1n);')
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 1, '{"lower_bin":0, "upper_bin":10, "count":10}');
+
+ tdSql.query('select histogram(col_int, "user_input", "[0,10]", 0) from tb interval(1w);')
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 1, '{"lower_bin":0, "upper_bin":10, "count":10}');
+
+ tdSql.query('select histogram(col_int, "user_input", "[0,10]", 0) from tb interval(1d);')
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 1, '{"lower_bin":0, "upper_bin":10, "count":10}');
+
+ tdSql.query('select histogram(col_int, "user_input", "[0,10]", 0) from tb interval(1h);')
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 1, '{"lower_bin":0, "upper_bin":10, "count":10}');
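The 1s/1a interval checks that follow return 16 rows: fixed-width windows are keyed by the timestamp divided by the window width, and with roughly one row per second each window's histogram counts either 0 or 1 value while the bin layout stays fixed. An illustrative sketch (assumed integer-second timestamps, not the test data):

```python
# Fixed-width windowing keyed by ts // width.
from collections import Counter

def interval_counts(timestamps, width):
    return Counter(ts // width for ts in timestamps)

print(sorted(interval_counts([2, 3, 4, 5, 6], width=1).items()))
# [(2, 1), (3, 1), (4, 1), (5, 1), (6, 1)]
```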
1, '{"lower_bin":0, "upper_bin":10, "count":1}'); + tdSql.checkData(3, 1, '{"lower_bin":0, "upper_bin":10, "count":1}'); + tdSql.checkData(4, 1, '{"lower_bin":0, "upper_bin":10, "count":1}'); + tdSql.checkData(5, 1, '{"lower_bin":0, "upper_bin":10, "count":1}'); + tdSql.checkData(6, 1, '{"lower_bin":0, "upper_bin":10, "count":1}'); + tdSql.checkData(7, 1, '{"lower_bin":0, "upper_bin":10, "count":1}'); + tdSql.checkData(8, 1, '{"lower_bin":0, "upper_bin":10, "count":1}'); + tdSql.checkData(9, 1, '{"lower_bin":0, "upper_bin":10, "count":1}'); + tdSql.checkData(10, 1, '{"lower_bin":0, "upper_bin":10, "count":1}'); + tdSql.checkData(11, 1, '{"lower_bin":0, "upper_bin":10, "count":1}'); + tdSql.checkData(12, 1, '{"lower_bin":0, "upper_bin":10, "count":0}'); + tdSql.checkData(13, 1, '{"lower_bin":0, "upper_bin":10, "count":0}'); + tdSql.checkData(14, 1, '{"lower_bin":0, "upper_bin":10, "count":0}'); + tdSql.checkData(15, 1, '{"lower_bin":0, "upper_bin":10, "count":0}'); + tdSql.query('select histogram(col_int, "user_input", "[0,10]", 0) from tb interval(1a);') + tdSql.checkRows(16) + tdSql.checkData(0, 1, '{"lower_bin":0, "upper_bin":10, "count":0}'); + tdSql.checkData(1, 1, '{"lower_bin":0, "upper_bin":10, "count":0}'); + tdSql.checkData(2, 1, '{"lower_bin":0, "upper_bin":10, "count":1}'); + tdSql.checkData(3, 1, '{"lower_bin":0, "upper_bin":10, "count":1}'); + tdSql.checkData(4, 1, '{"lower_bin":0, "upper_bin":10, "count":1}'); + tdSql.checkData(5, 1, '{"lower_bin":0, "upper_bin":10, "count":1}'); + tdSql.checkData(6, 1, '{"lower_bin":0, "upper_bin":10, "count":1}'); + tdSql.checkData(7, 1, '{"lower_bin":0, "upper_bin":10, "count":1}'); + tdSql.checkData(8, 1, '{"lower_bin":0, "upper_bin":10, "count":1}'); + tdSql.checkData(9, 1, '{"lower_bin":0, "upper_bin":10, "count":1}'); + tdSql.checkData(10, 1, '{"lower_bin":0, "upper_bin":10, "count":1}'); + tdSql.checkData(11, 1, '{"lower_bin":0, "upper_bin":10, "count":1}'); + tdSql.checkData(12, 1, '{"lower_bin":0, "upper_bin":10, "count":0}'); + tdSql.checkData(13, 1, '{"lower_bin":0, "upper_bin":10, "count":0}'); + tdSql.checkData(14, 1, '{"lower_bin":0, "upper_bin":10, "count":0}'); + tdSql.checkData(15, 1, '{"lower_bin":0, "upper_bin":10, "count":0}'); + + tdSql.query('select histogram(col_int, "user_input", "[0,10]", 0) from tb interval(1w) sliding(1w);') + tdSql.checkRows(1) + tdSql.checkData(0, 1, '{"lower_bin":0, "upper_bin":10, "count":10}'); + + tdSql.query('select histogram(col_int, "user_input", "[0,10]", 0) from tb interval(1d) sliding(1d);') + tdSql.checkRows(1) + tdSql.checkData(0, 1, '{"lower_bin":0, "upper_bin":10, "count":10}'); + + tdSql.query('select histogram(col_int, "user_input", "[0,10]", 0) from tb interval(1h) sliding(1h);') + tdSql.checkRows(1) + tdSql.checkData(0, 1, '{"lower_bin":0, "upper_bin":10, "count":10}'); + + tdSql.query('select histogram(col_int, "user_input", "[0,10]", 0) from tb interval(1s) sliding(1s);') + tdSql.checkRows(16) + tdSql.checkData(0, 1, '{"lower_bin":0, "upper_bin":10, "count":0}'); + tdSql.checkData(1, 1, '{"lower_bin":0, "upper_bin":10, "count":0}'); + tdSql.checkData(2, 1, '{"lower_bin":0, "upper_bin":10, "count":1}'); + tdSql.checkData(3, 1, '{"lower_bin":0, "upper_bin":10, "count":1}'); + tdSql.checkData(4, 1, '{"lower_bin":0, "upper_bin":10, "count":1}'); + tdSql.checkData(5, 1, '{"lower_bin":0, "upper_bin":10, "count":1}'); + tdSql.checkData(6, 1, '{"lower_bin":0, "upper_bin":10, "count":1}'); + tdSql.checkData(7, 1, '{"lower_bin":0, "upper_bin":10, "count":1}'); + tdSql.checkData(8, 1, 
'{"lower_bin":0, "upper_bin":10, "count":1}'); + tdSql.checkData(9, 1, '{"lower_bin":0, "upper_bin":10, "count":1}'); + tdSql.checkData(10, 1, '{"lower_bin":0, "upper_bin":10, "count":1}'); + tdSql.checkData(11, 1, '{"lower_bin":0, "upper_bin":10, "count":1}'); + tdSql.checkData(12, 1, '{"lower_bin":0, "upper_bin":10, "count":0}'); + tdSql.checkData(13, 1, '{"lower_bin":0, "upper_bin":10, "count":0}'); + tdSql.checkData(14, 1, '{"lower_bin":0, "upper_bin":10, "count":0}'); + tdSql.checkData(15, 1, '{"lower_bin":0, "upper_bin":10, "count":0}'); + + tdSql.error('select histogram(col_int, "user_input", "[0,10]", 0) from tb where col_timestamp > now - 1w and col_timestamp < now + 1w interval(1w) fill(NULL);') + tdSql.error('select histogram(col_int, "user_input", "[0,10]", 0) from tb where col_timestamp > now - 1d and col_timestamp < now + 1d interval(1d) fill(None);') + tdSql.error('select histogram(col_int, "user_input", "[0,10]", 0) from tb where col_timestamp > now - 1h and col_timestamp < now + 1h interval(1h) fill(Prev);') + tdSql.error('select histogram(col_int, "user_input", "[0,10]", 0) from tb where col_timestamp > now - 1m and col_timestamp < now + 1m interval(1m) fill(Next);') + tdSql.error('select histogram(col_int, "user_input", "[0,10]", 0) from tb where col_timestamp > now - 1s and col_timestamp < now + 1s interval(1s) fill(Linear);') + tdSql.error('select histogram(col_int, "user_input", "[0,10]", 0) from tb where col_timestamp > now - 1a and col_timestamp < now + 1a interval(1a) fill(Value, 1);') + + #select group by + tdSql.error('select histogram(col_int, "user_input", "[0,10]", 0) from stb group by col_tinyint;') + tdSql.error('select histogram(col_int, "user_input", "[0,10]", 0) from stb group by col_smallint;') + tdSql.error('select histogram(col_int, "user_input", "[0,10]", 0) from stb group by col_int;') + tdSql.error('select histogram(col_int, "user_input", "[0,10]", 0) from stb group by col_bigint;') + tdSql.error('select histogram(col_int, "user_input", "[0,10]", 0) from stb group by col_bool;') + tdSql.error('select histogram(col_int, "user_input", "[0,10]", 0) from stb group by col_float;') + tdSql.error('select histogram(col_int, "user_input", "[0,10]", 0) from stb group by col_double;') + tdSql.error('select histogram(col_int, "user_input", "[0,10]", 0) from stb group by col_binary;') + tdSql.error('select histogram(col_int, "user_input", "[0,10]", 0) from stb group by col_nchar;') + + tdSql.error('select histogram(col_int, "user_input", "[0,10]", 0) from stb group by tag_tinyint;') + tdSql.error('select histogram(col_int, "user_input", "[0,10]", 0) from stb group by tag_smallint;') + tdSql.error('select histogram(col_int, "user_input", "[0,10]", 0) from stb group by tag_int;') + tdSql.error('select histogram(col_int, "user_input", "[0,10]", 0) from stb group by tag_bigint;') + tdSql.error('select histogram(col_int, "user_input", "[0,10]", 0) from stb group by tag_bool;') + tdSql.error('select histogram(col_int, "user_input", "[0,10]", 0) from stb group by tag_float;') + tdSql.error('select histogram(col_int, "user_input", "[0,10]", 0) from stb group by tag_double;') + tdSql.error('select histogram(col_int, "user_input", "[0,10]", 0) from stb group by tag_binary;') + tdSql.error('select histogram(col_int, "user_input", "[0,10]", 0) from stb group by tag_nchar;') + + tdSql.error('select histogram(col_int, "user_input", "[0,10]", 0) from stb group by tbname;') + + tdSql.error('select histogram(col_int, "user_input", "[0,10]", 0) from stb group by 
+ tdSql.error('select histogram(col_int, "user_input", "[0,10]", 0) from stb group by tag_smallint,col_smallint;')
+ tdSql.error('select histogram(col_int, "user_input", "[0,10]", 0) from stb group by tag_int,col_int;')
+ tdSql.error('select histogram(col_int, "user_input", "[0,10]", 0) from stb group by tag_bigint,col_bigint;')
+ tdSql.error('select histogram(col_int, "user_input", "[0,10]", 0) from stb group by tag_bool,col_bool;')
+ tdSql.error('select histogram(col_int, "user_input", "[0,10]", 0) from stb group by tag_float,col_float;')
+ tdSql.error('select histogram(col_int, "user_input", "[0,10]", 0) from stb group by tag_double,col_double;')
+ tdSql.error('select histogram(col_int, "user_input", "[0,10]", 0) from stb group by tag_binary,col_binary;')
+ tdSql.error('select histogram(col_int, "user_input", "[0,10]", 0) from stb group by tag_nchar,col_nchar;')
+
+ #select order by
+ tdSql.query('select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from stb order by col_timestamp;')
+ tdSql.checkRows(5)
+ tdSql.checkData(0, 0, '{"lower_bin":0, "upper_bin":3, "count":3}');
+ tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}');
+ tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}');
+ tdSql.checkData(3, 0, '{"lower_bin":7, "upper_bin":9, "count":2}');
+ tdSql.checkData(4, 0, '{"lower_bin":9, "upper_bin":15, "count":2}');
+
+ tdSql.query('select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from stb order by col_timestamp desc;')
+ tdSql.checkRows(5)
+ tdSql.checkData(0, 0, '{"lower_bin":0, "upper_bin":3, "count":3}');
+ tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}');
+ tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}');
+ tdSql.checkData(3, 0, '{"lower_bin":7, "upper_bin":9, "count":2}');
+ tdSql.checkData(4, 0, '{"lower_bin":9, "upper_bin":15, "count":2}');
+
+ tdSql.error('select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from stb order by col_tinyint;')
+ tdSql.error('select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from stb order by col_tinyint desc;')
+ tdSql.error('select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from stb order by col_smallint;')
+ tdSql.error('select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from stb order by col_smallint desc;')
+ tdSql.error('select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from stb order by col_int;')
+ tdSql.error('select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from stb order by col_int desc;')
+ tdSql.error('select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from stb order by col_bigint;')
+ tdSql.error('select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from stb order by col_bigint desc;')
+ tdSql.error('select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from stb order by col_bool;')
+ tdSql.error('select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from stb order by col_bool desc;')
+ tdSql.error('select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from stb order by col_float;')
+ tdSql.error('select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from stb order by col_float desc;')
+ tdSql.error('select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from stb order by col_double;')
+ tdSql.error('select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from stb order by col_double desc;')
+ tdSql.error('select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from stb order by col_double;')
tdSql.error('select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from stb order by col_double desc;') + + tdSql.error('select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from stb order by tag_timestamp;') + tdSql.error('select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from stb order by tag_timestamp desc;') + tdSql.error('select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from stb order by tag_tinyint;') + tdSql.error('select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from stb order by tag_tinyint desc;') + tdSql.error('select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from stb order by tag_smallint;') + tdSql.error('select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from stb order by tag_smallint desc;') + tdSql.error('select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from stb order by tag_int;') + tdSql.error('select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from stb order by tag_int desc;') + tdSql.error('select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from stb order by tag_bigint;') + tdSql.error('select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from stb order by tag_bigint desc;') + tdSql.error('select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from stb order by tag_bool;') + tdSql.error('select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from stb order by tag_bool desc;') + tdSql.error('select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from stb order by tag_float;') + tdSql.error('select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from stb order by tag_float desc;') + tdSql.error('select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from stb order by tag_double;') + tdSql.error('select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from stb order by tag_double desc;') + tdSql.error('select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from stb order by tag_double;') + tdSql.error('select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from stb order by tag_double desc;') + + tdSql.error('select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from stb order by tbname;') + tdSql.error('select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from stb order by tbname desc;') + + tdSql.error('select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from stb order by tag_timestamp,col_timestamp;') + tdSql.error('select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from stb order by tag_timestamp,col_timestamp desc;') + tdSql.error('select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from stb order by tag_tinyint,col_timestamp;') + tdSql.error('select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from stb order by tag_tinyint,col_timestamp desc;') + tdSql.error('select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from stb order by tag_smallint,col_timestamp;') + tdSql.error('select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from stb order by tag_smallint,col_timestamp desc;') + tdSql.error('select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from stb order by tag_int,col_timestamp;') + tdSql.error('select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from stb order by tag_int,col_timestamp desc;') + tdSql.error('select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from stb order by tag_bigint,col_timestamp;') + tdSql.error('select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from stb order by 
tag_bigint,col_timestamp desc;') + tdSql.error('select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from stb order by tag_bool,col_timestamp;') + tdSql.error('select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from stb order by tag_bool,col_timestamp desc;') + tdSql.error('select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from stb order by tag_float,col_timestamp;') + tdSql.error('select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from stb order by tag_float,col_timestamp desc;') + tdSql.error('select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from stb order by tag_double,col_timestamp;') + tdSql.error('select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from stb order by tag_double,col_timestamp desc;') + tdSql.error('select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from stb order by tag_double,col_timestamp;') + tdSql.error('select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from stb order by tag_double,col_timestamp desc;') + + #select limit/offset + tdSql.query('select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from stb limit 3;') + tdSql.checkRows(3) + tdSql.checkData(0, 0, '{"lower_bin":0, "upper_bin":3, "count":3}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}'); + tdSql.query('select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from ctb limit 3;') + tdSql.checkRows(3) + tdSql.checkData(0, 0, '{"lower_bin":0, "upper_bin":3, "count":3}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}'); + tdSql.query('select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from tb limit 3;') + tdSql.checkRows(3) + tdSql.checkData(0, 0, '{"lower_bin":0, "upper_bin":3, "count":3}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}'); + + tdSql.query('select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from stb limit 3 offset 2;') + tdSql.checkRows(3) + tdSql.checkData(0, 0, '{"lower_bin":5, "upper_bin":7, "count":2}'); + tdSql.checkData(1, 0, '{"lower_bin":7, "upper_bin":9, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":9, "upper_bin":15, "count":2}'); + tdSql.query('select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from ctb limit 3 offset 2;') + tdSql.checkRows(3) + tdSql.checkData(0, 0, '{"lower_bin":5, "upper_bin":7, "count":2}'); + tdSql.checkData(1, 0, '{"lower_bin":7, "upper_bin":9, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":9, "upper_bin":15, "count":2}'); + tdSql.query('select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from tb limit 3 offset 2;') + tdSql.checkRows(3) + tdSql.checkData(0, 0, '{"lower_bin":5, "upper_bin":7, "count":2}'); + tdSql.checkData(1, 0, '{"lower_bin":7, "upper_bin":9, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":9, "upper_bin":15, "count":2}'); + + tdSql.query('select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from stb limit 2,3;') + tdSql.checkRows(3) + tdSql.checkData(0, 0, '{"lower_bin":5, "upper_bin":7, "count":2}'); + tdSql.checkData(1, 0, '{"lower_bin":7, "upper_bin":9, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":9, "upper_bin":15, "count":2}'); + tdSql.query('select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from ctb limit 2,3;') + tdSql.checkRows(3) + tdSql.checkData(0, 0, '{"lower_bin":5, "upper_bin":7, 
"count":2}'); + tdSql.checkData(1, 0, '{"lower_bin":7, "upper_bin":9, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":9, "upper_bin":15, "count":2}'); + tdSql.query('select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from tb limit 2,3;') + tdSql.checkRows(3) + tdSql.checkData(0, 0, '{"lower_bin":5, "upper_bin":7, "count":2}'); + tdSql.checkData(1, 0, '{"lower_bin":7, "upper_bin":9, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":9, "upper_bin":15, "count":2}'); + + #nested query + tdSql.query('select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from (select * from stb);') + tdSql.checkRows(5) + tdSql.checkData(0, 0, '{"lower_bin":0, "upper_bin":3, "count":3}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}'); + tdSql.checkData(3, 0, '{"lower_bin":7, "upper_bin":9, "count":2}'); + tdSql.checkData(4, 0, '{"lower_bin":9, "upper_bin":15, "count":2}'); + tdSql.query('select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from (select * from ctb);') + tdSql.checkRows(5) + tdSql.checkData(0, 0, '{"lower_bin":0, "upper_bin":3, "count":3}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}'); + tdSql.checkData(3, 0, '{"lower_bin":7, "upper_bin":9, "count":2}'); + tdSql.checkData(4, 0, '{"lower_bin":9, "upper_bin":15, "count":2}'); + tdSql.query('select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from (select * from tb);') + tdSql.checkRows(5) + tdSql.checkData(0, 0, '{"lower_bin":0, "upper_bin":3, "count":3}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}'); + tdSql.checkData(3, 0, '{"lower_bin":7, "upper_bin":9, "count":2}'); + tdSql.checkData(4, 0, '{"lower_bin":9, "upper_bin":15, "count":2}'); + + tdSql.query('select histogram(val, "user_input", "[0,3,5,7,9,15]", 0) from (select col_int as val from stb);') + tdSql.checkRows(5) + tdSql.checkData(0, 0, '{"lower_bin":0, "upper_bin":3, "count":3}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}'); + tdSql.checkData(3, 0, '{"lower_bin":7, "upper_bin":9, "count":2}'); + tdSql.checkData(4, 0, '{"lower_bin":9, "upper_bin":15, "count":2}'); + tdSql.query('select histogram(val, "user_input", "[0,3,5,7,9,15]", 0) from (select col_int as val from ctb);') + tdSql.checkRows(5) + tdSql.checkData(0, 0, '{"lower_bin":0, "upper_bin":3, "count":3}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}'); + tdSql.checkData(3, 0, '{"lower_bin":7, "upper_bin":9, "count":2}'); + tdSql.checkData(4, 0, '{"lower_bin":9, "upper_bin":15, "count":2}'); + tdSql.query('select histogram(val, "user_input", "[0,3,5,7,9,15]", 0) from (select col_int as val from tb);') + tdSql.checkRows(5) + tdSql.checkData(0, 0, '{"lower_bin":0, "upper_bin":3, "count":3}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}'); + tdSql.checkData(3, 0, '{"lower_bin":7, "upper_bin":9, "count":2}'); + tdSql.checkData(4, 0, '{"lower_bin":9, "upper_bin":15, "count":2}'); + + tdSql.query('select * from (select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from stb);') + tdSql.checkRows(5) + tdSql.checkData(0, 0, '{"lower_bin":0, 
"upper_bin":3, "count":3}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}'); + tdSql.checkData(3, 0, '{"lower_bin":7, "upper_bin":9, "count":2}'); + tdSql.checkData(4, 0, '{"lower_bin":9, "upper_bin":15, "count":2}'); + tdSql.query('select * from (select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from ctb)') + tdSql.checkRows(5) + tdSql.checkData(0, 0, '{"lower_bin":0, "upper_bin":3, "count":3}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}'); + tdSql.checkData(3, 0, '{"lower_bin":7, "upper_bin":9, "count":2}'); + tdSql.checkData(4, 0, '{"lower_bin":9, "upper_bin":15, "count":2}'); + tdSql.query('select * from (select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from tb)') + tdSql.checkRows(5) + tdSql.checkData(0, 0, '{"lower_bin":0, "upper_bin":3, "count":3}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}'); + tdSql.checkData(3, 0, '{"lower_bin":7, "upper_bin":9, "count":2}'); + tdSql.checkData(4, 0, '{"lower_bin":9, "upper_bin":15, "count":2}'); + + tdSql.query('select _c0 from (select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from stb);') + tdSql.checkRows(5) + tdSql.checkData(0, 0, '{"lower_bin":0, "upper_bin":3, "count":3}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}'); + tdSql.checkData(3, 0, '{"lower_bin":7, "upper_bin":9, "count":2}'); + tdSql.checkData(4, 0, '{"lower_bin":9, "upper_bin":15, "count":2}'); + tdSql.query('select _c0 from (select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from ctb)') + tdSql.checkRows(5) + tdSql.checkData(0, 0, '{"lower_bin":0, "upper_bin":3, "count":3}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}'); + tdSql.checkData(3, 0, '{"lower_bin":7, "upper_bin":9, "count":2}'); + tdSql.checkData(4, 0, '{"lower_bin":9, "upper_bin":15, "count":2}'); + tdSql.query('select _c0 from (select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from tb)') + tdSql.checkRows(5) + tdSql.checkData(0, 0, '{"lower_bin":0, "upper_bin":3, "count":3}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}'); + tdSql.checkData(3, 0, '{"lower_bin":7, "upper_bin":9, "count":2}'); + tdSql.checkData(4, 0, '{"lower_bin":9, "upper_bin":15, "count":2}'); + + tdSql.query('select first(_c0) from (select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from stb);') + tdSql.checkRows(1) + tdSql.checkData(0, 0, '{"lower_bin":0, "upper_bin":3, "count":3}'); + tdSql.query('select first(_c0) from (select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from ctb)') + tdSql.checkRows(1) + tdSql.checkData(0, 0, '{"lower_bin":0, "upper_bin":3, "count":3}'); + tdSql.query('select first(_c0) from (select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from tb)') + tdSql.checkRows(1) + tdSql.checkData(0, 0, '{"lower_bin":0, "upper_bin":3, "count":3}'); + + tdSql.query('select last(_c0) from (select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from stb);') + tdSql.checkRows(1) + tdSql.checkData(0, 0, '{"lower_bin":9, "upper_bin":15, "count":2}'); + tdSql.query('select last(_c0) from (select 
histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from ctb)') + tdSql.checkRows(1) + tdSql.checkData(0, 0, '{"lower_bin":9, "upper_bin":15, "count":2}'); + tdSql.query('select last(_c0) from (select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from tb)') + tdSql.checkRows(1) + tdSql.checkData(0, 0, '{"lower_bin":9, "upper_bin":15, "count":2}'); + + + tdSql.query('select _c0 from (select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from stb limit 3);') + tdSql.checkRows(3) + tdSql.checkData(0, 0, '{"lower_bin":0, "upper_bin":3, "count":3}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}'); + tdSql.query('select _c0 from (select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from ctb limit 3)') + tdSql.checkRows(3) + tdSql.checkData(0, 0, '{"lower_bin":0, "upper_bin":3, "count":3}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}'); + tdSql.query('select _c0 from (select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from tb limit 3)') + tdSql.checkRows(3) + tdSql.checkData(0, 0, '{"lower_bin":0, "upper_bin":3, "count":3}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}'); + + tdSql.query('select _c0 from (select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from stb) limit 3;') + tdSql.checkRows(3) + tdSql.checkData(0, 0, '{"lower_bin":0, "upper_bin":3, "count":3}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}'); + tdSql.query('select _c0 from (select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from ctb) limit 3') + tdSql.checkRows(3) + tdSql.checkData(0, 0, '{"lower_bin":0, "upper_bin":3, "count":3}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}'); + tdSql.query('select _c0 from (select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from tb) limit 3') + tdSql.checkRows(3) + tdSql.checkData(0, 0, '{"lower_bin":0, "upper_bin":3, "count":3}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}'); + + tdSql.query('select _c0 from (select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from stb order by col_timestamp);') + tdSql.checkRows(5) + tdSql.checkData(0, 0, '{"lower_bin":0, "upper_bin":3, "count":3}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}'); + tdSql.checkData(3, 0, '{"lower_bin":7, "upper_bin":9, "count":2}'); + tdSql.checkData(4, 0, '{"lower_bin":9, "upper_bin":15, "count":2}'); + tdSql.query('select _c0 from (select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from ctb order by col_timestamp)') + tdSql.checkRows(5) + tdSql.checkData(0, 0, '{"lower_bin":0, "upper_bin":3, "count":3}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}'); + tdSql.checkData(3, 0, '{"lower_bin":7, "upper_bin":9, "count":2}'); + tdSql.checkData(4, 0, '{"lower_bin":9, "upper_bin":15, "count":2}'); + tdSql.query('select _c0 from (select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from tb order by 
col_timestamp)') + tdSql.checkRows(5) + tdSql.checkData(0, 0, '{"lower_bin":0, "upper_bin":3, "count":3}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}'); + tdSql.checkData(3, 0, '{"lower_bin":7, "upper_bin":9, "count":2}'); + tdSql.checkData(4, 0, '{"lower_bin":9, "upper_bin":15, "count":2}'); + + #join + tdSql.execute("create stable stb1 (col_timestamp timestamp, col_tinyint tinyint, col_smallint smallint, col_int int, col_bigint bigint, col_float float, col_double double, col_bool bool, col_binary binary(10), col_nchar nchar(10)) \ + tags(tag_timestamp timestamp, tag_tinyint tinyint, tag_smallint smallint, tag_int int, tag_bigint bigint, tag_float float, tag_double double, tag_bool bool, tag_binary binary(10), tag_nchar nchar(10));") + tdSql.execute("create table ctb1 using stb1 tags (now, 1, 1, 1, 1, 1.0, 1.0, true, 'abc', 'abc');") + tdSql.execute("create table tb1 (col_timestamp timestamp, col_tinyint tinyint, col_smallint smallint, col_int int, col_bigint bigint, col_float float, col_double double, col_bool bool, col_binary binary(10), col_nchar nchar(10));") + + tdSql.execute("insert into ctb1 values ('2022-01-01 00:00:00', -9, -9, -9, -9, -9.5, -9.5, true, 'abc', 'abc');") + tdSql.execute("insert into ctb1 values ('2022-01-01 00:00:01', -1, -1, -1, -1, -1.5, -1.5, true, 'abc', 'abc');") + tdSql.execute("insert into ctb1 values ('2022-01-01 00:00:02', 1, 1, 1, 1, 1.5, 1.5, true, 'abc', 'abc');") + tdSql.execute("insert into ctb1 values ('2022-01-01 00:00:03', 2, 2, 2, 2, 2.5, 2.5, true, 'abc', 'abc');") + tdSql.execute("insert into ctb1 values ('2022-01-01 00:00:04', 3, 3, 3, 3, 3.5, 3.5, true, 'abc', 'abc');") + tdSql.execute("insert into ctb1 values ('2022-01-01 00:00:05', 4, 4, 4, 4, 4.5, 4.5, true, 'abc', 'abc');") + tdSql.execute("insert into ctb1 values ('2022-01-01 00:00:06', 5, 5, 5, 5, 5.5, 5.5, true, 'abc', 'abc');") + tdSql.execute("insert into ctb1 values ('2022-01-01 00:00:07', 6, 6, 6, 6, 6.5, 6.5, true, 'abc', 'abc');") + tdSql.execute("insert into ctb1 values ('2022-01-01 00:00:08', 7, 7, 7, 7, 7.5, 7.5, false, 'abc', 'abc');") + tdSql.execute("insert into ctb1 values ('2022-01-01 00:00:09', 8, 8, 8, 8, 8.5, 8.5, false, 'abc', 'abc');") + tdSql.execute("insert into ctb1 values ('2022-01-01 00:00:10', 9, 9, 9, 9, 9.5, 9.5, false, 'abc', 'abc');") + tdSql.execute("insert into ctb1 values ('2022-01-01 00:00:11', 10, 10, 10, 10, 10.5, 10.5, false, 'abc', 'abc');") + tdSql.execute("insert into ctb1 values ('2022-01-01 00:00:12', 15, 15, 15, 15, 15.5, 15.5, false, 'abc', 'abc');") + tdSql.execute("insert into ctb1 values ('2022-01-01 00:00:13', 20, 20, 20, 20, 20.5, 20.5, false, 'abc', 'abc');") + tdSql.execute("insert into ctb1 values ('2022-01-01 00:00:14', 99, 99, 99, 99, 99.5, 99.5, false, 'abc', 'abc');") + tdSql.execute("insert into ctb1 values ('2022-01-01 00:00:15', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL);") + + tdSql.execute("insert into tb1 values ('2022-01-01 00:00:00', -9, -9, -9, -9, -9.5, -9.5, true, 'abc', 'abc');") + tdSql.execute("insert into tb1 values ('2022-01-01 00:00:01', -1, -1, -1, -1, -1.5, -1.5, true, 'abc', 'abc');") + tdSql.execute("insert into tb1 values ('2022-01-01 00:00:02', 1, 1, 1, 1, 1.5, 1.5, true, 'abc', 'abc');") + tdSql.execute("insert into tb1 values ('2022-01-01 00:00:03', 2, 2, 2, 2, 2.5, 2.5, true, 'abc', 'abc');") + tdSql.execute("insert into tb1 values ('2022-01-01 00:00:04', 3, 3, 3, 3, 3.5, 3.5, true, 'abc', 'abc');") + 
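The join fixture being built here (and continuing below for tb1, ctb2, stb2, and tb2) hand-writes one INSERT per row. A compact sketch of a loop that would generate the same 16-row pattern; `populate` is a hypothetical helper, and `tdSql` is assumed to be initialized as in the surrounding test file:

```python
import datetime
from util.sql import *  # provides tdSql, as in the surrounding tests

# Hypothetical helper: regenerates the 16-row fixture written by hand above.
ROW_VALUES = [-9, -1, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 15, 20, 99]

def populate(table):
    base = datetime.datetime(2022, 1, 1)
    for i, v in enumerate(ROW_VALUES):
        ts = (base + datetime.timedelta(seconds=i)).strftime('%Y-%m-%d %H:%M:%S')
        flag = 'true' if v < 7 else 'false'       # bool column flips at the 7 row, as above
        frac = v + 0.5 if v > 0 else v - 0.5      # 1 -> 1.5, -9 -> -9.5, matching the fixture
        tdSql.execute(f"insert into {table} values ('{ts}', {v}, {v}, {v}, {v}, "
                      f"{frac}, {frac}, {flag}, 'abc', 'abc');")
    # trailing all-NULL row, as in the last hand-written statement
    ts = (base + datetime.timedelta(seconds=len(ROW_VALUES))).strftime('%Y-%m-%d %H:%M:%S')
    tdSql.execute(f"insert into {table} values ('{ts}', " + ", ".join(["NULL"] * 9) + ");")

# populate("ctb1"); populate("tb1"); populate("ctb2"); populate("tb2")
```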
tdSql.execute("insert into tb1 values ('2022-01-01 00:00:05', 4, 4, 4, 4, 4.5, 4.5, true, 'abc', 'abc');") + tdSql.execute("insert into tb1 values ('2022-01-01 00:00:06', 5, 5, 5, 5, 5.5, 5.5, true, 'abc', 'abc');") + tdSql.execute("insert into tb1 values ('2022-01-01 00:00:07', 6, 6, 6, 6, 6.5, 6.5, true, 'abc', 'abc');") + tdSql.execute("insert into tb1 values ('2022-01-01 00:00:08', 7, 7, 7, 7, 7.5, 7.5, false, 'abc', 'abc');") + tdSql.execute("insert into tb1 values ('2022-01-01 00:00:09', 8, 8, 8, 8, 8.5, 8.5, false, 'abc', 'abc');") + tdSql.execute("insert into tb1 values ('2022-01-01 00:00:10', 9, 9, 9, 9, 9.5, 9.5, false, 'abc', 'abc');") + tdSql.execute("insert into tb1 values ('2022-01-01 00:00:11', 10, 10, 10, 10, 10.5, 10.5, false, 'abc', 'abc');") + tdSql.execute("insert into tb1 values ('2022-01-01 00:00:12', 15, 15, 15, 15, 15.5, 15.5, false, 'abc', 'abc');") + tdSql.execute("insert into tb1 values ('2022-01-01 00:00:13', 20, 20, 20, 20, 20.5, 20.5, false, 'abc', 'abc');") + tdSql.execute("insert into tb1 values ('2022-01-01 00:00:14', 99, 99, 99, 99, 99.5, 99.5, false, 'abc', 'abc');") + tdSql.execute("insert into tb1 values ('2022-01-01 00:00:15', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL);") + + tdSql.execute("create stable stb2 (col_timestamp timestamp, col_tinyint tinyint, col_smallint smallint, col_int int, col_bigint bigint, col_float float, col_double double, col_bool bool, col_binary binary(10), col_nchar nchar(10)) \ + tags(tag_timestamp timestamp, tag_tinyint tinyint, tag_smallint smallint, tag_int int, tag_bigint bigint, tag_float float, tag_double double, tag_bool bool, tag_binary binary(10), tag_nchar nchar(10));") + tdSql.execute("create table ctb2 using stb2 tags (now, 1, 1, 1, 1, 1.0, 1.0, true, 'abc', 'abc');") + tdSql.execute("create table tb2 (col_timestamp timestamp, col_tinyint tinyint, col_smallint smallint, col_int int, col_bigint bigint, col_float float, col_double double, col_bool bool, col_binary binary(10), col_nchar nchar(10));") + + tdSql.execute("insert into ctb2 values ('2022-01-01 00:00:00', -9, -9, -9, -9, -9.5, -9.5, true, 'abc', 'abc');") + tdSql.execute("insert into ctb2 values ('2022-01-01 00:00:01', -1, -1, -1, -1, -1.5, -1.5, true, 'abc', 'abc');") + tdSql.execute("insert into ctb2 values ('2022-01-01 00:00:02', 1, 1, 1, 1, 1.5, 1.5, true, 'abc', 'abc');") + tdSql.execute("insert into ctb2 values ('2022-01-01 00:00:03', 2, 2, 2, 2, 2.5, 2.5, true, 'abc', 'abc');") + tdSql.execute("insert into ctb2 values ('2022-01-01 00:00:04', 3, 3, 3, 3, 3.5, 3.5, true, 'abc', 'abc');") + tdSql.execute("insert into ctb2 values ('2022-01-01 00:00:05', 4, 4, 4, 4, 4.5, 4.5, true, 'abc', 'abc');") + tdSql.execute("insert into ctb2 values ('2022-01-01 00:00:06', 5, 5, 5, 5, 5.5, 5.5, true, 'abc', 'abc');") + tdSql.execute("insert into ctb2 values ('2022-01-01 00:00:07', 6, 6, 6, 6, 6.5, 6.5, true, 'abc', 'abc');") + tdSql.execute("insert into ctb2 values ('2022-01-01 00:00:08', 7, 7, 7, 7, 7.5, 7.5, false, 'abc', 'abc');") + tdSql.execute("insert into ctb2 values ('2022-01-01 00:00:09', 8, 8, 8, 8, 8.5, 8.5, false, 'abc', 'abc');") + tdSql.execute("insert into ctb2 values ('2022-01-01 00:00:10', 9, 9, 9, 9, 9.5, 9.5, false, 'abc', 'abc');") + tdSql.execute("insert into ctb2 values ('2022-01-01 00:00:11', 10, 10, 10, 10, 10.5, 10.5, false, 'abc', 'abc');") + tdSql.execute("insert into ctb2 values ('2022-01-01 00:00:12', 15, 15, 15, 15, 15.5, 15.5, false, 'abc', 'abc');") + tdSql.execute("insert into ctb2 values ('2022-01-01 00:00:13', 20, 20, 
20, 20, 20.5, 20.5, false, 'abc', 'abc');") + tdSql.execute("insert into ctb2 values ('2022-01-01 00:00:14', 99, 99, 99, 99, 99.5, 99.5, false, 'abc', 'abc');") + tdSql.execute("insert into ctb2 values ('2022-01-01 00:00:15', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL);") + + tdSql.execute("insert into tb2 values ('2022-01-01 00:00:00', -9, -9, -9, -9, -9.5, -9.5, true, 'abc', 'abc');") + tdSql.execute("insert into tb2 values ('2022-01-01 00:00:01', -1, -1, -1, -1, -1.5, -1.5, true, 'abc', 'abc');") + tdSql.execute("insert into tb2 values ('2022-01-01 00:00:02', 1, 1, 1, 1, 1.5, 1.5, true, 'abc', 'abc');") + tdSql.execute("insert into tb2 values ('2022-01-01 00:00:03', 2, 2, 2, 2, 2.5, 2.5, true, 'abc', 'abc');") + tdSql.execute("insert into tb2 values ('2022-01-01 00:00:04', 3, 3, 3, 3, 3.5, 3.5, true, 'abc', 'abc');") + tdSql.execute("insert into tb2 values ('2022-01-01 00:00:05', 4, 4, 4, 4, 4.5, 4.5, true, 'abc', 'abc');") + tdSql.execute("insert into tb2 values ('2022-01-01 00:00:06', 5, 5, 5, 5, 5.5, 5.5, true, 'abc', 'abc');") + tdSql.execute("insert into tb2 values ('2022-01-01 00:00:07', 6, 6, 6, 6, 6.5, 6.5, true, 'abc', 'abc');") + tdSql.execute("insert into tb2 values ('2022-01-01 00:00:08', 7, 7, 7, 7, 7.5, 7.5, false, 'abc', 'abc');") + tdSql.execute("insert into tb2 values ('2022-01-01 00:00:09', 8, 8, 8, 8, 8.5, 8.5, false, 'abc', 'abc');") + tdSql.execute("insert into tb2 values ('2022-01-01 00:00:10', 9, 9, 9, 9, 9.5, 9.5, false, 'abc', 'abc');") + tdSql.execute("insert into tb2 values ('2022-01-01 00:00:11', 10, 10, 10, 10, 10.5, 10.5, false, 'abc', 'abc');") + tdSql.execute("insert into tb2 values ('2022-01-01 00:00:12', 15, 15, 15, 15, 15.5, 15.5, false, 'abc', 'abc');") + tdSql.execute("insert into tb2 values ('2022-01-01 00:00:13', 20, 20, 20, 20, 20.5, 20.5, false, 'abc', 'abc');") + tdSql.execute("insert into tb2 values ('2022-01-01 00:00:14', 99, 99, 99, 99, 99.5, 99.5, false, 'abc', 'abc');") + tdSql.execute("insert into tb2 values ('2022-01-01 00:00:15', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL);") + + tdSql.query('select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from tb1, tb2 where tb1.col_timestamp = tb2.col_timestamp;'); + tdSql.checkRows(5) + tdSql.checkData(0, 0, '{"lower_bin":0, "upper_bin":3, "count":3}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}'); + tdSql.checkData(3, 0, '{"lower_bin":7, "upper_bin":9, "count":2}'); + tdSql.checkData(4, 0, '{"lower_bin":9, "upper_bin":15, "count":2}'); + + tdSql.query('select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from ctb1, ctb2 where ctb1.col_timestamp = ctb2.col_timestamp;'); + tdSql.checkRows(5) + tdSql.checkData(0, 0, '{"lower_bin":0, "upper_bin":3, "count":3}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":5, "upper_bin":7, "count":2}'); + tdSql.checkData(3, 0, '{"lower_bin":7, "upper_bin":9, "count":2}'); + tdSql.checkData(4, 0, '{"lower_bin":9, "upper_bin":15, "count":2}'); + + #stable join will cause crash + #tdSql.query('select histogram(col_int, "user_input", "[0,3,5,7,9,15]", 0) from stb1, stb2 where stb1.col_timestamp = stb2.col_timestamp and stb1.tag_int = stb2.tag_int;'); + #tdSql.checkRows(5) + #tdSql.checkData(0, 0, "(0:3]:3"); + #tdSql.checkData(1, 0, "(3:5]:2"); + #tdSql.checkData(2, 0, "(5:7]:2"); + #tdSql.checkData(3, 0, "(7:9]:2"); + #tdSql.checkData(4, 0, "(9:15]:2"); + + #union all + 
tdSql.query('select histogram(col_int, \'user_input\', \'[1,3,5]\', 0) from tb1 union all select histogram(col_int, \'user_input\', \'[1,3,5]\', 0) from tb2;') + tdSql.checkRows(4) + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":2}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":1, "upper_bin":3, "count":2}'); + tdSql.checkData(3, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.query('select histogram(col_int, \'user_input\', \'[1,3,5]\', 0) from ctb1 union all select histogram(col_int, \'user_input\', \'[1,3,5]\', 0) from ctb2;') + tdSql.checkRows(4) + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":2}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":1, "upper_bin":3, "count":2}'); + tdSql.checkData(3, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.query('select histogram(col_int, \'user_input\', \'[1,3,5]\', 0) from stb1 union all select histogram(col_int, \'user_input\', \'[1,3,5]\', 0) from stb2;') + tdSql.checkRows(4) + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":2}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":1, "upper_bin":3, "count":2}'); + tdSql.checkData(3, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + + tdSql.query('select histogram(col_int, \'linear_bin\', \'{"start":1, "width":2, "count":2, "infinity":false}\', 0) from tb1 union all select histogram(col_int, \'linear_bin\', \'{"start":1, "width":2, "count":2, "infinity":false}\', 0) from tb2;') + tdSql.checkRows(4) + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":2}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":1, "upper_bin":3, "count":2}'); + tdSql.checkData(3, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.query('select histogram(col_int, \'linear_bin\', \'{"start":1, "width":2, "count":2, "infinity":false}\', 0) from ctb1 union all select histogram(col_int, \'linear_bin\', \'{"start":1, "width":2, "count":2, "infinity":false}\', 0) from ctb2;') + tdSql.checkRows(4) + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":2}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":1, "upper_bin":3, "count":2}'); + tdSql.checkData(3, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.query('select histogram(col_int, \'linear_bin\', \'{"start":1, "width":2, "count":2, "infinity":false}\', 0) from stb1 union all select histogram(col_int, \'linear_bin\', \'{"start":1, "width":2, "count":2, "infinity":false}\', 0) from stb2;') + tdSql.checkRows(4) + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":3, "count":2}'); + tdSql.checkData(1, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":1, "upper_bin":3, "count":2}'); + tdSql.checkData(3, 0, '{"lower_bin":3, "upper_bin":5, "count":2}'); + + tdSql.query('select histogram(col_int, \'log_bin\', \'{"start":1, "factor":2, "count":2, "infinity":false}\', 0) from tb1 union all select histogram(col_int, \'log_bin\', \'{"start":1, "factor":2, "count":2, "infinity":false}\', 0) from tb2;') + tdSql.checkRows(4) + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":2, "count":1}'); + tdSql.checkData(1, 0, '{"lower_bin":2, "upper_bin":4, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":1, "upper_bin":2, "count":1}'); + tdSql.checkData(3, 0, 
'{"lower_bin":2, "upper_bin":4, "count":2}'); + tdSql.query('select histogram(col_int, \'log_bin\', \'{"start":1, "factor":2, "count":2, "infinity":false}\', 0) from ctb1 union all select histogram(col_int, \'log_bin\', \'{"start":1, "factor":2, "count":2, "infinity":false}\', 0) from ctb2;') + tdSql.checkRows(4) + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":2, "count":1}'); + tdSql.checkData(1, 0, '{"lower_bin":2, "upper_bin":4, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":1, "upper_bin":2, "count":1}'); + tdSql.checkData(3, 0, '{"lower_bin":2, "upper_bin":4, "count":2}'); + tdSql.query('select histogram(col_int, \'log_bin\', \'{"start":1, "factor":2, "count":2, "infinity":false}\', 0) from stb1 union all select histogram(col_int, \'log_bin\', \'{"start":1, "factor":2, "count":2, "infinity":false}\', 0) from stb2;') + tdSql.checkRows(4) + tdSql.checkData(0, 0, '{"lower_bin":1, "upper_bin":2, "count":1}'); + tdSql.checkData(1, 0, '{"lower_bin":2, "upper_bin":4, "count":2}'); + tdSql.checkData(2, 0, '{"lower_bin":1, "upper_bin":2, "count":1}'); + tdSql.checkData(3, 0, '{"lower_bin":2, "upper_bin":4, "count":2}'); + + + tdSql.execute('drop database db') + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/system-test/2-query/hyperloglog.py b/tests/system-test/2-query/hyperloglog.py new file mode 100644 index 0000000000000000000000000000000000000000..35703e441dd3465054d9b7b451c651f906da7e45 --- /dev/null +++ b/tests/system-test/2-query/hyperloglog.py @@ -0,0 +1,361 @@ +import datetime + +from util.log import * +from util.sql import * +from util.cases import * +from util.dnodes import * + +PRIMARY_COL = "ts" + +INT_COL = "c1" +BINT_COL = "c2" +SINT_COL = "c3" +TINT_COL = "c4" +FLOAT_COL = "c5" +DOUBLE_COL = "c6" +BOOL_COL = "c7" + +BINARY_COL = "c8" +NCHAR_COL = "c9" +TS_COL = "c10" + +NUM_COL = [ INT_COL, BINT_COL, SINT_COL, TINT_COL, FLOAT_COL, DOUBLE_COL, ] +CHAR_COL = [ BINARY_COL, NCHAR_COL, ] +BOOLEAN_COL = [ BOOL_COL, ] +TS_TYPE_COL = [ TS_COL, ] + +ALL_COL = [ INT_COL, BINT_COL, SINT_COL, TINT_COL, FLOAT_COL, DOUBLE_COL, BOOL_COL, BINARY_COL, NCHAR_COL, TS_COL ] + +class TDTestCase: + + def init(self, conn, logSql): + tdLog.debug(f"start to excute {__file__}") + tdSql.init(conn.cursor()) + + def __query_condition(self,tbname): + query_condition = [f"cast({col} as bigint)" for col in ALL_COL] + for num_col in NUM_COL: + query_condition.extend( + ( + f"{tbname}.{num_col}", + f"abs( {tbname}.{num_col} )", + f"acos( {tbname}.{num_col} )", + f"asin( {tbname}.{num_col} )", + f"atan( {tbname}.{num_col} )", + f"avg( {tbname}.{num_col} )", + f"ceil( {tbname}.{num_col} )", + f"cos( {tbname}.{num_col} )", + f"count( {tbname}.{num_col} )", + f"floor( {tbname}.{num_col} )", + f"log( {tbname}.{num_col}, {tbname}.{num_col})", + f"max( {tbname}.{num_col} )", + f"min( {tbname}.{num_col} )", + f"pow( {tbname}.{num_col}, 2)", + f"round( {tbname}.{num_col} )", + f"sum( {tbname}.{num_col} )", + f"sin( {tbname}.{num_col} )", + f"sqrt( {tbname}.{num_col} )", + f"tan( {tbname}.{num_col} )", + f"cast( {tbname}.{num_col} as timestamp)", + ) + ) + query_condition.extend((f"{num_col} + {any_col}" for any_col in ALL_COL)) + for char_col in CHAR_COL: + query_condition.extend( + ( + f"count({tbname}.{char_col})", + f"sum(cast({tbname}.{char_col}) as bigint)", + f"max(cast({tbname}.{char_col}) as bigint)", + f"min(cast({tbname}.{char_col}) as bigint)", + 
f"avg(cast({tbname}.{char_col}) as bigint)", + ) + ) + # query_condition.extend( + # ( + # 1010, + # ) + # ) + + return query_condition + + def __join_condition(self, tb_list, filter=PRIMARY_COL, INNER=False): + table_reference = tb_list[0] + join_condition = table_reference + join = "inner join" if INNER else "join" + for i in range(len(tb_list[1:])): + join_condition += f" {join} {tb_list[i+1]} on {table_reference}.{filter}={tb_list[i+1]}.{filter}" + + return join_condition + + def __where_condition(self, col=None, tbname=None, query_conditon=None): + if query_conditon and isinstance(query_conditon, str): + if query_conditon.startswith("count"): + query_conditon = query_conditon[6:-1] + elif query_conditon.startswith("max"): + query_conditon = query_conditon[4:-1] + elif query_conditon.startswith("sum"): + query_conditon = query_conditon[4:-1] + elif query_conditon.startswith("min"): + query_conditon = query_conditon[4:-1] + + if query_conditon: + return f" where {query_conditon} is not null" + if col in NUM_COL: + return f" where abs( {tbname}.{col} ) >= 0" + if col in CHAR_COL: + return f" where lower( {tbname}.{col} ) like 'bina%' or lower( {tbname}.{col} ) like '_cha%' " + if col in BOOLEAN_COL: + return f" where {tbname}.{col} in (false, true) " + if col in TS_TYPE_COL or col in PRIMARY_COL: + return f" where cast( {tbname}.{col} as binary(16) ) is not null " + + return "" + + def __group_condition(self, col, having = None): + if isinstance(col, str): + if col.startswith("count"): + col = col[6:-1] + elif col.startswith("max"): + col = col[4:-1] + elif col.startswith("sum"): + col = col[4:-1] + elif col.startswith("min"): + col = col[4:-1] + return f" group by {col} having {having}" if having else f" group by {col} " + + def __single_sql(self, select_clause, from_clause, where_condition="", group_condition=""): + if isinstance(select_clause, str) and "on" not in from_clause and select_clause.split(".")[0] != from_clause.split(".")[0]: + return + return f"select hyperloglog({select_clause}) from {from_clause} {where_condition} {group_condition}" + + @property + def __tb_list(self): + return [ + "ct1", + "ct4", + "t1", + "ct2", + "stb1", + ] + + def sql_list(self): + sqls = [] + __no_join_tblist = self.__tb_list + for tb in __no_join_tblist: + select_claus_list = self.__query_condition(tb) + for select_claus in select_claus_list: + group_claus = self.__group_condition(col=select_claus) + where_claus = self.__where_condition(query_conditon=select_claus) + having_claus = self.__group_condition(col=select_claus, having=f"{select_claus} is not null") + sqls.extend( + ( + self.__single_sql(select_claus, tb, where_claus, having_claus), + self.__single_sql(select_claus, tb,), + self.__single_sql(select_claus, tb, where_condition=where_claus), + self.__single_sql(select_claus, tb, group_condition=group_claus), + ) + ) + + # return filter(None, sqls) + return list(filter(None, sqls)) + + def __get_type(self, col): + if tdSql.cursor.istype(col, "BOOL"): + return "BOOL" + if tdSql.cursor.istype(col, "INT"): + return "INT" + if tdSql.cursor.istype(col, "BIGINT"): + return "BIGINT" + if tdSql.cursor.istype(col, "TINYINT"): + return "TINYINT" + if tdSql.cursor.istype(col, "SMALLINT"): + return "SMALLINT" + if tdSql.cursor.istype(col, "FLOAT"): + return "FLOAT" + if tdSql.cursor.istype(col, "DOUBLE"): + return "DOUBLE" + if tdSql.cursor.istype(col, "BINARY"): + return "BINARY" + if tdSql.cursor.istype(col, "NCHAR"): + return "NCHAR" + if tdSql.cursor.istype(col, "TIMESTAMP"): + return "TIMESTAMP" + 
if tdSql.cursor.istype(col, "JSON"): + return "JSON" + if tdSql.cursor.istype(col, "TINYINT UNSIGNED"): + return "TINYINT UNSIGNED" + if tdSql.cursor.istype(col, "SMALLINT UNSIGNED"): + return "SMALLINT UNSIGNED" + if tdSql.cursor.istype(col, "INT UNSIGNED"): + return "INT UNSIGNED" + if tdSql.cursor.istype(col, "BIGINT UNSIGNED"): + return "BIGINT UNSIGNED" + + def spread_check(self): + sqls = self.sql_list() + tdLog.printNoPrefix("===step 1: curent case, must return query OK") + for i in range(len(sqls)): + tdLog.info(f"sql: {sqls[i]}") + tdSql.query(sqls[i]) + + def __test_current(self): + tdSql.query("select hyperloglog(ts) from ct1") + tdSql.checkRows(1) + tdSql.query("select hyperloglog(c1) from ct2") + tdSql.checkRows(1) + tdSql.query("select hyperloglog(c1) from ct4 group by c1") + tdSql.checkRows(self.rows + 3) + tdSql.query("select hyperloglog(c1) from ct4 group by c7") + tdSql.checkRows(3) + tdSql.query("select hyperloglog(ct2.c1) from ct4 join ct2 on ct4.ts=ct2.ts") + tdSql.checkRows(1) + tdSql.checkData(0, 0, self.rows + 2) + tdSql.query("select hyperloglog(c1), c1 from stb1 group by c1") + for i in range(tdSql.queryRows): + tdSql.checkData(i, 0, 1) if tdSql.queryResult[i][1] is not None else tdSql.checkData(i, 0, 0) + + + + self.spread_check() + + def __test_error(self): + + tdLog.printNoPrefix("===step 0: err case, must return err") + tdSql.error( "select hyperloglog() from ct1" ) + tdSql.error( "select hyperloglog(c1, c2) from ct2" ) + tdSql.error( "select hyperloglog(1) from ct2" ) + tdSql.error( f"select hyperloglog({NUM_COL[0]}, {NUM_COL[1]}) from ct4" ) + tdSql.error( ''' select hyperloglog(['c1 + c1', 'c1 + c2', 'c1 + c3', 'c1 + c4', 'c1 + c5', 'c1 + c6', 'c1 + c7', 'c1 + c8', 'c1 + c9', 'c1 + c10']) + from ct1 + where ['c1 + c1', 'c1 + c2', 'c1 + c3', 'c1 + c4', 'c1 + c5', 'c1 + c6', 'c1 + c7', 'c1 + c8', 'c1 + c9', 'c1 + c10'] is not null + group by ['c1 + c1', 'c1 + c2', 'c1 + c3', 'c1 + c4', 'c1 + c5', 'c1 + c6', 'c1 + c7', 'c1 + c8', 'c1 + c9', 'c1 + c10'] + having ['c1 + c1', 'c1 + c2', 'c1 + c3', 'c1 + c4', 'c1 + c5', 'c1 + c6', 'c1 + c7', 'c1 + c8', 'c1 + c9', 'c1 + c10'] is not null ''' ) + + def all_test(self): + self.__test_error() + self.__test_current() + + def __create_tb(self): + + tdLog.printNoPrefix("==========step1:create table") + create_stb_sql = f'''create table stb1( + ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint, + {FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool, + {BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp + ) tags (t1 int) + ''' + create_ntb_sql = f'''create table t1( + ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint, + {FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool, + {BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp + ) + ''' + tdSql.execute(create_stb_sql) + tdSql.execute(create_ntb_sql) + + for i in range(4): + tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )') + { i % 32767 }, { i % 127}, { i * 1.11111 }, { i * 1000.1111 }, { i % 2} + + def __insert_data(self, rows): + now_time = int(datetime.datetime.timestamp(datetime.datetime.now()) * 1000) + for i in range(rows): + tdSql.execute( + f"insert into ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" + ) + tdSql.execute( + f"insert into ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 
}, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" + ) + tdSql.execute( + f"insert into ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" + ) + tdSql.execute( + f'''insert into ct1 values + ( { now_time - rows * 5 }, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar_测试_0', { now_time + 8 } ) + ( { now_time + 10000 }, { rows }, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar_测试_9', { now_time + 9 } ) + ''' + ) + + tdSql.execute( + f'''insert into ct4 values + ( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( + { now_time + 5184000000}, {pow(2,31)-pow(2,15)}, {pow(2,63)-pow(2,30)}, 32767, 127, + { 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000} + ) + ( + { now_time + 2592000000 }, {pow(2,31)-pow(2,16)}, {pow(2,63)-pow(2,31)}, 32766, 126, + { 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000} + ) + ''' + ) + + tdSql.execute( + f'''insert into ct2 values + ( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( + { now_time + 5184000000 }, { -1 * pow(2,31) + pow(2,15) }, { -1 * pow(2,63) + pow(2,30) }, -32766, -126, + { -1 * 3.2 * pow(10,38) }, { -1.2 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 } + ) + ( + { now_time + 2592000000 }, { -1 * pow(2,31) + pow(2,16) }, { -1 * pow(2,63) + pow(2,31) }, -32767, -127, + { - 3.3 * pow(10,38) }, { -1.3 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 } + ) + ''' + ) + + for i in range(rows): + insert_data = f'''insert into t1 values + ( { now_time - i * 3600000 }, {i}, {i * 11111}, { i % 32767 }, { i % 127}, { i * 1.11111 }, { i * 1000.1111 }, { i % 2}, + "binary_{i}", "nchar_测试_{i}", { now_time - 1000 * i } ) + ''' + tdSql.execute(insert_data) + tdSql.execute( + f'''insert into t1 values + ( { now_time + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time - (( rows // 2 ) * 60 + 30) * 60000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time - rows * 3600000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time + 7200000 }, { pow(2,31) - pow(2,15) }, { pow(2,63) - pow(2,30) }, 32767, 127, + { 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 }, + "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 } + ) + ( + { now_time + 3600000 } , { pow(2,31) - pow(2,16) }, { pow(2,63) - pow(2,31) }, 32766, 126, + { 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 }, + "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 } + ) + ''' + ) + + + def run(self): + tdSql.prepare() + + tdLog.printNoPrefix("==========step1:create table") + self.__create_tb() + + tdLog.printNoPrefix("==========step2:insert data") + self.rows = 10 + 
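As an aside on `__insert_data`, invoked just below: the large millisecond offsets it hard-codes are whole hour/day multiples. A short sketch making that arithmetic explicit; the constant names are illustrative only, not part of the patch:

```python
# The millisecond offsets used by __insert_data(), spelled out as day multiples.
MS_PER_HOUR = 60 * 60 * 1000
MS_PER_DAY = 24 * MS_PER_HOUR            # 86_400_000

assert 7776000000 == 90 * MS_PER_DAY     # ct4/ct2 row spacing: 90 days
assert 3888000000 == 45 * MS_PER_DAY     # offset of the mid-range NULL row: 45 days
assert   10800000 ==  3 * MS_PER_HOUR    # extra 3-hour shift on that NULL row
assert 5184000000 == 60 * MS_PER_DAY     # "limit-1" boundary row: 60 days ahead
assert 2592000000 == 30 * MS_PER_DAY     # "limit-2" boundary row: 30 days ahead
assert  172800000 ==  2 * MS_PER_DAY     # c10 timestamp offset: 2 days back
```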
self.__insert_data(self.rows) + + tdLog.printNoPrefix("==========step3:all check") + self.all_test() + + tdDnodes.stop(1) + tdDnodes.start(1) + + tdSql.execute("use db") + + tdLog.printNoPrefix("==========step4:after wal, all check again ") + self.all_test() + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/2-query/join.py b/tests/system-test/2-query/join.py index 8fc131e58173faf31fcc4ffbc8fab08f6e937aea..140808d3874915b56cc3a0ee559e352a1a0589ae 100644 --- a/tests/system-test/2-query/join.py +++ b/tests/system-test/2-query/join.py @@ -36,17 +36,14 @@ class TDTestCase: query_condition.extend( ( f"{tbname}.{char_col}", - f"upper( {tbname}.{char_col} )", + # f"upper( {tbname}.{char_col} )", ) ) query_condition.extend( f"cast( {tbname}.{un_char_col} as binary(16) ) " for un_char_col in NUM_COL) - query_condition.extend( f"cast( {tbname}.{char_col} + {tbname}.{char_col_2} as binary(32) ) " for char_col_2 in CHAR_COL ) - query_condition.extend( f"cast( {tbname}.{char_col} + {tbname}.{un_char_col} as binary(32) ) " for un_char_col in NUM_COL ) for num_col in NUM_COL: query_condition.extend( ( - f"{tbname}.{num_col}", - f"sin( {tbname}.{num_col} )" + f"sin( {tbname}.{num_col} )", ) ) query_condition.extend( f"{tbname}.{num_col} + {tbname}.{num_col_1} " for num_col_1 in NUM_COL ) @@ -55,41 +52,115 @@ class TDTestCase: return query_condition - def __join_condition(self, tb_list, filter=PRIMARY_COL): - # sourcery skip: flip-comparison - if 1 == len(tb_list): - join_filter = f"{tb_list[0]}.{filter} = {tb_list[0]}.{filter} " - elif 2 == len(tb_list): - join_filter = f"{tb_list[0]}.{filter} = {tb_list[1]}.{filter} " - else: - join_filter = f"{tb_list[0]}.{filter} = {tb_list[1]}.{filter} " - for i in range(1, len(tb_list)-1 ): - join_filter += f"and {tb_list[i]}.{filter} = {tb_list[i+1]}.{filter}" - - return join_filter - - def __where_condition(self, col, tbname): + def __join_condition(self, tb_list, filter=PRIMARY_COL, INNER=False): + table_reference = tb_list[0] + join_condition = table_reference + join = "inner join" if INNER else "join" + for i in range(len(tb_list[1:])): + join_condition += f" {join} {tb_list[i+1]} on {table_reference}.{filter}={tb_list[i+1]}.{filter}" + + return join_condition + + def __where_condition(self, col=None, tbname=None, query_conditon=None): + if query_conditon and isinstance(query_conditon, str): + if query_conditon.startswith("count"): + query_conditon = query_conditon[6:-1] + elif query_conditon.startswith("max"): + query_conditon = query_conditon[4:-1] + elif query_conditon.startswith("sum"): + query_conditon = query_conditon[4:-1] + elif query_conditon.startswith("min"): + query_conditon = query_conditon[4:-1] + + if query_conditon: + return f" where {query_conditon} is not null" if col in NUM_COL: - return f" abs( {tbname}.{col} ) >= 0" - elif col in CHAR_COL: - return f" lower( {tbname}.{col} ) like 'bina%' or lower( {tbname}.{col} ) like '_cha%' " - elif col in BOOLEAN_COL: - return f" {tbname}.{col} in (false, true) " - elif col in TS_TYPE_COL or col in PRIMARY_COL: - return f" cast( {tbname}.{col} as binary(16) ) is not null " - else: - return "" - - def __group_condition(self, tbname, col, having = ""): + return f" where abs( {tbname}.{col} ) >= 0" + if col in CHAR_COL: + return f" where lower( {tbname}.{col} ) like 'bina%' or lower( {tbname}.{col} ) like '_cha%' " + if col in BOOLEAN_COL: + return f" 
where {tbname}.{col} in (false, true) " + if col in TS_TYPE_COL or col in PRIMARY_COL: + return f" where cast( {tbname}.{col} as binary(16) ) is not null " + + return "" + + def __group_condition(self, col, having = None): + if isinstance(col, str): + if col.startswith("count"): + col = col[6:-1] + elif col.startswith("max"): + col = col[4:-1] + elif col.startswith("sum"): + col = col[4:-1] + elif col.startswith("min"): + col = col[4:-1] return f" group by {col} having {having}" if having else f" group by {col} " - def __join_check(self, tblist, checkrows, join_flag=True): + def __gen_sql(self, select_clause, from_clause, where_condition="", group_condition=""): + if isinstance(select_clause, str) and "on" not in from_clause and select_clause.split(".")[0] != from_clause.split(".")[0]: + return + return f"select {select_clause} from {from_clause} {where_condition} {group_condition}" + + @property + def __join_tblist(self): + return [ + # ["ct1", "ct2"], + ["ct1", "ct4"], + ["ct1", "t1"], + # ["ct2", "ct4"], + # ["ct2", "t1"], + # ["ct4", "t1"], + # ["ct1", "ct2", "ct4"], + # ["ct1", "ct2", "t1"], + # ["ct1", "ct4", "t1"], + # ["ct2", "ct4", "t1"], + # ["ct1", "ct2", "ct4", "t1"], + ] + + @property + def __sqls_list(self): + sqls = [] + __join_tblist = self.__join_tblist + for join_tblist in __join_tblist: + for join_tb in join_tblist: + select_claus_list = self.__query_condition(join_tb) + for select_claus in select_claus_list: + group_claus = self.__group_condition( col=select_claus) + where_claus = self.__where_condition( query_conditon=select_claus ) + having_claus = self.__group_condition( col=select_claus, having=f"{select_claus} is not null" ) + sqls.extend( + ( + # self.__gen_sql(select_claus, self.__join_condition(join_tblist), where_claus, group_claus), + self.__gen_sql(select_claus, self.__join_condition(join_tblist), where_claus, having_claus), + self.__gen_sql(select_claus, self.__join_condition(join_tblist), where_claus), + # self.__gen_sql(select_claus, self.__join_condition(join_tblist), group_claus), + self.__gen_sql(select_claus, self.__join_condition(join_tblist), having_claus), + self.__gen_sql(select_claus, self.__join_condition(join_tblist)), + # self.__gen_sql(select_claus, self.__join_condition(join_tblist, INNER=True), where_claus, group_claus), + self.__gen_sql(select_claus, self.__join_condition(join_tblist, INNER=True), where_claus, having_claus), + self.__gen_sql(select_claus, self.__join_condition(join_tblist, INNER=True), where_claus, ), + self.__gen_sql(select_claus, self.__join_condition(join_tblist, INNER=True), having_claus ), + # self.__gen_sql(select_claus, self.__join_condition(join_tblist, INNER=True), group_claus ), + self.__gen_sql(select_claus, self.__join_condition(join_tblist, INNER=True) ), + ) + ) + return list(filter(None, sqls)) + + def __join_check(self,): + tdLog.printNoPrefix("==========current sql condition check , must return query ok==========") + for i in range(len(self.__sqls_list)): + tdSql.query(self.__sqls_list[i]) + # if i % 10 == 0 : + # tdLog.success(f"{i} sql is already executed success !") + + def __join_check_old(self, tblist, checkrows, join_flag=True): query_conditions = self.__query_condition(tblist[0]) join_condition = self.__join_condition(tb_list=tblist) if join_flag else " " for condition in query_conditions: where_condition = self.__where_condition(col=condition, tbname=tblist[0]) - group_having = self.__group_condition(tbname=tblist[0], col=condition, having=f"{condition} is not null " ) - group_no_having= 
self.__group_condition(tbname=tblist[0], col=condition ) + group_having = self.__group_condition(col=condition, having=f"{condition} is not null " ) + group_no_having= self.__group_condition(col=condition ) groups = ["", group_having, group_no_having] for group_condition in groups: if where_condition: @@ -116,23 +187,6 @@ class TDTestCase: tdSql.query(sql=sql) # tdSql.checkRows(checkrows) - - def __test_current(self): - # sourcery skip: extract-duplicate-method, inline-immediately-returned-variable - tdLog.printNoPrefix("==========current sql condition check , must return query ok==========") - tblist_1 = ["ct1", "ct2"] - self.__join_check(tblist_1, 1) - tdLog.printNoPrefix(f"==========current sql condition check in {tblist_1} over==========") - tblist_2 = ["ct2", "ct4"] - self.__join_check(tblist_2, self.rows) - tdLog.printNoPrefix(f"==========current sql condition check in {tblist_2} over==========") - tblist_3 = ["t1", "ct4"] - self.__join_check(tblist_3, 1) - tdLog.printNoPrefix(f"==========current sql condition check in {tblist_3} over==========") - tblist_4 = ["t1", "ct1"] - self.__join_check(tblist_4, 1) - tdLog.printNoPrefix(f"==========current sql condition check in {tblist_4} over==========") - def __test_error(self): # sourcery skip: extract-duplicate-method, move-assign-in-block tdLog.printNoPrefix("==========err sql condition check , must return error==========") @@ -141,17 +195,17 @@ class TDTestCase: err_list_3 = ["ct1","ct4", "t1"] err_list_4 = ["ct2","ct4", "t1"] err_list_5 = ["ct1", "ct2","ct4", "t1"] - self.__join_check(err_list_1, -1) + self.__join_check_old(err_list_1, -1) tdLog.printNoPrefix(f"==========err sql condition check in {err_list_1} over==========") - self.__join_check(err_list_2, -1) + self.__join_check_old(err_list_2, -1) tdLog.printNoPrefix(f"==========err sql condition check in {err_list_2} over==========") - self.__join_check(err_list_3, -1) + self.__join_check_old(err_list_3, -1) tdLog.printNoPrefix(f"==========err sql condition check in {err_list_3} over==========") - self.__join_check(err_list_4, -1) + self.__join_check_old(err_list_4, -1) tdLog.printNoPrefix(f"==========err sql condition check in {err_list_4} over==========") - self.__join_check(err_list_5, -1) + self.__join_check_old(err_list_5, -1) tdLog.printNoPrefix(f"==========err sql condition check in {err_list_5} over==========") - self.__join_check(["ct2", "ct4"], -1, join_flag=False) + self.__join_check_old(["ct2", "ct4"], -1, join_flag=False) tdLog.printNoPrefix("==========err sql condition check in has no join condition over==========") tdSql.error( f"select c1, c2 from ct2, ct4 where ct2.{PRIMARY_COL}=ct4.{PRIMARY_COL}" ) @@ -172,7 +226,7 @@ class TDTestCase: def all_test(self): - self.__test_current() + self.__join_check() self.__test_error() diff --git a/tests/system-test/2-query/join2.py b/tests/system-test/2-query/join2.py new file mode 100644 index 0000000000000000000000000000000000000000..40da41eee76f7fe4be70a8217c06ac0f94fd8981 --- /dev/null +++ b/tests/system-test/2-query/join2.py @@ -0,0 +1,357 @@ +import datetime + +from util.log import * +from util.sql import * +from util.cases import * +from util.dnodes import * + +PRIMARY_COL = "ts" + +INT_COL = "c1" +BINT_COL = "c2" +SINT_COL = "c3" +TINT_COL = "c4" +FLOAT_COL = "c5" +DOUBLE_COL = "c6" +BOOL_COL = "c7" + +BINARY_COL = "c8" +NCHAR_COL = "c9" +TS_COL = "c10" + +NUM_COL = [ INT_COL, BINT_COL, SINT_COL, TINT_COL, FLOAT_COL, DOUBLE_COL, ] +CHAR_COL = [ BINARY_COL, NCHAR_COL, ] +BOOLEAN_COL = [ BOOL_COL, ] +TS_TYPE_COL = [ 
TS_COL, ] + +class TDTestCase: + + def init(self, conn, logSql): + tdLog.debug(f"start to execute {__file__}") + tdSql.init(conn.cursor(), True) + + def __query_condition(self,tbname): + query_condition = [] + for char_col in CHAR_COL: + query_condition.extend( + ( + f"{tbname}.{char_col}", + # f"upper( {tbname}.{char_col} )", + ) + ) + query_condition.extend( f"cast( {tbname}.{un_char_col} as binary(16) ) " for un_char_col in NUM_COL) + for num_col in NUM_COL: + query_condition.extend( + ( + f"sin( {tbname}.{num_col} )", + ) + ) + query_condition.extend( f"{tbname}.{num_col} + {tbname}.{num_col_1} " for num_col_1 in NUM_COL ) + + query_condition.append(''' "test1234!@#$%^&*():'>" ''') + + return query_condition + + def __join_condition(self, tb_list, filter=PRIMARY_COL, INNER=False): + table_reference = tb_list[0] + join_condition = table_reference + join = "inner join" if INNER else "join" + for i in range(len(tb_list[1:])): + join_condition += f" {join} {tb_list[i+1]} on {table_reference}.{filter}={tb_list[i+1]}.{filter}" + + return join_condition + + def __where_condition(self, col=None, tbname=None, query_conditon=None): + if query_conditon and isinstance(query_conditon, str): + if query_conditon.startswith("count"): + query_conditon = query_conditon[6:-1] + elif query_conditon.startswith("max"): + query_conditon = query_conditon[4:-1] + elif query_conditon.startswith("sum"): + query_conditon = query_conditon[4:-1] + elif query_conditon.startswith("min"): + query_conditon = query_conditon[4:-1] + + if query_conditon: + return f" where {query_conditon} is not null" + if col in NUM_COL: + return f" where abs( {tbname}.{col} ) >= 0" + if col in CHAR_COL: + return f" where lower( {tbname}.{col} ) like 'bina%' or lower( {tbname}.{col} ) like '_cha%' " + if col in BOOLEAN_COL: + return f" where {tbname}.{col} in (false, true) " + if col in TS_TYPE_COL or col in PRIMARY_COL: + return f" where cast( {tbname}.{col} as binary(16) ) is not null " + + return "" + + def __group_condition(self, col, having = None): + if isinstance(col, str): + if col.startswith("count"): + col = col[6:-1] + elif col.startswith("max"): + col = col[4:-1] + elif col.startswith("sum"): + col = col[4:-1] + elif col.startswith("min"): + col = col[4:-1] + return f" group by {col} having {having}" if having else f" group by {col} " + + def __gen_sql(self, select_clause, from_clause, where_condition="", group_condition=""): + if isinstance(select_clause, str) and "on" not in from_clause and select_clause.split(".")[0] != from_clause.split(".")[0]: + return + return f"select {select_clause} from {from_clause} {where_condition} {group_condition}" + + @property + def __join_tblist(self): + return [ + # ["ct1", "ct2"], + # ["ct1", "ct4"], + # ["ct1", "t1"], + ["ct2", "ct4"], + # ["ct2", "t1"], + ["ct4", "t1"], + # ["ct1", "ct2", "ct4"], + # ["ct1", "ct2", "t1"], + # ["ct1", "ct4", "t1"], + # ["ct2", "ct4", "t1"], + # ["ct1", "ct2", "ct4", "t1"], + ] + + @property + def __sqls_list(self): + sqls = [] + __join_tblist = self.__join_tblist + for join_tblist in __join_tblist: + for join_tb in join_tblist: + select_claus_list = self.__query_condition(join_tb) + for select_claus in select_claus_list: + group_claus = self.__group_condition( col=select_claus) + where_claus = self.__where_condition( query_conditon=select_claus ) + having_claus = self.__group_condition( col=select_claus, having=f"{select_claus} is not null" ) + sqls.extend( + ( + # self.__gen_sql(select_claus, self.__join_condition(join_tblist), where_claus, group_claus), + self.__gen_sql(select_claus, self.__join_condition(join_tblist), where_claus, having_claus), + self.__gen_sql(select_claus, self.__join_condition(join_tblist), where_claus), + # self.__gen_sql(select_claus, self.__join_condition(join_tblist), group_claus), + self.__gen_sql(select_claus, self.__join_condition(join_tblist), having_claus), + self.__gen_sql(select_claus, self.__join_condition(join_tblist)), + # self.__gen_sql(select_claus, self.__join_condition(join_tblist, INNER=True), where_claus, group_claus), + self.__gen_sql(select_claus, self.__join_condition(join_tblist, INNER=True), where_claus, having_claus), + self.__gen_sql(select_claus, self.__join_condition(join_tblist, INNER=True), where_claus, ), + self.__gen_sql(select_claus, self.__join_condition(join_tblist, INNER=True), having_claus ), + # self.__gen_sql(select_claus, self.__join_condition(join_tblist, INNER=True), group_claus ), + self.__gen_sql(select_claus, 
self.__join_condition(join_tblist, INNER=True) ), + ) + ) + return list(filter(None, sqls)) + + def __join_check(self,): + tdLog.printNoPrefix("==========current sql condition check , must return query ok==========") + for i in range(len(self.__sqls_list)): + tdSql.query(self.__sqls_list[i]) + # if i % 10 == 0 : + # tdLog.success(f"{i} sql is already executed success !") + + def __join_check_old(self, tblist, checkrows, join_flag=True): + query_conditions = self.__query_condition(tblist[0]) + join_condition = self.__join_condition(tb_list=tblist) if join_flag else " " + for condition in query_conditions: + where_condition = self.__where_condition(col=condition, tbname=tblist[0]) + group_having = self.__group_condition(col=condition, having=f"{condition} is not null " ) + group_no_having= self.__group_condition(col=condition ) + groups = ["", group_having, group_no_having] + for group_condition in groups: + if where_condition: + sql = f" select {condition} from {tblist[0]},{tblist[1]} where {join_condition} and {where_condition} {group_condition} " + else: + sql = f" select {condition} from {tblist[0]},{tblist[1]} where {join_condition} {group_condition} " + + if not join_flag : + tdSql.error(sql=sql) + break + if len(tblist) == 2: + if "ct1" in tblist or "t1" in tblist: + self.__join_current(sql, checkrows) + elif where_condition or "not null" in group_condition: + self.__join_current(sql, checkrows + 2 ) + elif group_condition: + self.__join_current(sql, checkrows + 3 ) + else: + self.__join_current(sql, checkrows + 5 ) + if len(tblist) > 2 or len(tblist) < 1: + tdSql.error(sql=sql) + + def __join_current(self, sql, checkrows): + tdSql.query(sql=sql) + # tdSql.checkRows(checkrows) + + def __test_error(self): + # sourcery skip: extract-duplicate-method, move-assign-in-block + tdLog.printNoPrefix("==========err sql condition check , must return error==========") + err_list_1 = ["ct1","ct2", "ct4"] + err_list_2 = ["ct1","ct2", "t1"] + err_list_3 = ["ct1","ct4", "t1"] + err_list_4 = ["ct2","ct4", "t1"] + err_list_5 = ["ct1", "ct2","ct4", "t1"] + self.__join_check_old(err_list_1, -1) + tdLog.printNoPrefix(f"==========err sql condition check in {err_list_1} over==========") + self.__join_check_old(err_list_2, -1) + tdLog.printNoPrefix(f"==========err sql condition check in {err_list_2} over==========") + self.__join_check_old(err_list_3, -1) + tdLog.printNoPrefix(f"==========err sql condition check in {err_list_3} over==========") + self.__join_check_old(err_list_4, -1) + tdLog.printNoPrefix(f"==========err sql condition check in {err_list_4} over==========") + self.__join_check_old(err_list_5, -1) + tdLog.printNoPrefix(f"==========err sql condition check in {err_list_5} over==========") + self.__join_check_old(["ct2", "ct4"], -1, join_flag=False) + tdLog.printNoPrefix("==========err sql condition check in has no join condition over==========") + + tdSql.error( f"select c1, c2 from ct2, ct4 where ct2.{PRIMARY_COL}=ct4.{PRIMARY_COL}" ) + tdSql.error( f"select ct2.c1, ct2.c2 from ct2, ct4 where ct2.{INT_COL}=ct4.{INT_COL}" ) + tdSql.error( f"select ct2.c1, ct2.c2 from ct2, ct4 where ct2.{TS_COL}=ct4.{TS_COL}" ) + tdSql.error( f"select ct2.c1, ct2.c2 from ct2, ct4 where ct2.{PRIMARY_COL}=ct4.{TS_COL}" ) + tdSql.error( f"select ct2.c1, ct1.c2 from ct2, ct4 where ct2.{PRIMARY_COL}=ct4.{PRIMARY_COL}" ) + tdSql.error( f"select ct2.c1, ct4.c2 from ct2, ct4 where ct2.{PRIMARY_COL}=ct4.{PRIMARY_COL} and c1 is not null " ) + tdSql.error( f"select ct2.c1, ct4.c2 from ct2, ct4 where 
ct2.{PRIMARY_COL}=ct4.{PRIMARY_COL} and ct1.c1 is not null " ) + + + tbname = ["ct1", "ct2", "ct4", "t1"] + + # for tb in tbname: + # for errsql in self.__join_err_check(tb): + # tdSql.error(sql=errsql) + # tdLog.printNoPrefix(f"==========err sql condition check in {tb} over==========") + + + def all_test(self): + self.__join_check() + self.__test_error() + + + def __create_tb(self): + tdSql.prepare() + + tdLog.printNoPrefix("==========step1:create table") + create_stb_sql = f'''create table stb1( + ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint, + {FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool, + {BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp + ) tags (t1 int) + ''' + create_ntb_sql = f'''create table t1( + ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint, + {FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool, + {BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp + ) + ''' + tdSql.execute(create_stb_sql) + tdSql.execute(create_ntb_sql) + + for i in range(4): + tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )') + + def __insert_data(self, rows): + now_time = int(datetime.datetime.timestamp(datetime.datetime.now()) * 1000) + for i in range(rows): + tdSql.execute( + f"insert into ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" + ) + tdSql.execute( + f"insert into ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" + ) + tdSql.execute( + f"insert into ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" + ) + tdSql.execute( + f'''insert into ct1 values + ( { now_time - rows * 5 }, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar_测试_0', { now_time + 8 } ) + ( { now_time + 10000 }, { rows }, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar_测试_9', { now_time + 9 } ) + ''' + ) + + tdSql.execute( + f'''insert into ct4 values + ( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( + { now_time + 5184000000}, {pow(2,31)-pow(2,15)}, {pow(2,63)-pow(2,30)}, 32767, 127, + { 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000} + ) + ( + { now_time + 2592000000 }, {pow(2,31)-pow(2,16)}, {pow(2,63)-pow(2,31)}, 32766, 126, + { 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000} + ) + ''' + ) + + tdSql.execute( + f'''insert into ct2 values + ( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( + { now_time + 5184000000 }, { -1 * pow(2,31) + pow(2,15) }, { -1 * pow(2,63) + pow(2,30) }, 
-32766, -126, + { -1 * 3.2 * pow(10,38) }, { -1.2 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 } + ) + ( + { now_time + 2592000000 }, { -1 * pow(2,31) + pow(2,16) }, { -1 * pow(2,63) + pow(2,31) }, -32767, -127, + { - 3.3 * pow(10,38) }, { -1.3 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 } + ) + ''' + ) + + for i in range(rows): + insert_data = f'''insert into t1 values + ( { now_time - i * 3600000 }, {i}, {i * 11111}, { i % 32767 }, { i % 127}, { i * 1.11111 }, { i * 1000.1111 }, { i % 2}, + "binary_{i}", "nchar_测试_{i}", { now_time - 1000 * i } ) + ''' + tdSql.execute(insert_data) + tdSql.execute( + f'''insert into t1 values + ( { now_time + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time - (( rows // 2 ) * 60 + 30) * 60000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time - rows * 3600000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time + 7200000 }, { pow(2,31) - pow(2,15) }, { pow(2,63) - pow(2,30) }, 32767, 127, + { 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 }, + "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 } + ) + ( + { now_time + 3600000 } , { pow(2,31) - pow(2,16) }, { pow(2,63) - pow(2,31) }, 32766, 126, + { 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 }, + "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 } + ) + ''' + ) + + + def run(self): + tdSql.prepare() + + tdLog.printNoPrefix("==========step1:create table") + self.__create_tb() + + tdLog.printNoPrefix("==========step2:insert data") + self.rows = 10 + self.__insert_data(self.rows) + + tdLog.printNoPrefix("==========step3:all check") + self.all_test() + + tdDnodes.stop(1) + tdDnodes.start(1) + + tdSql.execute("use db") + + tdLog.printNoPrefix("==========step4:after wal, all check again ") + self.all_test() + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/2-query/last.py b/tests/system-test/2-query/last.py index b491679c627b5bd65c1d4c67ed16b31c792d8a08..4ef13e9142f3a2ebc3ef55f6a2316fd6433908f3 100644 --- a/tests/system-test/2-query/last.py +++ b/tests/system-test/2-query/last.py @@ -170,7 +170,96 @@ class TDTestCase: tdSql.query("select last(col9) from db.stb_1") tdSql.checkRows(1) tdSql.checkData(0, 0, '涛思数据10') + tdSql.query("select last(col1,col2,col3) from stb_1") + tdSql.checkData(0,2,10) + tdSql.query("select last(*) from stb") + tdSql.checkRows(1) + tdSql.checkData(0, 1, 10) + tdSql.query("select last(*) from db.stb") + tdSql.checkRows(1) + tdSql.checkData(0, 1, 10) + tdSql.query("select last(col1) from stb") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 10) + tdSql.query("select last(col1) from db.stb") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 10) + tdSql.query("select last(col2) from stb") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 10) + tdSql.query("select last(col2) from db.stb") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 10) + tdSql.query("select last(col3) from stb") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 10) + tdSql.query("select last(col3) from db.stb") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 10) + tdSql.query("select last(col4) from stb") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 10) + tdSql.query("select last(col4) from db.stb") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 10) + 
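# col11-col14 are presumably the unsigned integer columns, mirroring the ntb schema created below; last() is expected to return the latest value, 10, for each +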
tdSql.query("select last(col11) from stb") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 10) + tdSql.query("select last(col11) from db.stb") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 10) + tdSql.query("select last(col12) from stb") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 10) + tdSql.query("select last(col12) from db.stb") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 10) + tdSql.query("select last(col13) from stb") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 10) + tdSql.query("select last(col13) from db.stb") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 10) + tdSql.query("select last(col14) from stb") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 10) + tdSql.query("select last(col14) from db.stb") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 10) + tdSql.query("select last(col5) from stb") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 9.1) + tdSql.query("select last(col5) from db.stb") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 9.1) + tdSql.query("select last(col6) from stb") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 9.1) + tdSql.query("select last(col6) from db.stb") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 9.1) + tdSql.query("select last(col7) from stb") + tdSql.checkRows(1) + tdSql.checkData(0, 0, True) + tdSql.query("select last(col7) from db.stb") + tdSql.checkRows(1) + tdSql.checkData(0, 0, True) + tdSql.query("select last(col8) from stb") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 'taosdata10') + tdSql.query("select last(col8) from db.stb") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 'taosdata10') + tdSql.query("select last(col9) from stb") + tdSql.checkRows(1) + tdSql.checkData(0, 0, '涛思数据10') + tdSql.query("select last(col9) from db.stb") + tdSql.checkRows(1) + tdSql.checkData(0, 0, '涛思数据10') + tdSql.query("select last(col1,col2,col3) from stb") + tdSql.checkData(0,2,10) + tdSql.execute('''create table ntb(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double, col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned)''') @@ -322,7 +411,12 @@ class TDTestCase: tdSql.query("select last(col9) from db.ntb") tdSql.checkRows(1) tdSql.checkData(0, 0, '涛思数据10') - + tdSql.query("select last(col1,col2,col3) from ntb") + tdSql.checkData(0,2,10) + + tdSql.error("select col1 from stb where last(col9)='涛思数据10'") + tdSql.error("select col1 from ntb where last(col9)='涛思数据10'") + tdSql.error("select col1 from stb_1 where last(col9)='涛思数据10'") def stop(self): tdSql.close() tdLog.success("%s successfully executed" % __file__) diff --git a/tests/system-test/2-query/mavg.py b/tests/system-test/2-query/mavg.py new file mode 100644 index 0000000000000000000000000000000000000000..1d929646159d36a14b5ce6e9e95e1b02ad0be43a --- /dev/null +++ b/tests/system-test/2-query/mavg.py @@ -0,0 +1,680 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import subprocess +import random +import math +import numpy as np +import inspect +import re +import taos + +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor()) + + def mavg_query_form(self, sel="select", func="mavg(", col="c1", m_comm =",", k=1,r_comm=")", alias="", fr="from",table_expr="t1", condition=""): + ''' + build a mavg query statement: + + :param sel: string, must be "select", required parameters; + :param func: string, in this case must be "mavg(", otherwise return other function, required parameters; + :param col: string, column name, required parameters; + :param m_comm: string, the comma between col and k, required parameters; + :param k: int/float, the width of the sliding window, in [1, 1000], required parameters; + :param r_comm: string, must be ")", use with "(" in func, required parameters; + :param alias: string, an alias for the result column, or an extra function/expression appended to the select list; + :param fr: string, must be "from", required parameters; + :param table_expr: string or expression, data source (e.g. table/stable name, result set), required parameters; + :param condition: expression; + :return: mavg query statement, default: select mavg(c1, 1) from t1 + ''' + + return f"{sel} {func} {col} {m_comm} {k} {r_comm} {alias} {fr} {table_expr} {condition}" + + def checkmavg(self,sel="select", func="mavg(", col="c1", m_comm =",", k=1,r_comm=")", alias="", fr="from",table_expr="t1", condition=""): + # print(self.mavg_query_form(sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + # table_expr=table_expr, condition=condition)) + line = sys._getframe().f_back.f_lineno + + if not all([sel , func , col , m_comm , k , r_comm , fr , table_expr]): + print(f"case in {line}: ", end='') + return tdSql.error(self.mavg_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + + sql = "select * from t1" + collist = tdSql.getColNameList(sql) + + if not isinstance(col, str): + print(f"case in {line}: ", end='') + return tdSql.error(self.mavg_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + + if len([x for x in col.split(",") if x.strip()]) != 1: + print(f"case in {line}: ", end='') + return tdSql.error(self.mavg_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + + col = col.replace(",", "").replace(" ", "") + + if any([re.compile('^[a-zA-Z]{1}.*$').match(col) is None , not col.replace(".","").isalnum()]): + print(f"case in {line}: ", end='') + return tdSql.error(self.mavg_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + + # if all(["," in col , len(col.split(",")) != 2]): + # print(f"case in {line}: ", end='') + # return tdSql.error(self.mavg_query_form( + # 
table_expr=table_expr, condition=condition + # )) + # + # if ("," in col): + # if (not col.split(",")[0].strip()) ^ (not col.split(",")[1].strip()): + # col = col.strip().split(",")[0] if not col.split(",")[1].strip() else col.strip().split(",")[1] + # else: + # print(f"case in {line}: ", end='') + # return tdSql.error(self.mavg_query_form( + # sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + # table_expr=table_expr, condition=condition + # )) + # pass + + if '.' in col: + if any([col.split(".")[0] not in table_expr, col.split(".")[1] not in collist]): + print(f"case in {line}: ", end='') + return tdSql.error(self.mavg_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + pass + + if "." not in col: + if col not in collist: + print(f"case in {line}: ", end='') + return tdSql.error(self.mavg_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + pass + + colname = col if "." not in col else col.split(".")[1] + col_index = collist.index(colname) + if any([tdSql.cursor.istype(col_index, "TIMESTAMP"), tdSql.cursor.istype(col_index, "BOOL")]): + print(f"case in {line}: ", end='') + return tdSql.error(self.mavg_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + + if any([tdSql.cursor.istype(col_index, "BINARY") , tdSql.cursor.istype(col_index,"NCHAR")]): + print(f"case in {line}: ", end='') + return tdSql.error(self.mavg_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + + if any( [func != "mavg(" , r_comm != ")" , fr != "from", sel != "select"]): + print(f"case in {line}: ", end='') + return tdSql.error(self.mavg_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + + if all(["(" not in table_expr, "stb" in table_expr, "group" not in condition.lower()]): + print(f"case in {line}: ", end='') + return tdSql.error(self.mavg_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + + if "order by tbname" in condition.lower(): + print(f"case in {line}: ", end='') + return tdSql.error(self.mavg_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + + if all(["group" in condition.lower(), "tbname" not in condition.lower()]): + print(f"case in {line}: ", end='') + return tdSql.error(self.mavg_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + + alias_list = ["tbname", "_c0", "st", "ts"] + if all([alias, "," not in alias, not alias.isalnum()]): + # a column alias may actually contain "_" as well, but that is deliberately forbidden in this case. + print(f"case in {line}: ", end='') + return tdSql.error(self.mavg_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + + if all([alias, "," in alias]): + if all(parm != alias.lower().split(",")[1].strip() for parm in alias_list): + print(f"case in {line}: ", end='') + return 
tdSql.error(self.mavg_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + pass + + condition_exception = [ "~", "^", "insert", "distinct", + "count", "avg", "twa", "irate", "sum", "stddev", "leastsquares", + "min", "max", "first", "last", "top", "bottom", "percentile", + "apercentile", "last_row", "interp", "diff", "derivative", + "spread", "ceil", "floor", "round", "interval", "fill", "slimit", "soffset"] + if "union" not in condition.lower(): + if any(parm in condition.lower().strip() for parm in condition_exception): + + print(f"case in {line}: ", end='') + return tdSql.error(self.mavg_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + pass + + if not any([isinstance(k, int) , isinstance(k, float)]) : + print(f"case in {line}: ", end='') + return tdSql.error(self.mavg_query_form( + col=col, k=k, alias=alias, table_expr=table_expr, condition=condition + )) + + if not (1 <= k < 1001): + print(f"case in {line}: ", end='') + return tdSql.error(self.mavg_query_form( + col=col, k=k, alias=alias, table_expr=table_expr, condition=condition + )) + + k = int(k // 1) + pre_sql = re.sub(r"mavg\([a-z0-9 .,]*\)", f"count({col})", self.mavg_query_form( + col=col, table_expr=table_expr, condition=condition + )) + tdSql.query(pre_sql) + + if tdSql.queryRows == 0: + tdSql.query(self.mavg_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + print(f"case in {line}: ", end='') + tdSql.checkRows(0) + return + + if "group" in condition: + tb_condition = condition.split("group by")[1].split(" ")[1] + tdSql.query(f"select distinct {tb_condition} from {table_expr}") + query_result = tdSql.queryResult + query_rows = tdSql.queryRows + clear_condition = re.sub('order by [0-9a-z]*|slimit [0-9]*|soffset [0-9]*', "", condition) + + pre_row = 0 + for i in range(query_rows): + group_name = query_result[i][0] + if "where" in clear_condition: + pre_condition = re.sub('group by [0-9a-z]*', f"{tb_condition}='{group_name}'", clear_condition) + else: + pre_condition = "where " + re.sub('group by [0-9a-z]*',f"{tb_condition}='{group_name}'", clear_condition) + + tdSql.query(f"select {col} {alias} from {table_expr} {pre_condition}") + pre_data = np.array(tdSql.queryResult)[np.array(tdSql.queryResult) != None] + pre_mavg = np.convolve(pre_data, np.ones(k), "valid")/k + tdSql.query(self.mavg_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + for j in range(len(pre_mavg)): + print(f"case in {line}:", end='') + tdSql.checkData(pre_row+j, 0, pre_mavg[j]) + pre_row += len(pre_mavg) + return + elif "union" in condition: + union_sql_0 = self.mavg_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + ).split("union all")[0] + + union_sql_1 = self.mavg_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + ).split("union all")[1] + + tdSql.query(union_sql_0) + union_mavg_0 = tdSql.queryResult + row_union_0 = tdSql.queryRows + + tdSql.query(union_sql_1) + union_mavg_1 = tdSql.queryResult + + tdSql.query(self.mavg_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, 
r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + for i in range(tdSql.queryRows): + print(f"case in {line}: ", end='') + if i < row_union_0: + tdSql.checkData(i, 0, union_mavg_0[i][0]) + else: + tdSql.checkData(i, 0, union_mavg_1[i-row_union_0][0]) + return + + else: + tdSql.query(f"select {col} from {table_expr} {re.sub('limit [0-9]*|offset [0-9]*','',condition)}") + offset_val = int(condition.split("offset")[1].split(" ")[1]) if "offset" in condition else 0 + # print(f"select {col} from {table_expr} {re.sub('limit [0-9]*|offset [0-9]*','',condition)}") + pre_result = np.array(tdSql.queryResult)[np.array(tdSql.queryResult) != None] + pre_mavg = np.convolve(pre_result, np.ones(k), "valid")[offset_val:]/k + tdSql.query(self.mavg_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + for i in range(tdSql.queryRows): + print(f"case in {line}: ", end='') + tdSql.checkData(i, 0, pre_mavg[i]) + + pass + + def mavg_current_query(self) : + + # table schema :ts timestamp, c1 int, c2 float, c3 timestamp, c4 binary(16), c5 double, c6 bool + # c7 bigint, c8 smallint, c9 tinyint, c10 nchar(16) + + # case1~6: numeric col:int/bigint/tinyint/smallint/float/double + self.checkmavg() + case2 = {"col": "c2"} + self.checkmavg(**case2) + case3 = {"col": "c5"} + self.checkmavg(**case3) + case4 = {"col": "c7"} + self.checkmavg(**case4) + case5 = {"col": "c8"} + self.checkmavg(**case5) + case6 = {"col": "c9"} + self.checkmavg(**case6) + + # # case7~8: nested query + # case7 = {"table_expr": "(select c1 from stb1)"} + # self.checkmavg(**case7) + # case8 = {"table_expr": "(select mavg(c1, 1) c1 from stb1 group by tbname)"} + # self.checkmavg(**case8) + + # case9~10: mix with tbname/ts/tag/col + # case9 = {"alias": ", tbname"} + # self.checkmavg(**case9) + # case10 = {"alias": ", _c0"} + # self.checkmavg(**case10) + # case11 = {"alias": ", st1"} + # self.checkmavg(**case11) + # case12 = {"alias": ", c1"} + # self.checkmavg(**case12) + + # case13~15: with single condition + case13 = {"condition": "where c1 <= 10"} + self.checkmavg(**case13) + case14 = {"condition": "where c6 in (0, 1)"} + self.checkmavg(**case14) + case15 = {"condition": "where c1 between 1 and 10"} + self.checkmavg(**case15) + + # case16: with multi-condition + case16 = {"condition": "where c6=1 or c6 =0"} + self.checkmavg(**case16) + + # case17: join is only supported between normal tables + case17 = { + "col": "t1.c1", + "table_expr": "t1, t2", + "condition": "where t1.ts=t2.ts" + } + self.checkmavg(**case17) + # # case18~19: with group by + # case19 = { + # "table_expr": "stb1", + # "condition": "partition by tbname" + # } + # self.checkmavg(**case19) + + # case20~21: with order by + # case20 = {"condition": "order by ts"} + # self.checkmavg(**case20) + #case21 = { + # "table_expr": "stb1", + # "condition": "group by tbname order by tbname" + #} + #self.checkmavg(**case21) + + # # case22: with union + # case22 = { + # "condition": "union all select mavg( c1 , 1 ) from t2" + # } + # self.checkmavg(**case22) + + # case23: with limit/slimit + case23 = { + "condition": "limit 1" + } + self.checkmavg(**case23) + + # case24: value k range [1, 1000], can be int or float, k = floor(k) + case24 = {"k": 3} + self.checkmavg(**case24) + case25 = {"k": 2.999} + self.checkmavg(**case25) + case26 = {"k": 1000} + self.checkmavg(**case26) + + pass + + def mavg_error_query(self) -> None : + # unusual test + + # 
form test + err1 = {"col": ""} + self.checkmavg(**err1) # no col + err2 = {"sel": ""} + self.checkmavg(**err2) # no select + err3 = {"func": "mavg", "col": "", "m_comm": "", "k": "", "r_comm": ""} + self.checkmavg(**err3) # no mavg condition: select mavg from + err4 = {"col": "", "m_comm": "", "k": ""} + self.checkmavg(**err4) # no mavg condition: select mavg() from + err5 = {"func": "mavg", "r_comm": ""} + self.checkmavg(**err5) # no brackets: select mavg col, k from + err6 = {"fr": ""} + self.checkmavg(**err6) # no from + err7 = {"k": ""} + self.checkmavg(**err7) # no k + err8 = {"table_expr": ""} + self.checkmavg(**err8) # no table_expr + + # err9 = {"col": "st1"} + # self.checkmavg(**err9) # col: tag + err10 = {"col": 1} + self.checkmavg(**err10) # col: value + err11 = {"col": "NULL"} + self.checkmavg(**err11) # col: NULL + err12 = {"col": "%_"} + self.checkmavg(**err12) # col: %_ + err13 = {"col": "c3"} + self.checkmavg(**err13) # col: timestamp col + err14 = {"col": "_c0"} + self.checkmavg(**err14) # col: Primary key + err15 = {"col": "avg(c1)"} + self.checkmavg(**err15) # expr col + err16 = {"col": "c4"} + self.checkmavg(**err16) # binary col + err17 = {"col": "c10"} + self.checkmavg(**err17) # nchar col + err18 = {"col": "c6"} + self.checkmavg(**err18) # bool col + err19 = {"col": "'c1'"} + self.checkmavg(**err19) # col: string + err20 = {"col": None} + self.checkmavg(**err20) # col: None + err21 = {"col": "''"} + self.checkmavg(**err21) # col: '' + err22 = {"col": "tt1.c1"} + self.checkmavg(**err22) # not table_expr col + err23 = {"col": "t1"} + self.checkmavg(**err23) # tbname + err24 = {"col": "stb1"} + self.checkmavg(**err24) # stbname + err25 = {"col": "db"} + self.checkmavg(**err25) # database name + err26 = {"col": "True"} + self.checkmavg(**err26) # col: BOOL 1 + err27 = {"col": True} + self.checkmavg(**err27) # col: BOOL 2 + err28 = {"col": "*"} + self.checkmavg(**err28) # col: all col + err29 = {"func": "mavg[", "r_comm": "]"} + self.checkmavg(**err29) # form: mavg[col, k] + err30 = {"func": "mavg{", "r_comm": "}"} + self.checkmavg(**err30) # form: mavg{col, k} + err31 = {"col": "[c1]"} + self.checkmavg(**err31) # form: mavg([col], k) + err32 = {"col": "c1, c2"} + self.checkmavg(**err32) # form: mavg(col, col2, k) + err33 = {"col": "c1, 2"} + self.checkmavg(**err33) # form: mavg(col, k1, k2) + err34 = {"alias": ", count(c1)"} + self.checkmavg(**err34) # mix with aggregate function 1 + err35 = {"alias": ", avg(c1)"} + self.checkmavg(**err35) # mix with aggregate function 2 + err36 = {"alias": ", min(c1)"} + self.checkmavg(**err36) # mix with select function 1 + err37 = {"alias": ", top(c1, 5)"} + self.checkmavg(**err37) # mix with select function 2 + err38 = {"alias": ", spread(c1)"} + self.checkmavg(**err38) # mix with calculation function 1 + err39 = {"alias": ", diff(c1)"} + self.checkmavg(**err39) # mix with calculation function 2 + # err40 = {"alias": "+ 2"} + # self.checkmavg(**err40) # mix with arithmetic 1 + #tdSql.query(" select mavg( c1 , 1 ) + 2 from t1 ") + err41 = {"alias": "+ avg(c1)"} + self.checkmavg(**err41) # mix with arithmetic 2 + err42 = {"alias": ", c1"} + self.checkmavg(**err42) # mix with other col + # err43 = {"table_expr": "stb1"} + # self.checkmavg(**err43) # select stb directly + err44 = { + "col": "stb1.c1", + "table_expr": "stb1, stb2", + "condition": "where stb1.ts=stb2.ts and stb1.st1=stb2.st2 order by stb1.ts" + } + self.checkmavg(**err44) # stb join + err45 = { + "condition": "where ts>0 and ts < now interval(1h) fill(next)" + } + 
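# "interval" and "fill" both appear in condition_exception above, so checkmavg is expected to route err45 to tdSql.error +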
self.checkmavg(**err45) # interval + err46 = { + "table_expr": "t1", + "condition": "group by c6" + } + self.checkmavg(**err46) # group by normal col + err47 = { + "table_expr": "stb1", + "condition": "group by tbname slimit 1 " + } + # self.checkmavg(**err47) # with slimit + err48 = { + "table_expr": "stb1", + "condition": "group by tbname slimit 1 soffset 1" + } + # self.checkmavg(**err48) # with soffset + err49 = {"k": "2021-01-01 00:00:00.000"} + self.checkmavg(**err49) # k: timestamp + err50 = {"k": False} + self.checkmavg(**err50) # k: False + err51 = {"k": "%"} + self.checkmavg(**err51) # k: special char + err52 = {"k": ""} + self.checkmavg(**err52) # k: "" + err53 = {"k": None} + self.checkmavg(**err53) # k: None + err54 = {"k": "NULL"} + self.checkmavg(**err54) # k: null + err55 = {"k": "binary(4)"} + self.checkmavg(**err55) # k: string + err56 = {"k": "c1"} + self.checkmavg(**err56) # k: string, col name + err57 = {"col": "c1, 1, c2"} + self.checkmavg(**err57) # form: mavg(col1, k1, col2, k2) + err58 = {"col": "c1 cc1"} + self.checkmavg(**err58) # form: mavg(col newname, k) + err59 = {"k": "'1'"} + # self.checkmavg(**err59) # form: mavg(col, "1") + err60 = {"k": "-1-(-2)"} + # self.checkmavg(**err60) # form: mavg(col, -1-(-2)) + err61 = {"k": 1001} + self.checkmavg(**err61) # k: right out of [1, 1000] + err62 = {"k": -1} + self.checkmavg(**err62) # k: negative number + err63 = {"k": 0} + self.checkmavg(**err63) # k: 0 + err64 = {"k": 2**63-1} + self.checkmavg(**err64) # k: max(bigint) + err65 = {"k": 1-2**63} + # self.checkmavg(**err65) # k: min(bigint) + err66 = {"k": -2**63} + self.checkmavg(**err66) # k: NULL + err67 = {"k": 0.999999} + self.checkmavg(**err67) # k: left out of [1, 1000] + err68 = { + "table_expr": "stb1", + "condition": "group by tbname order by tbname" # order by tbname not supported + } + self.checkmavg(**err68) + + pass + + def mavg_test_data(self, tbnum:int, data_row:int, basetime:int) -> None : + for i in range(tbnum): + for j in range(data_row): + tdSql.execute( + f"insert into t{i} values (" + f"{basetime + (j+1)*10}, {random.randint(-200, -1)}, {random.uniform(200, -1)}, {basetime + random.randint(-200, -1)}, " + f"'binary_{j}', {random.uniform(-200, -1)}, {random.choice([0,1])}, {random.randint(-200,-1)}, " + f"{random.randint(-200, -1)}, {random.randint(-127, -1)}, 'nchar_{j}' )" + ) + + tdSql.execute( + f"insert into t{i} values (" + f"{basetime - (j+1) * 10}, {random.randint(1, 200)}, {random.uniform(1, 200)}, {basetime - random.randint(1, 200)}, " + f"'binary_{j}_1', {random.uniform(1, 200)}, {random.choice([0, 1])}, {random.randint(1,200)}, " + f"{random.randint(1,200)}, {random.randint(1,127)}, 'nchar_{j}_1' )" + ) + tdSql.execute( + f"insert into tt{i} values ( {basetime-(j+1) * 10}, {random.randint(1, 200)} )" + ) + + pass + + def mavg_test_table(self,tbnum: int) -> None : + tdSql.execute("drop database if exists db") + tdSql.execute("create database if not exists db keep 3650") + tdSql.execute("use db") + + tdSql.execute( + "create stable db.stb1 (\ + ts timestamp, c1 int, c2 float, c3 timestamp, c4 binary(16), c5 double, c6 bool, \ + c7 bigint, c8 smallint, c9 tinyint, c10 nchar(16)\ + ) \ + tags(st1 int)" + ) + tdSql.execute( + "create stable db.stb2 (ts timestamp, c1 int) tags(st2 int)" + ) + for i in range(tbnum): + tdSql.execute(f"create table t{i} using stb1 tags({i})") + tdSql.execute(f"create table tt{i} using stb2 tags({i})") + + pass + + def mavg_test_run(self) : + tdLog.printNoPrefix("==========TD-10594==========") + tbnum = 10 + 
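# reference semantics used by checkmavg for mavg(col, k): a simple moving average over a sliding window of k rows, + # e.g. np.convolve([1, 2, 3, 4], np.ones(3), "valid") / 3 -> [2.0, 3.0] +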
nowtime = int(round(time.time() * 1000)) + per_table_rows = 2 + self.mavg_test_table(tbnum) + + tdLog.printNoPrefix("######## no data test:") + self.mavg_current_query() + self.mavg_error_query() + + tdLog.printNoPrefix("######## insert only NULL test:") + for i in range(tbnum): + tdSql.execute(f"insert into t{i}(ts) values ({nowtime - 5})") + tdSql.execute(f"insert into t{i}(ts) values ({nowtime + 5})") + self.mavg_current_query() + self.mavg_error_query() + + tdLog.printNoPrefix("######## insert data in the range near the max(bigint/double):") + # self.mavg_test_table(tbnum) + # tdSql.execute(f"insert into t1(ts, c1,c2,c5,c7) values " + # f"({nowtime - (per_table_rows + 1) * 10}, {2**31-1}, {3.4*10**38}, {1.7*10**308}, {2**63-1})") + # tdSql.execute(f"insert into t1(ts, c1,c2,c5,c7) values " + # f"({nowtime - (per_table_rows + 2) * 10}, {2**31-1}, {3.4*10**38}, {1.7*10**308}, {2**63-1})") + # self.mavg_current_query() + # self.mavg_error_query() + + tdLog.printNoPrefix("######## insert data in the range near the min(bigint/double):") + # self.mavg_test_table(tbnum) + # tdSql.execute(f"insert into t1(ts, c1,c2,c5,c7) values " + # f"({nowtime - (per_table_rows + 1) * 10}, {1-2**31}, {-3.4*10**38}, {-1.7*10**308}, {1-2**63})") + # tdSql.execute(f"insert into t1(ts, c1,c2,c5,c7) values " + # f"({nowtime - (per_table_rows + 2) * 10}, {1-2**31}, {-3.4*10**38}, {-1.7*10**308}, {512-2**63})") + # self.mavg_current_query() + # self.mavg_error_query() + + tdLog.printNoPrefix("######## insert data without NULL data test:") + self.mavg_test_table(tbnum) + self.mavg_test_data(tbnum, per_table_rows, nowtime) + self.mavg_current_query() + self.mavg_error_query() + + + tdLog.printNoPrefix("######## insert data mix with NULL test:") + for i in range(tbnum): + tdSql.execute(f"insert into t{i}(ts) values ({nowtime})") + tdSql.execute(f"insert into t{i}(ts) values ({nowtime-(per_table_rows+3)*10})") + tdSql.execute(f"insert into t{i}(ts) values ({nowtime+(per_table_rows+3)*10})") + self.mavg_current_query() + self.mavg_error_query() + + + + tdLog.printNoPrefix("######## check after WAL test:") + tdSql.query("show dnodes") + index = tdSql.getData(0, 0) + tdDnodes.stop(index) + tdDnodes.start(index) + self.mavg_current_query() + self.mavg_error_query() + + def run(self): + import traceback + try: + # run in develop branch + self.mavg_test_run() + pass + except Exception as e: + traceback.print_exc() + raise e + + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) \ No newline at end of file diff --git a/tests/system-test/2-query/nestedQuery_str.py b/tests/system-test/2-query/nestedQuery_str.py new file mode 100755 index 0000000000000000000000000000000000000000..8214c98c5cc8526874db5f40df22f8e587ea36f4 --- /dev/null +++ b/tests/system-test/2-query/nestedQuery_str.py @@ -0,0 +1,5753 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import random +import os +import time +import taos +import subprocess +from faker import Faker +from util.log import tdLog +from util.cases import tdCases +from util.sql import tdSql +from util.dnodes import tdDnodes +from util.dnodes import * + +class TDTestCase: + updatecfgDict = {'maxSQLLength':1048576,'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 , + "jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, "dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143, + "wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"fnDebugFlag":143} + + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + self.testcasePath = os.path.split(__file__)[0] + self.testcaseFilename = os.path.split(__file__)[-1] + os.system("rm -rf %s/%s.sql" % (self.testcasePath,self.testcaseFilename)) + + self.num = 10 + self.fornum = 5 + + self.db_nest = "nest" + self.dropandcreateDB_random("%s" %self.db_nest, 1) + + # regular column select + #q_select= ['ts' , '*' , 'q_int', 'q_bigint' , 'q_bigint' , 'q_smallint' , 'q_tinyint' , 'q_bool' , 'q_binary' , 'q_nchar' ,'q_float' , 'q_double' ,'q_ts '] + self.q_select= ['ts' , 'q_int', 'q_bigint' , 'q_bigint' , 'q_smallint' , 'q_tinyint' , 'q_bool' , 'q_binary' , 'q_nchar' ,'q_float' , 'q_double' ,'q_ts ', 'q_int_null ', 'q_bigint_null ' , 'q_bigint_null ' , 'q_smallint_null ' , 'q_tinyint_null ' , 'q_bool_null ' , 'q_binary_null ' , 'q_nchar_null ' ,'q_float_null ' , 'q_double_null ' ,'q_ts_null '] + + # tag column select + #t_select= ['*' , 'loc' ,'t_int', 't_bigint' , 't_bigint' , 't_smallint' , 't_tinyint' , 't_bool' , 't_binary' , 't_nchar' ,'t_float' , 't_double' ,'t_ts '] + self.t_select= ['loc','t_int', 't_bigint' , 't_bigint' , 't_smallint' , 't_tinyint' , 't_bool' , 't_binary' , 't_nchar' ,'t_float' , 't_double' ,'t_ts '] + + # regular and tag column select + self.qt_select= self.q_select + self.t_select + + # distinct regular column select + self.dq_select= ['distinct q_int', 'distinct q_bigint' , 'distinct q_smallint' , 'distinct q_tinyint' , + 'distinct q_bool' , 'distinct q_binary' , 'distinct q_nchar' ,'distinct q_float' , 'distinct q_double' ,'distinct q_ts '] + + # distinct tag column select + self.dt_select= ['distinct loc', 'distinct t_int', 'distinct t_bigint' , 'distinct t_smallint' , 'distinct t_tinyint' , + 'distinct t_bool' , 'distinct t_binary' , 'distinct t_nchar' ,'distinct t_float' , 'distinct t_double' ,'distinct t_ts '] + + # distinct regular and tag column select + self.dqt_select= self.dq_select + self.dt_select + + # special column select + self.s_r_select= ['_c0', '_rowts' , '_C0' ] + self.s_s_select= ['tbname' , '_rowts' , '_c0', '_C0' ] + self.unionall_or_union= [ ' union ' , ' union all ' ] + + # regular column where + self.q_where = ['ts < now +1s','q_bigint >= -9223372036854775807 and q_bigint <= 9223372036854775807', 'q_int <= 2147483647 and q_int >= -2147483647', + 'q_smallint >= -32767 and q_smallint <= 32767','q_tinyint >= -127 and q_tinyint <= 127','q_float >= -1.7E308 and q_float <= 1.7E308', + 'q_double >= -1.7E308 and q_double <= 1.7E308', 'q_binary like \'binary%\' or q_binary = 
\'0\' ' , 'q_nchar like \'nchar%\' or q_nchar = \'0\' ' , + 'q_bool = true or q_bool = false' , 'q_bool in (0 , 1)' , 'q_bool in ( true , false)' , 'q_bool = 0 or q_bool = 1', + 'q_bigint between -9223372036854775807 and 9223372036854775807',' q_int between -2147483647 and 2147483647','q_smallint between -32767 and 32767', + 'q_bigint not between 9223372036854775807 and -9223372036854775807','q_int not between 2147483647 and -2147483647','q_smallint not between 32767 and -32767', + 'q_tinyint between -127 and 127 ','q_float >= -3.4E38 ','q_float <= 3.4E38 ','q_double >= -1.7E308 ', + 'q_double <= 1.7E308 ','q_float between -3.4E38 and 3.4E38 ','q_double between -1.7E308 and 1.7E308 ' ,'q_float not between 3.4E38 and -3.4E38 ','q_double not between 1.7E308 and -1.7E308 ', + 'q_float is not null ' ,'q_double is not null ' ,'q_binary match \'binary\' ','q_binary nmatch \'binarynchar\' ','q_nchar match \'nchar\' ','q_nchar nmatch \'binarynchar\' ', + 'q_binary like \'binary%\' ','(q_binary like \'binary%\' or q_nchar = \'0\' or q_binary = \'binary_\' ) ','q_nchar like \'nchar%\' ','(q_nchar like \'nchar%\' or q_binary = \'0\' or q_nchar = \'nchar_\' ) ',] + #TD-6201 ,'q_bool between 0 and 1' + + # regular column where for test union,join + self.q_u_where = ['t1.ts < now +1s' , 't2.ts < now +1s','t1.q_bigint >= -9223372036854775807 and t1.q_bigint <= 9223372036854775807 and t2.q_bigint >= -9223372036854775807 and t2.q_bigint <= 9223372036854775807', + 't1.q_int <= 2147483647 and t1.q_int >= -2147483647 and t2.q_int <= 2147483647 and t2.q_int >= -2147483647', + 't1.q_smallint >= -32767 and t1.q_smallint <= 32767 and t2.q_smallint >= -32767 and t2.q_smallint <= 32767', + 't1.q_tinyint >= -127 and t1.q_tinyint <= 127 and t2.q_tinyint >= -127 and t2.q_tinyint <= 127', + 't1.q_float >= - 1.7E308 and t1.q_float <= 1.7E308 and t2.q_float >= - 1.7E308 and t2.q_float <= 1.7E308', + 't1.q_double >= - 1.7E308 and t1.q_double <= 1.7E308 and t2.q_double >= - 1.7E308 and t2.q_double <= 1.7E308', + 't1.q_binary like \'binary%\' and t2.q_binary like \'binary%\' ' , + 't1.q_nchar like \'nchar%\' and t2.q_nchar like \'nchar%\' ' , + 't1.q_bool in (0 , 1) and t2.q_bool in (0 , 1)' , 't1.q_bool in ( true , false) and t2.q_bool in ( true , false)' , + 't1.q_bigint between -9223372036854775807 and 9223372036854775807 and t2.q_bigint between -9223372036854775807 and 9223372036854775807', + 't1.q_int between -2147483647 and 2147483647 and t2.q_int between -2147483647 and 2147483647', + 't1.q_smallint between -32767 and 32767 and t2.q_smallint between -32767 and 32767', + 't1.q_tinyint between -127 and 127 and t2.q_tinyint between -127 and 127 ','t1.q_float between -1.7E308 and 1.7E308 and t2.q_float between -1.7E308 and 1.7E308', + 't1.q_double between -1.7E308 and 1.7E308 and t2.q_double between -1.7E308 and 1.7E308', + 't1.q_bigint not between 9223372036854775807 and -9223372036854775807 and t2.q_bigint not between 9223372036854775807 and -9223372036854775807', + 't1.q_int not between 2147483647 and -2147483647 and t2.q_int not between 2147483647 and -2147483647', + 't1.q_smallint not between 32767 and -32767 and t2.q_smallint not between 32767 and -32767', + 't1.q_tinyint not between 127 and -127 and t2.q_tinyint not between 127 and -127 ','t1.q_float not between -1.7E308 and -1.7E308 and t2.q_float not between 1.7E308 and -1.7E308', + 't1.q_double not between 1.7E308 and -1.7E308 and t2.q_double not between 1.7E308 and -1.7E308'] + #TD-6201 ,'t1.q_bool between 0 and 1 or t2.q_bool between 0 and 1'] + #'t1.q_bool = 
true and t1.q_bool = false and t2.q_bool = true and t2.q_bool = false' , 't1.q_bool = 0 and t1.q_bool = 1 and t2.q_bool = 0 and t2.q_bool = 1' , + + self.q_u_or_where = ['(t1.q_binary like \'binary%\' or t1.q_binary = \'0\' or t2.q_binary like \'binary%\' or t2.q_binary = \'0\' )' , + '(t1.q_nchar like \'nchar%\' or t1.q_nchar = \'0\' or t2.q_nchar like \'nchar%\' or t2.q_nchar = \'0\' )' , '(t1.q_bool = true or t1.q_bool = false or t2.q_bool = true or t2.q_bool = false)' , + '(t1.q_bool in (0 , 1) or t2.q_bool in (0 , 1))' , '(t1.q_bool in ( true , false) or t2.q_bool in ( true , false))' , '(t1.q_bool = 0 or t1.q_bool = 1 or t2.q_bool = 0 or t2.q_bool = 1)' , + '(t1.q_bigint between -9223372036854775807 and 9223372036854775807 or t2.q_bigint between -9223372036854775807 and 9223372036854775807)', + '(t1.q_int between -2147483647 and 2147483647 or t2.q_int between -2147483647 and 2147483647)', + '(t1.q_smallint between -32767 and 32767 or t2.q_smallint between -32767 and 32767)', + '(t1.q_tinyint between -127 and 127 or t2.q_tinyint between -127 and 127 )','(t1.q_float between -1.7E308 and 1.7E308 or t2.q_float between -1.7E308 and 1.7E308)', + '(t1.q_double between -1.7E308 and 1.7E308 or t2.q_double between -1.7E308 and 1.7E308)'] + + # tag column where + self.t_where = ['ts < now +1s','t_bigint >= -9223372036854775807 and t_bigint <= 9223372036854775807','t_int <= 2147483647 and t_int >= -2147483647', + 't_smallint >= -32767 and t_smallint <= 32767','t_tinyint >= -127 and t_tinyint <= 127','t_float >= -1.7E308 and t_float <= 1.7E308', + 't_double >= -1.7E308 and t_double <= 1.7E308', 't_binary like \'binary%\' or t_binary = \'0\' ' , 't_nchar like \'nchar%\' or t_nchar = \'0\'' , + 't_bool = true or t_bool = false' , 't_bool in (0 , 1)' , 't_bool in ( true , false)' , 't_bool = 0 or t_bool = 1', + 't_bigint between -9223372036854775807 and 9223372036854775807',' t_int between -2147483647 and 2147483647','t_smallint between -32767 and 32767', + 't_tinyint between -127 and 127 ','t_float between -1.7E308 and 1.7E308','t_double between -1.7E308 and 1.7E308', + 't_binary match \'binary\' ','t_binary nmatch \'binarynchar\' ','t_nchar match \'nchar\' ','t_nchar nmatch \'binarynchar\' ', + 't_binary like \'binary%\' ','t_nchar like \'nchar%\' ','(t_binary like \'binary%\' or t_nchar = \'0\' ) ','(t_nchar like \'nchar%\' or t_binary = \'0\' ) ',] + #TD-6201,'t_bool between 0 and 1' + + # tag column where-conditions for testing union/join | not supported at present + self.t_u_where = ['t1.ts < now +1s' , 't2.ts < now +1s','t1.t_bigint >= -9223372036854775807 and t1.t_bigint <= 9223372036854775807 and t2.t_bigint >= -9223372036854775807 and t2.t_bigint <= 9223372036854775807', + 't1.t_int <= 2147483647 and t1.t_int >= -2147483647 and t2.t_int <= 2147483647 and t2.t_int >= -2147483647', + 't1.t_smallint >= -32767 and t1.t_smallint <= 32767 and t2.t_smallint >= -32767 and t2.t_smallint <= 32767', + 't1.t_tinyint >= -127 and t1.t_tinyint <= 127 and t2.t_tinyint >= -127 and t2.t_tinyint <= 127', + 't1.t_float >= -1.7E308 and t1.t_float <= 1.7E308 and t2.t_float >= -1.7E308 and t2.t_float <= 1.7E308', + 't1.t_double >= -1.7E308 and t1.t_double <= 1.7E308 and t2.t_double >= -1.7E308 and t2.t_double <= 1.7E308', + '(t1.t_binary like \'binary%\' or t1.t_binary = \'0\' or t2.t_binary like \'binary%\' or t2.t_binary = \'0\') ' , + '(t1.t_nchar like \'nchar%\' or t1.t_nchar = \'0\' or t2.t_nchar like \'nchar%\' or t2.t_nchar = \'0\' )' , '(t1.t_bool = true or t1.t_bool = false or t2.t_bool = true or t2.t_bool = false)' , + 
't1.t_bool in (0 , 1) and t2.t_bool in (0 , 1)' , 't1.t_bool in ( true , false) and t2.t_bool in ( true , false)' , '(t1.t_bool = 0 or t1.t_bool = 1 or t2.t_bool = 0 or t2.t_bool = 1)', + 't1.t_bigint between -9223372036854775807 and 9223372036854775807 and t2.t_bigint between -9223372036854775807 and 9223372036854775807', + 't1.t_int between -2147483647 and 2147483647 and t2.t_int between -2147483647 and 2147483647', + 't1.t_smallint between -32767 and 32767 and t2.t_smallint between -32767 and 32767', + '(t1.t_tinyint between -127 and 127 and t2.t_tinyint between -127 and 127) ','t1.t_float between -1.7E308 and 1.7E308 and t2.t_float between -1.7E308 and 1.7E308', + '(t1.t_double between -1.7E308 and 1.7E308 and t2.t_double between -1.7E308 and 1.7E308)'] + #TD-6201,'t1.t_bool between 0 and 1 or t2.q_bool between 0 and 1'] + + self.t_u_or_where = ['(t1.t_binary like \'binary%\' or t1.t_binary = \'0\' or t2.t_binary like \'binary%\' or t2.t_binary = \'0\' )' , + '(t1.t_nchar like \'nchar%\' or t1.t_nchar = \'0\' or t2.t_nchar like \'nchar%\' or t2.t_nchar = \'0\' )' , '(t1.t_bool = true or t1.t_bool = false or t2.t_bool = true or t2.t_bool = false)' , + '(t1.t_bool in (0 , 1) or t2.t_bool in (0 , 1))' , '(t1.t_bool in ( true , false) or t2.t_bool in ( true , false))' , '(t1.t_bool = 0 or t1.t_bool = 1 or t2.t_bool = 0 or t2.t_bool = 1)', + '(t1.t_bigint between -9223372036854775807 and 9223372036854775807 or t2.t_bigint between -9223372036854775807 and 9223372036854775807)', + '(t1.t_int between -2147483647 and 2147483647 or t2.t_int between -2147483647 and 2147483647)', + '(t1.t_smallint between -32767 and 32767 or t2.t_smallint between -32767 and 32767)', + '(t1.t_tinyint between -127 and 127 or t2.t_tinyint between -127 and 127 )','(t1.t_float between -1.7E308 and 1.7E308 or t2.t_float between -1.7E308 and 1.7E308)', + '(t1.t_double between -1.7E308 and 1.7E308 or t2.t_double between -1.7E308 and 1.7E308)'] + + # regular and tag column where + self.qt_where = self.q_where + self.t_where + self.qt_u_where = self.q_u_where + self.t_u_where + # qt_u_or_where is not supported at present + self.qt_u_or_where = self.q_u_or_where + self.t_u_or_where + + # tag column where-conditions for testing super-table joins | these are supported; whether 't1.t_bool = t2.t_bool ' should be allowed is still an open question 
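+ # e.g. an illustrative super-table join predicate built from the list below: where t1.ts = t2.ts and t1.t_bigint = t2.t_bigint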
+ self.t_join_where = ['t1.t_bigint = t2.t_bigint ', 't1.t_int = t2.t_int ', 't1.t_smallint = t2.t_smallint ', 't1.t_tinyint = t2.t_tinyint ', + 't1.t_float = t2.t_float ', 't1.t_double = t2.t_double ', 't1.t_binary = t2.t_binary ' , 't1.t_nchar = t2.t_nchar ' ] + + # session && fill + self.session_where = ['session(ts,10a)' , 'session(ts,10s)', 'session(ts,10m)' , 'session(ts,10h)','session(ts,10d)' , 'session(ts,10w)'] + self.session_u_where = ['session(t1.ts,10a)' , 'session(t1.ts,10s)', 'session(t1.ts,10m)' , 'session(t1.ts,10h)','session(t1.ts,10d)' , 'session(t1.ts,10w)', + 'session(t2.ts,10a)' , 'session(t2.ts,10s)', 'session(t2.ts,10m)' , 'session(t2.ts,10h)','session(t2.ts,10d)' , 'session(t2.ts,10w)'] + + self.fill_where = ['FILL(NONE)','FILL(PREV)','FILL(NULL)','FILL(LINEAR)','FILL(NEXT)','FILL(VALUE, 1.23)'] + + self.state_window = ['STATE_WINDOW(q_tinyint)','STATE_WINDOW(q_bigint)','STATE_WINDOW(q_int)','STATE_WINDOW(q_bool)','STATE_WINDOW(q_smallint)'] + self.state_u_window = ['STATE_WINDOW(t1.q_tinyint)','STATE_WINDOW(t1.q_bigint)','STATE_WINDOW(t1.q_int)','STATE_WINDOW(t1.q_bool)','STATE_WINDOW(t1.q_smallint)', + 'STATE_WINDOW(t2.q_tinyint)','STATE_WINDOW(t2.q_bigint)','STATE_WINDOW(t2.q_int)','STATE_WINDOW(t2.q_bool)','STATE_WINDOW(t2.q_smallint)'] + + # order by where + self.order_where = ['order by ts' , 'order by ts asc'] + self.order_u_where = ['order by t1.ts' , 'order by t1.ts asc' , 'order by t2.ts' , 'order by t2.ts asc'] + self.order_desc_where = ['order by ts' , 'order by ts asc' , 'order by ts desc' ] + self.orders_desc_where = ['order by ts' , 'order by ts asc' , 'order by ts desc' , 'order by loc' , 'order by loc asc' , 'order by loc desc'] + + self.group_where = ['group by tbname , loc' , 'group by tbname', 'group by tbname, t_bigint', 'group by tbname,t_int', 'group by tbname, t_smallint', 'group by tbname,t_tinyint', + 'group by tbname,t_float', 'group by tbname,t_double' , 'group by tbname,t_binary', 'group by tbname,t_nchar', 'group by tbname,t_bool' ,'group by tbname ,loc ,t_bigint', + 'group by tbname,t_binary ,t_nchar ,t_bool' , 'group by tbname,t_int ,t_smallint ,t_tinyint' , 'group by tbname,t_float ,t_double ' , + 'PARTITION BY tbname , loc' , 'PARTITION BY tbname', 'PARTITION BY tbname, t_bigint', 'PARTITION BY tbname,t_int', 'PARTITION BY tbname, t_smallint', 'PARTITION BY tbname,t_tinyint', + 'PARTITION BY tbname,t_float', 'PARTITION BY tbname,t_double' , 'PARTITION BY tbname,t_binary', 'PARTITION BY tbname,t_nchar', 'PARTITION BY tbname,t_bool' ,'PARTITION BY tbname ,loc ,t_bigint', + 'PARTITION BY tbname,t_binary ,t_nchar ,t_bool' , 'PARTITION BY tbname,t_int ,t_smallint ,t_tinyint' , 'PARTITION BY tbname,t_float ,t_double '] + self.group_where_j = ['group by t1.loc' , 'group by t1.t_bigint', 'group by t1.t_int', 'group by t1.t_smallint', 'group by t1.t_tinyint', + 'group by t1.t_float', 'group by t1.t_double' , 'group by t1.t_binary', 'group by t1.t_nchar', 'group by t1.t_bool' ,'group by t1.loc ,t1.t_bigint', + 'group by t1.t_binary ,t1.t_nchar ,t1.t_bool' , 'group by t1.t_int ,t1.t_smallint ,t1.t_tinyint' , 'group by t1.t_float ,t1.t_double ' , + 'PARTITION BY t1.loc' , 'PARTITION by t1.t_bigint', 'PARTITION by t1.t_int', 'PARTITION by t1.t_smallint', 'PARTITION by t1.t_tinyint', + 'PARTITION by t1.t_float', 'PARTITION by t1.t_double' , 'PARTITION by t1.t_binary', 'PARTITION by t1.t_nchar', 'PARTITION by t1.t_bool' ,'PARTITION BY t1.loc ,t1.t_bigint', + 'PARTITION by t1.t_binary ,t1.t_nchar ,t1.t_bool' , 'PARTITION by t1.t_int ,t1.t_smallint 
,t1.t_tinyint' , 'PARTITION by t1.t_float ,t1.t_double ',
+            'group by t2.loc' , 'group by t2.t_bigint', 'group by t2.t_int', 'group by t2.t_smallint', 'group by t2.t_tinyint',
+            'group by t2.t_float', 'group by t2.t_double' , 'group by t2.t_binary', 'group by t2.t_nchar', 'group by t2.t_bool' ,'group by t2.loc ,t2.t_bigint',
+            'group by t2.t_binary ,t2.t_nchar ,t2.t_bool' , 'group by t2.t_int ,t2.t_smallint ,t2.t_tinyint' , 'group by t2.t_float ,t2.t_double ' ,
+            'PARTITION BY t2.loc' , 'PARTITION by t2.t_bigint', 'PARTITION by t2.t_int', 'PARTITION by t2.t_smallint', 'PARTITION by t2.t_tinyint',
+            'PARTITION by t2.t_float', 'PARTITION by t2.t_double' , 'PARTITION by t2.t_binary', 'PARTITION by t2.t_nchar', 'PARTITION by t2.t_bool' ,'PARTITION BY t2.loc ,t2.t_bigint',
+            'PARTITION by t2.t_binary ,t2.t_nchar ,t2.t_bool' , 'PARTITION by t2.t_int ,t2.t_smallint ,t2.t_tinyint' , 'PARTITION by t2.t_float ,t2.t_double ']
+
+        self.partiton_where = ['PARTITION BY tbname , loc' , 'PARTITION BY tbname', 'PARTITION BY tbname, t_bigint', 'PARTITION BY tbname,t_int', 'PARTITION BY tbname, t_smallint', 'PARTITION BY tbname,t_tinyint',
+            'PARTITION BY tbname,t_float', 'PARTITION BY tbname,t_double' , 'PARTITION BY tbname,t_binary', 'PARTITION BY tbname,t_nchar', 'PARTITION BY tbname,t_bool' ,'PARTITION BY tbname ,loc ,t_bigint',
+            'PARTITION BY tbname,t_binary ,t_nchar ,t_bool' , 'PARTITION BY tbname,t_int ,t_smallint ,t_tinyint' , 'PARTITION BY tbname,t_float ,t_double ']
+        self.partiton_where_j = ['PARTITION BY t1.loc' , 'PARTITION by t1.t_bigint', 'PARTITION by t1.t_int', 'PARTITION by t1.t_smallint', 'PARTITION by t1.t_tinyint',
+            'PARTITION by t1.t_float', 'PARTITION by t1.t_double' , 'PARTITION by t1.t_binary', 'PARTITION by t1.t_nchar', 'PARTITION by t1.t_bool' ,'PARTITION BY t1.loc ,t1.t_bigint',
+            'PARTITION by t1.t_binary ,t1.t_nchar ,t1.t_bool' , 'PARTITION by t1.t_int ,t1.t_smallint ,t1.t_tinyint' , 'PARTITION by t1.t_float ,t1.t_double ',
+            'PARTITION BY t2.loc' , 'PARTITION by t2.t_bigint', 'PARTITION by t2.t_int', 'PARTITION by t2.t_smallint', 'PARTITION by t2.t_tinyint',
+            'PARTITION by t2.t_float', 'PARTITION by t2.t_double' , 'PARTITION by t2.t_binary', 'PARTITION by t2.t_nchar', 'PARTITION by t2.t_bool' ,'PARTITION BY t2.loc ,t2.t_bigint',
+            'PARTITION by t2.t_binary ,t2.t_nchar ,t2.t_bool' , 'PARTITION by t2.t_int ,t2.t_smallint ,t2.t_tinyint' , 'PARTITION by t2.t_float ,t2.t_double ']
+
+
+        self.group_where_regular = ['group by tbname ' , 'group by tbname', 'group by tbname, q_bigint', 'group by tbname,q_int', 'group by tbname, q_smallint', 'group by tbname,q_tinyint',
+            'group by tbname,q_float', 'group by tbname,q_double' , 'group by tbname,q_binary', 'group by tbname,q_nchar', 'group by tbname,q_bool' ,'group by tbname ,q_bigint',
+            'group by tbname,q_binary ,q_nchar ,q_bool' , 'group by tbname,q_int ,q_smallint ,q_tinyint' , 'group by tbname,q_float ,q_double ' ,
+            'PARTITION BY tbname ' , 'PARTITION BY tbname', 'PARTITION BY tbname, q_bigint', 'PARTITION BY tbname,q_int', 'PARTITION BY tbname, q_smallint', 'PARTITION BY tbname,q_tinyint',
+            'PARTITION BY tbname,q_float', 'PARTITION BY tbname,q_double' , 'PARTITION BY tbname,q_binary', 'PARTITION BY tbname,q_nchar', 'PARTITION BY tbname,q_bool' ,'PARTITION BY tbname ,q_bigint',
+            'PARTITION BY tbname,q_binary ,q_nchar ,q_bool' , 'PARTITION BY tbname,q_int ,q_smallint ,q_tinyint' , 'PARTITION BY tbname,q_float ,q_double ']
+        self.group_where_regular_j = ['group by t1.q_bigint', 'group by t1.q_int', 'group by t1.q_smallint', 'group by t1.q_tinyint',
+            'group by t1.q_float', 'group by t1.q_double' , 'group by t1.q_binary', 'group by t1.q_nchar', 'group by t1.q_bool' ,'group by t1.q_bigint',
+            'group by t1.q_binary ,t1.q_nchar ,t1.q_bool' , 'group by t1.q_int ,t1.q_smallint ,t1.q_tinyint' , 'group by t1.q_float ,t1.q_double ' ,
+            'PARTITION by t1.q_bigint', 'PARTITION by t1.q_int', 'PARTITION by t1.q_smallint', 'PARTITION by t1.q_tinyint',
+            'PARTITION by t1.q_float', 'PARTITION by t1.q_double' , 'PARTITION by t1.q_binary', 'PARTITION by t1.q_nchar', 'PARTITION by t1.q_bool' ,'PARTITION BY t1.q_bigint',
+            'PARTITION by t1.q_binary ,t1.q_nchar ,t1.q_bool' , 'PARTITION by t1.q_int ,t1.q_smallint ,t1.q_tinyint' , 'PARTITION by t1.q_float ,t1.q_double ',
+            'group by t2.q_bigint', 'group by t2.q_int', 'group by t2.q_smallint', 'group by t2.q_tinyint',
+            'group by t2.q_float', 'group by t2.q_double' , 'group by t2.q_binary', 'group by t2.q_nchar', 'group by t2.q_bool' ,'group by t2.q_bigint',
+            'group by t2.q_binary ,t2.q_nchar ,t2.q_bool' , 'group by t2.q_int ,t2.q_smallint ,t2.q_tinyint' , 'group by t2.q_float ,t2.q_double ' ,
+            'PARTITION by t2.q_bigint', 'PARTITION by t2.q_int', 'PARTITION by t2.q_smallint', 'PARTITION by t2.q_tinyint',
+            'PARTITION by t2.q_float', 'PARTITION by t2.q_double' , 'PARTITION by t2.q_binary', 'PARTITION by t2.q_nchar', 'PARTITION by t2.q_bool' ,'PARTITION BY t2.q_bigint',
+            'PARTITION by t2.q_binary ,t2.q_nchar ,t2.q_bool' , 'PARTITION by t2.q_int ,t2.q_smallint ,t2.q_tinyint' , 'PARTITION by t2.q_float ,t2.q_double ']
+
+        self.partiton_where_regular = ['PARTITION BY tbname ' , 'PARTITION BY tbname', 'PARTITION BY tbname, q_bigint', 'PARTITION BY tbname,q_int', 'PARTITION BY tbname, q_smallint', 'PARTITION BY tbname,q_tinyint',
+            'PARTITION BY tbname,q_float', 'PARTITION BY tbname,q_double' , 'PARTITION BY tbname,q_binary', 'PARTITION BY tbname,q_nchar', 'PARTITION BY tbname,q_bool' ,'PARTITION BY tbname ,q_bigint',
+            'PARTITION BY tbname,q_binary ,q_nchar ,q_bool' , 'PARTITION BY tbname,q_int ,q_smallint ,q_tinyint' , 'PARTITION BY tbname,q_float ,q_double ']
+        self.partiton_where_regular_j = ['PARTITION by t1.q_bigint', 'PARTITION by t1.q_int', 'PARTITION by t1.q_smallint', 'PARTITION by t1.q_tinyint',
+            'PARTITION by t1.q_float', 'PARTITION by t1.q_double' , 'PARTITION by t1.q_binary', 'PARTITION by t1.q_nchar', 'PARTITION by t1.q_bool' ,'PARTITION BY t1.q_bigint',
+            'PARTITION by t1.q_binary ,t1.q_nchar ,t1.q_bool' , 'PARTITION by t1.q_int ,t1.q_smallint ,t1.q_tinyint' , 'PARTITION by t1.q_float ,t1.q_double ',
+            'PARTITION by t2.q_bigint', 'PARTITION by t2.q_int', 'PARTITION by t2.q_smallint', 'PARTITION by t2.q_tinyint',
+            'PARTITION by t2.q_float', 'PARTITION by t2.q_double' , 'PARTITION by t2.q_binary', 'PARTITION by t2.q_nchar', 'PARTITION by t2.q_bool' ,'PARTITION BY t2.q_bigint',
+            'PARTITION by t2.q_binary ,t2.q_nchar ,t2.q_bool' , 'PARTITION by t2.q_int ,t2.q_smallint ,t2.q_tinyint' , 'PARTITION by t2.q_float ,t2.q_double ']
+
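+        # The *_where lists above pair each GROUP BY clause with its PARTITION BY
+        # equivalent, so both grouping paths are exercised with the same tag/column
+        # combinations; the _j variants add t1./t2. prefixes for the join cases.
+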
+        self.having_support = ['having count(q_int) > 0','having count(q_bigint) > 0','having count(q_smallint) > 0','having count(q_tinyint) > 0','having count(q_float) > 0','having count(q_double) > 0','having count(q_bool) > 0',
+            'having avg(q_int) > 0','having avg(q_bigint) > 0','having avg(q_smallint) > 0','having avg(q_tinyint) > 0','having avg(q_float) > 0','having avg(q_double) > 0',
+            'having sum(q_int) > 0','having sum(q_bigint) > 0','having sum(q_smallint) > 0','having sum(q_tinyint) > 0','having sum(q_float) > 0','having sum(q_double) > 0',
+            'having STDDEV(q_int) > 0','having STDDEV(q_bigint) > 0','having STDDEV(q_smallint) > 0','having STDDEV(q_tinyint) > 0','having STDDEV(q_float) > 0','having STDDEV(q_double) > 0',
+            'having TWA(q_int) > 0','having TWA(q_bigint) > 0','having TWA(q_smallint) > 0','having TWA(q_tinyint) > 0','having TWA(q_float) > 0','having TWA(q_double) > 0',
+            'having IRATE(q_int) > 0','having IRATE(q_bigint) > 0','having IRATE(q_smallint) > 0','having IRATE(q_tinyint) > 0','having IRATE(q_float) > 0','having IRATE(q_double) > 0',
+            'having MIN(q_int) > 0','having MIN(q_bigint) > 0','having MIN(q_smallint) > 0','having MIN(q_tinyint) > 0','having MIN(q_float) > 0','having MIN(q_double) > 0',
+            'having MAX(q_int) > 0','having MAX(q_bigint) > 0','having MAX(q_smallint) > 0','having MAX(q_tinyint) > 0','having MAX(q_float) > 0','having MAX(q_double) > 0',
+            'having FIRST(q_int) > 0','having FIRST(q_bigint) > 0','having FIRST(q_smallint) > 0','having FIRST(q_tinyint) > 0','having FIRST(q_float) > 0','having FIRST(q_double) > 0',
+            'having LAST(q_int) > 0','having LAST(q_bigint) > 0','having LAST(q_smallint) > 0','having LAST(q_tinyint) > 0','having LAST(q_float) > 0','having LAST(q_double) > 0',
+            'having APERCENTILE(q_int,10) > 0','having APERCENTILE(q_bigint,10) > 0','having APERCENTILE(q_smallint,10) > 0','having APERCENTILE(q_tinyint,10) > 0','having APERCENTILE(q_float,10) > 0','having APERCENTILE(q_double,10) > 0']
+        self.having_not_support = ['having TOP(q_int,10) > 0','having TOP(q_bigint,10) > 0','having TOP(q_smallint,10) > 0','having TOP(q_tinyint,10) > 0','having TOP(q_float,10) > 0','having TOP(q_double,10) > 0','having TOP(q_bool,10) > 0',
+            'having BOTTOM(q_int,10) > 0','having BOTTOM(q_bigint,10) > 0','having BOTTOM(q_smallint,10) > 0','having BOTTOM(q_tinyint,10) > 0','having BOTTOM(q_float,10) > 0','having BOTTOM(q_double,10) > 0','having BOTTOM(q_bool,10) > 0',
+            'having LEASTSQUARES(q_int) > 0','having LEASTSQUARES(q_bigint) > 0','having LEASTSQUARES(q_smallint) > 0','having LEASTSQUARES(q_tinyint) > 0','having LEASTSQUARES(q_float) > 0','having LEASTSQUARES(q_double) > 0','having LEASTSQUARES(q_bool) > 0',
+            'having FIRST(q_bool) > 0','having IRATE(q_bool) > 0','having PERCENTILE(q_bool,10) > 0','having avg(q_bool) > 0','having LAST_ROW(q_bool) > 0','having sum(q_bool) > 0','having STDDEV(q_bool) > 0','having APERCENTILE(q_bool,10) > 0','having TWA(q_bool) > 0','having LAST(q_bool) > 0',
+            'having PERCENTILE(q_int,10) > 0','having PERCENTILE(q_bigint,10) > 0','having PERCENTILE(q_smallint,10) > 0','having PERCENTILE(q_tinyint,10) > 0','having PERCENTILE(q_float,10) > 0','having PERCENTILE(q_double,10) > 0']
+        self.having_tagnot_support = ['having LAST_ROW(q_int) > 0','having LAST_ROW(q_bigint) > 0','having LAST_ROW(q_smallint) > 0','having LAST_ROW(q_tinyint) > 0','having LAST_ROW(q_float) > 0','having LAST_ROW(q_double) > 0']
+
+        self.having_support_j = ['having count(t1.q_int) > 0','having count(t1.q_bigint) > 0','having count(t1.q_smallint) > 0','having count(t1.q_tinyint) > 0','having count(t1.q_float) > 0','having count(t1.q_double) > 0','having count(t1.q_bool) > 0',
+            'having avg(t1.q_int) > 0','having avg(t1.q_bigint) > 0','having avg(t1.q_smallint) > 0','having avg(t1.q_tinyint) > 0','having avg(t1.q_float) > 0','having avg(t1.q_double) > 0',
+            'having sum(t1.q_int) > 0','having sum(t1.q_bigint) > 0','having sum(t1.q_smallint) > 0','having sum(t1.q_tinyint) > 0','having sum(t1.q_float) > 0','having sum(t1.q_double) > 0',
+            'having STDDEV(t1.q_int) > 0','having STDDEV(t1.q_bigint) > 0','having STDDEV(t1.q_smallint) > 0','having STDDEV(t1.q_tinyint) > 0','having STDDEV(t1.q_float) > 0','having STDDEV(t1.q_double) > 0',
+            'having TWA(t1.q_int) > 0','having TWA(t1.q_bigint) > 0','having TWA(t1.q_smallint) > 0','having TWA(t1.q_tinyint) > 0','having TWA(t1.q_float) > 0','having TWA(t1.q_double) > 0',
+            'having IRATE(t1.q_int) > 0','having IRATE(t1.q_bigint) > 0','having IRATE(t1.q_smallint) > 0','having IRATE(t1.q_tinyint) > 0','having IRATE(t1.q_float) > 0','having IRATE(t1.q_double) > 0',
+            'having MIN(t1.q_int) > 0','having MIN(t1.q_bigint) > 0','having MIN(t1.q_smallint) > 0','having MIN(t1.q_tinyint) > 0','having MIN(t1.q_float) > 0','having MIN(t1.q_double) > 0',
+            'having MAX(t1.q_int) > 0','having MAX(t1.q_bigint) > 0','having MAX(t1.q_smallint) > 0','having MAX(t1.q_tinyint) > 0','having MAX(t1.q_float) > 0','having MAX(t1.q_double) > 0',
+            'having FIRST(t1.q_int) > 0','having FIRST(t1.q_bigint) > 0','having FIRST(t1.q_smallint) > 0','having FIRST(t1.q_tinyint) > 0','having FIRST(t1.q_float) > 0','having FIRST(t1.q_double) > 0',
+            'having LAST(t1.q_int) > 0','having LAST(t1.q_bigint) > 0','having LAST(t1.q_smallint) > 0','having LAST(t1.q_tinyint) > 0','having LAST(t1.q_float) > 0','having LAST(t1.q_double) > 0',
+            'having APERCENTILE(t1.q_int,10) > 0','having APERCENTILE(t1.q_bigint,10) > 0','having APERCENTILE(t1.q_smallint,10) > 0','having APERCENTILE(t1.q_tinyint,10) > 0','having APERCENTILE(t1.q_float,10) > 0','having APERCENTILE(t1.q_double,10) > 0']
+
+        # limit offset where
+        self.limit_where = ['limit 1 offset 1' , 'limit 1' , 'limit 2 offset 1' , 'limit 2', 'limit 12 offset 1' , 'limit 20', 'limit 20 offset 10' , 'limit 200']
+        self.limit1_where = ['limit 1 offset 1' , 'limit 1' ]
+        self.limit_u_where = ['limit 100 offset 10' , 'limit 50' , 'limit 100' , 'limit 10' ]
+
+        # slimit soffset where
+        self.slimit_where = ['slimit 1 soffset 1' , 'slimit 1' , 'slimit 2 soffset 1' , 'slimit 2']
+        self.slimit1_where = ['slimit 2 soffset 1' , 'slimit 1' ]
+
+        # aggregate functions include [all: count(*)\avg\sum\stddev ||regular: twa\irate\leastsquares ||group by tbname: twa\irate]
+        # selection functions include [all: min\max\first(*)\last(*)\top\bottom\apercentile\last_row(*)(not with interval)\interp(*)(FILL) ||regular: percentile]
+        # calculation functions include [all: spread\+-*/ ||regular: diff\derivative ||group by tbname: diff\derivative]
+        # **_ns_** marks cases not supported on super tables, which is why they are kept separate from the regular-table lists
+        # calc_select_all     calc_select_regular     calc_select_in_ts     calc_select_fill     calc_select_not_interval
+        # calc_aggregate_all  calc_aggregate_regular  calc_aggregate_groupbytbname
+        # calc_calculate_all  calc_calculate_regular  calc_calculate_groupbytbname
+
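+        # Illustrative shape of a generated statement (each clause is drawn at
+        # random from the lists above/below; <q_where> stands for an entry of the
+        # q_where list defined earlier):
+        #   select max(q_int) from stable_1 where <q_where>
+        #       group by tbname having count(q_int) > 0 limit 1 offset 1;
+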
+        self.calc_select_all = ['bottom(q_int,20)' , 'bottom(q_bigint,20)' , 'bottom(q_smallint,20)' , 'bottom(q_tinyint,20)' ,'bottom(q_float,20)' , 'bottom(q_double,20)' ,
+            'top(q_int,20)' , 'top(q_bigint,20)' , 'top(q_smallint,20)' ,'top(q_tinyint,20)' ,'top(q_float,20)' ,'top(q_double,20)' ,
+            'first(q_int)' , 'first(q_bigint)' , 'first(q_smallint)' , 'first(q_tinyint)' , 'first(q_float)' ,'first(q_double)' ,'first(q_binary)' ,'first(q_nchar)' ,'first(q_bool)' ,'first(q_ts)' ,
+            'last(q_int)' , 'last(q_bigint)' , 'last(q_smallint)' , 'last(q_tinyint)' , 'last(q_float)' ,'last(q_double)' , 'last(q_binary)' ,'last(q_nchar)' ,'last(q_bool)' ,'last(q_ts)' ,
+            'min(q_int)' , 'min(q_bigint)' , 'min(q_smallint)' , 'min(q_tinyint)' , 'min(q_float)' ,'min(q_double)' ,
+            'max(q_int)' , 'max(q_bigint)' , 'max(q_smallint)' , 'max(q_tinyint)' ,'max(q_float)' ,'max(q_double)' ,
+            'apercentile(q_int,20)' , 'apercentile(q_bigint,20)' ,'apercentile(q_smallint,20)' ,'apercentile(q_tinyint,20)' ,'apercentile(q_float,20)' ,'apercentile(q_double,20)' ,
+            'last_row(q_int)' , 'last_row(q_bigint)' , 'last_row(q_smallint)' , 'last_row(q_tinyint)' , 'last_row(q_float)' ,
+            'last_row(q_double)' , 'last_row(q_bool)' ,'last_row(q_binary)' ,'last_row(q_nchar)' ,'last_row(q_ts)']
+
+        self.calc_select_in_ts = ['bottom(q_int,20)' , 'bottom(q_bigint,20)' , 'bottom(q_smallint,20)' , 'bottom(q_tinyint,20)' ,'bottom(q_float,20)' , 'bottom(q_double,20)' ,
+            'top(q_int,20)' , 'top(q_bigint,20)' , 'top(q_smallint,20)' ,'top(q_tinyint,20)' ,'top(q_float,20)' ,'top(q_double,20)' ,
+            'first(q_int)' , 'first(q_bigint)' , 'first(q_smallint)' , 'first(q_tinyint)' , 'first(q_float)' ,'first(q_double)' ,'first(q_binary)' ,'first(q_nchar)' ,'first(q_bool)' ,'first(q_ts)' ,
+            'last(q_int)' , 'last(q_bigint)' , 'last(q_smallint)' , 'last(q_tinyint)' , 'last(q_float)' ,'last(q_double)' , 'last(q_binary)' ,'last(q_nchar)' ,'last(q_bool)' ,'last(q_ts)' ]
+
+        self.calc_select_in = ['min(q_int)' , 'min(q_bigint)' , 'min(q_smallint)' , 'min(q_tinyint)' , 'min(q_float)' ,'min(q_double)' ,
+            'max(q_int)' , 'max(q_bigint)' , 'max(q_smallint)' , 'max(q_tinyint)' ,'max(q_float)' ,'max(q_double)' ,
+            'apercentile(q_int,20)' , 'apercentile(q_bigint,20)' ,'apercentile(q_smallint,20)' ,'apercentile(q_tinyint,20)' ,'apercentile(q_float,20)' ,'apercentile(q_double,20)' ,
+            'last_row(q_int)' , 'last_row(q_bigint)' , 'last_row(q_smallint)' , 'last_row(q_tinyint)' , 'last_row(q_float)' ,
+            'last_row(q_double)' , 'last_row(q_bool)' ,'last_row(q_binary)' ,'last_row(q_nchar)' ,'last_row(q_ts)']
+
+        self.calc_select_not_support_ts = ['first(q_int)' , 'first(q_bigint)' , 'first(q_smallint)' , 'first(q_tinyint)' , 'first(q_float)' ,'first(q_double)' ,'first(q_binary)' ,'first(q_nchar)' ,'first(q_bool)' ,'first(q_ts)' ,
+            'last(q_int)' , 'last(q_bigint)' , 'last(q_smallint)' , 'last(q_tinyint)' , 'last(q_float)' ,'last(q_double)' , 'last(q_binary)' ,'last(q_nchar)' ,'last(q_bool)' ,'last(q_ts)' ,
+            'last_row(q_int)' , 'last_row(q_bigint)' , 'last_row(q_smallint)' , 'last_row(q_tinyint)' , 'last_row(q_float)' ,
+            'last_row(q_double)' , 'last_row(q_bool)' ,'last_row(q_binary)' ,'last_row(q_nchar)' ,'last_row(q_ts)',
+            'apercentile(q_int,20)' , 'apercentile(q_bigint,20)' ,'apercentile(q_smallint,20)' ,'apercentile(q_tinyint,20)' ,'apercentile(q_float,20)' ,'apercentile(q_double,20)']
+
+        self.calc_select_support_ts = ['bottom(q_int,20)' , 'bottom(q_bigint,20)' , 'bottom(q_smallint,20)' , 'bottom(q_tinyint,20)' ,'bottom(q_float,20)' , 'bottom(q_double,20)' ,
+            'top(q_int,20)' , 'top(q_bigint,20)' , 'top(q_smallint,20)' ,'top(q_tinyint,20)' ,'top(q_float,20)' ,'top(q_double,20)' ,
+            'min(q_int)' , 'min(q_bigint)' , 'min(q_smallint)' , 'min(q_tinyint)' , 'min(q_float)' ,'min(q_double)' ,
+            'max(q_int)' , 'max(q_bigint)' , 'max(q_smallint)' , 'max(q_tinyint)' ,'max(q_float)' ,'max(q_double)' ]
+
+        self.calc_select_regular = [ 'PERCENTILE(q_int,10)' ,'PERCENTILE(q_bigint,20)' , 'PERCENTILE(q_smallint,30)' ,'PERCENTILE(q_tinyint,40)' ,'PERCENTILE(q_float,50)' ,'PERCENTILE(q_double,60)']
+
+
+        self.calc_select_fill = ['INTERP(q_int)' ,'INTERP(q_bigint)' ,'INTERP(q_smallint)' ,'INTERP(q_tinyint)', 'INTERP(q_float)' ,'INTERP(q_double)']
+        self.interp_where = ['ts = now' , 'ts = \'2020-09-13 20:26:40.000\'' , 'ts = \'2020-09-13 20:26:40.009\'' ,'tbname in (\'table_1\') and ts = now' ,'tbname in (\'table_0\' ,\'table_1\',\'table_2\',\'table_3\',\'table_4\',\'table_5\') and ts = \'2020-09-13 20:26:40.000\'','tbname like \'table%\' and ts = \'2020-09-13 20:26:40.002\'']
+
+        # two-table join
+        self.calc_select_in_ts_j = ['bottom(t1.q_int,20)' , 'bottom(t1.q_bigint,20)' , 'bottom(t1.q_smallint,20)' , 'bottom(t1.q_tinyint,20)' ,'bottom(t1.q_float,20)' , 'bottom(t1.q_double,20)' ,
+            'top(t1.q_int,20)' , 'top(t1.q_bigint,20)' , 'top(t1.q_smallint,20)' ,'top(t1.q_tinyint,20)' ,'top(t1.q_float,20)' ,'top(t1.q_double,20)' ,
+            'first(t1.q_int)' , 'first(t1.q_bigint)' , 'first(t1.q_smallint)' , 'first(t1.q_tinyint)' , 'first(t1.q_float)' ,'first(t1.q_double)' ,'first(t1.q_binary)' ,'first(t1.q_nchar)' ,'first(t1.q_bool)' ,'first(t1.q_ts)' ,
+            'last(t1.q_int)' , 'last(t1.q_bigint)' , 'last(t1.q_smallint)' , 'last(t1.q_tinyint)' , 'last(t1.q_float)' ,'last(t1.q_double)' , 'last(t1.q_binary)' ,'last(t1.q_nchar)' ,'last(t1.q_bool)' ,'last(t1.q_ts)' ,
+            'bottom(t2.q_int,20)' , 'bottom(t2.q_bigint,20)' , 'bottom(t2.q_smallint,20)' , 'bottom(t2.q_tinyint,20)' ,'bottom(t2.q_float,20)' , 'bottom(t2.q_double,20)' ,
+            'top(t2.q_int,20)' , 'top(t2.q_bigint,20)' , 'top(t2.q_smallint,20)' ,'top(t2.q_tinyint,20)' ,'top(t2.q_float,20)' ,'top(t2.q_double,20)' ,
+            'first(t2.q_int)' , 'first(t2.q_bigint)' , 'first(t2.q_smallint)' , 'first(t2.q_tinyint)' , 'first(t2.q_float)' ,'first(t2.q_double)' ,'first(t2.q_binary)' ,'first(t2.q_nchar)' ,'first(t2.q_bool)' ,'first(t2.q_ts)' ,
+            'last(t2.q_int)' , 'last(t2.q_bigint)' , 'last(t2.q_smallint)' , 'last(t2.q_tinyint)' , 'last(t2.q_float)' ,'last(t2.q_double)' , 'last(t2.q_binary)' ,'last(t2.q_nchar)' ,'last(t2.q_bool)' ,'last(t2.q_ts)']
+
+        self.calc_select_in_support_ts_j = ['bottom(t1.q_int,20)' , 'bottom(t1.q_bigint,20)' , 'bottom(t1.q_smallint,20)' , 'bottom(t1.q_tinyint,20)' ,'bottom(t1.q_float,20)' , 'bottom(t1.q_double,20)' ,
+            'top(t1.q_int,20)' , 'top(t1.q_bigint,20)' , 'top(t1.q_smallint,20)' ,'top(t1.q_tinyint,20)' ,'top(t1.q_float,20)' ,'top(t1.q_double,20)' ,
+            'min(t1.q_int)' , 'min(t1.q_bigint)' , 'min(t1.q_smallint)' , 'min(t1.q_tinyint)' , 'min(t1.q_float)' ,'min(t1.q_double)' ,
+            'max(t1.q_int)' , 'max(t1.q_bigint)' , 'max(t1.q_smallint)' , 'max(t1.q_tinyint)' ,'max(t1.q_float)' ,'max(t1.q_double)' ,
+            'bottom(t2.q_int,20)' , 'bottom(t2.q_bigint,20)' , 'bottom(t2.q_smallint,20)' , 'bottom(t2.q_tinyint,20)' ,'bottom(t2.q_float,20)' , 'bottom(t2.q_double,20)' ,
+            'top(t2.q_int,20)' , 'top(t2.q_bigint,20)' , 'top(t2.q_smallint,20)' ,'top(t2.q_tinyint,20)' ,'top(t2.q_float,20)' ,'top(t2.q_double,20)' ,
+            'min(t2.q_int)' , 'min(t2.q_bigint)' , 'min(t2.q_smallint)' , 'min(t2.q_tinyint)' , 'min(t2.q_float)' ,'min(t2.q_double)' ,
+            'max(t2.q_int)' , 'max(t2.q_bigint)' , 'max(t2.q_smallint)' , 'max(t2.q_tinyint)' ,'max(t2.q_float)' ,'max(t2.q_double)' ,
+            ]
+
+        self.calc_select_in_not_support_ts_j = ['apercentile(t1.q_int,20)' , 'apercentile(t1.q_bigint,20)' ,'apercentile(t1.q_smallint,20)' ,'apercentile(t1.q_tinyint,20)' ,'apercentile(t1.q_float,20)' ,'apercentile(t1.q_double,20)' ,
+            'last_row(t1.q_int)' , 'last_row(t1.q_bigint)' , 'last_row(t1.q_smallint)' , 'last_row(t1.q_tinyint)' , 'last_row(t1.q_float)' ,
+            'last_row(t1.q_double)' , 'last_row(t1.q_bool)' ,'last_row(t1.q_binary)' ,'last_row(t1.q_nchar)' ,'last_row(t1.q_ts)' ,
+            'apercentile(t2.q_int,20)' , 'apercentile(t2.q_bigint,20)' ,'apercentile(t2.q_smallint,20)' ,'apercentile(t2.q_tinyint,20)' ,'apercentile(t2.q_float,20)' ,'apercentile(t2.q_double,20)' ,
+            'last_row(t2.q_int)' , 'last_row(t2.q_bigint)' , 'last_row(t2.q_smallint)' , 'last_row(t2.q_tinyint)' , 'last_row(t2.q_float)' ,
+            'last_row(t2.q_double)' , 'last_row(t2.q_bool)' ,'last_row(t2.q_binary)' ,'last_row(t2.q_nchar)' ,'last_row(t2.q_ts)']
+
+        self.calc_select_in_j = ['min(t1.q_int)' , 'min(t1.q_bigint)' , 'min(t1.q_smallint)' , 'min(t1.q_tinyint)' , 'min(t1.q_float)' ,'min(t1.q_double)' ,
+            'max(t1.q_int)' , 'max(t1.q_bigint)' , 'max(t1.q_smallint)' , 'max(t1.q_tinyint)' ,'max(t1.q_float)' ,'max(t1.q_double)' ,
+            'apercentile(t1.q_int,20)' , 'apercentile(t1.q_bigint,20)' ,'apercentile(t1.q_smallint,20)' ,'apercentile(t1.q_tinyint,20)' ,'apercentile(t1.q_float,20)' ,'apercentile(t1.q_double,20)' ,
+            'last_row(t1.q_int)' , 'last_row(t1.q_bigint)' , 'last_row(t1.q_smallint)' , 'last_row(t1.q_tinyint)' , 'last_row(t1.q_float)' ,
+            'last_row(t1.q_double)' , 'last_row(t1.q_bool)' ,'last_row(t1.q_binary)' ,'last_row(t1.q_nchar)' ,'last_row(t1.q_ts)' ,
+            'min(t2.q_int)' , 'min(t2.q_bigint)' , 'min(t2.q_smallint)' , 'min(t2.q_tinyint)' , 'min(t2.q_float)' ,'min(t2.q_double)' ,
+            'max(t2.q_int)' , 'max(t2.q_bigint)' , 'max(t2.q_smallint)' , 'max(t2.q_tinyint)' ,'max(t2.q_float)' ,'max(t2.q_double)' ,
+            'apercentile(t2.q_int,20)' , 'apercentile(t2.q_bigint,20)' ,'apercentile(t2.q_smallint,20)' ,'apercentile(t2.q_tinyint,20)' ,'apercentile(t2.q_float,20)' ,'apercentile(t2.q_double,20)' ,
+            'last_row(t2.q_int)' , 'last_row(t2.q_bigint)' , 'last_row(t2.q_smallint)' , 'last_row(t2.q_tinyint)' , 'last_row(t2.q_float)' ,
+            'last_row(t2.q_double)' , 'last_row(t2.q_bool)' ,'last_row(t2.q_binary)' ,'last_row(t2.q_nchar)' ,'last_row(t2.q_ts)']
+        self.calc_select_all_j = self.calc_select_in_ts_j + self.calc_select_in_j
+
+        self.calc_select_regular_j = [ 'PERCENTILE(t1.q_int,10)' ,'PERCENTILE(t1.q_bigint,20)' , 'PERCENTILE(t1.q_smallint,30)' ,'PERCENTILE(t1.q_tinyint,40)' ,'PERCENTILE(t1.q_float,50)' ,'PERCENTILE(t1.q_double,60)' ,
+            'PERCENTILE(t2.q_int,10)' ,'PERCENTILE(t2.q_bigint,20)' , 'PERCENTILE(t2.q_smallint,30)' ,'PERCENTILE(t2.q_tinyint,40)' ,'PERCENTILE(t2.q_float,50)' ,'PERCENTILE(t2.q_double,60)']
+
+
+        self.calc_select_fill_j = ['INTERP(t1.q_int)' ,'INTERP(t1.q_bigint)' ,'INTERP(t1.q_smallint)' ,'INTERP(t1.q_tinyint)', 'INTERP(t1.q_float)' ,'INTERP(t1.q_double)' ,
+            'INTERP(t2.q_int)' ,'INTERP(t2.q_bigint)' ,'INTERP(t2.q_smallint)' ,'INTERP(t2.q_tinyint)', 'INTERP(t2.q_float)' ,'INTERP(t2.q_double)']
+        self.interp_where_j = ['t1.ts = now' , 't1.ts = \'2020-09-13 20:26:40.000\'' , 't1.ts = \'2020-09-13 20:26:40.009\'' ,'t2.ts = now' , 't2.ts = \'2020-09-13 20:26:40.000\'' , 't2.ts = \'2020-09-13 20:26:40.009\'' ,
+            't1.tbname in (\'table_1\') and t1.ts = now' ,'t1.tbname in (\'table_0\' ,\'table_1\',\'table_2\',\'table_3\',\'table_4\',\'table_5\') and t1.ts = \'2020-09-13 20:26:40.000\'','t1.tbname like \'table%\' and t1.ts = \'2020-09-13 20:26:40.002\'',
+            't2.tbname in (\'table_1\') and t2.ts = now' ,'t2.tbname in (\'table_0\' ,\'table_1\',\'table_2\',\'table_3\',\'table_4\',\'table_5\') and t2.ts = \'2020-09-13 20:26:40.000\'','t2.tbname like \'table%\' and t2.ts = \'2020-09-13 20:26:40.002\'']
+
+        # calc_aggregate_all  calc_aggregate_regular  calc_aggregate_groupbytbname  APERCENTILE\PERCENTILE
+        # aggregate functions include [all: count(*)\avg\sum\stddev ||regular: twa\irate\leastsquares ||group by tbname: twa\irate]
+        self.calc_aggregate_all = ['count(*)' , 'count(q_int)' ,'count(q_bigint)' , 'count(q_smallint)' ,'count(q_tinyint)' ,'count(q_float)' ,
+            'count(q_double)' ,'count(q_binary)' ,'count(q_nchar)' ,'count(q_bool)' ,'count(q_ts)' ,
+            'avg(q_int)' ,'avg(q_bigint)' , 'avg(q_smallint)' ,'avg(q_tinyint)' ,'avg(q_float)' ,'avg(q_double)' ,
+            'sum(q_int)' ,'sum(q_bigint)' , 'sum(q_smallint)' ,'sum(q_tinyint)' ,'sum(q_float)' ,'sum(q_double)' ,
+            'STDDEV(q_int)' ,'STDDEV(q_bigint)' , 'STDDEV(q_smallint)' ,'STDDEV(q_tinyint)' ,'STDDEV(q_float)' ,'STDDEV(q_double)',
+            'APERCENTILE(q_int,10)' ,'APERCENTILE(q_bigint,20)' , 'APERCENTILE(q_smallint,30)' ,'APERCENTILE(q_tinyint,40)' ,'APERCENTILE(q_float,50)' ,'APERCENTILE(q_double,60)']
+
+        self.calc_aggregate_regular = ['twa(q_int)' ,'twa(q_bigint)' , 'twa(q_smallint)' ,'twa(q_tinyint)' ,'twa (q_float)' ,'twa(q_double)' ,
+            'IRATE(q_int)' ,'IRATE(q_bigint)' , 'IRATE(q_smallint)' ,'IRATE(q_tinyint)' ,'IRATE (q_float)' ,'IRATE(q_double)' ,
+            'LEASTSQUARES(q_int,15,3)' , 'LEASTSQUARES(q_bigint,10,1)' , 'LEASTSQUARES(q_smallint,20,3)' ,'LEASTSQUARES(q_tinyint,10,4)' ,'LEASTSQUARES(q_float,6,4)' ,'LEASTSQUARES(q_double,3,1)' ,
+            'PERCENTILE(q_int,10)' ,'PERCENTILE(q_bigint,20)' , 'PERCENTILE(q_smallint,30)' ,'PERCENTILE(q_tinyint,40)' ,'PERCENTILE(q_float,50)' ,'PERCENTILE(q_double,60)']
+
+        self.calc_aggregate_groupbytbname = ['twa(q_int)' ,'twa(q_bigint)' , 'twa(q_smallint)' ,'twa(q_tinyint)' ,'twa (q_float)' ,'twa(q_double)' ,
+            'IRATE(q_int)' ,'IRATE(q_bigint)' , 'IRATE(q_smallint)' ,'IRATE(q_tinyint)' ,'IRATE (q_float)' ,'IRATE(q_double)' ]
+
+        # two-table join
+        self.calc_aggregate_all_j = ['count(t1.*)' , 'count(t1.q_int)' ,'count(t1.q_bigint)' , 'count(t1.q_smallint)' ,'count(t1.q_tinyint)' ,'count(t1.q_float)' ,
+            'count(t1.q_double)' ,'count(t1.q_binary)' ,'count(t1.q_nchar)' ,'count(t1.q_bool)' ,'count(t1.q_ts)' ,
+            'avg(t1.q_int)' ,'avg(t1.q_bigint)' , 'avg(t1.q_smallint)' ,'avg(t1.q_tinyint)' ,'avg(t1.q_float)' ,'avg(t1.q_double)' ,
+            'sum(t1.q_int)' ,'sum(t1.q_bigint)' , 'sum(t1.q_smallint)' ,'sum(t1.q_tinyint)' ,'sum(t1.q_float)' ,'sum(t1.q_double)' ,
+            'STDDEV(t1.q_int)' ,'STDDEV(t1.q_bigint)' , 'STDDEV(t1.q_smallint)' ,'STDDEV(t1.q_tinyint)' ,'STDDEV(t1.q_float)' ,'STDDEV(t1.q_double)',
+            'APERCENTILE(t1.q_int,10)' ,'APERCENTILE(t1.q_bigint,20)' , 'APERCENTILE(t1.q_smallint,30)' ,'APERCENTILE(t1.q_tinyint,40)' ,'APERCENTILE(t1.q_float,50)' ,'APERCENTILE(t1.q_double,60)' ,
+            'count(t2.*)' , 'count(t2.q_int)' ,'count(t2.q_bigint)' , 'count(t2.q_smallint)' ,'count(t2.q_tinyint)' ,'count(t2.q_float)' ,
+            'count(t2.q_double)' ,'count(t2.q_binary)' ,'count(t2.q_nchar)' ,'count(t2.q_bool)' ,'count(t2.q_ts)' ,
+            'avg(t2.q_int)' ,'avg(t2.q_bigint)' , 'avg(t2.q_smallint)' ,'avg(t2.q_tinyint)' ,'avg(t2.q_float)' ,'avg(t2.q_double)' ,
+            'sum(t2.q_int)' ,'sum(t2.q_bigint)' , 'sum(t2.q_smallint)' ,'sum(t2.q_tinyint)' ,'sum(t2.q_float)' ,'sum(t2.q_double)' ,
+            'STDDEV(t2.q_int)' ,'STDDEV(t2.q_bigint)' , 'STDDEV(t2.q_smallint)' ,'STDDEV(t2.q_tinyint)' ,'STDDEV(t2.q_float)' ,'STDDEV(t2.q_double)',
+            'APERCENTILE(t2.q_int,10)' ,'APERCENTILE(t2.q_bigint,20)' , 'APERCENTILE(t2.q_smallint,30)' ,'APERCENTILE(t2.q_tinyint,40)' ,'APERCENTILE(t2.q_float,50)' ,'APERCENTILE(t2.q_double,60)']
+
+        self.calc_aggregate_regular_j = ['twa(t1.q_int)' ,'twa(t1.q_bigint)' , 'twa(t1.q_smallint)' ,'twa(t1.q_tinyint)' ,'twa (t1.q_float)' ,'twa(t1.q_double)' ,
+            'IRATE(t1.q_int)' ,'IRATE(t1.q_bigint)' , 'IRATE(t1.q_smallint)' ,'IRATE(t1.q_tinyint)' ,'IRATE (t1.q_float)' ,'IRATE(t1.q_double)' ,
+            'LEASTSQUARES(t1.q_int,15,3)' , 'LEASTSQUARES(t1.q_bigint,10,1)' , 'LEASTSQUARES(t1.q_smallint,20,3)' ,'LEASTSQUARES(t1.q_tinyint,10,4)' ,'LEASTSQUARES(t1.q_float,6,4)' ,'LEASTSQUARES(t1.q_double,3,1)' ,
+            'PERCENTILE(t1.q_int,10)' ,'PERCENTILE(t1.q_bigint,20)' , 'PERCENTILE(t1.q_smallint,30)' ,'PERCENTILE(t1.q_tinyint,40)' ,'PERCENTILE(t1.q_float,50)' ,'PERCENTILE(t1.q_double,60)' ,
+            'twa(t2.q_int)' ,'twa(t2.q_bigint)' , 'twa(t2.q_smallint)' ,'twa(t2.q_tinyint)' ,'twa (t2.q_float)' ,'twa(t2.q_double)' ,
+            'IRATE(t2.q_int)' ,'IRATE(t2.q_bigint)' , 'IRATE(t2.q_smallint)' ,'IRATE(t2.q_tinyint)' ,'IRATE (t2.q_float)' ,'IRATE(t2.q_double)',
+            'LEASTSQUARES(t2.q_int,15,3)' , 'LEASTSQUARES(t2.q_bigint,10,1)' , 'LEASTSQUARES(t2.q_smallint,20,3)' ,'LEASTSQUARES(t2.q_tinyint,10,4)' ,'LEASTSQUARES(t2.q_float,6,4)' ,'LEASTSQUARES(t2.q_double,3,1)' ,
+            'PERCENTILE(t2.q_int,10)' ,'PERCENTILE(t2.q_bigint,20)' , 'PERCENTILE(t2.q_smallint,30)' ,'PERCENTILE(t2.q_tinyint,40)' ,'PERCENTILE(t2.q_float,50)' ,'PERCENTILE(t2.q_double,60)']
+
+        self.calc_aggregate_groupbytbname_j = ['twa(t1.q_int)' ,'twa(t1.q_bigint)' , 'twa(t1.q_smallint)' ,'twa(t1.q_tinyint)' ,'twa (t1.q_float)' ,'twa(t1.q_double)' ,
+            'IRATE(t1.q_int)' ,'IRATE(t1.q_bigint)' , 'IRATE(t1.q_smallint)' ,'IRATE(t1.q_tinyint)' ,'IRATE (t1.q_float)' ,'IRATE(t1.q_double)' ,
+            'twa(t2.q_int)' ,'twa(t2.q_bigint)' , 'twa(t2.q_smallint)' ,'twa(t2.q_tinyint)' ,'twa (t2.q_float)' ,'twa(t2.q_double)' ,
+            'IRATE(t2.q_int)' ,'IRATE(t2.q_bigint)' , 'IRATE(t2.q_smallint)' ,'IRATE(t2.q_tinyint)' ,'IRATE (t2.q_float)' ,'IRATE(t2.q_double)' ]
+
+        # calc_calculate_all  calc_calculate_regular  calc_calculate_groupbytbname
+        # calculation functions include [all: spread\+-*/ ||regular: diff\derivative ||group by tbname: diff\derivative]
+        self.calc_calculate_all = ['SPREAD(ts)' , 'SPREAD(q_ts)' , 'SPREAD(q_int)' ,'SPREAD(q_bigint)' , 'SPREAD(q_smallint)' ,'SPREAD(q_tinyint)' ,'SPREAD(q_float)' ,'SPREAD(q_double)' ,
+            '(SPREAD(q_int) + SPREAD(q_bigint))' , '(SPREAD(q_smallint) - SPREAD(q_float))', '(SPREAD(q_double) * SPREAD(q_tinyint))' , '(SPREAD(q_double) / SPREAD(q_float))']
+        self.calc_calculate_regular = ['DIFF(q_int)' ,'DIFF(q_bigint)' , 'DIFF(q_smallint)' ,'DIFF(q_tinyint)' ,'DIFF(q_float)' ,'DIFF(q_double)' ,
+            'DIFF(q_int,0)' ,'DIFF(q_bigint,0)' , 'DIFF(q_smallint,0)' ,'DIFF(q_tinyint,0)' ,'DIFF(q_float,0)' ,'DIFF(q_double,0)' ,
+            'DIFF(q_int,1)' ,'DIFF(q_bigint,1)' , 'DIFF(q_smallint,1)' ,'DIFF(q_tinyint,1)' ,'DIFF(q_float,1)' ,'DIFF(q_double,1)' ,
+            'DERIVATIVE(q_int,15s,0)' , 'DERIVATIVE(q_bigint,10s,1)' , 'DERIVATIVE(q_smallint,20s,0)' ,'DERIVATIVE(q_tinyint,10s,1)' ,'DERIVATIVE(q_float,6s,0)' ,'DERIVATIVE(q_double,3s,1)' ]
+        self.calc_calculate_groupbytbname = self.calc_calculate_regular
+
+        # two-table join
+        self.calc_calculate_all_j = ['SPREAD(t1.ts)' , 'SPREAD(t1.q_ts)' , 'SPREAD(t1.q_int)' ,'SPREAD(t1.q_bigint)' , 'SPREAD(t1.q_smallint)' ,'SPREAD(t1.q_tinyint)' ,'SPREAD(t1.q_float)' ,'SPREAD(t1.q_double)' ,
+            'SPREAD(t2.ts)' , 'SPREAD(t2.q_ts)' , 'SPREAD(t2.q_int)' ,'SPREAD(t2.q_bigint)' , 'SPREAD(t2.q_smallint)' ,'SPREAD(t2.q_tinyint)' ,'SPREAD(t2.q_float)' ,'SPREAD(t2.q_double)' ,
+            '(SPREAD(t1.q_int) + SPREAD(t1.q_bigint))' , '(SPREAD(t1.q_tinyint) - SPREAD(t1.q_float))', '(SPREAD(t1.q_double) * SPREAD(t1.q_tinyint))' , '(SPREAD(t1.q_double) / SPREAD(t1.q_tinyint))',
+            '(SPREAD(t2.q_int) + SPREAD(t2.q_bigint))' , '(SPREAD(t2.q_smallint) - SPREAD(t2.q_float))', '(SPREAD(t2.q_double) * SPREAD(t2.q_tinyint))' , '(SPREAD(t2.q_double) / SPREAD(t2.q_tinyint))',
+            '(SPREAD(t1.q_int) + SPREAD(t1.q_smallint))' , '(SPREAD(t2.q_smallint) - SPREAD(t2.q_float))', '(SPREAD(t1.q_double) * SPREAD(t1.q_tinyint))' , '(SPREAD(t1.q_double) / SPREAD(t1.q_float))']
+        self.calc_calculate_regular_j = ['DIFF(t1.q_int)' ,'DIFF(t1.q_bigint)' , 'DIFF(t1.q_smallint)' ,'DIFF(t1.q_tinyint)' ,'DIFF(t1.q_float)' ,'DIFF(t1.q_double)' ,
+            'DIFF(t1.q_int,0)' ,'DIFF(t1.q_bigint,0)' , 'DIFF(t1.q_smallint,0)' ,'DIFF(t1.q_tinyint,0)' ,'DIFF(t1.q_float,0)' ,'DIFF(t1.q_double,0)' ,
+            'DIFF(t1.q_int,1)' ,'DIFF(t1.q_bigint,1)' , 'DIFF(t1.q_smallint,1)' ,'DIFF(t1.q_tinyint,1)' ,'DIFF(t1.q_float,1)' ,'DIFF(t1.q_double,1)' ,
+            'DERIVATIVE(t1.q_int,15s,0)' , 'DERIVATIVE(t1.q_bigint,10s,1)' , 'DERIVATIVE(t1.q_smallint,20s,0)' ,'DERIVATIVE(t1.q_tinyint,10s,1)' ,'DERIVATIVE(t1.q_float,6s,0)' ,'DERIVATIVE(t1.q_double,3s,1)' ,
+            'DIFF(t2.q_int)' ,'DIFF(t2.q_bigint)' , 'DIFF(t2.q_smallint)' ,'DIFF(t2.q_tinyint)' ,'DIFF(t2.q_float)' ,'DIFF(t2.q_double)' ,
+            'DIFF(t2.q_int,0)' ,'DIFF(t2.q_bigint,0)' , 'DIFF(t2.q_smallint,0)' ,'DIFF(t2.q_tinyint,0)' ,'DIFF(t2.q_float,0)' ,'DIFF(t2.q_double,0)' ,
+            'DIFF(t2.q_int,1)' ,'DIFF(t2.q_bigint,1)' , 'DIFF(t2.q_smallint,1)' ,'DIFF(t2.q_tinyint,1)' ,'DIFF(t2.q_float,1)' ,'DIFF(t2.q_double,1)' ,
+            'DERIVATIVE(t2.q_int,15s,0)' , 'DERIVATIVE(t2.q_bigint,10s,1)' , 'DERIVATIVE(t2.q_smallint,20s,0)' ,'DERIVATIVE(t2.q_tinyint,10s,1)' ,'DERIVATIVE(t2.q_float,6s,0)' ,'DERIVATIVE(t2.q_double,3s,1)' ]
+        self.calc_calculate_groupbytbname_j = self.calc_calculate_regular_j
+
+        # interval && calc_aggregate_all\calc_aggregate_regular\calc_select_all
+        self.interval_sliding = ['interval(4w) sliding(1w) ','interval(1w) sliding(1d) ','interval(1d) sliding(1h) ' ,
+            'interval(1h) sliding(1m) ','interval(1m) sliding(1s) ','interval(1s) sliding(10a) ',
+            'interval(1y) ','interval(1n) ','interval(1w) ','interval(1d) ','interval(1h) ','interval(1m) ','interval(1s) ' ,'interval(10a)',
+            'interval(1y,1n) ','interval(1n,1w) ','interval(1w,1d) ','interval(1d,1h) ','interval(1h,1m) ','interval(1m,1s) ','interval(1s,10a) ' ,'interval(100a,30a)']
+
+        self.conn1 = taos.connect(host="127.0.0.1", user="root", password="taosdata", config="/etc/taos/")
+        self.cur1 = self.conn1.cursor()
+        print(self.cur1)
+        self.cur1.execute("use %s ;" %self.db_nest)
+        sql = 'select * from stable_1 limit 5;'
+        self.cur1.execute(sql)
+
+
+    def data_matrix_equal(self, sql1,row1_s,row1_e,col1_s,col1_e, sql2,row2_s,row2_e,col2_s,col2_e):
+        # ----row1_start----col1_start----
+        # - - - - data inside the matrix must be equal - - -
+        # - - - - - - - - - - - - - - - -
+        # ----row1_end------col1_end------
+        self.sql1 = sql1
+        list1 =[]
+        tdSql.query(sql1)
+        for i1 in range(row1_s-1,row1_e):
+            #print("iiii=%d"%i1)
+            for j1 in range(col1_s-1,col1_e):
+                #print("jjjj=%d"%j1)
+                #print("data=%s" %(tdSql.getData(i1,j1)))
+                list1.append(tdSql.getData(i1,j1))
+        print("=====list1-------list1---=%s" %set(list1))
+
+        tdSql.execute("reset query cache;")
+        self.sql2 = sql2
+        list2 =[]
+        tdSql.query(sql2)
+        for i2 in range(row2_s-1,row2_e):
+            #print("iiii222=%d"%i2)
+            for j2 in range(col2_s-1,col2_e):
+                #print("jjjj222=%d"%j2)
+                #print("data=%s" %(tdSql.getData(i2,j2)))
+                list2.append(tdSql.getData(i2,j2))
+        print("=====list2-------list2---=%s" %set(list2))
+
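+        # Comparison strategy, tried in order: exact list equality; set inclusion
+        # (tolerates child tables returning the same rows in a different order);
+        # then a last-resort numeric check that flattens each result list to a
+        # string, strips datetime/bracket/sign noise, and accepts a small absolute
+        # difference (0.0001, 0.1, then 0.5) to absorb float formatting drift.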
matrix_result") %(sql1,sql2)) + elif (set(list2)).issubset(set(list1)): + # 解决不同子表排列结果乱序 + # print(("=====list_issubset==matrix2in1-true===sql1.list1:'%s',sql2.list2:'%s'") %(list1,list2)) + tdLog.info(("===matrix_issubset===sql1:'%s' matrix_set_result = sql2:'%s' matrix_set_result") %(sql1,sql2)) + #elif abs(float(str(list1).replace("]","").replace("[","").replace("e+","")) - float(str(list2).replace("]","").replace("[","").replace("e+",""))) <= 0.0001: + elif abs(float(str(list1).replace("datetime.datetime","").replace("]","").replace("[","").replace("e+","").replace(", ","").replace("(","").replace(")","").replace("-","")) - float(str(list2).replace("datetime.datetime","").replace("]","").replace("[","").replace("e+","").replace(", ","").replace("(","").replace(")","").replace("-",""))) <= 0.0001: + print(("=====matrix_abs+e+===sql1.list1:'%s',sql2.list2:'%s'") %(list1,list2)) + print(("=====matrix_abs+e+replace_after===sql1.list1:'%s',sql2.list2:'%s'") %(float(str(list1).replace("datetime.datetime","").replace("]","").replace("[","").replace("e+","").replace(", ","").replace("(","").replace(")","").replace("-","")),float(str(list2).replace("datetime.datetime","").replace("]","").replace("[","").replace("e+","").replace(", ","").replace("(","").replace(")","").replace("-","")))) + tdLog.info(("===matrix_abs+e+===sql1:'%s' matrix_result = sql2:'%s' matrix_result") %(sql1,sql2)) + elif abs(float(str(list1).replace("datetime.datetime","").replace("]","").replace("[","").replace(", ","").replace("(","").replace(")","").replace("-","")) - float(str(list2).replace("datetime.datetime","").replace("]","").replace("[","").replace(", ","").replace("(","").replace(")","").replace("-",""))) <= 0.1: + #{datetime.datetime(2021, 8, 27, 1, 46, 40), -441.46841430664057}replace + print(("=====matrix_abs+replace===sql1.list1:'%s',sql2.list2:'%s'") %(list1,list2)) + print(("=====matrix_abs+replace_after===sql1.list1:'%s',sql2.list2:'%s'") %(float(str(list1).replace("datetime.datetime","").replace("]","").replace("[","").replace(", ","").replace("(","").replace(")","").replace("-","")),float(str(list1).replace("datetime.datetime","").replace("]","").replace("[","").replace(", ","").replace("(","").replace(")","").replace("-","")))) + tdLog.info(("===matrix_abs+replace===sql1:'%s' matrix_result = sql2:'%s' matrix_result") %(sql1,sql2)) + elif abs(float(str(list1).replace("datetime.datetime","").replace("]","").replace("[","").replace(", ","").replace("(","").replace(")","").replace("-","")) - float(str(list2).replace("datetime.datetime","").replace("]","").replace("[","").replace(", ","").replace("(","").replace(")","").replace("-",""))) <= 0.5: + print(("=====matrix_abs===sql1.list1:'%s',sql2.list2:'%s'") %(list1,list2)) + print(("=====matrix_abs===sql1.list1:'%s',sql2.list2:'%s'") %(float(str(list1).replace("datetime.datetime","").replace("]","").replace("[","").replace(", ","").replace("(","").replace(")","").replace("-","")),float(str(list2).replace("datetime.datetime","").replace("]","").replace("[","").replace(", ","").replace("(","").replace(")","").replace("-","")))) + tdLog.info(("===matrix_abs======sql1:'%s' matrix_result = sql2:'%s' matrix_result") %(sql1,sql2)) + else: + print(("=====matrix_error===sql1.list1:'%s',sql2.list2:'%s'") %(list1,list2)) + tdLog.info(("sql1:'%s' matrix_result != sql2:'%s' matrix_result") %(sql1,sql2)) + return tdSql.checkEqual(list1,list2) + + def restartDnodes(self): + pass + # tdDnodes.stop(1) + # tdDnodes.start(1) + + def dropandcreateDB_random(self,database,n): + ts 
+        ts = 1630000000000
+        num_random = 100
+        fake = Faker('zh_CN')
+        tdSql.execute('''drop database if exists %s ;''' %database)
+        tdSql.execute('''create database %s keep 36500;'''%database)
+        tdSql.execute('''use %s;'''%database)
+
+        tdSql.execute('''create stable stable_1 (ts timestamp , q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint , q_float float , q_double double , q_bool bool , q_binary binary(100) , q_nchar nchar(100) , q_ts timestamp , \
+            q_binary1 binary(100) , q_nchar1 nchar(100) ,q_binary2 binary(100) , q_nchar2 nchar(100) ,q_binary3 binary(100) , q_nchar3 nchar(100) ,q_binary4 binary(100) , q_nchar4 nchar(100) ,\
+            q_binary5 binary(100) , q_nchar5 nchar(100) ,q_binary6 binary(100) , q_nchar6 nchar(100) ,q_binary7 binary(100) , q_nchar7 nchar(100) ,q_binary8 binary(100) , q_nchar8 nchar(100) ,\
+            q_int_null int , q_bigint_null bigint , q_smallint_null smallint , q_tinyint_null tinyint, q_float_null float , q_double_null double , q_bool_null bool , q_binary_null binary(20) , q_nchar_null nchar(20) , q_ts_null timestamp) \
+            tags(loc nchar(100) , t_int int , t_bigint bigint , t_smallint smallint , t_tinyint tinyint, t_bool bool , t_binary binary(100) , t_nchar nchar(100) ,t_float float , t_double double , t_ts timestamp);''')
+        tdSql.execute('''create stable stable_2 (ts timestamp , q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint , q_float float , q_double double , q_bool bool , q_binary binary(100) , q_nchar nchar(100) , q_ts timestamp , \
+            q_binary1 binary(100) , q_nchar1 nchar(100) ,q_binary2 binary(100) , q_nchar2 nchar(100) ,q_binary3 binary(100) , q_nchar3 nchar(100) ,q_binary4 binary(100) , q_nchar4 nchar(100) ,\
+            q_binary5 binary(100) , q_nchar5 nchar(100) ,q_binary6 binary(100) , q_nchar6 nchar(100) ,q_binary7 binary(100) , q_nchar7 nchar(100) ,q_binary8 binary(100) , q_nchar8 nchar(100) ,\
+            q_int_null int , q_bigint_null bigint , q_smallint_null smallint , q_tinyint_null tinyint, q_float_null float , q_double_null double , q_bool_null bool , q_binary_null binary(20) , q_nchar_null nchar(20) , q_ts_null timestamp) \
+            tags(loc nchar(100) , t_int int , t_bigint bigint , t_smallint smallint , t_tinyint tinyint, t_bool bool , t_binary binary(100) , t_nchar nchar(100) ,t_float float , t_double double , t_ts timestamp);''')
+
+        tdSql.execute('''create stable stable_null_data (ts timestamp , q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint , q_float float , q_double double , q_bool bool , q_binary binary(100) , q_nchar nchar(100) , q_ts timestamp , \
+            q_binary1 binary(100) , q_nchar1 nchar(100) ,q_binary2 binary(100) , q_nchar2 nchar(100) ,q_binary3 binary(100) , q_nchar3 nchar(100) ,q_binary4 binary(100) , q_nchar4 nchar(100) ,\
+            q_binary5 binary(100) , q_nchar5 nchar(100) ,q_binary6 binary(100) , q_nchar6 nchar(100) ,q_binary7 binary(100) , q_nchar7 nchar(100) ,q_binary8 binary(100) , q_nchar8 nchar(100) ,\
+            q_int_null int , q_bigint_null bigint , q_smallint_null smallint , q_tinyint_null tinyint, q_float_null float , q_double_null double , q_bool_null bool , q_binary_null binary(20) , q_nchar_null nchar(20) , q_ts_null timestamp) \
+            tags(loc nchar(100) , t_int int , t_bigint bigint , t_smallint smallint , t_tinyint tinyint, t_bool bool , t_binary binary(100) , t_nchar nchar(100) ,t_float float , t_double double , t_ts timestamp);''')
+
+        tdSql.execute('''create stable stable_null_childtable (ts timestamp , q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint , q_float float , q_double double , q_bool bool , q_binary binary(100) , q_nchar nchar(100) , q_ts timestamp , \
+            q_binary1 binary(100) , q_nchar1 nchar(100) ,q_binary2 binary(100) , q_nchar2 nchar(100) ,q_binary3 binary(100) , q_nchar3 nchar(100) ,q_binary4 binary(100) , q_nchar4 nchar(100) ,\
+            q_binary5 binary(100) , q_nchar5 nchar(100) ,q_binary6 binary(100) , q_nchar6 nchar(100) ,q_binary7 binary(100) , q_nchar7 nchar(100) ,q_binary8 binary(100) , q_nchar8 nchar(100) ,\
+            q_int_null int , q_bigint_null bigint , q_smallint_null smallint , q_tinyint_null tinyint, q_float_null float , q_double_null double , q_bool_null bool , q_binary_null binary(20) , q_nchar_null nchar(20) , q_ts_null timestamp) \
+            tags(loc nchar(100) , t_int int , t_bigint bigint , t_smallint smallint , t_tinyint tinyint, t_bool bool , t_binary binary(100) , t_nchar nchar(100) ,t_float float , t_double double , t_ts timestamp);''')
+
+        #tdSql.execute('''create table stable_1_1 using stable_1 tags('stable_1_1', '0' , '0' , '0' , '0' , 0 , 'binary1' , 'nchar1' , '0' , '0' ,'0') ;''')
+        tdSql.execute('''create table stable_1_1 using stable_1 tags('stable_1_1', '%d' , '%d', '%d' , '%d' , 0 , 'binary1.%s' , 'nchar1.%s' , '%f', '%f' ,'%d') ;'''
+            %(fake.random_int(min=-2147483647, max=2147483647, step=1), fake.random_int(min=-9223372036854775807, max=9223372036854775807, step=1),
+            fake.random_int(min=-32767, max=32767, step=1) , fake.random_int(min=-127, max=127, step=1) ,
+            fake.pystr() ,fake.pystr() ,fake.pyfloat(),fake.pyfloat(),fake.random_int(min=-2147483647, max=2147483647, step=1)))
+        tdSql.execute('''create table stable_1_2 using stable_1 tags('stable_1_2', '2147483647' , '9223372036854775807' , '32767' , '127' , 1 , 'binary2' , 'nchar2' , '2' , '22' , \'1999-09-09 09:09:09.090\') ;''')
+        tdSql.execute('''create table stable_1_3 using stable_1 tags('stable_1_3', '-2147483647' , '-9223372036854775807' , '-32767' , '-127' , false , 'binary3' , 'nchar3nchar3' , '-3.3' , '-33.33' , \'2099-09-09 09:09:09.090\') ;''')
+        #tdSql.execute('''create table stable_1_4 using stable_1 tags('stable_1_4', '0' , '0' , '0' , '0' , 0 , '0' , '0' , '0' , '0' ,'0') ;''')
+        tdSql.execute('''create table stable_1_4 using stable_1 tags('stable_1_4', '%d' , '%d', '%d' , '%d' , 1 , 'binary1.%s' , 'nchar1.%s' , '%f', '%f' ,'%d') ;'''
+            %(fake.random_int(min=-2147483647, max=2147483647, step=1), fake.random_int(min=-9223372036854775807, max=9223372036854775807, step=1),
+            fake.random_int(min=-32767, max=32767, step=1) , fake.random_int(min=-127, max=127, step=1) ,
+            fake.pystr() ,fake.pystr() ,fake.pyfloat(),fake.pyfloat(),fake.random_int(min=-2147483647, max=2147483647, step=1)))
+
+        # tdSql.execute('''create table stable_2_1 using stable_2 tags('stable_2_1' , '0' , '0' , '0' , '0' , 0 , 'binary21' , 'nchar21' , '0' , '0' ,'0') ;''')
+        # tdSql.execute('''create table stable_2_2 using stable_2 tags('stable_2_2' , '0' , '0' , '0' , '0' , 0 , '0' , '0' , '0' , '0' ,'0') ;''')
+
+        # tdSql.execute('''create table stable_null_data_1 using stable_null_data tags('stable_null_data_1', '0' , '0' , '0' , '0' , 0 , '0' , '0' , '0' , '0' ,'0') ;''')
+
+        tdSql.execute('''create table stable_2_1 using stable_2 tags('stable_2_1' , '0' , '0' , '0' , '0' , 0 , 'binary21' , 'nchar21' , '0' , '0' ,\'2099-09-09 09:09:09.090\') ;''')
+        tdSql.execute('''create table stable_2_2 using stable_2 tags('stable_2_2' , '%d' , '%d', '%d' , '%d' , 0 , 'binary2.%s' , 'nchar2.%s' , '%f', '%f' ,'%d') ;'''
+            %(fake.random_int(min=-2147483647, max=2147483647, step=1), fake.random_int(min=-9223372036854775807, max=9223372036854775807, step=1),
+            fake.random_int(min=-32767, max=32767, step=1) , fake.random_int(min=-127, max=127, step=1) ,
+            fake.pystr() ,fake.pystr() ,fake.pyfloat(),fake.pyfloat(),fake.random_int(min=-2147483647, max=2147483647, step=1)))
+
+        tdSql.execute('''create table stable_null_data_1 using stable_null_data tags('stable_null_data_1', '%d' , '%d', '%d' , '%d' , 1 , 'binary1.%s' , 'nchar1.%s' , '%f', '%f' ,'%d') ;'''
+            %(fake.random_int(min=-2147483647, max=2147483647, step=1), fake.random_int(min=-9223372036854775807, max=9223372036854775807, step=1),
+            fake.random_int(min=-32767, max=32767, step=1) , fake.random_int(min=-127, max=127, step=1) ,
+            fake.pystr() ,fake.pystr() ,fake.pyfloat(),fake.pyfloat(),fake.random_int(min=-2147483647, max=2147483647, step=1)))
+
+        #regular table
+        tdSql.execute('''create table regular_table_1 \
+            (ts timestamp , q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint , q_float float , q_double double , q_bool bool , q_binary binary(100) , q_nchar nchar(100) , q_ts timestamp , \
+            q_binary1 binary(100) , q_nchar1 nchar(100) ,q_binary2 binary(100) , q_nchar2 nchar(100) ,q_binary3 binary(100) , q_nchar3 nchar(100) ,q_binary4 binary(100) , q_nchar4 nchar(100) ,\
+            q_binary5 binary(100) , q_nchar5 nchar(100) ,q_binary6 binary(100) , q_nchar6 nchar(100) ,q_binary7 binary(100) , q_nchar7 nchar(100) ,q_binary8 binary(100) , q_nchar8 nchar(100) ,\
+            q_int_null int , q_bigint_null bigint , q_smallint_null smallint , q_tinyint_null tinyint, q_float_null float , q_double_null double , q_bool_null bool , q_binary_null binary(20) , q_nchar_null nchar(20) , q_ts_null timestamp) ;''')
+        tdSql.execute('''create table regular_table_2 \
+            (ts timestamp , q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint , q_float float , q_double double , q_bool bool , q_binary binary(100) , q_nchar nchar(100) , q_ts timestamp , \
+            q_binary1 binary(100) , q_nchar1 nchar(100) ,q_binary2 binary(100) , q_nchar2 nchar(100) ,q_binary3 binary(100) , q_nchar3 nchar(100) ,q_binary4 binary(100) , q_nchar4 nchar(100) ,\
+            q_binary5 binary(100) , q_nchar5 nchar(100) ,q_binary6 binary(100) , q_nchar6 nchar(100) ,q_binary7 binary(100) , q_nchar7 nchar(100) ,q_binary8 binary(100) , q_nchar8 nchar(100) ,\
+            q_int_null int , q_bigint_null bigint , q_smallint_null smallint , q_tinyint_null tinyint, q_float_null float , q_double_null double , q_bool_null bool , q_binary_null binary(20) , q_nchar_null nchar(20) , q_ts_null timestamp) ;''')
+        tdSql.execute('''create table regular_table_3 \
+            (ts timestamp , q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint , q_float float , q_double double , q_bool bool , q_binary binary(100) , q_nchar nchar(100) , q_ts timestamp , \
+            q_binary1 binary(100) , q_nchar1 nchar(100) ,q_binary2 binary(100) , q_nchar2 nchar(100) ,q_binary3 binary(100) , q_nchar3 nchar(100) ,q_binary4 binary(100) , q_nchar4 nchar(100) ,\
+            q_binary5 binary(100) , q_nchar5 nchar(100) ,q_binary6 binary(100) , q_nchar6 nchar(100) ,q_binary7 binary(100) , q_nchar7 nchar(100) ,q_binary8 binary(100) , q_nchar8 nchar(100) ,\
+            q_int_null int , q_bigint_null bigint , q_smallint_null smallint , q_tinyint_null tinyint, q_float_null float , q_double_null double , q_bool_null bool , q_binary_null binary(20) , q_nchar_null nchar(20) , q_ts_null timestamp) ;''')
+
+        tdSql.execute('''create table regular_table_null \
+            (ts timestamp , q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint , q_float float , q_double double , q_bool bool , q_binary binary(100) , q_nchar nchar(100) , q_ts timestamp , \
+            q_binary1 binary(100) , q_nchar1 nchar(100) ,q_binary2 binary(100) , q_nchar2 nchar(100) ,q_binary3 binary(100) , q_nchar3 nchar(100) ,q_binary4 binary(100) , q_nchar4 nchar(100) ,\
+            q_binary5 binary(100) , q_nchar5 nchar(100) ,q_binary6 binary(100) , q_nchar6 nchar(100) ,q_binary7 binary(100) , q_nchar7 nchar(100) ,q_binary8 binary(100) , q_nchar8 nchar(100) ,\
+            q_int_null int , q_bigint_null bigint , q_smallint_null smallint , q_tinyint_null tinyint, q_float_null float , q_double_null double , q_bool_null bool , q_binary_null binary(20) , q_nchar_null nchar(20) , q_ts_null timestamp) ;''')
+
+
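+        # Load pattern per iteration: stable_1_1/regular_table_1 get one fully
+        # random row; stable_1_2/regular_table_2 get a non-negative row at
+        # ts+i*1000 and a non-positive row at ts+i*1000+1; stable_2_1 gets three
+        # non-negative rows (offsets 0, +1 and +10).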
+        for i in range(num_random*n):
+            tdSql.execute('''insert into stable_1_1 (ts , q_int , q_bigint , q_smallint , q_tinyint , q_float , q_double , q_bool , q_binary , q_nchar, q_ts,\
+                q_binary1 , q_nchar1 , q_binary2 , q_nchar2 , q_binary3 , q_nchar3 , q_binary4 , q_nchar4 , q_binary5 , q_nchar5 , q_binary6 , q_nchar6 , q_binary7 , q_nchar7, q_binary8 , q_nchar8) \
+                values(%d, %d, %d, %d, %d, %f, %f, 0, 'binary.%s', 'nchar.%s', %d, 'binary1.%s', 'nchar1.%s', 'binary2.%s', 'nchar2.%s', 'binary3.%s', 'nchar3.%s', \
+                'binary4.%s', 'nchar4.%s', 'binary5.%s', 'nchar5.%s', 'binary6.%s', 'nchar6.%s', 'binary7.%s', 'nchar7.%s', 'binary8.%s', 'nchar8.%s') ;'''
+                % (ts + i*1000, fake.random_int(min=-2147483647, max=2147483647, step=1),
+                fake.random_int(min=-9223372036854775807, max=9223372036854775807, step=1),
+                fake.random_int(min=-32767, max=32767, step=1) , fake.random_int(min=-127, max=127, step=1) ,
+                fake.pyfloat() , fake.pyfloat() , fake.pystr() , fake.address() , ts + i , fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() ,
+                fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address()))
+            tdSql.execute('''insert into regular_table_1 (ts , q_int , q_bigint , q_smallint , q_tinyint , q_float , q_double, q_bool , q_binary , q_nchar, q_ts,\
+                q_binary1 , q_nchar1 , q_binary2 , q_nchar2 , q_binary3 , q_nchar3 , q_binary4 , q_nchar4 , q_binary5 , q_nchar5 , q_binary6 , q_nchar6 , q_binary7 , q_nchar7, q_binary8 , q_nchar8) \
+                values(%d, %d, %d, %d, %d, %f, %f, 0, 'binary.%s', 'nchar.%s', %d, 'binary1.%s', 'nchar1.%s', 'binary2.%s', 'nchar2.%s', 'binary3.%s', 'nchar3.%s', \
+                'binary4.%s', 'nchar4.%s', 'binary5.%s', 'nchar5.%s', 'binary6.%s', 'nchar6.%s', 'binary7.%s', 'nchar7.%s', 'binary8.%s', 'nchar8.%s') ;'''
+                % (ts + i*1000, fake.random_int(min=-2147483647, max=2147483647, step=1) ,
+                fake.random_int(min=-9223372036854775807, max=9223372036854775807, step=1) ,
+                fake.random_int(min=-32767, max=32767, step=1) , fake.random_int(min=-127, max=127, step=1) ,
+                fake.pyfloat() , fake.pyfloat() , fake.pystr() , fake.address() , ts + i, fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() ,
+                fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address()))
+
+            tdSql.execute('''insert into stable_1_2 (ts , q_int , q_bigint , q_smallint , q_tinyint , q_float , q_double, q_bool , q_binary , q_nchar, q_ts,\
+                q_binary1 , q_nchar1 , q_binary2 , q_nchar2 , q_binary3 , q_nchar3 , q_binary4 , q_nchar4 , q_binary5 , q_nchar5 , q_binary6 , q_nchar6 , q_binary7 , q_nchar7, q_binary8 , q_nchar8)\
+                values(%d, %d, %d, %d, %d, %f, %f, 1, 'binary.%s', 'nchar.%s', %d, 'binary1.%s', 'nchar1.%s', 'binary2.%s', 'nchar2.%s', 'binary3.%s', 'nchar3.%s', \
+                'binary4.%s', 'nchar4.%s', 'binary5.%s', 'nchar5.%s', 'binary6.%s', 'nchar6.%s', 'binary7.%s', 'nchar7.%s', 'binary8.%s', 'nchar8.%s') ;'''
+                % (ts + i*1000, fake.random_int(min=0, max=2147483647, step=1),
+                fake.random_int(min=0, max=9223372036854775807, step=1),
+                fake.random_int(min=0, max=32767, step=1) , fake.random_int(min=0, max=127, step=1) ,
+                fake.pyfloat() , fake.pyfloat() , fake.pystr() , fake.address() , ts + i, fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() ,
+                fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address()))
+            tdSql.execute('''insert into regular_table_2 (ts , q_int , q_bigint , q_smallint , q_tinyint , q_float , q_double, q_bool , q_binary , q_nchar, q_ts,\
+                q_binary1 , q_nchar1 , q_binary2 , q_nchar2 , q_binary3 , q_nchar3 , q_binary4 , q_nchar4 , q_binary5 , q_nchar5 , q_binary6 , q_nchar6 , q_binary7 , q_nchar7, q_binary8 , q_nchar8) \
+                values(%d, %d, %d, %d, %d, %f, %f, 1, 'binary.%s', 'nchar.%s', %d, 'binary1.%s', 'nchar1.%s', 'binary2.%s', 'nchar2.%s', 'binary3.%s', 'nchar3.%s', \
+                'binary4.%s', 'nchar4.%s', 'binary5.%s', 'nchar5.%s', 'binary6.%s', 'nchar6.%s', 'binary7.%s', 'nchar7.%s', 'binary8.%s', 'nchar8.%s') ;'''
+                % (ts + i*1000, fake.random_int(min=0, max=2147483647, step=1),
+                fake.random_int(min=0, max=9223372036854775807, step=1),
+                fake.random_int(min=0, max=32767, step=1) , fake.random_int(min=0, max=127, step=1) ,
+                fake.pyfloat() , fake.pyfloat() , fake.pystr() , fake.address() , ts + i, fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() ,
+                fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address()))
+
+            tdSql.execute('''insert into stable_1_2 (ts , q_int , q_bigint , q_smallint , q_tinyint , q_float , q_double, q_bool , q_binary , q_nchar, q_ts,\
+                q_binary1 , q_nchar1 , q_binary2 , q_nchar2 , q_binary3 , q_nchar3 , q_binary4 , q_nchar4 , q_binary5 , q_nchar5 , q_binary6 , q_nchar6 , q_binary7 , q_nchar7, q_binary8 , q_nchar8) \
+                values(%d, %d, %d, %d, %d, %f, %f, 1, 'binary.%s', 'nchar.%s', %d, 'binary1.%s', 'nchar1.%s', 'binary2.%s', 'nchar2.%s', 'binary3.%s', 'nchar3.%s', \
+                'binary4.%s', 'nchar4.%s', 'binary5.%s', 'nchar5.%s', 'binary6.%s', 'nchar6.%s', 'binary7.%s', 'nchar7.%s', 'binary8.%s', 'nchar8.%s') ;'''
+                % (ts + i*1000 +1, fake.random_int(min=-2147483647, max=0, step=1),
+                fake.random_int(min=-9223372036854775807, max=0, step=1),
+                fake.random_int(min=-32767, max=0, step=1) , fake.random_int(min=-127, max=0, step=1) ,
+                fake.pyfloat() , fake.pyfloat() , fake.pystr() , fake.address() , ts + i +1, fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() ,
+                fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address()))
+            tdSql.execute('''insert into regular_table_2 (ts , q_int , q_bigint , q_smallint , q_tinyint , q_float , q_double, q_bool , q_binary , q_nchar, q_ts,\
+                q_binary1 , q_nchar1 , q_binary2 , q_nchar2 , q_binary3 , q_nchar3 , q_binary4 , q_nchar4 , q_binary5 , q_nchar5 , q_binary6 , q_nchar6 , q_binary7 , q_nchar7, q_binary8 , q_nchar8) \
+                values(%d, %d, %d, %d, %d, %f, %f, 1, 'binary.%s', 'nchar.%s', %d, 'binary1.%s', 'nchar1.%s', 'binary2.%s', 'nchar2.%s', 'binary3.%s', 'nchar3.%s', \
+                'binary4.%s', 'nchar4.%s', 'binary5.%s', 'nchar5.%s', 'binary6.%s', 'nchar6.%s', 'binary7.%s', 'nchar7.%s', 'binary8.%s', 'nchar8.%s') ;'''
+                % (ts + i*1000 +1, fake.random_int(min=-2147483647, max=0, step=1),
+                fake.random_int(min=-9223372036854775807, max=0, step=1),
+                fake.random_int(min=-32767, max=0, step=1) , fake.random_int(min=-127, max=0, step=1) ,
+                fake.pyfloat() , fake.pyfloat() , fake.pystr() , fake.address() , ts + i +1, fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() ,
+                fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address()))
+
+            tdSql.execute('''insert into stable_2_1 (ts , q_int , q_bigint , q_smallint , q_tinyint , q_float , q_double, q_bool , q_binary , q_nchar, q_ts,\
+                q_binary1 , q_nchar1 , q_binary2 , q_nchar2 , q_binary3 , q_nchar3 , q_binary4 , q_nchar4 , q_binary5 , q_nchar5 , q_binary6 , q_nchar6 , q_binary7 , q_nchar7, q_binary8 , q_nchar8) \
+                values(%d, %d, %d, %d, %d, %f, %f, 0, 'binary.%s', 'nchar.%s', %d, 'binary1.%s', 'nchar1.%s', 'binary2.%s', 'nchar2.%s', 'binary3.%s', 'nchar3.%s', \
+                'binary4.%s', 'nchar4.%s', 'binary5.%s', 'nchar5.%s', 'binary6.%s', 'nchar6.%s', 'binary7.%s', 'nchar7.%s', 'binary8.%s', 'nchar8.%s') ;'''
+                % (ts + i*1000, fake.random_int(min=-0, max=2147483647, step=1),
+                fake.random_int(min=-0, max=9223372036854775807, step=1),
+                fake.random_int(min=-0, max=32767, step=1) , fake.random_int(min=-0, max=127, step=1) ,
+                fake.pyfloat() , fake.pyfloat() , fake.pystr() , fake.address() , ts + i, fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() ,
+                fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address()))
+
+            tdSql.execute('''insert into stable_2_1 (ts , q_int , q_bigint , q_smallint , q_tinyint , q_float , q_double, q_bool , q_binary , q_nchar, q_ts,\
+                q_binary1 , q_nchar1 , q_binary2 , q_nchar2 , q_binary3 , q_nchar3 , q_binary4 , q_nchar4 , q_binary5 , q_nchar5 , q_binary6 , q_nchar6 , q_binary7 , q_nchar7, q_binary8 , q_nchar8) \
+                values(%d, %d, %d, %d, %d, %f, %f, 0, 'binary.%s', 'nchar.%s', %d, 'binary1.%s', 'nchar1.%s', 'binary2.%s', 'nchar2.%s', 'binary3.%s', 'nchar3.%s', \
+                'binary4.%s', 'nchar4.%s', 'binary5.%s', 'nchar5.%s', 'binary6.%s', 'nchar6.%s', 'binary7.%s', 'nchar7.%s', 'binary8.%s', 'nchar8.%s') ;'''
+                % (ts + i*1000 +1, fake.random_int(min=-0, max=2147483647, step=1),
+                fake.random_int(min=-0, max=9223372036854775807, step=1),
+                fake.random_int(min=-0, max=32767, step=1) , fake.random_int(min=-0, max=127, step=1) ,
+                fake.pyfloat() , fake.pyfloat() , fake.pystr() , fake.address() , ts + i, fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() ,
+                fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address()))
+
+            tdSql.execute('''insert into stable_2_1 (ts , q_int , q_bigint , q_smallint , q_tinyint , q_float , q_double, q_bool , q_binary , q_nchar, q_ts,\
+                q_binary1 , q_nchar1 , q_binary2 , q_nchar2 , q_binary3 , q_nchar3 , q_binary4 , q_nchar4 , q_binary5 , q_nchar5 , q_binary6 , q_nchar6 , q_binary7 , q_nchar7, q_binary8 , q_nchar8) \
+                values(%d, %d, %d, %d, %d, %f, %f, 0, 'binary.%s', 'nchar.%s', %d, 'binary1.%s', 'nchar1.%s', 'binary2.%s', 'nchar2.%s', 'binary3.%s', 'nchar3.%s', \
+                'binary4.%s', 'nchar4.%s', 'binary5.%s', 'nchar5.%s', 'binary6.%s', 'nchar6.%s', 'binary7.%s', 'nchar7.%s', 'binary8.%s', 'nchar8.%s') ;'''
+                % (ts + i*1000 +10, fake.random_int(min=-0, max=2147483647, step=1),
+                fake.random_int(min=-0, max=9223372036854775807, step=1),
+                fake.random_int(min=-0, max=32767, step=1) , fake.random_int(min=-0, max=127, step=1) ,
+                fake.pyfloat() , fake.pyfloat() , fake.pystr() , fake.address() , ts + i, fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() ,
+                fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address()))
+
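+        # Sanity-check the load: stable_1 collects 3 rows per iteration (one via
+        # stable_1_1, two via stable_1_2); regular_table_1 collects exactly one.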
+        tdSql.query("select count(*) from stable_1;")
+        tdSql.checkData(0,0,3*num_random*n)
+        tdSql.query("select count(*) from regular_table_1;")
+        tdSql.checkData(0,0,num_random*n)
+
+    def math_nest(self,mathlist):
+
+        print("==========%s===start=============" %mathlist)
+        os.system("rm -rf %s/%s.sql" % (self.testcasePath,self.testcaseFilename))
+
+        self.dropandcreateDB_random("%s" %self.db_nest, 1)
+
+        if (mathlist == ['ABS','SQRT']) or (mathlist == ['SIN','COS','TAN','ASIN','ACOS','ATAN']) or (mathlist == ['FLOOR','CEIL','ROUND']) \
+            or (mathlist == ['CSUM']) or (mathlist == ['']):
+            math_functions = mathlist
+            fun_fix_column = ['(q_bigint)','(q_smallint)','(q_tinyint)','(q_int)','(q_float)','(q_double)','(q_bigint_null)','(q_smallint_null)','(q_tinyint_null)','(q_int_null)','(q_float_null)','(q_double_null)']
+            fun_column_1 = random.sample(math_functions,1)+random.sample(fun_fix_column,1)
+            math_fun_1 = str(fun_column_1).replace("[","").replace("]","").replace("'","").replace(", ","")
+            fun_column_2 = random.sample(math_functions,1)+random.sample(fun_fix_column,1)
+            math_fun_2 = str(fun_column_2).replace("[","").replace("]","").replace("'","").replace(", ","")
+
+            fun_fix_column_j = ['(t1.q_bigint)','(t1.q_smallint)','(t1.q_tinyint)','(t1.q_int)','(t1.q_float)','(t1.q_double)','(t1.q_bigint_null)','(t1.q_smallint_null)','(t1.q_tinyint_null)','(t1.q_int_null)','(t1.q_float_null)','(t1.q_double_null)',
+                '(t2.q_bigint)','(t2.q_smallint)','(t2.q_tinyint)','(t2.q_int)','(t2.q_float)','(t2.q_double)','(t2.q_bigint_null)','(t2.q_smallint_null)','(t2.q_tinyint_null)','(t2.q_int_null)','(t2.q_float_null)','(t2.q_double_null)']
+            fun_column_join_1 = random.sample(math_functions,1)+random.sample(fun_fix_column_j,1)
+            math_fun_join_1 = str(fun_column_join_1).replace("[","").replace("]","").replace("'","").replace(", ","")
+            fun_column_join_2 = random.sample(math_functions,1)+random.sample(fun_fix_column_j,1)
+            math_fun_join_2 = str(fun_column_join_2).replace("[","").replace("]","").replace("'","").replace(", ","")
+
+        elif (mathlist == ['UNIQUE']) or (mathlist == ['HYPERLOGLOG']):
+            math_functions = mathlist
+            fun_fix_column = ['(q_bigint)','(q_smallint)','(q_tinyint)','(q_int)','(q_float)','(q_double)','(q_binary)','(q_nchar)','(q_bool)','(q_ts)',
+                '(q_bigint_null)','(q_smallint_null)','(q_tinyint_null)','(q_int_null)','(q_float_null)','(q_double_null)','(q_binary_null)','(q_nchar_null)','(q_bool_null)','(q_ts_null)']
+            fun_column_1 = random.sample(math_functions,1)+random.sample(fun_fix_column,1)
+            math_fun_1 = str(fun_column_1).replace("[","").replace("]","").replace("'","").replace(", ","")
+            fun_column_2 = random.sample(math_functions,1)+random.sample(fun_fix_column,1)
+
+    def math_nest(self,mathlist):
+
+        print("==========%s===start=============" %mathlist)
+        os.system("rm -rf %s/%s.sql" % (self.testcasePath,self.testcaseFilename))
+
+        self.dropandcreateDB_random("%s" %self.db_nest, 1)
+
+        if (mathlist == ['ABS','SQRT']) or (mathlist == ['SIN','COS','TAN','ASIN','ACOS','ATAN']) or (mathlist == ['FLOOR','CEIL','ROUND']) \
+            or (mathlist == ['CSUM']) or (mathlist == ['']):
+            math_functions = mathlist
+            fun_fix_column = ['(q_bigint)','(q_smallint)','(q_tinyint)','(q_int)','(q_float)','(q_double)','(q_bigint_null)','(q_smallint_null)','(q_tinyint_null)','(q_int_null)','(q_float_null)','(q_double_null)']
+            fun_column_1 = random.sample(math_functions,1)+random.sample(fun_fix_column,1)
+            math_fun_1 = str(fun_column_1).replace("[","").replace("]","").replace("'","").replace(", ","")
+            fun_column_2 = random.sample(math_functions,1)+random.sample(fun_fix_column,1)
+            math_fun_2 = str(fun_column_2).replace("[","").replace("]","").replace("'","").replace(", ","")
+
+            fun_fix_column_j = ['(t1.q_bigint)','(t1.q_smallint)','(t1.q_tinyint)','(t1.q_int)','(t1.q_float)','(t1.q_double)','(t1.q_bigint_null)','(t1.q_smallint_null)','(t1.q_tinyint_null)','(t1.q_int_null)','(t1.q_float_null)','(t1.q_double_null)',
+                '(t2.q_bigint)','(t2.q_smallint)','(t2.q_tinyint)','(t2.q_int)','(t2.q_float)','(t2.q_double)','(t2.q_bigint_null)','(t2.q_smallint_null)','(t2.q_tinyint_null)','(t2.q_int_null)','(t2.q_float_null)','(t2.q_double_null)']
+            fun_column_join_1 = random.sample(math_functions,1)+random.sample(fun_fix_column_j,1)
+            math_fun_join_1 = str(fun_column_join_1).replace("[","").replace("]","").replace("'","").replace(", ","")
+            fun_column_join_2 = random.sample(math_functions,1)+random.sample(fun_fix_column_j,1)
+            math_fun_join_2 = str(fun_column_join_2).replace("[","").replace("]","").replace("'","").replace(", ","")
+
+        elif (mathlist == ['UNIQUE']) or (mathlist == ['HYPERLOGLOG']):
+            math_functions = mathlist
+            fun_fix_column = ['(q_bigint)','(q_smallint)','(q_tinyint)','(q_int)','(q_float)','(q_double)','(q_binary)','(q_nchar)','(q_bool)','(q_ts)',
+                '(q_bigint_null)','(q_smallint_null)','(q_tinyint_null)','(q_int_null)','(q_float_null)','(q_double_null)','(q_binary_null)','(q_nchar_null)','(q_bool_null)','(q_ts_null)']
+            fun_column_1 = random.sample(math_functions,1)+random.sample(fun_fix_column,1)
+            math_fun_1 = str(fun_column_1).replace("[","").replace("]","").replace("'","").replace(", ","")
+            fun_column_2 = random.sample(math_functions,1)+random.sample(fun_fix_column,1)
+            math_fun_2 = str(fun_column_2).replace("[","").replace("]","").replace("'","").replace(", ","")
+
+            fun_fix_column_j = ['(t1.q_bigint)','(t1.q_smallint)','(t1.q_tinyint)','(t1.q_int)','(t1.q_float)','(t1.q_double)','(t1.q_bigint_null)','(t1.q_smallint_null)','(t1.q_tinyint_null)','(t1.q_int_null)','(t1.q_float_null)','(t1.q_double_null)',
+                '(t2.q_bigint)','(t2.q_smallint)','(t2.q_tinyint)','(t2.q_int)','(t2.q_float)','(t2.q_double)','(t2.q_bigint_null)','(t2.q_smallint_null)','(t2.q_tinyint_null)','(t2.q_int_null)','(t2.q_float_null)','(t2.q_double_null)']
+            fun_column_join_1 = random.sample(math_functions,1)+random.sample(fun_fix_column_j,1)
+            math_fun_join_1 = str(fun_column_join_1).replace("[","").replace("]","").replace("'","").replace(", ","")
+            fun_column_join_2 = random.sample(math_functions,1)+random.sample(fun_fix_column_j,1)
+            math_fun_join_2 = str(fun_column_join_2).replace("[","").replace("]","").replace("'","").replace(", ","")
+
+        elif (mathlist == ['POW','LOG']) or (mathlist == ['MAVG']) or (mathlist == ['SAMPLE']) or (mathlist == ['TAIL']):
+            math_functions = mathlist
+            num = random.randint(0, 1000)
+            fun_fix_column = ['(q_bigint,num)','(q_smallint,num)','(q_tinyint,num)','(q_int,num)','(q_float,num)','(q_double,num)',
+                '(q_bigint_null,num)','(q_smallint_null,num)','(q_tinyint_null,num)','(q_int_null,num)','(q_float_null,num)','(q_double_null,num)']
+            fun_column_1 = random.sample(math_functions,1)+random.sample(fun_fix_column,1)
+            math_fun_1 = str(fun_column_1).replace("[","").replace("]","").replace("'","").replace(", ","").replace("num",str(num))
+            fun_column_2 = random.sample(math_functions,1)+random.sample(fun_fix_column,1)
+            math_fun_2 = str(fun_column_2).replace("[","").replace("]","").replace("'","").replace(", ","").replace("num",str(num))
+
+            fun_fix_column_j = ['(t1.q_bigint,num)','(t1.q_smallint,num)','(t1.q_tinyint,num)','(t1.q_int,num)','(t1.q_float,num)','(t1.q_double,num)',
+                '(t1.q_bigint_null,num)','(t1.q_smallint_null,num)','(t1.q_tinyint_null,num)','(t1.q_int_null,num)','(t1.q_float_null,num)','(t1.q_double_null,num)',
+                '(t2.q_bigint,num)','(t2.q_smallint,num)','(t2.q_tinyint,num)','(t2.q_int,num)','(t2.q_float,num)','(t2.q_double,num)',
+                '(t2.q_bigint_null,num)','(t2.q_smallint_null,num)','(t2.q_tinyint_null,num)','(t2.q_int_null,num)','(t2.q_float_null,num)','(t2.q_double_null,num)']
+            fun_column_join_1 = random.sample(math_functions,1)+random.sample(fun_fix_column_j,1)
+            math_fun_join_1 = str(fun_column_join_1).replace("[","").replace("]","").replace("'","").replace(", ","").replace("num",str(num))
+            fun_column_join_2 = random.sample(math_functions,1)+random.sample(fun_fix_column_j,1)
+            math_fun_join_2 = str(fun_column_join_2).replace("[","").replace("]","").replace("'","").replace(", ","").replace("num",str(num))
+
+        tdSql.query("select 1-1 as math_nest from stable_1 limit 1;")
+        for i in range(self.fornum):
+            if (mathlist == ['SIN','COS','TAN','ASIN','ACOS','ATAN']) or (mathlist == ['ABS','SQRT']) \
+                or (mathlist == ['POW','LOG']) or (mathlist == ['FLOOR','CEIL','ROUND']) :
+                sql = "select ts , floor(asct1) from ( select "
+                sql += "%s as asct1, " % math_fun_1
+                sql += "%s as asct2, " % math_fun_2
+                sql += "%s, " % random.choice(self.s_s_select)
+                sql += "%s, " % random.choice(self.q_select)
+                sql += "ts from regular_table_1 where "
+                sql += "%s " % random.choice(self.q_where)
+                sql += "%s " % random.choice(self.order_where)
+                sql += ");"
+                tdLog.info(sql)
+                tdLog.info(len(sql))
+                tdSql.query(sql)
+                tdSql.checkRows(100)
+                self.cur1.execute(sql)
+            elif (mathlist == ['MAVG']) or (mathlist == ['SAMPLE'])or (mathlist == ['TAIL']) or (mathlist == ['CSUM']) \
+                or (mathlist == ['HYPERLOGLOG']) or (mathlist == ['UNIQUE']):
+                sql = "select floor(asct1) from ( select "
+                sql += "%s as asct1 " % math_fun_1
+                # sql += "%s as asct2, " % math_fun_2
+                # sql += "%s, " % random.choice(self.s_s_select)
+                # sql += "%s, " % random.choice(self.q_select)
+                sql += " from regular_table_1 where "
+                sql += "%s " % random.choice(self.q_where)
+                #sql += "%s " % random.choice(self.order_where)
+                sql += ");"
+                tdLog.info(sql)
+                tdLog.info(len(sql))
+                tdSql.query(sql)
+                #tdSql.checkRows(100)
+                self.cur1.execute(sql)
+
+        tdSql.query("select 1-2 as math_nest from stable_1 limit 1;")
+        for i in range(self.fornum):
+            if (mathlist == ['SIN','COS','TAN','ASIN','ACOS','ATAN']) or (mathlist == ['ABS','SQRT']) \
+                or (mathlist == ['POW','LOG']) or (mathlist == ['FLOOR','CEIL','ROUND']) :
+                sql = "select ts , abs(asct1) from ( select "
+                sql += "%s as asct1, " % math_fun_1
+                sql += "%s, " % random.choice(self.s_s_select)
+                sql += "%s, " % random.choice(self.q_select)
+                sql += "ts ts from regular_table_1 where "
+                sql += "%s " % random.choice(self.q_where)
+                sql += "%s )" % random.choice(self.order_where)
+                sql += "%s " % random.choice(self.unionall_or_union)
+                sql += "select ts , asct2 from ( select "
+                sql += "%s as asct2, " % math_fun_2
+                sql += "%s, " % random.choice(self.s_s_select)
+                sql += "%s, " % random.choice(self.q_select)
+                sql += "ts ts from regular_table_1 where "
+                sql += "%s " % random.choice(self.q_where)
+                #sql += "%s " % random.choice(having_support)
+                sql += "%s " % random.choice(self.order_where)
+                sql += "%s " % random.choice(self.limit1_where)
+                sql += ")"
+                tdLog.info(sql)
+                tdLog.info(len(sql))
+                #TD-15437 tdSql.query(sql)
+                #TD-15437 self.cur1.execute(sql)
+            elif (mathlist == ['MAVG']) or (mathlist == ['SAMPLE'])or (mathlist == ['TAIL']) or (mathlist == ['CSUM']) \
+                or (mathlist == ['HYPERLOGLOG']) or (mathlist == ['UNIQUE']):
+                sql = "select abs(asct1) from ( select "
+                sql += "%s as asct1 " % math_fun_1
+                # sql += "%s, " % random.choice(self.s_s_select)
+                # sql += "%s, " % random.choice(self.q_select)
+                sql += "from regular_table_1 where "
+                sql += "%s )" % random.choice(self.q_where)
+                #sql += "%s )" % random.choice(self.order_where)
+                sql += "%s " % random.choice(self.unionall_or_union)
+                sql += "select floor(asct2) from ( select "
+                sql += "%s as asct2 " % math_fun_2
+                # sql += "%s, " % random.choice(self.s_s_select)
+                # sql += "%s, " % random.choice(self.q_select)
+                sql += " from regular_table_1 where "
+                sql += "%s " % random.choice(self.q_where)
+                #sql += "%s " % random.choice(having_support)
+                #sql += "%s " % random.choice(self.order_where)
+                sql += "%s " % random.choice(self.limit1_where)
+                sql += ")"
+                tdLog.info(sql)
+                tdLog.info(len(sql))
+                #TD-15473 tdSql.query(sql)
+                #TD-15473 self.cur1.execute(sql)
+
+        tdSql.query("select 1-3 as math_nest from stable_1 limit 1;")
+        for i in range(self.fornum):
+            if (mathlist == ['SIN','COS','TAN','ASIN','ACOS','ATAN']) or (mathlist == ['ABS','SQRT']) \
+                or (mathlist == ['POW','LOG']) or (mathlist == ['FLOOR','CEIL','ROUND']) :
+                sql = "select ts , min(asct1) from ( select "
+                sql += "%s as asct1, ts ," % math_fun_1
+                sql += "%s as asct2, " % math_fun_2
+                sql += "%s, " % random.choice(self.s_s_select)
+                sql += "%s, " % random.choice(self.q_select)
+                sql += "ts from regular_table_1 where "
+                sql += "%s " % random.choice(self.q_where)
+                sql += "%s select " % random.choice(self.unionall_or_union)
+                sql += "%s as asct2, ts ," % math_fun_2
+                sql += "%s as asct1, " % math_fun_1
+                sql += "%s, " % random.choice(self.s_s_select)
+                sql += "%s, " % random.choice(self.q_select)
+                sql += "ts from regular_table_2 where "
+                sql += "%s " % random.choice(self.q_where)
+                sql += "%s " % random.choice(self.order_where)
+                sql += ")"
+                tdLog.info(sql)
+                tdLog.info(len(sql))
+                #TD-15473 tdSql.query(sql)
+                #self.cur1.execute(sql)
+            elif (mathlist == ['MAVG']) or (mathlist == ['SAMPLE'])or (mathlist == ['TAIL']) or (mathlist == ['CSUM']) \
+                or (mathlist == ['HYPERLOGLOG']) or (mathlist == ['UNIQUE']):
+                sql = "select ts , min(asct1) from ( select "
+                sql += "%s as asct1, ts ," % math_fun_1
+                sql += "%s as asct2, " % math_fun_2
+                sql += "%s, " % random.choice(self.s_s_select)
+                sql += "%s, " % random.choice(self.q_select)
+                sql += "ts from regular_table_1 where "
+                sql += "%s " % random.choice(self.q_where)
+                sql += "%s select " % random.choice(self.unionall_or_union)
+                sql += "%s as asct2, ts ," % math_fun_2
+                sql += "%s as asct1, " % math_fun_1
+                sql += "%s, " % random.choice(self.s_s_select)
+                sql += "%s, " % random.choice(self.q_select)
+                sql += "ts from regular_table_2 where "
+                sql += "%s " % random.choice(self.q_where)
+                sql += "%s " % random.choice(self.order_where)
+                sql += ")"
+                tdLog.info(sql)
+                tdLog.info(len(sql))
+                #TD-15473 tdSql.query(sql)
+                #self.cur1.execute(sql)
+
+        tdSql.query("select 1-4 as math_nest from stable_1 limit 1;")
+        for i in range(self.fornum):
+            if (mathlist == ['SIN','COS','TAN','ASIN','ACOS','ATAN']) or (mathlist == ['ABS','SQRT']) \
+                or (mathlist == ['POW','LOG']) or (mathlist == ['FLOOR','CEIL','ROUND']) :
+                sql = "select ts , asct1 from ( select t1.ts as ts,"
+                sql += "%s, " % math_fun_join_1
+                sql += "%s as asct1, " % math_fun_join_2
+                sql += "%s, " % math_fun_join_1
+                sql += "t1.%s, " % random.choice(self.q_select)
+                sql += "t2.%s, " % random.choice(self.q_select)
+                sql += "t2.ts from regular_table_1 t1 , regular_table_2 t2 where t1.ts = t2.ts and "
+                sql += "%s " % random.choice(self.q_u_where)
+                sql += "and %s " % random.choice(self.q_u_or_where)
+                sql += "%s " % random.choice(self.order_u_where)
+                sql += ");"
+                tdLog.info(sql)
+                tdLog.info(len(sql))
+                tdSql.query(sql)
+                tdSql.checkRows(100)
+                self.cur1.execute(sql)
+            elif (mathlist == ['MAVG']) or (mathlist == ['SAMPLE'])or (mathlist == ['TAIL']) or (mathlist == ['CSUM'])\
+                or (mathlist == ['HYPERLOGLOG']) or (mathlist == ['UNIQUE']):
+                sql = "select count(asct1) from ( select "
+                sql += "%s as asct1 " % math_fun_join_2
+                sql += "from regular_table_1 t1 , regular_table_2 t2 where t1.ts = t2.ts and "
+                sql += "%s " % random.choice(self.q_u_where)
+                sql += "and %s " % random.choice(self.q_u_or_where)
+                sql += ");"
+                tdLog.info(sql)
+                tdLog.info(len(sql))
+                tdSql.query(sql)
+                self.cur1.execute(sql)
+
+        tdSql.query("select 1-5 as math_nest from stable_1 limit 1;")
+        for i in range(self.fornum):
+            if (mathlist == ['SIN','COS','TAN','ASIN','ACOS','ATAN']) or (mathlist == ['ABS','SQRT']) \
+                or (mathlist == ['POW','LOG']) or (mathlist == ['FLOOR','CEIL','ROUND']) :
+                sql = "select ts ,"
+                sql += "%s, " % math_fun_1
+                sql += "%s, " % random.choice(self.q_select)
+                sql += "%s, " % random.choice(self.q_select)
+                sql += "%s " % math_fun_2
+                sql += " from ( select * from regular_table_1 ) where "
+                sql += "%s " % random.choice(self.q_where)
+                sql += "%s " % random.choice(self.order_where)
+                sql += " ;"
+                tdLog.info(sql)
+                tdLog.info(len(sql))
+                tdSql.query(sql)
+                tdSql.checkRows(100)
+                self.cur1.execute(sql)
+            elif (mathlist == ['MAVG']) or (mathlist == ['SAMPLE'])or (mathlist == ['TAIL']) or (mathlist == ['CSUM'])\
+                or (mathlist == ['HYPERLOGLOG']) or (mathlist == ['UNIQUE']):
+                sql = "select "
+                # sql += "%s, " % math_fun_1
+                # sql += "%s, " % random.choice(self.q_select)
+                # sql += "%s, " % random.choice(self.q_select)
+                sql += "%s " % math_fun_2
+                sql += " from ( select * from regular_table_1 ) where "
+                sql += "%s " % random.choice(self.q_where)
+                sql += " ;"
+                tdLog.info(sql)
+                tdLog.info(len(sql))
+                #TD-15973 tdSql.query(sql)
+                #TD-15973 self.cur1.execute(sql)
+
+        tdSql.query("select 1-6 as math_nest from stable_1 limit 1;")
+        for i in range(self.fornum):
+            if (mathlist == ['SIN','COS','TAN','ASIN','ACOS','ATAN']) or (mathlist == ['ABS','SQRT']) \
+                or (mathlist == ['POW','LOG']) or (mathlist == ['FLOOR','CEIL','ROUND']) :
+                sql = "select ts , max(asct1) from ( select t1.ts as ts,"
+                sql += "%s, " % math_fun_join_1
+                sql += "%s as asct1, " % math_fun_join_2
+                sql += "t1.%s, " % random.choice(self.q_select)
+                sql += "t2.%s, " % random.choice(self.q_select)
+                sql += "%s, " % math_fun_join_1
+                sql += "t2.ts from regular_table_1 t1 , regular_table_2 t2 where t1.ts = t2.ts and "
+                sql += "%s " % random.choice(self.q_u_where)
+                sql += "and %s )" % random.choice(self.q_u_or_where)
+                sql += "%s " % random.choice(self.order_where)
+                sql += "%s " % random.choice(self.limit1_where)
+                tdLog.info(sql)
+                tdLog.info(len(sql))
+                tdSql.query(sql)
+                self.cur1.execute(sql)
+            elif (mathlist == ['MAVG']) or (mathlist == ['SAMPLE'])or (mathlist == ['TAIL']) or (mathlist == ['CSUM']) \
+                or (mathlist == ['HYPERLOGLOG']) or (mathlist == ['UNIQUE']):
+                sql = "select max(asct1) from ( select "
+                #sql += "%s, " % math_fun_join_1
+                sql += "%s as asct1 " % math_fun_join_2
+                # sql += "t1.%s, " % random.choice(self.q_select)
+                # sql += "t2.%s, " % random.choice(self.q_select)
+                # sql += "%s, " % math_fun_join_1
+                sql += "from regular_table_1 t1 , regular_table_2 t2 where t1.ts = t2.ts and "
+                sql += "%s " % random.choice(self.q_u_where)
+                sql += "and %s )" % random.choice(self.q_u_or_where)
+                sql += "%s " % random.choice(self.limit1_where)
+                tdLog.info(sql)
+                tdLog.info(len(sql))
+                tdSql.query(sql)
+                self.cur1.execute(sql)
+
+        tdSql.query("select 1-7 as math_nest from stable_1 limit 1;")
+        for i in range(self.fornum):
+            if (mathlist == ['SIN','COS','TAN','ASIN','ACOS','ATAN']) or (mathlist == ['ABS','SQRT']) \
+                or (mathlist == ['POW','LOG']) or (mathlist == ['FLOOR','CEIL','ROUND']) :
+                sql = "select ts , abs(asct1) from ( select "
+                sql += "%s as asct1, ts ," % math_fun_1
+                sql += "%s as asct2, " % math_fun_2
+                sql += "%s, " % random.choice(self.q_select)
+                sql += "%s, " % random.choice(self.t_select)
+                sql += "ts from stable_1 where "
+                sql += "%s " % random.choice(self.qt_where)
+                sql += "%s " % random.choice(self.order_where)
+                sql += ") ;"
+                tdLog.info(sql)
+                tdLog.info(len(sql))
+                # tdSql.query(sql)
+                # tdSql.checkRows(300)
+                # self.cur1.execute(sql)# TD-16039
+            elif (mathlist == ['MAVG']) or (mathlist == ['SAMPLE']) or (mathlist == ['TAIL']) or (mathlist == ['CSUM'])\
+                or (mathlist == ['HYPERLOGLOG']) or (mathlist == ['UNIQUE']):
+                sql = "select abs(asct1) from ( select "
+                sql += "%s as asct1 " % math_fun_1
+                sql += "from stable_1 where "
+                sql += "%s " % random.choice(self.qt_where)
+                sql += ") ;"
+                tdLog.info(sql)
+                tdLog.info(len(sql))
+                # tdSql.query(sql)
+                # self.cur1.execute(sql)# TD-16039
+
+        tdSql.query("select 1-8 as math_nest from stable_1 limit 1;")
+        for i in range(self.fornum):
+            if (mathlist == ['SIN','COS','TAN','ASIN','ACOS','ATAN']) or (mathlist == ['ABS','SQRT']) \
+                or (mathlist == ['POW','LOG']) or (mathlist == ['FLOOR','CEIL','ROUND']) :
+                sql = "select ts,floor(asct1) "
+                sql += "from ( select "
+                sql += "%s, " % random.choice(self.s_s_select)
+                sql += "%s as asct1, ts ," % math_fun_1
+                sql += "%s as asct2, " % math_fun_2
+                sql += "%s, " % random.choice(self.q_select)
+                sql += "%s, " % random.choice(self.t_select)
+                sql += "ts from stable_1 where "
+                sql += "%s " % random.choice(self.qt_where)
+                sql += "%s " % random.choice(self.order_where)
+                sql += ") ;"
+                tdLog.info(sql)
+                tdLog.info(len(sql))
+                #tdSql.query(sql)
+                # tdSql.checkRows(300)
+                # self.cur1.execute(sql)# TD-16039
+            elif (mathlist == ['MAVG']) or (mathlist == ['SAMPLE'])or (mathlist == ['TAIL']) or (mathlist == ['CSUM'])\
+                or (mathlist == ['HYPERLOGLOG']) or (mathlist == ['UNIQUE']):
+                sql = "select floor(asct1) "
+                sql += "from ( select "
+                sql += "%s as asct1 " % math_fun_1
+                sql += " from stable_1 where "
+                sql += "%s " % random.choice(self.qt_where)
+                sql += ") ;"
+                tdLog.info(sql)
+                tdLog.info(len(sql))
+                # tdSql.query(sql)
+                # self.cur1.execute(sql)# TD-16039
+
+        tdSql.query("select 1-9 as math_nest from stable_1 limit 1;")
+        for i in range(self.fornum):
+            if (mathlist == ['SIN','COS','TAN','ASIN','ACOS','ATAN']) or (mathlist == ['ABS','SQRT']) \
+                or (mathlist == ['POW','LOG']) or (mathlist == ['FLOOR','CEIL','ROUND']) :
+                sql = "select ts , max(asct1) from ( select t1.ts as ts,"
+                sql += "%s, " % math_fun_join_1
+                sql += "%s as asct1, " % math_fun_join_2
+                sql += "t1.%s, " % random.choice(self.q_select)
+                sql += "t1.%s, " % random.choice(self.q_select)
+                sql += "t2.%s, " % random.choice(self.q_select)
+                sql += "t2.%s, " % random.choice(self.q_select)
+                sql += "t2.ts from stable_1 t1 , stable_2 t2 where t1.ts = t2.ts and "
+                sql += "%s " % random.choice(self.t_join_where)
+                sql += "and %s " % random.choice(self.t_u_where)
+                sql += "and %s " % random.choice(self.t_u_or_where)
+                sql += "%s " % random.choice(self.order_u_where)
+                sql += "%s " % random.choice(self.limit1_where)
+                sql += ") ;"
+                tdLog.info(sql)
+                tdLog.info(len(sql))
+                # tdSql.query(sql)
+                # self.cur1.execute(sql)# TD-16039
+            elif (mathlist == ['MAVG']) or (mathlist == ['SAMPLE'])or (mathlist == ['TAIL']) or (mathlist == ['CSUM'])\
+                or (mathlist == ['HYPERLOGLOG']) or (mathlist == ['UNIQUE']):
+                sql = "select max(asct1) from ( select "
+                sql += "%s as asct1 " % math_fun_join_2
+                # sql += "t1.%s, " % random.choice(self.q_select)
+                # sql += "t1.%s, " % random.choice(self.q_select)
+                # sql += "t2.%s, " % random.choice(self.q_select)
+                # sql += "t2.%s, " % random.choice(self.q_select)
+                sql += "from stable_1 t1 , stable_2 t2 where t1.ts = t2.ts and "
+                sql += "%s " % random.choice(self.t_join_where)
+                sql += "and %s " % random.choice(self.t_u_where)
+                sql += "and %s " % random.choice(self.t_u_or_where)
+                sql += "%s " % random.choice(self.limit1_where)
+                sql += ") ;"
+                tdLog.info(sql)
+                tdLog.info(len(sql))
+                # tdSql.query(sql)# TD-16039
+                # self.cur1.execute(sql)
+
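+        # (Editor's note, assumption: the restart below appears intended to
+        # verify that the same nested math queries still succeed after the
+        # dnodes reload their data; scenarios 1-10 onward run post-restart.)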
+        self.restartDnodes()
+        tdSql.query("select 1-10 as math_nest from stable_1 limit 1;")
+        for i in range(self.fornum):
+            if (mathlist == ['SIN','COS','TAN','ASIN','ACOS','ATAN']) or (mathlist == ['ABS','SQRT']) \
+                or (mathlist == ['POW','LOG']) or (mathlist == ['FLOOR','CEIL','ROUND']) :
+                sql = "select ts , min(asct1) from ( select "
+                sql += "%s as asct1, ts ," % math_fun_1
+                sql += "%s as asct2, " % math_fun_2
+                sql += "%s, " % random.choice(self.s_r_select)
+                sql += "%s, " % random.choice(self.q_select)
+                sql += "ts from stable_1 where "
+                sql += "%s " % random.choice(self.q_where)
+                sql += "%s " % random.choice(self.order_where)
+                sql += ") %s " % random.choice(self.unionall_or_union)
+                sql += "select ts , max(asct2) from ( select "
+                sql += "%s as asct1, ts ," % math_fun_1
+                sql += "%s as asct2, " % math_fun_2
+                sql += "%s, " % random.choice(self.s_r_select)
+                sql += "%s, " % random.choice(self.q_select)
+                sql += "ts from stable_2 where "
+                sql += "%s " % random.choice(self.q_where)
+                sql += "%s " % random.choice(self.order_where)
+                sql += "%s " % random.choice(self.limit1_where)
+                sql += ")"
+                tdLog.info(sql)
+                tdLog.info(len(sql))
+                #TD-15437 tdSql.query(sql)
+                #TD-15437 self.cur1.execute(sql)
+            elif (mathlist == ['MAVG']) or (mathlist == ['SAMPLE'])or (mathlist == ['TAIL']) or (mathlist == ['CSUM']) \
+                or (mathlist == ['HYPERLOGLOG']) or (mathlist == ['UNIQUE']):
+                sql = "select min(asct1) from ( select "
+                sql += "%s as asct1 " % math_fun_1
+                sql += " from stable_1 where "
+                sql += "%s " % random.choice(self.q_where)
+                sql += ") %s " % random.choice(self.unionall_or_union)
+                sql += "select max(asct2) from ( select "
+                sql += "%s as asct2 " % math_fun_2
+                sql += "from stable_2 where "
+                sql += "%s " % random.choice(self.q_where)
+                sql += "%s " % random.choice(self.limit1_where)
+                sql += ")"
+                tdLog.info(sql)
+                tdLog.info(len(sql))
+                #TD-15437 tdSql.query(sql)
+                #TD-15437 self.cur1.execute(sql)
+
+        #3 inter union not support
+        tdSql.query("select 1-11 as math_nest from stable_1 limit 1;")
+        for i in range(self.fornum):
+            if (mathlist == ['SIN','COS','TAN','ASIN','ACOS','ATAN']) or (mathlist == ['ABS','SQRT']) \
+                or (mathlist == ['POW','LOG']) or (mathlist == ['FLOOR','CEIL','ROUND']) :
+                sql = "select ts , min(asct1), max(asct2) from ( select "
+                sql += "%s as asct1, ts ," % math_fun_1
+                sql += "%s as asct2, " % math_fun_2
+                sql += "%s, " % random.choice(self.s_r_select)
+                sql += "%s, " % random.choice(self.q_select)
+                sql += "ts from stable_1 where "
+                sql += "%s " % random.choice(self.q_where)
+                sql += "%s " % random.choice(self.order_where)
+                #sql += "%s " % random.choice(limit1_where)
+                sql += " %s " % random.choice(self.unionall_or_union)
+                sql += " select "
+                sql += "%s as asct1, ts ," % math_fun_1
+                sql += "%s as asct2, " % math_fun_2
+                sql += "%s, " % random.choice(self.s_r_select)
+                sql += "%s, " % random.choice(self.q_select)
+                sql += "ts from stable_2 where "
+                sql += "%s " % random.choice(self.q_where)
+                sql += "%s " % random.choice(self.order_where)
+                sql += "%s " % random.choice(self.limit1_where)
+                sql += ")"
+                tdLog.info(sql)
+                tdLog.info(len(sql))
+                #TD-15837 tdSql.query(sql)
+                # self.cur1.execute(sql)
+            elif (mathlist == ['MAVG']) or (mathlist == ['SAMPLE'])or (mathlist == ['TAIL']) or (mathlist == ['CSUM']) \
+                or (mathlist == ['HYPERLOGLOG']) or (mathlist == ['UNIQUE']):
+                sql = "select min(asct1) from ( select "
+                sql += "%s as asct1 " % math_fun_1
+                sql += " from stable_1 where "
+                sql += "%s " % random.choice(self.q_where)
+                sql += " %s " % random.choice(self.unionall_or_union)
+                sql += " select "
+                sql += "%s as asct2 " % math_fun_2
+                sql += " from stable_2 where "
+                sql += "%s " % random.choice(self.q_where)
+                sql += "%s " % random.choice(self.limit1_where)
+                sql += ")"
+                tdLog.info(sql)
+                tdLog.info(len(sql))
+                #TD-15837 tdSql.query(sql)
+                #self.cur1.execute(sql)
+
+        tdSql.query("select 1-12 as math_nest from stable_1 limit 1;")
+        for i in range(self.fornum):
+            if (mathlist == ['SIN','COS','TAN','ASIN','ACOS','ATAN']) or (mathlist == ['ABS','SQRT']) \
+                or (mathlist == ['POW','LOG']) or (mathlist == ['FLOOR','CEIL','ROUND']) :
+                sql = "select ts , max(asct1) from ( select t1.ts as ts,"
+                sql += "%s, " % math_fun_join_1
+                sql += "%s as asct1, " % math_fun_join_2
+                sql += "t1.%s, " % random.choice(self.q_select)
+                sql += "t1.%s, " % random.choice(self.q_select)
+                sql += "t2.%s, " % random.choice(self.q_select)
+                sql += "t2.%s, " % random.choice(self.q_select)
+                sql += "t2.ts from stable_1 t1 , stable_2 t2 where t1.ts = t2.ts and "
+                sql += "%s " % random.choice(self.t_u_where)
+                sql += "and %s " % random.choice(self.t_u_or_where)
+                sql += "%s " % random.choice(self.order_u_where)
+                sql += "%s " % random.choice(self.limit1_where)
+                sql += ");"
+                tdLog.info(sql)
+                tdLog.info(len(sql))
+                # tdSql.query(sql)
+                # self.cur1.execute(sql)# TD-16039
+            elif (mathlist == ['MAVG']) or (mathlist == ['SAMPLE'])or (mathlist == ['TAIL']) or (mathlist == ['CSUM'])\
+                or (mathlist == ['HYPERLOGLOG']) or (mathlist == ['UNIQUE']):
+                sql = "select max(asct1) from ( select "
+                sql += "%s as asct1 " % math_fun_join_2
+                sql += " from stable_1 t1 , stable_2 t2 where t1.ts = t2.ts and "
+                sql += "%s " % random.choice(self.t_u_where)
+                sql += "and %s " % random.choice(self.t_u_or_where)
+                sql += "%s " % random.choice(self.limit1_where)
+                sql += ");"
+                tdLog.info(sql)
+                tdLog.info(len(sql))
+                # tdSql.query(sql)
+                # self.cur1.execute(sql)# TD-16039
+
+        tdSql.query("select 1-13 as math_nest from stable_1 limit 1;")
+        for i in range(self.fornum):
+            if (mathlist == ['SIN','COS','TAN','ASIN','ACOS','ATAN']) or (mathlist == ['ABS','SQRT']) \
+                or (mathlist == ['POW','LOG']) or (mathlist == ['FLOOR','CEIL','ROUND']) :
+                sql = "select ts ,"
+                sql += "%s, " % math_fun_1
+                sql += "%s, " % random.choice(self.q_select)
+                sql += "%s, " % random.choice(self.q_select)
+                sql += "%s " % math_fun_2
+                sql += "%s " % random.choice(self.t_select)
+                sql += " from ( select * from stable_1 where "
+                sql += "%s " % random.choice(self.qt_where)
+                sql += "%s " % random.choice(self.order_where)
+                sql += ") ;"
+                tdLog.info(sql)
+                tdLog.info(len(sql))
+                # tdSql.query(sql)
+                # tdSql.checkRows(300)
+                # self.cur1.execute(sql) # TD-16039
+            elif (mathlist == ['MAVG']) or (mathlist == ['SAMPLE'])or (mathlist == ['TAIL']) or (mathlist == ['CSUM']) \
+                or (mathlist == ['HYPERLOGLOG']) or (mathlist == ['UNIQUE']):
+                sql = "select "
+                sql += "%s " % math_fun_2
+                sql += "%s " % random.choice(self.t_select)
+                sql += " from ( select * from stable_1 where "
+                sql += "%s " % random.choice(self.qt_where)
+                sql += ") ;"
+                tdLog.info(sql)
+                tdLog.info(len(sql))
+                #TD15973 tdSql.query(sql)
+                #TD15973 self.cur1.execute(sql)
+
+        tdSql.query("select 1-14 as math_nest from stable_1 limit 1;")
+        for i in range(self.fornum):
+            if (mathlist == ['SIN','COS','TAN','ASIN','ACOS','ATAN']) or (mathlist == ['ABS','SQRT']) \
+                or (mathlist == ['POW','LOG']) or (mathlist == ['FLOOR','CEIL','ROUND']) :
+                sql = "select avg(asct1),count(asct2) from ( select "
+                sql += "%s as asct1, " % math_fun_1
+                sql += "%s as asct2" % math_fun_2
+                sql += " from stable_1 where "
+                sql += "%s " % random.choice(self.q_where)
+                sql += "%s " % random.choice(self.partiton_where)
+                sql += "%s " % random.choice(self.order_desc_where)
+                sql += "%s " % random.choice([self.limit_where[2] , self.limit_where[3]] )
+                sql += " ) ;"
+                tdLog.info(sql)
+                tdLog.info(len(sql))
+                # tdSql.query(sql)
+                # self.cur1.execute(sql) # TD-16039
+            elif (mathlist == ['MAVG']) or (mathlist == ['SAMPLE'])or (mathlist == ['CSUM'])\
+                or (mathlist == ['HYPERLOGLOG']) or (mathlist == ['UNIQUE']):
+                sql = "select avg(asct1) from ( select "
+                sql += "%s as asct1 " % math_fun_1
+                sql += " from stable_1 where "
+                sql += "%s " % random.choice(self.q_where)
+                sql += "%s " % random.choice(self.partiton_where)
+                sql += "%s " % random.choice([self.limit_where[2] , self.limit_where[3]] )
+                sql += " ) ;"
+                tdLog.info(sql)
+                tdLog.info(len(sql))
+                # tdSql.query(sql)
+                # self.cur1.execute(sql) # TD-16039
+
+        tdSql.query("select 1-15 as math_nest from stable_1 limit 1;")
+        for i in range(self.fornum):
+            if (mathlist == ['SIN','COS','TAN','ASIN','ACOS','ATAN']) or (mathlist == ['ABS','SQRT']) \
+                or (mathlist == ['POW','LOG']) or (mathlist == ['FLOOR','CEIL','ROUND']) :
+                sql = "select ts , max(asct1) from ( select t1.ts as ts,"
+                sql += "%s, " % math_fun_join_1
+                sql += "%s as asct1, " % math_fun_join_2
+                sql += "t1.%s, " % random.choice(self.q_select)
+                sql += "t1.%s, " % random.choice(self.q_select)
+                sql += "t2.%s, " % random.choice(self.q_select)
+                sql += "t2.%s " % random.choice(self.q_select)
+                sql += "from stable_1 t1, stable_2 t2 where t1.ts = t2.ts and "
+                sql += "%s " % random.choice(self.t_join_where)
+                sql += " and %s " % random.choice(self.qt_u_or_where)
+                sql += "%s " % random.choice(self.partiton_where_j)
+                sql += "%s " % random.choice(self.slimit1_where)
+                sql += ") "
+                sql += "%s " % random.choice(self.order_desc_where)
+                sql += "%s ;" % random.choice(self.limit_u_where)
+                tdLog.info(sql)
+                tdLog.info(len(sql))
+                # tdSql.query(sql)
+                # self.cur1.execute(sql) # TD-16039
+            elif (mathlist == ['MAVG']) or (mathlist == ['SAMPLE']) or (mathlist == ['TAIL']) or (mathlist == ['CSUM'])\
+                or (mathlist == ['HYPERLOGLOG']) or (mathlist == ['UNIQUE']):
+                sql = "select max(asct1) from ( select "
+                sql += "%s as asct1 " % math_fun_join_2
+                sql += "from stable_1 t1, stable_2 t2 where t1.ts = t2.ts and "
+                sql += "%s " % random.choice(self.t_join_where)
+                sql += " and %s " % random.choice(self.qt_u_or_where)
+                sql += "%s " % random.choice(self.partiton_where_j)
+                sql += "%s " % random.choice(self.slimit1_where)
+                sql += ") "
+                sql += "%s ;" % random.choice(self.limit_u_where)
+                tdLog.info(sql)
+                tdLog.info(len(sql))
+                # tdSql.query(sql)
+                # self.cur1.execute(sql) # TD-16039
+
+        #taos -f sql
+        startTime_taos_f = time.time()
+        print("taos -f %s sql start!" %mathlist)
+        taos_cmd1 = "taos -f %s/%s.sql" % (self.testcasePath,self.testcaseFilename)
+        _ = subprocess.check_output(taos_cmd1, shell=True).decode("utf-8")
+        print("taos -f %s sql over!" %mathlist)
+        endTime_taos_f = time.time()
+        print("taos_f total time %ds" % (endTime_taos_f - startTime_taos_f))
+
+        print("=========%s====over=============" %mathlist)
+
+
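+    # (Editor's sketch.) Both math_nest above and str_nest below build random
+    # expressions such as "ABS(q_int)" by sampling one function name plus one
+    # column list entry and then stripping list syntax out of str(list). The
+    # hypothetical helper below, not referenced by the tests, shows the same
+    # intent more directly (random is already imported by this module):
+    def _pick_expr_sketch(self, functions, columns):
+        fun = random.choice(functions)   # e.g. 'ABS'
+        col = random.choice(columns)     # e.g. '(q_int)'
+        return fun + col                 # -> 'ABS(q_int)'
+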
+    def str_nest(self,strlist):
+
+        print("==========%s===start=============" %strlist)
+        os.system("rm -rf %s/%s.sql" % (self.testcasePath,self.testcaseFilename))
+
+        self.dropandcreateDB_random("%s" %self.db_nest, 1)
+
+        if (strlist == ['LTRIM','RTRIM','LOWER','UPPER']) or (strlist == ['LENGTH','CHAR_LENGTH']) \
+            or (strlist == ['']):
+            str_functions = strlist
+            fun_fix_column = ['(q_nchar)','(q_binary)','(q_nchar_null)','(q_binary_null)']
+            fun_column_1 = random.sample(str_functions,1)+random.sample(fun_fix_column,1)
+            str_fun_1 = str(fun_column_1).replace("[","").replace("]","").replace("'","").replace(", ","")
+            fun_column_2 = random.sample(str_functions,1)+random.sample(fun_fix_column,1)
+            str_fun_2 = str(fun_column_2).replace("[","").replace("]","").replace("'","").replace(", ","")
+
+            fun_fix_column_j = ['(t1.q_nchar)','(t1.q_binary)','(t1.q_nchar_null)','(t1.q_binary_null)',
+                '(t2.q_nchar)','(t2.q_binary)','(t2.q_nchar_null)','(t2.q_binary_null)']
+            fun_column_join_1 = random.sample(str_functions,1)+random.sample(fun_fix_column_j,1)
+            str_fun_join_1 = str(fun_column_join_1).replace("[","").replace("]","").replace("'","").replace(", ","")
+            fun_column_join_2 = random.sample(str_functions,1)+random.sample(fun_fix_column_j,1)
+            str_fun_join_2 = str(fun_column_join_2).replace("[","").replace("]","").replace("'","").replace(", ","")
+
+            fun_fix_column_s = ['(q_nchar)','(q_binary)','(q_nchar_null)','(q_binary_null)','(loc)','(tbname)']
+            fun_column_s_1 = random.sample(str_functions,1)+random.sample(fun_fix_column_s,1)
+            str_fun_s_1 = str(fun_column_s_1).replace("[","").replace("]","").replace("'","").replace(", ","")
+            fun_column_s_2 = random.sample(str_functions,1)+random.sample(fun_fix_column_s,1)
+            str_fun_s_2 = str(fun_column_s_2).replace("[","").replace("]","").replace("'","").replace(", ","")
+
+            fun_fix_column_s_j = ['(t1.q_nchar)','(t1.q_binary)','(t1.q_nchar_null)','(t1.q_binary_null)','(t1.loc)','(t1.tbname)',
+                '(t2.q_nchar)','(t2.q_binary)','(t2.q_nchar_null)','(t2.q_binary_null)','(t2.loc)','(t2.tbname)']
+            fun_column_join_s_1 = random.sample(str_functions,1)+random.sample(fun_fix_column_s_j,1)
+            str_fun_join_s_1 = str(fun_column_join_s_1).replace("[","").replace("]","").replace("'","").replace(", ","")
+            fun_column_join_s_2 = random.sample(str_functions,1)+random.sample(fun_fix_column_s_j,1)
+            str_fun_join_s_2 = str(fun_column_join_s_2).replace("[","").replace("]","").replace("'","").replace(", ","")
+
+        elif (strlist == ['SUBSTR']) :
+            str_functions = strlist
+            pos = random.randint(1, 20)
+            sub_len = random.randint(1, 10)
+            fun_fix_column = ['(q_nchar,pos)','(q_binary,pos)','(q_nchar_null,pos)','(q_binary_null,pos)',
+                '(q_nchar,pos,sub_len)','(q_binary,pos,sub_len)','(q_nchar_null,pos,sub_len)','(q_binary_null,pos,sub_len)',]
+            fun_column_1 = random.sample(str_functions,1)+random.sample(fun_fix_column,1)
+            str_fun_1 = str(fun_column_1).replace("[","").replace("]","").replace("'","").replace(", ","").replace("pos",str(pos)).replace("sub_len",str(sub_len))
+            fun_column_2 = random.sample(str_functions,1)+random.sample(fun_fix_column,1)
+            str_fun_2 = str(fun_column_2).replace("[","").replace("]","").replace("'","").replace(", ","").replace("pos",str(pos)).replace("sub_len",str(sub_len))
+
+            fun_fix_column_j = ['(t1.q_nchar,pos)','(t1.q_binary,pos)','(t1.q_nchar_null,pos)','(t1.q_binary_null,pos)',
+                '(t1.q_nchar,pos,sub_len)','(t1.q_binary,pos,sub_len)','(t1.q_nchar_null,pos,sub_len)','(t1.q_binary_null,pos,sub_len)',
+                '(t2.q_nchar,pos)','(t2.q_binary,pos)','(t2.q_nchar_null,pos)','(t2.q_binary_null,pos)',
+                '(t2.q_nchar,pos,sub_len)','(t2.q_binary,pos,sub_len)','(t2.q_nchar_null,pos,sub_len)','(t2.q_binary_null,pos,sub_len)']
+            fun_column_join_1 = random.sample(str_functions,1)+random.sample(fun_fix_column_j,1)
+            str_fun_join_1 = str(fun_column_join_1).replace("[","").replace("]","").replace("'","").replace(", ","").replace("pos",str(pos)).replace("sub_len",str(sub_len))
+            fun_column_join_2 = random.sample(str_functions,1)+random.sample(fun_fix_column_j,1)
+            str_fun_join_2 = str(fun_column_join_2).replace("[","").replace("]","").replace("'","").replace(", ","").replace("pos",str(pos)).replace("sub_len",str(sub_len))
+
+            fun_fix_column_s = ['(q_nchar,pos)','(q_binary,pos)','(q_nchar_null,pos)','(q_binary_null,pos)','(loc,pos)',
+                '(q_nchar,pos,sub_len)','(q_binary,pos,sub_len)','(q_nchar_null,pos,sub_len)','(q_binary_null,pos,sub_len)','(loc,pos,sub_len)',]
+            fun_column_s_1 = random.sample(str_functions,1)+random.sample(fun_fix_column_s,1)
+            str_fun_s_1 = str(fun_column_s_1).replace("[","").replace("]","").replace("'","").replace(", ","").replace("pos",str(pos)).replace("sub_len",str(sub_len))
+            fun_column_s_2 = random.sample(str_functions,1)+random.sample(fun_fix_column_s,1)
+            str_fun_s_2 = str(fun_column_s_2).replace("[","").replace("]","").replace("'","").replace(", ","").replace("pos",str(pos)).replace("sub_len",str(sub_len))
+
+            fun_fix_column_s_j = ['(t1.q_nchar,pos)','(t1.q_binary,pos)','(t1.q_nchar_null,pos)','(t1.q_binary_null,pos)','(t1.loc,pos)',
+                '(t1.q_nchar,pos,sub_len)','(t1.q_binary,pos,sub_len)','(t1.q_nchar_null,pos,sub_len)','(t1.q_binary_null,pos,sub_len)','(t1.loc,pos,sub_len)',
+                '(t2.q_nchar,pos)','(t2.q_binary,pos)','(t2.q_nchar_null,pos)','(t2.q_binary_null,pos)','(t2.loc,pos)',
+                '(t2.q_nchar,pos,sub_len)','(t2.q_binary,pos,sub_len)','(t2.q_nchar_null,pos,sub_len)','(t2.q_binary_null,pos,sub_len)','(t2.loc,pos,sub_len)']
+            fun_column_join_s_1 = random.sample(str_functions,1)+random.sample(fun_fix_column_s_j,1)
+            str_fun_join_s_1 = str(fun_column_join_s_1).replace("[","").replace("]","").replace("'","").replace(", ","").replace("pos",str(pos)).replace("sub_len",str(sub_len))
+            fun_column_join_s_2 = random.sample(str_functions,1)+random.sample(fun_fix_column_s_j,1)
+            str_fun_join_s_2 = str(fun_column_join_s_2).replace("[","").replace("]","").replace("'","").replace(", ","").replace("pos",str(pos)).replace("sub_len",str(sub_len))
+
+        elif (strlist == ['CONCAT']) :
+            str_functions = strlist
+            i = random.randint(2,8)
+            fun_fix_column = ['q_nchar','q_nchar1','q_nchar2','q_nchar3','q_nchar4','q_nchar5','q_nchar6','q_nchar7','q_nchar8','q_nchar_null',
+                'q_binary','q_binary1','q_binary2','q_binary3','q_binary4','q_binary5','q_binary6','q_binary7','q_binary8','q_binary_null']
+
+            column1 = str(random.sample(fun_fix_column,i)).replace("[","").replace("]","").replace("'","")
+            fun_column_1 = str(random.sample(str_functions,1))+'('+column1+')'
+            str_fun_1 = str(fun_column_1).replace("[","").replace("]","").replace("'","")
+
+            column2 = str(random.sample(fun_fix_column,i)).replace("[","").replace("]","").replace("'","")
+            fun_column_2 = str(random.sample(str_functions,1))+'('+column2+')'
+            str_fun_2 = str(fun_column_2).replace("[","").replace("]","").replace("'","")
+
+            fun_fix_column_j = ['(t1.q_nchar)','(t1.q_nchar1)','(t1.q_nchar2)','(t1.q_nchar3)','(t1.q_nchar4)','(t1.q_nchar5)','(t1.q_nchar6)','(t1.q_nchar7)','(t1.q_nchar8)','(t1.q_nchar_null)',
+                '(t2.q_nchar)','(t2.q_nchar1)','(t2.q_nchar2)','(t2.q_nchar3)','(t2.q_nchar4)','(t2.q_nchar5)','(t2.q_nchar6)','(t2.q_nchar7)','(t2.q_nchar8)','(t2.q_nchar_null)',
+                '(t1.q_binary)','(t1.q_binary1)','(t1.q_binary2)','(t1.q_binary3)','(t1.q_binary4)','(t1.q_binary5)','(t1.q_binary6)','(t1.q_binary7)','(t1.q_binary8)','(t1.q_binary_null)',
+                '(t2.q_binary)','(t2.q_binary1)','(t2.q_binary2)','(t2.q_binary3)','(t2.q_binary4)','(t2.q_binary5)','(t2.q_binary6)','(t2.q_binary7)','(t2.q_binary8)','(t2.q_binary_null)']
+
+            column_j1 = str(random.sample(fun_fix_column_j,i)).replace("[","").replace("]","").replace("'","")
+            fun_column_join_1 = str(random.sample(str_functions,1))+'('+column_j1+')'
+            str_fun_join_1 = str(fun_column_join_1).replace("[","").replace("]","").replace("'","")
+
+            column_j2 = str(random.sample(fun_fix_column_j,i)).replace("[","").replace("]","").replace("'","")
+            fun_column_join_2 = str(random.sample(str_functions,1))+'('+column_j2+')'
+            str_fun_join_2 = str(fun_column_join_2).replace("[","").replace("]","").replace("'","")
+
+            fun_fix_column_s = ['q_nchar','q_nchar1','q_nchar2','q_nchar3','q_nchar4','q_nchar5','q_nchar6','q_nchar7','q_nchar8','loc','q_nchar_null',
+                'q_binary','q_binary1','q_binary2','q_binary3','q_binary4','q_binary5','q_binary6','q_binary7','q_binary8','q_binary_null']
+
+            column_s1 = str(random.sample(fun_fix_column_s,i)).replace("[","").replace("]","").replace("'","")
+            fun_column_s_1 = str(random.sample(str_functions,1))+'('+column_s1+')'
+            str_fun_s_1 = str(fun_column_s_1).replace("[","").replace("]","").replace("'","")
+
+            column_s2 = str(random.sample(fun_fix_column_s,i)).replace("[","").replace("]","").replace("'","")
+            fun_column_s_2 = str(random.sample(str_functions,1))+'('+column_s2+')'
+            str_fun_s_2 = str(fun_column_s_2).replace("[","").replace("]","").replace("'","")
+
+            fun_fix_column_s_j = ['(t1.q_nchar)','(t1.q_nchar1)','(t1.q_nchar2)','(t1.q_nchar3)','(t1.q_nchar4)','(t1.q_nchar5)','(t1.q_nchar6)','(t1.q_nchar7)','(t1.q_nchar8)','(t1.q_nchar_null)','(t1.loc)',
+                '(t2.q_nchar)','(t2.q_nchar1)','(t2.q_nchar2)','(t2.q_nchar3)','(t2.q_nchar4)','(t2.q_nchar5)','(t2.q_nchar6)','(t2.q_nchar7)','(t2.q_nchar8)','(t2.q_nchar_null)','(t2.loc)',
+                '(t1.q_binary)','(t1.q_binary1)','(t1.q_binary2)','(t1.q_binary3)','(t1.q_binary4)','(t1.q_binary5)','(t1.q_binary6)','(t1.q_binary7)','(t1.q_binary8)','(t1.q_binary_null)',
+                '(t2.q_binary)','(t2.q_binary1)','(t2.q_binary2)','(t2.q_binary3)','(t2.q_binary4)','(t2.q_binary5)','(t2.q_binary6)','(t2.q_binary7)','(t2.q_binary8)','(t2.q_binary_null)']
+
+            column_j_s1 = str(random.sample(fun_fix_column_s_j,i)).replace("[","").replace("]","").replace("'","")
+            fun_column_join_s_1 = str(random.sample(str_functions,1))+'('+column_j_s1+')'
+            str_fun_join_s_1 = str(fun_column_join_s_1).replace("[","").replace("]","").replace("'","")
+
+            column_j_s2 = str(random.sample(fun_fix_column_s_j,i)).replace("[","").replace("]","").replace("'","")
+            fun_column_join_s_2 = str(random.sample(str_functions,1))+'('+column_j_s2+')'
+            str_fun_join_s_2 = str(fun_column_join_s_2).replace("[","").replace("]","").replace("'","")
+
+        elif (strlist == ['CONCAT_WS']):
+            str_functions = strlist
+            i = random.randint(2,8)
+            fun_fix_column = ['q_nchar','q_nchar1','q_nchar2','q_nchar3','q_nchar4','q_nchar5','q_nchar6','q_nchar7','q_nchar8','q_nchar_null',
+                'q_binary','q_binary1','q_binary2','q_binary3','q_binary4','q_binary5','q_binary6','q_binary7','q_binary8','q_binary_null']
+
+            separators = ['',' ','abc','123','!','@','#','$','%','^','&','*','(',')','-','_','+','=','{',
+                '[','}',']','|',';',':',',','.','<','>','?','/','~','`','taos','涛思']
+            separator = str(random.sample(separators,i)).replace("[","").replace("]","")
+
+            column1 = str(random.sample(fun_fix_column,i)).replace("[","").replace("]","").replace("'","")
+            fun_column_1 = str(random.sample(str_functions,1))+'('+'\"'+separator+'\",'+column1+')'
+            str_fun_1 = str(fun_column_1).replace("[","").replace("]","").replace("'","")
+
+            column2 = str(random.sample(fun_fix_column,i)).replace("[","").replace("]","").replace("'","")
+            fun_column_2 = str(random.sample(str_functions,1))+'('+'\"'+separator+'\",'+column2+')'
+            str_fun_2 = str(fun_column_2).replace("[","").replace("]","").replace("'","")
+
+            fun_fix_column_j = ['(t1.q_nchar)','(t1.q_nchar1)','(t1.q_nchar2)','(t1.q_nchar3)','(t1.q_nchar4)','(t1.q_nchar5)','(t1.q_nchar6)','(t1.q_nchar7)','(t1.q_nchar8)','(t1.q_nchar_null)',
+                '(t2.q_nchar)','(t2.q_nchar1)','(t2.q_nchar2)','(t2.q_nchar3)','(t2.q_nchar4)','(t2.q_nchar5)','(t2.q_nchar6)','(t2.q_nchar7)','(t2.q_nchar8)','(t2.q_nchar_null)',
+                '(t1.q_binary)','(t1.q_binary1)','(t1.q_binary2)','(t1.q_binary3)','(t1.q_binary4)','(t1.q_binary5)','(t1.q_binary6)','(t1.q_binary7)','(t1.q_binary8)','(t1.q_binary_null)',
+                '(t2.q_binary)','(t2.q_binary1)','(t2.q_binary2)','(t2.q_binary3)','(t2.q_binary4)','(t2.q_binary5)','(t2.q_binary6)','(t2.q_binary7)','(t2.q_binary8)','(t2.q_binary_null)']
+
+            column_j1 = str(random.sample(fun_fix_column_j,i)).replace("[","").replace("]","").replace("'","")
+            fun_column_join_1 = str(random.sample(str_functions,1))+'('+'\"'+separator+'\",'+column_j1+')'
+            str_fun_join_1 = str(fun_column_join_1).replace("[","").replace("]","").replace("'","")
+
+            column_j2 = str(random.sample(fun_fix_column_j,i)).replace("[","").replace("]","").replace("'","")
+            fun_column_join_2 = str(random.sample(str_functions,1))+'('+'\"'+separator+'\",'+column_j2+')'
+            str_fun_join_2 = str(fun_column_join_2).replace("[","").replace("]","").replace("'","")
+
+            fun_fix_column_s = ['q_nchar','q_nchar1','q_nchar2','q_nchar3','q_nchar4','q_nchar5','q_nchar6','q_nchar7','q_nchar8','loc','q_nchar_null',
+                'q_binary','q_binary1','q_binary2','q_binary3','q_binary4','q_binary5','q_binary6','q_binary7','q_binary8','q_binary_null']
+
+            column_s1 = str(random.sample(fun_fix_column_s,i)).replace("[","").replace("]","").replace("'","")
+            fun_column_s_1 = str(random.sample(str_functions,1))+'('+'\"'+separator+'\",'+column_s1+')'
+            str_fun_s_1 = str(fun_column_s_1).replace("[","").replace("]","").replace("'","")
+
+            column_s2 = str(random.sample(fun_fix_column_s,i)).replace("[","").replace("]","").replace("'","")
+            fun_column_s_2 = str(random.sample(str_functions,1))+'('+'\"'+separator+'\",'+column_s2+')'
+            str_fun_s_2 = str(fun_column_s_2).replace("[","").replace("]","").replace("'","")
+
+            fun_fix_column_s_j = ['(t1.q_nchar)','(t1.q_nchar1)','(t1.q_nchar2)','(t1.q_nchar3)','(t1.q_nchar4)','(t1.q_nchar5)','(t1.q_nchar6)','(t1.q_nchar7)','(t1.q_nchar8)','(t1.q_nchar_null)','(t1.loc)',
+                '(t2.q_nchar)','(t2.q_nchar1)','(t2.q_nchar2)','(t2.q_nchar3)','(t2.q_nchar4)','(t2.q_nchar5)','(t2.q_nchar6)','(t2.q_nchar7)','(t2.q_nchar8)','(t2.q_nchar_null)','(t2.loc)',
+                '(t1.q_binary)','(t1.q_binary1)','(t1.q_binary2)','(t1.q_binary3)','(t1.q_binary4)','(t1.q_binary5)','(t1.q_binary6)','(t1.q_binary7)','(t1.q_binary8)','(t1.q_binary_null)',
+                '(t2.q_binary)','(t2.q_binary1)','(t2.q_binary2)','(t2.q_binary3)','(t2.q_binary4)','(t2.q_binary5)','(t2.q_binary6)','(t2.q_binary7)','(t2.q_binary8)','(t2.q_binary_null)']
+
+            column_j_s1 = str(random.sample(fun_fix_column_s_j,i)).replace("[","").replace("]","").replace("'","")
+            fun_column_join_s_1 = str(random.sample(str_functions,1))+'('+'\"'+separator+'\",'+column_j_s1+')'
+            str_fun_join_s_1 = str(fun_column_join_s_1).replace("[","").replace("]","").replace("'","")
+
+            column_j_s2 = str(random.sample(fun_fix_column_s_j,i)).replace("[","").replace("]","").replace("'","")
+            fun_column_join_s_2 = str(random.sample(str_functions,1))+'('+'\"'+separator+'\",'+column_j_s2+')'
+            str_fun_join_s_2 = str(fun_column_join_s_2).replace("[","").replace("]","").replace("'","")
+
+
+        tdSql.query("select 1-1 as str_nest from stable_1 limit 1;")
+        for i in range(self.fornum):
+            if (strlist == ['LTRIM','RTRIM','LOWER','UPPER']) or (strlist == ['SUBSTR']) or (strlist == ['CONCAT']) or (strlist == ['CONCAT_WS']) :
+                sql = "select ts , LTRIM(asct1), LOWER(asct1), RTRIM(asct2), UPPER(asct2) from ( select "
+                sql += "%s as asct1, " % str_fun_1
+                sql += "%s as asct2, " % str_fun_2
+                sql += "%s, " % random.choice(self.s_s_select)
+                sql += "%s, " % random.choice(self.q_select)
+                sql += "ts from regular_table_1 where "
+                sql += "%s " % random.choice(self.q_where)
+                sql += "%s " % random.choice(self.order_where)
+                sql += ");"
+                tdLog.info(sql)
+                tdLog.info(len(sql))
+                tdSql.query(sql)
+                tdSql.checkRows(100)
+                self.cur1.execute(sql)
+            elif (strlist == ['LENGTH','CHAR_LENGTH']):
+                sql = "select sum(asct1), min(asct1), max(asct2), avg(asct2) from ( select "
+                sql += "%s as asct1, " % str_fun_1
+                sql += "%s as asct2, " % str_fun_2
+                sql += "%s, " % random.choice(self.s_s_select)
+                sql += "%s, " % random.choice(self.q_select)
+                sql += "ts from regular_table_1 where "
+                sql += "%s " % random.choice(self.q_where)
+                sql += "%s " % random.choice(self.order_where)
+                sql += ");"
+                tdLog.info(sql)
+                tdLog.info(len(sql))
+                tdSql.query(sql)
+                self.cur1.execute(sql)
+
+        tdSql.query("select 1-2 as str_nest from stable_1 limit 1;")
+        for i in range(self.fornum):
+            if (strlist == ['LTRIM','RTRIM','LOWER','UPPER']) or (strlist == ['SUBSTR']) or (strlist == ['CONCAT']) or (strlist == ['CONCAT_WS']) :
+                sql = "select ts , LTRIM(asct1), LOWER(asct1), RTRIM(asct2), UPPER(asct2) from ( select "
+                sql += "%s as asct1, " % str_fun_1
+                sql += "%s, " % random.choice(self.s_s_select)
+                sql += "%s, " % random.choice(self.q_select)
+                sql += "ts ts from regular_table_1 where "
+                sql += "%s " % random.choice(self.q_where)
+                sql += "%s )" % random.choice(self.order_where)
+                sql += "%s " % random.choice(self.unionall_or_union)
+                sql += "select ts , asct2 from ( select "
+                sql += "%s as asct2, " % str_fun_2
+                sql += "%s, " % random.choice(self.s_s_select)
+                sql += "%s, " % random.choice(self.q_select)
+                sql += "ts ts from regular_table_1 where "
+                sql += "%s " % random.choice(self.q_where)
+                #sql += "%s " % random.choice(having_support)
+                sql += "%s " % random.choice(self.order_where)
+                sql += "%s " % random.choice(self.limit1_where)
+                sql += ")"
+                tdLog.info(sql)
+                tdLog.info(len(sql))
+                #TD-15437 tdSql.query(sql)
+                #TD-15437 self.cur1.execute(sql)
+            elif (strlist == ['LENGTH','CHAR_LENGTH']):
+                sql = "select sum(asct1), min(asct1), max(asct2), avg(asct2) from ( select "
+                sql += "%s as asct1, " % str_fun_1
+                sql += "%s, " % random.choice(self.s_s_select)
+                sql += "%s, " % random.choice(self.q_select)
+                sql += "ts ts from regular_table_1 where "
+                sql += "%s " % random.choice(self.q_where)
+                sql += "%s )" % random.choice(self.order_where)
+                sql += "%s " % random.choice(self.unionall_or_union)
+                sql += "select ts , asct2 from ( select "
+                sql += "%s as asct2, " % str_fun_2
+                sql += "%s, " % random.choice(self.s_s_select)
+                sql += "%s, " % random.choice(self.q_select)
+                sql += "ts ts from regular_table_1 where "
+                sql += "%s " % random.choice(self.q_where)
+                #sql += "%s " % random.choice(having_support)
+                sql += "%s " % random.choice(self.order_where)
+                sql += "%s " % random.choice(self.limit1_where)
+                sql += ")"
+                tdLog.info(sql)
+                tdLog.info(len(sql))
+                #TD-15437 tdSql.query(sql)
+                #TD-15437 self.cur1.execute(sql)
+
+        tdSql.query("select 1-3 as str_nest from stable_1 limit 1;")
+        for i in range(self.fornum):
+            if (strlist == ['LTRIM','RTRIM','LOWER','UPPER']) or (strlist == ['SUBSTR']) or (strlist == ['CONCAT']) or (strlist == ['CONCAT_WS']):
+                sql = "select ts , LTRIM(asct1), LOWER(asct1), RTRIM(asct2), UPPER(asct2) from ( select "
+                sql += "%s as asct1, ts ," % str_fun_1
+                sql += "%s as asct2, " % str_fun_2
+                sql += "%s, " % random.choice(self.s_s_select)
+                sql += "%s, " % random.choice(self.q_select)
+                sql += "ts from regular_table_1 where "
+                sql += "%s " % random.choice(self.q_where)
+                sql += "%s select " % random.choice(self.unionall_or_union)
+                sql += "%s as asct2, ts ," % str_fun_2
+                sql += "%s as asct1, " % str_fun_1
+                sql += "%s, " % random.choice(self.s_s_select)
+                sql += "%s, " % random.choice(self.q_select)
+                sql += "ts from regular_table_2 where "
+                sql += "%s " % random.choice(self.q_where)
+                sql += "%s " % random.choice(self.order_where)
+                sql += ")"
+                tdLog.info(sql)
+                tdLog.info(len(sql))
+                #TD-15473 tdSql.query(sql)
+                #self.cur1.execute(sql)
+            elif (strlist == ['LENGTH','CHAR_LENGTH']):
+                sql = "select sum(asct1), min(asct1), max(asct2), avg(asct2) from ( select "
+                sql += "%s as asct1, ts ," % str_fun_1
+                sql += "%s as asct2, " % str_fun_2
+                sql += "%s, " % random.choice(self.s_s_select)
+                sql += "%s, " % random.choice(self.q_select)
+                sql += "ts from regular_table_1 where "
+                sql += "%s " % random.choice(self.q_where)
+                sql += "%s select " % random.choice(self.unionall_or_union)
+                sql += "%s as asct2, ts ," % str_fun_2
+                sql += "%s as asct1, " % str_fun_1
+                sql += "%s, " % random.choice(self.s_s_select)
+                sql += "%s, " % random.choice(self.q_select)
+                sql += "ts from regular_table_2 where "
+                sql += "%s " % random.choice(self.q_where)
+                sql += "%s " % random.choice(self.order_where)
+                sql += ")"
+                tdLog.info(sql)
+                tdLog.info(len(sql))
+                #TD-15473 tdSql.query(sql)
+                #self.cur1.execute(sql)
+
+        tdSql.query("select 1-4 as str_nest from stable_1 limit 1;")
+        for i in range(self.fornum):
+            if (strlist == ['LTRIM','RTRIM','LOWER','UPPER']) or (strlist == ['SUBSTR']) or (strlist == ['CONCAT']) or (strlist == ['CONCAT_WS']):
+                sql = "select ts , LTRIM(asct1), LOWER(asct1), RTRIM(asct2), UPPER(asct2) from ( select t1.ts as ts,"
+                sql += "%s as asct2, " % str_fun_join_1
+                sql += "%s as asct1, " % str_fun_join_2
+                sql += "%s, " % str_fun_join_1
+                sql += "t1.%s, " % random.choice(self.q_select)
+                sql += "t2.%s, " % random.choice(self.q_select)
+                sql += "t2.ts from regular_table_1 t1 , regular_table_2 t2 where t1.ts = t2.ts and "
+                sql += "%s " % random.choice(self.q_u_where)
+                sql += "and %s " % random.choice(self.q_u_or_where)
+                sql += "%s " % random.choice(self.order_u_where)
+                sql += ");"
+                tdLog.info(sql)
+                tdLog.info(len(sql))
+                tdSql.query(sql)
+                tdSql.checkRows(100)
+                self.cur1.execute(sql)
+            elif (strlist == ['LENGTH','CHAR_LENGTH']):
+                sql = "select sum(asct1), min(asct1), max(asct2), avg(asct2) from ( select t1.ts as ts,"
+                sql += "%s as asct2, " % str_fun_join_1
+                sql += "%s as asct1, " % str_fun_join_2
+                sql += "%s, " % str_fun_join_1
+                sql += "t1.%s, " % random.choice(self.q_select)
+                sql += "t2.%s, " % random.choice(self.q_select)
+                sql += "t2.ts from regular_table_1 t1 , regular_table_2 t2 where t1.ts = t2.ts and "
+                sql += "%s " % random.choice(self.q_u_where)
+                sql += "and %s " % random.choice(self.q_u_or_where)
+                sql += "%s " % random.choice(self.order_u_where)
+                sql += ");"
+                tdLog.info(sql)
+                tdLog.info(len(sql))
+                tdSql.query(sql)
+                self.cur1.execute(sql)
+
+        tdSql.query("select 1-5 as str_nest from stable_1 limit 1;")
+        for i in range(self.fornum):
+            if (strlist == ['LTRIM','RTRIM','LOWER','UPPER']) or (strlist == ['SUBSTR']) or (strlist == ['CONCAT']) or (strlist == ['CONCAT_WS']):
+                sql = "select ts ,"
+                sql += "%s, " % str_fun_1
+                sql += "%s, " % random.choice(self.q_select)
+                sql += "%s, " % random.choice(self.q_select)
+                sql += "%s " % str_fun_2
+                sql += " from ( select * from regular_table_1 ) where "
+                sql += "%s " % random.choice(self.q_where)
+                sql += "%s " % random.choice(self.order_where)
+                sql += " ;"
+                tdLog.info(sql)
+                tdLog.info(len(sql))
+                tdSql.query(sql)
+                tdSql.checkRows(100)
+                self.cur1.execute(sql)
+            elif (strlist == ['LENGTH','CHAR_LENGTH']):
+                sql = "select ts ,"
+                sql += "%s, " % str_fun_1
+                sql += "%s, " % random.choice(self.q_select)
+                sql += "%s, " % random.choice(self.q_select)
+                sql += "%s " % str_fun_2
+                sql += " from ( select * from regular_table_1 ) where "
+                sql += "%s " % random.choice(self.q_where)
+                sql += "%s " % random.choice(self.order_where)
+                sql += " ;"
+                tdLog.info(sql)
+                tdLog.info(len(sql))
+                tdSql.query(sql)
+                tdSql.checkRows(100)
+                self.cur1.execute(sql)
+
+        tdSql.query("select 1-6 as str_nest from stable_1 limit 1;")
+        for i in range(self.fornum):
+            if (strlist == ['LTRIM','RTRIM','LOWER','UPPER']) or (strlist == ['SUBSTR']) or (strlist == ['CONCAT']) or (strlist == ['CONCAT_WS']):
+                sql = "select ts , LTRIM(asct1), LOWER(asct1), RTRIM(asct2), UPPER(asct2) from ( select t1.ts as ts,"
+                sql += "%s as asct2, " % str_fun_join_1
+                sql += "%s as asct1, " % str_fun_join_2
+                sql += "t1.%s, " % random.choice(self.q_select)
+                sql += "t2.%s, " % random.choice(self.q_select)
+                sql += "%s, " % str_fun_join_1
+                sql += "t2.ts from regular_table_1 t1 , regular_table_2 t2 where t1.ts = t2.ts and "
+                sql += "%s " % random.choice(self.q_u_where)
+                sql += "and %s )" % random.choice(self.q_u_or_where)
+                sql += "%s " % random.choice(self.order_where)
+                sql += "%s " % random.choice(self.limit1_where)
+                tdLog.info(sql)
+                tdLog.info(len(sql))
+                tdSql.query(sql)
+                self.cur1.execute(sql)
+            elif (strlist == ['LENGTH','CHAR_LENGTH']):
+                sql = "select sum(asct1), min(asct1), max(asct2), avg(asct2) from ( select t1.ts as ts,"
+                sql += "%s as asct2, " % str_fun_join_1
+                sql += "%s as asct1, " % str_fun_join_2
+                sql += "t1.%s, " % random.choice(self.q_select)
+                sql += "t2.%s, " % random.choice(self.q_select)
+                sql += "%s, " % str_fun_join_1
+                sql += "t2.ts from regular_table_1 t1 , regular_table_2 t2 where t1.ts = t2.ts and "
+                sql += "%s " % random.choice(self.q_u_where)
+                sql += "and %s )" % random.choice(self.q_u_or_where)
+                sql += "%s " % random.choice(self.limit1_where)
+                tdLog.info(sql)
+                tdLog.info(len(sql))
+                tdSql.query(sql)
+                self.cur1.execute(sql)
+
+        tdSql.query("select 1-7 as str_nest from stable_1 limit 1;")
+        for i in range(self.fornum):
+            if (strlist == ['LTRIM','RTRIM','LOWER','UPPER']) or (strlist == ['SUBSTR']) or (strlist == ['CONCAT']) or (strlist == ['CONCAT_WS']):
+                sql = "select ts , LTRIM(asct1), LOWER(asct1), RTRIM(asct2), UPPER(asct2) from ( select "
+                sql += "%s as asct1, ts ," % str_fun_s_1
+                sql += "%s as asct2, " % str_fun_s_2
+                sql += "%s, " % random.choice(self.q_select)
+                sql += "%s, " % random.choice(self.t_select)
+                sql += "ts from stable_1 where "
+                sql += "%s " % random.choice(self.qt_where)
+                sql += "%s " % random.choice(self.order_where)
+                sql += ") ;"
+                tdLog.info(sql)
+                tdLog.info(len(sql))
+                # tdSql.query(sql)
+                # tdSql.checkRows(300)
+                # self.cur1.execute(sql)# TD-16039
+            elif (strlist == ['LENGTH','CHAR_LENGTH']):
+                sql = "select sum(asct1), min(asct1), max(asct2), avg(asct2) from ( select "
+                sql += "%s as asct1, ts ," % str_fun_s_1
+                sql += "%s as asct2, " % str_fun_s_2
+                sql += "%s, " % random.choice(self.q_select)
+                sql += "%s, " % random.choice(self.t_select)
+                sql += "ts from stable_1 where "
+                sql += "%s " % random.choice(self.qt_where)
+                sql += "%s " % random.choice(self.order_where)
+                sql += ") ;"
+                tdLog.info(sql)
+                tdLog.info(len(sql))
+                # tdSql.query(sql)
+                # self.cur1.execute(sql)# TD-16039
+
+        tdSql.query("select 1-8 as str_nest from stable_1 limit 1;")
+        for i in range(self.fornum):
+            if (strlist == ['LTRIM','RTRIM','LOWER','UPPER']) or (strlist == ['SUBSTR']) or (strlist == ['CONCAT']) or (strlist == ['CONCAT_WS']):
+                sql = "select ts, LTRIM(asct1), LOWER(asct1), RTRIM(asct2), UPPER(asct2) "
+                sql += "from ( select "
+                sql += "%s, " % random.choice(self.s_s_select)
+                sql += "%s as asct1, ts ," % str_fun_s_1
+                sql += "%s as asct2, " % str_fun_s_2
+                sql += "%s, " % random.choice(self.q_select)
+                sql += "%s, " % random.choice(self.t_select)
+                sql += "ts from stable_1 where "
+                sql += "%s " % random.choice(self.qt_where)
+                sql += "%s " % random.choice(self.order_where)
+                sql += ") ;"
+                tdLog.info(sql)
+                tdLog.info(len(sql))
+                # tdSql.query(sql)
+                # tdSql.checkRows(300)
+                # self.cur1.execute(sql)# TD-16039
+            elif (strlist == ['LENGTH','CHAR_LENGTH']):
+                sql = "select sum(asct1), min(asct1), max(asct2), avg(asct2) "
+                sql += "from ( select "
+                sql += "%s, " % random.choice(self.s_s_select)
+                sql += "%s as asct1, ts ," % str_fun_s_1
+                sql += "%s as asct2, " % str_fun_s_2
+                sql += "%s, " % random.choice(self.q_select)
+                sql += "%s, " % random.choice(self.t_select)
+                sql += "ts from stable_1 where "
+                sql += "%s " % random.choice(self.qt_where)
+                sql += "%s " % random.choice(self.order_where)
+                sql += ") ;"
+                tdLog.info(sql)
+                tdLog.info(len(sql))
+                # tdSql.query(sql)
+                # self.cur1.execute(sql)# TD-16039
+
+        tdSql.query("select 1-9 as str_nest from stable_1 limit 1;")
+        for i in range(self.fornum):
+            if (strlist == ['LTRIM','RTRIM','LOWER','UPPER']) or (strlist == ['SUBSTR']) or (strlist == ['CONCAT']) or (strlist == ['CONCAT_WS']):
+                sql = "select ts , LTRIM(asct1), LOWER(asct1), RTRIM(asct2), UPPER(asct2) from ( select t1.ts as ts,"
+                sql += "%s as asct2, " % str_fun_join_s_1
+                sql += "%s as asct1, " % str_fun_join_s_2
+                sql += "t1.%s, " % random.choice(self.q_select)
+                sql += "t1.%s, " % random.choice(self.q_select)
+                sql += "t2.%s, " % random.choice(self.q_select)
+                sql += "t2.%s, " % random.choice(self.q_select)
+                sql += "t2.ts from stable_1 t1 , stable_2 t2 where t1.ts = t2.ts and "
+                sql += "%s " % random.choice(self.t_join_where)
+                sql += "and %s " % random.choice(self.t_u_where)
+                sql += "and %s " % random.choice(self.t_u_or_where)
+                sql += "%s " % random.choice(self.order_u_where)
+                sql += "%s " % random.choice(self.limit1_where)
+                sql += ") ;"
+                tdLog.info(sql)
+                tdLog.info(len(sql))
+                # tdSql.query(sql)
+                # self.cur1.execute(sql) # TD-16039
+            elif (strlist == ['LENGTH','CHAR_LENGTH']):
+                sql = "select sum(asct1), min(asct1), max(asct2), avg(asct2) from ( select t1.ts as ts,"
+                sql += "%s as asct2, " % str_fun_join_s_1
+                sql += "%s as asct1, " % str_fun_join_s_2
+                sql += "t1.%s, " % random.choice(self.q_select)
+                sql += "t1.%s, " % random.choice(self.q_select)
+                sql += "t2.%s, " % random.choice(self.q_select)
+                sql += "t2.%s, " % random.choice(self.q_select)
+                sql += "t2.ts from stable_1 t1 , stable_2 t2 where t1.ts = t2.ts and "
+                sql += "%s " % random.choice(self.t_join_where)
+                sql += "and %s " % random.choice(self.t_u_where)
+                sql += "and %s " % random.choice(self.t_u_or_where)
+                sql += "%s " % random.choice(self.order_u_where)
+                sql += "%s " % random.choice(self.limit1_where)
+                sql += ") ;"
+                tdLog.info(sql)
+                tdLog.info(len(sql))
+                # tdSql.query(sql)
+                # self.cur1.execute(sql)# TD-16039
+
+        self.restartDnodes()
+        tdSql.query("select 1-10 as str_nest from stable_1 limit 1;")
+        for i in range(self.fornum):
+            if (strlist == ['LTRIM','RTRIM','LOWER','UPPER']) or (strlist == ['SUBSTR']) or (strlist == ['CONCAT']) or (strlist == ['CONCAT_WS']):
+                sql = "select ts , LTRIM(asct1), LOWER(asct1), RTRIM(asct2), UPPER(asct2) from ( select "
+                sql += "%s as asct1, ts ," % str_fun_s_1
+                sql += "%s as asct2, " % str_fun_s_2
+                sql += "%s, " % random.choice(self.s_r_select)
+                sql += "%s, " % random.choice(self.q_select)
+                sql += "ts from stable_1 where "
+                sql += "%s " % random.choice(self.q_where)
+                sql += "%s " % random.choice(self.order_where)
+                sql += ") %s " % random.choice(self.unionall_or_union)
+                sql += "select ts , max(asct2) from ( select "
+                sql += "%s as asct1, ts ," % str_fun_1
+                sql += "%s as asct2, " % str_fun_2
+                sql += "%s, " % random.choice(self.s_r_select)
+                sql += "%s, " % random.choice(self.q_select)
+                sql += "ts from stable_2 where "
+                sql += "%s " % random.choice(self.q_where)
+                sql += "%s " % random.choice(self.order_where)
+                sql += "%s " % random.choice(self.limit1_where)
+                sql += ")"
+                tdLog.info(sql)
+                tdLog.info(len(sql))
+                #TD-15437 tdSql.query(sql)
+                #TD-15437 self.cur1.execute(sql)
+            elif (strlist == ['LENGTH','CHAR_LENGTH']):
+                sql = "select sum(asct1), min(asct1), max(asct2), avg(asct2) from ( select "
+                sql += "%s as asct1, ts ," % str_fun_s_1
+                sql += "%s as asct2, " % str_fun_s_2
+                sql += "%s, " % random.choice(self.s_r_select)
+                sql += "%s, " % random.choice(self.q_select)
+                sql += "ts from stable_1 where "
+                sql += "%s " % random.choice(self.q_where)
+                sql += "%s " % random.choice(self.order_where)
+                sql += ") %s " % random.choice(self.unionall_or_union)
+                sql += "select ts , max(asct2) from ( select "
+                sql += "%s as asct1, ts ," % str_fun_1
+                sql += "%s as asct2, " % str_fun_2
+                sql += "%s, " % random.choice(self.s_r_select)
+                sql += "%s, " % random.choice(self.q_select)
+                sql += "ts from stable_2 where "
+                sql += "%s " % random.choice(self.q_where)
+                sql += "%s " % random.choice(self.order_where)
+                sql += "%s " % random.choice(self.limit1_where)
+                sql += ")"
+                tdLog.info(sql)
+                tdLog.info(len(sql))
+                #TD-15437 tdSql.query(sql)
+                #TD-15437 self.cur1.execute(sql)
+
+        #3 inter union not support
+        tdSql.query("select 1-11 as str_nest from stable_1 limit 1;")
+        for i in range(self.fornum):
+            if (strlist == ['LTRIM','RTRIM','LOWER','UPPER']) or (strlist == ['SUBSTR']) or (strlist == ['CONCAT']) or (strlist == ['CONCAT_WS']):
+                sql = "select ts , LTRIM(asct1), LOWER(asct1), RTRIM(asct2), UPPER(asct2) from ( select "
+                sql += "%s as asct1, ts ," % str_fun_s_1
+                sql += "%s as asct2, " % str_fun_s_2
+                sql += "%s, " % random.choice(self.s_r_select)
+                sql += "%s, " % random.choice(self.q_select)
+                sql += "ts from stable_1 where "
+                sql += "%s " % random.choice(self.q_where)
+                sql += "%s " % random.choice(self.order_where)
+                #sql += "%s " % random.choice(limit1_where)
+                sql += " %s " % random.choice(self.unionall_or_union)
+                sql += " select "
+                sql += "%s as asct1, ts ," % str_fun_1
+                sql += "%s as asct2, " % str_fun_2
+                sql += "%s, " % random.choice(self.s_r_select)
+                sql += "%s, " % random.choice(self.q_select)
+                sql += "ts from stable_2 where "
+                sql += "%s " % random.choice(self.q_where)
+                sql += "%s " % random.choice(self.order_where)
+                sql += "%s " % random.choice(self.limit1_where)
+                sql += ")"
+                tdLog.info(sql)
+                tdLog.info(len(sql))
+                #TD-15837 tdSql.query(sql)
+                # self.cur1.execute(sql)
+            elif (strlist == ['LENGTH','CHAR_LENGTH']):
+                sql = "select sum(asct1), min(asct1), max(asct2), avg(asct2) from ( select "
+                sql += "%s as asct1, ts ," % str_fun_s_1
+                sql += "%s as asct2, " % str_fun_s_2
+                sql += "%s, " % random.choice(self.s_r_select)
+                sql += "%s, " % random.choice(self.q_select)
+                sql += "ts from stable_1 where "
+                sql += "%s " % random.choice(self.q_where)
+                sql += "%s " % random.choice(self.order_where)
+                #sql += "%s " % random.choice(limit1_where)
+                sql += " %s " % random.choice(self.unionall_or_union)
+                sql += " select "
+                sql += "%s as asct1, ts ," % str_fun_1
+                sql += "%s as asct2, " % str_fun_2
+                sql += "%s, " % random.choice(self.s_r_select)
+                sql += "%s, " % random.choice(self.q_select)
+                sql += "ts from stable_2 where "
+                sql += "%s " % random.choice(self.q_where)
+                sql += "%s " % random.choice(self.order_where)
+                sql += "%s " % random.choice(self.limit1_where)
+                sql += ")"
+                tdLog.info(sql)
+                tdLog.info(len(sql))
+                #TD-15837 tdSql.query(sql)
+                # self.cur1.execute(sql)
+
+        tdSql.query("select 1-12 as str_nest from stable_1 limit 1;")
+        for i in range(self.fornum):
+            if (strlist == ['LTRIM','RTRIM','LOWER','UPPER']) or (strlist == ['SUBSTR']) or (strlist == ['CONCAT']) or (strlist == ['CONCAT_WS']):
+                sql = "select ts , LTRIM(asct1), LOWER(asct1), RTRIM(asct2), UPPER(asct2) from ( select t1.ts as ts,"
+                sql += "%s as asct2, " % str_fun_join_s_1
+                sql += "%s as asct1, " % str_fun_join_s_2
+                sql += "t1.%s, " % random.choice(self.q_select)
+                sql += "t1.%s, " % random.choice(self.q_select)
+                sql += "t2.%s, " % random.choice(self.q_select)
+                sql += "t2.%s, " % random.choice(self.q_select)
+                sql += "t2.ts from stable_1 t1 , stable_2 t2 where t1.ts = t2.ts and "
+                sql += "%s " % random.choice(self.t_u_where)
+                sql += "and %s " % random.choice(self.t_u_or_where)
+                sql += "%s " % random.choice(self.order_u_where)
+                sql += "%s " % random.choice(self.limit1_where)
+                sql += ");"
+                tdLog.info(sql)
+                tdLog.info(len(sql))
+                # tdSql.query(sql)
+                # self.cur1.execute(sql)# TD-16039
+            elif (strlist == ['LENGTH','CHAR_LENGTH']):
+                sql = "select sum(asct1), min(asct1), max(asct2), avg(asct2) from ( select t1.ts as ts,"
+                sql += "%s as asct2, " % str_fun_join_s_1
+                sql += "%s as asct1, " % str_fun_join_s_2
+                sql += "t1.%s, " % random.choice(self.q_select)
+                sql += "t1.%s, " % random.choice(self.q_select)
+                sql += "t2.%s, " % random.choice(self.q_select)
+                sql += "t2.%s, " % random.choice(self.q_select)
+                sql += "t2.ts from stable_1 t1 , stable_2 t2 where t1.ts = t2.ts and "
+                sql += "%s " % random.choice(self.t_u_where)
+                sql += "and %s " % random.choice(self.t_u_or_where)
+                sql += "%s " % random.choice(self.order_u_where)
+                sql += "%s " % random.choice(self.limit1_where)
+                sql += ");"
+                tdLog.info(sql)
+                tdLog.info(len(sql))
+                # tdSql.query(sql)
+                # self.cur1.execute(sql)# TD-16039
+
+        tdSql.query("select 1-13 as str_nest from stable_1 limit 1;")
+        for i in range(self.fornum):
+            if (strlist == ['LTRIM','RTRIM','LOWER','UPPER']) or (strlist == ['SUBSTR']) or (strlist == ['CONCAT']) or (strlist == ['CONCAT_WS']):
+                sql = "select ts ,"
+                sql += "%s, " % str_fun_1
+                sql += "%s, " % random.choice(self.q_select)
+                sql += "%s, " % random.choice(self.q_select)
+                sql += "%s, " % str_fun_2
+                sql += "%s " % random.choice(self.t_select)
+                sql += " from ( select * from stable_1 where "
+                sql += "%s " % random.choice(self.qt_where)
+                sql += "%s " % random.choice(self.order_where)
+                sql += ") ;"
+                tdLog.info(sql)
+                tdLog.info(len(sql))
+                # tdSql.query(sql)
+                # tdSql.checkRows(300)
+                # self.cur1.execute(sql) # TD-16039
+            elif (strlist == ['LENGTH','CHAR_LENGTH']):
+                sql = "select ts ,"
+                sql += "%s, " % str_fun_1
+                sql += "%s, " % random.choice(self.q_select)
+                sql += "%s, " % random.choice(self.q_select)
+                sql += "%s, " % str_fun_2
+                sql += "%s " % random.choice(self.t_select)
+                sql += " from ( select * from stable_1 where "
+                sql += "%s " % random.choice(self.qt_where)
+                sql += "%s " % random.choice(self.order_where)
+                sql += ") ;"
+                tdLog.info(sql)
+                tdLog.info(len(sql))
+                # tdSql.query(sql)
+                # tdSql.checkRows(300)
+                # self.cur1.execute(sql)# TD-16039
+
+        tdSql.query("select 1-14 as str_nest from stable_1 limit 1;")
+        for i in range(self.fornum):
+            if (strlist == ['LTRIM','RTRIM','LOWER','UPPER']) or (strlist == ['SUBSTR']) or (strlist == ['CONCAT']) or (strlist == ['CONCAT_WS']):
+                sql = "select LTRIM(asct1), LOWER(asct1), RTRIM(asct2), UPPER(asct2) from ( select "
+                sql += "%s as asct1, " % str_fun_s_1
+                sql += "%s as asct2" % str_fun_s_2
+                sql += " from stable_1 where "
+                sql += "%s " % random.choice(self.q_where)
+                sql += "%s " % random.choice(self.partiton_where)
+                sql += "%s " % random.choice(self.order_desc_where)
+                sql += "%s " % random.choice([self.limit_where[2] , self.limit_where[3]] )
+                sql += " ) ;"
+                tdLog.info(sql)
+                tdLog.info(len(sql))
+                # tdSql.query(sql)
+                # self.cur1.execute(sql) # TD-16039
+            elif (strlist == ['LENGTH','CHAR_LENGTH']):
+                sql = "select sum(asct1), min(asct1), max(asct2), avg(asct2) from ( select "
+                sql += "%s as asct1, " % str_fun_s_1
+                sql += "%s as asct2" % str_fun_s_2
+                sql += " from stable_1 where "
+                sql += "%s " % random.choice(self.q_where)
+                sql += "%s " % random.choice(self.partiton_where)
+                sql += "%s " % random.choice(self.order_desc_where)
+                sql += "%s " % random.choice([self.limit_where[2] , self.limit_where[3]] )
+                sql += " ) ;"
+                tdLog.info(sql)
+                tdLog.info(len(sql))
+                # tdSql.query(sql)
+                # self.cur1.execute(sql) # TD-16039
+
+        tdSql.query("select 1-15 as str_nest from stable_1 limit 1;")
+        for i in range(self.fornum):
+            if (strlist == ['LTRIM','RTRIM','LOWER','UPPER']) or (strlist == ['SUBSTR']) or (strlist == ['CONCAT']) or (strlist == ['CONCAT_WS']):
+                sql = "select ts , LTRIM(asct1), LOWER(asct1), RTRIM(asct2), UPPER(asct2) from ( select t1.ts as ts,"
+                sql += "%s as asct2, " % str_fun_join_s_1
+                sql += "%s as asct1, " % str_fun_join_s_2
+                sql += "t1.%s, " % 
random.choice(self.q_select) + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) + sql += "t2.%s " % random.choice(self.q_select) + sql += "from stable_1 t1, stable_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.t_join_where) + sql += " and %s " % random.choice(self.qt_u_or_where) + sql += "%s " % random.choice(self.partiton_where_j) + sql += "%s " % random.choice(self.slimit1_where) + sql += ") " + sql += "%s " % random.choice(self.order_desc_where) + sql += "%s ;" % random.choice(self.limit_u_where) + tdLog.info(sql) + tdLog.info(len(sql)) + #TD-15955 tdSql.query(sql) + #TD-15955 self.cur1.execute(sql) + elif (strlist == ['LENGTH','CHAR_LENGTH']): + sql = "select sum(asct1), min(asct1), max(asct2), avg(asct2) from ( select t1.ts as ts," + sql += "%s as asct2, " % str_fun_join_s_1 + sql += "%s as asct1, " % str_fun_join_s_2 + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) + sql += "t2.%s " % random.choice(self.q_select) + sql += "from stable_1 t1, stable_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.t_join_where) + sql += " and %s " % random.choice(self.qt_u_or_where) + sql += "%s " % random.choice(self.partiton_where_j) + sql += "%s " % random.choice(self.slimit1_where) + sql += ") " + sql += "%s " % random.choice(self.order_desc_where) + sql += "%s ;" % random.choice(self.limit_u_where) + tdLog.info(sql) + tdLog.info(len(sql)) + #TD-15955 tdSql.query(sql) + #TD-15955 self.cur1.execute(sql) + + #taos -f sql + startTime_taos_f = time.time() + print("taos -f %s sql start!" %strlist) + taos_cmd1 = "taos -f %s/%s.sql" % (self.testcasePath,self.testcaseFilename) + _ = subprocess.check_output(taos_cmd1, shell=True).decode("utf-8") + print("taos -f %s sql over!" 
%strlist) + endTime_taos_f = time.time() + print("taos_f total time %ds" % (endTime_taos_f - startTime_taos_f)) + + print("=========%s====over=============" %strlist) + + def time_nest(self,timelist): + + print("==========%s===start=============" %timelist) + os.system("rm -rf %s/%s.sql" % (self.testcasePath,self.testcaseFilename)) + + self.dropandcreateDB_random("%s" %self.db_nest, 1) + + if (timelist == ['NOW','TODAY']) or (timelist == ['TIMEZONE']): + time_functions = timelist + fun_fix_column = ['()'] + fun_column_1 = random.sample(time_functions,1)+random.sample(fun_fix_column,1) + time_fun_1 = str(fun_column_1).replace("[","").replace("]","").replace("'","").replace(", ","") + fun_column_2 = random.sample(time_functions,1)+random.sample(fun_fix_column,1) + time_fun_2 = str(fun_column_2).replace("[","").replace("]","").replace("'","").replace(", ","") + + fun_fix_column_j = ['()'] + fun_column_join_1 = random.sample(time_functions,1)+random.sample(fun_fix_column_j,1) + time_fun_join_1 = str(fun_column_join_1).replace("[","").replace("]","").replace("'","").replace(", ","") + fun_column_join_2 = random.sample(time_functions,1)+random.sample(fun_fix_column_j,1) + time_fun_join_2 = str(fun_column_join_2).replace("[","").replace("]","").replace("'","").replace(", ","") + + elif (timelist == ['TIMETRUNCATE']): + time_functions = timelist + + t = time.time() + t_to_s = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(t)) + fun_fix_column = ['q_ts','ts','_c0','_C0','_rowts','1600000000000','1600000000000000','1600000000000000000', + '%d' %t, '%d000' %t, '%d000000' %t,'t_to_s'] + + timeunits = ['1u' , '1a' ,'1s', '1m' ,'1h', '1d'] + timeunit = str(random.sample(timeunits,1)).replace("[","").replace("]","").replace("'","") + + column_1 = ['(%s,timeutil)'%(random.sample(fun_fix_column,1))] + fun_column_1 = random.sample(time_functions,1)+random.sample(column_1,1) + time_fun_1 = str(fun_column_1).replace("[","").replace("]","").replace("'","").replace(", ","").replace("\"","").replace("t_to_s","'t_to_s'") + time_fun_1 = str(time_fun_1).replace("timeutil","%s" %timeunit).replace("t_to_s","%s" %t_to_s) + + column_2 = ['(%s,timeutil)'%(random.sample(fun_fix_column,1))] + fun_column_2 = random.sample(time_functions,1)+random.sample(column_2,1) + time_fun_2 = str(fun_column_2).replace("[","").replace("]","").replace("'","").replace(", ","").replace("\"","").replace("t_to_s","'t_to_s'") + time_fun_2 = str(time_fun_2).replace("timeutil","%s" %timeunit).replace("t_to_s","%s" %t_to_s) + + + fun_fix_column_j = ['(t1.q_ts)','(t1.ts)', '(t2.q_ts)','(t2.ts)','(1600000000000)','(1600000000000000)','(1600000000000000000)', + '(%d)' %t, '(%d000)' %t, '(%d000000)' %t,'t_to_s'] + + column_j1 = ['(%s,timeutil)'%(random.sample(fun_fix_column_j,1))] + fun_column_join_1 = random.sample(time_functions,1)+random.sample(column_j1,1) + time_fun_join_1 = str(fun_column_join_1).replace("[","").replace("]","").replace("'","").replace(", ","").replace("\"","").replace("t_to_s","'t_to_s'") + time_fun_join_1 = str(time_fun_join_1).replace("timeutil","%s" %timeunit).replace("t_to_s","%s" %t_to_s) + + column_j2 = ['(%s,timeutil)'%(random.sample(fun_fix_column_j,1))] + fun_column_join_2 = random.sample(time_functions,1)+random.sample(column_j2,1) + time_fun_join_2 = str(fun_column_join_2).replace("[","").replace("]","").replace("'","").replace(", ","").replace("\"","").replace("t_to_s","'t_to_s'") + time_fun_join_2 = str(time_fun_join_2).replace("timeutil","%s" %timeunit).replace("t_to_s","%s" %t_to_s) + + elif (timelist == 
['TO_ISO8601']): + time_functions = timelist + + t = time.time() + fun_fix_column = ['(now())','(ts)','(q_ts)','(_rowts)','(_c0)','(_C0)', + '(1600000000000)','(1600000000000000)','(1600000000000000000)', + '(%d)' %t, '(%d000)' %t, '(%d000000)' %t] + + fun_column_1 = random.sample(time_functions,1)+random.sample(fun_fix_column,1) + time_fun_1 = str(fun_column_1).replace("[","").replace("]","").replace("'","").replace(", ","") + + fun_column_2 = random.sample(time_functions,1)+random.sample(fun_fix_column,1) + time_fun_2 = str(fun_column_2).replace("[","").replace("]","").replace("'","").replace(", ","") + + fun_fix_column_j = ['(t1.q_ts)','(t1.ts)', '(t2.q_ts)','(t2.ts)','(1600000000000)','(1600000000000000)','(1600000000000000000)','(now())', + '(%d)' %t, '(%d000)' %t, '(%d000000)' %t] + + fun_column_join_1 = random.sample(time_functions,1)+random.sample(fun_fix_column_j,1) + time_fun_join_1 = str(fun_column_join_1).replace("[","").replace("]","").replace("'","").replace(", ","") + + fun_column_join_2 = random.sample(time_functions,1)+random.sample(fun_fix_column_j,1) + time_fun_join_2 = str(fun_column_join_2).replace("[","").replace("]","").replace("'","").replace(", ","") + + elif (timelist == ['TO_UNIXTIMESTAMP']): + time_functions = timelist + + t = time.time() + t_to_s = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(t)) + fun_fix_column = ['(q_nchar)','(q_nchar1)','(q_nchar2)','(q_nchar3)','(q_nchar4)','(q_nchar_null)','(q_binary)','(q_binary5)','(q_binary6)','(q_binary7)','(q_binary8)','(q_binary_null)','(t_to_s)'] + + fun_column_1 = random.sample(time_functions,1)+random.sample(fun_fix_column,1) + time_fun_1 = str(fun_column_1).replace("[","").replace("]","").replace("'","").replace(", ","").replace("t_to_s","'t_to_s'") + time_fun_1 = str(time_fun_1).replace("t_to_s","%s" %t_to_s) + + fun_column_2 = random.sample(time_functions,1)+random.sample(fun_fix_column,1) + time_fun_2 = str(fun_column_2).replace("[","").replace("]","").replace("'","").replace(", ","").replace("t_to_s","'t_to_s'") + time_fun_2 = str(time_fun_2).replace("t_to_s","%s" %t_to_s) + + fun_fix_column_j = ['(t1.q_nchar)','(t1.q_binary)', '(t2.q_nchar)','(t2.q_binary)','(t_to_s)'] + + fun_column_join_1 = random.sample(time_functions,1)+random.sample(fun_fix_column_j,1) + time_fun_join_1 = str(fun_column_join_1).replace("[","").replace("]","").replace("'","").replace(", ","").replace("t_to_s","'t_to_s'") + time_fun_join_1 = str(time_fun_join_1).replace("t_to_s","%s" %t_to_s) + + fun_column_join_2 = random.sample(time_functions,1)+random.sample(fun_fix_column_j,1) + time_fun_join_2 = str(fun_column_join_2).replace("[","").replace("]","").replace("'","").replace(", ","").replace("t_to_s","'t_to_s'") + time_fun_join_2 = str(time_fun_join_2).replace("t_to_s","%s" %t_to_s) + + elif (timelist == ['TIMEDIFF']): + time_functions = timelist + + t = time.time() + t_to_s = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(t)) + fun_fix_column = ['(q_nchar)','(q_nchar1)','(q_nchar2)','(q_nchar3)','(q_nchar4)','(q_nchar_null)','(q_binary)','(q_binary5)','(q_binary6)','(q_binary7)','(q_binary8)','(q_binary_null)','(t_to_s)'] + + fun_column_1 = random.sample(time_functions,1)+random.sample(fun_fix_column,1) + time_fun_1 = str(fun_column_1).replace("[","").replace("]","").replace("'","").replace(", ","").replace("t_to_s","'t_to_s'") + time_fun_1 = str(time_fun_1).replace("t_to_s","%s" %t_to_s) + + fun_column_2 = random.sample(time_functions,1)+random.sample(fun_fix_column,1) + time_fun_2 = 
str(fun_column_2).replace("[","").replace("]","").replace("'","").replace(", ","").replace("t_to_s","'t_to_s'")
+ time_fun_2 = str(time_fun_2).replace("t_to_s","%s" %t_to_s)
+
+ fun_fix_column_j = ['(t1.q_nchar)','(t1.q_binary)', '(t2.q_nchar)','(t2.q_binary)','(t_to_s)']
+
+ fun_column_join_1 = random.sample(time_functions,1)+random.sample(fun_fix_column_j,1)
+ time_fun_join_1 = str(fun_column_join_1).replace("[","").replace("]","").replace("'","").replace(", ","").replace("t_to_s","'t_to_s'")
+ time_fun_join_1 = str(time_fun_join_1).replace("t_to_s","%s" %t_to_s)
+
+ fun_column_join_2 = random.sample(time_functions,1)+random.sample(fun_fix_column_j,1)
+ time_fun_join_2 = str(fun_column_join_2).replace("[","").replace("]","").replace("'","").replace(", ","").replace("t_to_s","'t_to_s'")
+ time_fun_join_2 = str(time_fun_join_2).replace("t_to_s","%s" %t_to_s)
+
+ elif (timelist == ['ELAPSED']):
+ time_functions = timelist
+
+ fun_fix_column = ['(ts)','(q_ts)','(_c0)','(_C0)','(_rowts)','(ts,time_unit)','(_c0,time_unit)','(_C0,time_unit)','(_rowts,time_unit)']
+
+ time_units = ['nums','numm','numh','numd','numa']
+ time_unit = str(random.sample(time_units,1)).replace("[","").replace("]","").replace("'","")
+ time_num1 = random.randint(0, 1000)
+ time_unit1 = time_unit.replace("num","%d" %time_num1)
+ time_num2 = random.randint(0, 1000)
+ time_unit2 = time_unit.replace("num","%d" %time_num2)
+
+ fun_column_1 = random.sample(time_functions,1)+random.sample(fun_fix_column,1)
+ time_fun_1 = str(fun_column_1).replace("[","").replace("]","").replace("'","").replace(", ","").replace("time_unit","%s" %time_unit1)
+
+ fun_column_2 = random.sample(time_functions,1)+random.sample(fun_fix_column,1)
+ time_fun_2 = str(fun_column_2).replace("[","").replace("]","").replace("'","").replace(", ","").replace("time_unit","%s" %time_unit2)
+
+ fun_fix_column_j = ['(t1.ts)','(t1.q_ts)', '(t2.ts)','(t2.q_ts)','(t1.ts,time_unit)','(t1.q_ts,time_unit)','(t2.ts,time_unit)','(t2.q_ts,time_unit)']
+
+ fun_column_join_1 = random.sample(time_functions,1)+random.sample(fun_fix_column_j,1)
+ time_fun_join_1 = str(fun_column_join_1).replace("[","").replace("]","").replace("'","").replace(", ","").replace("time_unit","%s" %time_unit1)
+
+ fun_column_join_2 = random.sample(time_functions,1)+random.sample(fun_fix_column_j,1)
+ time_fun_join_2 = str(fun_column_join_2).replace("[","").replace("]","").replace("'","").replace(", ","").replace("time_unit","%s" %time_unit2)
+
+ elif (timelist == ['CAST']):
+ str_functions = timelist
+ # the four CAST_1..CAST_4 branches below each cover one full type group; this branch only picks one of them at random
+ i = random.randint(1,4)
+ if i == 1:
+ print('===========cast_1===========')
+ fun_fix_column = ['q_bool','q_bool_null','q_bigint','q_bigint_null','q_smallint','q_smallint_null',
+ 'q_tinyint','q_tinyint_null','q_int','q_int_null','q_float','q_float_null','q_double','q_double_null']
+ type_names = ['BIGINT','BINARY(100)','TIMESTAMP','NCHAR(100)','BIGINT UNSIGNED']
+
+ type_name1 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","")
+ fun_column_1 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column,1))+' AS '+type_name1+')'
+ time_fun_1 = str(fun_column_1).replace("[","").replace("]","").replace("'","")
+
+ type_name2 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","")
+ fun_column_2 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column,1))+' AS '+type_name2+')'
+ time_fun_2 = str(fun_column_2).replace("[","").replace("]","").replace("'","")
+
+ fun_fix_column_j = 
['t1.q_bool','t1.q_bool_null','t1.q_bigint','t1.q_bigint_null','t1.q_smallint','t1.q_smallint_null', + 't1.q_tinyint','t1.q_tinyint_null','t1.q_int','t1.q_int_null','t1.q_float','t1.q_float_null','t1.q_double','t1.q_double_null', + 't2.q_bool','t2.q_bool_null','t2.q_bigint','t2.q_bigint_null','t2.q_smallint','t2.q_smallint_null', + 't2.q_tinyint','t2.q_tinyint_null','t2.q_int','t2.q_int_null','t2.q_float','t2.q_float_null','t2.q_double','t2.q_double_null'] + + type_name_j1 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","") + fun_column_join_1 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column_j,1))+' AS '+type_name_j1+')' + time_fun_join_1 = str(fun_column_join_1).replace("[","").replace("]","").replace("'","") + + type_name_j2 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","") + fun_column_join_2 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column_j,1))+' AS '+type_name_j2+')' + time_fun_join_2 = str(fun_column_join_2).replace("[","").replace("]","").replace("'","") + + elif i==2: + print('===========cast_2===========') + fun_fix_column = ['q_binary','q_binary_null','q_binary1','q_binary2','q_binary3','q_binary4'] + type_names = ['BIGINT','BINARY(100)','NCHAR(100)','BIGINT UNSIGNED'] + + type_name1 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","") + fun_column_1 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column,1))+' AS '+type_name1+')' + time_fun_1 = str(fun_column_1).replace("[","").replace("]","").replace("'","") + + type_name2 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","") + fun_column_2 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column,1))+' AS '+type_name2+')' + time_fun_2 = str(fun_column_2).replace("[","").replace("]","").replace("'","") + + fun_fix_column_j = ['t1.q_binary','t1.q_binary_null','t1.q_binary1','t1.q_binary2','t1.q_binary3','t1.q_smallint_null','t1.q_binary4', + 't2.q_binary','t2.q_binary_null','t2.q_bigint','t2.q_binary1','t2.q_binary2','t2.q_binary3','t2.q_binary4'] + + type_name_j1 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","") + fun_column_join_1 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column_j,1))+' AS '+type_name_j1+')' + time_fun_join_1 = str(fun_column_join_1).replace("[","").replace("]","").replace("'","") + + type_name_j2 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","") + fun_column_join_2 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column_j,1))+' AS '+type_name_j2+')' + time_fun_join_2 = str(fun_column_join_2).replace("[","").replace("]","").replace("'","") + + elif i==3: + print('===========cast_3===========') + fun_fix_column = ['q_nchar','q_nchar_null','q_nchar5','q_nchar6','q_nchar7','q_nchar8'] + type_names = ['BIGINT','NCHAR(100)','BIGINT UNSIGNED'] + + type_name1 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","") + fun_column_1 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column,1))+' AS '+type_name1+')' + time_fun_1 = str(fun_column_1).replace("[","").replace("]","").replace("'","") + + type_name2 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","") + fun_column_2 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column,1))+' AS '+type_name2+')' + time_fun_2 = 
str(fun_column_2).replace("[","").replace("]","").replace("'","") + + fun_fix_column_j = ['t1.q_nchar','t1.q_nchar_null','t1.q_nchar5','t1.q_nchar6','t1.q_nchar7','t1.q_nchar8', + 't2.q_nchar','t2.q_nchar_null','t2.q_nchar5','t2.q_nchar6','t2.q_nchar7','t2.q_nchar8'] + + type_name_j1 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","") + fun_column_join_1 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column_j,1))+' AS '+type_name_j1+')' + time_fun_join_1 = str(fun_column_join_1).replace("[","").replace("]","").replace("'","") + + type_name_j2 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","") + fun_column_join_2 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column_j,1))+' AS '+type_name_j2+')' + time_fun_join_2 = str(fun_column_join_2).replace("[","").replace("]","").replace("'","") + + elif i==4: + print('===========cast_4===========') + fun_fix_column = ['q_ts','q_ts_null','_C0','_c0','ts','_rowts'] + type_names = ['BIGINT','TIMESTAMP','BIGINT UNSIGNED'] + + type_name1 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","") + fun_column_1 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column,1))+' AS '+type_name1+')' + time_fun_1 = str(fun_column_1).replace("[","").replace("]","").replace("'","") + + type_name2 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","") + fun_column_2 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column,1))+' AS '+type_name2+')' + time_fun_2 = str(fun_column_2).replace("[","").replace("]","").replace("'","") + + fun_fix_column_j = ['t1.q_ts','t1.q_ts_null','t1.ts','t2.q_ts','t2.q_ts_null','t2.ts'] + + type_name_j1 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","") + fun_column_join_1 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column_j,1))+' AS '+type_name_j1+')' + time_fun_join_1 = str(fun_column_join_1).replace("[","").replace("]","").replace("'","") + + type_name_j2 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","") + fun_column_join_2 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column_j,1))+' AS '+type_name_j2+')' + time_fun_join_2 = str(fun_column_join_2).replace("[","").replace("]","").replace("'","") + + elif (timelist == ['CAST_1']) : + str_functions = timelist + + print('===========cast_1===========') + fun_fix_column = ['q_bool','q_bool_null','q_bigint','q_bigint_null','q_smallint','q_smallint_null', + 'q_tinyint','q_tinyint_null','q_int','q_int_null','q_float','q_float_null','q_double','q_double_null'] + type_names = ['BIGINT','BINARY(100)','TIMESTAMP','NCHAR(100)','BIGINT UNSIGNED'] + + type_name1 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","") + fun_column_1 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column,1))+' AS '+type_name1+')' + time_fun_1 = str(fun_column_1).replace("[","").replace("]","").replace("'","").replace("_1","") + + type_name2 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","") + fun_column_2 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column,1))+' AS '+type_name2+')' + time_fun_2 = str(fun_column_2).replace("[","").replace("]","").replace("'","").replace("_1","") + + fun_fix_column_j = ['t1.q_bool','t1.q_bool_null','t1.q_bigint','t1.q_bigint_null','t1.q_smallint','t1.q_smallint_null', + 
't1.q_tinyint','t1.q_tinyint_null','t1.q_int','t1.q_int_null','t1.q_float','t1.q_float_null','t1.q_double','t1.q_double_null', + 't2.q_bool','t2.q_bool_null','t2.q_bigint','t2.q_bigint_null','t2.q_smallint','t2.q_smallint_null', + 't2.q_tinyint','t2.q_tinyint_null','t2.q_int','t2.q_int_null','t2.q_float','t2.q_float_null','t2.q_double','t2.q_double_null'] + + type_name_j1 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","") + fun_column_join_1 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column_j,1))+' AS '+type_name_j1+')' + time_fun_join_1 = str(fun_column_join_1).replace("[","").replace("]","").replace("'","").replace("_1","") + + type_name_j2 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","") + fun_column_join_2 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column_j,1))+' AS '+type_name_j2+')' + time_fun_join_2 = str(fun_column_join_2).replace("[","").replace("]","").replace("'","").replace("_1","") + + elif (timelist == ['CAST_2']) : + str_functions = timelist + print('===========cast_2===========') + fun_fix_column = ['q_binary','q_binary_null','q_binary1','q_binary2','q_binary3','q_binary4'] + type_names = ['BIGINT','BINARY(100)','NCHAR(100)','BIGINT UNSIGNED'] + + type_name1 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","") + fun_column_1 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column,1))+' AS '+type_name1+')' + time_fun_1 = str(fun_column_1).replace("[","").replace("]","").replace("'","").replace("_2","") + + type_name2 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","") + fun_column_2 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column,1))+' AS '+type_name2+')' + time_fun_2 = str(fun_column_2).replace("[","").replace("]","").replace("'","").replace("_2","") + + fun_fix_column_j = ['t1.q_binary','t1.q_binary_null','t1.q_binary1','t1.q_binary2','t1.q_binary3','t1.q_smallint_null','t1.q_binary4', + 't2.q_binary','t2.q_binary_null','t2.q_bigint','t2.q_binary1','t2.q_binary2','t2.q_binary3','t2.q_binary4'] + + type_name_j1 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","") + fun_column_join_1 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column_j,1))+' AS '+type_name_j1+')' + time_fun_join_1 = str(fun_column_join_1).replace("[","").replace("]","").replace("'","").replace("_2","") + + type_name_j2 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","") + fun_column_join_2 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column_j,1))+' AS '+type_name_j2+')' + time_fun_join_2 = str(fun_column_join_2).replace("[","").replace("]","").replace("'","").replace("_2","") + + elif (timelist == ['CAST_3']) : + str_functions = timelist + print('===========cast_3===========') + fun_fix_column = ['q_nchar','q_nchar_null','q_nchar5','q_nchar6','q_nchar7','q_nchar8'] + type_names = ['BIGINT','NCHAR(100)','BIGINT UNSIGNED'] + + type_name1 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","") + fun_column_1 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column,1))+' AS '+type_name1+')' + time_fun_1 = str(fun_column_1).replace("[","").replace("]","").replace("'","").replace("_3","") + + type_name2 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","") + fun_column_2 = 
str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column,1))+' AS '+type_name2+')' + time_fun_2 = str(fun_column_2).replace("[","").replace("]","").replace("'","").replace("_3","") + + fun_fix_column_j = ['t1.q_nchar','t1.q_nchar_null','t1.q_nchar5','t1.q_nchar6','t1.q_nchar7','t1.q_nchar8', + 't2.q_nchar','t2.q_nchar_null','t2.q_nchar5','t2.q_nchar6','t2.q_nchar7','t2.q_nchar8'] + + type_name_j1 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","") + fun_column_join_1 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column_j,1))+' AS '+type_name_j1+')' + time_fun_join_1 = str(fun_column_join_1).replace("[","").replace("]","").replace("'","").replace("_3","") + + type_name_j2 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","") + fun_column_join_2 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column_j,1))+' AS '+type_name_j2+')' + time_fun_join_2 = str(fun_column_join_2).replace("[","").replace("]","").replace("'","").replace("_3","") + + elif (timelist == ['CAST_4']) : + str_functions = timelist + print('===========cast_4===========') + fun_fix_column = ['q_ts','q_ts_null','_C0','_c0','ts','_rowts'] + type_names = ['BIGINT','TIMESTAMP','BIGINT UNSIGNED'] + + type_name1 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","") + fun_column_1 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column,1))+' AS '+type_name1+')' + time_fun_1 = str(fun_column_1).replace("[","").replace("]","").replace("'","").replace("_4","") + + type_name2 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","") + fun_column_2 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column,1))+' AS '+type_name2+')' + time_fun_2 = str(fun_column_2).replace("[","").replace("]","").replace("'","").replace("_4","") + + fun_fix_column_j = ['t1.q_ts','t1.q_ts_null','t1.ts','t2.q_ts','t2.q_ts_null','t2.ts'] + + type_name_j1 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","") + fun_column_join_1 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column_j,1))+' AS '+type_name_j1+')' + time_fun_join_1 = str(fun_column_join_1).replace("[","").replace("]","").replace("'","").replace("_4","") + + type_name_j2 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","") + fun_column_join_2 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column_j,1))+' AS '+type_name_j2+')' + time_fun_join_2 = str(fun_column_join_2).replace("[","").replace("]","").replace("'","").replace("_4","") + + tdSql.query("select 1-1 as time_nest from stable_1 limit 1;") + for i in range(self.fornum): + if (timelist == ['NOW','TODAY']) or (timelist == ['TIMETRUNCATE']) or (timelist == ['TO_ISO8601'])\ + or (timelist == ['TO_UNIXTIMESTAMP']): + sql = "select ts , timediff(asct1,now) from ( select " + sql += "%s as asct1, " % time_fun_1 + sql += "%s as asct2, " % time_fun_2 + sql += "%s, " % random.choice(self.s_s_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts from regular_table_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.order_where) + sql += ");" + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + tdSql.checkRows(100) + self.cur1.execute(sql) + elif (timelist == ['TIMEZONE']) \ + or (timelist == ['CAST']) or (timelist == ['CAST_1']) or (timelist == ['CAST_2']) or (timelist == ['CAST_3']) or 
(timelist == ['CAST_4']): + sql = "select ts , asct1,now(),today(),timezone() from ( select " + sql += "%s as asct1, " % time_fun_1 + sql += "%s as asct2, " % time_fun_2 + sql += "%s, " % random.choice(self.s_s_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts from regular_table_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.order_where) + sql += ");" + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + tdSql.checkRows(100) + self.cur1.execute(sql) + elif (timelist == ['ELAPSED']) : + sql = "select max(asct1),now(),today(),timezone() from ( select " + sql += "%s as asct1, " % time_fun_1 + sql += "%s as asct2 " % time_fun_2 + sql += "from regular_table_1 where " + sql += "%s " % random.choice(self.q_where) + sql += ");" + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + + tdSql.query("select 1-2 as time_nest from stable_1 limit 1;") + for i in range(self.fornum): + if (timelist == ['NOW','TODAY']) or (timelist == ['TIMETRUNCATE']) or (timelist == ['TO_ISO8601'])\ + or (timelist == ['TO_UNIXTIMESTAMP']): + sql = "select ts , timediff(asct1,now),now(),today(),timezone() from ( select " + sql += "%s as asct1, " % time_fun_1 + sql += "%s, " % random.choice(self.s_s_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts ts from regular_table_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s )" % random.choice(self.order_where) + sql += "%s " % random.choice(self.unionall_or_union) + sql += "select ts , timediff(asct2,now),now(),today(),timezone() from ( select " + sql += "%s as asct2, " % time_fun_2 + sql += "%s, " % random.choice(self.s_s_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts ts from regular_table_1 where " + sql += "%s " % random.choice(self.q_where) + #sql += "%s " % random.choice(having_support) + sql += "%s " % random.choice(self.order_where) + sql += "%s " % random.choice(self.limit1_where) + sql += ")" + tdLog.info(sql) + tdLog.info(len(sql)) + #TD-15437 tdSql.query(sql) + #TD-15437 self.cur1.execute(sql) + elif (timelist == ['TIMEZONE']) or (timelist == ['CAST']) or (timelist == ['CAST_1']) or (timelist == ['CAST_2']) or (timelist == ['CAST_3']) or (timelist == ['CAST_4']): + sql = "select ts , (asct1),now(),today(),timezone() from ( select " + sql += "%s as asct1, " % time_fun_1 + sql += "%s, " % random.choice(self.s_s_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts ts from regular_table_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s )" % random.choice(self.order_where) + sql += "%s " % random.choice(self.unionall_or_union) + sql += "select ts , asct2,now(),today(),timezone() from ( select " + sql += "%s as asct2, " % time_fun_2 + sql += "%s, " % random.choice(self.s_s_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts ts from regular_table_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.order_where) + sql += "%s " % random.choice(self.limit1_where) + sql += ")" + tdLog.info(sql) + tdLog.info(len(sql)) + #TD-15437 tdSql.query(sql) + #TD-15437 self.cur1.execute(sql) + elif (timelist == ['ELAPSED']) : + sql = "select min(asct1),now(),today(),timezone() from ( select " + sql += "%s as asct1 " % time_fun_1 + sql += " from regular_table_1 where " + sql += "%s )" % random.choice(self.q_where) + sql += "%s " % random.choice(self.unionall_or_union) + sql += "select avg(asct2),now(),today(),timezone() from ( select " + sql += "%s 
as asct2 " % time_fun_2 + sql += " from regular_table_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.limit1_where) + sql += ")" + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + + tdSql.query("select 1-3 as time_nest from stable_1 limit 1;") + for i in range(self.fornum): + if (timelist == ['NOW','TODAY']) or (timelist == ['TIMETRUNCATE']) or (timelist == ['TO_ISO8601'])\ + or (timelist == ['TO_UNIXTIMESTAMP']): + sql = "select ts , timediff(asct1,now) from ( select " + sql += "%s as asct1, ts ," % time_fun_1 + sql += "%s as asct2, " % time_fun_2 + sql += "%s, " % random.choice(self.s_s_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts from regular_table_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s select " % random.choice(self.unionall_or_union) + sql += "%s as asct2, ts ," % time_fun_2 + sql += "%s as asct1, " % time_fun_1 + sql += "%s, " % random.choice(self.s_s_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts from regular_table_2 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.order_where) + sql += ")" + tdLog.info(sql) + tdLog.info(len(sql)) + #TD-15473 tdSql.query(sql) + #self.cur1.execute(sql) + elif (timelist == ['TIMEZONE']) or (timelist == ['CAST']) or (timelist == ['CAST_1']) or (timelist == ['CAST_2']) or (timelist == ['CAST_3']) or (timelist == ['CAST_4']): + sql = "select ts , (asct1),now(),today(),timezone() from ( select " + sql += "%s as asct1, ts ," % time_fun_1 + sql += "%s as asct2, " % time_fun_2 + sql += "%s, " % random.choice(self.s_s_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts from regular_table_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s select " % random.choice(self.unionall_or_union) + sql += "%s as asct2, ts ," % time_fun_2 + sql += "%s as asct1, " % time_fun_1 + sql += "%s, " % random.choice(self.s_s_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts from regular_table_2 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.order_where) + sql += ")" + tdLog.info(sql) + tdLog.info(len(sql)) + #TD-15473 tdSql.query(sql) + #self.cur1.execute(sql) + elif (timelist == ['ELAPSED']) : + sql = "select abs(asct1),now(),today(),timezone() from ( select " + sql += "%s as asct1," % time_fun_1 + sql += "%s as asct2 " % time_fun_2 + sql += "from regular_table_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s select " % random.choice(self.unionall_or_union) + sql += "%s as asct2," % time_fun_2 + sql += "%s as asct1 " % time_fun_1 + sql += "from regular_table_2 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.order_where) + sql += ")" + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + + tdSql.query("select 1-4 as time_nest from stable_1 limit 1;") + for i in range(self.fornum): + if (timelist == ['NOW','TODAY']) or (timelist == ['TIMETRUNCATE']) or (timelist == ['TO_ISO8601'])\ + or (timelist == ['TO_UNIXTIMESTAMP']): + sql = "select ts , timediff(asct1,now) from ( select t1.ts as ts," + sql += "%s, " % time_fun_join_1 + sql += "%s as asct1, " % time_fun_join_2 + sql += "%s, " % time_fun_join_1 + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) + sql += "t2.ts from regular_table_1 t1 , regular_table_2 t2 where t1.ts = t2.ts and " + sql += "%s " % 
random.choice(self.q_u_where) + sql += "and %s " % random.choice(self.q_u_or_where) + sql += "%s " % random.choice(self.order_u_where) + sql += ");" + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + tdSql.checkRows(100) + self.cur1.execute(sql) + elif (timelist == ['TIMEZONE']) or (timelist == ['CAST']) or (timelist == ['CAST_1']) or (timelist == ['CAST_2']) or (timelist == ['CAST_3']) or (timelist == ['CAST_4']): + sql = "select ts , (asct1) from ( select t1.ts as ts," + sql += "%s, " % time_fun_join_1 + sql += "%s as asct1, " % time_fun_join_2 + sql += "%s, " % time_fun_join_1 + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) + sql += "t2.ts from regular_table_1 t1 , regular_table_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.q_u_where) + sql += "and %s " % random.choice(self.q_u_or_where) + sql += "%s " % random.choice(self.order_u_where) + sql += ");" + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + tdSql.checkRows(100) + self.cur1.execute(sql) + elif (timelist == ['ELAPSED']) : + sql = "select floor(asct1) from ( select " + sql += "%s, " % time_fun_join_1 + sql += "%s as asct1, " % time_fun_join_2 + sql += "%s " % time_fun_join_1 + sql += " from regular_table_1 t1 , regular_table_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.q_u_where) + sql += "and %s " % random.choice(self.q_u_or_where) + sql += ");" + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + + tdSql.query("select 1-5 as time_nest from stable_1 limit 1;") + for i in range(self.fornum): + if (timelist == ['ELAPSED']) : + sql = "select now(),today(),timezone(), " + sql += "%s, " % time_fun_1 + sql += "%s " % time_fun_2 + sql += " from ( select * from regular_table_1 ) where " + sql += "%s " % random.choice(self.q_where) + sql += " ;" + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + else: + sql = "select ts ,now(),today(),timezone(), " + sql += "%s, " % time_fun_1 + sql += "%s, " % random.choice(self.q_select) + sql += "%s, " % random.choice(self.q_select) + sql += "%s " % time_fun_2 + sql += " from ( select * from regular_table_1 ) where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.order_where) + sql += " ;" + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + tdSql.checkRows(100) + self.cur1.execute(sql) + + tdSql.query("select 1-6 as time_nest from stable_1 limit 1;") + for i in range(self.fornum): + if (timelist == ['NOW','TODAY']) or (timelist == ['TIMETRUNCATE']) or (timelist == ['TO_ISO8601'])\ + or (timelist == ['TO_UNIXTIMESTAMP']): + sql = "select ts , timediff(asct1,now) from ( select t1.ts as ts," + sql += "%s, " % time_fun_join_1 + sql += "%s as asct1, " % time_fun_join_2 + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) + sql += "%s, " % time_fun_join_1 + sql += "t2.ts from regular_table_1 t1 , regular_table_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.q_u_where) + sql += "and %s )" % random.choice(self.q_u_or_where) + sql += "%s " % random.choice(self.order_where) + sql += "%s " % random.choice(self.limit1_where) + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + elif (timelist == ['TIMEZONE']) or (timelist == ['CAST']) or (timelist == ['CAST_1']) or (timelist == ['CAST_2']) or (timelist == ['CAST_3']) or (timelist == ['CAST_4']): + sql = "select ts , (asct1) from ( select 
t1.ts as ts," + sql += "%s, " % time_fun_join_1 + sql += "%s as asct1, " % time_fun_join_2 + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) + sql += "%s, " % time_fun_join_1 + sql += "t2.ts from regular_table_1 t1 , regular_table_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.q_u_where) + sql += "and %s )" % random.choice(self.q_u_or_where) + sql += "%s " % random.choice(self.order_where) + sql += "%s " % random.choice(self.limit1_where) + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + elif (timelist == ['ELAPSED']) : + sql = "select (asct1)*111 from ( select " + sql += "%s, " % time_fun_join_1 + sql += "%s as asct1, " % time_fun_join_2 + sql += "%s " % time_fun_join_1 + sql += " from regular_table_1 t1 , regular_table_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.q_u_where) + sql += "and %s )" % random.choice(self.q_u_or_where) + sql += "%s " % random.choice(self.limit1_where) + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + + tdSql.query("select 1-7 as time_nest from stable_1 limit 1;") + for i in range(self.fornum): + if (timelist == ['NOW','TODAY']) or (timelist == ['TIMETRUNCATE']) or (timelist == ['TO_ISO8601'])\ + or (timelist == ['TO_UNIXTIMESTAMP']): + sql = "select ts , timediff(asct1,now) from ( select " + sql += "%s as asct1, ts ," % time_fun_1 + sql += "%s as asct2, " % time_fun_2 + sql += "%s, " % random.choice(self.q_select) + sql += "%s, " % random.choice(self.t_select) + sql += "ts from stable_1 where " + sql += "%s " % random.choice(self.qt_where) + sql += "%s " % random.choice(self.order_where) + sql += ") ;" + tdLog.info(sql) + tdLog.info(len(sql)) + # tdSql.query(sql) # TD-16039 + # tdSql.checkRows(300) + # self.cur1.execute(sql) + elif (timelist == ['TIMEZONE']) or (timelist == ['CAST']) or (timelist == ['CAST_1']) or (timelist == ['CAST_2']) or (timelist == ['CAST_3']) or (timelist == ['CAST_4']): + sql = "select ts , (asct1),now(),today(),timezone() from ( select " + sql += "%s as asct1, ts ," % time_fun_1 + sql += "%s as asct2, " % time_fun_2 + sql += "%s, " % random.choice(self.q_select) + sql += "%s, " % random.choice(self.t_select) + sql += "ts from stable_1 where " + sql += "%s " % random.choice(self.qt_where) + sql += "%s " % random.choice(self.order_where) + sql += ") ;" + tdLog.info(sql) + tdLog.info(len(sql)) + # tdSql.query(sql) # TD-16039 + # tdSql.checkRows(300) + # self.cur1.execute(sql) + elif (timelist == ['ELAPSED']) : + sql = "select (asct1)/asct2 ,now(),today(),timezone() from ( select " + sql += "%s as asct1, " % time_fun_1 + sql += "%s as asct2 " % time_fun_2 + sql += "from stable_1 where " + sql += "%s " % random.choice(self.qt_where) + sql += ") ;" + tdLog.info(sql) + tdLog.info(len(sql)) + # tdSql.query(sql) #同时出现core:TD-16095和TD-16042 + # self.cur1.execute(sql) + + tdSql.query("select 1-8 as time_nest from stable_1 limit 1;") + for i in range(self.fornum): + if (timelist == ['NOW','TODAY']) or (timelist == ['TIMETRUNCATE']) or (timelist == ['TO_ISO8601'])\ + or (timelist == ['TO_UNIXTIMESTAMP']): + sql = "select ts , timediff(asct1,now) " + sql += "from ( select " + sql += "%s, " % random.choice(self.s_s_select) + sql += "%s as asct1, ts ," % time_fun_1 + sql += "%s as asct2, " % time_fun_2 + sql += "%s, " % random.choice(self.q_select) + sql += "%s, " % random.choice(self.t_select) + sql += "ts from stable_1 where " + sql += "%s " % random.choice(self.qt_where) + sql += "%s " 
+ sql += ") ;"
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ # tdSql.query(sql) # TD-16039
+ # tdSql.checkRows(300)
+ # self.cur1.execute(sql)
+ elif (timelist == ['TIMEZONE']) or (timelist == ['CAST']) or (timelist == ['CAST_1']) or (timelist == ['CAST_2']) or (timelist == ['CAST_3']) or (timelist == ['CAST_4']):
+ sql = "select ts , (asct1),now(),today(),timezone() "
+ sql += "from ( select "
+ sql += "%s, " % random.choice(self.s_s_select)
+ sql += "%s as asct1, ts ," % time_fun_1
+ sql += "%s as asct2, " % time_fun_2
+ sql += "%s, " % random.choice(self.q_select)
+ sql += "%s, " % random.choice(self.t_select)
+ sql += "ts from stable_1 where "
+ sql += "%s " % random.choice(self.qt_where)
+ sql += "%s " % random.choice(self.order_where)
+ sql += ") ;"
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ # tdSql.query(sql) # TD-16039
+ # tdSql.checkRows(300)
+ # self.cur1.execute(sql)
+ elif (timelist == ['ELAPSED']):
+ sql = "select floor(abs(asct1)),now(),today(),timezone() "
+ sql += "from ( select "
+ sql += "%s, " % random.choice(self.s_s_select)
+ sql += "%s as asct1, " % time_fun_1
+ sql += "%s as asct2 " % time_fun_2
+ sql += "from stable_1 where "
+ sql += "%s " % random.choice(self.qt_where)
+ sql += "%s " % random.choice(self.order_where)
+ sql += ") ;"
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ # tdSql.query(sql) # TD-16039
+ # self.cur1.execute(sql)
+
+ tdSql.query("select 1-9 as time_nest from stable_1 limit 1;")
+ for i in range(self.fornum):
+ if (timelist == ['NOW','TODAY']) or (timelist == ['TIMETRUNCATE']) or (timelist == ['TO_ISO8601'])\
+ or (timelist == ['TO_UNIXTIMESTAMP']):
+ sql = "select ts , timediff(asct1,now) from ( select t1.ts as ts,"
+ sql += "%s, " % time_fun_join_1
+ sql += "%s as asct1, " % time_fun_join_2
+ sql += "t1.%s, " % random.choice(self.q_select)
+ sql += "t1.%s, " % random.choice(self.q_select)
+ sql += "t2.%s, " % random.choice(self.q_select)
+ sql += "t2.%s, " % random.choice(self.q_select)
+ sql += "t2.ts from stable_1 t1 , stable_2 t2 where t1.ts = t2.ts and "
+ sql += "%s " % random.choice(self.t_join_where)
+ sql += "and %s " % random.choice(self.t_u_where)
+ sql += "and %s " % random.choice(self.t_u_or_where)
+ sql += "%s " % random.choice(self.order_u_where)
+ sql += "%s " % random.choice(self.limit1_where)
+ sql += ") ;"
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ # tdSql.query(sql) TD-16039
+ # self.cur1.execute(sql) TD-16039
+ elif (timelist == ['TIMEZONE']) or (timelist == ['CAST']) or (timelist == ['CAST_1']) or (timelist == ['CAST_2']) or (timelist == ['CAST_3']) or (timelist == ['CAST_4']):
+ sql = "select ts , asct1 from ( select t1.ts as ts,"
+ sql += "%s, " % time_fun_join_1
+ sql += "%s as asct1, " % time_fun_join_2
+ sql += "t1.%s, " % random.choice(self.q_select)
+ sql += "t1.%s, " % random.choice(self.q_select)
+ sql += "t2.%s, " % random.choice(self.q_select)
+ sql += "t2.%s, " % random.choice(self.q_select)
+ sql += "t2.ts from stable_1 t1 , stable_2 t2 where t1.ts = t2.ts and "
+ sql += "%s " % random.choice(self.t_join_where)
+ sql += "and %s " % random.choice(self.t_u_where)
+ sql += "and %s " % random.choice(self.t_u_or_where)
+ sql += "%s " % random.choice(self.order_u_where)
+ sql += "%s " % random.choice(self.limit1_where)
+ sql += ") ;"
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ # tdSql.query(sql)
+ # self.cur1.execute(sql) # TD-16039
+ elif (timelist == ['ELAPSED']):
+ sql = "select min(asct1*110) from ( select "
+ sql += "%s, " % time_fun_join_1
+ sql += "%s as asct1 " % time_fun_join_2
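+ # A hedged sketch of the statement this ELAPSED branch assembles; the
+ # fragments are drawn at random from self.t_join_where etc., so the
+ # <placeholders> below stand in for whichever fragment is chosen, e.g.
+ # time_fun_join_2 -> elapsed(t2.ts,<unit>):
+ #   select min(asct1*110) from ( select elapsed(t1.ts,<unit>), elapsed(t2.ts,<unit>) as asct1
+ #   from stable_1 t1 , stable_2 t2 where t1.ts = t2.ts and <t_join_where>
+ #   and <t_u_where> and <t_u_or_where> <limit1_where> ) ;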
+ sql += "from stable_1 t1 , stable_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.t_join_where) + sql += "and %s " % random.choice(self.t_u_where) + sql += "and %s " % random.choice(self.t_u_or_where) + sql += "%s " % random.choice(self.limit1_where) + sql += ") ;" + tdLog.info(sql) + tdLog.info(len(sql)) + # tdSql.query(sql) + # self.cur1.execute(sql) # TD-16039 + + self.restartDnodes() + tdSql.query("select 1-10 as time_nest from stable_1 limit 1;") + for i in range(self.fornum): + if (timelist == ['NOW','TODAY']) or (timelist == ['TIMETRUNCATE']) or (timelist == ['TO_ISO8601'])\ + or (timelist == ['TO_UNIXTIMESTAMP']): + sql = "select ts , timediff(asct1,now) from ( select " + sql += "%s as asct1, ts ," % time_fun_1 + sql += "%s as asct2, " % time_fun_2 + sql += "%s, " % random.choice(self.s_r_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts from stable_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.order_where) + sql += ") %s " % random.choice(self.unionall_or_union) + sql += "select ts , max(asct2) from ( select " + sql += "%s as asct1, ts ," % time_fun_1 + sql += "%s as asct2, " % time_fun_2 + sql += "%s, " % random.choice(self.s_r_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts from stable_2 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.order_where) + sql += "%s " % random.choice(self.limit1_where) + sql += ")" + tdLog.info(sql) + tdLog.info(len(sql)) + #TD-15437 tdSql.query(sql) + #TD-15437 self.cur1.execute(sql) + elif (timelist == ['TIMEZONE']) or (timelist == ['CAST']) or (timelist == ['CAST_1']) or (timelist == ['CAST_2']) or (timelist == ['CAST_3']) or (timelist == ['CAST_4']): + sql = "select ts , (asct1),now(),today(),timezone() from ( select " + sql += "%s as asct1, ts ," % time_fun_1 + sql += "%s as asct2, " % time_fun_2 + sql += "%s, " % random.choice(self.s_r_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts from stable_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.order_where) + sql += ") %s " % random.choice(self.unionall_or_union) + sql += "select ts , max(asct2),now(),today(),timezone() from ( select " + sql += "%s as asct1, ts ," % time_fun_1 + sql += "%s as asct2, " % time_fun_2 + sql += "%s, " % random.choice(self.s_r_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts from stable_2 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.order_where) + sql += "%s " % random.choice(self.limit1_where) + sql += ")" + tdLog.info(sql) + tdLog.info(len(sql)) + #TD-15437 tdSql.query(sql) + #TD-15437 self.cur1.execute(sql) + elif (timelist == ['ELAPSED']) : + sql = "select abs(asct1),now(),today(),timezone() from ( select " + sql += "%s as asct1 ," % time_fun_1 + sql += "%s as asct2 " % time_fun_2 + sql += " from stable_1 where " + sql += "%s " % random.choice(self.q_where) + sql += ") %s " % random.choice(self.unionall_or_union) + sql += "select max(asct2),now(),today(),timezone() from ( select " + sql += "%s as asct1 ," % time_fun_1 + sql += "%s as asct2 " % time_fun_2 + sql += "from stable_2 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.limit1_where) + sql += ")" + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + #TD-15437 self.cur1.execute(sql) + + #3 inter union not support + tdSql.query("select 1-11 as time_nest from stable_1 limit 1;") + for i in 
range(self.fornum): + if (timelist == ['NOW','TODAY']) or (timelist == ['TIMETRUNCATE']) or (timelist == ['TO_ISO8601'])\ + or (timelist == ['TO_UNIXTIMESTAMP']): + sql = "select ts , timediff(asct1,now), timediff(now,asct2) from ( select " + sql += "%s as asct1, ts ," % time_fun_1 + sql += "%s as asct2, " % time_fun_2 + sql += "%s, " % random.choice(self.s_r_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts from stable_1 where " + sql += "%s " % random.choice(self.q_where) + sql += " %s " % random.choice(self.unionall_or_union) + sql += " select " + sql += "%s as asct1, ts ," % time_fun_1 + sql += "%s as asct2, " % time_fun_2 + sql += "%s, " % random.choice(self.s_r_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts from stable_2 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.order_where) + sql += "%s " % random.choice(self.limit1_where) + sql += ")" + tdLog.info(sql) + tdLog.info(len(sql)) + # tdSql.query(sql)#TD-15473 + # self.cur1.execute(sql)#TD-15473 + elif (timelist == ['TIMEZONE']) or (timelist == ['CAST']) or (timelist == ['CAST_1']) or (timelist == ['CAST_2']) or (timelist == ['CAST_3']) or (timelist == ['CAST_4']): + sql = "select ts , (asct1,now()),(now(),asct2) from ( select " + sql += "%s as asct1, ts ," % time_fun_1 + sql += "%s as asct2, " % time_fun_2 + sql += "%s, " % random.choice(self.s_r_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts from stable_1 where " + sql += "%s " % random.choice(self.q_where) + sql += " %s " % random.choice(self.unionall_or_union) + sql += " select " + sql += "%s as asct1, ts ," % time_fun_1 + sql += "%s as asct2, " % time_fun_2 + sql += "%s, " % random.choice(self.s_r_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts from stable_2 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.order_where) + sql += "%s " % random.choice(self.limit1_where) + sql += ")" + tdLog.info(sql) + tdLog.info(len(sql)) + # tdSql.query(sql)#TD-15473 + # self.cur1.execute(sql)#TD-15473 + elif (timelist == ['ELAPSED']) : + sql = "select asct1+asct2,now(),today(),timezone() from ( select " + sql += "%s as asct1, " % time_fun_1 + sql += "%s as asct2 " % time_fun_2 + sql += " from stable_1 where " + sql += "%s " % random.choice(self.q_where) + sql += " %s " % random.choice(self.unionall_or_union) + sql += " select " + sql += "%s as asct1 ," % time_fun_1 + sql += "%s as asct2 " % time_fun_2 + sql += " from stable_2 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.order_where) + sql += "%s " % random.choice(self.limit1_where) + sql += ")" + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql)#TD-15473 + self.cur1.execute(sql)#TD-15473 + + tdSql.query("select 1-12 as time_nest from stable_1 limit 1;") + for i in range(self.fornum): + if (timelist == ['NOW','TODAY']) or (timelist == ['TIMETRUNCATE']) or (timelist == ['TO_ISO8601'])\ + or (timelist == ['TO_UNIXTIMESTAMP']): + sql = "select ts , timediff(asct1,now) from ( select t1.ts as ts," + sql += "%s, " % time_fun_join_1 + sql += "%s as asct1, " % time_fun_join_2 + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) + sql += "t2.ts from stable_1 t1 , stable_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.t_u_where) + sql += "and %s " % random.choice(self.t_u_or_where) 
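+ # The projection and join predicates are in place here; the following lines
+ # append the random ordering and limit before the subquery closes. One
+ # hypothetical rendering (randomly chosen fragments shown as <placeholders>,
+ # with timetruncate standing in for whichever time function this run tests):
+ #   select ts , timediff(asct1,now) from ( select t1.ts as ts,
+ #   timetruncate(t1.ts,<unit>), timetruncate(t2.ts,<unit>) as asct1,
+ #   t1.<col>, t1.<col>, t2.<col>, t2.<col>, t2.ts
+ #   from stable_1 t1 , stable_2 t2 where t1.ts = t2.ts
+ #   and <t_u_where> and <t_u_or_where> <order_u_where> <limit1_where> );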
+ sql += "%s " % random.choice(self.order_u_where) + sql += "%s " % random.choice(self.limit1_where) + sql += ");" + tdLog.info(sql) + tdLog.info(len(sql)) + # tdSql.query(sql) + # self.cur1.execute(sql)# TD-16039 + elif (timelist == ['TIMEZONE']) or (timelist == ['CAST']) or (timelist == ['CAST_1']) or (timelist == ['CAST_2']) or (timelist == ['CAST_3']) or (timelist == ['CAST_4']): + sql = "select ts , asct1,now() from ( select t1.ts as ts," + sql += "%s, " % time_fun_join_1 + sql += "%s as asct1, " % time_fun_join_2 + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) + sql += "t2.ts from stable_1 t1 , stable_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.t_u_where) + sql += "and %s " % random.choice(self.t_u_or_where) + sql += "%s " % random.choice(self.order_u_where) + sql += "%s " % random.choice(self.limit1_where) + sql += ");" + tdLog.info(sql) + tdLog.info(len(sql)) + # tdSql.query(sql) + # self.cur1.execute(sql)# TD-16039 + elif (timelist == ['ELAPSED']) : + sql = "select min(floor(asct1)),now() from ( select " + sql += "%s, " % time_fun_join_1 + sql += "%s as asct1 " % time_fun_join_2 + sql += " from stable_1 t1 , stable_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.t_u_where) + sql += "and %s " % random.choice(self.t_u_or_where) + sql += "%s " % random.choice(self.limit1_where) + sql += ");" + tdLog.info(sql) + tdLog.info(len(sql)) + # tdSql.query(sql) + # self.cur1.execute(sql)# TD-16039 + + tdSql.query("select 1-13 as time_nest from stable_1 limit 1;") + for i in range(self.fornum): + if (timelist == ['NOW','TODAY']) or (timelist == ['TIMETRUNCATE']) or (timelist == ['TO_ISO8601'])\ + or (timelist == ['TO_UNIXTIMESTAMP']): + sql = "select ts , timediff(%s,now)," % time_fun_2 + sql += "%s as asct1, " % time_fun_1 + sql += "%s, " % random.choice(self.q_select) + sql += "%s, " % random.choice(self.q_select) + sql += "%s " % time_fun_2 + sql += "%s " % random.choice(self.t_select) + sql += " from ( select * from stable_1 where " + sql += "%s " % random.choice(self.qt_where) + sql += "%s " % random.choice(self.order_where) + sql += ") ;" + tdLog.info(sql) + tdLog.info(len(sql)) + # tdSql.query(sql) + # tdSql.checkRows(300) + # self.cur1.execute(sql) # TD-16039 + elif (timelist == ['TIMEZONE']) or (timelist == ['CAST']) or (timelist == ['CAST_1']) or (timelist == ['CAST_2']) or (timelist == ['CAST_3']) or (timelist == ['CAST_4']): + sql = "select ts ,now(),today(),timezone(), " + sql += "%s as asct1, " % time_fun_1 + sql += "%s, " % random.choice(self.q_select) + sql += "%s, " % random.choice(self.q_select) + sql += "%s " % time_fun_2 + sql += "%s " % random.choice(self.t_select) + sql += " from ( select * from stable_1 where " + sql += "%s " % random.choice(self.qt_where) + sql += "%s " % random.choice(self.order_where) + sql += ") ;" + tdLog.info(sql) + tdLog.info(len(sql)) + # tdSql.query(sql) + # tdSql.checkRows(300) + # self.cur1.execute(sql) # TD-16039 + elif (timelist == ['ELAPSED']) : + sql = "select now(),today(),timezone(), " + sql += "%s as asct1, " % time_fun_1 + sql += "%s " % time_fun_2 + sql += " from ( select * from stable_1 where " + sql += "%s " % random.choice(self.qt_where) + sql += "%s " % random.choice(self.order_where) + sql += ") ;" + tdLog.info(sql) + tdLog.info(len(sql)) + # tdSql.query(sql) + # self.cur1.execute(sql) # TD-16039 + + tdSql.query("select 1-14 as time_nest 
from stable_1 limit 1;") + for i in range(self.fornum): + if (timelist == ['NOW','TODAY']) or (timelist == ['TIMETRUNCATE']) or (timelist == ['TO_ISO8601'])\ + or (timelist == ['TO_UNIXTIMESTAMP']): + sql = "select ts , timediff(asct1,now),timediff(now,asct2) from ( select ts ts ," + sql += "%s as asct1, " % time_fun_1 + sql += "%s as asct2" % time_fun_2 + sql += " from stable_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.partiton_where) + sql += "%s " % random.choice(self.order_desc_where) + sql += "%s " % random.choice([self.limit_where[2] , self.limit_where[3]] ) + sql += " ) ;" + tdLog.info(sql) + tdLog.info(len(sql)) + # tdSql.query(sql) + # self.cur1.execute(sql) # TD-16039 + elif (timelist == ['TIMEZONE']) or (timelist == ['CAST']) or (timelist == ['CAST_1']) or (timelist == ['CAST_2']) or (timelist == ['CAST_3']) or (timelist == ['CAST_4']): + sql = "select ts , (asct1),now(),(now()),asct2 from ( select ts ts ," + sql += "%s as asct1, " % time_fun_1 + sql += "%s as asct2" % time_fun_2 + sql += " from stable_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.partiton_where) + sql += "%s " % random.choice(self.order_desc_where) + sql += "%s " % random.choice([self.limit_where[2] , self.limit_where[3]] ) + sql += " ) ;" + tdLog.info(sql) + tdLog.info(len(sql)) + # tdSql.query(sql) + # self.cur1.execute(sql) # TD-16039 + elif (timelist == ['ELAPSED']) : + sql = "select ts , (asct1)*asct2,now(),(now()) from ( select " + sql += "%s as asct1, " % time_fun_1 + sql += "%s as asct2" % time_fun_2 + sql += " from stable_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.partiton_where) + sql += "%s " % random.choice([self.limit_where[2] , self.limit_where[3]] ) + sql += " ) ;" + tdLog.info(sql) + tdLog.info(len(sql)) + # tdSql.query(sql) + # self.cur1.execute(sql) # TD-16039 + + tdSql.query("select 1-15 as time_nest from stable_1 limit 1;") + for i in range(self.fornum): + if (timelist == ['NOW','TODAY']) or (timelist == ['TIMETRUNCATE']) or (timelist == ['TO_ISO8601'])\ + or (timelist == ['TO_UNIXTIMESTAMP']): + sql = "select ts , timediff(asct1,now),timediff(now,asct2) from ( select t1.ts as ts," + sql += "%s as asct2, " % time_fun_join_1 + sql += "%s as asct1, " % time_fun_join_2 + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) + sql += "t2.%s " % random.choice(self.q_select) + sql += "from stable_1 t1, stable_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.t_join_where) + sql += " and %s " % random.choice(self.qt_u_or_where) + sql += "%s " % random.choice(self.partiton_where_j) + sql += "%s " % random.choice(self.slimit1_where) + sql += ") " + sql += "%s " % random.choice(self.order_desc_where) + sql += "%s ;" % random.choice(self.limit_u_where) + tdLog.info(sql) + tdLog.info(len(sql)) + # tdSql.query(sql) + # self.cur1.execute(sql) # TD-16039 + elif (timelist == ['TIMEZONE']) or (timelist == ['CAST']) or (timelist == ['CAST_1']) or (timelist == ['CAST_2']) or (timelist == ['CAST_3']) or (timelist == ['CAST_4']): + sql = "select ts , asct1,(now()),(now()),asct2 ,now(),today(),timezone() from ( select t1.ts as ts," + sql += "%s as asct2, " % time_fun_join_1 + sql += "%s as asct1, " % time_fun_join_2 + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) + 
sql += "t2.%s " % random.choice(self.q_select) + sql += "from stable_1 t1, stable_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.t_join_where) + sql += " and %s " % random.choice(self.qt_u_or_where) + sql += "%s " % random.choice(self.partiton_where_j) + sql += "%s " % random.choice(self.slimit1_where) + sql += ") " + sql += "%s " % random.choice(self.order_desc_where) + sql += "%s ;" % random.choice(self.limit_u_where) + tdLog.info(sql) + tdLog.info(len(sql)) + # tdSql.query(sql) + # self.cur1.execute(sql) # TD-16039 + elif (timelist == ['ELAPSED']) : + sql = "select asct1,(now()),(now()),asct2 ,now(),today(),timezone() from ( select " + sql += "%s as asct2, " % time_fun_join_1 + sql += "%s as asct1 " % time_fun_join_2 + sql += "from stable_1 t1, stable_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.t_join_where) + sql += " and %s " % random.choice(self.qt_u_or_where) + sql += "%s " % random.choice(self.partiton_where_j) + sql += "%s " % random.choice(self.slimit1_where) + sql += ") " + sql += "%s ;" % random.choice(self.limit_u_where) + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) # TD-16039 + + #taos -f sql + startTime_taos_f = time.time() + print("taos -f %s sql start!" %timelist) + taos_cmd1 = "taos -f %s/%s.sql" % (self.testcasePath,self.testcaseFilename) + _ = subprocess.check_output(taos_cmd1, shell=True).decode("utf-8") + print("taos -f %s sql over!" %timelist) + endTime_taos_f = time.time() + print("taos_f total time %ds" % (endTime_taos_f - startTime_taos_f)) + + print("=========%s====over=============" %timelist) + + def base_nest(self,baselist): + + print("==========%s===start=============" %baselist) + os.system("rm -rf %s/%s.sql" % (self.testcasePath,self.testcaseFilename)) + + self.dropandcreateDB_random("%s" %self.db_nest, 1) + + if (baselist == ['A']) or (baselist == ['S']) or (baselist == ['F']) \ + or (baselist == ['C']): + base_functions = baselist + fun_fix_column = ['(q_bigint)','(q_smallint)','(q_tinyint)','(q_int)','(q_float)','(q_double)','(q_bigint_null)','(q_smallint_null)','(q_tinyint_null)','(q_int_null)','(q_float_null)','(q_double_null)'] + fun_column_1 = random.sample(base_functions,1)+random.sample(fun_fix_column,1) + base_fun_1 = str(fun_column_1).replace("[","").replace("]","").replace("'","").replace(", ","") + fun_column_2 = random.sample(base_functions,1)+random.sample(fun_fix_column,1) + base_fun_2 = str(fun_column_2).replace("[","").replace("]","").replace("'","").replace(", ","") + + fun_fix_column_j = ['(t1.q_bigint)','(t1.q_smallint)','(t1.q_tinyint)','(t1.q_int)','(t1.q_float)','(t1.q_double)','(t1.q_bigint_null)','(t1.q_smallint_null)','(t1.q_tinyint_null)','(t1.q_int_null)','(t1.q_float_null)','(t1.q_double_null)', + '(t2.q_bigint)','(t2.q_smallint)','(t2.q_tinyint)','(t2.q_int)','(t2.q_float)','(t2.q_double)','(t2.q_bigint_null)','(t2.q_smallint_null)','(t2.q_tinyint_null)','(t2.q_int_null)','(t2.q_float_null)','(t2.q_double_null)'] + fun_column_join_1 = random.sample(base_functions,1)+random.sample(fun_fix_column_j,1) + base_fun_join_1 = str(fun_column_join_1).replace("[","").replace("]","").replace("'","").replace(", ","") + fun_column_join_2 = random.sample(base_functions,1)+random.sample(fun_fix_column_j,1) + base_fun_join_2 = str(fun_column_join_2).replace("[","").replace("]","").replace("'","").replace(", ","") + + elif (baselist == ['P']) or (baselist == ['M']) or (baselist == ['S'])or (baselist == ['T']): + base_functions = baselist + num = random.randint(0, 
+    def base_nest(self,baselist):
+
+        print("==========%s===start=============" %baselist)
+        os.system("rm -rf %s/%s.sql" % (self.testcasePath,self.testcaseFilename))
+
+        self.dropandcreateDB_random("%s" %self.db_nest, 1)
+
+        if (baselist == ['A']) or (baselist == ['S']) or (baselist == ['F']) \
+            or (baselist == ['C']):
+            base_functions = baselist
+            fun_fix_column = ['(q_bigint)','(q_smallint)','(q_tinyint)','(q_int)','(q_float)','(q_double)','(q_bigint_null)','(q_smallint_null)','(q_tinyint_null)','(q_int_null)','(q_float_null)','(q_double_null)']
+            fun_column_1 = random.sample(base_functions,1)+random.sample(fun_fix_column,1)
+            base_fun_1 = str(fun_column_1).replace("[","").replace("]","").replace("'","").replace(", ","")
+            fun_column_2 = random.sample(base_functions,1)+random.sample(fun_fix_column,1)
+            base_fun_2 = str(fun_column_2).replace("[","").replace("]","").replace("'","").replace(", ","")
+
+            fun_fix_column_j = ['(t1.q_bigint)','(t1.q_smallint)','(t1.q_tinyint)','(t1.q_int)','(t1.q_float)','(t1.q_double)','(t1.q_bigint_null)','(t1.q_smallint_null)','(t1.q_tinyint_null)','(t1.q_int_null)','(t1.q_float_null)','(t1.q_double_null)',
+                '(t2.q_bigint)','(t2.q_smallint)','(t2.q_tinyint)','(t2.q_int)','(t2.q_float)','(t2.q_double)','(t2.q_bigint_null)','(t2.q_smallint_null)','(t2.q_tinyint_null)','(t2.q_int_null)','(t2.q_float_null)','(t2.q_double_null)']
+            fun_column_join_1 = random.sample(base_functions,1)+random.sample(fun_fix_column_j,1)
+            base_fun_join_1 = str(fun_column_join_1).replace("[","").replace("]","").replace("'","").replace(", ","")
+            fun_column_join_2 = random.sample(base_functions,1)+random.sample(fun_fix_column_j,1)
+            base_fun_join_2 = str(fun_column_join_2).replace("[","").replace("]","").replace("'","").replace(", ","")
+
+        elif (baselist == ['P']) or (baselist == ['M']) or (baselist == ['S']) or (baselist == ['T']):
+            base_functions = baselist
+            num = random.randint(0,1000)
+            fun_fix_column = ['(q_bigint,num)','(q_smallint,num)','(q_tinyint,num)','(q_int,num)','(q_float,num)','(q_double,num)',
+                '(q_bigint_null,num)','(q_smallint_null,num)','(q_tinyint_null,num)','(q_int_null,num)','(q_float_null,num)','(q_double_null,num)']
+            fun_column_1 = random.sample(base_functions,1)+random.sample(fun_fix_column,1)
+            base_fun_1 = str(fun_column_1).replace("[","").replace("]","").replace("'","").replace(", ","").replace("num",str(num))
+            fun_column_2 = random.sample(base_functions,1)+random.sample(fun_fix_column,1)
+            base_fun_2 = str(fun_column_2).replace("[","").replace("]","").replace("'","").replace(", ","").replace("num",str(num))
+
+            fun_fix_column_j = ['(t1.q_bigint,num)','(t1.q_smallint,num)','(t1.q_tinyint,num)','(t1.q_int,num)','(t1.q_float,num)','(t1.q_double,num)',
+                '(t1.q_bigint_null,num)','(t1.q_smallint_null,num)','(t1.q_tinyint_null,num)','(t1.q_int_null,num)','(t1.q_float_null,num)','(t1.q_double_null,num)',
+                '(t2.q_bigint,num)','(t2.q_smallint,num)','(t2.q_tinyint,num)','(t2.q_int,num)','(t2.q_float,num)','(t2.q_double,num)',
+                '(t2.q_bigint_null,num)','(t2.q_smallint_null,num)','(t2.q_tinyint_null,num)','(t2.q_int_null,num)','(t2.q_float_null,num)','(t2.q_double_null,num)']
+            fun_column_join_1 = random.sample(base_functions,1)+random.sample(fun_fix_column_j,1)
+            base_fun_join_1 = str(fun_column_join_1).replace("[","").replace("]","").replace("'","").replace(", ","").replace("num",str(num))
+            fun_column_join_2 = random.sample(base_functions,1)+random.sample(fun_fix_column_j,1)
+            base_fun_join_2 = str(fun_column_join_2).replace("[","").replace("]","").replace("'","").replace(", ","").replace("num",str(num))
+
+        tdSql.query("select 1-1 as base_nest from stable_1 limit 1;")
+        for i in range(self.fornum):
+            sql = "select ts , floor(asct1) from ( select "
+            sql += "%s as asct1, " % base_fun_1
+            sql += "%s as asct2, " % base_fun_2
+            sql += "%s, " % random.choice(self.s_s_select)
+            sql += "%s, " % random.choice(self.q_select)
+            sql += "ts from regular_table_1 where "
+            sql += "%s " % random.choice(self.q_where)
+            sql += "%s " % random.choice(self.order_where)
+            sql += ");"
+            tdLog.info(sql)
+            tdLog.info(len(sql))
+            tdSql.query(sql)
+            tdSql.checkRows(100)
+            self.cur1.execute(sql)
+
+        tdSql.query("select 1-2 as base_nest from stable_1 limit 1;")
+        for i in range(self.fornum):
+            sql = "select ts , abs(asct1) from ( select "
+            sql += "%s as asct1, " % base_fun_1
+            sql += "%s, " % random.choice(self.s_s_select)
+            sql += "%s, " % random.choice(self.q_select)
+            sql += "ts ts from regular_table_1 where "
+            sql += "%s " % random.choice(self.q_where)
+            sql += "%s )" % random.choice(self.order_where)
+            sql += "%s " % random.choice(self.unionall_or_union)
+            sql += "select ts , asct2 from ( select "
+            sql += "%s as asct2, " % base_fun_2
+            sql += "%s, " % random.choice(self.s_s_select)
+            sql += "%s, " % random.choice(self.q_select)
+            sql += "ts ts from regular_table_1 where "
+            sql += "%s " % random.choice(self.q_where)
+            #sql += "%s " % random.choice(having_support)
+            sql += "%s " % random.choice(self.order_where)
+            sql += "%s " % random.choice(self.limit1_where)
+            sql += ")"
+            tdLog.info(sql)
+            tdLog.info(len(sql))
+            #TD-15437 tdSql.query(sql)
+            #TD-15437 self.cur1.execute(sql)
+
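+        # 1-3 below builds the same union as 1-2 but swaps the asct1/asct2 alias
+        # order on the second side; like 1-2 it is only logged for now (TD-15473).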
" % random.choice(self.q_select) + sql += "ts from regular_table_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s select " % random.choice(self.unionall_or_union) + sql += "%s as asct2, ts ," % base_fun_2 + sql += "%s as asct1, " % base_fun_1 + sql += "%s, " % random.choice(self.s_s_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts from regular_table_2 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.order_where) + sql += ")" + tdLog.info(sql) + tdLog.info(len(sql)) + #TD-15473 tdSql.query(sql) + #self.cur1.execute(sql) + + tdSql.query("select 1-4 as base_nest from stable_1 limit 1;") + for i in range(self.fornum): + sql = "select ts , asct1 from ( select t1.ts as ts," + sql += "%s, " % base_fun_join_1 + sql += "%s as asct1, " % base_fun_join_2 + sql += "%s, " % base_fun_join_1 + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) + sql += "t2.ts from regular_table_1 t1 , regular_table_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.q_u_where) + sql += "and %s " % random.choice(self.q_u_or_where) + sql += "%s " % random.choice(self.order_u_where) + sql += ");" + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + tdSql.checkRows(100) + self.cur1.execute(sql) + + tdSql.query("select 1-5 as base_nest from stable_1 limit 1;") + for i in range(self.fornum): + sql = "select ts ," + sql += "%s, " % base_fun_1 + sql += "%s, " % random.choice(self.q_select) + sql += "%s, " % random.choice(self.q_select) + sql += "%s " % base_fun_2 + sql += " from ( select * from regular_table_1 ) where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.order_where) + sql += " ;" + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + tdSql.checkRows(100) + self.cur1.execute(sql) + + tdSql.query("select 1-6 as base_nest from stable_1 limit 1;") + for i in range(self.fornum): + sql = "select ts , max(asct1) from ( select t1.ts as ts," + sql += "%s, " % base_fun_join_1 + sql += "%s as asct1, " % base_fun_join_2 + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) + sql += "%s, " % base_fun_join_1 + sql += "t2.ts from regular_table_1 t1 , regular_table_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.q_u_where) + sql += "and %s )" % random.choice(self.q_u_or_where) + sql += "%s " % random.choice(self.order_where) + sql += "%s " % random.choice(self.limit1_where) + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + + tdSql.query("select 1-7 as base_nest from stable_1 limit 1;") + for i in range(self.fornum): + sql = "select ts , abs(asct1) from ( select " + sql += "%s as asct1, ts ," % base_fun_1 + sql += "%s as asct2, " % base_fun_2 + sql += "%s, " % random.choice(self.q_select) + sql += "%s, " % random.choice(self.t_select) + sql += "ts from stable_1 where " + sql += "%s " % random.choice(self.qt_where) + sql += "%s " % random.choice(self.order_where) + sql += ") ;" + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + tdSql.checkRows(300) + self.cur1.execute(sql) + + tdSql.query("select 1-8 as base_nest from stable_1 limit 1;") + for i in range(self.fornum): + sql = "select ts,floor(asct1) " + sql += "from ( select " + sql += "%s, " % random.choice(self.s_s_select) + sql += "%s as asct1, ts ," % base_fun_1 + sql += "%s as asct2, " % base_fun_2 + sql += "%s, " % random.choice(self.q_select) + sql += "%s, " % 
+        tdSql.query("select 1-8 as base_nest from stable_1 limit 1;")
+        for i in range(self.fornum):
+            sql = "select ts,floor(asct1) "
+            sql += "from ( select "
+            sql += "%s, " % random.choice(self.s_s_select)
+            sql += "%s as asct1, ts ," % base_fun_1
+            sql += "%s as asct2, " % base_fun_2
+            sql += "%s, " % random.choice(self.q_select)
+            sql += "%s, " % random.choice(self.t_select)
+            sql += "ts from stable_1 where "
+            sql += "%s " % random.choice(self.qt_where)
+            sql += "%s " % random.choice(self.order_where)
+            sql += ") ;"
+            tdLog.info(sql)
+            tdLog.info(len(sql))
+            tdSql.query(sql)
+            tdSql.checkRows(300)
+            self.cur1.execute(sql)
+
+        tdSql.query("select 1-9 as base_nest from stable_1 limit 1;")
+        for i in range(self.fornum):
+            sql = "select ts , max(asct1) from ( select t1.ts as ts,"
+            sql += "%s, " % base_fun_join_1
+            sql += "%s as asct1, " % base_fun_join_2
+            sql += "t1.%s, " % random.choice(self.q_select)
+            sql += "t1.%s, " % random.choice(self.q_select)
+            sql += "t2.%s, " % random.choice(self.q_select)
+            sql += "t2.%s, " % random.choice(self.q_select)
+            sql += "t2.ts from stable_1 t1 , stable_2 t2 where t1.ts = t2.ts and "
+            sql += "%s " % random.choice(self.t_join_where)
+            sql += "and %s " % random.choice(self.t_u_where)
+            sql += "and %s " % random.choice(self.t_u_or_where)
+            sql += "%s " % random.choice(self.order_u_where)
+            sql += "%s " % random.choice(self.limit1_where)
+            sql += ") ;"
+            tdLog.info(sql)
+            tdLog.info(len(sql))
+            tdSql.query(sql)
+            self.cur1.execute(sql)
+
+        self.restartDnodes()
+        tdSql.query("select 1-10 as base_nest from stable_1 limit 1;")
+        for i in range(self.fornum):
+            sql = "select ts , min(asct1) from ( select "
+            sql += "%s as asct1, ts ," % base_fun_1
+            sql += "%s as asct2, " % base_fun_2
+            sql += "%s, " % random.choice(self.s_r_select)
+            sql += "%s, " % random.choice(self.q_select)
+            sql += "ts from stable_1 where "
+            sql += "%s " % random.choice(self.q_where)
+            sql += "%s " % random.choice(self.order_where)
+            sql += ") %s " % random.choice(self.unionall_or_union)
+            sql += "select ts , max(asct2) from ( select "
+            sql += "%s as asct1, ts ," % base_fun_1
+            sql += "%s as asct2, " % base_fun_2
+            sql += "%s, " % random.choice(self.s_r_select)
+            sql += "%s, " % random.choice(self.q_select)
+            sql += "ts from stable_2 where "
+            sql += "%s " % random.choice(self.q_where)
+            sql += "%s " % random.choice(self.order_where)
+            sql += "%s " % random.choice(self.limit1_where)
+            sql += ")"
+            tdLog.info(sql)
+            tdLog.info(len(sql))
+            #TD-15437 tdSql.query(sql)
+            #TD-15437 self.cur1.execute(sql)
+
+        #3 inter union not support
+        tdSql.query("select 1-11 as base_nest from stable_1 limit 1;")
+        for i in range(self.fornum):
+            sql = "select ts , min(asct1), max(asct2) from ( select "
+            sql += "%s as asct1, ts ," % base_fun_1
+            sql += "%s as asct2, " % base_fun_2
+            sql += "%s, " % random.choice(self.s_r_select)
+            sql += "%s, " % random.choice(self.q_select)
+            sql += "ts from stable_1 where "
+            sql += "%s " % random.choice(self.q_where)
+            sql += "%s " % random.choice(self.order_where)
+            #sql += "%s " % random.choice(limit1_where)
+            sql += " %s " % random.choice(self.unionall_or_union)
+            sql += " select "
+            sql += "%s as asct1, ts ," % base_fun_1
+            sql += "%s as asct2, " % base_fun_2
+            sql += "%s, " % random.choice(self.s_r_select)
+            sql += "%s, " % random.choice(self.q_select)
+            sql += "ts from stable_2 where "
+            sql += "%s " % random.choice(self.q_where)
+            sql += "%s " % random.choice(self.order_where)
+            sql += "%s " % random.choice(self.limit1_where)
+            sql += ")"
+            tdLog.info(sql)
+            tdLog.info(len(sql))
+            #TD-15837 tdSql.query(sql)
+            # self.cur1.execute(sql)
+
+        tdSql.query("select 1-12 as base_nest from stable_1 limit 1;")
+        for i in range(self.fornum):
+            sql = "select ts , max(asct1) from ( select t1.ts as ts,"
+            sql += "%s, " % base_fun_join_1
+            sql += "%s as asct1, " % base_fun_join_2
+            sql += "t1.%s, " % random.choice(self.q_select)
+            sql += "t1.%s, " % random.choice(self.q_select)
+            sql += "t2.%s, " % random.choice(self.q_select)
+            sql += "t2.%s, " % random.choice(self.q_select)
+            sql += "t2.ts from stable_1 t1 , stable_2 t2 where t1.ts = t2.ts and "
+            sql += "%s " % random.choice(self.t_u_where)
+            sql += "and %s " % random.choice(self.t_u_or_where)
+            sql += "%s " % random.choice(self.order_u_where)
+            sql += "%s " % random.choice(self.limit1_where)
+            sql += ");"
+            tdLog.info(sql)
+            tdLog.info(len(sql))
+            tdSql.query(sql)
+            self.cur1.execute(sql)
+
+        tdSql.query("select 1-13 as base_nest from stable_1 limit 1;")
+        for i in range(self.fornum):
+            sql = "select ts ,"
+            sql += "%s, " % base_fun_1
+            sql += "%s, " % random.choice(self.q_select)
+            sql += "%s, " % random.choice(self.q_select)
+            sql += "%s " % base_fun_2
+            sql += "%s " % random.choice(self.t_select)
+            sql += " from ( select * from stable_1 where "
+            sql += "%s " % random.choice(self.qt_where)
+            sql += "%s " % random.choice(self.order_where)
+            sql += ") ;"
+            tdLog.info(sql)
+            tdLog.info(len(sql))
+            tdSql.query(sql)
+            tdSql.checkRows(300)
+            self.cur1.execute(sql)
+
+        tdSql.query("select 1-14 as base_nest from stable_1 limit 1;")
+        for i in range(self.fornum):
+            sql = "select avg(asct1),count(asct2) from ( select "
+            sql += "%s as asct1, " % base_fun_1
+            sql += "%s as asct2" % base_fun_2
+            sql += " from stable_1 where "
+            sql += "%s " % random.choice(self.q_where)
+            sql += "%s " % random.choice(self.partiton_where)
+            sql += "%s " % random.choice(self.order_desc_where)
+            sql += "%s " % random.choice([self.limit_where[2] , self.limit_where[3]] )
+            sql += " ) ;"
+            tdLog.info(sql)
+            tdLog.info(len(sql))
+            tdSql.query(sql)
+            self.cur1.execute(sql)
+
+        tdSql.query("select 1-15 as base_nest from stable_1 limit 1;")
+        for i in range(self.fornum):
+            sql = "select ts , max(asct1) from ( select t1.ts as ts,"
+            sql += "%s, " % base_fun_join_1
+            sql += "%s as asct1, " % base_fun_join_2
+            sql += "t1.%s, " % random.choice(self.q_select)
+            sql += "t1.%s, " % random.choice(self.q_select)
+            sql += "t2.%s, " % random.choice(self.q_select)
+            sql += "t2.%s " % random.choice(self.q_select)
+            sql += "from stable_1 t1, stable_2 t2 where t1.ts = t2.ts and "
+            sql += "%s " % random.choice(self.t_join_where)
+            sql += " and %s " % random.choice(self.qt_u_or_where)
+            sql += "%s " % random.choice(self.partiton_where_j)
+            sql += "%s " % random.choice(self.slimit1_where)
+            sql += ") "
+            sql += "%s " % random.choice(self.order_desc_where)
+            sql += "%s ;" % random.choice(self.limit_u_where)
+            tdLog.info(sql)
+            tdLog.info(len(sql))
+            tdSql.query(sql)
+            self.cur1.execute(sql)
+
+        #taos -f sql
+        startTime_taos_f = time.time()
+        print("taos -f %s sql start!" %baselist)
+        taos_cmd1 = "taos -f %s/%s.sql" % (self.testcasePath,self.testcaseFilename)
+        _ = subprocess.check_output(taos_cmd1, shell=True).decode("utf-8")
+        print("taos -f %s sql over!" %baselist)
+        endTime_taos_f = time.time()
+        print("taos_f total time %ds" % (endTime_taos_f - startTime_taos_f))
+
+        print("=========%s====over=============" %baselist)
+
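+    # function_before_26 replays the nested-query shapes covered by the pre-2.6
+    # test suite, so regressions against the old behaviour surface next to the
+    # randomized cases above.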
+    def function_before_26(self):
+
+        print('=====================2.6 old function start ===========')
+        os.system("rm -rf %s/%s.sql" % (self.testcasePath,self.testcaseFilename))
+
+        self.dropandcreateDB_random("%s" %self.db_nest, 1)
+
+        #1 select * from (select column from regular_table where <\>\in\and\or order by)
+        tdSql.query("select 1-1 from stable_1;")
+        for i in range(self.fornum):
+            #sql = "select ts , * from ( select "   === select * is not supported yet, use the next line instead
+            sql = "select ts from ( select "
+            sql += "%s, " % random.choice(self.s_s_select)
+            sql += "%s, " % random.choice(self.q_select)
+            sql += "ts from regular_table_1 where "
+            sql += "%s " % random.choice(self.q_where)
+            sql += "%s " % random.choice(self.order_where)
+            sql += ");"
+            tdLog.info(sql)
+            tdLog.info(len(sql))
+            tdSql.query(sql)
+            tdSql.checkRows(100)
+            self.cur1.execute(sql)
+
+        #1 outer union not support
+        #self.dropandcreateDB_random("%s" %db, 1)
+        tdSql.query("select 1-2 from stable_1;")
+        for i in range(self.fornum):
+            #sql = "select ts , * from ( select "
+            sql = "select ts from ( select "
+            sql += "%s, " % random.choice(self.s_r_select)
+            sql += "%s, " % random.choice(self.q_select)
+            sql += "ts from regular_table_1 where "
+            sql += "%s " % random.choice(self.q_where)
+            sql += "%s " % random.choice(self.order_where)
+            sql += ") union "
+            #sql += "select ts , * from ( select "
+            sql += "select ts from ( select "
+            sql += "%s, " % random.choice(self.s_r_select)
+            sql += "%s, " % random.choice(self.q_select)
+            sql += "ts from regular_table_1 where "
+            sql += "%s " % random.choice(self.q_where)
+            sql += "%s " % random.choice(self.order_where)
+            sql += ")"
+            tdLog.info(sql)
+            tdLog.info(len(sql))
+            tdSql.query(sql)
+            tdSql.checkRows(100)
+            self.cur1.execute(sql)
+
+        #self.dropandcreateDB_random("%s" %db, 1)
+        tdSql.query("select 1-2 from stable_1;")
+        for i in range(self.fornum):
+            #sql = "select ts , * from ( select "
+            sql = "select ts from ( select "
+            sql += "%s, " % random.choice(self.s_r_select)
+            sql += "%s, " % random.choice(self.q_select)
+            sql += "ts from regular_table_1 where "
+            sql += "%s " % random.choice(self.q_where)
+            sql += "%s " % random.choice(self.order_where)
+            sql += ") union all "
+            #sql += "select ts , * from ( select "
+            sql += "select ts from ( select "
+            sql += "%s, " % random.choice(self.s_r_select)
+            sql += "%s, " % random.choice(self.q_select)
+            sql += "ts from regular_table_1 where "
+            sql += "%s " % random.choice(self.q_where)
+            sql += "%s " % random.choice(self.order_where)
+            sql += ")"
+            tdLog.info(sql)
+            tdLog.info(len(sql))
+            tdSql.query(sql)
+            tdSql.checkRows(200)
+            self.cur1.execute(sql)
+
+        #1 inter union not support
+        tdSql.query("select 1-3 from stable_1;")
+        for i in range(self.fornum):
+            #sql = "select ts , * from ( select "
+            sql = "select ts from ( select "
+            sql += "%s, " % random.choice(self.s_r_select)
+            sql += "%s, " % random.choice(self.q_select)
+            sql += "ts from regular_table_1 where "
+            sql += "%s " % random.choice(self.q_where)
+            sql += ""
+            sql += " union select "
+            sql += "%s, " % random.choice(self.s_r_select)
+            sql += "%s, " % random.choice(self.q_select)
+            sql += "ts from regular_table_2 where "
+            sql += "%s " % random.choice(self.q_where)
+            sql += "%s " % random.choice(self.order_where)
+            sql += ")"
+            tdLog.info(sql)
+            tdLog.info(len(sql))
+            #TD-15607 tdSql.query(sql)
+            #tdSql.checkRows(200)
+            #self.cur1.execute(sql)
+
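+        # The inner-union case (1-3) stays disabled under TD-15607; the union all
+        # twin below is logged the same way and presumably unblocks together with it.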
+        tdSql.query("select 1-3 from stable_1;")
+        for i in range(self.fornum):
+            #sql = "select ts , * from ( select "
+            sql = "select ts from ( select "
+            sql += "%s, " % random.choice(self.s_r_select)
+            sql += "%s, " % random.choice(self.q_select)
+            sql += "ts from regular_table_1 where "
+            sql += "%s " % random.choice(self.q_where)
+            sql += " union all select "
+            sql += "%s, " % random.choice(self.s_r_select)
+            sql += "%s, " % random.choice(self.q_select)
+            sql += "ts from regular_table_2 where "
+            sql += "%s " % random.choice(self.q_where)
+            sql += "%s " % random.choice(self.order_where)
+            sql += ")"
+            tdLog.info(sql)
+            tdLog.info(len(sql))
+            #TD-15607 tdSql.query(sql)
+            # tdSql.checkRows(300)
+            #self.cur1.execute(sql)
+
+        #join:select * from (select column from regular_table1,regular_table2 where t1.ts=t2.ts and <\>\in\and\or order by)
+        #self.dropandcreateDB_random("%s" %db, 1)
+        tdSql.query("select 1-4 from stable_1;")
+        for i in range(self.fornum):
+            #sql = "select ts , * from ( select t1.ts ,"
+            sql = "select * from ( select t1.ts ,"
+            sql += "t1.%s, " % random.choice(self.q_select)
+            sql += "t1.%s, " % random.choice(self.q_select)
+            sql += "t2.%s, " % random.choice(self.q_select)
+            sql += "t2.%s, " % random.choice(self.q_select)
+            sql += "t2.ts from regular_table_1 t1 , regular_table_2 t2 where t1.ts = t2.ts and "
+            sql += "%s " % random.choice(self.q_u_where)
+            sql += "and %s " % random.choice(self.q_u_or_where)
+            sql += "%s " % random.choice(self.order_u_where)
+            sql += ");"
+            tdLog.info(sql)
+            tdLog.info(len(sql))
+            tdSql.query(sql)
+            tdSql.checkRows(100)
+            self.cur1.execute(sql)
+
+
+        #2 select column from (select * from regular_table ) where <\>\in\and\or order by
+        #self.dropandcreateDB_random("%s" %db, 1)
+        tdSql.query("select 2-1 from stable_1;")
+        for i in range(self.fornum):
+            sql = "select ts ,"
+            sql += "%s, " % random.choice(self.s_r_select)
+            sql += "%s " % random.choice(self.q_select)
+            sql += " from ( select * from regular_table_1 ) where "
+            sql += "%s " % random.choice(self.q_where)
+            sql += "%s " % random.choice(self.order_where)
+            sql += " ;"
+            tdLog.info(sql)
+            tdLog.info(len(sql))
+            tdSql.query(sql)
+            tdSql.checkRows(100)
+            self.cur1.execute(sql)
+
+        #join: select column from (select column from regular_table1,regular_table2 ) where t1.ts=t2.ts and <\>\in\and\or order by
+        #cross join not supported yet
+        tdSql.query("select 2-2 from stable_1;")
+        for i in range(self.fornum):
+            sql = "select ts , * from ( select t1.ts ,"
+            sql += "t1.%s, " % random.choice(self.q_select)
+            sql += "t1.%s, " % random.choice(self.q_select)
+            sql += "t2.%s, " % random.choice(self.q_select)
+            sql += "t2.%s, " % random.choice(self.q_select)
+            sql += "t2.ts from regular_table_1 t1 , regular_table_2 t2 ) where t1.ts = t2.ts and "
+            sql += "%s " % random.choice(self.q_u_where)
+            sql += "%s " % random.choice(self.order_u_where)
+            #sql += ");"
+            tdLog.info(sql)
+            tdLog.info(len(sql))
+            tdSql.error(sql)
+
+        #3 select * from (select column\tag from stable where <\>\in\and\or order by )
+        #self.dropandcreateDB_random("%s" %db, 1)
+        tdSql.query("select 3-1 from stable_1;")
+        for i in range(self.fornum):
+            sql = "select * from ( select "
+            sql += "%s, " % random.choice(self.s_s_select)
+            sql += "%s, " % random.choice(self.q_select)
+            sql += "%s, " % random.choice(self.t_select)
+            sql += "ts from stable_1 where "
+            sql += "%s " % random.choice(self.qt_where)
+            sql += "%s " % random.choice(self.order_where)
+            sql += ") ;"
+            tdLog.info(sql)
+            tdLog.info(len(sql))
+            tdSql.query(sql)
+            tdSql.checkRows(300)
+            self.cur1.execute(sql)
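+        # Second 3-1 pass: same nested select, but the outer query projects ts
+        # plus one random regular-column selector instead of *.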
+        tdSql.query("select 3-1 from stable_1;")
+        for i in range(self.fornum):
+            sql = "select ts, "
+            sql += "%s " % random.choice(self.s_r_select)
+            sql += "from ( select "
+            sql += "%s, " % random.choice(self.s_s_select)
+            sql += "%s, " % random.choice(self.q_select)
+            sql += "%s, " % random.choice(self.t_select)
+            sql += "ts from stable_1 where "
+            sql += "%s " % random.choice(self.qt_where)
+            sql += "%s " % random.choice(self.order_where)
+            sql += ") ;"
+            tdLog.info(sql)
+            tdLog.info(len(sql))
+            tdSql.query(sql)
+            tdSql.checkRows(300)
+            self.cur1.execute(sql)
+
+        # select ts,* from (select column\tag from stable1,stable2 where t1.ts = t2.ts and <\>\in\and\or order by )
+        #self.dropandcreateDB_random("%s" %db, 1)
+        tdSql.query("select 3-2 from stable_1;")
+        for i in range(self.fornum):
+            sql = "select ts , * from ( select t1.ts , "
+            sql += "t1.%s, " % random.choice(self.s_s_select)
+            sql += "t1.%s, " % random.choice(self.q_select)
+            sql += "t2.%s, " % random.choice(self.s_s_select)
+            sql += "t2.%s, " % random.choice(self.q_select)
+            sql += "t2.ts from stable_1 t1 , stable_2 t2 where t1.ts = t2.ts and "
+            sql += "%s " % random.choice(self.t_join_where)
+            sql += "%s " % random.choice(self.order_u_where)
+            sql += ") ;"
+            tdLog.info(sql)
+            tdLog.info(len(sql))
+            # TD-15609 tdSql.query(sql)
+            # tdSql.checkRows(100)
+            #self.cur1.execute(sql)
+
+        #3 outer union not support
+        self.restartDnodes()
+        tdSql.query("select 3-3 from stable_1;")
+        for i in range(self.fornum):
+            #sql = "select ts , * from ( select "
+            sql = "select ts from ( select "
+            sql += "%s, " % random.choice(self.s_r_select)
+            sql += "%s, " % random.choice(self.q_select)
+            sql += "ts from stable_1 where "
+            sql += "%s " % random.choice(self.q_where)
+            sql += "%s " % random.choice(self.order_where)
+            sql += ") union "
+            sql += "select ts from ( select "
+            sql += "%s, " % random.choice(self.s_r_select)
+            sql += "%s, " % random.choice(self.q_select)
+            sql += "ts from stable_2 where "
+            sql += "%s " % random.choice(self.q_where)
+            sql += "%s " % random.choice(self.order_where)
+            sql += ")"
+            tdLog.info(sql)
+            tdLog.info(len(sql))
+            tdSql.query(sql)
+            tdSql.checkRows(300)
+            self.cur1.execute(sql)
+        for i in range(self.fornum):
+            #sql = "select ts , * from ( select "
+            sql = "select ts from ( select "
+            sql += "%s, " % random.choice(self.s_r_select)
+            sql += "%s, " % random.choice(self.q_select)
+            sql += "ts from stable_1 where "
+            sql += "%s " % random.choice(self.q_where)
+            sql += "%s " % random.choice(self.order_where)
+            sql += ") union all "
+            sql += "select ts from ( select "
+            sql += "%s, " % random.choice(self.s_r_select)
+            sql += "%s, " % random.choice(self.q_select)
+            sql += "ts from stable_2 where "
+            sql += "%s " % random.choice(self.q_where)
+            sql += "%s " % random.choice(self.order_where)
+            sql += ")"
+            tdLog.info(sql)
+            tdLog.info(len(sql))
+            tdSql.query(sql)
+            tdSql.checkRows(600)
+            self.cur1.execute(sql)
+
+        #3 inter union not support
+        tdSql.query("select 3-4 from stable_1;")
+        for i in range(self.fornum):
+            sql = "select ts , * from ( select "
+            sql += "%s, " % random.choice(self.s_r_select)
+            sql += "%s, " % random.choice(self.q_select)
+            sql += "ts from stable_1 where "
+            sql += "%s " % random.choice(self.q_where)
+            sql += "%s " % random.choice(self.order_where)
+            sql += " %s " % random.choice(self.unionall_or_union)
+            sql += " select "
+            sql += "%s, " % random.choice(self.s_r_select)
+            sql += "%s, " % random.choice(self.q_select)
+            sql += "ts from stable_2 where "
stable_2 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.order_where) + sql += ")" + tdLog.info(sql) + tdLog.info(len(sql)) + #TD-15837 tdSql.query(sql) + # self.cur1.execute(sql) + + #join:select * from (select column form stable1,stable2 where t1.ts=t2.ts and <\>\in\and\or order by) + tdSql.query("select 3-5 from stable_1;") + for i in range(self.fornum): + sql = "select * from ( select t1.ts ," + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) + sql += "t2.ts from stable_1 t1 , stable_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.t_u_where) + sql += "%s " % random.choice(self.order_u_where) + sql += ");" + tdLog.info(sql) + tdLog.info(len(sql)) + # TD-15609 tdSql.query(sql) + # tdSql.checkRows(100) + #self.cur1.execute(sql) + + tdSql.query("select 3-6 from stable_1;") + for i in range(self.fornum): + sql = "select * from ( select t1.ts ," + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) + sql += "t2.ts from stable_1 t1 , stable_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.t_u_or_where) + sql += "%s " % random.choice(self.order_u_where) + sql += ");" + tdLog.info(sql) + tdLog.info(len(sql)) + # TD-15609 同上 tdSql.query(sql) + # tdSql.checkRows(100) + #self.cur1.execute(sql) + + #4 select column from (select * form stable where <\>\in\and\or order by ) + #self.dropandcreateDB_random("%s" %db, 1) + tdSql.query("select 4-1 from stable_1;") + for i in range(self.fornum): + sql = "select ts , " + sql += "%s, " % random.choice(self.q_select) + sql += "%s, " % random.choice(self.q_select) + sql += "%s " % random.choice(self.t_select) + sql += " from ( select * from stable_1 where " + sql += "%s " % random.choice(self.qt_where) + sql += "%s " % random.choice(self.order_where) + sql += ") ;" + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + tdSql.checkRows(300) + self.cur1.execute(sql) + + #5 select distinct column\tag from (select * form stable where <\>\in\and\or order by limit offset ) + tdSql.query("select 5-1 from stable_1;") + for i in range(self.fornum): + sql = "select " + sql += "%s " % random.choice(self.dqt_select) + sql += " from ( select * from stable_1 where " + sql += "%s " % random.choice(self.qt_where) + sql += "%s " % random.choice(self.order_where) + sql += ") ;" + tdLog.info(sql) + tdLog.info(len(sql)) + #TD-15500 tdSql.query(sql) + #self.cur1.execute(sql) + + #5-1 select distinct column\tag from (select calc form stable where <\>\in\and\or order by limit offset ) + tdSql.query("select 5-2 from stable_1;") + for i in range(self.fornum): + sql = "select distinct c5_1 " + sql += " from ( select " + sql += "%s " % random.choice(self.calc_select_in_ts) + sql += " as c5_1 from stable_1 where " + sql += "%s " % random.choice(self.qt_where) + #sql += "%s " % random.choice(order_where) + sql += ") ;" + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + #tdSql.checkRows(1)有的函数还没有提交,会不返回结果,先忽略 + self.cur1.execute(sql) + + #6-error select * from (select distinct(tag) form stable where <\>\in\and\or order by limit ) + tdSql.query("select 6-1 from stable_1;") + for i in range(self.fornum): + sql = "select * from ( select " + sql += "%s " % random.choice(self.dt_select) + sql += " from 
stable_1 where " + sql += "%s " % random.choice(self.qt_where) + sql += "%s " % random.choice(self.order_desc_where) + sql += ") ;" + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.error(sql) + tdSql.query("select 6-1 from stable_1;") + for i in range(self.fornum): + sql = "select * from ( select " + sql += "%s " % random.choice(self.dt_select) + sql += " from stable_1 where " + sql += "%s ) ;" % random.choice(self.qt_where) + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + + #7-error select * from (select distinct(tag) form stable where <\>\in\and\or order by limit ) + tdSql.query("select 7-1 from stable_1;") + for i in range(self.fornum): + sql = "select * from ( select " + sql += "%s " % random.choice(self.dq_select) + sql += " from stable_1 where " + sql += "%s " % random.choice(self.qt_where) + sql += "%s " % random.choice(self.order_desc_where) + sql += "%s " % random.choice([self.limit_where[0] , self.limit_where[1]] ) + sql += ") ;" + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.error(sql) #distinct 和 order by 不能混合使用 + tdSql.query("select 7-1 from stable_1;") + for i in range(self.fornum): + sql = "select * from ( select " + sql += "%s " % random.choice(self.dq_select) + sql += " from stable_1 where " + sql += "%s " % random.choice(self.qt_where) + #sql += "%s " % random.choice(order_desc_where) + sql += "%s " % random.choice([self.limit_where[0] , self.limit_where[1]] ) + sql += ") ;" + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + tdSql.checkRows(1) + self.cur1.execute(sql) + + #calc_select,TWA/Diff/Derivative/Irate are not allowed to apply to super table directly + #8 select * from (select ts,calc form ragular_table where <\>\in\and\or order by ) + + # dcDB = self.dropandcreateDB_random("%s" %db, 1) + tdSql.query("select 8-1 from stable_1;") + for i in range(self.fornum): + sql = "select * from ( select ts ," + sql += "%s " % random.choice(self.calc_select_support_ts) + sql += "from regular_table_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.order_where) + sql += "%s " % random.choice(self.limit1_where) + sql += ") ;" + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) # 聚合函数不在可以和ts一起使用了 DB error: Not a single-group group function + self.cur1.execute(sql) + tdSql.query("select 8-1 from stable_1;") + for i in range(self.fornum): + sql = "select * from ( select " + sql += "%s " % random.choice(self.calc_select_not_support_ts) + sql += "from regular_table_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.limit1_where) + sql += ") ;" + tdLog.info(sql) + tdLog.info(len(sql)) + #TD-15651 tdSql.query(sql) # 聚合函数不在可以和ts一起使用了 DB error: Not a single-group group function + #self.cur1.execute(sql) + + for i in range(self.fornum): + sql = "select * from ( select " + sql += "%s " % random.choice(self.calc_select_in_ts) + sql += "from regular_table_1 where " + sql += "%s " % random.choice(self.q_where) + #sql += "%s " % random.choice(order_where) + sql += "%s " % random.choice(self.limit1_where) + sql += ") ;" + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + + tdSql.query("select 8-2 from stable_1;") + for i in range(self.fornum): + sql = "select * from ( select t1.ts, " + sql += "%s " % random.choice(self.calc_select_in_support_ts_j) + sql += "from regular_table_1 t1, regular_table_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.q_u_where) + sql += "%s " % random.choice(self.order_u_where) + 
sql += "%s " % random.choice(self.limit1_where) + sql += ") ;" + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql)# 聚合函数不在可以和ts一起使用了 DB error: Not a single-group group function + self.cur1.execute(sql) + for i in range(self.fornum): + sql = "select * from ( select " + sql += "%s " % random.choice(self.calc_select_in_not_support_ts_j) + sql += "from regular_table_1 t1, regular_table_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.q_u_where) + sql += "%s " % random.choice(self.limit1_where) + sql += ") ;" + tdLog.info(sql) + tdLog.info(len(sql)) + #TD-15651 tdSql.query(sql) + ##top返回结果有问题 tdSql.checkRows(1) + #self.cur1.execute(sql) + + #9 select * from (select ts,calc form stable where <\>\in\and\or order by ) + # self.dropandcreateDB_random("%s" %db, 1) + tdSql.query("select 9-1 from stable_1;") + for i in range(self.fornum): + sql = "select * from ( select " + sql += "%s " % random.choice(self.calc_select_not_support_ts) + sql += "from stable_1 where " + sql += "%s " % random.choice(self.qt_where) + sql += "%s " % random.choice(self.limit1_where) + sql += ") ;" + tdLog.info(sql) + tdLog.info(len(sql)) + #TD-15651 tdSql.query(sql) + # self.cur1.execute(sql) + tdSql.query("select 9-2 from stable_1;") + for i in range(self.fornum): + sql = "select * from ( select ts ," + sql += "%s " % random.choice(self.calc_select_support_ts) + sql += "from stable_1 where " + sql += "%s " % random.choice(self.qt_where) + sql += "%s " % random.choice(self.order_where) + sql += "%s " % random.choice(self.limit1_where) + sql += ") ;" + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + + tdSql.query("select 9-3 from stable_1;") + for i in range(self.fornum): + sql = "select * from ( select " + sql += "%s " % random.choice(self.calc_select_in_not_support_ts_j) + sql += "from stable_1 t1, stable_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.t_join_where) + sql += " and %s " % random.choice(self.qt_u_or_where) + sql += "%s " % random.choice(self.limit1_where) + sql += ") ;" + tdLog.info(sql) + tdLog.info(len(sql)) + #TD-15651 tdSql.query(sql) + #self.cur1.execute(sql) + tdSql.query("select 9-4 from stable_1;") + for i in range(self.fornum): + sql = "select * from ( select t1.ts," + sql += "%s " % random.choice(self.calc_select_in_support_ts_j) + sql += "from stable_1 t1, stable_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.t_join_where) + sql += " and %s " % random.choice(self.qt_u_or_where) + sql += "%s " % random.choice(self.order_u_where) + sql += "%s " % random.choice(self.limit1_where) + sql += ") ;" + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + + #10 select calc from (select * form regualr_table where <\>\in\and\or order by ) + tdSql.query("select 10-1 from stable_1;") + for i in range(self.fornum): + sql = "select " + sql += "%s " % random.choice(self.calc_select_in_ts) + sql += "as calc10_1 from ( select * from regular_table_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.order_desc_where) + sql += "%s " % random.choice(self.limit1_where) + sql += ") ;" + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + tdSql.checkRows(1) + self.cur1.execute(sql) + + #10-1 select calc from (select * form regualr_table where <\>\in\and\or order by ) + # rsDn = self.restartDnodes() + # self.dropandcreateDB_random("%s" %db, 1) + # rsDn = self.restartDnodes() + tdSql.query("select 10-2 from stable_1;") + for i in 
+            sql = "select "
+            sql += "%s " % random.choice(self.calc_select_all)
+            sql += "as calc10_2 from ( select * from regular_table_1 where "
+            sql += "%s " % random.choice(self.q_where)
+            sql += "%s " % random.choice(self.order_desc_where)
+            sql += "%s " % random.choice(self.limit1_where)
+            sql += ") ;"
+            tdLog.info(sql)
+            tdLog.info(len(sql))
+            #TD-15651 tdSql.query(sql)
+            # tdSql.checkRows(1)
+            #self.cur1.execute(sql)
+
+        #10-2 select calc from (select * from regular_tables where <\>\in\and\or order by )
+        tdSql.query("select 10-3 from stable_1;")
+        for i in range(self.fornum):
+            sql = "select "
+            sql += "%s as calc10_3 " % random.choice(self.calc_select_all)
+            sql += " from ( select * from regular_table_1 t1, regular_table_2 t2 where t1.ts = t2.ts and "
+            sql += "%s " % random.choice(self.q_u_where)
+            sql += " and %s " % random.choice(self.q_u_or_where)
+            sql += "%s " % random.choice(self.order_u_where)
+            sql += "%s " % random.choice(self.limit_u_where)
+            sql += ") "
+            sql += "%s ;" % random.choice(self.limit1_where)
+            tdLog.info(sql)
+            tdLog.info(len(sql))
+            #TD-15651 tdSql.query(sql)
+            #self.cur1.execute(sql)
+
+        tdSql.query("select 10-4 from stable_1;")
+        for i in range(self.fornum):
+            sql = "select "
+            sql += "%s as calc10_4 " % random.choice(self.calc_select_all)
+            sql += " from ( select * from regular_table_1 t1, regular_table_2 t2 where t1.ts = t2.ts and "
+            sql += "%s " % random.choice(self.q_u_or_where)
+            sql += " and %s " % random.choice(self.q_u_or_where)
+            sql += "%s " % random.choice(self.order_u_where)
+            sql += "%s " % random.choice(self.limit_u_where)
+            sql += ") "
+            sql += "%s ;" % random.choice(self.limit_u_where)
+            tdLog.info(sql)
+            tdLog.info(len(sql))
+            #TD-15651 tdSql.query(sql)
+            # tdSql.checkRows(1)
+            #self.cur1.execute(sql)
+
+        #11 select calc from (select * from stable where <\>\in\and\or order by limit )
+        tdSql.query("select 11-1 from stable_1;")
+        for i in range(self.fornum):
+            sql = "select "
+            sql += "%s " % random.choice(self.calc_select_in_ts)
+            sql += "as calc11_1 from ( select * from stable_1 where "
+            sql += "%s " % random.choice(self.qt_where)
+            sql += "%s " % random.choice(self.order_desc_where)
+            sql += "%s " % random.choice(self.limit1_where)
+            sql += ") ;"
+            tdLog.info(sql)
+            tdLog.info(len(sql))
+            tdSql.query(sql)
+            tdSql.checkRows(1)
+            self.cur1.execute(sql)
+
+        #11-1 select calc from (select * from stable where <\>\in\and\or order by limit )
+        tdSql.query("select 11-2 from stable_1;")
+        for i in range(self.fornum):
+            sql = "select "
+            sql += "%s " % random.choice(self.calc_select_all)
+            sql += "as calc11_1 from ( select * from stable_1 where "
+            sql += "%s " % random.choice(self.qt_where)
+            sql += "%s " % random.choice(self.order_desc_where)
+            sql += "%s " % random.choice(self.limit1_where)
+            sql += ") ;"
+            tdLog.info(sql)
+            tdLog.info(len(sql))
+            #TD-15651 tdSql.query(sql)
+            #self.cur1.execute(sql)
+            #result is hard to predict tdSql.checkRows(1)
+
+        #11-2 select calc from (select * from stables where <\>\in\and\or order by limit )
+        tdSql.query("select 11-3 from stable_1;")
+        for i in range(self.fornum):
+            sql = "select "
+            sql += "%s " % random.choice(self.calc_select_all)
+            sql += "as calc11_1 from ( select * from stable_1 t1, stable_2 t2 where t1.ts = t2.ts and "
+            sql += "%s " % random.choice(self.t_join_where)
+            sql += "%s " % random.choice(self.order_u_where)
+            sql += "%s " % random.choice(self.limit_u_where)
+            sql += ") "
+            sql += "%s ;" % random.choice(self.limit_u_where)
+            tdLog.info(sql)
+            tdLog.info(len(sql))
+            #TD-15651 tdSql.query(sql)
+            #self.cur1.execute(sql)
+
+        tdSql.query("select 11-4 from stable_1;")
+        for i in range(self.fornum):
+            sql = "select "
+            sql += "%s " % random.choice(self.calc_select_all)
+            sql += "as calc11_1 from ( select * from stable_1 t1, stable_2 t2 where t1.ts = t2.ts and "
+            sql += "%s " % random.choice(self.qt_u_or_where)
+            sql += "%s " % random.choice(self.order_u_where)
+            sql += "%s " % random.choice(self.limit_u_where)
+            sql += ") "
+            sql += "%s ;" % random.choice(self.limit_u_where)
+            tdLog.info(sql)
+            tdLog.info(len(sql))
+            #TD-15651 tdSql.query(sql)
+            #self.cur1.execute(sql)
+
+        #12 select calc-diff from (select * from regular_table where <\>\in\and\or order by limit )
+        ##self.dropandcreateDB_random("%s" %db, 1)
+        tdSql.query("select 12-1 from stable_1;")
+        for i in range(self.fornum):
+            sql = "select "
+            sql += "%s " % random.choice(self.calc_calculate_regular)
+            sql += " from ( select * from regular_table_1 where "
+            sql += "%s " % random.choice(self.q_where)
+            sql += "%s " % random.choice(self.order_desc_where)
+            sql += "%s " % random.choice([self.limit_where[2] , self.limit_where[3]] )
+            sql += ") ;"
+            tdLog.info(sql)
+            tdLog.info(len(sql))
+            ##derivative is not supported yet tdSql.query(sql)
+            # tdSql.checkRows(1)
+            #self.cur1.execute(sql)
+
+        tdSql.query("select 12-2 from stable_1;")
+        for i in range(self.fornum):
+            sql = "select "
+            sql += "%s " % random.choice(self.calc_calculate_regular)
+            sql += " from ( select * from regular_table_1 t1, regular_table_2 t2 where t1.ts = t2.ts and "
+            sql += "%s " % random.choice(self.q_u_where)
+            sql += "%s " % random.choice(self.order_u_where)
+            sql += "%s " % random.choice([self.limit_where[2] , self.limit_where[3]] )
+            sql += ") ;"
+            tdLog.info(sql)
+            tdLog.info(len(sql))
+            #derivative is not supported yet tdSql.query(sql)
+            # tdSql.checkRows(1)
+            #self.cur1.execute(sql)
+
+        tdSql.query("select 12-2.2 from stable_1;")
+        for i in range(self.fornum):
+            sql = "select "
+            sql += "%s " % random.choice(self.calc_calculate_regular)
+            sql += " from ( select * from regular_table_1 t1, regular_table_2 t2 where t1.ts = t2.ts and "
+            sql += "%s " % random.choice(self.q_u_or_where)
+            sql += "%s " % random.choice(self.order_u_where)
+            sql += "%s " % random.choice([self.limit_where[2] , self.limit_where[3]] )
+            sql += ") ;"
+            tdLog.info(sql)
+            tdLog.info(len(sql))
+            #derivative is not supported yet tdSql.query(sql)
+            #self.cur1.execute(sql)
+
+        #12-1 select calc-diff from (select * from stable where <\>\in\and\or order by limit )
+        tdSql.query("select 12-3 from stable_1;")
+        self.restartDnodes()
+        for i in range(self.fornum):
+            sql = "select * from ( select "
+            sql += "%s " % random.choice(self.calc_calculate_regular)
+            sql += " from stable_1 where "
+            sql += "%s " % random.choice(self.q_where)
+            sql += "%s " % random.choice(self.group_where)
+            sql += ") "
+            sql += "%s " % random.choice(self.order_desc_where)
+            sql += "%s " % random.choice([self.limit_where[2] , self.limit_where[3]] )
+            sql += " ;"
+            tdLog.info(sql)
+            tdLog.info(len(sql))
+            #derivative is not supported yet tdSql.query(sql)
+            #self.cur1.execute(sql)
+
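+        # 12-4 and 12-5 push the diff/derivative selectors into a two-stable
+        # join; both stay commented out since join queries do not support
+        # group by and derivative is not available yet.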
+        tdSql.query("select 12-4 from stable_1;")
+        #join query does not support group by
+        for i in range(self.fornum):
+            sql = "select * from ( select "
+            sql += "%s " % random.choice(self.calc_calculate_regular_j)
+            sql += " from stable_1 t1, stable_2 t2 where t1.ts = t2.ts and "
+            sql += "%s " % random.choice(self.t_join_where)
+            sql += "%s " % random.choice(self.group_where_j)
+            sql += ") "
+            #sql += "%s " % random.choice(self.order_desc_where)
+            sql += "%s " % random.choice([self.limit_where[2] , self.limit_where[3]] )
+            sql += " ;"
+            tdLog.info(sql)
+            tdLog.info(len(sql))
+            #tdSql.query(sql)  derivative is not supported yet; also check whether the group by and partition by cases need to be separated
+            #self.cur1.execute(sql)
+
+        tdSql.query("select 12-5 from stable_1;")
+        #join query does not support group by
+        for i in range(self.fornum):
+            sql = "select * from ( select "
+            sql += "%s " % random.choice(self.calc_calculate_regular_j)
+            sql += " from stable_1 t1, stable_2 t2 where t1.ts = t2.ts and "
+            sql += "%s " % random.choice(self.qt_u_or_where)
+            sql += "%s " % random.choice(self.group_where_j)
+            sql += ") "
+            sql += "%s " % random.choice(self.order_desc_where)
+            sql += "%s " % random.choice([self.limit_where[2] , self.limit_where[3]] )
+            sql += " ;"
+            tdLog.info(sql)
+            tdLog.info(len(sql))
+            #derivative not support tdSql.query(sql)
+            #self.cur1.execute(sql)
+
+
+        #13 select calc-diff as diffns from (select * from stable where <\>\in\and\or order by limit )
+        tdSql.query("select 13-1 from stable_1;")
+        for i in range(self.fornum):
+            sql = "select "
+            sql += "%s " % random.choice(self.calc_calculate_regular)
+            sql += " as calc13_1 from ( select * from stable_1 where "
+            sql += "%s " % random.choice(self.qt_where)
+            sql += "%s " % random.choice(self.orders_desc_where)
+            sql += "%s " % random.choice([self.limit_where[2] , self.limit_where[3]] )
+            sql += ") ;"
+            tdLog.info(sql)
+            tdLog.info(len(sql))
+            #derivative not support tdSql.query(sql)
+            #self.cur1.execute(sql)
+
+        #14 select * from (select calc_aggregate_alls as agg from stable where <\>\in\and\or group by order by slimit soffset )
+        # TD-5955 select * from ( select count (q_double) from stable_1 where t_bool = true or t_bool = false group by loc order by ts asc slimit 1 ) ;
+        tdSql.query("select 14-1 from stable_1;")
+        for i in range(self.fornum):
+            sql = "select * from ( select "
+            sql += "%s as calc14_1, " % random.choice(self.calc_aggregate_all)
+            sql += "%s as calc14_2, " % random.choice(self.calc_aggregate_all)
+            sql += "%s " % random.choice(self.calc_aggregate_all)
+            sql += " as calc14_3 from stable_1 where "
+            sql += "%s " % random.choice(self.qt_where)
+            sql += "%s " % random.choice(self.group_where)
+            sql += "%s " % random.choice(self.order_desc_where)
+            sql += "%s " % random.choice(self.slimit1_where)
+            sql += ") ;"
+            tdLog.info(sql)
+            tdLog.info(len(sql))
+            #TD-15678 tdSql.query(sql)
+            # tdSql.checkRows(1)
+            #self.cur1.execute(sql)
+
+        # error group by in out query
+        tdSql.query("select 14-2 from stable_1;")
+        for i in range(self.fornum):
+            sql = "select * from ( select "
+            sql += "%s as calc14_1, " % random.choice(self.calc_aggregate_all)
+            sql += "%s as calc14_2, " % random.choice(self.calc_aggregate_all)
+            sql += "%s " % random.choice(self.calc_aggregate_all)
+            sql += " as calc14_3 from stable_1 where "
+            sql += "%s " % random.choice(self.qt_where)
+            sql += "%s " % random.choice(self.group_where)
+            sql += "%s " % random.choice(self.having_support)
+            sql += "%s " % random.choice(self.orders_desc_where)
+            sql += "%s " % random.choice(self.slimit1_where)
+            sql += ") "
+            sql += "%s " % random.choice(self.group_where)
+            tdLog.info(sql)
+            tdLog.info(len(sql))
+            #TD-15678 tdSql.query(sql)
+            # tdSql.checkRows(1)
+            #self.cur1.execute(sql)
+
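+        # 14-3 and 14-4 move the same aggregates onto a two-stable join with
+        # partition-by and slimit inside the subquery; these run but without
+        # row-count checks.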
+= "%s as calc14_2, " % random.choice(self.calc_aggregate_all_j) + sql += "%s " % random.choice(self.calc_aggregate_all_j) + sql += " as calc14_3 from stable_1 t1, stable_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.t_join_where) + sql += "%s " % random.choice(self.partiton_where_j) + sql += "%s " % random.choice(self.slimit1_where) + sql += ") " + sql += "%s ;" % random.choice(self.limit_u_where) + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + + tdSql.query("select 14-4 from stable_1;") + for i in range(self.fornum): + sql = "select * from ( select " + sql += "%s as calc14_1, " % random.choice(self.calc_aggregate_all_j) + sql += "%s as calc14_2, " % random.choice(self.calc_aggregate_all_j) + sql += "%s " % random.choice(self.calc_aggregate_all_j) + sql += " as calc14_3 from stable_1 t1, stable_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.qt_u_or_where) + sql += "%s " % random.choice(self.partiton_where_j) + sql += "%s " % random.choice(self.slimit1_where) + sql += ") " + sql += "%s ;" % random.choice(self.limit_u_where) + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + + #15 select * from (select calc_aggregate_regulars as agg from regular_table where <\>\in\and\or order by slimit soffset ) + tdSql.query("select 15-1 from stable_1;") + for i in range(self.fornum): + sql = "select * from ( select " + sql += "%s as calc15_1, " % random.choice(self.calc_aggregate_regular) + sql += "%s as calc15_2, " % random.choice(self.calc_aggregate_regular) + sql += "%s " % random.choice(self.calc_aggregate_regular) + sql += " as calc15_3 from regular_table_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.group_where_regular) + sql += ") ;" + tdLog.info(sql) + tdLog.info(len(sql)) + #tdSql.query(sql) #Invalid function name: twa' + # tdSql.checkRows(1) + #self.cur1.execute(sql) + + tdSql.query("select 15-2 from stable_1;") + for i in range(self.fornum): + sql = "select * from ( select " + sql += "%s as calc15_1, " % random.choice(self.calc_aggregate_regular_j) + sql += "%s as calc15_2, " % random.choice(self.calc_aggregate_regular_j) + sql += "%s " % random.choice(self.calc_aggregate_regular_j) + sql += " as calc15_3 from regular_table_1 t1, regular_table_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.q_u_where) + sql += "%s " % random.choice(self.group_where_regular_j) + sql += "%s " % random.choice(self.limit_u_where) + sql += ") " + sql += "%s ;" % random.choice(self.limit_u_where) + tdLog.info(sql) + tdLog.info(len(sql)) + #tdSql.query(sql) #Invalid function name: twa' + #self.cur1.execute(sql) + + tdSql.query("select 15-2.2 from stable_1;") + for i in range(self.fornum): + sql = "select * from ( select " + sql += "%s as calc15_1, " % random.choice(self.calc_aggregate_regular_j) + sql += "%s as calc15_2, " % random.choice(self.calc_aggregate_regular_j) + sql += "%s " % random.choice(self.calc_aggregate_regular_j) + sql += " as calc15_3 from regular_table_1 t1, regular_table_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.q_u_or_where) + sql += "%s " % random.choice(self.group_where_regular_j) + sql += "%s " % random.choice(self.limit_u_where) + sql += ") " + sql += "%s ;" % random.choice(self.limit_u_where) + tdLog.info(sql) + tdLog.info(len(sql)) + #tdSql.query(sql) #Invalid function name: twa' + #self.cur1.execute(sql) + + self.restartDnodes() + tdSql.query("select 15-3 from stable_1;") + for i in 
range(self.fornum): + sql = "select * from ( select " + sql += "%s as calc15_1, " % random.choice(self.calc_aggregate_groupbytbname) + sql += "%s as calc15_2, " % random.choice(self.calc_aggregate_groupbytbname) + sql += "%s " % random.choice(self.calc_aggregate_groupbytbname) + sql += " as calc15_3 from stable_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.group_where) + sql += "%s " % random.choice(self.having_support) + sql += "%s " % random.choice(self.order_desc_where) + sql += ") " + sql += "order by calc15_1 " + sql += "%s " % random.choice(self.limit_where) + tdLog.info(sql) + tdLog.info(len(sql)) + #tdSql.query(sql) # 'Invalid function name: twa'; the order by may also need to be removed + #self.cur1.execute(sql) + + tdSql.query("select 15-4 from stable_1;") + for i in range(self.fornum): + sql = "select * from ( select " + sql += "%s as calc15_1, " % random.choice(self.calc_aggregate_groupbytbname_j) + sql += "%s as calc15_2, " % random.choice(self.calc_aggregate_groupbytbname_j) + sql += "%s " % random.choice(self.calc_aggregate_groupbytbname_j) + sql += " as calc15_3 from stable_1 t1, stable_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.t_join_where) + sql += "%s " % random.choice(self.group_where_j) + sql += "%s " % random.choice(self.having_support_j) + #sql += "%s " % random.choice(orders_desc_where) + sql += ") " + sql += "order by calc15_1 " + sql += "%s " % random.choice(self.limit_u_where) + tdLog.info(sql) + tdLog.info(len(sql)) + #tdSql.query(sql) #'Invalid function name: irate' + #self.cur1.execute(sql) + + tdSql.query("select 15-4.2 from stable_1;") + for i in range(self.fornum): + sql = "select * from ( select " + sql += "%s as calc15_1, " % random.choice(self.calc_aggregate_groupbytbname_j) + sql += "%s as calc15_2, " % random.choice(self.calc_aggregate_groupbytbname_j) + sql += "%s " % random.choice(self.calc_aggregate_groupbytbname_j) + sql += " as calc15_3 from stable_1 t1, stable_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.qt_u_or_where) + sql += "%s " % random.choice(self.group_where_j) + sql += "%s " % random.choice(self.having_support_j) + sql += "%s " % random.choice(self.orders_desc_where) + sql += ") " + sql += "order by calc15_1 " + sql += "%s " % random.choice(self.limit_u_where) + tdLog.info(sql) + tdLog.info(len(sql)) + #TD-15678 #tdSql.query(sql) + #self.cur1.execute(sql) + + tdSql.query("select 15-5 from stable_1;") + for i in range(self.fornum): + sql = "select * from ( select " + sql += "%s as calc15_1, " % random.choice(self.calc_aggregate_groupbytbname) + sql += "%s as calc15_2, " % random.choice(self.calc_aggregate_groupbytbname) + sql += "%s " % random.choice(self.calc_aggregate_groupbytbname) + sql += " as calc15_3 from stable_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.group_where) + sql += ") " + sql += "order by calc15_1 " + sql += "%s " % random.choice(self.limit_where) + tdLog.info(sql) + tdLog.info(len(sql)) + #tdSql.query(sql) #'Invalid function name: irate' + #self.cur1.execute(sql) + + #16 select * from (select calc_aggregate_regulars as agg from regular_table where <\>\in\and\or order by limit offset ) + #self.dropandcreateDB_random("%s" %db, 1) + tdSql.query("select 16-1 from stable_1;") + for i in range(self.fornum): + sql = "select * from ( select " + sql += "%s as calc16_0 , " % random.choice(self.calc_calculate_all) + sql += "%s as calc16_1 , " % random.choice(self.calc_aggregate_all) + sql += "%s as calc16_2 " % random.choice(self.calc_select_in) + sql += " from stable_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.group_where) + #sql += "%s " % random.choice(having_support) # having and partition cannot be mixed + sql += ") " + sql += "order by calc16_0 " + sql += "%s " % random.choice(self.limit1_where) + tdLog.info(sql) + tdLog.info(len(sql)) + #TD-15651 tdSql.query(sql) + #self.cur1.execute(sql) + + tdSql.query("select 16-2 from stable_1;") + for i in range(self.fornum): + sql = "select * from ( select " + sql += "%s as calc16_0 " % random.choice(self.calc_calculate_all_j) + sql += ", %s as calc16_1 " % random.choice(self.calc_aggregate_all_j) + #sql += ", %s as calc16_2 " % random.choice(self.calc_select_in_j) + sql += " from stable_1 t1, stable_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.t_join_where) + sql += ") " + sql += "order by calc16_0 " + sql += "%s " % random.choice(self.limit1_where) + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + + tdSql.query("select 16-2.2 from stable_1;") + for i in range(self.fornum): + sql = "select * from ( select " + sql += "%s as calc16_0 " % random.choice(self.calc_calculate_all_j) + sql += ", %s as calc16_1 " % random.choice(self.calc_aggregate_all_j) + sql += " from stable_1 t1, stable_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.qt_u_or_where) + sql += ") " + sql += "order by calc16_0 " + sql += "%s " % random.choice(self.limit1_where) + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + + tdSql.query("select 16-3 from stable_1;") + for i in range(self.fornum): + sql = "select * from ( select " + sql += "%s as calc16_1 " % random.choice(self.calc_calculate_regular) + sql += " from regular_table_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "limit 2 ) " + sql += "%s " % random.choice(self.limit1_where) + tdLog.info(sql) + tdLog.info(len(sql)) + #tdSql.query(sql) # 'Invalid function name: derivative' + #self.cur1.execute(sql) + + tdSql.query("select 16-4 from stable_1;") + for i in range(self.fornum): + sql = "select * from ( select " + sql += "%s as calc16_1 " % random.choice(self.calc_calculate_regular_j) + sql += " from regular_table_1 t1, regular_table_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.q_u_where) + sql += "limit 2 ) " + sql += "%s " % random.choice(self.limit1_where) + tdLog.info(sql) + tdLog.info(len(sql)) + #tdSql.query(sql) # 'Invalid function name: derivative' + #self.cur1.execute(sql) + + tdSql.query("select 16-4.2 from stable_1;") + for i in range(self.fornum): + sql = "select * from ( select " + sql += "%s as calc16_1 " % random.choice(self.calc_calculate_regular_j) + sql += " from regular_table_1 t1, regular_table_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.q_u_or_where) + sql += "limit 2 ) " + sql += "%s " % random.choice(self.limit1_where) + tdLog.info(sql) + tdLog.info(len(sql)) + #tdSql.query(sql) # 'Invalid function name: derivative' + #self.cur1.execute(sql) + + tdSql.query("select 16-5 from stable_1;") + for i in range(self.fornum): + sql = "select * from ( select " + sql += "%s as calc16_0 , " % random.choice(self.calc_calculate_all) + sql += "%s as calc16_1 , " % random.choice(self.calc_calculate_regular) + sql += "%s as calc16_2 " % random.choice(self.calc_select_all) + sql += " from stable_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.group_where) + #sql += "%s " %
random.choice(having_support) + sql += ") " + sql += "order by calc16_1 " + sql += "%s " % random.choice(self.limit1_where) + tdLog.info(sql) + tdLog.info(len(sql)) + # tdSql.query(sql) + #self.cur1.execute(sql) + + tdSql.query("select 16-6 from stable_1;") + for i in range(self.fornum): + sql = "select * from ( select " + sql += "%s as calc16_1 " % random.choice(self.calc_calculate_groupbytbname) + sql += " from stable_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.group_where) + sql += "limit 2 ) " + sql += "%s " % random.choice(self.limit1_where) + tdLog.info(sql) + tdLog.info(len(sql)) + #Invalid function name: derivative' tdSql.query(sql) + #self.cur1.execute(sql) + + tdSql.query("select 16-7 from stable_1;") + for i in range(self.fornum): + sql = "select * from ( select " + sql += "%s as calc16_1 " % random.choice(self.calc_calculate_groupbytbname_j) + sql += " from stable_1 t1, stable_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.t_join_where) + sql += "limit 2 ) " + sql += "%s " % random.choice(self.limit1_where) + tdLog.info(sql) + tdLog.info(len(sql)) + #Invalid function name: derivative' tdSql.query(sql) + #self.cur1.execute(sql) + + tdSql.query("select 16-8 from stable_1;") + for i in range(self.fornum): + sql = "select * from ( select " + sql += "%s as calc16_1 " % random.choice(self.calc_calculate_groupbytbname_j) + sql += " from stable_1 t1, stable_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.qt_u_or_where) + sql += "limit 2 ) " + sql += "%s " % random.choice(self.limit1_where) + tdLog.info(sql) + tdLog.info(len(sql)) + #Invalid function name: derivative' tdSql.query(sql) + #self.cur1.execute(sql) + + #17 select apercentile from (select calc_aggregate_alls form regualr_table or stable where <\>\in\and\or interval_sliding group by having order by limit offset )interval_sliding + #self.dropandcreateDB_random("%s" %db, 1) + tdSql.query("select 17-1 from stable_1;") + for i in range(self.fornum): + #this is having_support , but tag-select cannot mix with last_row,other select can + sql = "select apercentile(cal17_0, %d)/10 ,apercentile(cal17_1, %d)/1000 ,apercentile(cal17_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000)) + sql += "%s as cal17_0 , " % random.choice(self.calc_calculate_all) + sql += "%s as cal17_1 ," % random.choice(self.calc_aggregate_all) + sql += "%s as cal17_2 " % random.choice(self.calc_aggregate_all) + sql += " from stable_1 where " + sql += "%s " % random.choice(self.qt_where) + sql += "%s " % random.choice(self.partiton_where) + sql += "%s " % random.choice(self.interval_sliding) + #sql += "%s " % random.choice(having_support) + #sql += "%s " % random.choice(order_where) + sql += "%s " % random.choice(self.limit1_where) + sql += ") " + #sql += "%s " % random.choice(interval_sliding) + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + + tdSql.query("select 17-2 from stable_1;") + for i in range(self.fornum): + #this is having_support , but tag-select cannot mix with last_row,other select can + sql = "select apercentile(cal17_0, %d)/10 ,apercentile(cal17_1, %d)/1000 ,apercentile(cal17_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000)) + sql += "%s as cal17_0 , " % random.choice(self.calc_calculate_all_j) + sql += "%s as cal17_1 ," % random.choice(self.calc_aggregate_all_j) + sql += 
"%s as cal17_2 " % random.choice(self.calc_aggregate_all_j) + sql += " from stable_1 t1, stable_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.t_join_where) + sql += "%s " % random.choice(self.interval_sliding) + sql += "%s " % random.choice(self.limit_u_where) + sql += ") " + #sql += "%s " % random.choice(interval_sliding) + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + + tdSql.query("select 17-2.2 from stable_1;") + for i in range(self.fornum): + #this is having_support , but tag-select cannot mix with last_row,other select can + sql = "select apercentile(cal17_0, %d)/10 ,apercentile(cal17_1, %d)/1000 ,apercentile(cal17_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000)) + sql += "%s as cal17_0 , " % random.choice(self.calc_calculate_all_j) + sql += "%s as cal17_1 ," % random.choice(self.calc_aggregate_all_j) + sql += "%s as cal17_2 " % random.choice(self.calc_aggregate_all_j) + sql += " from stable_1 t1, stable_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.qt_u_or_where) + sql += "%s " % random.choice(self.interval_sliding) + #sql += "%s " % random.choice(self.order_u_where) + sql += "%s " % random.choice(self.limit_u_where) + sql += ") " + #sql += "%s " % random.choice(self.interval_sliding) + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + + self.restartDnodes() + tdSql.query("select 17-3 from stable_1;") + for i in range(self.fornum): + #this is having_tagnot_support , because tag-select cannot mix with last_row... + sql = "select apercentile(cal17_1, %d)/1000 ,apercentile(cal17_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000)) + sql += "%s as cal17_1 ," % random.choice(self.calc_aggregate_all) + sql += "%s as cal17_2 " % random.choice(self.calc_aggregate_all) + sql += " from stable_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.partiton_where) + sql += "%s " % random.choice(self.interval_sliding) + #sql += "%s " % random.choice(self.having_tagnot_support) + #sql += "%s " % random.choice(self.order_where) + sql += "%s " % random.choice(self.limit1_where) + sql += ") " + #sql += "%s " % random.choice(self.interval_sliding) + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + + tdSql.query("select 17-4 from stable_1;") + for i in range(self.fornum): + #this is having_tagnot_support , because tag-select cannot mix with last_row... + sql = "select apercentile(cal17_1, %d)/1000 ,apercentile(cal17_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000)) + sql += "%s as cal17_1 ," % random.choice(self.calc_aggregate_all_j) + sql += "%s as cal17_2 " % random.choice(self.calc_aggregate_all_j) + sql += " from stable_1 t1, stable_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.t_join_where) + sql += "%s " % random.choice(self.interval_sliding) + #sql += "%s " % random.choice(order_u_where) + sql += "%s " % random.choice(self.limit_u_where) + sql += ") " + #sql += "%s " % random.choice(interval_sliding) + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + + tdSql.query("select 17-4.2 from stable_1;") + for i in range(self.fornum): + #this is having_tagnot_support , because tag-select cannot mix with last_row... 
+ sql = "select apercentile(cal17_1, %d)/1000 ,apercentile(cal17_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000)) + sql += "%s as cal17_1 ," % random.choice(self.calc_aggregate_all_j) + sql += "%s as cal17_2 " % random.choice(self.calc_aggregate_all_j) + sql += " from stable_1 t1, stable_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.qt_u_or_where) + sql += "%s " % random.choice(self.interval_sliding) + #sql += "%s " % random.choice(self.order_u_where) + sql += "%s " % random.choice(self.limit_u_where) + sql += ") " + #sql += "%s " % random.choice(self.interval_sliding) + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + + tdSql.query("select 17-5 from stable_1;") + for i in range(self.fornum): + #having_not_support + sql = "select apercentile(cal17_1, %d)/1000 ,apercentile(cal17_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000)) + sql += "%s as cal17_1 ," % random.choice(self.calc_aggregate_all) + sql += "%s as cal17_2 " % random.choice(self.calc_aggregate_all) + sql += " from stable_1 where " + sql += "%s " % random.choice(self.qt_where) + sql += "%s " % random.choice(self.partiton_where) + sql += "%s " % random.choice(self.interval_sliding) + # sql += "%s " % random.choice(self.having_not_support) + # sql += "%s " % random.choice(self.order_where) + sql += "%s " % random.choice(self.limit1_where) + sql += ") " + #sql += "%s " % random.choice(self.interval_sliding) + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + + tdSql.query("select 17-6 from stable_1;") + for i in range(self.fornum): + sql = "select apercentile(cal17_1, %d)/1000 ,apercentile(cal17_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000)) + sql += "%s as cal17_1 ," % random.choice(self.calc_aggregate_all) + sql += "%s as cal17_2 " % random.choice(self.calc_aggregate_all) + sql += " from stable_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.interval_sliding) + #sql += "%s " % random.choice(self.order_where) + sql += "%s " % random.choice(self.limit1_where) + sql += ") " + #sql += "%s " % random.choice(interval_sliding) + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + + tdSql.query("select 17-7 from stable_1;") + for i in range(self.fornum): + sql = "select apercentile(cal17_1, %d)/1000 ,apercentile(cal17_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000)) + sql += "%s as cal17_1 ," % random.choice(self.calc_aggregate_all_j) + sql += "%s as cal17_2 " % random.choice(self.calc_aggregate_all_j) + sql += " from stable_1_1 t1, stable_1_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.q_u_where) + sql += "%s " % random.choice(self.interval_sliding) + #sql += "%s " % random.choice(self.order_u_where) + sql += "%s " % random.choice(self.limit1_where) + sql += ") " + #sql += "%s " % random.choice(interval_sliding) + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + + tdSql.query("select 17-7.2 from stable_1;") + for i in range(self.fornum): + sql = "select apercentile(cal17_1, %d)/1000 ,apercentile(cal17_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000)) + sql += "%s as cal17_1 ," % random.choice(self.calc_aggregate_all_j) + sql += "%s as 
cal17_2 " % random.choice(self.calc_aggregate_all_j) + sql += " from stable_1_1 t1, stable_1_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.q_u_or_where) + sql += "%s " % random.choice(self.interval_sliding) + #sql += "%s " % random.choice(self.order_u_where) + sql += "%s " % random.choice(self.limit1_where) + sql += ") " + #sql += "%s " % random.choice(interval_sliding) + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + + self.restartDnodes() + tdSql.query("select 17-8 from stable_1;") + for i in range(self.fornum): + sql = "select apercentile(cal17_1, %d)/1000 ,apercentile(cal17_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000)) + sql += "%s as cal17_1 ," % random.choice(self.calc_aggregate_all) + sql += "%s as cal17_2 " % random.choice(self.calc_aggregate_all) + sql += " from regular_table_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.interval_sliding) + #sql += "%s " % random.choice(self.order_where) + sql += "%s " % random.choice(self.limit1_where) + sql += ") " + #sql += "%s " % random.choice(interval_sliding) + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + + tdSql.query("select 17-9 from stable_1;") + for i in range(self.fornum): + sql = "select apercentile(cal17_1, %d)/1000 ,apercentile(cal17_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000)) + sql += "%s as cal17_1 ," % random.choice(self.calc_aggregate_all_j) + sql += "%s as cal17_2 " % random.choice(self.calc_aggregate_all_j) + sql += " from regular_table_1 t1, regular_table_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.q_u_where) + sql += "%s " % random.choice(self.interval_sliding) + #sql += "%s " % random.choice(self.order_u_where) + sql += "%s " % random.choice(self.limit_u_where) + sql += ") " + #sql += "%s " % random.choice(interval_sliding) + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + + tdSql.query("select 17-10 from stable_1;") + for i in range(self.fornum): + sql = "select apercentile(cal17_1, %d)/1000 ,apercentile(cal17_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000)) + sql += "%s as cal17_1 ," % random.choice(self.calc_aggregate_all_j) + sql += "%s as cal17_2 " % random.choice(self.calc_aggregate_all_j) + sql += " from regular_table_1 t1, regular_table_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.q_u_or_where) + sql += "%s " % random.choice(self.interval_sliding) + #sql += "%s " % random.choice(self.order_u_where) + sql += "%s " % random.choice(self.limit_u_where) + sql += ") " + #sql += "%s " % random.choice(interval_sliding) + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + + #18 select apercentile from (select calc_aggregate_alls form regualr_table or stable where <\>\in\and\or session order by limit )interval_sliding + tdSql.query("select 18-1 from stable_1;") + for i in range(self.fornum): + sql = "select apercentile(cal18_1, %d)/1000 ,apercentile(cal18_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000)) + sql += "%s as cal18_1 ," % random.choice(self.calc_aggregate_all) + sql += "%s as cal18_2 " % random.choice(self.calc_aggregate_all) + sql += " from regular_table_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % 
random.choice(self.session_where) + #sql += "%s " % random.choice(self.fill_where) + #sql += "%s " % random.choice(self.order_where) + sql += "%s " % random.choice(self.limit1_where) + sql += ") " + #sql += "%s " % random.choice(interval_sliding) + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + + tdSql.query("select 18-2 from stable_1;") + for i in range(self.fornum): + sql = "select apercentile(cal18_1, %d)/1000 ,apercentile(cal18_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000)) + sql += "%s as cal18_1 ," % random.choice(self.calc_aggregate_all_j) + sql += "%s as cal18_2 " % random.choice(self.calc_aggregate_all_j) + sql += " from regular_table_1 t1, regular_table_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.q_u_where) + sql += "%s " % random.choice(self.session_u_where) + #sql += "%s " % random.choice(self.order_u_where) + sql += "%s " % random.choice(self.limit_u_where) + sql += ") " + #sql += "%s " % random.choice(interval_sliding) + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + + tdSql.query("select 18-2.2 from stable_1;") + for i in range(self.fornum): + sql = "select apercentile(cal18_1, %d)/1000 ,apercentile(cal18_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000)) + sql += "%s as cal18_1 ," % random.choice(self.calc_aggregate_all_j) + sql += "%s as cal18_2 " % random.choice(self.calc_aggregate_all_j) + sql += " from regular_table_1 t1, regular_table_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.q_u_or_where) + sql += "%s " % random.choice(self.session_u_where) + #sql += "%s " % random.choice(self.order_u_where) + sql += "%s " % random.choice(self.limit_u_where) + sql += ") " + #sql += "%s " % random.choice(self.interval_sliding) + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + + self.restartDnodes() + tdSql.query("select 18-3 from stable_1;") + for i in range(self.fornum): + sql = "select apercentile(cal18_1, %d)/1000 ,apercentile(cal18_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000)) + sql += "%s as cal18_1 ," % random.choice(self.calc_aggregate_all) + sql += "%s as cal18_2 " % random.choice(self.calc_aggregate_all) + sql += " from stable_1_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.session_where) + #sql += "%s " % random.choice(self.fill_where) + #sql += "%s " % random.choice(self.order_where) + sql += "%s " % random.choice(self.limit1_where) + sql += ") " + #sql += "%s " % random.choice(self.interval_sliding) + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + + tdSql.query("select 18-4 from stable_1;") + for i in range(self.fornum): + sql = "select apercentile(cal18_1, %d)/1000 ,apercentile(cal18_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000)) + sql += "%s as cal18_1 ," % random.choice(self.calc_aggregate_all_j) + sql += "%s as cal18_2 " % random.choice(self.calc_aggregate_all_j) + sql += " from stable_1_1 t1, regular_table_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.q_u_where) + sql += "%s " % random.choice(self.session_u_where) + #sql += "%s " % random.choice(self.order_u_where) + sql += "%s " % random.choice(self.limit_u_where) + sql += ") " + #sql += "%s " % 
random.choice(self.interval_sliding) + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + + tdSql.query("select 18-4.2 from stable_1;") + for i in range(self.fornum): + sql = "select apercentile(cal18_1, %d)/1000 ,apercentile(cal18_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000)) + sql += "%s as cal18_1 ," % random.choice(self.calc_aggregate_all_j) + sql += "%s as cal18_2 " % random.choice(self.calc_aggregate_all_j) + sql += " from stable_1_1 t1, regular_table_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.q_u_or_where) + sql += "%s " % random.choice(self.session_u_where) + #sql += "%s " % random.choice(self.order_u_where) + sql += "%s " % random.choice(self.limit_u_where) + sql += ") " + #sql += "%s " % random.choice(self.interval_sliding) + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + + tdSql.query("select 18-5 from stable_1;") + for i in range(self.fornum): + sql = "select apercentile(cal18_1, %d)/1000 ,apercentile(cal18_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000)) + sql += "%s as cal18_1 ," % random.choice(self.calc_aggregate_all) + sql += "%s as cal18_2 " % random.choice(self.calc_aggregate_all) + sql += " from stable_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.session_where) + #sql += "%s " % random.choice(self.fill_where) + #sql += "%s " % random.choice(self.order_where) + sql += "%s " % random.choice(self.limit1_where) + sql += ") " + #sql += "%s " % random.choice(interval_sliding) + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + + tdSql.query("select 18-6 from stable_1;") + for i in range(self.fornum): + sql = "select apercentile(cal18_1, %d)/1000 ,apercentile(cal18_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000)) + sql += "%s as cal18_1 ," % random.choice(self.calc_aggregate_all_j) + sql += "%s as cal18_2 " % random.choice(self.calc_aggregate_all_j) + sql += " from stable_1 t1, stable_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.t_join_where) + sql += "%s " % random.choice(self.session_u_where) + #sql += "%s " % random.choice(self.order_u_where) + sql += "%s " % random.choice(self.limit_u_where) + sql += ") " + #sql += "%s " % random.choice(self.interval_sliding) + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + + tdSql.query("select 18-7 from stable_1;") + for i in range(self.fornum): + sql = "select apercentile(cal18_1, %d)/1000 ,apercentile(cal18_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000)) + sql += "%s as cal18_1 ," % random.choice(self.calc_aggregate_all_j) + sql += "%s as cal18_2 " % random.choice(self.calc_aggregate_all_j) + sql += " from stable_1 t1, stable_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.qt_u_or_where) + sql += "%s " % random.choice(self.session_u_where) + #sql += "%s " % random.choice(self.order_u_where) + sql += "%s " % random.choice(self.limit_u_where) + sql += ") " + #sql += "%s " % random.choice(self.interval_sliding) + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + + #19 select apercentile from (select calc_aggregate_alls form regualr_table or stable where <\>\in\and\or session order by limit )interval_sliding + 
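# Illustrative sketch (hypothetical fragments): despite the header comment above, which was copied from section 18, these cases use a state window rather than a session window; a generated statement looks like
+ #   select apercentile(cal19_1, 50)/1000, apercentile(cal19_2, 30)*10+7
+ #   from ( select min(q_int) as cal19_1, max(q_double) as cal19_2
+ #          from regular_table_1 where q_int != 0
+ #          state_window(q_int) limit 100 )
+ # state_window closes a window whenever the value of its column changes; case 19-5 below expects an error because state windows are rejected on super tables ('STATE_WINDOW not support for super table query'). +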
#self.dropandcreateDB_random("%s" %db, 1) + tdSql.query("select 19-1 from stable_1;") + for i in range(self.fornum): + sql = "select apercentile(cal19_1, %d)/1000 ,apercentile(cal19_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000)) + sql += "%s as cal19_1 ," % random.choice(self.calc_aggregate_all) + sql += "%s as cal19_2 " % random.choice(self.calc_aggregate_all) + sql += " from regular_table_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.state_window) + #sql += "%s " % random.choice(self.order_where) + sql += "%s " % random.choice(self.limit1_where) + sql += ") " + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + + tdSql.query("select 19-2 from stable_1;") + for i in range(self.fornum): + sql = "select apercentile(cal19_1, %d)/1000 ,apercentile(cal19_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000)) + sql += "%s as cal19_1 ," % random.choice(self.calc_aggregate_all_j) + sql += "%s as cal19_2 " % random.choice(self.calc_aggregate_all_j) + sql += " from regular_table_1 t1, regular_table_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.q_u_where) + sql += "%s " % random.choice(self.state_u_window) + #sql += "%s " % random.choice(self.order_u_where) + sql += "%s " % random.choice(self.limit_u_where) + sql += ") " + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + + tdSql.query("select 19-2.2 from stable_1;") + for i in range(self.fornum): + sql = "select apercentile(cal19_1, %d)/1000 ,apercentile(cal19_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000)) + sql += "%s as cal19_1 ," % random.choice(self.calc_aggregate_all_j) + sql += "%s as cal19_2 " % random.choice(self.calc_aggregate_all_j) + sql += " from regular_table_1 t1, regular_table_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.q_u_or_where) + sql += "%s " % random.choice(self.state_u_window) + #sql += "%s " % random.choice(self.order_u_where) + sql += "%s " % random.choice(self.limit_u_where) + sql += ") " + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + + tdSql.query("select 19-3 from stable_1;") + for i in range(self.fornum): + sql = "select apercentile(cal19_1, %d)/1000 ,apercentile(cal19_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000)) + sql += "%s as cal19_1 ," % random.choice(self.calc_aggregate_all) + sql += "%s as cal19_2 " % random.choice(self.calc_aggregate_all) + sql += " from stable_1_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.state_window) + #sql += "%s " % random.choice(self.order_where) + sql += "%s " % random.choice(self.limit1_where) + sql += ") " + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + + tdSql.query("select 19-4 from stable_1;") + for i in range(self.fornum): + sql = "select apercentile(cal19_1, %d)/1000 ,apercentile(cal19_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000)) + sql += "%s as cal19_1 ," % random.choice(self.calc_aggregate_all_j) + sql += "%s as cal19_2 " % random.choice(self.calc_aggregate_all_j) + sql += " from stable_1_1 t1, stable_1_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.q_u_where) + #sql += "%s " % 
random.choice(self.state_window) + #sql += "%s " % random.choice(self.order_u_where) + sql += "%s " % random.choice(self.limit_u_where) + sql += ") " + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + + tdSql.query("select 19-4.2 from stable_1;") + for i in range(self.fornum): + sql = "select apercentile(cal19_1, %d)/1000 ,apercentile(cal19_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000)) + sql += "%s as cal19_1 ," % random.choice(self.calc_aggregate_all_j) + sql += "%s as cal19_2 " % random.choice(self.calc_aggregate_all_j) + sql += " from stable_1_1 t1, stable_1_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.q_u_or_where) + #sql += "%s " % random.choice(self.order_u_where) + sql += "%s " % random.choice(self.limit_u_where) + sql += ") " + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + + tdSql.query("select 19-5 from stable_1;") + for i in range(self.fornum): + sql = "select apercentile(cal19_1, %d)/1000 ,apercentile(cal19_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000)) + sql += "%s as cal19_1 ," % random.choice(self.calc_aggregate_all) + sql += "%s as cal19_2 " % random.choice(self.calc_aggregate_all) + sql += " from stable_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.state_window) + sql += "%s " % random.choice(self.order_where) + sql += "%s " % random.choice(self.limit1_where) + sql += ") " + sql += "%s " % random.choice(self.interval_sliding) + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.error(sql) #'STATE_WINDOW not support for super table query' + + tdSql.query("select 19-6 from stable_1;") + for i in range(self.fornum): + sql = "select apercentile(cal19_1, %d)/1000 ,apercentile(cal19_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000)) + sql += "%s as cal19_1 ," % random.choice(self.calc_aggregate_all_j) + sql += "%s as cal19_2 " % random.choice(self.calc_aggregate_all_j) + sql += " from stable_1 t1 , stable_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.q_u_where) + #sql += "%s " % random.choice(self.state_window) + #sql += "%s " % random.choice(self.order_u_where) + sql += "%s " % random.choice(self.limit_u_where) + sql += ") " + #sql += "%s " % random.choice(self.interval_sliding) + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + + tdSql.query("select 19-7 from stable_1;") + for i in range(self.fornum): + sql = "select apercentile(cal19_1, %d)/1000 ,apercentile(cal19_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000)) + sql += "%s as cal19_1 ," % random.choice(self.calc_aggregate_all_j) + sql += "%s as cal19_2 " % random.choice(self.calc_aggregate_all_j) + sql += " from stable_1 t1 , stable_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.qt_u_or_where) + #sql += "%s " % random.choice(self.order_u_where) + sql += "%s " % random.choice(self.limit_u_where) + sql += ") " + #sql += "%s " % random.choice(self.interval_sliding) + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + + #20 select * from (select calc_select_fills form regualr_table or stable where <\>\in\and\or fill_where group by order by limit offset ) + #self.dropandcreateDB_random("%s" %db, 1) + tdSql.query("select 20-1 from stable_1;") + for i in 
range(self.fornum): + sql = "select * from ( select " + sql += "%s , " % random.choice(self.calc_select_fill) + sql += "%s ," % random.choice(self.calc_select_fill) + sql += "%s " % random.choice(self.calc_select_fill) + sql += " from stable_1 where " + sql += "%s " % random.choice(self.interp_where) + sql += "%s " % random.choice(self.fill_where) + sql += "%s " % random.choice(self.group_where) + sql += "%s " % random.choice(self.order_where) + sql += "%s " % random.choice(self.limit_where) + sql += ") " + tdLog.info(sql) + tdLog.info(len(sql)) + # interp is not supported: tdSql.query(sql) + #self.cur1.execute(sql) + + self.restartDnodes() + tdSql.query("select 20-2 from stable_1;") + for i in range(self.fornum): + sql = "select * from ( select " + sql += "%s , " % random.choice(self.calc_select_fill_j) + sql += "%s ," % random.choice(self.calc_select_fill_j) + sql += "%s " % random.choice(self.calc_select_fill_j) + sql += " from stable_1 t1 , stable_2 t2 where t1.ts = t2.ts and " + sql += "%s and " % random.choice(self.t_join_where) + sql += "%s " % random.choice(self.interp_where_j) + sql += "%s " % random.choice(self.order_u_where) + sql += "%s " % random.choice(self.limit_u_where) + sql += ") " + tdLog.info(sql) + tdLog.info(len(sql)) + # interp is not supported: tdSql.query(sql) + #self.cur1.execute(sql) + + tdSql.query("select 20-2.2 from stable_1;") + for i in range(self.fornum): + sql = "select * from ( select " + sql += "%s , " % random.choice(self.calc_select_fill_j) + sql += "%s ," % random.choice(self.calc_select_fill_j) + sql += "%s " % random.choice(self.calc_select_fill_j) + sql += " from stable_1 t1 , stable_2 t2 where t1.ts = t2.ts and " + sql += "%s and " % random.choice(self.qt_u_or_where) + sql += "%s " % random.choice(self.interp_where_j) + sql += "%s " % random.choice(self.order_u_where) + sql += "%s " % random.choice(self.limit_u_where) + sql += ") " + tdLog.info(sql) + tdLog.info(len(sql)) + # interp is not supported: tdSql.query(sql) + #self.cur1.execute(sql) + + tdSql.query("select 20-3 from stable_1;") + for i in range(self.fornum): + sql = "select * from ( select " + sql += "%s , " % random.choice(self.calc_select_fill) + sql += "%s ," % random.choice(self.calc_select_fill) + sql += "%s " % random.choice(self.calc_select_fill) + sql += " from stable_1 where " + sql += "%s " % self.interp_where[2] + sql += "%s " % random.choice(self.fill_where) + sql += "%s " % random.choice(self.order_where) + sql += "%s " % random.choice(self.limit_where) + sql += ") " + tdLog.info(sql) + tdLog.info(len(sql)) + # interp is not supported: tdSql.query(sql) + #self.cur1.execute(sql) + + tdSql.query("select 20-4 from stable_1;") + for i in range(self.fornum): + sql = "select * from ( select " + sql += "%s , " % random.choice(self.calc_select_fill_j) + sql += "%s ," % random.choice(self.calc_select_fill_j) + sql += "%s " % random.choice(self.calc_select_fill_j) + sql += " from stable_1 t1, table_1 t2 where t1.ts = t2.ts and " + #sql += "%s and " % random.choice(self.t_join_where) + sql += "%s " % self.interp_where_j[random.randint(0,5)] + sql += "%s " % random.choice(self.order_u_where) + sql += "%s " % random.choice(self.limit_u_where) + sql += ") " + tdLog.info(sql) + tdLog.info(len(sql)) + # interp is not supported: tdSql.query(sql) + #self.cur1.execute(sql) + + tdSql.query("select 20-4.2 from stable_1;") + for i in range(self.fornum): + sql = "select * from ( select " + sql += "%s , " % random.choice(self.calc_select_fill_j) + sql += "%s ," % random.choice(self.calc_select_fill_j) + sql += "%s " % random.choice(self.calc_select_fill_j) + sql += " from stable_1 t1, stable_1_1 t2 where t1.ts = t2.ts and " + sql += "%s and " % random.choice(self.qt_u_or_where) + sql += "%s " % self.interp_where_j[random.randint(0,5)] + sql += "%s " % random.choice(self.fill_where) + sql += "%s " % random.choice(self.order_u_where) + sql += "%s " % random.choice(self.limit_u_where) + sql += ") " + tdLog.info(sql) + tdLog.info(len(sql)) + ## interp is not supported: tdSql.error(sql) + #self.cur1.execute(sql) + + tdSql.query("select 20-5 from stable_1;") + for i in range(self.fornum): + sql = "select * from ( select " + sql += "%s , " % random.choice(self.calc_select_fill) + sql += "%s ," % random.choice(self.calc_select_fill) + sql += "%s " % random.choice(self.calc_select_fill) + sql += " from regular_table_1 where " + sql += "%s " % self.interp_where[1] + sql += "%s " % random.choice(self.fill_where) + sql += "%s " % random.choice(self.order_where) + sql += "%s " % random.choice(self.limit_where) + sql += ") " + tdLog.info(sql) + tdLog.info(len(sql)) + ## interp is not supported: tdSql.query(sql) + #self.cur1.execute(sql) + + tdSql.query("select 20-6 from stable_1;") + for i in range(self.fornum): + sql = "select * from ( select " + sql += "%s , " % random.choice(self.calc_select_fill_j) + sql += "%s ," % random.choice(self.calc_select_fill_j) + sql += "%s " % random.choice(self.calc_select_fill_j) + sql += " from regular_table_1 t1, regular_table_2 t2 where t1.ts = t2.ts and " + #sql += "%s " % random.choice(self.interp_where_j) + sql += "%s " % self.interp_where_j[random.randint(0,5)] + sql += "%s " % random.choice(self.order_u_where) + sql += "%s " % random.choice(self.limit_u_where) + sql += ") " + tdLog.info(sql) + tdLog.info(len(sql)) + ## interp is not supported: tdSql.query(sql) + #self.cur1.execute(sql) + + #1 select * from (select * from (select * from regular_table where <\>\in\and\or order by limit )) + tdSql.query("select 1-1 from stable_1;") + for i in range(self.fornum): + # sql_start = "select * from ( " + # sql_end = ")" + for_num = random.randint(1, 15) + sql = "select * from (" * for_num + sql += "select * from ( select * from ( select " + sql += "%s, " % random.choice(self.s_r_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts from regular_table_1 where " + sql += "%s " % random.choice(self.q_where) + sql += ")) " + sql += ")" * for_num + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + + sql2 = "select * from ( select * from ( select " + sql2 += "%s, " % random.choice(self.s_r_select) + sql2 += "%s, " % random.choice(self.q_select) + sql2 += "ts from regular_table_1 where " + sql2 += "%s " % random.choice(self.q_where) + sql2 += ")) " + tdLog.info(sql2) + tdLog.info(len(sql2)) + tdSql.query(sql2) + self.cur1.execute(sql2) + + self.data_matrix_equal('%s' %sql ,1,10,1,1,'%s' %sql2 ,1,10,1,1) + self.data_matrix_equal('%s' %sql ,1,10,1,1,'%s' %sql ,1,10,3,3) + self.data_matrix_equal('%s' %sql ,1,10,3,3,'%s' %sql2 ,1,10,3,3) + + for i in range(self.fornum): + for_num = random.randint(1, 15) + sql = "select ts from (" * for_num + sql += "select * from ( select * from ( select " + sql += "%s, " % random.choice(self.s_r_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts from regular_table_1 where " + sql += "%s " % random.choice(self.q_where) + sql += ")) " + sql += ")" * for_num + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + + sql2 = "select * from ( select * from ( select " + sql2 += "%s, " % random.choice(self.s_r_select) + sql2 += "%s, " % random.choice(self.q_select) +
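# Illustrative sketch: the loop above prepends "select ts from (" * for_num wrapper layers (up to 15) around the inner query and closes them all with ")" * for_num, so for_num = 2 expands to + #   select ts from (select ts from (select * from ( select * from ( select ... )))) + # sql2 below is the same inner query without the extra wrappers, and data_matrix_equal then verifies that the wrapping leaves the result set unchanged. +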
sql2 += "ts from regular_table_1 where " + sql2 += "%s " % random.choice(self.q_where) + sql2 += ")) " + tdLog.info(sql2) + tdLog.info(len(sql2)) + tdSql.query(sql2) + self.cur1.execute(sql2) + + self.data_matrix_equal('%s' %sql ,1,10,1,1,'%s' %sql2 ,1,10,1,1) + + #2 select * from (select * from (select * form stable where <\>\in\and\or order by limit )) + tdSql.query("select 2-1 from stable_1;") + for i in range(self.fornum): + for_num = random.randint(1, 15); + sql = "select * from (" * for_num + sql += "select * from ( select * from ( select " + sql += "%s, " % random.choice(self.s_s_select) + sql += "%s, " % random.choice(self.qt_select) + sql += "ts from stable_1 where " + sql += "%s " % random.choice(self.q_where) + sql += ")) " + sql += ")" * for_num + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + + sql2 = "select * from ( select * from ( select " + sql2 += "%s, " % random.choice(self.s_s_select) + sql2 += "%s, " % random.choice(self.qt_select) + sql2 += "ts from stable_1 where " + sql2 += "%s " % random.choice(self.q_where) + sql2 += ")) " + tdLog.info(sql2) + tdLog.info(len(sql2)) + tdSql.query(sql2) + self.cur1.execute(sql2) + + self.data_matrix_equal('%s' %sql ,1,10,3,3,'%s' %sql2 ,1,10,3,3) + + for i in range(self.fornum): + for_num = random.randint(1, 15); + sql = "select ts from (" * for_num + sql += "select * from ( select * from ( select " + sql += "%s, " % random.choice(self.s_s_select) + sql += "%s, " % random.choice(self.qt_select) + sql += "ts from stable_1 where " + sql += "%s " % random.choice(self.q_where) + sql += ")) " + sql += ")" * for_num + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + + sql2 = "select ts from ( select * from ( select " + sql2 += "%s, " % random.choice(self.s_s_select) + sql2 += "%s, " % random.choice(self.qt_select) + sql2 += "ts from stable_1 where " + sql2 += "%s " % random.choice(self.q_where) + sql2 += ")) " + tdLog.info(sql2) + tdLog.info(len(sql2)) + tdSql.query(sql2) + self.cur1.execute(sql2) + + self.data_matrix_equal('%s' %sql ,1,10,1,1,'%s' %sql2 ,1,10,1,1) + + #3 select ts ,calc from (select * form stable where <\>\in\and\or order by limit ) + #self.dropandcreateDB_random("%s" %db, 1) + tdSql.query("select 3-1 from stable_1;") + for i in range(self.fornum): + sql = "select " + sql += "%s " % random.choice(self.calc_calculate_regular) + sql += " from ( select * from stable_1 where " + sql += "%s " % random.choice(self.qt_where) + sql += "%s " % random.choice(self.orders_desc_where) + sql += "%s " % random.choice(self.limit_where) + sql += ") ;" + tdLog.info(sql) + tdLog.info(len(sql)) + #'Invalid function name: derivative' tdSql.query(sql) + #self.cur1.execute(sql) + + #4 select * from (select calc form stable where <\>\in\and\or order by limit ) + tdSql.query("select 4-1 from stable_1;") + for i in range(self.fornum): + sql = "select * from ( select " + sql += "%s " % random.choice(self.calc_select_in_ts) + sql += "from stable_1 where " + sql += "%s " % random.choice(self.qt_where) + #sql += "%s " % random.choice(self.order_desc_where) + sql += "%s " % random.choice(self.limit_where) + sql += ") ;" + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + + #5 select ts ,tbname from (select * form stable where <\>\in\and\or order by limit ) + tdSql.query("select 5-1 from stable_1;") + for i in range(self.fornum): + sql = "select ts , tbname , " + sql += "%s ," % random.choice(self.calc_calculate_regular) + sql += "%s ," % 
random.choice(self.dqt_select) + sql += "%s " % random.choice(self.qt_select) + sql += " from ( select * from stable_1 where " + sql += "%s " % random.choice(self.qt_where) + sql += "%s " % random.choice(self.orders_desc_where) + sql += "%s " % random.choice(self.limit_where) + sql += ") ;" + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.error(sql) + + #special sql + tdSql.query("select 6-1 from stable_1;") + for i in range(self.fornum): + sql = "select * from ( select _block_dist() from stable_1);" + # tdSql.query(sql) + # tdSql.checkRows(1) + sql = "select _block_dist() from (select * from stable_1);" + tdSql.error(sql) + sql = "select * from (select database());" + tdSql.error(sql) + sql = "select * from (select client_version());" + tdSql.error(sql) + sql = "select * from (select client_version() as version);" + tdSql.error(sql) + sql = "select * from (select server_version());" + tdSql.error(sql) + sql = "select * from (select server_version() as version);" + tdSql.error(sql) + sql = "select * from (select server_status());" + tdSql.error(sql) + sql = "select * from (select server_status() as status);" + tdSql.error(sql) + + #taos -f sql + startTime_taos_f = time.time() + print("taos -f sql start!") + taos_cmd1 = "taos -f %s/%s.sql" % (self.testcasePath,self.testcaseFilename) + _ = subprocess.check_output(taos_cmd1, shell=True).decode("utf-8") + print("taos -f sql over!") + endTime_taos_f = time.time() + print("taos_f total time %ds" % (endTime_taos_f - startTime_taos_f)) + + print('=====================2.6 old function end ===========') + + + + def run(self): + tdSql.prepare() + + startTime = time.time() + + # + + + #self.math_nest(['TAIL']) #TD-16009 + # self.math_nest(['HYPERLOGLOG']) #TD-16038 + # self.math_nest(['UNIQUE']) + + + + # # + #self.function_before_26() #TD-16031 + + # self.math_nest(['ABS','SQRT']) #TD-16042 + # self.math_nest(['SIN','COS','TAN','ASIN','ACOS','ATAN']) + # self.math_nest(['POW','LOG']) #TD-16039 + # self.math_nest(['FLOOR','CEIL','ROUND']) + # #self.math_nest(['SAMPLE']) #TD-16017 + # #self.math_nest(['CSUM']) #TD-15936 crash + # self.math_nest(['MAVG']) + + self.str_nest(['LTRIM','RTRIM','LOWER','UPPER']) + self.str_nest(['LENGTH','CHAR_LENGTH']) + self.str_nest(['SUBSTR']) #TD-16042 + self.str_nest(['CONCAT']) #TD-16002, occasionally fails + self.str_nest(['CONCAT_WS']) #TD-16002, occasionally fails + # self.time_nest(['CAST']) #TD-16017 occasionally fails; handle it together with the time tests + self.time_nest(['CAST_1']) + self.time_nest(['CAST_2']) + self.time_nest(['CAST_3']) + self.time_nest(['CAST_4']) + + + + # self.time_nest(['NOW','TODAY']) # + # self.time_nest(['TIMEZONE']) # + # self.time_nest(['TIMETRUNCATE']) #TD-16039 + # self.time_nest(['TO_ISO8601']) + # self.time_nest(['TO_UNIXTIMESTAMP']) # dumps core frequently + # self.time_nest(['ELAPSED']) + + + endTime = time.time() + print("total time %ds" % (endTime - startTime)) + + + + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/system-test/2-query/percentile.py b/tests/system-test/2-query/percentile.py new file mode 100644 index 0000000000000000000000000000000000000000..8df9bcb9ce4df065a151d33116f1331298ee35fd --- /dev/null +++ b/tests/system-test/2-query/percentile.py @@ -0,0 +1,82 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +from util.log import * +from util.cases import * +from util.sql import * +import numpy as np + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor()) + + self.rowNum = 10 + self.ts = 1537146000000 + + def run(self): + tdSql.prepare() + + intData = [] + floatData = [] + + tdSql.execute('''create table test(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double, + col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned)''') + for i in range(self.rowNum): + tdSql.execute("insert into test values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)" + % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1)) + intData.append(i + 1) + floatData.append(i + 0.1) + + # percentile verification + tdSql.error("select percentile(ts ,20) from test") + tdSql.error("select percentile(col7 ,20) from test") + tdSql.error("select percentile(col8 ,20) from test") + tdSql.error("select percentile(col9 ,20) from test") + column_list = [1,2,3,4,11,12,13,14] + percent_list = [0,50,100] + for i in column_list: + for j in percent_list: + tdSql.query(f"select percentile(col{i}, {j}) from test") + tdSql.checkData(0, 0, np.percentile(intData, j)) + + for i in [5,6]: + for j in percent_list: + tdSql.query(f"select percentile(col{i}, {j}) from test") + tdSql.checkData(0, 0, np.percentile(floatData, j)) + + tdSql.execute("create table meters (ts timestamp, voltage int) tags(loc nchar(20))") + tdSql.execute("create table t0 using meters tags('beijing')") + tdSql.execute("create table t1 using meters tags('shanghai')") + for i in range(self.rowNum): + tdSql.execute("insert into t0 values(%d, %d)" % (self.ts + i, i + 1)) + tdSql.execute("insert into t1 values(%d, %d)" % (self.ts + i, i + 1)) + + # tdSql.error("select percentile(voltage, 20) from meters") + + + + tdSql.execute("create table st(ts timestamp, k int)") + tdSql.execute("insert into st values(now, -100)(now+1a,-99)") + tdSql.query("select apercentile(k, 20) from st") + tdSql.checkData(0, 0, -100.00) + + + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/system-test/2-query/sample.py b/tests/system-test/2-query/sample.py new file mode 100644 index 0000000000000000000000000000000000000000..94e06347d2923fc60d99768c667b927dde5dfd83 --- /dev/null +++ b/tests/system-test/2-query/sample.py @@ -0,0 +1,862 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import subprocess +import random +import math +import numpy as np +import inspect +import re +import taos + +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor()) + + def sample_query_form(self, sel="select", func="sample(", col="c1", m_comm =",", k=1,r_comm=")", alias="", fr="from",table_expr="t1", condition=""): + ''' + sample function: + + :param sel: string, must be "select", required parameter; + :param func: string, in this case must be "sample(", otherwise another function is built, required parameter; + :param col: string, column name, required parameter; + :param m_comm: string, the comma between col and k, required parameter; + :param k: int/float, the number of rows to sample, [1,1000], required parameter; + :param r_comm: string, must be ")", used with the "(" in func, required parameter; + :param alias: string, an alias for the result column, or additional select expressions; + :param fr: string, must be "from", required parameter; + :param table_expr: string or expression, data source (e.g. table/stable name, result set), required parameter; + :param condition: expression; + :return: sample query statement, default: select sample(c1, 1) from t1 + ''' + + return f"{sel} {func} {col} {m_comm} {k} {r_comm} {alias} {fr} {table_expr} {condition}" + + def checksample(self,sel="select", func="sample(", col="c1", m_comm =",", k=1,r_comm=")", alias="", fr="from",table_expr="t1", condition=""): + # print(self.sample_query_form(sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + # table_expr=table_expr, condition=condition)) + line = sys._getframe().f_back.f_lineno + + if not all([sel , func , col , m_comm , k , r_comm , fr , table_expr]): + print(f"case in {line}: ", end='') + return tdSql.error(self.sample_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + + + sql = "select * from t1" + collist = tdSql.getColNameList(sql) + + if not isinstance(col, str): + print(f"case in {line}: ", end='') + return tdSql.error(self.sample_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + + if len([x for x in col.split(",") if x.strip()]) != 1: + print(f"case in {line}: ", end='') + return tdSql.error(self.sample_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + + col = col.replace(",", "").replace(" ","") + + if any([re.compile('^[a-zA-Z]{1}.*$').match(col) is None , not col.replace(".","").isalnum()]): + print(f"case in {line}: ", end='') + return tdSql.error(self.sample_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + + if '.'
in col: + if any([col.split(".")[0] not in table_expr, col.split(".")[1] not in collist]): + print(f"case in {line}: ", end='') + return tdSql.error(self.sample_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + pass + + if "." not in col: + if col not in collist: + print(f"case in {line}: ", end='') + return tdSql.error(self.sample_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + pass + + # colname = col if "." not in col else col.split(".")[1] + # col_index = collist.index(colname) + # if any([tdSql.cursor.istype(col_index, "TIMESTAMP"), tdSql.cursor.istype(col_index, "BOOL")]): + # print(f"case in {line}: ", end='') + # return tdSql.error(self.sample_query_form( + # sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + # table_expr=table_expr, condition=condition + # )) + # + # if any([tdSql.cursor.istype(col_index, "BINARY") , tdSql.cursor.istype(col_index,"NCHAR")]): + # print(f"case in {line}: ", end='') + # return tdSql.error(self.sample_query_form( + # sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + # table_expr=table_expr, condition=condition + # )) + + if any( [func != "sample(" , r_comm != ")" , fr != "from", sel != "select"]): + print(f"case in {line}: ", end='') + return tdSql.error(self.sample_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + + if all(["(" not in table_expr, "stb" in table_expr, "group" not in condition.lower()]): + print(f"case in {line}: ", end='') + return tdSql.error(self.sample_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + + if all(["group" in condition.lower(), "tbname" not in condition.lower()]): + print(f"case in {line}: ", end='') + return tdSql.error(self.sample_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + + alias_list = ["tbname", "_c0", "st", "ts"] + if all([alias, "," not in alias]): + if any([ not alias.isalnum(), re.compile('^[a-zA-Z]{1}.*$').match(col) is None ]): + # column aliases actually also support "_", but that is forbidden in this case. + print(f"case in {line}: ", end='') + return tdSql.error(self.sample_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + + if all([alias, "," in alias]): + if all(parm != alias.lower().split(",")[1].strip() for parm in alias_list): + print(f"case in {line}: ", end='') + return tdSql.error(self.sample_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + pass + + condition_exception = [ "-", "+", "/", "*", "~", "^", "insert", "distinct", + "count", "avg", "twa", "irate", "sum", "stddev", "leastsquares", + "min", "max", "first", "last", "top", "bottom", "percentile", + "apercentile", "last_row", "interp", "diff", "derivative", + "spread", "ceil", "floor", "round", "interval", "fill", "slimit", "soffset"] + if "union" not in condition.lower(): + if any(parm in condition.lower().strip() for parm in condition_exception): + + print(f"case in {line}: ",
end='') + return tdSql.error(self.sample_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + pass + + if not any([isinstance(k, int) , isinstance(k, float)]) : + print(f"case in {line}: ", end='') + return tdSql.error(self.sample_query_form( + col=col, k=k, alias=alias, table_expr=table_expr, condition=condition + )) + + if not(1 <= k < 1001): + print(f"case in {line}: ", end='') + return tdSql.error(self.sample_query_form( + col=col, k=k, alias=alias, table_expr=table_expr, condition=condition + )) + + k = int(k // 1) + pre_sql = re.sub("sample\([a-z0-9 .,]*\)", f"count({col})", self.sample_query_form( + col=col, table_expr=table_expr, condition=condition + )) + tdSql.query(pre_sql) + if tdSql.queryRows == 0: + tdSql.query(self.sample_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + print(f"case in {line}: ", end='') + tdSql.checkRows(0) + return + + tdSql.query(self.sample_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + + sample_result = tdSql.queryResult + sample_len = tdSql.queryRows + + if "group" in condition: + tb_condition = condition.split("group by")[1].split(" ")[1] + tdSql.query(f"select distinct {tb_condition} from {table_expr}") + query_result = tdSql.queryResult + query_rows = tdSql.queryRows + clear_condition = re.sub('order by [0-9a-z]*|slimit [0-9]*|soffset [0-9]*', "", condition) + + pre_row = 0 + for i in range(query_rows): + group_name = query_result[i][0] + if "where" in clear_condition: + pre_condition = re.sub('group by [0-9a-z]*', f"and {tb_condition}='{group_name}' and {col} is not null", clear_condition) + else: + pre_condition = "where " + re.sub('group by [0-9a-z]*',f"{tb_condition}='{group_name}' and {col} is not null", clear_condition) + + tdSql.query(f"select ts, {col} {alias} from {table_expr} {pre_condition}") + # pre_data = np.array(tdSql.queryResult)[np.array(tdSql.queryResult) != None] + # pre_sample = np.convolve(pre_data, np.ones(k), "valid")/k + pre_sample = tdSql.queryResult + pre_len = tdSql.queryRows + step = pre_len if pre_len < k else k + # tdSql.query(self.sample_query_form( + # sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + # table_expr=table_expr, condition=condition + # )) + for i in range(step): + if sample_result[pre_row:pre_row+step][i] not in pre_sample: + tdLog.exit(f"case in {line} is failed: sample data is not in {group_name}") + else: + tdLog.info(f"case in {line} is success: sample data is in {group_name}") + + # for j in range(len(pre_sample)): + # print(f"case in {line}:", end='') + # tdSql.checkData(pre_row+j, 1, pre_sample[j]) + pre_row += step + return + elif "union" in condition: + union_sql_0 = self.sample_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + ).split("union all")[0] + + union_sql_1 = self.sample_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + ).split("union all")[1] + + tdSql.query(union_sql_0) + # union_sample_0 = tdSql.queryResult + row_union_0 = tdSql.queryRows + + tdSql.query(union_sql_1) + # union_sample_1 = tdSql.queryResult + row_union_1 = tdSql.queryRows + + 
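+            # note: sample() output is non-deterministic, so rather than comparing
+            # row-for-row, re-run the whole union statement below and only assert
+            # that the two branches' row counts add up to the sampled row count.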
+            tdSql.query(self.sample_query_form(
+                sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr,
+                table_expr=table_expr, condition=condition
+            ))
+            # for i in range(tdSql.queryRows):
+            #     print(f"case in {line}: ", end='')
+            #     if i < row_union_0:
+            #         tdSql.checkData(i, 1, union_sample_0[i][1])
+            #     else:
+            #         tdSql.checkData(i, 1, union_sample_1[i-row_union_0][1])
+            if row_union_0 + row_union_1 != sample_len:
+                tdLog.exit(f"case in {line} is failed: union branch rows ({row_union_0}+{row_union_1}) do not match sample rows ({sample_len})")
+            else:
+                tdLog.info(f"case in {line} is success: union branch rows match sample rows")
+            return
+
+        else:
+            if "where" in condition:
+                condition = re.sub('where', f"where {col} is not null and ", condition)
+            else:
+                # keep a space before the original condition, otherwise
+                # "... is not null" and e.g. "limit 1" would be glued together
+                condition = f"where {col} is not null " + condition
+            print(f"select ts, {col} {alias} from {table_expr} {re.sub('limit [0-9]*|offset [0-9]*','',condition)}")
+            tdSql.query(f"select ts, {col} {alias} from {table_expr} {re.sub('limit [0-9]*|offset [0-9]*','',condition)}")
+            # offset_val = condition.split("offset")[1].split(" ")[1] if "offset" in condition else 0
+            pre_sample = tdSql.queryResult
+            # pre_len = tdSql.queryRows
+            # for i in range(sample_len):
+            #     if sample_result[pre_row:pre_row + step][i] not in pre_sample:
+            #         tdLog.exit(f"case in {line} is failed: sample data is not in {group_name}")
+            #     else:
+            #         tdLog.info(f"case in {line} is success: sample data is in {group_name}")
+
+        pass
+
+    def sample_current_query(self):
+
+        # table schema: ts timestamp, c1 int, c2 float, c3 timestamp, c4 binary(16), c5 double, c6 bool,
+        # c7 bigint, c8 smallint, c9 tinyint, c10 nchar(16)
+
+        # case1~6: numeric cols: int/bigint/tinyint/smallint/float/double
+        self.checksample()
+        case2 = {"col": "c2"}
+        self.checksample(**case2)
+        case3 = {"col": "c5"}
+        self.checksample(**case3)
+        case4 = {"col": "c7"}
+        self.checksample(**case4)
+        case5 = {"col": "c8"}
+        self.checksample(**case5)
+        case6 = {"col": "c9"}
+        self.checksample(**case6)
+
+        # # case7~8: nested query
+        # case7 = {"table_expr": "(select c1 from stb1)"}
+        # self.checksample(**case7)
+        # case8 = {"table_expr": "(select sample(c1, 1) c1 from stb1 group by tbname)"}
+        # self.checksample(**case8)
+
+        # case9~10: mix with tbname/ts/tag/col
+        # case9 = {"alias": ", tbname"}
+        # self.checksample(**case9)
+        # case10 = {"alias": ", _c0"}
+        # self.checksample(**case10)
+        case11 = {"alias": ", st1"}
+        self.checksample(**case11)
+        case12 = {"alias": ", c1"}
+        self.checksample(**case12)
+
+        # case13~15: with single condition
+        case13 = {"condition": "where c1 <= 10"}
+        self.checksample(**case13)
+        case14 = {"condition": "where c6 in (0, 1)"}
+        self.checksample(**case14)
+        case15 = {"condition": "where c1 between 1 and 10"}
+        self.checksample(**case15)
+
+        # case16: with multi-condition
+        case16 = {"condition": "where c6=1 or c6 =0"}
+        self.checksample(**case16)
+
+        # # case17: only support normal table join
+        # case17 = {
+        #     "col": "t1.c1",
+        #     "table_expr": "t1, t2",
+        #     "condition": "where t1.ts=t2.ts"
+        # }
+        # self.checksample(**case17)
+        # # case18~19: with group by
+        # case19 = {
+        #     "table_expr": "stb1",
+        #     "condition": "partition by tbname"
+        # }
+        # self.checksample(**case19)
+
+        # # case20~21: with order by
+        # case20 = {"condition": "order by ts"}
+        # self.checksample(**case20)
+        # case21 = {
+        #     "table_expr": "stb1",
+        #     "condition": "partition by tbname order by tbname"
+        # }
+        # self.checksample(**case21)
+
+        # case22: with union
+        case22 = {
+            "condition": "union all select sample( c1 , 1 ) from t2"
+        }
+        self.checksample(**case22)
+
+        # case23: with limit/slimit
+        case23 = {
+            "condition": "limit 1"
+        }
+        self.checksample(**case23)
+
+        # case24~26: k ranges over [1, 1000], can be int or float, k = floor(k)
+        case24 = {"k": 3}
+        self.checksample(**case24)
+        case25 = {"k": 2.999}
+        self.checksample(**case25)
+        case26 = {"k": 1000}
+        self.checksample(**case26)
+        case27 = {
+            "table_expr": "stb1",
+            "condition": "group by tbname slimit 1 "
+        }
+        self.checksample(**case27)  # with slimit
+        case28 = {
+            "table_expr": "stb1",
+            "condition": "group by tbname slimit 1 soffset 1"
+        }
+        self.checksample(**case28)  # with soffset
+
+        pass
+
+    def sample_error_query(self) -> None:
+        # unusual test
+
+        # form test
+        err1 = {"col": ""}
+        self.checksample(**err1)    # no col
+        err2 = {"sel": ""}
+        self.checksample(**err2)    # no select
+        err3 = {"func": "sample", "col": "", "m_comm": "", "k": "", "r_comm": ""}
+        self.checksample(**err3)    # incomplete form: select sample from
+        err4 = {"col": "", "m_comm": "", "k": ""}
+        self.checksample(**err4)    # incomplete form: select sample() from
+        err5 = {"func": "sample", "r_comm": ""}
+        self.checksample(**err5)    # no brackets: select sample col, k from
+        err6 = {"fr": ""}
+        self.checksample(**err6)    # no from
+        err7 = {"k": ""}
+        self.checksample(**err7)    # no k
+        err8 = {"table_expr": ""}
+        self.checksample(**err8)    # no table_expr
+
+        # err9 = {"col": "st1"}
+        # self.checksample(**err9)    # col: tag
+        tdSql.query(" select sample(st1 ,1) from t1 ")
+        err10 = {"col": 1}
+        self.checksample(**err10)   # col: value
+        err11 = {"col": "NULL"}
+        self.checksample(**err11)   # col: NULL
+        err12 = {"col": "%_"}
+        self.checksample(**err12)   # col: %_
+        err13 = {"col": "c3"}
+        self.checksample(**err13)   # col: timestamp col
+        err14 = {"col": "_c0"}
+        # self.checksample(**err14)   # col: primary key
+        err15 = {"col": "avg(c1)"}
+        # self.checksample(**err15)   # expr col
+        err16 = {"col": "c4"}
+        self.checksample(**err16)   # binary col
+        err17 = {"col": "c10"}
+        self.checksample(**err17)   # nchar col
+        err18 = {"col": "c6"}
+        self.checksample(**err18)   # bool col
+        err19 = {"col": "'c1'"}
+        self.checksample(**err19)   # col: string
+        err20 = {"col": None}
+        self.checksample(**err20)   # col: None
+        err21 = {"col": "''"}
+        self.checksample(**err21)   # col: ''
+        err22 = {"col": "tt1.c1"}
+        self.checksample(**err22)   # col not in table_expr
+        err23 = {"col": "t1"}
+        self.checksample(**err23)   # tbname
+        err24 = {"col": "stb1"}
+        self.checksample(**err24)   # stbname
+        err25 = {"col": "db"}
+        self.checksample(**err25)   # database name
+        err26 = {"col": "True"}
+        self.checksample(**err26)   # col: BOOL 1
+        err27 = {"col": True}
+        self.checksample(**err27)   # col: BOOL 2
+        err28 = {"col": "*"}
+        self.checksample(**err28)   # col: all columns
+        err29 = {"func": "sample[", "r_comm": "]"}
+        self.checksample(**err29)   # form: sample[col, k]
+        err30 = {"func": "sample{", "r_comm": "}"}
+        self.checksample(**err30)   # form: sample{col, k}
+        err31 = {"col": "[c1]"}
+        self.checksample(**err31)   # form: sample([col], k)
+        err32 = {"col": "c1, c2"}
+        self.checksample(**err32)   # form: sample(col, col2, k)
+        err33 = {"col": "c1, 2"}
+        self.checksample(**err33)   # form: sample(col, k1, k2)
+        err34 = {"alias": ", count(c1)"}
+        self.checksample(**err34)   # mix with aggregate function 1
+        err35 = {"alias": ", avg(c1)"}
+        self.checksample(**err35)   # mix with aggregate function 2
+        err36 = {"alias": ", min(c1)"}
+        self.checksample(**err36)   # mix with selection function 1
+        err37 = {"alias": ", top(c1, 5)"}
+        self.checksample(**err37)   # mix with selection function 2
+        err38 = {"alias": ", spread(c1)"}
+        self.checksample(**err38)   # mix with calculation function 1
+        err39 = {"alias": ", diff(c1)"}
+        self.checksample(**err39)   # mix with calculation function 2
+        # err40 = {"alias": "+ 2"}
+        # self.checksample(**err40)   # mix with arithmetic 1
+        # tdSql.query(" select sample(c1 , 1) + 2 from t1 ")
+        err41 = {"alias": "+ avg(c1)"}
+        self.checksample(**err41)   # mix with arithmetic 2
+        err42 = {"alias": ", c1"}
+        self.checksample(**err42)   # mix with other col
+        # err43 = {"table_expr": "stb1"}
+        # self.checksample(**err43)  # select stb directly
+        err44 = {
+            "col": "stb1.c1",
+            "table_expr": "stb1, stb2",
+            "condition": "where stb1.ts=stb2.ts and stb1.st1=stb2.st2 order by stb1.ts"
+        }
+        self.checksample(**err44)   # stb join
+        err45 = {
+            "condition": "where ts>0 and ts < now interval(1h) fill(next)"
+        }
+        self.checksample(**err45)   # interval
+        err46 = {
+            "table_expr": "t1",
+            "condition": "group by c6"
+        }
+        # self.checksample(**err46)   # group by normal col
+
+        err49 = {"k": "2021-01-01 00:00:00.000"}
+        self.checksample(**err49)   # k: timestamp
+        err50 = {"k": False}
+        self.checksample(**err50)   # k: False
+        err51 = {"k": "%"}
+        self.checksample(**err51)   # k: special char
+        err52 = {"k": ""}
+        self.checksample(**err52)   # k: ""
+        err53 = {"k": None}
+        self.checksample(**err53)   # k: None
+        err54 = {"k": "NULL"}
+        self.checksample(**err54)   # k: null
+        err55 = {"k": "binary(4)"}
+        self.checksample(**err55)   # k: string
+        err56 = {"k": "c1"}
+        self.checksample(**err56)   # k: string, col name
+        err57 = {"col": "c1, 1, c2"}
+        self.checksample(**err57)   # form: sample(col1, k1, col2, k2)
+        err58 = {"col": "c1 cc1"}
+        self.checksample(**err58)   # form: sample(col newname, k)
+        err59 = {"k": "'1'"}
+        # self.checksample(**err59)   # form: sample(col, "1")
+        err60 = {"k": "-1-(-2)"}
+        # self.checksample(**err60)   # form: sample(col, -1-(-2))
+        err61 = {"k": 1001}
+        self.checksample(**err61)   # k: right out of [1, 1000]
+        err62 = {"k": -1}
+        self.checksample(**err62)   # k: negative number
+        err63 = {"k": 0}
+        self.checksample(**err63)   # k: 0
+        err64 = {"k": 2**63-1}
+        self.checksample(**err64)   # k: max(bigint)
+        err65 = {"k": 1-2**63}
+        # self.checksample(**err65)   # k: min(bigint)
+        err66 = {"k": -2**63}
+        self.checksample(**err66)   # k: NULL
+        err67 = {"k": 0.999999}
+        self.checksample(**err67)   # k: left out of [1, 1000]
+
+        pass
+
+    def sample_test_data(self, tbnum: int, data_row: int, basetime: int) -> None:
+        for i in range(tbnum):
+            for j in range(data_row):
+                tdSql.execute(
+                    f"insert into t{i} values ("
+                    f"{basetime + (j+1)*10}, {random.randint(-200, -1)}, {random.uniform(200, -1)}, {basetime + random.randint(-200, -1)}, "
+                    f"'binary_{j}', {random.uniform(-200, -1)}, {random.choice([0,1])}, {random.randint(-200,-1)}, "
+                    f"{random.randint(-200, -1)}, {random.randint(-127, -1)}, 'nchar_{j}' )"
+                )
+
+                tdSql.execute(
+                    f"insert into t{i} values ("
+                    f"{basetime - (j+1) * 10}, {random.randint(1, 200)}, {random.uniform(1, 200)}, {basetime - random.randint(1, 200)}, "
+                    f"'binary_{j}_1', {random.uniform(1, 200)}, {random.choice([0, 1])}, {random.randint(1,200)}, "
+                    f"{random.randint(1,200)}, {random.randint(1,127)}, 'nchar_{j}_1' )"
+                )
+                tdSql.execute(
+                    f"insert into tt{i} values ( {basetime-(j+1) * 10}, {random.randint(1, 200)} )"
+                )
+
+        pass
+
+    def sample_test_table(self, tbnum: int) -> None:
+        tdSql.execute("drop database if exists db")
+        tdSql.execute("create database if not exists db keep 3650")
+        tdSql.execute("use db")
+
+        tdSql.execute(
+            "create stable db.stb1 (\
+             ts timestamp, c1 int, c2 float, c3 timestamp, c4 binary(16), c5 double, c6 bool, \
+             c7 bigint, c8 smallint, c9 tinyint, c10 nchar(16)\
+             ) \
+            tags(st1 int)"
+        )
+        tdSql.execute(
+            "create stable db.stb2 (ts timestamp, c1 int) tags(st2 int)"
+        )
+        for i in range(tbnum):
+            tdSql.execute(f"create table t{i} using stb1 tags({i})")
+            tdSql.execute(f"create table tt{i} using stb2 tags({i})")
+
+        pass
+
+
+    def check_sample(self, sample_query, origin_query):
+
+        tdSql.query(origin_query)
+
+        origin_datas = tdSql.queryResult
+
+        tdSql.query(sample_query)
+
+        sample_datas = tdSql.queryResult
+        status = True
+        for ind, sample_data in enumerate(sample_datas):
+            if sample_data not in origin_datas:
+                status = False
+
+        if status:
+            tdLog.info("sample data is within the origin query result set, successful sql: %s" % sample_query)
+        else:
+            tdLog.exit("sample data is not within the origin query result set, failed sql: %s" % sample_query)
+
+
+    def basic_sample_query(self):
+        tdSql.execute(" drop database if exists db ")
+        tdSql.execute(" create database if not exists db days 300 ")
+        tdSql.execute(" use db ")
+        tdSql.execute(
+            '''create table stb1
+            (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
+            tags (t1 int)
+            '''
+        )
+
+        tdSql.execute(
+            '''
+            create table t1
+            (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
+            '''
+        )
+        for i in range(4):
+            tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
+
+        for i in range(9):
+            tdSql.execute(
+                f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
+            )
+            tdSql.execute(
+                f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
+            )
+        tdSql.execute("insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
+        tdSql.execute("insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
+        tdSql.execute("insert into ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
+        tdSql.execute("insert into ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
+
+        tdSql.execute("insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+        tdSql.execute("insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+        tdSql.execute("insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+
+        tdSql.execute(
+            f'''insert into t1 values
+            ( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
+            ( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a )
+            ( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a )
+            ( '2021-01-01 01:01:06.000', 3, 33333, 333, 33, 3.33, 33.33, 0, "binary3", "nchar3", now()+3a )
+            ( '2021-05-07 01:01:10.000', 4, 44444, 444, 44, 4.44, 44.44, 1, "binary4", "nchar4", now()+4a )
+            ( '2021-07-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
+            ( '2021-09-30 01:01:16.000', 5, 55555, 555, 55, 5.55, 55.55, 0, "binary5", "nchar5", now()+5a )
+            ( '2022-02-01 01:01:20.000', 6, 66666, 666, 66, 6.66, 66.66, 1, "binary6", "nchar6", now()+6a 
) + ( '2022-10-28 01:01:26.000', 7, 00000, 000, 00, 0.00, 00.00, 1, "binary7", "nchar7", "1970-01-01 08:00:00.000" ) + ( '2022-12-01 01:01:30.000', 8, -88888, -888, -88, -8.88, -88.88, 0, "binary8", "nchar8", "1969-01-01 01:00:00.000" ) + ( '2022-12-31 01:01:36.000', 9, -99999999999999999, -999, -99, -9.99, -999999999999999999999.99, 1, "binary9", "nchar9", "1900-01-01 00:00:00.000" ) + ( '2023-02-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ''' + ) + + # basic query for sample + + # params test for all + tdSql.error(" select sample(c1,c1) from t1 ") + tdSql.error(" select sample(c1,now) from t1 ") + tdSql.error(" select sample(c1,tbname) from t1 ") + tdSql.error(" select sample(c1,ts) from t1 ") + tdSql.error(" select sample(c1,false) from t1 ") + tdSql.error(" select sample(123,1) from t1 ") + + tdSql.query(" select sample(c1,2) from t1 ") + tdSql.checkRows(2) + tdSql.query(" select sample(c1,10) from t1 ") + tdSql.checkRows(9) + tdSql.query(" select sample(c8,10) from t1 ") + tdSql.checkRows(9) + tdSql.query(" select sample(c1,999) from t1 ") + tdSql.checkRows(9) + tdSql.query(" select sample(c1,1000) from t1 ") + tdSql.checkRows(9) + tdSql.query(" select sample(c8,1000) from t1 ") + tdSql.checkRows(9) + tdSql.error(" select sample(c1,-1) from t1 ") + + # bug need fix + # tdSql.query("select sample(c1 ,2) , 123 from stb1;") + + # all type support + tdSql.query(" select sample(c1 , 20 ) from ct4 ") + tdSql.checkRows(9) + + tdSql.query(" select sample(c2 , 20 ) from ct4 ") + tdSql.checkRows(9) + + tdSql.query(" select sample(c3 , 20 ) from ct4 ") + tdSql.checkRows(9) + + tdSql.query(" select sample(c4 , 20 ) from ct4 ") + tdSql.checkRows(9) + + tdSql.query(" select sample(c5 , 20 ) from ct4 ") + tdSql.checkRows(9) + + tdSql.query(" select sample(c6 , 20 ) from ct4 ") + tdSql.checkRows(9) + + tdSql.query(" select sample(c7 , 20 ) from ct4 ") + tdSql.checkRows(9) + + tdSql.query(" select sample(c8 , 20 ) from ct4 ") + tdSql.checkRows(9) + + tdSql.query(" select sample(c9 , 20 ) from ct4 ") + tdSql.checkRows(9) + + tdSql.query(" select sample(c10 , 20 ) from ct4 ") + tdSql.checkRows(9) + + tdSql.query(" select sample(t1 , 20 ) from ct1 ") + tdSql.checkRows(13) + # filter data + + tdSql.query(" select sample(c1, 20 ) from t1 where c1 is null ") + tdSql.checkRows(0) + + tdSql.query(" select sample(c1, 20 ) from t1 where c1 =6 ") + tdSql.checkRows(1) + + tdSql.query(" select sample(c1, 20 ) from t1 where c1 > 6 ") + tdSql.checkRows(3) + + self.check_sample("select sample(c1, 20 ) from t1 where c1 > 6" , "select c1 from t1 where c1 > 6") + + tdSql.query(" select sample( c1 , 1 ) from t1 where c1 in (0, 1,2) ") + tdSql.checkRows(1) + + tdSql.query("select sample( c1 ,3 ) from t1 where c1 between 1 and 10 ") + tdSql.checkRows(3) + + self.check_sample("select sample( c1 ,3 ) from t1 where c1 between 1 and 10" ,"select c1 from t1 where c1 between 1 and 10") + + # join + + tdSql.query("select sample( ct4.c1 , 1 ) from ct1, ct4 where ct4.ts=ct1.ts") + + # partition by tbname + + tdSql.query("select sample(c1,2) from stb1 partition by tbname") + tdSql.checkRows(4) + + self.check_sample("select sample(c1,2) from stb1 partition by tbname" , "select c1 from stb1 partition by tbname") + + # nest query + # tdSql.query("select sample(c1,2) from (select c1 from t1); ") + # tdSql.checkRows(2) + + # union all + tdSql.query("select sample(c1,2) from t1 union all select sample(c1,3) from t1") + tdSql.checkRows(5) + + # fill interval + + # not support mix with other function + 
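+        # sample() is a selection function; combining it with another selection
+        # function, an aggregate, or a bare column in one select list must fail:
+        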
tdSql.error("select top(c1,2) , sample(c1,2) from ct1") + tdSql.error("select max(c1) , sample(c1,2) from ct1") + tdSql.error("select c1 , sample(c1,2) from ct1") + + # bug for mix with scalar + # tdSql.error("select 123 , sample(c1,100) from ct1") + # tdSql.error("select sample(c1,100)+2 from ct1") + # tdSql.error("select abs(sample(c1,100)) from ct1") + + def sample_test_run(self) : + tdLog.printNoPrefix("==========TD-10594==========") + tbnum = 10 + nowtime = int(round(time.time() * 1000)) + per_table_rows = 10 + self.sample_test_table(tbnum) + + tdLog.printNoPrefix("######## no data test:") + self.sample_current_query() + self.sample_error_query() + + tdLog.printNoPrefix("######## insert only NULL test:") + for i in range(tbnum): + tdSql.execute(f"insert into t{i}(ts) values ({nowtime - 5})") + tdSql.execute(f"insert into t{i}(ts) values ({nowtime + 5})") + self.sample_current_query() + self.sample_error_query() + + tdLog.printNoPrefix("######## insert data in the range near the max(bigint/double):") + # self.sample_test_table(tbnum) + # tdSql.execute(f"insert into t1(ts, c1,c2,c5,c7) values " + # f"({nowtime - (per_table_rows + 1) * 10}, {2**31-1}, {3.4*10**38}, {1.7*10**308}, {2**63-1})") + # tdSql.execute(f"insert into t1(ts, c1,c2,c5,c7) values " + # f"({nowtime - (per_table_rows + 2) * 10}, {2**31-1}, {3.4*10**38}, {1.7*10**308}, {2**63-1})") + # self.sample_current_query() + # self.sample_error_query() + + tdLog.printNoPrefix("######## insert data in the range near the min(bigint/double):") + # self.sample_test_table(tbnum) + # tdSql.execute(f"insert into t1(ts, c1,c2,c5,c7) values " + # f"({nowtime - (per_table_rows + 1) * 10}, {1-2**31}, {-3.4*10**38}, {-1.7*10**308}, {1-2**63})") + # tdSql.execute(f"insert into t1(ts, c1,c2,c5,c7) values " + # f"({nowtime - (per_table_rows + 2) * 10}, {1-2**31}, {-3.4*10**38}, {-1.7*10**308}, {512-2**63})") + # self.sample_current_query() + # self.sample_error_query() + + tdLog.printNoPrefix("######## insert data without NULL data test:") + self.sample_test_table(tbnum) + self.sample_test_data(tbnum, per_table_rows, nowtime) + self.sample_current_query() + self.sample_error_query() + + + tdLog.printNoPrefix("######## insert data mix with NULL test:") + for i in range(tbnum): + tdSql.execute(f"insert into t{i}(ts) values ({nowtime})") + tdSql.execute(f"insert into t{i}(ts) values ({nowtime-(per_table_rows+3)*10})") + tdSql.execute(f"insert into t{i}(ts) values ({nowtime+(per_table_rows+3)*10})") + self.sample_current_query() + self.sample_error_query() + + + + tdLog.printNoPrefix("######## check after WAL test:") + tdSql.query("show dnodes") + index = tdSql.getData(0, 0) + tdDnodes.stop(index) + tdDnodes.start(index) + self.sample_current_query() + self.sample_error_query() + + self.basic_sample_query() + + def run(self): + import traceback + try: + # run in develop branch + self.sample_test_run() + pass + except Exception as e: + traceback.print_exc() + raise e + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) \ No newline at end of file diff --git a/tests/system-test/2-query/spread.py b/tests/system-test/2-query/spread.py new file mode 100644 index 0000000000000000000000000000000000000000..d2dbbd03ede83d65ee475db23da144c2c4d6f4e7 --- /dev/null +++ b/tests/system-test/2-query/spread.py @@ -0,0 +1,358 @@ +import datetime + +from util.log import * +from util.sql import * +from util.cases import * +from util.dnodes 
import *
+
+PRIMARY_COL = "ts"
+
+INT_COL = "c1"
+BINT_COL = "c2"
+SINT_COL = "c3"
+TINT_COL = "c4"
+FLOAT_COL = "c5"
+DOUBLE_COL = "c6"
+BOOL_COL = "c7"
+
+BINARY_COL = "c8"
+NCHAR_COL = "c9"
+TS_COL = "c10"
+
+NUM_COL = [ INT_COL, BINT_COL, SINT_COL, TINT_COL, FLOAT_COL, DOUBLE_COL, ]
+CHAR_COL = [ BINARY_COL, NCHAR_COL, ]
+BOOLEAN_COL = [ BOOL_COL, ]
+TS_TYPE_COL = [ TS_COL, ]
+
+ALL_COL = [ INT_COL, BINT_COL, SINT_COL, TINT_COL, FLOAT_COL, DOUBLE_COL, BOOL_COL, BINARY_COL, NCHAR_COL, TS_COL ]
+
+class TDTestCase:
+
+    def init(self, conn, logSql):
+        tdLog.debug(f"start to execute {__file__}")
+        tdSql.init(conn.cursor())
+
+    def __query_condition(self,tbname):
+        query_condition = [f"cast({col} as bigint)" for col in ALL_COL]
+        for num_col in NUM_COL:
+            query_condition.extend(
+                (
+                    f"{tbname}.{num_col}",
+                    f"abs( {tbname}.{num_col} )",
+                    f"acos( {tbname}.{num_col} )",
+                    f"asin( {tbname}.{num_col} )",
+                    f"atan( {tbname}.{num_col} )",
+                    f"avg( {tbname}.{num_col} )",
+                    f"ceil( {tbname}.{num_col} )",
+                    f"cos( {tbname}.{num_col} )",
+                    f"count( {tbname}.{num_col} )",
+                    f"floor( {tbname}.{num_col} )",
+                    f"log( {tbname}.{num_col}, {tbname}.{num_col})",
+                    f"max( {tbname}.{num_col} )",
+                    f"min( {tbname}.{num_col} )",
+                    f"pow( {tbname}.{num_col}, 2)",
+                    f"round( {tbname}.{num_col} )",
+                    f"sum( {tbname}.{num_col} )",
+                    f"sin( {tbname}.{num_col} )",
+                    f"sqrt( {tbname}.{num_col} )",
+                    f"tan( {tbname}.{num_col} )",
+                    f"cast( {tbname}.{num_col} as timestamp)",
+                )
+            )
+        # num_col is deliberately the last value left over from the loop above
+        for any_col in ALL_COL:
+            query_condition.append(f"{num_col} + {any_col}")
+        for char_col in CHAR_COL:
+            query_condition.extend(
+                (
+                    f"count({tbname}.{char_col})",
+                    f"sum(cast({tbname}.{char_col} as bigint))",
+                    f"max(cast({tbname}.{char_col} as bigint))",
+                    f"min(cast({tbname}.{char_col} as bigint))",
+                    f"avg(cast({tbname}.{char_col} as bigint))",
+                )
+            )
+        query_condition.extend(
+            (
+                1010,
+            )
+        )
+
+        return query_condition
+
+    def __join_condition(self, tb_list, filter=PRIMARY_COL, INNER=False):
+        table_reference = tb_list[0]
+        join_condition = table_reference
+        join = "inner join" if INNER else "join"
+        for i in range(len(tb_list[1:])):
+            join_condition += f" {join} {tb_list[i+1]} on {table_reference}.{filter}={tb_list[i+1]}.{filter}"
+
+        return join_condition
+
+    def __where_condition(self, col=None, tbname=None, query_conditon=None):
+        if query_conditon and isinstance(query_conditon, str):
+            if query_conditon.startswith("count"):
+                query_conditon = query_conditon[6:-1]
+            elif query_conditon.startswith("max"):
+                query_conditon = query_conditon[4:-1]
+            elif query_conditon.startswith("sum"):
+                query_conditon = query_conditon[4:-1]
+            elif query_conditon.startswith("min"):
+                query_conditon = query_conditon[4:-1]
+
+        if query_conditon:
+            return f" where {query_conditon} is not null"
+        if col in NUM_COL:
+            return f" where abs( {tbname}.{col} ) >= 0"
+        if col in CHAR_COL:
+            return f" where lower( {tbname}.{col} ) like 'bina%' or lower( {tbname}.{col} ) like '_cha%' "
+        if col in BOOLEAN_COL:
+            return f" where {tbname}.{col} in (false, true) "
+        if col in TS_TYPE_COL or col == PRIMARY_COL:
+            return f" where cast( {tbname}.{col} as binary(16) ) is not null "
+
+        return ""
+
+    def __group_condition(self, col, having = None):
+        if isinstance(col, str):
+            if col.startswith("count"):
+                col = col[6:-1]
+            elif col.startswith("max"):
+                col = col[4:-1]
+            elif col.startswith("sum"):
+                col = col[4:-1]
+            elif col.startswith("min"):
+                col = col[4:-1]
+        return f" group by {col} having {having}" if having else f" group by {col} "
+
+    def 
__single_sql(self, select_clause, from_clause, where_condition="", group_condition=""): + if isinstance(select_clause, str) and "on" not in from_clause and select_clause.split(".")[0] != from_clause.split(".")[0]: + return + return f"select spread({select_clause}) from {from_clause} {where_condition} {group_condition}" + + @property + def __tb_list(self): + return [ + "ct1", + "ct4", + "t1", + "ct2", + "stb1", + ] + + def sql_list(self): + sqls = [] + __no_join_tblist = self.__tb_list + for tb in __no_join_tblist: + select_claus_list = self.__query_condition(tb) + for select_claus in select_claus_list: + group_claus = self.__group_condition(col=select_claus) + where_claus = self.__where_condition(query_conditon=select_claus) + having_claus = self.__group_condition(col=select_claus, having=f"{select_claus} is not null") + sqls.extend( + ( + self.__single_sql(select_claus, tb, where_claus, having_claus), + self.__single_sql(select_claus, tb,), + self.__single_sql(select_claus, tb, where_condition=where_claus), + self.__single_sql(select_claus, tb, group_condition=group_claus), + ) + ) + + # return filter(None, sqls) + return list(filter(None, sqls)) + + def __get_type(self, col): + if tdSql.cursor.istype(col, "BOOL"): + return "BOOL" + if tdSql.cursor.istype(col, "INT"): + return "INT" + if tdSql.cursor.istype(col, "BIGINT"): + return "BIGINT" + if tdSql.cursor.istype(col, "TINYINT"): + return "TINYINT" + if tdSql.cursor.istype(col, "SMALLINT"): + return "SMALLINT" + if tdSql.cursor.istype(col, "FLOAT"): + return "FLOAT" + if tdSql.cursor.istype(col, "DOUBLE"): + return "DOUBLE" + if tdSql.cursor.istype(col, "BINARY"): + return "BINARY" + if tdSql.cursor.istype(col, "NCHAR"): + return "NCHAR" + if tdSql.cursor.istype(col, "TIMESTAMP"): + return "TIMESTAMP" + if tdSql.cursor.istype(col, "JSON"): + return "JSON" + if tdSql.cursor.istype(col, "TINYINT UNSIGNED"): + return "TINYINT UNSIGNED" + if tdSql.cursor.istype(col, "SMALLINT UNSIGNED"): + return "SMALLINT UNSIGNED" + if tdSql.cursor.istype(col, "INT UNSIGNED"): + return "INT UNSIGNED" + if tdSql.cursor.istype(col, "BIGINT UNSIGNED"): + return "BIGINT UNSIGNED" + + def spread_check(self): + sqls = self.sql_list() + tdLog.printNoPrefix("===step 1: curent case, must return query OK") + for i in range(len(sqls)): + tdLog.info(f"sql: {sqls[i]}") + tdSql.query(sqls[i]) + + def __test_current(self): + tdSql.query("select spread(ts) from ct1") + tdSql.checkRows(1) + tdSql.query("select spread(c1) from ct2") + tdSql.checkRows(1) + tdSql.query("select spread(c1) from ct4 group by c1") + tdSql.checkRows(self.rows + 3) + tdSql.query("select spread(c1) from ct4 group by c7") + tdSql.checkRows(3) + tdSql.query("select spread(ct2.c1) from ct4 join ct2 on ct4.ts=ct2.ts") + tdSql.checkRows(1) + + self.spread_check() + + def __test_error(self): + + tdLog.printNoPrefix("===step 0: err case, must return err") + tdSql.error( "select spread() from ct1" ) + tdSql.error( "select spread(1, 2) from ct2" ) + tdSql.error( f"select spread({NUM_COL[0]}, {NUM_COL[1]}) from ct4" ) + tdSql.error( f"select spread({BOOLEAN_COL[0]}) from t1" ) + tdSql.error( f"select spread({CHAR_COL[0]}) from stb1" ) + + # tdSql.error( ''' select spread(['c1 + c1', 'c1 + c2', 'c1 + c3', 'c1 + c4', 'c1 + c5', 'c1 + c6', 'c1 + c7', 'c1 + c8', 'c1 + c9', 'c1 + c10']) + # from ct1 + # where ['c1 + c1', 'c1 + c2', 'c1 + c3', 'c1 + c4', 'c1 + c5', 'c1 + c6', 'c1 + c7', 'c1 + c8', 'c1 + c9', 'c1 + c10'] is not null + # group by ['c1 + c1', 'c1 + c2', 'c1 + c3', 'c1 + c4', 'c1 + c5', 'c1 + c6', 'c1 
+ c7', 'c1 + c8', 'c1 + c9', 'c1 + c10'] + # having ['c1 + c1', 'c1 + c2', 'c1 + c3', 'c1 + c4', 'c1 + c5', 'c1 + c6', 'c1 + c7', 'c1 + c8', 'c1 + c9', 'c1 + c10'] is not null ''' ) + # tdSql.error( "select c1 from ct1 union select c1 from ct2 union select c1 from ct4 ") + + def all_test(self): + self.__test_error() + self.__test_current() + + def __create_tb(self): + + tdLog.printNoPrefix("==========step1:create table") + create_stb_sql = f'''create table stb1( + ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint, + {FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool, + {BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp + ) tags (t1 int) + ''' + create_ntb_sql = f'''create table t1( + ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint, + {FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool, + {BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp + ) + ''' + tdSql.execute(create_stb_sql) + tdSql.execute(create_ntb_sql) + + for i in range(4): + tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )') + { i % 32767 }, { i % 127}, { i * 1.11111 }, { i * 1000.1111 }, { i % 2} + + def __insert_data(self, rows): + now_time = int(datetime.datetime.timestamp(datetime.datetime.now()) * 1000) + for i in range(rows): + tdSql.execute( + f"insert into ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" + ) + tdSql.execute( + f"insert into ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" + ) + tdSql.execute( + f"insert into ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" + ) + tdSql.execute( + f'''insert into ct1 values + ( { now_time - rows * 5 }, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar_测试_0', { now_time + 8 } ) + ( { now_time + 10000 }, { rows }, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar_测试_9', { now_time + 9 } ) + ''' + ) + + tdSql.execute( + f'''insert into ct4 values + ( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( + { now_time + 5184000000}, {pow(2,31)-pow(2,15)}, {pow(2,63)-pow(2,30)}, 32767, 127, + { 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000} + ) + ( + { now_time + 2592000000 }, {pow(2,31)-pow(2,16)}, {pow(2,63)-pow(2,31)}, 32766, 126, + { 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000} + ) + ''' + ) + + tdSql.execute( + f'''insert into ct2 values + ( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( + { now_time + 5184000000 }, { -1 * pow(2,31) + pow(2,15) }, { -1 * pow(2,63) + pow(2,30) }, -32766, -126, + { -1 * 3.2 * pow(10,38) 
}, { -1.2 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 } + ) + ( + { now_time + 2592000000 }, { -1 * pow(2,31) + pow(2,16) }, { -1 * pow(2,63) + pow(2,31) }, -32767, -127, + { - 3.3 * pow(10,38) }, { -1.3 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 } + ) + ''' + ) + + for i in range(rows): + insert_data = f'''insert into t1 values + ( { now_time - i * 3600000 }, {i}, {i * 11111}, { i % 32767 }, { i % 127}, { i * 1.11111 }, { i * 1000.1111 }, { i % 2}, + "binary_{i}", "nchar_测试_{i}", { now_time - 1000 * i } ) + ''' + tdSql.execute(insert_data) + tdSql.execute( + f'''insert into t1 values + ( { now_time + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time - (( rows // 2 ) * 60 + 30) * 60000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time - rows * 3600000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time + 7200000 }, { pow(2,31) - pow(2,15) }, { pow(2,63) - pow(2,30) }, 32767, 127, + { 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 }, + "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 } + ) + ( + { now_time + 3600000 } , { pow(2,31) - pow(2,16) }, { pow(2,63) - pow(2,31) }, 32766, 126, + { 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 }, + "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 } + ) + ''' + ) + + + def run(self): + tdSql.prepare() + + tdLog.printNoPrefix("==========step1:create table") + self.__create_tb() + + tdLog.printNoPrefix("==========step2:insert data") + self.rows = 10 + self.__insert_data(self.rows) + + tdLog.printNoPrefix("==========step3:all check") + self.all_test() + + tdDnodes.stop(1) + tdDnodes.start(1) + + tdSql.execute("use db") + + tdLog.printNoPrefix("==========step4:after wal, all check again ") + self.all_test() + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/2-query/statecount.py b/tests/system-test/2-query/statecount.py new file mode 100644 index 0000000000000000000000000000000000000000..2634d9a9aba00cc7a5d1013f7a43a7b2ce0e2919 --- /dev/null +++ b/tests/system-test/2-query/statecount.py @@ -0,0 +1,431 @@ +from math import floor +from random import randint, random +from numpy import equal +import taos +import sys +import datetime +import inspect + +from util.log import * +from util.sql import * +from util.cases import * + +class TDTestCase: + updatecfgDict = {'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 , + "jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, "dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143, + "wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"fnDebugFlag":143} + + def init(self, conn, logSql): + tdLog.debug(f"start to excute {__file__}") + tdSql.init(conn.cursor()) + + def prepare_datas(self): + tdSql.execute( + '''create table stb1 + (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) + tags (t1 int) + ''' + ) + + tdSql.execute( + ''' + create table t1 + (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) + ''' + ) + for i in range(4): + tdSql.execute(f'create table ct{i+1} using stb1 
tags ( {i+1} )') + + for i in range(9): + tdSql.execute( + f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )" + ) + tdSql.execute( + f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )" + ) + tdSql.execute("insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )") + tdSql.execute("insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )") + tdSql.execute("insert into ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )") + tdSql.execute("insert into ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )") + + tdSql.execute("insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + tdSql.execute("insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + tdSql.execute("insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + + tdSql.execute( + f'''insert into t1 values + ( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a ) + ( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a ) + ( '2021-01-01 01:01:06.000', 3, 33333, 333, 33, 3.33, 33.33, 0, "binary3", "nchar3", now()+3a ) + ( '2021-05-07 01:01:10.000', 4, 44444, 444, 44, 4.44, 44.44, 1, "binary4", "nchar4", now()+4a ) + ( '2021-07-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( '2021-09-30 01:01:16.000', 5, 55555, 555, 55, 5.55, 55.55, 0, "binary5", "nchar5", now()+5a ) + ( '2022-02-01 01:01:20.000', 6, 66666, 666, 66, 6.66, 66.66, 1, "binary6", "nchar6", now()+6a ) + ( '2022-10-28 01:01:26.000', 7, 00000, 000, 00, 0.00, 00.00, 1, "binary7", "nchar7", "1970-01-01 08:00:00.000" ) + ( '2022-12-01 01:01:30.000', 8, -88888, -888, -88, -8.88, -88.88, 0, "binary8", "nchar8", "1969-01-01 01:00:00.000" ) + ( '2022-12-31 01:01:36.000', 9, -99999999999999999, -999, -99, -9.99, -999999999999999999999.99, 1, "binary9", "nchar9", "1900-01-01 00:00:00.000" ) + ( '2023-02-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ''' + ) + + def test_errors(self): + error_sql_lists = [ + # "select statecount(c1,'GT',5) from t1" + "select statecount from t1", + "select statecount(123--123)==1 from t1", + "select statecount(123,123) from t1", + "select statecount(c1,ts) from t1", + "select statecount(c1,c1,ts) from t1", + "select statecount(c1 ,c2 ) from t1", + "select statecount(c1 ,NULL) from t1", + #"select statecount(c1 ,'NULL',1.0) from t1", + "select statecount(c1 ,'GT','1') from t1", + "select statecount(c1 ,'GT','tbname') from t1", + "select statecount(c1 ,'GT','*') from t1", + "select statecount(c1 ,'GT',ts) from t1", + "select statecount(c1 ,'GT',max(c1)) from t1", + "select statecount(abs(c1) ,'GT',1) from t1", + "select statecount(c1+2 ,'GT',1) from t1", + "select statecount(c1 ,'GT',1,1u) from t1", + "select statecount(c1 ,'GT',1,now) from t1", + "select statecount(c1 ,'GT','1') from t1", + "select statecount(c1 ,'GT','1',True) from t1", + "select statecount(statecount(c1) ab from t1)", + "select statecount(c1 ,'GT',1,,)int from t1", + 
"select statecount('c1','GT',1) from t1", + "select statecount('c1','GT' , NULL) from t1", + "select statecount('c1','GT', 1 , '') from t1", + "select statecount('c1','GT', 1 ,c%) from t1", + "select statecount(c1 ,'GT',1,t1) from t1", + "select statecount(c1 ,'GT',1,True) from t1", + "select statecount(c1 ,'GT',1) , count(c1) from t1", + "select statecount(c1 ,'GT',1) , avg(c1) from t1", + "select statecount(c1 ,'GT',1) , min(c1) from t1", + "select statecount(c1 ,'GT',1) , spread(c1) from t1", + "select statecount(c1 ,'GT',1) , diff(c1) from t1", + "select statecount(c1 ,'GT',1) , abs(c1) from t1", + "select statecount(c1 ,'GT',1) , c1 from t1", + ] + for error_sql in error_sql_lists: + tdSql.error(error_sql) + pass + + def support_types(self): + other_no_value_types = [ + "select statecount(ts,'GT',1) from t1" , + "select statecount(c7,'GT',1) from t1", + "select statecount(c8,'GT',1) from t1", + "select statecount(c9,'GT',1) from t1", + "select statecount(ts,'GT',1) from ct1" , + "select statecount(c7,'GT',1) from ct1", + "select statecount(c8,'GT',1) from ct1", + "select statecount(c9,'GT',1) from ct1", + "select statecount(ts,'GT',1) from ct3" , + "select statecount(c7,'GT',1) from ct3", + "select statecount(c8,'GT',1) from ct3", + "select statecount(c9,'GT',1) from ct3", + "select statecount(ts,'GT',1) from ct4" , + "select statecount(c7,'GT',1) from ct4", + "select statecount(c8,'GT',1) from ct4", + "select statecount(c9,'GT',1) from ct4", + "select statecount(ts,'GT',1) from stb1 partition by tbname" , + "select statecount(c7,'GT',1) from stb1 partition by tbname", + "select statecount(c8,'GT',1) from stb1 partition by tbname", + "select statecount(c9,'GT',1) from stb1 partition by tbname" + ] + + for type_sql in other_no_value_types: + tdSql.error(type_sql) + tdLog.info("support type ok , sql is : %s"%type_sql) + + type_sql_lists = [ + "select statecount(c1,'GT',1) from t1", + "select statecount(c2,'GT',1) from t1", + "select statecount(c3,'GT',1) from t1", + "select statecount(c4,'GT',1) from t1", + "select statecount(c5,'GT',1) from t1", + "select statecount(c6,'GT',1) from t1", + + "select statecount(c1,'GT',1) from ct1", + "select statecount(c2,'GT',1) from ct1", + "select statecount(c3,'GT',1) from ct1", + "select statecount(c4,'GT',1) from ct1", + "select statecount(c5,'GT',1) from ct1", + "select statecount(c6,'GT',1) from ct1", + + "select statecount(c1,'GT',1) from ct3", + "select statecount(c2,'GT',1) from ct3", + "select statecount(c3,'GT',1) from ct3", + "select statecount(c4,'GT',1) from ct3", + "select statecount(c5,'GT',1) from ct3", + "select statecount(c6,'GT',1) from ct3", + + "select statecount(c1,'GT',1) from stb1 partition by tbname", + "select statecount(c2,'GT',1) from stb1 partition by tbname", + "select statecount(c3,'GT',1) from stb1 partition by tbname", + "select statecount(c4,'GT',1) from stb1 partition by tbname", + "select statecount(c5,'GT',1) from stb1 partition by tbname", + "select statecount(c6,'GT',1) from stb1 partition by tbname", + + "select statecount(c6,'GT',1) as alisb from stb1 partition by tbname", + "select statecount(c6,'GT',1) alisb from stb1 partition by tbname", + ] + + for type_sql in type_sql_lists: + tdSql.query(type_sql) + + def support_opers(self): + oper_lists = ['LT','lt','Lt','lT','GT','gt','Gt','gT','LE','le','Le','lE','GE','ge','Ge','gE','NE','ne','Ne','nE','EQ','eq','Eq','eQ'] + + oper_errors = [",","*","NULL","tbname","ts","sum","_c0"] + + for oper in oper_lists: + tdSql.query(f"select statecount(c1 ,'{oper}',1) as col 
from t1") + tdSql.checkRows(12) + + for oper in oper_errors: + tdSql.error(f"select statecount(c1 ,'{oper}',1) as col from t1") + + + def basic_statecount_function(self): + + # basic query + tdSql.query("select c1 from ct3") + tdSql.checkRows(0) + tdSql.query("select c1 from t1") + tdSql.checkRows(12) + tdSql.query("select c1 from stb1") + tdSql.checkRows(25) + + # used for empty table , ct3 is empty + tdSql.query("select statecount(c6,'GT',1) from ct3") + tdSql.checkRows(0) + tdSql.query("select statecount(c6,'GT',1) from ct3") + tdSql.checkRows(0) + tdSql.query("select statecount(c6,'GT',1) from ct3") + tdSql.checkRows(0) + tdSql.query("select statecount(c6,'GT',1) from ct3") + tdSql.checkRows(0) + tdSql.query("select statecount(c6,'GT',1) from ct3") + tdSql.checkRows(0) + tdSql.query("select statecount(c6,'GT',1) from ct3") + + # will support _rowts mix with + # tdSql.query("select (c6,'GT',1),_rowts from ct3") + + # auto check for t1 table + # used for regular table + tdSql.query("select statecount(c6,'GT',1) from t1") + + # unique with super tags + + tdSql.query("select statecount(c6,'GT',1) from ct1") + tdSql.checkRows(13) + + tdSql.query("select statecount(c6,'GT',1) from ct4") + tdSql.checkRows(12) + + tdSql.error("select statecount(c6,'GT',1),tbname from ct1") + tdSql.error("select statecount(c6,'GT',1),t1 from ct1") + + # unique with common col + tdSql.error("select statecount(c6,'GT',1) ,ts from ct1") + tdSql.error("select statecount(c6,'GT',1) ,c1 from ct1") + + # unique with scalar function + tdSql.error("select statecount(c6,'GT',1) ,abs(c1) from ct1") + tdSql.error("select statecount(c6,'GT',1) , unique(c2) from ct1") + tdSql.error("select statecount(c6,'GT',1) , abs(c2)+2 from ct1") + + + # unique with aggregate function + tdSql.error("select statecount(c6,'GT',1) ,sum(c1) from ct1") + tdSql.error("select statecount(c6,'GT',1) ,max(c1) from ct1") + tdSql.error("select statecount(c6,'GT',1) ,csum(c1) from ct1") + tdSql.error("select statecount(c6,'GT',1) ,count(c1) from ct1") + + # unique with filter where + tdSql.query("select statecount(c6,'GT',1) from ct4 where c1 is null") + tdSql.checkData(0, 0, None) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + + tdSql.query("select statecount(c1,'GT',1) from t1 where c1 >2 ") + tdSql.checkData(0, 0, 1) + tdSql.checkData(1, 0, 2) + tdSql.checkData(2, 0, 3) + tdSql.checkData(4, 0, 5) + tdSql.checkData(5, 0, 6) + + tdSql.query("select statecount(c2,'GT',1) from t1 where c2 between 0 and 99999") + tdSql.checkData(0, 0, 1) + tdSql.checkData(1, 0, 2) + tdSql.checkData(6, 0, -1) + + + # unique with union all + tdSql.query("select statecount(c1,'GT',1) from ct4 union all select statecount(c1,'GT',1) from ct1") + tdSql.checkRows(25) + tdSql.query("select statecount(c1,'GT',1) from ct4 union all select distinct(c1) from ct4") + tdSql.checkRows(22) + + # unique with join + # prepare join datas with same ts + + tdSql.execute(" use db ") + tdSql.execute(" create stable st1 (ts timestamp , num int) tags(ind int)") + tdSql.execute(" create table tb1 using st1 tags(1)") + tdSql.execute(" create table tb2 using st1 tags(2)") + + tdSql.execute(" create stable st2 (ts timestamp , num int) tags(ind int)") + tdSql.execute(" create table ttb1 using st2 tags(1)") + tdSql.execute(" create table ttb2 using st2 tags(2)") + + start_ts = 1622369635000 # 2021-05-30 18:13:55 + + for i in range(10): + ts_value = start_ts+i*1000 + tdSql.execute(f" insert into tb1 values({ts_value} , {i})") + tdSql.execute(f" insert into tb2 values({ts_value} , 
{i})") + + tdSql.execute(f" insert into ttb1 values({ts_value} , {i})") + tdSql.execute(f" insert into ttb2 values({ts_value} , {i})") + + tdSql.query("select statecount(tb1.num,'GT',1) from tb1, tb2 where tb1.ts=tb2.ts ") + tdSql.checkRows(10) + tdSql.checkData(0,0,-1) + tdSql.checkData(1,0,-1) + tdSql.checkData(2,0,1) + tdSql.checkData(9,0,8) + + tdSql.query("select statecount(tb1.num,'GT',1) from tb1, tb2 where tb1.ts=tb2.ts union all select statecount(tb2.num,'GT',1) from tb1, tb2 where tb1.ts=tb2.ts ") + tdSql.checkRows(20) + + # nest query + # tdSql.query("select unique(c1) from (select c1 from ct1)") + tdSql.query("select c1 from (select statecount(c1,'GT',1) c1 from t1)") + tdSql.checkRows(12) + tdSql.checkData(0, 0, None) + tdSql.checkData(1, 0, -1) + tdSql.checkData(2, 0, 1) + tdSql.checkData(10, 0, 8) + + tdSql.query("select sum(c1) from (select statecount(c1,'GT',1) c1 from t1)") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 35) + + tdSql.query("select sum(c1) from (select distinct(c1) c1 from ct1) union all select sum(c1) from (select statecount(c1,'GT',1) c1 from ct1)") + tdSql.checkRows(2) + + tdSql.query("select 1-abs(c1) from (select statecount(c1,'GT',1) c1 from t1)") + tdSql.checkRows(12) + tdSql.checkData(0, 0, None) + tdSql.checkData(1, 0, 0.000000000) + tdSql.checkData(3, 0, -1.000000000) + + + # bug for stable + #partition by tbname + # tdSql.query(" select unique(c1) from stb1 partition by tbname ") + # tdSql.checkRows(21) + + # tdSql.query(" select unique(c1) from stb1 partition by tbname ") + # tdSql.checkRows(21) + + # group by + tdSql.query("select statecount(c1,'GT',1) from ct1 group by c1") + tdSql.error("select statecount(c1,'GT',1) from ct1 group by tbname") + + # super table + + def check_unit_time(self): + tdSql.execute(" use db ") + tdSql.error("select stateduration(c1,'GT',1,1b) from ct1") + tdSql.error("select stateduration(c1,'GT',1,1u) from ct1") + tdSql.query("select stateduration(c1,'GT',1,1s) from t1") + tdSql.checkData(10,0,63072035) + tdSql.query("select stateduration(c1,'GT',1,1000s) from t1") + tdSql.checkData(10,0,int(63072035/1000)) + tdSql.query("select stateduration(c1,'GT',1,1m) from t1") + tdSql.checkData(10,0,int(63072035/60)) + tdSql.query("select stateduration(c1,'GT',1,1h) from t1") + tdSql.checkData(10,0,int(63072035/60/60)) + tdSql.query("select stateduration(c1,'GT',1,1d) from t1") + tdSql.checkData(10,0,int(63072035/60/24/60)) + tdSql.query("select stateduration(c1,'GT',1,1w) from t1") + tdSql.checkData(10,0,int(63072035/60/7/24/60)) + + + def check_boundary_values(self): + + tdSql.execute("drop database if exists bound_test") + tdSql.execute("create database if not exists bound_test") + tdSql.execute("use bound_test") + tdSql.execute( + "create table stb_bound (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp) tags (t1 int);" + ) + tdSql.execute(f'create table sub1_bound using stb_bound tags ( 1 )') + tdSql.execute( + f"insert into sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" + ) + tdSql.execute( + f"insert into sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" + ) + + tdSql.execute( + f"insert into sub1_bound values ( now(), -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" + ) + + tdSql.execute( + f"insert into 
sub1_bound values ( now(), 2147483643, 9223372036854775803, 32763, 123, 3.39E+38, 1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )" + ) + + tdSql.execute( + f"insert into sub1_bound values ( now(), -2147483643, -9223372036854775803, -32763, -123, -3.39E+38, -1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )" + ) + + tdSql.error( + f"insert into sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" + ) + + tdSql.query("select statecount(c1,'GT',1) from sub1_bound") + tdSql.checkRows(5) + + def run(self): # sourcery skip: extract-duplicate-method, remove-redundant-fstring + tdSql.prepare() + + tdLog.printNoPrefix("==========step1:create table ==============") + + self.prepare_datas() + + tdLog.printNoPrefix("==========step2:test errors ==============") + + self.test_errors() + + tdLog.printNoPrefix("==========step3:support types ============") + + self.support_types() + + tdLog.printNoPrefix("==========step4:support opers ============") + self.support_opers() + + tdLog.printNoPrefix("==========step5: statecount basic query ============") + + self.basic_statecount_function() + + tdLog.printNoPrefix("==========step6: statecount boundary query ============") + + self.check_boundary_values() + + tdLog.printNoPrefix("==========step6: statecount unit time test ============") + + self.check_unit_time() + + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/2-query/stateduration.py b/tests/system-test/2-query/stateduration.py new file mode 100644 index 0000000000000000000000000000000000000000..fa71009ef210e6a14c5abe04fbfe0f0b95c6598a --- /dev/null +++ b/tests/system-test/2-query/stateduration.py @@ -0,0 +1,265 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +from util.log import * +from util.cases import * +from util.sql import * + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor()) + self.ts = 1537146000000 + self.param_list = ['LT','lt','Lt','lT','GT','gt','Gt','gT','LE','le','Le','lE','GE','ge','Ge','gE','NE','ne','Ne','nE','EQ','eq','Eq','eQ'] + self.row_num = 10 + def run(self): + tdSql.prepare() + # timestamp = 1ms , time_unit = 1s + tdSql.execute('''create table test(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double, + col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned)''') + for i in range(self.row_num): + tdSql.execute("insert into test values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)" + % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1)) + integer_list = [1,2,3,4,11,12,13,14] + float_list = [5,6] + + for i in integer_list: + for j in self.param_list: + tdSql.query(f"select stateduration(col{i},'{j}',5) from test") + tdSql.checkRows(10) + if j in ['LT' ,'lt','Lt','lT']: + tdSql.checkEqual(tdSql.queryResult,[(0,), (0,), (0,), (0,), (-1,), (-1,), (-1,), (-1,), (-1,), (-1,)]) + elif j in ['GT','gt', 'Gt','gT']: + tdSql.checkEqual(tdSql.queryResult,[(-1,), (-1,), (-1,), (-1,), (-1,), (0,), (0,), (0,), (0,), (0,)]) + elif j in ['LE','le','Le','lE']: + tdSql.checkEqual(tdSql.queryResult,[(0,), (0,), (0,), (0,), (0,), (-1,), (-1,), (-1,), (-1,), (-1,)]) + elif j in [ 'GE','ge','Ge','gE']: + tdSql.checkEqual(tdSql.queryResult,[(-1,), (-1,), (-1,), (-1,), (0,), (0,), (0,), (0,), (0,), (0,)]) + elif j in ['NE','ne','Ne','nE']: + tdSql.checkEqual(tdSql.queryResult,[(0,), (0,), (0,), (0,), (-1,), (0,), (0,), (0,), (0,), (0,)]) + elif j in ['EQ','eq','Eq','eQ']: + tdSql.checkEqual(tdSql.queryResult,[(-1,), (-1,), (-1,), (-1,), (0,), (-1,), (-1,), (-1,), (-1,), (-1,)]) + for i in float_list: + for j in self.param_list: + tdSql.query(f"select stateduration(col{i},'{j}',5) from test") + tdSql.checkRows(10) + if j in ['LT','lt','Lt','lT','LE','le','Le','lE']: + tdSql.checkEqual(tdSql.queryResult,[(0,), (0,), (0,), (0,), (0,), (-1,), (-1,), (-1,), (-1,), (-1,)]) + elif j in ['GE','ge','Ge','gE','GT','gt','Gt','gT']: + tdSql.checkEqual(tdSql.queryResult,[(-1,), (-1,), (-1,), (-1,), (-1,), (0,), (0,), (0,), (0,), (0,)]) + elif j in ['NE','ne','Ne','nE']: + tdSql.checkEqual(tdSql.queryResult,[(0,), (0,), (0,), (0,), (0,), (0,), (0,), (0,), (0,), (0,)]) + elif j in ['EQ','eq','Eq','eQ']: + tdSql.checkEqual(tdSql.queryResult,[(-1,), (-1,), (-1,), (-1,), (-1,), (-1,), (-1,), (-1,), (-1,), (-1,)]) + + error_column_list = ['ts','col7','col8','col9','a',1] + for i in error_column_list: + for j in self.param_list: + tdSql.error(f"select stateduration({i},{j},5) from test") + + error_param_list = ['a',1] + for i in error_param_list: + tdSql.error(f"select stateduration(col1,{i},5) from test") + + # timestamp = 1s, time_unit =1s + tdSql.execute('''create table test1(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double, + col7 bool, col8 binary(20), col9 
nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned)''') + for i in range(self.row_num): + tdSql.execute("insert into test1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)" + % (self.ts + i*1000, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1)) + + for i in integer_list: + for j in self.param_list: + tdSql.query(f"select stateduration(col{i},'{j}',5) from test1") + tdSql.checkRows(10) + # print(tdSql.queryResult) + if j in ['LT' ,'lt','Lt','lT']: + tdSql.checkEqual(tdSql.queryResult,[(0,), (1,), (2,), (3,), (-1,), (-1,), (-1,), (-1,), (-1,), (-1,)]) + elif j in ['GT','gt', 'Gt','gT']: + tdSql.checkEqual(tdSql.queryResult,[(-1,), (-1,), (-1,), (-1,), (-1,), (0,), (1,), (2,), (3,), (4,)]) + elif j in ['LE','le','Le','lE']: + tdSql.checkEqual(tdSql.queryResult,[(0,), (1,), (2,), (3,), (4,), (-1,), (-1,), (-1,), (-1,), (-1,)]) + elif j in [ 'GE','ge','Ge','gE']: + tdSql.checkEqual(tdSql.queryResult,[(-1,), (-1,), (-1,), (-1,), (0,), (1,), (2,), (3,), (4,), (5,)]) + elif j in ['NE','ne','Ne','nE']: + tdSql.checkEqual(tdSql.queryResult,[(0,), (1,), (2,), (3,), (-1,), (0,), (1,), (2,), (3,), (4,)]) + elif j in ['EQ','eq','Eq','eQ']: + tdSql.checkEqual(tdSql.queryResult,[(-1,), (-1,), (-1,), (-1,), (0,), (-1,), (-1,), (-1,), (-1,), (-1,)]) + for i in float_list: + for j in self.param_list: + tdSql.query(f"select stateduration(col{i},'{j}',5) from test1") + tdSql.checkRows(10) + print(tdSql.queryResult) + if j in ['LT','lt','Lt','lT','LE','le','Le','lE']: + tdSql.checkEqual(tdSql.queryResult,[(0,), (1,), (2,), (3,), (4,), (-1,), (-1,), (-1,), (-1,), (-1,)]) + elif j in ['GE','ge','Ge','gE','GT','gt','Gt','gT']: + tdSql.checkEqual(tdSql.queryResult,[(-1,), (-1,), (-1,), (-1,), (-1,), (0,), (1,), (2,), (3,), (4,)]) + elif j in ['NE','ne','Ne','nE']: + tdSql.checkEqual(tdSql.queryResult,[(0,), (1,), (2,), (3,), (4,), (5,), (6,), (7,), (8,), (9,)]) + elif j in ['EQ','eq','Eq','eQ']: + tdSql.checkEqual(tdSql.queryResult,[(-1,), (-1,), (-1,), (-1,), (-1,), (-1,), (-1,), (-1,), (-1,), (-1,)]) + + + # timestamp = 1m, time_unit =1m + tdSql.execute('''create table test2(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double, + col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned)''') + for i in range(self.row_num): + tdSql.execute("insert into test2 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)" + % (self.ts + i*1000*60, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1)) + + for i in integer_list: + for j in self.param_list: + tdSql.query(f"select stateduration(col{i},'{j}',5,1m) from test2") + tdSql.checkRows(10) + # print(tdSql.queryResult) + if j in ['LT' ,'lt','Lt','lT']: + tdSql.checkEqual(tdSql.queryResult,[(0,), (1,), (2,), (3,), (-1,), (-1,), (-1,), (-1,), (-1,), (-1,)]) + elif j in ['GT','gt', 'Gt','gT']: + tdSql.checkEqual(tdSql.queryResult,[(-1,), (-1,), (-1,), (-1,), (-1,), (0,), (1,), (2,), (3,), (4,)]) + elif j in ['LE','le','Le','lE']: + tdSql.checkEqual(tdSql.queryResult,[(0,), (1,), (2,), (3,), (4,), (-1,), (-1,), (-1,), (-1,), (-1,)]) + elif j in [ 'GE','ge','Ge','gE']: + tdSql.checkEqual(tdSql.queryResult,[(-1,), (-1,), (-1,), (-1,), (0,), (1,), (2,), (3,), (4,), (5,)]) + elif j in ['NE','ne','Ne','nE']: + tdSql.checkEqual(tdSql.queryResult,[(0,), (1,), (2,), (3,), (-1,), 
(0,), (1,), (2,), (3,), (4,)]) + elif j in ['EQ','eq','Eq','eQ']: + tdSql.checkEqual(tdSql.queryResult,[(-1,), (-1,), (-1,), (-1,), (0,), (-1,), (-1,), (-1,), (-1,), (-1,)]) + for i in float_list: + for j in self.param_list: + tdSql.query(f"select stateduration(col{i},'{j}',5,1m) from test2") + tdSql.checkRows(10) + print(tdSql.queryResult) + if j in ['LT','lt','Lt','lT','LE','le','Le','lE']: + tdSql.checkEqual(tdSql.queryResult,[(0,), (1,), (2,), (3,), (4,), (-1,), (-1,), (-1,), (-1,), (-1,)]) + elif j in ['GE','ge','Ge','gE','GT','gt','Gt','gT']: + tdSql.checkEqual(tdSql.queryResult,[(-1,), (-1,), (-1,), (-1,), (-1,), (0,), (1,), (2,), (3,), (4,)]) + elif j in ['NE','ne','Ne','nE']: + tdSql.checkEqual(tdSql.queryResult,[(0,), (1,), (2,), (3,), (4,), (5,), (6,), (7,), (8,), (9,)]) + elif j in ['EQ','eq','Eq','eQ']: + tdSql.checkEqual(tdSql.queryResult,[(-1,), (-1,), (-1,), (-1,), (-1,), (-1,), (-1,), (-1,), (-1,), (-1,)]) + + # timestamp = 1h, time_unit =1h + tdSql.execute('''create table test3(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double, + col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned)''') + for i in range(self.row_num): + tdSql.execute("insert into test3 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)" + % (self.ts + i*1000*60*60, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1)) + + for i in integer_list: + for j in self.param_list: + tdSql.query(f"select stateduration(col{i},'{j}',5,1h) from test3") + tdSql.checkRows(10) + # print(tdSql.queryResult) + if j in ['LT' ,'lt','Lt','lT']: + tdSql.checkEqual(tdSql.queryResult,[(0,), (1,), (2,), (3,), (-1,), (-1,), (-1,), (-1,), (-1,), (-1,)]) + elif j in ['GT','gt', 'Gt','gT']: + tdSql.checkEqual(tdSql.queryResult,[(-1,), (-1,), (-1,), (-1,), (-1,), (0,), (1,), (2,), (3,), (4,)]) + elif j in ['LE','le','Le','lE']: + tdSql.checkEqual(tdSql.queryResult,[(0,), (1,), (2,), (3,), (4,), (-1,), (-1,), (-1,), (-1,), (-1,)]) + elif j in [ 'GE','ge','Ge','gE']: + tdSql.checkEqual(tdSql.queryResult,[(-1,), (-1,), (-1,), (-1,), (0,), (1,), (2,), (3,), (4,), (5,)]) + elif j in ['NE','ne','Ne','nE']: + tdSql.checkEqual(tdSql.queryResult,[(0,), (1,), (2,), (3,), (-1,), (0,), (1,), (2,), (3,), (4,)]) + elif j in ['EQ','eq','Eq','eQ']: + tdSql.checkEqual(tdSql.queryResult,[(-1,), (-1,), (-1,), (-1,), (0,), (-1,), (-1,), (-1,), (-1,), (-1,)]) + for i in float_list: + for j in self.param_list: + tdSql.query(f"select stateduration(col{i},'{j}',5,1h) from test3") + tdSql.checkRows(10) + print(tdSql.queryResult) + if j in ['LT','lt','Lt','lT','LE','le','Le','lE']: + tdSql.checkEqual(tdSql.queryResult,[(0,), (1,), (2,), (3,), (4,), (-1,), (-1,), (-1,), (-1,), (-1,)]) + elif j in ['GE','ge','Ge','gE','GT','gt','Gt','gT']: + tdSql.checkEqual(tdSql.queryResult,[(-1,), (-1,), (-1,), (-1,), (-1,), (0,), (1,), (2,), (3,), (4,)]) + elif j in ['NE','ne','Ne','nE']: + tdSql.checkEqual(tdSql.queryResult,[(0,), (1,), (2,), (3,), (4,), (5,), (6,), (7,), (8,), (9,)]) + elif j in ['EQ','eq','Eq','eQ']: + tdSql.checkEqual(tdSql.queryResult,[(-1,), (-1,), (-1,), (-1,), (-1,), (-1,), (-1,), (-1,), (-1,), (-1,)]) + + # timestamp = 1h,time_unit =1m + for i in integer_list: + for j in self.param_list: + tdSql.query(f"select stateduration(col{i},'{j}',5,1m) from test3") + tdSql.checkRows(10) + # print(tdSql.queryResult) + if j in ['LT' ,'lt','Lt','lT']: + 
tdSql.checkEqual(tdSql.queryResult,[(0,), (60,), (120,), (180,), (-1,), (-1,), (-1,), (-1,), (-1,), (-1,)]) + elif j in ['GT','gt', 'Gt','gT']: + tdSql.checkEqual(tdSql.queryResult,[(-1,), (-1,), (-1,), (-1,), (-1,), (0,), (60,), (120,), (180,), (240,)]) + elif j in ['LE','le','Le','lE']: + tdSql.checkEqual(tdSql.queryResult,[(0,), (60,), (120,), (180,), (240,), (-1,), (-1,), (-1,), (-1,), (-1,)]) + elif j in [ 'GE','ge','Ge','gE']: + tdSql.checkEqual(tdSql.queryResult,[(-1,), (-1,), (-1,), (-1,), (0,), (60,), (120,), (180,), (240,), (300,)]) + elif j in ['NE','ne','Ne','nE']: + tdSql.checkEqual(tdSql.queryResult,[(0,), (60,), (120,), (180,), (-1,), (0,), (60,), (120,), (180,), (240,)]) + elif j in ['EQ','eq','Eq','eQ']: + tdSql.checkEqual(tdSql.queryResult,[(-1,), (-1,), (-1,), (-1,), (0,), (-1,), (-1,), (-1,), (-1,), (-1,)]) + for i in float_list: + for j in self.param_list: + tdSql.query(f"select stateduration(col{i},'{j}',5,1m) from test3") + tdSql.checkRows(10) + print(tdSql.queryResult) + if j in ['LT','lt','Lt','lT','LE','le','Le','lE']: + tdSql.checkEqual(tdSql.queryResult,[(0,), (60,), (120,), (180,), (240,), (-1,), (-1,), (-1,), (-1,), (-1,)]) + elif j in ['GE','ge','Ge','gE','GT','gt','Gt','gT']: + tdSql.checkEqual(tdSql.queryResult,[(-1,), (-1,), (-1,), (-1,), (-1,), (0,), (60,), (120,), (180,), (240,)]) + elif j in ['NE','ne','Ne','nE']: + tdSql.checkEqual(tdSql.queryResult,[(0,), (60,), (120,), (180,), (240,), (300,), (360,), (420,), (480,), (540,)]) + elif j in ['EQ','eq','Eq','eQ']: + tdSql.checkEqual(tdSql.queryResult,[(-1,), (-1,), (-1,), (-1,), (-1,), (-1,), (-1,), (-1,), (-1,), (-1,)]) + + # for stb + tdSql.execute('''create table stb(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double, + col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned) tags(t0 int)''') + tdSql.execute('create table stb_1 using stb tags(1)') + for i in range(self.row_num): + tdSql.execute("insert into stb_1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)" + % (self.ts + i*1000*60*60, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1)) + + for i in integer_list: + for j in self.param_list: + tdSql.query(f"select stateduration(col{i},'{j}',5,1h) from stb") + tdSql.checkRows(10) + # print(tdSql.queryResult) + if j in ['LT' ,'lt','Lt','lT']: + tdSql.checkEqual(tdSql.queryResult,[(0,), (1,), (2,), (3,), (-1,), (-1,), (-1,), (-1,), (-1,), (-1,)]) + elif j in ['GT','gt', 'Gt','gT']: + tdSql.checkEqual(tdSql.queryResult,[(-1,), (-1,), (-1,), (-1,), (-1,), (0,), (1,), (2,), (3,), (4,)]) + elif j in ['LE','le','Le','lE']: + tdSql.checkEqual(tdSql.queryResult,[(0,), (1,), (2,), (3,), (4,), (-1,), (-1,), (-1,), (-1,), (-1,)]) + elif j in [ 'GE','ge','Ge','gE']: + tdSql.checkEqual(tdSql.queryResult,[(-1,), (-1,), (-1,), (-1,), (0,), (1,), (2,), (3,), (4,), (5,)]) + elif j in ['NE','ne','Ne','nE']: + tdSql.checkEqual(tdSql.queryResult,[(0,), (1,), (2,), (3,), (-1,), (0,), (1,), (2,), (3,), (4,)]) + elif j in ['EQ','eq','Eq','eQ']: + tdSql.checkEqual(tdSql.queryResult,[(-1,), (-1,), (-1,), (-1,), (0,), (-1,), (-1,), (-1,), (-1,), (-1,)]) + for i in float_list: + for j in self.param_list: + tdSql.query(f"select stateduration(col{i},'{j}',5,1h) from stb") + tdSql.checkRows(10) + print(tdSql.queryResult) + if j in ['LT','lt','Lt','lT','LE','le','Le','lE']: + tdSql.checkEqual(tdSql.queryResult,[(0,), (1,), (2,), (3,), (4,), 
(-1,), (-1,), (-1,), (-1,), (-1,)]) + elif j in ['GE','ge','Ge','gE','GT','gt','Gt','gT']: + tdSql.checkEqual(tdSql.queryResult,[(-1,), (-1,), (-1,), (-1,), (-1,), (0,), (1,), (2,), (3,), (4,)]) + elif j in ['NE','ne','Ne','nE']: + tdSql.checkEqual(tdSql.queryResult,[(0,), (1,), (2,), (3,), (4,), (5,), (6,), (7,), (8,), (9,)]) + elif j in ['EQ','eq','Eq','eQ']: + tdSql.checkEqual(tdSql.queryResult,[(-1,), (-1,), (-1,), (-1,), (-1,), (-1,), (-1,), (-1,), (-1,), (-1,)]) + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) \ No newline at end of file diff --git a/tests/system-test/2-query/sum.py b/tests/system-test/2-query/sum.py index 9521b005ddcfebedc08c4b618c936726ec4e3c85..ea0e1f7fae214fa009e7230b524959b7afef59da 100644 --- a/tests/system-test/2-query/sum.py +++ b/tests/system-test/2-query/sum.py @@ -218,13 +218,13 @@ class TDTestCase: tdLog.printNoPrefix("==========step3:all check") self.all_test() - # tdDnodes.stop(1) - # tdDnodes.start(1) + tdDnodes.stop(1) + tdDnodes.start(1) - # tdSql.execute("use db") + tdSql.execute("use db") - # tdLog.printNoPrefix("==========step4:after wal, all check again ") - # self.all_test() + tdLog.printNoPrefix("==========step4:after wal, all check again ") + self.all_test() def stop(self): tdSql.close() diff --git a/tests/system-test/2-query/top.py b/tests/system-test/2-query/top.py new file mode 100644 index 0000000000000000000000000000000000000000..fbbbb2c99acc2cce1b0cb53a0dafd7f18ec01011 --- /dev/null +++ b/tests/system-test/2-query/top.py @@ -0,0 +1,106 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +from util.log import * +from util.cases import * +from util.sql import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor()) + + self.rowNum = 10 + self.ts = 1537146000000 + + def run(self): + tdSql.prepare() + + + + tdSql.execute('''create table test(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double, + col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned) tags(loc nchar(20))''') + tdSql.execute("create table test1 using test tags('beijing')") + for i in range(self.rowNum): + tdSql.execute("insert into test1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)" + % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1)) + + + # top verification + tdSql.error("select top(ts, 10) from test") + tdSql.error("select top(col1, 0) from test") + tdSql.error("select top(col1, 101) from test") + tdSql.error("select top(col2, 0) from test") + tdSql.error("select top(col2, 101) from test") + tdSql.error("select top(col3, 0) from test") + tdSql.error("select top(col3, 101) from test") + tdSql.error("select top(col4, 0) from test") + tdSql.error("select top(col4, 101) from test") + tdSql.error("select top(col5, 0) from test") + tdSql.error("select top(col5, 101) from test") + tdSql.error("select top(col6, 0) from test") + tdSql.error("select top(col6, 101) from test") + tdSql.error("select top(col7, 10) from test") + tdSql.error("select top(col8, 10) from test") + tdSql.error("select top(col9, 10) from test") + tdSql.error("select top(col11, 0) from test") + tdSql.error("select top(col11, 101) from test") + tdSql.error("select top(col12, 0) from test") + tdSql.error("select top(col12, 101) from test") + tdSql.error("select top(col13, 0) from test") + tdSql.error("select top(col13, 101) from test") + tdSql.error("select top(col14, 0) from test") + tdSql.error("select top(col14, 101) from test") + + tdSql.query("select top(col1, 2) from test") + tdSql.checkRows(2) + tdSql.checkEqual(tdSql.queryResult,[(9,),(10,)]) + tdSql.query("select top(col2, 2) from test") + tdSql.checkRows(2) + tdSql.checkEqual(tdSql.queryResult,[(9,),(10,)]) + tdSql.query("select top(col3, 2) from test") + tdSql.checkRows(2) + tdSql.checkEqual(tdSql.queryResult,[(9,),(10,)]) + tdSql.query("select top(col4, 2) from test") + tdSql.checkRows(2) + tdSql.checkEqual(tdSql.queryResult,[(9,),(10,)]) + tdSql.query("select top(col11, 2) from test") + tdSql.checkRows(2) + tdSql.checkEqual(tdSql.queryResult,[(9,),(10,)]) + tdSql.query("select top(col12, 2) from test") + tdSql.checkRows(2) + tdSql.checkEqual(tdSql.queryResult,[(9,),(10,)]) + tdSql.query("select top(col13, 2) from test") + tdSql.checkRows(2) + tdSql.checkEqual(tdSql.queryResult,[(9,),(10,)]) + tdSql.query("select top(col14, 2) from test") + tdSql.checkRows(2) + tdSql.checkEqual(tdSql.queryResult,[(9,),(10,)]) + tdSql.query("select ts,top(col1, 2),ts from test1") + tdSql.checkRows(2) + tdSql.query("select top(col14, 100) from test") + tdSql.checkRows(10) + tdSql.query("select ts,top(col1, 2),ts from test group by 
tbname") + tdSql.checkRows(2) + tdSql.query('select top(col2,1) from test interval(1y) order by col2') + tdSql.checkData(0,0,10) + + tdSql.error("select * from test where bottom(col2,1)=1") + tdSql.error("select top(col14, 0) from test;") + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/system-test/2-query/union.py b/tests/system-test/2-query/union.py index 935e91afdbd0ec0417222acc0d85d99799aed545..88767ab888c9bfe11c329eecd41f78442436cafb 100644 --- a/tests/system-test/2-query/union.py +++ b/tests/system-test/2-query/union.py @@ -35,8 +35,6 @@ class TDTestCase: for char_col in CHAR_COL: query_condition.extend( ( - f"rtrim( {tbname}.{char_col} )", - f"substr( {tbname}.{char_col}, 1 )", f"count( {tbname}.{char_col} )", f"cast( {tbname}.{char_col} as nchar(3) )", ) @@ -45,11 +43,7 @@ class TDTestCase: for num_col in NUM_COL: query_condition.extend( ( - f"{tbname}.{num_col}", - f"floor( {tbname}.{num_col} )", f"log( {tbname}.{num_col}, {tbname}.{num_col})", - f"sin( {tbname}.{num_col} )", - f"sqrt( {tbname}.{num_col} )", ) ) @@ -96,7 +90,6 @@ class TDTestCase: return "" - def __group_condition(self, col, having = None): if isinstance(col, str): if col.startswith("count"): @@ -114,15 +107,10 @@ class TDTestCase: return return f"select {select_clause} from {from_clause} {where_condition} {group_condition}" - @property def __join_tblist(self): return [ - ["ct1", "ct2"], - ["ct1", "ct4"], ["ct1", "t1"], - ["ct2", "ct4"], - ["ct2", "t1"], ["ct4", "t1"], # ["ct1", "ct2", "ct4"], # ["ct1", "ct2", "t1"], @@ -135,9 +123,7 @@ class TDTestCase: def __tb_liast(self): return [ "ct1", - "ct2", "ct4", - "t1", ] def sql_list(self): @@ -152,15 +138,7 @@ class TDTestCase: having_claus = self.__group_condition( col=select_claus, having=f"{select_claus} is not null") sqls.extend( ( - self.__single_sql(select_claus, join_tb, where_claus, group_claus), - self.__single_sql(select_claus, join_tb, where_claus, having_claus), - self.__single_sql(select_claus, self.__join_condition(join_tblist), where_claus, having_claus), self.__single_sql(select_claus, self.__join_condition(join_tblist, INNER=True), where_claus, having_claus), - self.__single_sql(select_claus, join_tb, where_claus), - self.__single_sql(select_claus, join_tb, having_claus), - self.__single_sql(select_claus, join_tb, group_claus), - self.__single_sql(select_claus, join_tb), - ) ) __no_join_tblist = self.__tb_liast @@ -172,12 +150,7 @@ class TDTestCase: having_claus = self.__group_condition(col=select_claus, having=f"{select_claus} is not null") sqls.extend( ( - self.__single_sql(select_claus, join_tb, where_claus, group_claus), - self.__single_sql(select_claus, join_tb, where_claus, having_claus), - self.__single_sql(select_claus, join_tb, where_claus), - self.__single_sql(select_claus, join_tb, group_claus), - self.__single_sql(select_claus, join_tb, having_claus), - self.__single_sql(select_claus, join_tb), + self.__single_sql(select_claus, tb, where_claus, having_claus), ) ) @@ -221,6 +194,8 @@ class TDTestCase: for i in range(len(sqls)): tdSql.query(sqls[i]) res1_type = self.__get_type(0) + # if i % 5 == 0: + # tdLog.success(f"{i} : sql is already executing!") for j in range(len(sqls[i:])): tdSql.query(sqls[j+i]) order_union_type = False @@ -246,22 +221,12 @@ class TDTestCase: rev_order_type = True if all_union_type: - tdSql.query(f"{sqls[i]} union {sqls[j+i]}") - tdSql.query(f"{sqls[j+i]} union 
{sqls[i]}") - tdSql.checkCols(1) - tdSql.query(f"{sqls[i]} union all {sqls[j+i]}") - tdSql.query(f"{sqls[j+i]} union all {sqls[i]}") - tdSql.checkCols(1) + tdSql.execute(f"{sqls[i]} union {sqls[j+i]}") + tdSql.execute(f"{sqls[j+i]} union all {sqls[i]}") elif order_union_type: - tdSql.query(f"{sqls[i]} union {sqls[j+i]}") - tdSql.checkCols(1) - tdSql.query(f"{sqls[i]} union all {sqls[j+i]}") - tdSql.checkCols(1) + tdSql.execute(f"{sqls[i]} union all {sqls[j+i]}") elif rev_order_type: - tdSql.query(f"{sqls[j+i]} union {sqls[i]}") - tdSql.checkCols(1) - tdSql.query(f"{sqls[j+i]} union all {sqls[i]}") - tdSql.checkCols(1) + tdSql.execute(f"{sqls[j+i]} union {sqls[i]}") else: tdSql.error(f"{sqls[i]} union {sqls[j+i]}") @@ -273,7 +238,7 @@ class TDTestCase: tdSql.error( "select c1 from ct1 union all drop table ct3" ) tdSql.error( "select c1 from ct1 union all '' " ) tdSql.error( " '' union all select c1 from ct1 " ) - tdSql.error( "select c1 from ct1 union select c1 from ct2 union select c1 from ct4 ") + # tdSql.error( "select c1 from ct1 union select c1 from ct2 union select c1 from ct4 ") def all_test(self): self.__test_error() diff --git a/tests/system-test/2-query/union1.py b/tests/system-test/2-query/union1.py new file mode 100644 index 0000000000000000000000000000000000000000..ea6940246e6fed6b9a2c8512f69fde4d3d3a6d70 --- /dev/null +++ b/tests/system-test/2-query/union1.py @@ -0,0 +1,370 @@ +import datetime + +from util.log import * +from util.sql import * +from util.cases import * +from util.dnodes import * + +PRIMARY_COL = "ts" + +INT_COL = "c1" +BINT_COL = "c2" +SINT_COL = "c3" +TINT_COL = "c4" +FLOAT_COL = "c5" +DOUBLE_COL = "c6" +BOOL_COL = "c7" + +BINARY_COL = "c8" +NCHAR_COL = "c9" +TS_COL = "c10" + +NUM_COL = [ INT_COL, BINT_COL, SINT_COL, TINT_COL, FLOAT_COL, DOUBLE_COL, ] +CHAR_COL = [ BINARY_COL, NCHAR_COL, ] +BOOLEAN_COL = [ BOOL_COL, ] +TS_TYPE_COL = [ TS_COL, ] + +class TDTestCase: + + def init(self, conn, logSql): + tdLog.debug(f"start to excute {__file__}") + tdSql.init(conn.cursor()) + + def __query_condition(self,tbname): + query_condition = [] + for char_col in CHAR_COL: + query_condition.extend( + ( + f"count( {tbname}.{char_col} )", + f"cast( {tbname}.{char_col} as nchar(3) )", + ) + ) + + for num_col in NUM_COL: + query_condition.extend( + ( + f"log( {tbname}.{num_col}, {tbname}.{num_col})", + ) + ) + + query_condition.extend( + ( + ''' "test12" ''', + # 1010, + ) + ) + + return query_condition + + def __join_condition(self, tb_list, filter=PRIMARY_COL, INNER=False): + table_reference = tb_list[0] + join_condition = table_reference + join = "inner join" if INNER else "join" + for i in range(len(tb_list[1:])): + join_condition += f" {join} {tb_list[i+1]} on {table_reference}.{filter}={tb_list[i+1]}.{filter}" + + return join_condition + + def __where_condition(self, col=None, tbname=None, query_conditon=None): + if query_conditon and isinstance(query_conditon, str): + if query_conditon.startswith("count"): + query_conditon = query_conditon[6:-1] + elif query_conditon.startswith("max"): + query_conditon = query_conditon[4:-1] + elif query_conditon.startswith("sum"): + query_conditon = query_conditon[4:-1] + elif query_conditon.startswith("min"): + query_conditon = query_conditon[4:-1] + + + if query_conditon: + return f" where {query_conditon} is not null" + if col in NUM_COL: + return f" where abs( {tbname}.{col} ) >= 0" + if col in CHAR_COL: + return f" where lower( {tbname}.{col} ) like 'bina%' or lower( {tbname}.{col} ) like '_cha%' " + if col in BOOLEAN_COL: + return 
f" where {tbname}.{col} in (false, true) " + if col in TS_TYPE_COL or col in PRIMARY_COL: + return f" where cast( {tbname}.{col} as binary(16) ) is not null " + + return "" + + def __group_condition(self, col, having = None): + if isinstance(col, str): + if col.startswith("count"): + col = col[6:-1] + elif col.startswith("max"): + col = col[4:-1] + elif col.startswith("sum"): + col = col[4:-1] + elif col.startswith("min"): + col = col[4:-1] + return f" group by {col} having {having}" if having else f" group by {col} " + + def __single_sql(self, select_clause, from_clause, where_condition="", group_condition=""): + if isinstance(select_clause, str) and "on" not in from_clause and select_clause.split(".")[0] != from_clause.split(".")[0]: + return + return f"select {select_clause} from {from_clause} {where_condition} {group_condition}" + + @property + def __join_tblist(self): + return [ + ["ct1", "ct2"], + # ["ct1", "ct2", "ct4"], + # ["ct1", "ct2", "t1"], + # ["ct1", "ct4", "t1"], + # ["ct2", "ct4", "t1"], + # ["ct1", "ct2", "ct4", "t1"], + ] + + @property + def __tb_liast(self): + return [ + "t1", + "stb1", + ] + + def sql_list(self): + sqls = [] + __join_tblist = self.__join_tblist + for join_tblist in __join_tblist: + for join_tb in join_tblist: + select_claus_list = self.__query_condition(join_tb) + for select_claus in select_claus_list: + group_claus = self.__group_condition( col=select_claus) + where_claus = self.__where_condition(query_conditon=select_claus) + having_claus = self.__group_condition( col=select_claus, having=f"{select_claus} is not null") + sqls.extend( + ( + self.__single_sql(select_claus, self.__join_condition(join_tblist, INNER=True), where_claus, having_claus), + ) + ) + __no_join_tblist = self.__tb_liast + for tb in __no_join_tblist: + select_claus_list = self.__query_condition(tb) + for select_claus in select_claus_list: + group_claus = self.__group_condition(col=select_claus) + where_claus = self.__where_condition(query_conditon=select_claus) + having_claus = self.__group_condition(col=select_claus, having=f"{select_claus} is not null") + sqls.extend( + ( + self.__single_sql(select_claus, tb, where_claus, having_claus), + ) + ) + + # return filter(None, sqls) + return list(filter(None, sqls)) + + def __get_type(self, col): + if tdSql.cursor.istype(col, "BOOL"): + return "BOOL" + if tdSql.cursor.istype(col, "INT"): + return "INT" + if tdSql.cursor.istype(col, "BIGINT"): + return "BIGINT" + if tdSql.cursor.istype(col, "TINYINT"): + return "TINYINT" + if tdSql.cursor.istype(col, "SMALLINT"): + return "SMALLINT" + if tdSql.cursor.istype(col, "FLOAT"): + return "FLOAT" + if tdSql.cursor.istype(col, "DOUBLE"): + return "DOUBLE" + if tdSql.cursor.istype(col, "BINARY"): + return "BINARY" + if tdSql.cursor.istype(col, "NCHAR"): + return "NCHAR" + if tdSql.cursor.istype(col, "TIMESTAMP"): + return "TIMESTAMP" + if tdSql.cursor.istype(col, "JSON"): + return "JSON" + if tdSql.cursor.istype(col, "TINYINT UNSIGNED"): + return "TINYINT UNSIGNED" + if tdSql.cursor.istype(col, "SMALLINT UNSIGNED"): + return "SMALLINT UNSIGNED" + if tdSql.cursor.istype(col, "INT UNSIGNED"): + return "INT UNSIGNED" + if tdSql.cursor.istype(col, "BIGINT UNSIGNED"): + return "BIGINT UNSIGNED" + + def union_check(self): + sqls = self.sql_list() + for i in range(len(sqls)): + tdSql.query(sqls[i]) + res1_type = self.__get_type(0) + # if i % 5 == 0: + # tdLog.success(f"{i} : sql is already executing!") + for j in range(len(sqls[i:])): + tdSql.query(sqls[j+i]) + order_union_type = False + rev_order_type = 
False + all_union_type = False + res2_type = self.__get_type(0) + + if res2_type == res1_type: + all_union_type = True + elif res1_type in ( "BIGINT" , "NCHAR" ) and res2_type in ("BIGINT" , "NCHAR"): + all_union_type = True + elif res1_type in ("BIGINT", "NCHAR"): + order_union_type = True + elif res2_type in ("BIGINT", "NCHAR"): + rev_order_type = True + elif res1_type == "TIMESTAMP" and res2_type not in ("BINARY", "NCHAR"): + order_union_type = True + elif res2_type == "TIMESTAMP" and res1_type not in ("BINARY", "NCHAR"): + rev_order_type = True + elif res1_type == "BINARY" and res2_type != "NCHAR": + order_union_type = True + elif res2_type == "BINARY" and res1_type != "NCHAR": + rev_order_type = True + + if all_union_type: + tdSql.execute(f"{sqls[i]} union {sqls[j+i]}") + tdSql.execute(f"{sqls[j+i]} union all {sqls[i]}") + elif order_union_type: + tdSql.execute(f"{sqls[i]} union all {sqls[j+i]}") + elif rev_order_type: + tdSql.execute(f"{sqls[j+i]} union {sqls[i]}") + else: + tdSql.error(f"{sqls[i]} union {sqls[j+i]}") + + def __test_error(self): + + tdSql.error( "show tables union show tables" ) + tdSql.error( "create table errtb1 union all create table errtb2" ) + tdSql.error( "drop table ct1 union all drop table ct3" ) + tdSql.error( "select c1 from ct1 union all drop table ct3" ) + tdSql.error( "select c1 from ct1 union all '' " ) + tdSql.error( " '' union all select c1 from ct1 " ) + # tdSql.error( "select c1 from ct1 union select c1 from ct2 union select c1 from ct4 ") + + def all_test(self): + self.__test_error() + self.union_check() + + + def __create_tb(self): + + tdLog.printNoPrefix("==========step1:create table") + create_stb_sql = f'''create table stb1( + ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint, + {FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool, + {BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp + ) tags (t1 int) + ''' + create_ntb_sql = f'''create table t1( + ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint, + {FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool, + {BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp + ) + ''' + tdSql.execute(create_stb_sql) + tdSql.execute(create_ntb_sql) + + for i in range(4): + tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )') + + def __insert_data(self, rows): + now_time = int(datetime.datetime.timestamp(datetime.datetime.now()) * 1000) + for i in range(rows): + tdSql.execute( + f"insert into ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" + ) + tdSql.execute( + f"insert into ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" + ) + tdSql.execute( + f"insert into ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" + ) + tdSql.execute( + f'''insert into ct1 values + ( { now_time - rows * 5 }, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar_测试_0', { now_time + 8 } ) + ( { now_time + 10000 }, { rows }, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar_测试_9', { now_time + 9 } ) + ''' + ) + + tdSql.execute( + f'''insert into ct4 values + ( {
now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( + { now_time + 5184000000}, {pow(2,31)-pow(2,15)}, {pow(2,63)-pow(2,30)}, 32767, 127, + { 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000} + ) + ( + { now_time + 2592000000 }, {pow(2,31)-pow(2,16)}, {pow(2,63)-pow(2,31)}, 32766, 126, + { 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000} + ) + ''' + ) + + tdSql.execute( + f'''insert into ct2 values + ( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( + { now_time + 5184000000 }, { -1 * pow(2,31) + pow(2,15) }, { -1 * pow(2,63) + pow(2,30) }, -32766, -126, + { -1 * 3.2 * pow(10,38) }, { -1.2 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 } + ) + ( + { now_time + 2592000000 }, { -1 * pow(2,31) + pow(2,16) }, { -1 * pow(2,63) + pow(2,31) }, -32767, -127, + { - 3.3 * pow(10,38) }, { -1.3 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 } + ) + ''' + ) + + for i in range(rows): + insert_data = f'''insert into t1 values + ( { now_time - i * 3600000 }, {i}, {i * 11111}, { i % 32767 }, { i % 127}, { i * 1.11111 }, { i * 1000.1111 }, { i % 2}, + "binary_{i}", "nchar_测试_{i}", { now_time - 1000 * i } ) + ''' + tdSql.execute(insert_data) + tdSql.execute( + f'''insert into t1 values + ( { now_time + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time - (( rows // 2 ) * 60 + 30) * 60000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time - rows * 3600000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time + 7200000 }, { pow(2,31) - pow(2,15) }, { pow(2,63) - pow(2,30) }, 32767, 127, + { 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 }, + "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 } + ) + ( + { now_time + 3600000 } , { pow(2,31) - pow(2,16) }, { pow(2,63) - pow(2,31) }, 32766, 126, + { 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 }, + "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 } + ) + ''' + ) + + + def run(self): + tdSql.prepare() + + tdLog.printNoPrefix("==========step1:create table") + self.__create_tb() + + tdLog.printNoPrefix("==========step2:insert data") + self.rows = 10 + self.__insert_data(self.rows) + + tdLog.printNoPrefix("==========step3:all check") + self.all_test() + + tdDnodes.stop(1) + tdDnodes.start(1) + + tdSql.execute("use db") + + tdLog.printNoPrefix("==========step4:after wal, all check again ") + self.all_test() + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/2-query/unique.py b/tests/system-test/2-query/unique.py new file mode 100644 index 0000000000000000000000000000000000000000..227efa6f9ceda24df73830bd46838fd657b67d48 --- /dev/null +++ 
b/tests/system-test/2-query/unique.py @@ -0,0 +1,457 @@ +from math import floor +from random import randint, random +from numpy import equal +import taos +import sys +import datetime +import inspect + +from util.log import * +from util.sql import * +from util.cases import * + +class TDTestCase: + updatecfgDict = {'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 , + "jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, "dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143, + "wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"fnDebugFlag":143} + + def init(self, conn, logSql): + tdLog.debug(f"start to excute {__file__}") + tdSql.init(conn.cursor()) + + def prepare_datas(self): + tdSql.execute( + '''create table stb1 + (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) + tags (t1 int) + ''' + ) + + tdSql.execute( + ''' + create table t1 + (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) + ''' + ) + for i in range(4): + tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )') + + for i in range(9): + tdSql.execute( + f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )" + ) + tdSql.execute( + f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )" + ) + tdSql.execute("insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )") + tdSql.execute("insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )") + tdSql.execute("insert into ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )") + tdSql.execute("insert into ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )") + + tdSql.execute("insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + tdSql.execute("insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + tdSql.execute("insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + + tdSql.execute( + f'''insert into t1 values + ( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a ) + ( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a ) + ( '2021-01-01 01:01:06.000', 3, 33333, 333, 33, 3.33, 33.33, 0, "binary3", "nchar3", now()+3a ) + ( '2021-05-07 01:01:10.000', 4, 44444, 444, 44, 4.44, 44.44, 1, "binary4", "nchar4", now()+4a ) + ( '2021-07-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( '2021-09-30 01:01:16.000', 5, 55555, 555, 55, 5.55, 55.55, 0, "binary5", "nchar5", now()+5a ) + ( '2022-02-01 01:01:20.000', 6, 66666, 666, 66, 6.66, 66.66, 1, "binary6", "nchar6", now()+6a ) + ( '2022-10-28 01:01:26.000', 7, 00000, 000, 00, 0.00, 00.00, 1, "binary7", "nchar7", "1970-01-01 08:00:00.000" ) + ( '2022-12-01 01:01:30.000', 8, -88888, -888, -88, -8.88, -88.88, 0, "binary8", "nchar8", "1969-01-01 01:00:00.000" ) + ( '2022-12-31 01:01:36.000', 9, 
-99999999999999999, -999, -99, -9.99, -999999999999999999999.99, 1, "binary9", "nchar9", "1900-01-01 00:00:00.000" ) + ( '2023-02-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ''' + ) + + def test_errors(self): + error_sql_lists = [ + "select unique from t1", + "select unique(123--123)==1 from t1", + "select unique(123,123) from t1", + "select unique(c1,ts) from t1", + "select unique(c1,c1,ts) from t1", + "select unique(c1) as 'd1' from t1", + "select unique(c1 ,c2 ) from t1", + "select unique(c1 ,NULL) from t1", + "select unique(,) from t1;", + "select unique(floor(c1) ab from t1)", + "select unique(c1) as int from t1", + "select unique('c1') from t1", + "select unique(NULL) from t1", + "select unique('') from t1", + "select unique(c%) from t1", + "select unique(t1) from t1", + "select unique(True) from t1", + "select unique(c1) , count(c1) from t1", + "select unique(c1) , avg(c1) from t1", + "select unique(c1) , min(c1) from t1", + "select unique(c1) , spread(c1) from t1", + "select unique(c1) , diff(c1) from t1", + "select unique(c1) , abs(c1) from t1", + "select unique(c1) , c1 from t1", + "select unique from stb1 partition by tbname", + "select unique(123--123)==1 from stb1 partition by tbname", + "select unique(123) from stb1 partition by tbname", + "select unique(c1,ts) from stb1 partition by tbname", + "select unique(c1,c1,ts) from stb1 partition by tbname", + "select unique(c1) as 'd1' from stb1 partition by tbname", + "select unique(c1 ,c2 ) from stb1 partition by tbname", + "select unique(c1 ,NULL) from stb1 partition by tbname", + "select unique(,) from stb1 partition by tbname;", + "select unique(floor(c1) ab from stb1 partition by tbname)", + "select unique(c1) as int from stb1 partition by tbname", + "select unique('c1') from stb1 partition by tbname", + "select unique(NULL) from stb1 partition by tbname", + "select unique('') from stb1 partition by tbname", + "select unique(c%) from stb1 partition by tbname", + #"select unique(t1) from stb1 partition by tbname", + "select unique(True) from stb1 partition by tbname", + "select unique(c1) , count(c1) from stb1 partition by tbname", + "select unique(c1) , avg(c1) from stb1 partition by tbname", + "select unique(c1) , min(c1) from stb1 partition by tbname", + "select unique(c1) , spread(c1) from stb1 partition by tbname", + "select unique(c1) , diff(c1) from stb1 partition by tbname", + "select unique(c1) , abs(c1) from stb1 partition by tbname", + "select unique(c1) , c1 from stb1 partition by tbname" + + ] + for error_sql in error_sql_lists: + tdSql.error(error_sql) + pass + + def support_types(self): + other_no_value_types = [ + "select unique(ts) from t1" , + "select unique(c7) from t1", + "select unique(c8) from t1", + "select unique(c9) from t1", + "select unique(ts) from ct1" , + "select unique(c7) from ct1", + "select unique(c8) from ct1", + "select unique(c9) from ct1", + "select unique(ts) from ct3" , + "select unique(c7) from ct3", + "select unique(c8) from ct3", + "select unique(c9) from ct3", + "select unique(ts) from ct4" , + "select unique(c7) from ct4", + "select unique(c8) from ct4", + "select unique(c9) from ct4", + "select unique(ts) from stb1 partition by tbname" , + "select unique(c7) from stb1 partition by tbname", + "select unique(c8) from stb1 partition by tbname", + "select unique(c9) from stb1 partition by tbname" + ] + + for type_sql in other_no_value_types: + tdSql.query(type_sql) + tdLog.info("support type ok , sql is : %s"%type_sql) + + type_sql_lists = [ + 
"select unique(c1) from t1", + "select unique(c2) from t1", + "select unique(c3) from t1", + "select unique(c4) from t1", + "select unique(c5) from t1", + "select unique(c6) from t1", + + "select unique(c1) from ct1", + "select unique(c2) from ct1", + "select unique(c3) from ct1", + "select unique(c4) from ct1", + "select unique(c5) from ct1", + "select unique(c6) from ct1", + + "select unique(c1) from ct3", + "select unique(c2) from ct3", + "select unique(c3) from ct3", + "select unique(c4) from ct3", + "select unique(c5) from ct3", + "select unique(c6) from ct3", + + "select unique(c1) from stb1 partition by tbname", + "select unique(c2) from stb1 partition by tbname", + "select unique(c3) from stb1 partition by tbname", + "select unique(c4) from stb1 partition by tbname", + "select unique(c5) from stb1 partition by tbname", + "select unique(c6) from stb1 partition by tbname", + + "select unique(c6) as alisb from stb1 partition by tbname", + "select unique(c6) alisb from stb1 partition by tbname", + ] + + for type_sql in type_sql_lists: + tdSql.query(type_sql) + + def check_unique_table(self , unique_sql): + # unique_sql = "select unique(c1) from ct1" + origin_sql = unique_sql.replace("unique(","").replace(")","") + tdSql.query(unique_sql) + unique_result = tdSql.queryResult + + unique_datas = [] + for elem in unique_result: + unique_datas.append(elem[0]) + + + tdSql.query(origin_sql) + origin_result = tdSql.queryResult + origin_datas = [] + for elem in origin_result: + origin_datas.append(elem[0]) + + pre_unique = [] + for elem in origin_datas: + if elem in pre_unique: + continue + else: + pre_unique.append(elem) + + if pre_unique == unique_datas: + tdLog.info(" unique query check pass , unique sql is: %s" %unique_sql) + else: + tdLog.exit(" unique query check fail , unique sql is: %s " %unique_sql) + + def basic_unique_function(self): + + # basic query + tdSql.query("select c1 from ct3") + tdSql.checkRows(0) + tdSql.query("select c1 from t1") + tdSql.checkRows(12) + tdSql.query("select c1 from stb1") + tdSql.checkRows(25) + + # used for empty table , ct3 is empty + tdSql.query("select unique(c1) from ct3") + tdSql.checkRows(0) + tdSql.query("select unique(c2) from ct3") + tdSql.checkRows(0) + tdSql.query("select unique(c3) from ct3") + tdSql.checkRows(0) + tdSql.query("select unique(c4) from ct3") + tdSql.checkRows(0) + tdSql.query("select unique(c5) from ct3") + tdSql.checkRows(0) + tdSql.query("select unique(c6) from ct3") + + # will support _rowts mix with + # tdSql.query("select unique(c6),_rowts from ct3") + + # auto check for t1 table + # used for regular table + tdSql.query("select unique(c1) from t1") + + tdSql.query("desc t1") + col_lists_rows = tdSql.queryResult + col_lists = [] + for col_name in col_lists_rows: + col_lists.append(col_name[0]) + + for col in col_lists: + self.check_unique_table(f"select unique({col}) from t1") + + # unique with super tags + + tdSql.query("select unique(c1) from ct1") + tdSql.checkRows(10) + + tdSql.query("select unique(c1) from ct4") + tdSql.checkRows(10) + + tdSql.error("select unique(c1),tbname from ct1") + tdSql.error("select unique(c1),t1 from ct1") + + # unique with common col + tdSql.error("select unique(c1) ,ts from ct1") + tdSql.error("select unique(c1) ,c1 from ct1") + + # unique with scalar function + tdSql.error("select unique(c1) ,abs(c1) from ct1") + tdSql.error("select unique(c1) , unique(c2) from ct1") + tdSql.error("select unique(c1) , abs(c2)+2 from ct1") + + + # unique with aggregate function + tdSql.error("select unique(c1) 
,sum(c1) from ct1") + tdSql.error("select unique(c1) ,max(c1) from ct1") + tdSql.error("select unique(c1) ,csum(c1) from ct1") + tdSql.error("select unique(c1) ,count(c1) from ct1") + + # unique with filter where + tdSql.query("select unique(c1) from ct4 where c1 is null") + tdSql.checkData(0, 0, None) + + tdSql.query("select unique(c1) from ct4 where c1 >2 ") + tdSql.checkData(0, 0, 8) + tdSql.checkData(1, 0, 7) + tdSql.checkData(2, 0, 6) + tdSql.checkData(5, 0, 3) + + tdSql.query("select unique(c1) from ct4 where c2 between 0 and 99999") + tdSql.checkData(0, 0, 8) + tdSql.checkData(1, 0, 7) + tdSql.checkData(2, 0, 6) + tdSql.checkData(3, 0, 5) + tdSql.checkData(4, 0, 4) + tdSql.checkData(5, 0, 3) + tdSql.checkData(6, 0, 2) + tdSql.checkData(7, 0, 1) + tdSql.checkData(8, 0, 0) + + # unique with union all + tdSql.query("select unique(c1) from ct4 union all select c1 from ct1") + tdSql.checkRows(23) + tdSql.query("select unique(c1) from ct4 union all select distinct(c1) from ct4") + tdSql.checkRows(20) + tdSql.query("select unique(c2) from ct4 union all select abs(c2)/2 from ct4") + tdSql.checkRows(22) + + # unique with join + # prepare join datas with same ts + + tdSql.execute(" use db ") + tdSql.execute(" create stable st1 (ts timestamp , num int) tags(ind int)") + tdSql.execute(" create table tb1 using st1 tags(1)") + tdSql.execute(" create table tb2 using st1 tags(2)") + + tdSql.execute(" create stable st2 (ts timestamp , num int) tags(ind int)") + tdSql.execute(" create table ttb1 using st2 tags(1)") + tdSql.execute(" create table ttb2 using st2 tags(2)") + + start_ts = 1622369635000 # 2021-05-30 18:13:55 + + for i in range(10): + ts_value = start_ts+i*1000 + tdSql.execute(f" insert into tb1 values({ts_value} , {i})") + tdSql.execute(f" insert into tb2 values({ts_value} , {i})") + + tdSql.execute(f" insert into ttb1 values({ts_value} , {i})") + tdSql.execute(f" insert into ttb2 values({ts_value} , {i})") + + tdSql.query("select unique(tb2.num) from tb1, tb2 where tb1.ts=tb2.ts ") + tdSql.checkRows(10) + tdSql.checkData(0,0,0) + tdSql.checkData(1,0,1) + tdSql.checkData(2,0,2) + tdSql.checkData(9,0,9) + + tdSql.query("select unique(tb2.num) from tb1, tb2 where tb1.ts=tb2.ts union all select unique(tb1.num) from tb1, tb2 where tb1.ts=tb2.ts ") + tdSql.checkRows(20) + tdSql.checkData(0,0,0) + tdSql.checkData(1,0,1) + tdSql.checkData(2,0,2) + tdSql.checkData(9,0,9) + + # nest query + # tdSql.query("select unique(c1) from (select c1 from ct1)") + tdSql.query("select c1 from (select unique(c1) c1 from ct4)") + tdSql.checkRows(10) + tdSql.checkData(0, 0, None) + tdSql.checkData(1, 0, 8) + tdSql.checkData(9, 0, 0) + + tdSql.query("select sum(c1) from (select unique(c1) c1 from ct1)") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 45) + + tdSql.query("select sum(c1) from (select distinct(c1) c1 from ct1) union all select sum(c1) from (select unique(c1) c1 from ct1)") + tdSql.checkRows(2) + tdSql.checkData(0, 0, 45) + tdSql.checkData(1, 0, 45) + + tdSql.query("select 1-abs(c1) from (select unique(c1) c1 from ct4)") + tdSql.checkRows(10) + tdSql.checkData(0, 0, None) + tdSql.checkData(1, 0, -7.000000000) + + + # bug for stable + #partition by tbname + # tdSql.query(" select unique(c1) from stb1 partition by tbname ") + # tdSql.checkRows(21) + + # tdSql.query(" select unique(c1) from stb1 partition by tbname ") + # tdSql.checkRows(21) + + # group by + tdSql.error("select unique(c1) from ct1 group by c1") + tdSql.error("select unique(c1) from ct1 group by tbname") + + # super table + + + + + def 
check_boundary_values(self): + + tdSql.execute("drop database if exists bound_test") + tdSql.execute("create database if not exists bound_test") + tdSql.execute("use bound_test") + tdSql.execute( + "create table stb_bound (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp) tags (t1 int);" + ) + tdSql.execute(f'create table sub1_bound using stb_bound tags ( 1 )') + tdSql.execute( + f"insert into sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" + ) + tdSql.execute( + f"insert into sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" + ) + + tdSql.execute( + f"insert into sub1_bound values ( now(), -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" + ) + + tdSql.execute( + f"insert into sub1_bound values ( now(), 2147483643, 9223372036854775803, 32763, 123, 3.39E+38, 1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )" + ) + + tdSql.execute( + f"insert into sub1_bound values ( now(), -2147483643, -9223372036854775803, -32763, -123, -3.39E+38, -1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )" + ) + + tdSql.error( + f"insert into sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" + ) + + tdSql.query("select unique(c2) from sub1_bound") + tdSql.checkRows(5) + tdSql.checkData(0,0,9223372036854775807) + + def run(self): # sourcery skip: extract-duplicate-method, remove-redundant-fstring + tdSql.prepare() + + tdLog.printNoPrefix("==========step1:create table ==============") + + self.prepare_datas() + + tdLog.printNoPrefix("==========step2:test errors ==============") + + self.test_errors() + + tdLog.printNoPrefix("==========step3:support types ============") + + self.support_types() + + tdLog.printNoPrefix("==========step4: unique basic query ============") + + self.basic_unique_function() + + tdLog.printNoPrefix("==========step5: unique boundary query ============") + + self.check_boundary_values() + + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/7-tmq/basic5.py index 4a29cacd97b7bad3bcd469fe1ebc2b445061397a..500e8671217f5d4bb8ae0793f288791095303135 100644 --- a/tests/system-test/7-tmq/basic5.py +++ b/tests/system-test/7-tmq/basic5.py @@ -22,8 +22,8 @@ class TDTestCase: def init(self, conn, logSql): tdLog.debug(f"start to excute {__file__}") - #tdSql.init(conn.cursor()) - tdSql.init(conn.cursor(), logSql) # output sql.txt file + tdSql.init(conn.cursor()) + #tdSql.init(conn.cursor(), logSql) # output sql.txt file def getBuildPath(self): selfPath = os.path.dirname(os.path.realpath(__file__)) @@ -186,7 +186,7 @@ class TDTestCase: time.sleep(1) tdLog.info("start consume processor") - pollDelay = 5 + pollDelay = 100 showMsg = 1 showRow = 1 @@ -228,7 +228,7 @@ class TDTestCase: 'stbName': 'stb', \ 'ctbNum': 10, \ 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ + 'batchNum': 200, \ 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 parameterDict['cfg'] = cfgPath @@ -300,7 +300,7 @@ class TDTestCase: time.sleep(1) tdLog.info("start consume processor") - pollDelay = 5 + pollDelay = 100 showMsg = 1 showRow = 1 @@ -349,8 
+349,8 @@ class TDTestCase: 'vgroups': 1, \ 'stbName': 'stb', \ 'ctbNum': 10, \ - 'rowsPerTbl': 30000, \ - 'batchNum': 100, \ + 'rowsPerTbl': 10000, \ + 'batchNum': 200, \ 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 parameterDict['cfg'] = cfgPath @@ -381,8 +381,8 @@ class TDTestCase: 'vgroups': 1, \ 'stbName': 'stb2', \ 'ctbNum': 10, \ - 'rowsPerTbl': 30000, \ - 'batchNum': 100, \ + 'rowsPerTbl': 10000, \ + 'batchNum': 200, \ 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 parameterDict2['cfg'] = cfgPath tdSql.execute("create stable if not exists %s.%s (ts timestamp, c1 bigint, c2 binary(16)) tags(t1 int)"%(parameterDict2['dbName'], parameterDict2['stbName'])) @@ -432,7 +432,7 @@ class TDTestCase: time.sleep(1) tdLog.info("start consume processor") - pollDelay = 5 + pollDelay = 100 showMsg = 1 showRow = 1 diff --git a/tests/system-test/7-tmq/db.py b/tests/system-test/7-tmq/db.py new file mode 100644 index 0000000000000000000000000000000000000000..0115686798e6ed9e8d048d327d0f1eb8026ee9b3 --- /dev/null +++ b/tests/system-test/7-tmq/db.py @@ -0,0 +1,462 @@ + +import taos +import sys +import time +import socket +import os +import threading +from enum import Enum + +from util.log import * +from util.sql import * +from util.cases import * +from util.dnodes import * + +class actionType(Enum): + CREATE_DATABASE = 0 + CREATE_STABLE = 1 + CREATE_CTABLE = 2 + INSERT_DATA = 3 + +class TDTestCase: + hostname = socket.gethostname() + #rpcDebugFlagVal = '143' + #clientCfgDict = {'serverPort': '', 'firstEp': '', 'secondEp':'', 'rpcDebugFlag':'135', 'fqdn':''} + #clientCfgDict["rpcDebugFlag"] = rpcDebugFlagVal + #updatecfgDict = {'clientCfg': {}, 'serverPort': '', 'firstEp': '', 'secondEp':'', 'rpcDebugFlag':'135', 'fqdn':''} + #updatecfgDict["rpcDebugFlag"] = rpcDebugFlagVal + #print ("===================: ", updatecfgDict) + + def init(self, conn, logSql): + tdLog.debug(f"start to excute {__file__}") + tdSql.init(conn.cursor()) + #tdSql.init(conn.cursor(), logSql) # output sql.txt file + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosd" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root) - len("/build/bin")] + break + return buildPath + + def newcur(self,cfg,host,port): + user = "root" + password = "taosdata" + con=taos.connect(host=host, user=user, password=password, config=cfg ,port=port) + cur=con.cursor() + print(cur) + return cur + + def initConsumerTable(self,cdbName='cdb'): + tdLog.info("create consume database, and consume info table, and consume result table") + tdSql.query("drop database if exists %s "%(cdbName)) + tdSql.query("create database %s vgroups 1"%(cdbName)) + tdSql.query("drop table if exists %s.consumeinfo "%(cdbName)) + tdSql.query("drop table if exists %s.consumeresult "%(cdbName)) + + tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)"%cdbName) + tdSql.query("create table %s.consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)"%cdbName) + + def initConsumeContentTable(self,id=0,cdbName='cdb'): + tdSql.query("drop table if exists %s.content_%d "%(cdbName, id)) + tdSql.query("create 
table %s.content_%d (ts timestamp, contentOfRow binary(1024))"%(cdbName, id)) + + def initConsumerInfoTable(self,cdbName='cdb'): + tdLog.info("drop consumeinfo table") + tdSql.query("drop table if exists %s.consumeinfo "%(cdbName)) + tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)"%cdbName) + + def insertConsumerInfo(self,consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifmanualcommit,cdbName='cdb'): + sql = "insert into %s.consumeinfo values "%cdbName + sql += "(now, %d, '%s', '%s', %d, %d, %d)"%(consumerId, topicList, keyList, expectrowcnt, ifcheckdata, ifmanualcommit) + tdLog.info("consume info sql: %s"%sql) + tdSql.query(sql) + + def selectConsumeResult(self,expectRows,cdbName='cdb'): + resultList=[] + while 1: + tdSql.query("select * from %s.consumeresult"%cdbName) + #tdLog.info("row: %d, %l64d, %l64d"%(tdSql.getData(0, 1),tdSql.getData(0, 2),tdSql.getData(0, 3)) + if tdSql.getRows() == expectRows: + break + else: + time.sleep(5) + + for i in range(expectRows): + tdLog.info ("consume id: %d, consume msgs: %d, consume rows: %d"%(tdSql.getData(i , 1), tdSql.getData(i , 2), tdSql.getData(i , 3))) + resultList.append(tdSql.getData(i , 3)) + + return resultList + + def startTmqSimProcess(self,buildPath,cfgPath,pollDelay,dbName,showMsg=1,showRow=1,cdbName='cdb',valgrind=0): + shellCmd = 'nohup ' + if valgrind == 1: + logFile = cfgPath + '/../log/valgrind-tmq.log' + shellCmd = 'nohup valgrind --log-file=' + logFile + shellCmd += ' --tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all --num-callers=20 -v --workaround-gcc296-bugs=yes ' + + shellCmd += buildPath + '/build/bin/tmq_sim -c ' + cfgPath + shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName) + shellCmd += "> /dev/null 2>&1 &" + tdLog.info(shellCmd) + os.system(shellCmd) + + def create_database(self,tsql, dbName,dropFlag=1,vgroups=4,replica=1): + if dropFlag == 1: + tsql.execute("drop database if exists %s"%(dbName)) + + tsql.execute("create database if not exists %s vgroups %d replica %d"%(dbName, vgroups, replica)) + tdLog.debug("complete to create database %s"%(dbName)) + return + + def create_stable(self,tsql, dbName,stbName): + tsql.execute("create table if not exists %s.%s (ts timestamp, c1 bigint, c2 binary(16)) tags(t1 int)"%(dbName, stbName)) + tdLog.debug("complete to create %s.%s" %(dbName, stbName)) + return + + def create_ctables(self,tsql, dbName,stbName,ctbPrefix,ctbNum): + tsql.execute("use %s" %dbName) + pre_create = "create table" + sql = pre_create + #tdLog.debug("doing create one stable %s and %d child table in %s ..." %(stbname, count ,dbname))
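+        # Note: the loop below batches DDL. It keeps appending "<ctbPrefix>_<i> using <stbName> tags(<i+1>)" clauses to a single "create table" statement and flushes roughly every 100 child tables, with a final flush for the remainder, e.g. (illustrative names): +        #   create table ctb_0 using stb tags(1) ctb_1 using stb tags(2) ... +        # so creating ctbNum child tables costs about ctbNum/100 round trips instead of ctbNum.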
%(stbname, count ,dbname)) + for i in range(ctbNum): + sql += " %s_%d using %s tags(%d)"%(ctbPrefix,i,stbName,i+1) + if (i > 0) and (i%100 == 0): + tsql.execute(sql) + sql = pre_create + if sql != pre_create: + tsql.execute(sql) + + tdLog.debug("complete to create %d child tables in %s.%s" %(ctbNum, dbName, stbName)) + return + + def insert_data_interlaceByMultiTbl(self,tsql,dbName,ctbPrefix,ctbNum,rowsPerTbl,batchNum,startTs=0): + tdLog.debug("start to insert data ............") + tsql.execute("use %s" %dbName) + pre_insert = "insert into " + sql = pre_insert + + if startTs == 0: + t = time.time() + startTs = int(round(t * 1000)) + + ctbDict = {} + for i in range(ctbNum): + ctbDict[i] = 0 + + #tdLog.debug("doing insert data into stable:%s rows:%d ..."%(stbName, allRows)) + rowsOfCtb = 0 + while rowsOfCtb < rowsPerTbl: + for i in range(ctbNum): + sql += " %s.%s_%d values "%(dbName,ctbPrefix,i) + for k in range(batchNum): + sql += "(%d, %d, 'tmqrow_%d') "%(startTs + ctbDict[i], ctbDict[i], ctbDict[i]) + ctbDict[i] += 1 + if (0 == ctbDict[i]%batchNum) or (ctbDict[i] == rowsPerTbl): + tsql.execute(sql) + sql = "insert into " + break + rowsOfCtb = ctbDict[0] + + tdLog.debug("insert data ............ [OK]") + return + + def insert_data(self,tsql,dbName,ctbPrefix,ctbNum,rowsPerTbl,batchNum,startTs=0): + tdLog.debug("start to insert data ............") + tsql.execute("use %s" %dbName) + pre_insert = "insert into " + sql = pre_insert + + if startTs == 0: + t = time.time() + startTs = int(round(t * 1000)) + + #tdLog.debug("doing insert data into stable:%s rows:%d ..."%(stbName, allRows)) + rowsOfSql = 0 + for i in range(ctbNum): + sql += " %s_%d values "%(ctbPrefix,i) + for j in range(rowsPerTbl): + sql += "(%d, %d, 'tmqrow_%d') "%(startTs + j, j, j) + rowsOfSql += 1 + if (j > 0) and ((rowsOfSql == batchNum) or (j == rowsPerTbl - 1)): + tsql.execute(sql) + rowsOfSql = 0 + if j < rowsPerTbl - 1: + sql = "insert into %s_%d values " %(ctbPrefix,i) + else: + sql = "insert into " + #end sql + if sql != pre_insert: + #print("insert sql:%s"%sql) + tsql.execute(sql) + tdLog.debug("insert data ............ [OK]") + return + + def insert_data_with_autoCreateTbl(self,tsql,dbName,stbName,ctbPrefix,ctbNum,rowsPerTbl,batchNum,startTs=0): + tdLog.debug("start to insert data wiht auto create child table ............") + tsql.execute("use %s" %dbName) + pre_insert = "insert into " + sql = pre_insert + + if startTs == 0: + t = time.time() + startTs = int(round(t * 1000)) + + #tdLog.debug("doing insert data into stable:%s rows:%d ..."%(stbName, allRows)) + rowsOfSql = 0 + for i in range(ctbNum): + sql += " %s.%s_%d using %s.%s tags (%d) values "%(dbName,ctbPrefix,i,dbName,stbName,i) + for j in range(rowsPerTbl): + sql += "(%d, %d, 'autodata_%d') "%(startTs + j, j, j) + rowsOfSql += 1 + if (j > 0) and ((rowsOfSql == batchNum) or (j == rowsPerTbl - 1)): + tsql.execute(sql) + rowsOfSql = 0 + if j < rowsPerTbl - 1: + sql = "insert into %s.%s_%d using %s.%s tags (%d) values " %(dbName,ctbPrefix,i,dbName,stbName,i) + else: + sql = "insert into " + #end sql + if sql != pre_insert: + #print("insert sql:%s"%sql) + tsql.execute(sql) + tdLog.debug("insert data ............ 
[OK]") + return + + def prepareEnv(self, **parameterDict): + # create new connector for my thread + tsql=self.newcur(parameterDict['cfg'], 'localhost', 6030) + + if parameterDict["actionType"] == actionType.CREATE_DATABASE: + self.create_database(tsql, parameterDict["dbName"]) + elif parameterDict["actionType"] == actionType.CREATE_STABLE: + self.create_stable(tsql, parameterDict["dbName"], parameterDict["stbName"]) + elif parameterDict["actionType"] == actionType.CREATE_CTABLE: + self.create_ctables(tsql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["stbName"], parameterDict["ctbNum"]) + elif parameterDict["actionType"] == actionType.INSERT_DATA: + self.insert_data(tsql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"],\ + parameterDict["rowsPerTbl"],parameterDict["batchNum"]) + else: + tdLog.exit("not support's action: ", parameterDict["actionType"]) + + return + + def tmqCase1(self, cfgPath, buildPath): + tdLog.printNoPrefix("======== test case 1: ") + ''' + subscribe one db, multi normal table which have not same schema, and include rows of all tables in one insert sql + ''' + self.initConsumerTable() + + # create and start thread + parameterDict = {'cfg': '', \ + 'actionType': 0, \ + 'dbName': 'db1', \ + 'dropFlag': 1, \ + 'vgroups': 4, \ + 'replica': 1, \ + 'stbName': 'stb1', \ + 'ctbNum': 10, \ + 'rowsPerTbl': 10000, \ + 'batchNum': 100, \ + 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 + parameterDict['cfg'] = cfgPath + + self.create_database(tdSql, parameterDict["dbName"]) + tdSql.execute("create table %s.ntb0 (ts timestamp, c1 int)"%(parameterDict["dbName"])) + tdSql.execute("create table %s.ntb1 (ts timestamp, c1 int, c2 float)"%(parameterDict["dbName"])) + tdSql.execute("create table %s.ntb2 (ts timestamp, c1 int, c2 float, c3 binary(32))"%(parameterDict["dbName"])) + tdSql.execute("create table %s.ntb3 (ts timestamp, c1 int, c2 float, c3 binary(32), c4 timestamp)"%(parameterDict["dbName"])) + + tdSql.execute("insert into %s.ntb0 values(now, 1) %s.ntb1 values(now, 1, 1) %s.ntb2 values(now, 1, 1, '1') %s.ntb3 values(now, 1, 1, '1', now)"%(parameterDict["dbName"],parameterDict["dbName"],parameterDict["dbName"],parameterDict["dbName"])) + tdSql.execute("insert into %s.ntb0 values(now, 2)(now+1s, 3) \ + %s.ntb1 values(now, 2, 2)(now+1s, 3, 3) \ + %s.ntb2 values(now, 2, 2, '2')(now+1s, 3, 3, '3') \ + %s.ntb3 values(now, 2, 2, '2', now)(now+1s, 3, 3, '3', now)"\ + %(parameterDict["dbName"],parameterDict["dbName"],parameterDict["dbName"],parameterDict["dbName"])) + tdSql.execute("insert into %s.ntb0 values(now, 4)(now+1s, 5) \ + %s.ntb1 values(now, 4, 4)(now+1s, 5, 5) \ + %s.ntb2 values(now, 4, 4, '4')(now+1s, 5, 5, '5') \ + %s.ntb3 values(now, 4, 4, '4', now)(now+1s, 5, 5, '5', now) \ + %s.ntb0 values(now+2s, 6)(now+3s, 7) \ + %s.ntb1 values(now+2s, 6, 6)(now+3s, 7, 7) \ + %s.ntb2 values(now+2s, 6, 6, '6')(now+3s, 7, 7, '7') \ + %s.ntb3 values(now+2s, 6, 6, '6', now)(now+3s, 7, 7, '7', now)"\ + %(parameterDict["dbName"],parameterDict["dbName"],parameterDict["dbName"],parameterDict["dbName"],parameterDict["dbName"],parameterDict["dbName"],parameterDict["dbName"],parameterDict["dbName"])) + numOfNtb = 4 + rowsOfPerNtb = 7 + + tdLog.info("create topics from db") + topicFromDb = 'topic_db_mulit_tbl' + + tdSql.execute("create topic %s as database %s" %(topicFromDb, parameterDict['dbName'])) + consumerId = 0 + expectrowcnt = numOfNtb * rowsOfPerNtb + topicList = topicFromDb + ifcheckdata = 0 + ifManualCommit = 0 + keyList = 
'group.id:cgrp1,enable.auto.commit:false,\ + auto.commit.interval.ms:6000,auto.offset.reset:earliest' + self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("start consume processor") + pollDelay = 10 + showMsg = 1 + showRow = 1 + self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) + + tdLog.info("insert process end, and start to check consume result") + expectRows = 1 + resultList = self.selectConsumeResult(expectRows) + totalConsumeRows = 0 + for i in range(expectRows): + totalConsumeRows += resultList[i] + + if totalConsumeRows != expectrowcnt: + tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) + tdLog.exit("tmq consume rows error!") + + tdSql.query("drop topic %s"%topicFromDb) + + tdLog.printNoPrefix("======== test case 1 end ...... ") + + def tmqCase2(self, cfgPath, buildPath): + tdLog.printNoPrefix("======== test case 2: ") + ''' + subscribe one stb, multi child talbe and normal table which have not same schema, and include rows of all tables in one insert sql + ''' + self.initConsumerTable() + + # create and start thread + parameterDict = {'cfg': '', \ + 'actionType': 0, \ + 'dbName': 'db2', \ + 'dropFlag': 1, \ + 'vgroups': 4, \ + 'replica': 1, \ + 'stbName': 'stb1', \ + 'ctbNum': 10, \ + 'rowsPerTbl': 10000, \ + 'batchNum': 100, \ + 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 + parameterDict['cfg'] = cfgPath + + dbName = parameterDict["dbName"] + + self.create_database(tdSql, dbName) + + tdSql.execute("create stable %s.stb (ts timestamp, s1 bigint, s2 binary(32), s3 double) tags (t1 int, t2 binary(32))"%(dbName)) + tdSql.execute("create table %s.ctb0 using %s.stb tags(0, 'ctb0')"%(dbName,dbName)) + tdSql.execute("create table %s.ctb1 using %s.stb tags(1, 'ctb1')"%(dbName,dbName)) + + tdSql.execute("create table %s.ntb0 (ts timestamp, c1 binary(32))"%(dbName)) + tdSql.execute("create table %s.ntb1 (ts timestamp, c1 binary(32), c2 float)"%(dbName)) + tdSql.execute("create table %s.ntb2 (ts timestamp, c1 int, c2 float, c3 binary(32))"%(dbName)) + tdSql.execute("create table %s.ntb3 (ts timestamp, c1 int, c2 float, c3 binary(32), c4 timestamp)"%(dbName)) + + tdSql.execute("insert into %s.ntb0 values(now, 'ntb0-11') \ + %s.ntb1 values(now, 'ntb1', 11) \ + %s.ntb2 values(now, 11, 11, 'ntb2') \ + %s.ctb0 values(now, 11, 'ctb0', 11) \ + %s.ntb3 values(now, 11, 11, 'ntb3', now) \ + %s.ctb1 values(now, 11, 'ctb1', 11)"\ + %(dbName,dbName,dbName,dbName,dbName,dbName)) + + tdSql.execute("insert into %s.ntb0 values(now, 'ntb0-12')(now+1s, 'ntb0-13') \ + %s.ntb1 values(now, 'ntb1', 12)(now+1s, 'ntb1', 13) \ + %s.ntb2 values(now, 12, 12, 'ntb2')(now+1s, 13, 13, 'ntb2') \ + %s.ctb0 values(now, 12, 'ctb0', 12)(now+1s, 13, 'ctb0', 13) \ + %s.ntb3 values(now, 12, 12, 'ntb3', now)(now+1s, 13, 13, 'ntb3', now) \ + %s.ctb1 values(now, 12, 'ctb1', 12)(now+1s, 13, 'ctb1', 13)"\ + %(dbName,dbName,dbName,dbName,dbName,dbName)) + tdSql.execute("insert into %s.ntb0 values(now, 'ntb0-14')(now+1s, 'ntb0-15') \ + %s.ntb1 values(now, 'ntb1', 14)(now+1s, 'ntb1', 15) \ + %s.ntb2 values(now, 14, 14, 'ntb2')(now+1s, 15, 15, 'ntb2') \ + %s.ctb0 values(now, 14, 'ctb0', 14)(now+1s, 15, 'ctb0', 15) \ + %s.ntb3 values(now, 14, 14, 'ntb3', now)(now+1s, 15, 15, 'ntb3', now) \ + %s.ctb1 values(now, 14, 'ctb1', 14)(now+1s, 15, 'ctb1', 15) \ + %s.ntb0 values(now+2s, 'ntb0-16')(now+3s, 'ntb0-17') \ + %s.ntb1 values(now+2s, 'ntb1', 16)(now+3s, 'ntb1', 17) \ + %s.ntb2 
values(now+2s, 16, 16, 'ntb2')(now+3s, 17, 17, 'ntb2') \ + %s.ctb0 values(now+2s, 16, 'ctb0', 16)(now+3s, 17, 'ctb0', 17) \ + %s.ntb3 values(now+2s, 16, 16, 'ntb3', now)(now+3s, 17, 17, 'ntb3', now) \ + %s.ctb1 values(now+2s, 16, 'ctb1', 16)(now+3s, 17, 'ctb1', 17)"\ + %(dbName,dbName,dbName,dbName,dbName,dbName,dbName,dbName,dbName,dbName,dbName,dbName)) + numOfNtb = 4 + numOfCtb = 2 + rowsOfPerNtb = 7 + + tdLog.info("create topics from db") + topicFromStb = 'topic_stb_mulit_tbl' + + tdSql.execute("create topic %s as stable %s.stb" %(topicFromStb, dbName)) + consumerId = 0 + expectrowcnt = numOfCtb * rowsOfPerNtb + topicList = topicFromStb + ifcheckdata = 0 + ifManualCommit = 0 + keyList = 'group.id:cgrp1,enable.auto.commit:false,\ + auto.commit.interval.ms:6000,auto.offset.reset:earliest' + self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("start consume processor") + pollDelay = 10 + showMsg = 1 + showRow = 1 + self.startTmqSimProcess(buildPath,cfgPath,pollDelay,dbName,showMsg, showRow) + + tdLog.info("insert process end, and start to check consume result") + expectRows = 1 + resultList = self.selectConsumeResult(expectRows) + totalConsumeRows = 0 + for i in range(expectRows): + totalConsumeRows += resultList[i] + + if totalConsumeRows != expectrowcnt: + tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) + tdLog.exit("tmq consume rows error!") + + tdSql.query("drop topic %s"%topicFromStb) + + tdLog.printNoPrefix("======== test case 2 end ...... ") + + def tmqCase3(self, cfgPath, buildPath): + tdLog.printNoPrefix("======== test case 3: ") + + tdLog.printNoPrefix("======== test case 3 end ...... ") + + + def run(self): + tdSql.prepare() + + buildPath = self.getBuildPath() + if (buildPath == ""): + tdLog.exit("taosd not found!") + else: + tdLog.info("taosd found in %s" % buildPath) + cfgPath = buildPath + "/../sim/psim/cfg" + tdLog.info("cfgPath: %s" % cfgPath) + + # self.tmqCase1(cfgPath, buildPath) + self.tmqCase2(cfgPath, buildPath) + # self.tmqCase3(cfgPath, buildPath) + # self.tmqCase4(cfgPath, buildPath) + # self.tmqCase5(cfgPath, buildPath) + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +event = threading.Event() + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/7-tmq/schema.py b/tests/system-test/7-tmq/schema.py new file mode 100644 index 0000000000000000000000000000000000000000..633a097db61ed876c9ebd1994c156c4f64c7ceb5 --- /dev/null +++ b/tests/system-test/7-tmq/schema.py @@ -0,0 +1,700 @@ + +import taos +import sys +import time +import socket +import os +import threading +from enum import Enum + +from util.log import * +from util.sql import * +from util.cases import * +from util.dnodes import * + +class actionType(Enum): + CREATE_DATABASE = 0 + CREATE_STABLE = 1 + CREATE_CTABLE = 2 + INSERT_DATA = 3 + +class TDTestCase: + hostname = socket.gethostname() + #rpcDebugFlagVal = '143' + #clientCfgDict = {'serverPort': '', 'firstEp': '', 'secondEp':'', 'rpcDebugFlag':'135', 'fqdn':''} + #clientCfgDict["rpcDebugFlag"] = rpcDebugFlagVal + #updatecfgDict = {'clientCfg': {}, 'serverPort': '', 'firstEp': '', 'secondEp':'', 'rpcDebugFlag':'135', 'fqdn':''} + #updatecfgDict["rpcDebugFlag"] = rpcDebugFlagVal + #print ("===================: ", updatecfgDict) + + def init(self, conn, logSql): + tdLog.debug(f"start to excute {__file__}") + tdSql.init(conn.cursor()) + 
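For reference, every test in this new file goes through the same scaffolding as db.py: newcur opens an independent connection per prepareEnv worker instead of sharing the global tdSql cursor. A minimal standalone sketch of that per-thread connection pattern (host, port and credentials are the suite's defaults, stated here as assumptions):

import taos

def make_cursor(cfg_dir, host='localhost', port=6030):
    # one connection and one cursor per worker thread; cursors are not
    # shared across threads, which is why prepareEnv calls newcur()
    con = taos.connect(host=host, user='root', password='taosdata',
                       config=cfg_dir, port=port)
    return con.cursor()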
#tdSql.init(conn.cursor(), logSql)  # output sql.txt file
+
+    def getBuildPath(self):
+        selfPath = os.path.dirname(os.path.realpath(__file__))
+
+        if ("community" in selfPath):
+            projPath = selfPath[:selfPath.find("community")]
+        else:
+            projPath = selfPath[:selfPath.find("tests")]
+
+        for root, dirs, files in os.walk(projPath):
+            if ("taosd" in files):
+                rootRealPath = os.path.dirname(os.path.realpath(root))
+                if ("packaging" not in rootRealPath):
+                    buildPath = root[:len(root) - len("/build/bin")]
+                    break
+        return buildPath
+
+    def newcur(self,cfg,host,port):
+        user = "root"
+        password = "taosdata"
+        con=taos.connect(host=host, user=user, password=password, config=cfg, port=port)
+        cur=con.cursor()
+        print(cur)
+        return cur
+
+    def initConsumerTable(self,cdbName='cdb'):
+        tdLog.info("create consume database, and consume info table, and consume result table")
+        tdSql.query("drop database if exists %s "%(cdbName))
+        tdSql.query("create database %s vgroups 1"%(cdbName))
+        tdSql.query("drop table if exists %s.consumeinfo "%(cdbName))
+        tdSql.query("drop table if exists %s.consumeresult "%(cdbName))
+
+        tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)"%cdbName)
+        tdSql.query("create table %s.consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)"%cdbName)
+
+    def initConsumeContentTable(self,id=0,cdbName='cdb'):
+        tdSql.query("drop table if exists %s.content_%d "%(cdbName, id))
+        tdSql.query("create table %s.content_%d (ts timestamp, contentOfRow binary(1024))"%(cdbName, id))
+
+    def initConsumerInfoTable(self,cdbName='cdb'):
+        tdLog.info("drop consumeinfo table")
+        tdSql.query("drop table if exists %s.consumeinfo "%(cdbName))
+        tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)"%cdbName)
+
+    def insertConsumerInfo(self,consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifmanualcommit,cdbName='cdb'):
+        sql = "insert into %s.consumeinfo values "%cdbName
+        sql += "(now, %d, '%s', '%s', %d, %d, %d)"%(consumerId, topicList, keyList, expectrowcnt, ifcheckdata, ifmanualcommit)
+        tdLog.info("consume info sql: %s"%sql)
+        tdSql.query(sql)
+
+    def selectConsumeResult(self,expectRows,cdbName='cdb'):
+        resultList=[]
+        while 1:
+            tdSql.query("select * from %s.consumeresult"%cdbName)
+            #tdLog.info("row: %d, %l64d, %l64d"%(tdSql.getData(0, 1),tdSql.getData(0, 2),tdSql.getData(0, 3)))
+            if tdSql.getRows() == expectRows:
+                break
+            else:
+                time.sleep(5)
+
+        for i in range(expectRows):
+            tdLog.info("consume id: %d, consume msgs: %d, consume rows: %d"%(tdSql.getData(i , 1), tdSql.getData(i , 2), tdSql.getData(i , 3)))
+            resultList.append(tdSql.getData(i , 3))
+
+        return resultList
+
+    def startTmqSimProcess(self,buildPath,cfgPath,pollDelay,dbName,showMsg=1,showRow=1,cdbName='cdb',valgrind=0):
+        shellCmd = 'nohup '
+        if valgrind == 1:
+            logFile = cfgPath + '/../log/valgrind-tmq.log'
+            shellCmd = 'nohup valgrind --log-file=' + logFile + ' '
+            shellCmd += '--tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all --num-callers=20 -v --workaround-gcc296-bugs=yes '
+
+        shellCmd += buildPath + '/build/bin/tmq_sim -c ' + cfgPath
+        shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName)
+        shellCmd += "> /dev/null 2>&1 &"
+        tdLog.info(shellCmd)
+        
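At this point shellCmd is handed to the shell and forgotten. An equivalent launch via subprocess, sketched here only as an alternative under the same flags (not part of the patch), keeps a handle on the consumer so a test could wait on or kill it:

import subprocess

def start_tmq_sim(build_path, cfg_path, poll_delay, db_name,
                  show_msg=1, show_row=1, cdb_name='cdb'):
    # same arguments the os.system() call passes to tmq_sim, but Popen
    # returns the detached consumer process to the caller
    cmd = [build_path + '/build/bin/tmq_sim', '-c', cfg_path,
           '-y', str(poll_delay), '-d', db_name,
           '-g', str(show_msg), '-r', str(show_row), '-w', cdb_name]
    return subprocess.Popen(cmd, stdout=subprocess.DEVNULL,
                            stderr=subprocess.DEVNULL)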
os.system(shellCmd) + + def create_database(self,tsql, dbName,dropFlag=1,vgroups=4,replica=1): + if dropFlag == 1: + tsql.execute("drop database if exists %s"%(dbName)) + + tsql.execute("create database if not exists %s vgroups %d replica %d"%(dbName, vgroups, replica)) + tdLog.debug("complete to create database %s"%(dbName)) + return + + def create_stable(self,tsql, dbName,stbName): + tsql.execute("create table if not exists %s.%s (ts timestamp, c1 bigint, c2 binary(16)) tags(t1 int)"%(dbName, stbName)) + tdLog.debug("complete to create %s.%s" %(dbName, stbName)) + return + + def create_ctables(self,tsql, dbName,stbName,ctbPrefix,ctbNum): + tsql.execute("use %s" %dbName) + pre_create = "create table" + sql = pre_create + #tdLog.debug("doing create one stable %s and %d child table in %s ..." %(stbname, count ,dbname)) + for i in range(ctbNum): + sql += " %s_%d using %s tags(%d)"%(ctbPrefix,i,stbName,i+1) + if (i > 0) and (i%100 == 0): + tsql.execute(sql) + sql = pre_create + if sql != pre_create: + tsql.execute(sql) + + tdLog.debug("complete to create %d child tables in %s.%s" %(ctbNum, dbName, stbName)) + return + + def insert_data_interlaceByMultiTbl(self,tsql,dbName,ctbPrefix,ctbNum,rowsPerTbl,batchNum,startTs=0): + tdLog.debug("start to insert data ............") + tsql.execute("use %s" %dbName) + pre_insert = "insert into " + sql = pre_insert + + if startTs == 0: + t = time.time() + startTs = int(round(t * 1000)) + + ctbDict = {} + for i in range(ctbNum): + ctbDict[i] = 0 + + #tdLog.debug("doing insert data into stable:%s rows:%d ..."%(stbName, allRows)) + rowsOfCtb = 0 + while rowsOfCtb < rowsPerTbl: + for i in range(ctbNum): + sql += " %s.%s_%d values "%(dbName,ctbPrefix,i) + for k in range(batchNum): + sql += "(%d, %d, 'tmqrow_%d') "%(startTs + ctbDict[i], ctbDict[i], ctbDict[i]) + ctbDict[i] += 1 + if (0 == ctbDict[i]%batchNum) or (ctbDict[i] == rowsPerTbl): + tsql.execute(sql) + sql = "insert into " + break + rowsOfCtb = ctbDict[0] + + tdLog.debug("insert data ............ [OK]") + return + + def insert_data(self,tsql,dbName,ctbPrefix,ctbNum,rowsPerTbl,batchNum,startTs=0): + tdLog.debug("start to insert data ............") + tsql.execute("use %s" %dbName) + pre_insert = "insert into " + sql = pre_insert + + if startTs == 0: + t = time.time() + startTs = int(round(t * 1000)) + + #tdLog.debug("doing insert data into stable:%s rows:%d ..."%(stbName, allRows)) + rowsOfSql = 0 + for i in range(ctbNum): + sql += " %s_%d values "%(ctbPrefix,i) + for j in range(rowsPerTbl): + sql += "(%d, %d, 'tmqrow_%d') "%(startTs + j, j, j) + rowsOfSql += 1 + if (j > 0) and ((rowsOfSql == batchNum) or (j == rowsPerTbl - 1)): + tsql.execute(sql) + rowsOfSql = 0 + if j < rowsPerTbl - 1: + sql = "insert into %s_%d values " %(ctbPrefix,i) + else: + sql = "insert into " + #end sql + if sql != pre_insert: + #print("insert sql:%s"%sql) + tsql.execute(sql) + tdLog.debug("insert data ............ 
[OK]") + return + + def insert_data_with_autoCreateTbl(self,tsql,dbName,stbName,ctbPrefix,ctbNum,rowsPerTbl,batchNum,startTs=0): + tdLog.debug("start to insert data wiht auto create child table ............") + tsql.execute("use %s" %dbName) + pre_insert = "insert into " + sql = pre_insert + + if startTs == 0: + t = time.time() + startTs = int(round(t * 1000)) + + #tdLog.debug("doing insert data into stable:%s rows:%d ..."%(stbName, allRows)) + rowsOfSql = 0 + for i in range(ctbNum): + sql += " %s.%s_%d using %s.%s tags (%d) values "%(dbName,ctbPrefix,i,dbName,stbName,i) + for j in range(rowsPerTbl): + sql += "(%d, %d, 'autodata_%d') "%(startTs + j, j, j) + rowsOfSql += 1 + if (j > 0) and ((rowsOfSql == batchNum) or (j == rowsPerTbl - 1)): + tsql.execute(sql) + rowsOfSql = 0 + if j < rowsPerTbl - 1: + sql = "insert into %s.%s_%d using %s.%s tags (%d) values " %(dbName,ctbPrefix,i,dbName,stbName,i) + else: + sql = "insert into " + #end sql + if sql != pre_insert: + #print("insert sql:%s"%sql) + tsql.execute(sql) + tdLog.debug("insert data ............ [OK]") + return + + def prepareEnv(self, **parameterDict): + # create new connector for my thread + tsql=self.newcur(parameterDict['cfg'], 'localhost', 6030) + + if parameterDict["actionType"] == actionType.CREATE_DATABASE: + self.create_database(tsql, parameterDict["dbName"]) + elif parameterDict["actionType"] == actionType.CREATE_STABLE: + self.create_stable(tsql, parameterDict["dbName"], parameterDict["stbName"]) + elif parameterDict["actionType"] == actionType.CREATE_CTABLE: + self.create_ctables(tsql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["stbName"], parameterDict["ctbNum"]) + elif parameterDict["actionType"] == actionType.INSERT_DATA: + self.insert_data(tsql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"],\ + parameterDict["rowsPerTbl"],parameterDict["batchNum"]) + else: + tdLog.exit("not support's action: ", parameterDict["actionType"]) + + return + + def tmqCase1(self, cfgPath, buildPath): + tdLog.printNoPrefix("======== test case 1: ") + parameterDict = {'cfg': '', \ + 'actionType': 0, \ + 'dbName': 'db1', \ + 'dropFlag': 1, \ + 'vgroups': 4, \ + 'replica': 1, \ + 'stbName': 'stb1', \ + 'ctbPrefix': 'stb1', \ + 'ctbNum': 10, \ + 'rowsPerTbl': 10000, \ + 'batchNum': 23, \ + 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 + parameterDict['cfg'] = cfgPath + + tdLog.info("create database, super table, child table, normal table") + ntbName = 'ntb1' + self.create_database(tdSql, parameterDict["dbName"]) + # self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) + # self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) + # self.insert_data(tdSql,parameterDict["dbName"],parameterDict["stbName"],parameterDict["ctbNum"],parameterDict["rowsPerTbl"],parameterDict["batchNum"]) + tdSql.query("create table %s.%s (ts timestamp, c1 int, c2 binary(32), c3 double, c4 binary(32), c5 nchar(10)) tags (t1 int, t2 binary(32), t3 double, t4 binary(32), t5 nchar(10))"%(parameterDict["dbName"],parameterDict["stbName"])) + tdSql.query("create table %s.%s (ts timestamp, c1 int, c2 binary(32), c3 double, c4 binary(32), c5 nchar(10))"%(parameterDict["dbName"],ntbName)) + + tdLog.info("create topics from super table and normal table") + columnTopicFromStb = 'column_topic_from_stb1' + columnTopicFromNtb = 'column_topic_from_ntb1' + + tdSql.execute("create topic %s as select ts, c1, c2, t1, t2 from %s.%s" %(columnTopicFromStb, 
parameterDict['dbName'], parameterDict['stbName']))
+        tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(columnTopicFromNtb, parameterDict['dbName'], ntbName))
+
+        tdLog.info("======== super table test:")
+        # alter actions prohibited: drop column/tag, modify column/tag type, rename column/tag included in topic
+        tdSql.error("alter table %s.%s drop column c1"%(parameterDict['dbName'], parameterDict['stbName']))
+        tdSql.error("alter table %s.%s drop column c2"%(parameterDict['dbName'], parameterDict['stbName']))
+        tdSql.error("alter table %s.%s drop tag t1"%(parameterDict['dbName'], parameterDict['stbName']))
+        tdSql.error("alter table %s.%s drop tag t2"%(parameterDict['dbName'], parameterDict['stbName']))
+
+        tdSql.error("alter table %s.%s modify column c2 binary(40)"%(parameterDict['dbName'], parameterDict['stbName']))
+        tdSql.error("alter table %s.%s modify tag t2 binary(40)"%(parameterDict['dbName'], parameterDict['stbName']))
+
+        tdSql.error("alter table %s.%s rename column c1 c1new"%(parameterDict['dbName'], parameterDict['stbName']))
+        tdSql.error("alter table %s.%s rename column c2 c2new"%(parameterDict['dbName'], parameterDict['stbName']))
+        tdSql.error("alter table %s.%s rename tag t1 t1new"%(parameterDict['dbName'], parameterDict['stbName']))
+        tdSql.error("alter table %s.%s rename tag t2 t2new"%(parameterDict['dbName'], parameterDict['stbName']))
+
+        # alter actions allowed: drop column/tag, modify column/tag type, rename column/tag not included in topic
+        tdSql.query("alter table %s.%s modify column c4 binary(60)"%(parameterDict['dbName'], parameterDict['stbName']))
+        tdSql.query("alter table %s.%s modify tag t4 binary(60)"%(parameterDict['dbName'], parameterDict['stbName']))
+
+        tdSql.query("alter table %s.%s rename column c3 c3new"%(parameterDict['dbName'], parameterDict['stbName']))
+        tdSql.query("alter table %s.%s rename column c4 c4new"%(parameterDict['dbName'], parameterDict['stbName']))
+        tdSql.query("alter table %s.%s rename tag t3 t3new"%(parameterDict['dbName'], parameterDict['stbName']))
+        tdSql.query("alter table %s.%s rename tag t4 t4new"%(parameterDict['dbName'], parameterDict['stbName']))
+
+        tdSql.query("alter table %s.%s drop column c3new"%(parameterDict['dbName'], parameterDict['stbName']))
+        tdSql.query("alter table %s.%s drop column c4new"%(parameterDict['dbName'], parameterDict['stbName']))
+        tdSql.query("alter table %s.%s drop tag t3new"%(parameterDict['dbName'], parameterDict['stbName']))
+        tdSql.query("alter table %s.%s drop tag t4new"%(parameterDict['dbName'], parameterDict['stbName']))
+
+        tdSql.query("alter table %s.%s add column c3 int"%(parameterDict['dbName'], parameterDict['stbName']))
+        tdSql.query("alter table %s.%s add column c4 float"%(parameterDict['dbName'], parameterDict['stbName']))
+        tdSql.query("alter table %s.%s add tag t3 int"%(parameterDict['dbName'], parameterDict['stbName']))
+        tdSql.query("alter table %s.%s add tag t4 float"%(parameterDict['dbName'], parameterDict['stbName']))
+
+        tdLog.info("======== normal table test:")
+        # alter actions prohibited: drop column, modify column type, rename column included in topic
+        tdSql.error("alter table %s.%s drop column c1"%(parameterDict['dbName'], ntbName))
+        tdSql.error("alter table %s.%s drop column c2"%(parameterDict['dbName'], ntbName))
+
+        tdSql.error("alter table %s.%s modify column c2 binary(60)"%(parameterDict['dbName'], ntbName))
+
+        tdSql.error("alter table %s.%s rename column c1 c1new"%(parameterDict['dbName'], ntbName))
+        tdSql.error("alter table %s.%s 
rename column c2 c2new"%(parameterDict['dbName'], ntbName)) + + # alter actions allowed: drop column/tag, modify column/tag type, rename column/tag not included in topic + tdSql.query("alter table %s.%s modify column c4 binary(60)"%(parameterDict['dbName'], ntbName)) + + tdSql.query("alter table %s.%s rename column c3 c3new"%(parameterDict['dbName'], ntbName)) + tdSql.query("alter table %s.%s rename column c4 c4new"%(parameterDict['dbName'], ntbName)) + + tdSql.query("alter table %s.%s drop column c3new"%(parameterDict['dbName'], ntbName)) + tdSql.query("alter table %s.%s drop column c4new"%(parameterDict['dbName'], ntbName)) + + tdSql.query("alter table %s.%s add column c3 int"%(parameterDict['dbName'], ntbName)) + tdSql.query("alter table %s.%s add column c4 float"%(parameterDict['dbName'], ntbName)) + + tdLog.info("======== child table test:") + parameterDict['stbName'] = 'stb12' + ctbName = 'stb12_0' + tdSql.query("create table %s.%s (ts timestamp, c1 int, c2 binary(32), c3 double, c4 binary(32), c5 nchar(10)) tags (t1 int, t2 binary(32), t3 double, t4 binary(32), t5 nchar(10))"%(parameterDict["dbName"],parameterDict['stbName'])) + tdSql.query("create table %s.%s using %s.%s tags (1, '2', 3, '4', '5')"%(parameterDict["dbName"],ctbName,parameterDict["dbName"],parameterDict['stbName'])) + + tdLog.info("create topics from child table") + columnTopicFromCtb = 'column_topic_from_ctb1' + + tdSql.execute("create topic %s as select ts, c1, c2, t1, t2 from %s.%s" %(columnTopicFromCtb,parameterDict['dbName'],ctbName)) + + # alter actions prohibited: drop column/tag, modify column/tag type, rename column/tag included in topic + tdSql.error("alter table %s.%s drop column c1"%(parameterDict['dbName'], parameterDict['stbName'])) + tdSql.error("alter table %s.%s drop column c2"%(parameterDict['dbName'], parameterDict['stbName'])) + tdSql.error("alter table %s.%s drop tag t1"%(parameterDict['dbName'], parameterDict['stbName'])) + tdSql.error("alter table %s.%s drop tag t2"%(parameterDict['dbName'], parameterDict['stbName'])) + + tdSql.error("alter table %s.%s modify column c2 binary(40)"%(parameterDict['dbName'], parameterDict['stbName'])) + tdSql.error("alter table %s.%s modify tag t2 binary(40)"%(parameterDict['dbName'], parameterDict['stbName'])) + + tdSql.error("alter table %s.%s set tag t1 10"%(parameterDict['dbName'], parameterDict['stbName'])) + tdSql.error("alter table %s.%s set tag t2 '20'"%(parameterDict['dbName'], parameterDict['stbName'])) + + tdSql.error("alter table %s.%s rename column c1 c1new"%(parameterDict['dbName'], parameterDict['stbName'])) + tdSql.error("alter table %s.%s rename column c2 c2new"%(parameterDict['dbName'], parameterDict['stbName'])) + tdSql.error("alter table %s.%s rename tag t1 t1new"%(parameterDict['dbName'], parameterDict['stbName'])) + tdSql.error("alter table %s.%s rename tag t2 t2new"%(parameterDict['dbName'], parameterDict['stbName'])) + + # alter actions allowed: drop column/tag, modify column/tag type, rename column/tag not included in topic + tdSql.query("alter table %s.%s modify column c4 binary(60)"%(parameterDict['dbName'], parameterDict['stbName'])) + tdSql.query("alter table %s.%s modify tag t4 binary(40)"%(parameterDict['dbName'], parameterDict['stbName'])) + + tdSql.query("alter table %s.%s set tag t3 30"%(parameterDict['dbName'], parameterDict['stbName'])) + tdSql.query("alter table %s.%s set tag t4 '40'"%(parameterDict['dbName'], parameterDict['stbName'])) + tdSql.query("alter table %s.%s set tag t5 '50'"%(parameterDict['dbName'], 
parameterDict['stbName']))
+
+        tdSql.query("alter table %s.%s rename column c3 c3new"%(parameterDict['dbName'], parameterDict['stbName']))
+        tdSql.query("alter table %s.%s rename column c4 c4new"%(parameterDict['dbName'], parameterDict['stbName']))
+        tdSql.query("alter table %s.%s rename tag t3 t3new"%(parameterDict['dbName'], parameterDict['stbName']))
+        tdSql.query("alter table %s.%s rename tag t4 t4new"%(parameterDict['dbName'], parameterDict['stbName']))
+
+        tdSql.query("alter table %s.%s drop column c3new"%(parameterDict['dbName'], parameterDict['stbName']))
+        tdSql.query("alter table %s.%s drop column c4new"%(parameterDict['dbName'], parameterDict['stbName']))
+        tdSql.query("alter table %s.%s drop tag t3new"%(parameterDict['dbName'], parameterDict['stbName']))
+        tdSql.query("alter table %s.%s drop tag t4new"%(parameterDict['dbName'], parameterDict['stbName']))
+
+        tdSql.query("alter table %s.%s add column c3 int"%(parameterDict['dbName'], parameterDict['stbName']))
+        tdSql.query("alter table %s.%s add column c4 float"%(parameterDict['dbName'], parameterDict['stbName']))
+        tdSql.query("alter table %s.%s add tag t3 int"%(parameterDict['dbName'], parameterDict['stbName']))
+        tdSql.query("alter table %s.%s add tag t4 float"%(parameterDict['dbName'], parameterDict['stbName']))
+
+        tdLog.printNoPrefix("======== test case 1 end ...... ")
+
+    def tmqCase2(self, cfgPath, buildPath):
+        tdLog.printNoPrefix("======== test case 2: ")
+        parameterDict = {'cfg': '', \
+                         'actionType': 0, \
+                         'dbName': 'db1', \
+                         'dropFlag': 1, \
+                         'vgroups': 4, \
+                         'replica': 1, \
+                         'stbName': 'stb2', \
+                         'ctbPrefix': 'stb2', \
+                         'ctbNum': 10, \
+                         'rowsPerTbl': 10000, \
+                         'batchNum': 23, \
+                         'startTs': 1640966400000} # 2022-01-01 00:00:00.000
+        parameterDict['cfg'] = cfgPath
+
+        # tdLog.info("create database, super table, child table, normal table")
+        ntbName = 'ntb2'
+        tdSql.query("create table %s.%s (ts timestamp, c1 int, c2 binary(32), c3 double, c4 binary(32), c5 nchar(10)) tags (t1 int, t2 binary(32), t3 double, t4 binary(32), t5 nchar(10))"%(parameterDict["dbName"],parameterDict["stbName"]))
+        tdSql.query("create table %s.%s (ts timestamp, c1 int, c2 binary(32), c3 double, c4 binary(32), c5 nchar(10))"%(parameterDict["dbName"],ntbName))
+
+        tdLog.info("create topics from super table and normal table")
+        columnTopicFromStb = 'column_topic_from_stb2'
+        columnTopicFromNtb = 'column_topic_from_ntb2'
+
+        tdSql.execute("create topic %s as select ts, c1, c2, t1, t2 from %s.%s where c3 > 3 and c4 like 'abc' and t3 = 5 and t4 = 'beijing'" %(columnTopicFromStb, parameterDict['dbName'], parameterDict['stbName']))
+        tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s where c3 > 3 and c4 like 'abc'" %(columnTopicFromNtb, parameterDict['dbName'], ntbName))
+
+        tdLog.info("======== super table test:")
+        # alter actions prohibited: drop column/tag, modify column/tag type, rename column/tag included in topic
+        tdSql.error("alter table %s.%s drop column c1"%(parameterDict['dbName'], parameterDict['stbName']))
+        tdSql.error("alter table %s.%s drop column c2"%(parameterDict['dbName'], parameterDict['stbName']))
+        tdSql.error("alter table %s.%s drop column c3"%(parameterDict['dbName'], parameterDict['stbName']))
+        tdSql.error("alter table %s.%s drop column c4"%(parameterDict['dbName'], parameterDict['stbName']))
+        tdSql.error("alter table %s.%s drop tag t1"%(parameterDict['dbName'], parameterDict['stbName']))
+        tdSql.error("alter table %s.%s drop tag t2"%(parameterDict['dbName'], parameterDict['stbName']))
+        
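The super-table checks around this point extend tmqCase1: this topic also filters on c3, c4, t3 and t4, so the where-clause columns and tags are locked exactly like the select-list ones. A table-driven sketch of the same expectation (dbName/stbName stand in for the fixtures above; this is illustration, not part of the test flow):

# every column or tag referenced by the topic, whether in the select
# list or in the filter, must reject drop/modify/rename while it exists
for col in ('c1', 'c2', 'c3', 'c4'):
    tdSql.error("alter table %s.%s drop column %s" % (dbName, stbName, col))
for tag in ('t1', 't2', 't3', 't4'):
    tdSql.error("alter table %s.%s drop tag %s" % (dbName, stbName, tag))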
tdSql.error("alter table %s.%s drop tag t3"%(parameterDict['dbName'], parameterDict['stbName'])) + tdSql.error("alter table %s.%s drop tag t4"%(parameterDict['dbName'], parameterDict['stbName'])) + + tdSql.error("alter table %s.%s modify column c2 binary(40)"%(parameterDict['dbName'], parameterDict['stbName'])) + tdSql.error("alter table %s.%s modify column c4 binary(40)"%(parameterDict['dbName'], parameterDict['stbName'])) + tdSql.error("alter table %s.%s modify tag t2 binary(40)"%(parameterDict['dbName'], parameterDict['stbName'])) + tdSql.error("alter table %s.%s modify tag t4 binary(40)"%(parameterDict['dbName'], parameterDict['stbName'])) + + tdSql.error("alter table %s.%s rename column c1 c1new"%(parameterDict['dbName'], parameterDict['stbName'])) + tdSql.error("alter table %s.%s rename column c2 c2new"%(parameterDict['dbName'], parameterDict['stbName'])) + tdSql.error("alter table %s.%s rename column c3 c3new"%(parameterDict['dbName'], parameterDict['stbName'])) + tdSql.error("alter table %s.%s rename column c4 c4new"%(parameterDict['dbName'], parameterDict['stbName'])) + tdSql.error("alter table %s.%s rename tag t1 t1new"%(parameterDict['dbName'], parameterDict['stbName'])) + tdSql.error("alter table %s.%s rename tag t2 t2new"%(parameterDict['dbName'], parameterDict['stbName'])) + tdSql.error("alter table %s.%s rename tag t3 t3new"%(parameterDict['dbName'], parameterDict['stbName'])) + tdSql.error("alter table %s.%s rename tag t4 t4new"%(parameterDict['dbName'], parameterDict['stbName'])) + + # alter actions allowed: drop column/tag, modify column/tag type, rename column/tag not included in topic + tdSql.query("alter table %s.%s modify column c5 nchar(60)"%(parameterDict['dbName'], parameterDict['stbName'])) + tdSql.query("alter table %s.%s modify tag t5 nchar(60)"%(parameterDict['dbName'], parameterDict['stbName'])) + + tdSql.query("alter table %s.%s rename column c5 c5new"%(parameterDict['dbName'], parameterDict['stbName'])) + tdSql.query("alter table %s.%s rename tag t5 t5new"%(parameterDict['dbName'], parameterDict['stbName'])) + + tdSql.query("alter table %s.%s drop column c5new"%(parameterDict['dbName'], parameterDict['stbName'])) + tdSql.query("alter table %s.%s drop tag t5new"%(parameterDict['dbName'], parameterDict['stbName'])) + + tdSql.query("alter table %s.%s add column c5 int"%(parameterDict['dbName'], parameterDict['stbName'])) + tdSql.query("alter table %s.%s add tag t5 float"%(parameterDict['dbName'], parameterDict['stbName'])) + + tsLog.info("======== normal table test:") + # alter actions prohibited: drop column/tag, modify column/tag type, rename column/tag included in topic + tdSql.error("alter table %s.%s drop column c1"%(parameterDict['dbName'], ntbName)) + tdSql.error("alter table %s.%s drop column c2"%(parameterDict['dbName'], ntbName)) + tdSql.error("alter table %s.%s drop column c3"%(parameterDict['dbName'], ntbName)) + tdSql.error("alter table %s.%s drop column c4"%(parameterDict['dbName'], ntbName)) + + tdSql.error("alter table %s.%s modify column c2 binary(40)"%(parameterDict['dbName'], ntbName)) + tdSql.error("alter table %s.%s modify column c4 binary(40)"%(parameterDict['dbName'], ntbName)) + + tdSql.error("alter table %s.%s rename column c1 c1new"%(parameterDict['dbName'], ntbName)) + tdSql.error("alter table %s.%s rename column c2 c2new"%(parameterDict['dbName'], ntbName)) + tdSql.error("alter table %s.%s rename column c3 c3new"%(parameterDict['dbName'], ntbName)) + tdSql.error("alter table %s.%s rename column c4 c4new"%(parameterDict['dbName'], 
ntbName)) + + # alter actions allowed: drop column/tag, modify column/tag type, rename column/tag not included in topic + tdSql.query("alter table %s.%s modify column c5 nchar(60)"%(parameterDict['dbName'], ntbName)) + + tdSql.query("alter table %s.%s rename column c5 c5new"%(parameterDict['dbName'], ntbName)) + + tdSql.query("alter table %s.%s drop column c5new"%(parameterDict['dbName'], ntbName)) + + tdSql.query("alter table %s.%s add column c5 float"%(parameterDict['dbName'], ntbName)) + + tdLog.info("======== child table test:") + parameterDict['stbName'] = 'stb21' + ctbName = 'stb21_0' + tdSql.query("create table %s.%s (ts timestamp, c1 int, c2 binary(32), c3 double, c4 binary(32), c5 nchar(10)) tags (t1 int, t2 binary(32), t3 double, t4 binary(32), t5 nchar(10))"%(parameterDict["dbName"],parameterDict['stbName'])) + tdSql.query("create table %s.%s using %s.%s tags (1, '2', 3, '4', '5')"%(parameterDict["dbName"],ctbName,parameterDict["dbName"],parameterDict['stbName'])) + + tdLog.info("create topics from child table") + columnTopicFromCtb = 'column_topic_from_ctb2' + + tdSql.execute("create topic %s as select ts, c1, c2, t1, t2 from %s.%s where c3 > 3 and c4 like 'abc' and t3 = 5 and t4 = 'beijing'" %(columnTopicFromCtb,parameterDict['dbName'],ctbName)) + + # alter actions prohibited: drop column/tag, modify column/tag type, rename column/tag included in topic + tdSql.error("alter table %s.%s drop column c1"%(parameterDict['dbName'], parameterDict['stbName'])) + tdSql.error("alter table %s.%s drop column c2"%(parameterDict['dbName'], parameterDict['stbName'])) + tdSql.error("alter table %s.%s drop column c3"%(parameterDict['dbName'], parameterDict['stbName'])) + tdSql.error("alter table %s.%s drop column c4"%(parameterDict['dbName'], parameterDict['stbName'])) + tdSql.error("alter table %s.%s drop tag t1"%(parameterDict['dbName'], parameterDict['stbName'])) + tdSql.error("alter table %s.%s drop tag t2"%(parameterDict['dbName'], parameterDict['stbName'])) + tdSql.error("alter table %s.%s drop tag t3"%(parameterDict['dbName'], parameterDict['stbName'])) + tdSql.error("alter table %s.%s drop tag t4"%(parameterDict['dbName'], parameterDict['stbName'])) + + tdSql.error("alter table %s.%s modify column c2 binary(40)"%(parameterDict['dbName'], parameterDict['stbName'])) + tdSql.error("alter table %s.%s modify column c4 binary(40)"%(parameterDict['dbName'], parameterDict['stbName'])) + tdSql.error("alter table %s.%s modify tag t2 binary(40)"%(parameterDict['dbName'], parameterDict['stbName'])) + tdSql.error("alter table %s.%s modify tag t4 binary(40)"%(parameterDict['dbName'], parameterDict['stbName'])) + + tdSql.error("alter table %s.%s set tag t1 11"%(parameterDict['dbName'], parameterDict['stbName'])) + tdSql.error("alter table %s.%s set tag t2 '22'"%(parameterDict['dbName'], parameterDict['stbName'])) + tdSql.error("alter table %s.%s set tag t3 33"%(parameterDict['dbName'], parameterDict['stbName'])) + tdSql.error("alter table %s.%s set tag t4 '44'"%(parameterDict['dbName'], parameterDict['stbName'])) + + tdSql.error("alter table %s.%s rename column c1 c1new"%(parameterDict['dbName'], parameterDict['stbName'])) + tdSql.error("alter table %s.%s rename column c2 c2new"%(parameterDict['dbName'], parameterDict['stbName'])) + tdSql.error("alter table %s.%s rename column c3 c3new"%(parameterDict['dbName'], parameterDict['stbName'])) + tdSql.error("alter table %s.%s rename column c4 c4new"%(parameterDict['dbName'], parameterDict['stbName'])) + tdSql.error("alter table %s.%s rename tag t1 
t1new"%(parameterDict['dbName'], parameterDict['stbName'])) + tdSql.error("alter table %s.%s rename tag t2 t2new"%(parameterDict['dbName'], parameterDict['stbName'])) + tdSql.error("alter table %s.%s rename tag t3 t3new"%(parameterDict['dbName'], parameterDict['stbName'])) + tdSql.error("alter table %s.%s rename tag t4 t4new"%(parameterDict['dbName'], parameterDict['stbName'])) + + # alter actions allowed: drop column/tag, modify column/tag type, rename column/tag not included in topic + tdSql.query("alter table %s.%s modify column c5 nchar(60)"%(parameterDict['dbName'], parameterDict['stbName'])) + tdSql.query("alter table %s.%s modify tag t5 nchar(40)"%(parameterDict['dbName'], parameterDict['stbName'])) + + tdSql.query("alter table %s.%s set tag t5 '50'"%(parameterDict['dbName'], parameterDict['stbName'])) + + tdSql.query("alter table %s.%s rename column c5 c5new"%(parameterDict['dbName'], parameterDict['stbName'])) + tdSql.query("alter table %s.%s rename tag t5 t5new"%(parameterDict['dbName'], parameterDict['stbName'])) + + tdSql.query("alter table %s.%s drop column c5new"%(parameterDict['dbName'], parameterDict['stbName'])) + tdSql.query("alter table %s.%s drop tag t5new"%(parameterDict['dbName'], parameterDict['stbName'])) + + tdSql.query("alter table %s.%s add column c5 float"%(parameterDict['dbName'], parameterDict['stbName'])) + tdSql.query("alter table %s.%s add tag t5 float"%(parameterDict['dbName'], parameterDict['stbName'])) + + tdLog.printNoPrefix("======== test case 2 end ...... ") + + def tmqCase3(self, cfgPath, buildPath): + tdLog.printNoPrefix("======== test case 3: ") + parameterDict = {'cfg': '', \ + 'actionType': 0, \ + 'dbName': 'db1', \ + 'dropFlag': 1, \ + 'vgroups': 4, \ + 'replica': 1, \ + 'stbName': 'stb3', \ + 'ctbPrefix': 'stb3', \ + 'ctbNum': 10, \ + 'rowsPerTbl': 10000, \ + 'batchNum': 23, \ + 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 + parameterDict['cfg'] = cfgPath + + # tdLog.info("create database, super table, child table, normal table") + ntbName = 'ntb3' + tdSql.query("create table %s.%s (ts timestamp, c1 int, c2 binary(32), c3 double, c4 binary(32), c5 nchar(10)) tags (t1 int, t2 binary(32), t3 double, t4 binary(32), t5 nchar(10))"%(parameterDict["dbName"],parameterDict["stbName"])) + tdSql.query("create table %s.%s (ts timestamp, c1 int, c2 binary(32), c3 double, c4 binary(32), c5 nchar(10))"%(parameterDict["dbName"],ntbName)) + + tdLog.info("create topics from super table and normal table") + columnTopicFromStb = 'star_topic_from_stb3' + columnTopicFromNtb = 'star_topic_from_ntb3' + + tdSql.execute("create topic %s as select * from %s.%s" %(columnTopicFromStb, parameterDict['dbName'], parameterDict['stbName'])) + tdSql.execute("create topic %s as select * from %s.%s " %(columnTopicFromNtb, parameterDict['dbName'], ntbName)) + + tsLog.info("======== super table test:") + # alter actions prohibited: drop column/tag, modify column/tag type, rename column/tag included in topic + tdSql.error("alter table %s.%s drop column c1"%(parameterDict['dbName'], parameterDict['stbName'])) + tdSql.error("alter table %s.%s drop column c2"%(parameterDict['dbName'], parameterDict['stbName'])) + tdSql.error("alter table %s.%s drop column c3"%(parameterDict['dbName'], parameterDict['stbName'])) + tdSql.error("alter table %s.%s drop column c4"%(parameterDict['dbName'], parameterDict['stbName'])) + tdSql.error("alter table %s.%s drop column c5"%(parameterDict['dbName'], parameterDict['stbName'])) + tdSql.error("alter table %s.%s drop tag 
t1"%(parameterDict['dbName'], parameterDict['stbName'])) + tdSql.error("alter table %s.%s drop tag t2"%(parameterDict['dbName'], parameterDict['stbName'])) + tdSql.error("alter table %s.%s drop tag t3"%(parameterDict['dbName'], parameterDict['stbName'])) + tdSql.error("alter table %s.%s drop tag t4"%(parameterDict['dbName'], parameterDict['stbName'])) + tdSql.error("alter table %s.%s drop tag t5"%(parameterDict['dbName'], parameterDict['stbName'])) + + tdSql.error("alter table %s.%s modify column c2 binary(40)"%(parameterDict['dbName'], parameterDict['stbName'])) + tdSql.error("alter table %s.%s modify column c4 binary(40)"%(parameterDict['dbName'], parameterDict['stbName'])) + tdSql.error("alter table %s.%s modify column c5 nchar(40)"%(parameterDict['dbName'], parameterDict['stbName'])) + tdSql.error("alter table %s.%s modify tag t2 binary(40)"%(parameterDict['dbName'], parameterDict['stbName'])) + tdSql.error("alter table %s.%s modify tag t4 binary(40)"%(parameterDict['dbName'], parameterDict['stbName'])) + tdSql.error("alter table %s.%s modify tag t5 nchar(40)"%(parameterDict['dbName'], parameterDict['stbName'])) + + tdSql.error("alter table %s.%s rename column c1 c1new"%(parameterDict['dbName'], parameterDict['stbName'])) + tdSql.error("alter table %s.%s rename column c2 c2new"%(parameterDict['dbName'], parameterDict['stbName'])) + tdSql.error("alter table %s.%s rename column c3 c3new"%(parameterDict['dbName'], parameterDict['stbName'])) + tdSql.error("alter table %s.%s rename column c4 c4new"%(parameterDict['dbName'], parameterDict['stbName'])) + tdSql.error("alter table %s.%s rename column c5 c5new"%(parameterDict['dbName'], parameterDict['stbName'])) + tdSql.error("alter table %s.%s rename tag t1 t1new"%(parameterDict['dbName'], parameterDict['stbName'])) + tdSql.error("alter table %s.%s rename tag t2 t2new"%(parameterDict['dbName'], parameterDict['stbName'])) + tdSql.error("alter table %s.%s rename tag t3 t3new"%(parameterDict['dbName'], parameterDict['stbName'])) + tdSql.error("alter table %s.%s rename tag t4 t4new"%(parameterDict['dbName'], parameterDict['stbName'])) + tdSql.error("alter table %s.%s rename tag t5 t5new"%(parameterDict['dbName'], parameterDict['stbName'])) + + # alter actions allowed: drop column/tag, modify column/tag type, rename column/tag not included in topic + tdSql.query("alter table %s.%s add column c6 int"%(parameterDict['dbName'], parameterDict['stbName'])) + tdSql.query("alter table %s.%s add tag t6 float"%(parameterDict['dbName'], parameterDict['stbName'])) + + tsLog.info("======== normal table test:") + # alter actions prohibited: drop column/tag, modify column/tag type, rename column/tag included in topic + tdSql.error("alter table %s.%s drop column c1"%(parameterDict['dbName'], ntbName)) + tdSql.error("alter table %s.%s drop column c2"%(parameterDict['dbName'], ntbName)) + tdSql.error("alter table %s.%s drop column c3"%(parameterDict['dbName'], ntbName)) + tdSql.error("alter table %s.%s drop column c4"%(parameterDict['dbName'], ntbName)) + tdSql.error("alter table %s.%s drop column c5"%(parameterDict['dbName'], ntbName)) + + tdSql.error("alter table %s.%s modify column c2 binary(40)"%(parameterDict['dbName'], ntbName)) + tdSql.error("alter table %s.%s modify column c4 binary(40)"%(parameterDict['dbName'], ntbName)) + tdSql.error("alter table %s.%s modify column c5 nchar(40)"%(parameterDict['dbName'], ntbName)) + + tdSql.error("alter table %s.%s rename column c1 c1new"%(parameterDict['dbName'], ntbName)) + tdSql.error("alter table %s.%s rename column 
c2 c2new"%(parameterDict['dbName'], ntbName)) + tdSql.error("alter table %s.%s rename column c3 c3new"%(parameterDict['dbName'], ntbName)) + tdSql.error("alter table %s.%s rename column c4 c4new"%(parameterDict['dbName'], ntbName)) + tdSql.error("alter table %s.%s rename column c5 c5new"%(parameterDict['dbName'], ntbName)) + + # alter actions allowed: drop column/tag, modify column/tag type, rename column/tag not included in topic + tdSql.query("alter table %s.%s add column c6 float"%(parameterDict['dbName'], ntbName)) + + tdLog.info("======== child table test:") + parameterDict['stbName'] = 'stb31' + ctbName = 'stb31_0' + tdSql.query("create table %s.%s (ts timestamp, c1 int, c2 binary(32), c3 double, c4 binary(32), c5 nchar(10)) tags (t1 int, t2 binary(32), t3 double, t4 binary(32), t5 nchar(10))"%(parameterDict["dbName"],parameterDict['stbName'])) + tdSql.query("create table %s.%s using %s.%s tags (10, 100, '1000')"%(parameterDict["dbName"],ctbName,parameterDict["dbName"],parameterDict['stbName'])) + + tdLog.info("create topics from child table") + columnTopicFromCtb = 'column_topic_from_ctb3' + + tdSql.execute("create topic %s as select * from %s.%s " %(columnTopicFromCtb,parameterDict['dbName'],ctbName)) + + # alter actions prohibited: drop column/tag, modify column/tag type, rename column/tag included in topic + tdSql.error("alter table %s.%s drop column c1"%(parameterDict['dbName'], parameterDict['stbName'])) + tdSql.error("alter table %s.%s drop column c2"%(parameterDict['dbName'], parameterDict['stbName'])) + tdSql.error("alter table %s.%s drop column c3"%(parameterDict['dbName'], parameterDict['stbName'])) + tdSql.error("alter table %s.%s drop column c4"%(parameterDict['dbName'], parameterDict['stbName'])) + tdSql.error("alter table %s.%s drop column c5"%(parameterDict['dbName'], parameterDict['stbName'])) + tdSql.error("alter table %s.%s drop tag t1"%(parameterDict['dbName'], parameterDict['stbName'])) + tdSql.error("alter table %s.%s drop tag t2"%(parameterDict['dbName'], parameterDict['stbName'])) + tdSql.error("alter table %s.%s drop tag t3"%(parameterDict['dbName'], parameterDict['stbName'])) + tdSql.error("alter table %s.%s drop tag t4"%(parameterDict['dbName'], parameterDict['stbName'])) + tdSql.error("alter table %s.%s drop tag t5"%(parameterDict['dbName'], parameterDict['stbName'])) + + tdSql.error("alter table %s.%s modify column c2 binary(40)"%(parameterDict['dbName'], parameterDict['stbName'])) + tdSql.error("alter table %s.%s modify column c4 binary(40)"%(parameterDict['dbName'], parameterDict['stbName'])) + tdSql.error("alter table %s.%s modify column c5 nchar(40)"%(parameterDict['dbName'], parameterDict['stbName'])) + tdSql.error("alter table %s.%s modify tag t2 binary(40)"%(parameterDict['dbName'], parameterDict['stbName'])) + tdSql.error("alter table %s.%s modify tag t4 binary(40)"%(parameterDict['dbName'], parameterDict['stbName'])) + tdSql.error("alter table %s.%s modify tag t5 nchar(40)"%(parameterDict['dbName'], parameterDict['stbName'])) + + tdSql.error("alter table %s.%s set tag t1 10"%(parameterDict['dbName'], parameterDict['stbName'])) + tdSql.error("alter table %s.%s set tag t2 '20'"%(parameterDict['dbName'], parameterDict['stbName'])) + tdSql.error("alter table %s.%s set tag t3 30"%(parameterDict['dbName'], parameterDict['stbName'])) + tdSql.error("alter table %s.%s set tag t4 '40'"%(parameterDict['dbName'], parameterDict['stbName'])) + tdSql.error("alter table %s.%s set tag t5 '50'"%(parameterDict['dbName'], parameterDict['stbName'])) + + 
tdSql.error("alter table %s.%s rename column c1 c1new"%(parameterDict['dbName'], parameterDict['stbName'])) + tdSql.error("alter table %s.%s rename column c2 c2new"%(parameterDict['dbName'], parameterDict['stbName'])) + tdSql.error("alter table %s.%s rename column c3 c3new"%(parameterDict['dbName'], parameterDict['stbName'])) + tdSql.error("alter table %s.%s rename column c4 c4new"%(parameterDict['dbName'], parameterDict['stbName'])) + tdSql.error("alter table %s.%s rename column c5 c5new"%(parameterDict['dbName'], parameterDict['stbName'])) + tdSql.error("alter table %s.%s rename tag t1 t1new"%(parameterDict['dbName'], parameterDict['stbName'])) + tdSql.error("alter table %s.%s rename tag t2 t2new"%(parameterDict['dbName'], parameterDict['stbName'])) + tdSql.error("alter table %s.%s rename tag t3 t3new"%(parameterDict['dbName'], parameterDict['stbName'])) + tdSql.error("alter table %s.%s rename tag t4 t4new"%(parameterDict['dbName'], parameterDict['stbName'])) + tdSql.error("alter table %s.%s rename tag t5 t5new"%(parameterDict['dbName'], parameterDict['stbName'])) + + # alter actions allowed: drop column/tag, modify column/tag type, rename column/tag not included in topic + tdSql.query("alter table %s.%s add column c6 float"%(parameterDict['dbName'], parameterDict['stbName'])) + tdSql.query("alter table %s.%s add tag t6 float"%(parameterDict['dbName'], parameterDict['stbName'])) + + tdLog.printNoPrefix("======== test case 3 end ...... ") + + def run(self): + tdSql.prepare() + + buildPath = self.getBuildPath() + if (buildPath == ""): + tdLog.exit("taosd not found!") + else: + tdLog.info("taosd found in %s" % buildPath) + cfgPath = buildPath + "/../sim/psim/cfg" + tdLog.info("cfgPath: %s" % cfgPath) + + self.tmqCase1(cfgPath, buildPath) + self.tmqCase2(cfgPath, buildPath) + self.tmqCase3(cfgPath, buildPath) + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +event = threading.Event() + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/7-tmq/subscribeDb.py b/tests/system-test/7-tmq/subscribeDb.py index 66c79fd29222714e24f4a83ac417502758c02eff..43b707e65127586124caa16f8e5ec060d57a9f11 100644 --- a/tests/system-test/7-tmq/subscribeDb.py +++ b/tests/system-test/7-tmq/subscribeDb.py @@ -22,8 +22,8 @@ class TDTestCase: def init(self, conn, logSql): tdLog.debug(f"start to excute {__file__}") - #tdSql.init(conn.cursor()) - tdSql.init(conn.cursor(), logSql) # output sql.txt file + tdSql.init(conn.cursor()) + #tdSql.init(conn.cursor(), logSql) # output sql.txt file def getBuildPath(self): selfPath = os.path.dirname(os.path.realpath(__file__)) @@ -167,7 +167,7 @@ class TDTestCase: 'vgroups': 4, \ 'stbName': 'stb', \ 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ + 'rowsPerTbl': 5000, \ 'batchNum': 100, \ 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 parameterDict['cfg'] = cfgPath @@ -182,7 +182,7 @@ class TDTestCase: tdLog.info("create topics from db") topicName1 = 'topic_db1' - tdSql.execute("create topic %s as %s" %(topicName1, parameterDict['dbName'])) + tdSql.execute("create topic %s as database %s" %(topicName1, parameterDict['dbName'])) consumerId = 0 expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] topicList = topicName1 @@ -197,7 +197,7 @@ class TDTestCase: event.wait() tdLog.info("start consume processor") - pollDelay = 5 + pollDelay = 100 showMsg = 1 showRow = 1 self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) @@ -223,7 
+223,7 @@ class TDTestCase: tdLog.info("create topics from db") topicName1 = 'topic_db1' - tdSql.execute("create topic %s as %s" %(topicName1, parameterDict['dbName'])) + tdSql.execute("create topic %s as database %s" %(topicName1, parameterDict['dbName'])) consumerId = 0 expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] topicList = topicName1 @@ -236,7 +236,7 @@ class TDTestCase: self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) tdLog.info("start consume processor") - pollDelay = 5 + pollDelay = 20 showMsg = 1 showRow = 1 self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) @@ -264,7 +264,7 @@ class TDTestCase: 'vgroups': 4, \ 'stbName': 'stb', \ 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ + 'rowsPerTbl': 5000, \ 'batchNum': 100, \ 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 parameterDict['cfg'] = cfgPath @@ -279,7 +279,7 @@ class TDTestCase: tdLog.info("create topics from db") topicName1 = 'topic_db1' - tdSql.execute("create topic %s as %s" %(topicName1, parameterDict['dbName'])) + tdSql.execute("create topic %s as database %s" %(topicName1, parameterDict['dbName'])) consumerId = 0 expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] @@ -298,7 +298,7 @@ class TDTestCase: event.wait() tdLog.info("start consume processor") - pollDelay = 5 + pollDelay = 20 showMsg = 1 showRow = 1 self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) @@ -313,8 +313,8 @@ class TDTestCase: for i in range(expectRows): totalConsumeRows += resultList[i] - if totalConsumeRows != expectrowcnt: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) + tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) + if not (totalConsumeRows >= expectrowcnt): tdLog.exit("tmq consume rows error!") tdSql.query("drop topic %s"%topicName1) @@ -330,7 +330,7 @@ class TDTestCase: 'vgroups': 4, \ 'stbName': 'stb1', \ 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ + 'rowsPerTbl': 5000, \ 'batchNum': 100, \ 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 parameterDict['cfg'] = cfgPath @@ -343,7 +343,7 @@ class TDTestCase: tdLog.info("create topics from db") topicName1 = 'topic_db1' - tdSql.execute("create topic %s as %s" %(topicName1, parameterDict['dbName'])) + tdSql.execute("create topic %s as database %s" %(topicName1, parameterDict['dbName'])) consumerId = 0 expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] @@ -364,7 +364,7 @@ class TDTestCase: self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) tdLog.info("start consume processor") - pollDelay = 10 + pollDelay = 100 showMsg = 1 showRow = 1 self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) @@ -399,7 +399,7 @@ class TDTestCase: 'vgroups': 4, \ 'stbName': 'stb', \ 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ + 'rowsPerTbl': 5000, \ 'batchNum': 100, \ 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 parameterDict['cfg'] = cfgPath @@ -416,7 +416,7 @@ class TDTestCase: 'vgroups': 4, \ 'stbName': 'stb2', \ 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ + 'rowsPerTbl': 5000, \ 'batchNum': 100, \ 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 parameterDict['cfg'] = cfgPath @@ -427,7 +427,7 @@ class TDTestCase: tdLog.info("create topics from db") topicName1 = 'topic_db1' - tdSql.execute("create topic %s as %s" %(topicName1, parameterDict['dbName'])) + 
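The recurring edit in these hunks switches db-level subscriptions from the old implicit form to the explicit "create topic ... as database ...". For orientation, the three topic flavors exercised across this patch, side by side (topic and object names illustrative):

tdSql.execute("create topic t_db  as database db1")                      # whole database
tdSql.execute("create topic t_stb as stable db1.stb")                    # one super table
tdSql.execute("create topic t_sel as select ts, c1, c2 from db1.ntb1")   # column subset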
tdSql.execute("create topic %s as database %s" %(topicName1, parameterDict['dbName'])) consumerId = 0 expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] + parameterDict2["rowsPerTbl"] * parameterDict2["ctbNum"] @@ -446,7 +446,7 @@ class TDTestCase: event.wait() tdLog.info("start consume processor") - pollDelay = 5 + pollDelay = 100 showMsg = 1 showRow = 1 self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) @@ -470,336 +470,6 @@ class TDTestCase: tdLog.printNoPrefix("======== test case 3 end ...... ") - def tmqCase4(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 4: Produce while two consumers to subscribe one db, include 2 stb") - tdLog.info("step 1: create database, stb, ctb and insert data") - # create and start thread - parameterDict = {'cfg': '', \ - 'dbName': 'db4', \ - 'vgroups': 4, \ - 'stbName': 'stb', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.initConsumerTable() - - tdSql.execute("create database if not exists %s vgroups %d" %(parameterDict['dbName'], parameterDict['vgroups'])) - - prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) - prepareEnvThread.start() - - parameterDict2 = {'cfg': '', \ - 'dbName': 'db4', \ - 'vgroups': 4, \ - 'stbName': 'stb2', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - prepareEnvThread2 = threading.Thread(target=self.prepareEnv, kwargs=parameterDict2) - prepareEnvThread2.start() - - tdLog.info("create topics from db") - topicName1 = 'topic_db1' - - tdSql.execute("create topic %s as %s" %(topicName1, parameterDict['dbName'])) - - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] + parameterDict2["rowsPerTbl"] * parameterDict2["ctbNum"] - topicList = topicName1 - ifcheckdata = 0 - ifManualCommit = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:earliest' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - consumerId = 1 - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - event.wait() - - tdLog.info("start consume processor") - pollDelay = 5 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - # wait for data ready - prepareEnvThread.join() - prepareEnvThread2.join() - - tdLog.info("insert process end, and start to check consume result") - expectRows = 2 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicName1) - - tdLog.printNoPrefix("======== test case 4 end ...... 
") - - def tmqCase5(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 5: Produce while two consumers to subscribe one db, firstly create one stb, after start consume create other stb") - tdLog.info("step 1: create database, stb, ctb and insert data") - # create and start thread - parameterDict = {'cfg': '', \ - 'dbName': 'db5', \ - 'vgroups': 4, \ - 'stbName': 'stb', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.initConsumerTable() - - tdSql.execute("create database if not exists %s vgroups %d" %(parameterDict['dbName'], parameterDict['vgroups'])) - - prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) - prepareEnvThread.start() - - parameterDict2 = {'cfg': '', \ - 'dbName': 'db5', \ - 'vgroups': 4, \ - 'stbName': 'stb2', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - tdLog.info("create topics from db") - topicName1 = 'topic_db1' - - tdSql.execute("create topic %s as %s" %(topicName1, parameterDict['dbName'])) - - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] + parameterDict2["rowsPerTbl"] * parameterDict2["ctbNum"] - topicList = topicName1 - ifcheckdata = 0 - ifManualCommit = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:earliest' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - consumerId = 1 - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - event.wait() - - tdLog.info("start consume processor") - pollDelay = 5 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - prepareEnvThread2 = threading.Thread(target=self.prepareEnv, kwargs=parameterDict2) - prepareEnvThread2.start() - - # wait for data ready - prepareEnvThread.join() - prepareEnvThread2.join() - - tdLog.info("insert process end, and start to check consume result") - expectRows = 2 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicName1) - - tdLog.printNoPrefix("======== test case 5 end ...... 
") - - def tmqCase6(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 6: Produce while one consumers to subscribe tow topic, Each contains one db") - tdLog.info("step 1: create database, stb, ctb and insert data") - # create and start thread - parameterDict = {'cfg': '', \ - 'dbName': 'db60', \ - 'vgroups': 4, \ - 'stbName': 'stb', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.initConsumerTable() - - tdSql.execute("create database if not exists %s vgroups %d" %(parameterDict['dbName'], parameterDict['vgroups'])) - - prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) - prepareEnvThread.start() - - parameterDict2 = {'cfg': '', \ - 'dbName': 'db61', \ - 'vgroups': 4, \ - 'stbName': 'stb2', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - tdSql.execute("create database if not exists %s vgroups %d" %(parameterDict2['dbName'], parameterDict2['vgroups'])) - - prepareEnvThread2 = threading.Thread(target=self.prepareEnv, kwargs=parameterDict2) - prepareEnvThread2.start() - - tdLog.info("create topics from db") - topicName1 = 'topic_db60' - topicName2 = 'topic_db61' - - tdSql.execute("create topic %s as %s" %(topicName1, parameterDict['dbName'])) - tdSql.execute("create topic %s as %s" %(topicName2, parameterDict2['dbName'])) - - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] + parameterDict2["rowsPerTbl"] * parameterDict2["ctbNum"] - topicList = topicName1 + ',' + topicName2 - ifcheckdata = 0 - ifManualCommit = 0 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:earliest' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - #consumerId = 1 - #self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - event.wait() - - tdLog.info("start consume processor") - pollDelay = 5 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - # wait for data ready - prepareEnvThread.join() - prepareEnvThread2.join() - - tdLog.info("insert process end, and start to check consume result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicName1) - tdSql.query("drop topic %s"%topicName2) - - tdLog.printNoPrefix("======== test case 6 end ...... 
") - - def tmqCase7(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 7: Produce while two consumers to subscribe tow topic, Each contains one db") - tdLog.info("step 1: create database, stb, ctb and insert data") - # create and start thread - parameterDict = {'cfg': '', \ - 'dbName': 'db70', \ - 'vgroups': 4, \ - 'stbName': 'stb', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.initConsumerTable() - - tdSql.execute("create database if not exists %s vgroups %d" %(parameterDict['dbName'], parameterDict['vgroups'])) - - prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) - prepareEnvThread.start() - - parameterDict2 = {'cfg': '', \ - 'dbName': 'db71', \ - 'vgroups': 4, \ - 'stbName': 'stb2', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - tdSql.execute("create database if not exists %s vgroups %d" %(parameterDict2['dbName'], parameterDict2['vgroups'])) - - prepareEnvThread2 = threading.Thread(target=self.prepareEnv, kwargs=parameterDict2) - prepareEnvThread2.start() - - tdLog.info("create topics from db") - topicName1 = 'topic_db60' - topicName2 = 'topic_db61' - - tdSql.execute("create topic %s as %s" %(topicName1, parameterDict['dbName'])) - tdSql.execute("create topic %s as %s" %(topicName2, parameterDict2['dbName'])) - - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] + parameterDict2["rowsPerTbl"] * parameterDict2["ctbNum"] - topicList = topicName1 + ',' + topicName2 - ifcheckdata = 0 - ifManualCommit = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:earliest' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - consumerId = 1 - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - event.wait() - - tdLog.info("start consume processor") - pollDelay = 5 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - # wait for data ready - prepareEnvThread.join() - prepareEnvThread2.join() - - tdLog.info("insert process end, and start to check consume result") - expectRows = 2 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicName1) - tdSql.query("drop topic %s"%topicName2) - - tdLog.printNoPrefix("======== test case 7 end ...... 
") - def run(self): tdSql.prepare() @@ -815,12 +485,7 @@ class TDTestCase: self.tmqCase2(cfgPath, buildPath) self.tmqCase2a(cfgPath, buildPath) self.tmqCase3(cfgPath, buildPath) - self.tmqCase4(cfgPath, buildPath) - self.tmqCase5(cfgPath, buildPath) - self.tmqCase6(cfgPath, buildPath) - self.tmqCase7(cfgPath, buildPath) - - + def stop(self): tdSql.close() tdLog.success(f"{__file__} successfully executed") diff --git a/tests/system-test/7-tmq/subscribeDb0.py b/tests/system-test/7-tmq/subscribeDb0.py new file mode 100644 index 0000000000000000000000000000000000000000..ce273367c75d014d4a6d4228f97e50fd7f3b7df6 --- /dev/null +++ b/tests/system-test/7-tmq/subscribeDb0.py @@ -0,0 +1,515 @@ + +import taos +import sys +import time +import socket +import os +import threading + +from util.log import * +from util.sql import * +from util.cases import * +from util.dnodes import * + +class TDTestCase: + hostname = socket.gethostname() + #rpcDebugFlagVal = '143' + #clientCfgDict = {'serverPort': '', 'firstEp': '', 'secondEp':'', 'rpcDebugFlag':'135', 'fqdn':''} + #clientCfgDict["rpcDebugFlag"] = rpcDebugFlagVal + #updatecfgDict = {'clientCfg': {}, 'serverPort': '', 'firstEp': '', 'secondEp':'', 'rpcDebugFlag':'135', 'fqdn':''} + #updatecfgDict["rpcDebugFlag"] = rpcDebugFlagVal + #print ("===================: ", updatecfgDict) + + def init(self, conn, logSql): + tdLog.debug(f"start to excute {__file__}") + tdSql.init(conn.cursor()) + #tdSql.init(conn.cursor(), logSql) # output sql.txt file + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosd" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root) - len("/build/bin")] + break + return buildPath + + def newcur(self,cfg,host,port): + user = "root" + password = "taosdata" + con=taos.connect(host=host, user=user, password=password, config=cfg ,port=port) + cur=con.cursor() + print(cur) + return cur + + def initConsumerTable(self,cdbName='cdb'): + tdLog.info("create consume database, and consume info table, and consume result table") + tdSql.query("create database if not exists %s vgroups 1"%(cdbName)) + tdSql.query("drop table if exists %s.consumeinfo "%(cdbName)) + tdSql.query("drop table if exists %s.consumeresult "%(cdbName)) + + tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)"%cdbName) + tdSql.query("create table %s.consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)"%cdbName) + + def insertConsumerInfo(self,consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifmanualcommit,cdbName='cdb'): + sql = "insert into %s.consumeinfo values "%cdbName + sql += "(now, %d, '%s', '%s', %d, %d, %d)"%(consumerId, topicList, keyList, expectrowcnt, ifcheckdata, ifmanualcommit) + tdLog.info("consume info sql: %s"%sql) + tdSql.query(sql) + + def selectConsumeResult(self,expectRows,cdbName='cdb'): + resultList=[] + while 1: + tdSql.query("select * from %s.consumeresult"%cdbName) + #tdLog.info("row: %d, %l64d, %l64d"%(tdSql.getData(0, 1),tdSql.getData(0, 2),tdSql.getData(0, 3)) + if tdSql.getRows() == expectRows: + break + else: + time.sleep(5) + + for i in range(expectRows): + tdLog.info 
("consume id: %d, consume msgs: %d, consume rows: %d"%(tdSql.getData(i , 1), tdSql.getData(i , 2), tdSql.getData(i , 3))) + resultList.append(tdSql.getData(i , 3)) + + return resultList + + def startTmqSimProcess(self,buildPath,cfgPath,pollDelay,dbName,showMsg=1,showRow=1,cdbName='cdb',valgrind=0): + shellCmd = 'nohup ' + if valgrind == 1: + logFile = cfgPath + '/../log/valgrind-tmq.log' + shellCmd = 'nohup valgrind --log-file=' + logFile + shellCmd += '--tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all --num-callers=20 -v --workaround-gcc296-bugs=yes ' + + shellCmd += buildPath + '/build/bin/tmq_sim -c ' + cfgPath + shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName) + shellCmd += "> /dev/null 2>&1 &" + tdLog.info(shellCmd) + os.system(shellCmd) + + def create_tables(self,tsql, dbName,vgroups,stbName,ctbNum): + tsql.execute("create database if not exists %s vgroups %d"%(dbName, vgroups)) + tsql.execute("use %s" %dbName) + tsql.execute("create table if not exists %s (ts timestamp, c1 bigint, c2 binary(16)) tags(t1 int)"%stbName) + pre_create = "create table" + sql = pre_create + #tdLog.debug("doing create one stable %s and %d child table in %s ..." %(stbname, count ,dbname)) + for i in range(ctbNum): + sql += " %s_%d using %s tags(%d)"%(stbName,i,stbName,i+1) + if (i > 0) and (i%100 == 0): + tsql.execute(sql) + sql = pre_create + if sql != pre_create: + tsql.execute(sql) + + event.set() + tdLog.debug("complete to create database[%s], stable[%s] and %d child tables" %(dbName, stbName, ctbNum)) + return + + def insert_data(self,tsql,dbName,stbName,ctbNum,rowsPerTbl,batchNum,startTs): + tdLog.debug("start to insert data ............") + tsql.execute("use %s" %dbName) + pre_insert = "insert into " + sql = pre_insert + + t = time.time() + startTs = int(round(t * 1000)) + #tdLog.debug("doing insert data into stable:%s rows:%d ..."%(stbName, allRows)) + for i in range(ctbNum): + sql += " %s_%d values "%(stbName,i) + for j in range(rowsPerTbl): + sql += "(%d, %d, 'tmqrow_%d') "%(startTs + j, j, j) + if (j > 0) and ((j%batchNum == 0) or (j == rowsPerTbl - 1)): + tsql.execute(sql) + if j < rowsPerTbl - 1: + sql = "insert into %s_%d values " %(stbName,i) + else: + sql = "insert into " + #end sql + if sql != pre_insert: + #print("insert sql:%s"%sql) + tsql.execute(sql) + tdLog.debug("insert data ............ 
[OK]") + return + + def prepareEnv(self, **parameterDict): + print ("input parameters:") + print (parameterDict) + # create new connector for my thread + tsql=self.newcur(parameterDict['cfg'], 'localhost', 6030) + self.create_tables(tsql,\ + parameterDict["dbName"],\ + parameterDict["vgroups"],\ + parameterDict["stbName"],\ + parameterDict["ctbNum"]) + + self.insert_data(tsql,\ + parameterDict["dbName"],\ + parameterDict["stbName"],\ + parameterDict["ctbNum"],\ + parameterDict["rowsPerTbl"],\ + parameterDict["batchNum"],\ + parameterDict["startTs"]) + return + + def tmqCase4(self, cfgPath, buildPath): + tdLog.printNoPrefix("======== test case 4: Produce while two consumers to subscribe one db, include 2 stb") + tdLog.info("step 1: create database, stb, ctb and insert data") + # create and start thread + parameterDict = {'cfg': '', \ + 'dbName': 'db4', \ + 'vgroups': 4, \ + 'stbName': 'stb', \ + 'ctbNum': 10, \ + 'rowsPerTbl': 5000, \ + 'batchNum': 100, \ + 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 + parameterDict['cfg'] = cfgPath + + self.initConsumerTable() + + tdSql.execute("create database if not exists %s vgroups %d" %(parameterDict['dbName'], parameterDict['vgroups'])) + + prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) + prepareEnvThread.start() + + parameterDict2 = {'cfg': '', \ + 'dbName': 'db4', \ + 'vgroups': 4, \ + 'stbName': 'stb2', \ + 'ctbNum': 10, \ + 'rowsPerTbl': 5000, \ + 'batchNum': 100, \ + 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 + parameterDict['cfg'] = cfgPath + + prepareEnvThread2 = threading.Thread(target=self.prepareEnv, kwargs=parameterDict2) + prepareEnvThread2.start() + + tdLog.info("create topics from db") + topicName1 = 'topic_db1' + + tdSql.execute("create topic %s as database %s" %(topicName1, parameterDict['dbName'])) + + consumerId = 0 + expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] + parameterDict2["rowsPerTbl"] * parameterDict2["ctbNum"] + topicList = topicName1 + ifcheckdata = 0 + ifManualCommit = 1 + keyList = 'group.id:cgrp1,\ + enable.auto.commit:false,\ + auto.commit.interval.ms:6000,\ + auto.offset.reset:earliest' + self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + consumerId = 1 + self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + event.wait() + + tdLog.info("start consume processor") + pollDelay = 100 + showMsg = 1 + showRow = 1 + self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) + + # wait for data ready + prepareEnvThread.join() + prepareEnvThread2.join() + + tdLog.info("insert process end, and start to check consume result") + expectRows = 2 + resultList = self.selectConsumeResult(expectRows) + totalConsumeRows = 0 + for i in range(expectRows): + totalConsumeRows += resultList[i] + + if totalConsumeRows != expectrowcnt: + tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) + tdLog.exit("tmq consume rows error!") + + tdSql.query("drop topic %s"%topicName1) + + tdLog.printNoPrefix("======== test case 4 end ...... 
") + + def tmqCase5(self, cfgPath, buildPath): + tdLog.printNoPrefix("======== test case 5: Produce while two consumers to subscribe one db, firstly create one stb, after start consume create other stb") + tdLog.info("step 1: create database, stb, ctb and insert data") + # create and start thread + parameterDict = {'cfg': '', \ + 'dbName': 'db5', \ + 'vgroups': 4, \ + 'stbName': 'stb', \ + 'ctbNum': 10, \ + 'rowsPerTbl': 5000, \ + 'batchNum': 100, \ + 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 + parameterDict['cfg'] = cfgPath + + self.initConsumerTable() + + tdSql.execute("create database if not exists %s vgroups %d" %(parameterDict['dbName'], parameterDict['vgroups'])) + + prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) + prepareEnvThread.start() + + parameterDict2 = {'cfg': '', \ + 'dbName': 'db5', \ + 'vgroups': 4, \ + 'stbName': 'stb2', \ + 'ctbNum': 10, \ + 'rowsPerTbl': 5000, \ + 'batchNum': 100, \ + 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 + parameterDict['cfg'] = cfgPath + + tdLog.info("create topics from db") + topicName1 = 'topic_db1' + + tdSql.execute("create topic %s as database %s" %(topicName1, parameterDict['dbName'])) + + consumerId = 0 + expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] + parameterDict2["rowsPerTbl"] * parameterDict2["ctbNum"] + topicList = topicName1 + ifcheckdata = 0 + ifManualCommit = 1 + keyList = 'group.id:cgrp1,\ + enable.auto.commit:false,\ + auto.commit.interval.ms:6000,\ + auto.offset.reset:earliest' + self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + consumerId = 1 + self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + event.wait() + + tdLog.info("start consume processor") + pollDelay = 100 + showMsg = 1 + showRow = 1 + self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) + + prepareEnvThread2 = threading.Thread(target=self.prepareEnv, kwargs=parameterDict2) + prepareEnvThread2.start() + + # wait for data ready + prepareEnvThread.join() + prepareEnvThread2.join() + + tdLog.info("insert process end, and start to check consume result") + expectRows = 2 + resultList = self.selectConsumeResult(expectRows) + totalConsumeRows = 0 + for i in range(expectRows): + totalConsumeRows += resultList[i] + + if totalConsumeRows < expectrowcnt: + tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) + tdLog.exit("tmq consume rows error!") + + tdSql.query("drop topic %s"%topicName1) + + tdLog.printNoPrefix("======== test case 5 end ...... 
") + + def tmqCase6(self, cfgPath, buildPath): + tdLog.printNoPrefix("======== test case 6: Produce while one consumers to subscribe tow topic, Each contains one db") + tdLog.info("step 1: create database, stb, ctb and insert data") + # create and start thread + parameterDict = {'cfg': '', \ + 'dbName': 'db60', \ + 'vgroups': 4, \ + 'stbName': 'stb', \ + 'ctbNum': 10, \ + 'rowsPerTbl': 5000, \ + 'batchNum': 100, \ + 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 + parameterDict['cfg'] = cfgPath + + self.initConsumerTable() + + tdSql.execute("create database if not exists %s vgroups %d" %(parameterDict['dbName'], parameterDict['vgroups'])) + + prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) + prepareEnvThread.start() + + parameterDict2 = {'cfg': '', \ + 'dbName': 'db61', \ + 'vgroups': 4, \ + 'stbName': 'stb2', \ + 'ctbNum': 10, \ + 'rowsPerTbl': 5000, \ + 'batchNum': 100, \ + 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 + parameterDict['cfg'] = cfgPath + + tdSql.execute("create database if not exists %s vgroups %d" %(parameterDict2['dbName'], parameterDict2['vgroups'])) + + prepareEnvThread2 = threading.Thread(target=self.prepareEnv, kwargs=parameterDict2) + prepareEnvThread2.start() + + tdLog.info("create topics from db") + topicName1 = 'topic_db60' + topicName2 = 'topic_db61' + + tdSql.execute("create topic %s as database %s" %(topicName1, parameterDict['dbName'])) + tdSql.execute("create topic %s as database %s" %(topicName2, parameterDict2['dbName'])) + + consumerId = 0 + expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] + parameterDict2["rowsPerTbl"] * parameterDict2["ctbNum"] + topicList = topicName1 + ',' + topicName2 + ifcheckdata = 0 + ifManualCommit = 0 + keyList = 'group.id:cgrp1,\ + enable.auto.commit:false,\ + auto.commit.interval.ms:6000,\ + auto.offset.reset:earliest' + self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + #consumerId = 1 + #self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + event.wait() + + tdLog.info("start consume processor") + pollDelay = 100 + showMsg = 1 + showRow = 1 + self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) + + # wait for data ready + prepareEnvThread.join() + prepareEnvThread2.join() + + tdLog.info("insert process end, and start to check consume result") + expectRows = 1 + resultList = self.selectConsumeResult(expectRows) + totalConsumeRows = 0 + for i in range(expectRows): + totalConsumeRows += resultList[i] + + if totalConsumeRows != expectrowcnt: + tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) + tdLog.exit("tmq consume rows error!") + + tdSql.query("drop topic %s"%topicName1) + tdSql.query("drop topic %s"%topicName2) + + tdLog.printNoPrefix("======== test case 6 end ...... 
") + + def tmqCase7(self, cfgPath, buildPath): + tdLog.printNoPrefix("======== test case 7: Produce while two consumers to subscribe tow topic, Each contains one db") + tdLog.info("step 1: create database, stb, ctb and insert data") + # create and start thread + parameterDict = {'cfg': '', \ + 'dbName': 'db70', \ + 'vgroups': 4, \ + 'stbName': 'stb', \ + 'ctbNum': 10, \ + 'rowsPerTbl': 5000, \ + 'batchNum': 100, \ + 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 + parameterDict['cfg'] = cfgPath + + self.initConsumerTable() + + tdSql.execute("create database if not exists %s vgroups %d" %(parameterDict['dbName'], parameterDict['vgroups'])) + + prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) + prepareEnvThread.start() + + parameterDict2 = {'cfg': '', \ + 'dbName': 'db71', \ + 'vgroups': 4, \ + 'stbName': 'stb2', \ + 'ctbNum': 10, \ + 'rowsPerTbl': 5000, \ + 'batchNum': 100, \ + 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 + parameterDict['cfg'] = cfgPath + + tdSql.execute("create database if not exists %s vgroups %d" %(parameterDict2['dbName'], parameterDict2['vgroups'])) + + prepareEnvThread2 = threading.Thread(target=self.prepareEnv, kwargs=parameterDict2) + prepareEnvThread2.start() + + tdLog.info("create topics from db") + topicName1 = 'topic_db60' + topicName2 = 'topic_db61' + + tdSql.execute("create topic %s as database %s" %(topicName1, parameterDict['dbName'])) + tdSql.execute("create topic %s as database %s" %(topicName2, parameterDict2['dbName'])) + + consumerId = 0 + expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] + parameterDict2["rowsPerTbl"] * parameterDict2["ctbNum"] + topicList = topicName1 + ',' + topicName2 + ifcheckdata = 0 + ifManualCommit = 1 + keyList = 'group.id:cgrp1,\ + enable.auto.commit:false,\ + auto.commit.interval.ms:6000,\ + auto.offset.reset:earliest' + self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + consumerId = 1 + self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + event.wait() + + tdLog.info("start consume processor") + pollDelay = 100 + showMsg = 1 + showRow = 1 + self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) + + # wait for data ready + prepareEnvThread.join() + prepareEnvThread2.join() + + tdLog.info("insert process end, and start to check consume result") + expectRows = 2 + resultList = self.selectConsumeResult(expectRows) + totalConsumeRows = 0 + for i in range(expectRows): + totalConsumeRows += resultList[i] + + if totalConsumeRows != expectrowcnt: + tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) + tdLog.exit("tmq consume rows error!") + + tdSql.query("drop topic %s"%topicName1) + tdSql.query("drop topic %s"%topicName2) + + tdLog.printNoPrefix("======== test case 7 end ...... 
") + + def run(self): + tdSql.prepare() + + buildPath = self.getBuildPath() + if (buildPath == ""): + tdLog.exit("taosd not found!") + else: + tdLog.info("taosd found in %s" % buildPath) + cfgPath = buildPath + "/../sim/psim/cfg" + tdLog.info("cfgPath: %s" % cfgPath) + + self.tmqCase4(cfgPath, buildPath) + self.tmqCase5(cfgPath, buildPath) + self.tmqCase6(cfgPath, buildPath) + self.tmqCase7(cfgPath, buildPath) + + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +event = threading.Event() + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/7-tmq/subscribeDb1.py b/tests/system-test/7-tmq/subscribeDb1.py index a00bed30e4ad680b0113d562a7c88c63a3b6af45..ca87f0dba533404aaf14a6cd2437417d962260ed 100644 --- a/tests/system-test/7-tmq/subscribeDb1.py +++ b/tests/system-test/7-tmq/subscribeDb1.py @@ -22,8 +22,8 @@ class TDTestCase: def init(self, conn, logSql): tdLog.debug(f"start to excute {__file__}") - #tdSql.init(conn.cursor()) - tdSql.init(conn.cursor(), logSql) # output sql.txt file + tdSql.init(conn.cursor()) + #tdSql.init(conn.cursor(), logSql) # output sql.txt file def getBuildPath(self): selfPath = os.path.dirname(os.path.realpath(__file__)) @@ -183,7 +183,7 @@ class TDTestCase: tdLog.info("create topics from db") topicName1 = 'topic_db1' - tdSql.execute("create topic %s as %s" %(topicName1, parameterDict['dbName'])) + tdSql.execute("create topic %s as database %s" %(topicName1, parameterDict['dbName'])) consumerId = 0 expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] / 2 topicList = topicName1 @@ -198,7 +198,7 @@ class TDTestCase: event.wait() tdLog.info("start consume processor") - pollDelay = 5 + pollDelay = 100 showMsg = 1 showRow = 1 self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) @@ -261,7 +261,7 @@ class TDTestCase: tdLog.info("create topics from db") topicName1 = 'topic_db1' - tdSql.execute("create topic %s as %s" %(topicName1, parameterDict['dbName'])) + tdSql.execute("create topic %s as database %s" %(topicName1, parameterDict['dbName'])) consumerId = 0 expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] / 2 topicList = topicName1 @@ -276,7 +276,7 @@ class TDTestCase: event.wait() tdLog.info("start consume processor") - pollDelay = 5 + pollDelay = 100 showMsg = 1 showRow = 1 self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) @@ -291,10 +291,9 @@ class TDTestCase: for i in range(expectRows): totalConsumeRows += resultList[i] + tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) if totalConsumeRows != expectrowcnt: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) tdLog.exit("tmq consume rows error!") - tdLog.info("again start consume processer") self.initConsumerTable() @@ -303,12 +302,13 @@ class TDTestCase: self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) expectRows = 1 resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 + totalConsumeRows2 = 0 for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt/2: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt/2)) + totalConsumeRows2 += resultList[i] + + tdLog.info("firstly act consume rows: %d"%(totalConsumeRows)) + tdLog.info("secondly act consume rows: %d, expect 
consume rows: %d"%(totalConsumeRows2, expectrowcnt)) + if totalConsumeRows + totalConsumeRows2 != expectrowcnt: tdLog.exit("tmq consume rows error!") tdSql.query("drop topic %s"%topicName1) @@ -339,7 +339,7 @@ class TDTestCase: tdLog.info("create topics from db") topicName1 = 'topic_db1' - tdSql.execute("create topic %s as %s" %(topicName1, parameterDict['dbName'])) + tdSql.execute("create topic %s as database %s" %(topicName1, parameterDict['dbName'])) consumerId = 0 expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] topicList = topicName1 @@ -354,7 +354,7 @@ class TDTestCase: event.wait() tdLog.info("start consume processor") - pollDelay = 15 + pollDelay = 100 showMsg = 1 showRow = 1 self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) @@ -382,6 +382,7 @@ class TDTestCase: tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) tdLog.exit("tmq consume rows error!") + time.sleep(15) tdSql.query("drop topic %s"%topicName1) tdLog.printNoPrefix("======== test case 10 end ...... ") @@ -410,7 +411,7 @@ class TDTestCase: tdLog.info("create topics from db") topicName1 = 'topic_db1' - tdSql.execute("create topic %s as %s" %(topicName1, parameterDict['dbName'])) + tdSql.execute("create topic %s as database %s" %(topicName1, parameterDict['dbName'])) consumerId = 0 expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] topicList = topicName1 @@ -425,7 +426,7 @@ class TDTestCase: event.wait() tdLog.info("start consume processor") - pollDelay = 5 + pollDelay = 100 showMsg = 1 showRow = 1 self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) @@ -453,6 +454,7 @@ class TDTestCase: tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) tdLog.exit("tmq consume rows error!") + time.sleep(15) tdSql.query("drop topic %s"%topicName1) tdLog.printNoPrefix("======== test case 11 end ...... ") diff --git a/tests/system-test/7-tmq/subscribeStb.py b/tests/system-test/7-tmq/subscribeStb.py index a0b3668d47bb45a637da035f19da3cbe01dfa9c1..2b7f0d3d5ff06ea0c36f9768c3a7f6d3eae715a0 100644 --- a/tests/system-test/7-tmq/subscribeStb.py +++ b/tests/system-test/7-tmq/subscribeStb.py @@ -29,8 +29,8 @@ class TDTestCase: def init(self, conn, logSql): tdLog.debug(f"start to excute {__file__}") - #tdSql.init(conn.cursor()) - tdSql.init(conn.cursor(), logSql) # output sql.txt file + tdSql.init(conn.cursor()) + #tdSql.init(conn.cursor(), logSql) # output sql.txt file def getBuildPath(self): selfPath = os.path.dirname(os.path.realpath(__file__)) @@ -346,1024 +346,6 @@ class TDTestCase: tdLog.printNoPrefix("======== test case 2 end ...... 
") - def tmqCase3(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 3: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db3', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 50000, \ - 'batchNum': 13, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,parameterDict["dbName"],parameterDict["stbName"],parameterDict["ctbNum"],parameterDict["rowsPerTbl"],parameterDict["batchNum"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 0 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:earliest' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume processor") - pollDelay = 5 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - time.sleep(5) - tdLog.info("drop som child table of stb1") - dropTblNum = 4 - tdSql.query("drop table if exists %s.%s_1"%(parameterDict["dbName"], parameterDict["stbName"])) - tdSql.query("drop table if exists %s.%s_2"%(parameterDict["dbName"], parameterDict["stbName"])) - tdSql.query("drop table if exists %s.%s_3"%(parameterDict["dbName"], parameterDict["stbName"])) - tdSql.query("drop table if exists %s.%s_4"%(parameterDict["dbName"], parameterDict["stbName"])) - - tdLog.info("drop some child tables, then start to check consume result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - remaindrowcnt = parameterDict["rowsPerTbl"] * (parameterDict["ctbNum"] - dropTblNum) - - if not (totalConsumeRows < expectrowcnt and totalConsumeRows > remaindrowcnt): - tdLog.info("act consume rows: %d, expect consume rows: between %d and %d"%(totalConsumeRows, remaindrowcnt, expectrowcnt)) - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicFromStb1) - - tdLog.printNoPrefix("======== test case 3 end ...... 
") - - def tmqCase4(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 4: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db4', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,\ - parameterDict["dbName"],\ - parameterDict["stbName"],\ - parameterDict["ctbNum"],\ - parameterDict["rowsPerTbl"],\ - parameterDict["batchNum"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:earliest' - self.insertConsumerInfo(consumerId, expectrowcnt/4,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume processor") - pollDelay = 5 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start to check consume result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt/4: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt/4)) - tdLog.exit("tmq consume rows error!") - - self.initConsumerInfoTable() - consumerId = 1 - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("again start consume processor") - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("again check consume result") - expectRows = 2 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicFromStb1) - - tdLog.printNoPrefix("======== test case 4 end ...... 
") - - def tmqCase5(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 5: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db5', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,\ - parameterDict["dbName"],\ - parameterDict["stbName"],\ - parameterDict["ctbNum"],\ - parameterDict["rowsPerTbl"],\ - parameterDict["batchNum"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 0 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:earliest' - self.insertConsumerInfo(consumerId, expectrowcnt/4,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume processor") - pollDelay = 5 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start to check consume result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt/4: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt/4)) - tdLog.exit("tmq consume rows error!") - - self.initConsumerInfoTable() - consumerId = 1 - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("again start consume processor") - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("again check consume result") - expectRows = 2 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != (expectrowcnt * (1 + 1/4)): - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicFromStb1) - - tdLog.printNoPrefix("======== test case 5 end ...... 
") - - def tmqCase6(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 6: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db6', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,\ - parameterDict["dbName"],\ - parameterDict["stbName"],\ - parameterDict["ctbNum"],\ - parameterDict["rowsPerTbl"],\ - parameterDict["batchNum"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:earliest' - self.insertConsumerInfo(consumerId, expectrowcnt/4,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume processor") - pollDelay = 5 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start to check consume result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt/4: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt/4)) - tdLog.exit("tmq consume rows error!") - - self.initConsumerInfoTable() - consumerId = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:latest' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("again start consume processor") - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("again check consume result") - expectRows = 2 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicFromStb1) - - tdLog.printNoPrefix("======== test case 6 end ...... 
") - - def tmqCase7(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 7: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db7', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,\ - parameterDict["dbName"],\ - parameterDict["stbName"],\ - parameterDict["ctbNum"],\ - parameterDict["rowsPerTbl"],\ - parameterDict["batchNum"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:latest' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume processor") - pollDelay = 5 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start to check consume result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != 0: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0)) - tdLog.exit("tmq consume rows error!") - - self.initConsumerInfoTable() - consumerId = 1 - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("again start consume processor") - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("again check consume result") - expectRows = 2 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != 0: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0)) - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicFromStb1) - - tdLog.printNoPrefix("======== test case 7 end ...... 
") - - def tmqCase8(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 8: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db8', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,\ - parameterDict["dbName"],\ - parameterDict["stbName"],\ - parameterDict["ctbNum"],\ - parameterDict["rowsPerTbl"],\ - parameterDict["batchNum"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:latest' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume 0 processor") - pollDelay = 10 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start to check consume 0 result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != 0: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0)) - tdLog.exit("tmq consume rows error!") - - tdLog.info("start consume 1 processor") - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start one new thread to insert data") - parameterDict['actionType'] = actionType.INSERT_DATA - prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) - prepareEnvThread.start() - prepareEnvThread.join() - - tdLog.info("start to check consume 0 and 1 result") - expectRows = 2 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) - tdLog.exit("tmq consume rows error!") - - tdLog.info("start consume 2 processor") - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start one new thread to insert data") - parameterDict['actionType'] = actionType.INSERT_DATA - prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) - prepareEnvThread.start() - prepareEnvThread.join() - - tdLog.info("start to check consume 0 and 1 and 2 result") - expectRows = 3 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt*2: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt*2)) - tdLog.exit("tmq consume rows 
error!") - - tdSql.query("drop topic %s"%topicFromStb1) - - tdLog.printNoPrefix("======== test case 8 end ...... ") - - def tmqCase9(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 9: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db9', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,\ - parameterDict["dbName"],\ - parameterDict["stbName"],\ - parameterDict["ctbNum"],\ - parameterDict["rowsPerTbl"],\ - parameterDict["batchNum"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:latest' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume 0 processor") - pollDelay = 10 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start to check consume 0 result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != 0: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0)) - tdLog.exit("tmq consume rows error!") - - tdLog.info("start consume 1 processor") - self.initConsumerInfoTable() - consumerId = 1 - ifManualCommit = 0 - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start one new thread to insert data") - parameterDict['actionType'] = actionType.INSERT_DATA - prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) - prepareEnvThread.start() - prepareEnvThread.join() - - tdLog.info("start to check consume 0 and 1 result") - expectRows = 2 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) - tdLog.exit("tmq consume rows error!") - - tdLog.info("start consume 2 processor") - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start one new thread to insert data") - parameterDict['actionType'] = actionType.INSERT_DATA - prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) - prepareEnvThread.start() - prepareEnvThread.join() - - tdLog.info("start to check consume 0 and 1 and 2 result") - expectRows = 3 - resultList = 
self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt*2: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt*2)) - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicFromStb1) - - tdLog.printNoPrefix("======== test case 9 end ...... ") - - def tmqCase10(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 10: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db10', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,\ - parameterDict["dbName"],\ - parameterDict["stbName"],\ - parameterDict["ctbNum"],\ - parameterDict["rowsPerTbl"],\ - parameterDict["batchNum"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:latest' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume 0 processor") - pollDelay = 10 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start to check consume 0 result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != 0: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0)) - tdLog.exit("tmq consume rows error!") - - tdLog.info("start consume 1 processor") - self.initConsumerInfoTable() - consumerId = 1 - ifManualCommit = 1 - self.insertConsumerInfo(consumerId, expectrowcnt-10000,topicList,keyList,ifcheckdata,ifManualCommit) - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start one new thread to insert data") - parameterDict['actionType'] = actionType.INSERT_DATA - prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) - prepareEnvThread.start() - prepareEnvThread.join() - - tdLog.info("start to check consume 0 and 1 result") - expectRows = 2 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt-10000: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt-10000)) - tdLog.exit("tmq consume rows error!") - - tdLog.info("start consume 2 processor") - self.initConsumerInfoTable() - consumerId = 2 - ifManualCommit = 1 - self.insertConsumerInfo(consumerId, 
expectrowcnt+10000,topicList,keyList,ifcheckdata,ifManualCommit) - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start one new thread to insert data") - parameterDict['actionType'] = actionType.INSERT_DATA - prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) - prepareEnvThread.start() - prepareEnvThread.join() - - tdLog.info("start to check consume 0 and 1 and 2 result") - expectRows = 3 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt*2: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt*2)) - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicFromStb1) - - tdLog.printNoPrefix("======== test case 10 end ...... ") - - def tmqCase11(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 11: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db11', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,\ - parameterDict["dbName"],\ - parameterDict["stbName"],\ - parameterDict["ctbNum"],\ - parameterDict["rowsPerTbl"],\ - parameterDict["batchNum"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:none' - self.insertConsumerInfo(consumerId, expectrowcnt/4,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume processor") - pollDelay = 5 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start to check consume result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != 0: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0)) - tdLog.exit("tmq consume rows error!") - - self.initConsumerInfoTable() - consumerId = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:none' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("again start consume processor") - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("again check consume result") - expectRows = 2 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != 0: - 
tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0)) - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicFromStb1) - - tdLog.printNoPrefix("======== test case 11 end ...... ") - - def tmqCase12(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 12: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db12', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,\ - parameterDict["dbName"],\ - parameterDict["stbName"],\ - parameterDict["ctbNum"],\ - parameterDict["rowsPerTbl"],\ - parameterDict["batchNum"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 0 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:earliest' - self.insertConsumerInfo(consumerId, expectrowcnt/4,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume processor") - pollDelay = 5 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start to check consume result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt/4: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt/4)) - tdLog.exit("tmq consume rows error!") - - self.initConsumerInfoTable() - consumerId = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:none' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("again start consume processor") - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("again check consume result") - expectRows = 2 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt/4: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt/4)) - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicFromStb1) - - tdLog.printNoPrefix("======== test case 12 end ...... 
") - - def tmqCase13(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 13: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db13', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,\ - parameterDict["dbName"],\ - parameterDict["stbName"],\ - parameterDict["ctbNum"],\ - parameterDict["rowsPerTbl"],\ - parameterDict["batchNum"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:earliest' - self.insertConsumerInfo(consumerId, expectrowcnt/4,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume processor") - pollDelay = 5 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start to check consume result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt/4: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt/4)) - tdLog.exit("tmq consume rows error!") - - self.initConsumerInfoTable() - consumerId = 1 - ifManualCommit = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:none' - self.insertConsumerInfo(consumerId, expectrowcnt/2,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("again start consume processor") - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("again check consume result") - expectRows = 2 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt*(1/2+1/4): - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt*(1/2+1/4))) - tdLog.exit("tmq consume rows error!") - - self.initConsumerInfoTable() - consumerId = 2 - ifManualCommit = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:none' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("again start consume processor") - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("again check consume result") - expectRows = 3 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt: - tdLog.info("act 
consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicFromStb1) - - tdLog.printNoPrefix("======== test case 13 end ...... ") - def run(self): tdSql.prepare() @@ -1377,9 +359,6 @@ class TDTestCase: self.tmqCase1(cfgPath, buildPath) self.tmqCase2(cfgPath, buildPath) - # self.tmqCase3(cfgPath, buildPath) - # self.tmqCase4(cfgPath, buildPath) - # self.tmqCase5(cfgPath, buildPath) def stop(self): tdSql.close() diff --git a/tests/system-test/7-tmq/subscribeStb0.py b/tests/system-test/7-tmq/subscribeStb0.py index 1d56103059e84de3afbe14647f357b152ab291c3..a212cf759066f4cc67bec18800e6b9581013ab0e 100644 --- a/tests/system-test/7-tmq/subscribeStb0.py +++ b/tests/system-test/7-tmq/subscribeStb0.py @@ -29,8 +29,8 @@ class TDTestCase: def init(self, conn, logSql): tdLog.debug(f"start to excute {__file__}") - #tdSql.init(conn.cursor()) - tdSql.init(conn.cursor(), logSql) # output sql.txt file + tdSql.init(conn.cursor()) + #tdSql.init(conn.cursor(), logSql) # output sql.txt file def getBuildPath(self): selfPath = os.path.dirname(os.path.realpath(__file__)) @@ -183,169 +183,6 @@ class TDTestCase: return - def tmqCase1(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 1: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db1', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 0 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:earliest' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume processor") - pollDelay = 100 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - time.sleep(5) - self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,\ - parameterDict["dbName"],\ - parameterDict["stbName"],\ - parameterDict["ctbNum"],\ - parameterDict["rowsPerTbl"],\ - parameterDict["batchNum"]) - - tdLog.info("insert process end, and start to check consume result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicFromStb1) - - tdLog.printNoPrefix("======== test case 1 end ...... 
") - - def tmqCase2(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 2: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db2', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - parameterDict2 = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db2', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb2', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict2['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_stable(tdSql, parameterDict2["dbName"], parameterDict2["stbName"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 0 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:earliest' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume processor") - pollDelay = 100 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start create child tables of stb1 and stb2") - parameterDict['actionType'] = actionType.CREATE_CTABLE - parameterDict2['actionType'] = actionType.CREATE_CTABLE - - prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) - prepareEnvThread.start() - prepareEnvThread2 = threading.Thread(target=self.prepareEnv, kwargs=parameterDict2) - prepareEnvThread2.start() - - prepareEnvThread.join() - prepareEnvThread2.join() - - tdLog.info("start insert data into child tables of stb1 and stb2") - parameterDict['actionType'] = actionType.INSERT_DATA - parameterDict2['actionType'] = actionType.INSERT_DATA - - prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) - prepareEnvThread.start() - prepareEnvThread2 = threading.Thread(target=self.prepareEnv, kwargs=parameterDict2) - prepareEnvThread2.start() - - prepareEnvThread.join() - prepareEnvThread2.join() - - tdLog.info("insert process end, and start to check consume result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicFromStb1) - - tdLog.printNoPrefix("======== test case 2 end ...... 
") - def tmqCase3(self, cfgPath, buildPath): tdLog.printNoPrefix("======== test case 3: ") @@ -360,7 +197,7 @@ class TDTestCase: 'replica': 1, \ 'stbName': 'stb1', \ 'ctbNum': 10, \ - 'rowsPerTbl': 20000, \ + 'rowsPerTbl': 30000, \ 'batchNum': 50, \ 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 parameterDict['cfg'] = cfgPath @@ -386,12 +223,12 @@ class TDTestCase: self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) tdLog.info("start consume processor") - pollDelay = 5 + pollDelay = 100 showMsg = 1 showRow = 1 self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - time.sleep(3) + time.sleep(1.5) tdLog.info("drop som child table of stb1") dropTblNum = 4 tdSql.query("drop table if exists %s.%s_1"%(parameterDict["dbName"], parameterDict["stbName"])) @@ -407,9 +244,9 @@ class TDTestCase: totalConsumeRows += resultList[i] remaindrowcnt = parameterDict["rowsPerTbl"] * (parameterDict["ctbNum"] - dropTblNum) - - if not (totalConsumeRows < expectrowcnt and totalConsumeRows > remaindrowcnt): - tdLog.info("act consume rows: %d, expect consume rows: between %d and %d"%(totalConsumeRows, remaindrowcnt, expectrowcnt)) + + tdLog.info("act consume rows: %d, expect consume rows: between %d and %d"%(totalConsumeRows, remaindrowcnt, expectrowcnt)) + if not (totalConsumeRows <= expectrowcnt and totalConsumeRows >= remaindrowcnt): tdLog.exit("tmq consume rows error!") tdSql.query("drop topic %s"%topicFromStb1) @@ -461,7 +298,7 @@ class TDTestCase: self.insertConsumerInfo(consumerId, expectrowcnt/4,topicList,keyList,ifcheckdata,ifManualCommit) tdLog.info("start consume processor") - pollDelay = 5 + pollDelay = 100 showMsg = 1 showRow = 1 self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) @@ -544,7 +381,7 @@ class TDTestCase: self.insertConsumerInfo(consumerId, expectrowcnt/4,topicList,keyList,ifcheckdata,ifManualCommit) tdLog.info("start consume processor") - pollDelay = 5 + pollDelay = 100 showMsg = 1 showRow = 1 self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) @@ -580,789 +417,7 @@ class TDTestCase: tdSql.query("drop topic %s"%topicFromStb1) - tdLog.printNoPrefix("======== test case 5 end ...... 
") - - def tmqCase6(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 6: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db6', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,\ - parameterDict["dbName"],\ - parameterDict["stbName"],\ - parameterDict["ctbNum"],\ - parameterDict["rowsPerTbl"],\ - parameterDict["batchNum"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:earliest' - self.insertConsumerInfo(consumerId, expectrowcnt/4,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume processor") - pollDelay = 5 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start to check consume result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt/4: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt/4)) - tdLog.exit("tmq consume rows error!") - - self.initConsumerInfoTable() - consumerId = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:latest' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("again start consume processor") - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("again check consume result") - expectRows = 2 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicFromStb1) - - tdLog.printNoPrefix("======== test case 6 end ...... 
") - - def tmqCase7(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 7: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db7', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,\ - parameterDict["dbName"],\ - parameterDict["stbName"],\ - parameterDict["ctbNum"],\ - parameterDict["rowsPerTbl"],\ - parameterDict["batchNum"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:latest' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume processor") - pollDelay = 5 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start to check consume result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != 0: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0)) - tdLog.exit("tmq consume rows error!") - - self.initConsumerInfoTable() - consumerId = 1 - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("again start consume processor") - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("again check consume result") - expectRows = 2 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != 0: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0)) - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicFromStb1) - - tdLog.printNoPrefix("======== test case 7 end ...... 
") - - def tmqCase8(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 8: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db8', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,\ - parameterDict["dbName"],\ - parameterDict["stbName"],\ - parameterDict["ctbNum"],\ - parameterDict["rowsPerTbl"],\ - parameterDict["batchNum"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:latest' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume 0 processor") - pollDelay = 10 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start to check consume 0 result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != 0: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0)) - tdLog.exit("tmq consume rows error!") - - tdLog.info("start consume 1 processor") - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start one new thread to insert data") - parameterDict['actionType'] = actionType.INSERT_DATA - prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) - prepareEnvThread.start() - prepareEnvThread.join() - - tdLog.info("start to check consume 0 and 1 result") - expectRows = 2 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) - tdLog.exit("tmq consume rows error!") - - tdLog.info("start consume 2 processor") - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start one new thread to insert data") - parameterDict['actionType'] = actionType.INSERT_DATA - prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) - prepareEnvThread.start() - prepareEnvThread.join() - - tdLog.info("start to check consume 0 and 1 and 2 result") - expectRows = 3 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt*2: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt*2)) - tdLog.exit("tmq consume rows 
error!") - - tdSql.query("drop topic %s"%topicFromStb1) - - tdLog.printNoPrefix("======== test case 8 end ...... ") - - def tmqCase9(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 9: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db9', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,\ - parameterDict["dbName"],\ - parameterDict["stbName"],\ - parameterDict["ctbNum"],\ - parameterDict["rowsPerTbl"],\ - parameterDict["batchNum"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:latest' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume 0 processor") - pollDelay = 10 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start to check consume 0 result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != 0: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0)) - tdLog.exit("tmq consume rows error!") - - tdLog.info("start consume 1 processor") - self.initConsumerInfoTable() - consumerId = 1 - ifManualCommit = 0 - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start one new thread to insert data") - parameterDict['actionType'] = actionType.INSERT_DATA - prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) - prepareEnvThread.start() - prepareEnvThread.join() - - tdLog.info("start to check consume 0 and 1 result") - expectRows = 2 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) - tdLog.exit("tmq consume rows error!") - - tdLog.info("start consume 2 processor") - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start one new thread to insert data") - parameterDict['actionType'] = actionType.INSERT_DATA - prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) - prepareEnvThread.start() - prepareEnvThread.join() - - tdLog.info("start to check consume 0 and 1 and 2 result") - expectRows = 3 - resultList = 
self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt*2: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt*2)) - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicFromStb1) - - tdLog.printNoPrefix("======== test case 9 end ...... ") - - def tmqCase10(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 10: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db10', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,\ - parameterDict["dbName"],\ - parameterDict["stbName"],\ - parameterDict["ctbNum"],\ - parameterDict["rowsPerTbl"],\ - parameterDict["batchNum"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:latest' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume 0 processor") - pollDelay = 10 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start to check consume 0 result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != 0: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0)) - tdLog.exit("tmq consume rows error!") - - tdLog.info("start consume 1 processor") - self.initConsumerInfoTable() - consumerId = 1 - ifManualCommit = 1 - self.insertConsumerInfo(consumerId, expectrowcnt-10000,topicList,keyList,ifcheckdata,ifManualCommit) - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start one new thread to insert data") - parameterDict['actionType'] = actionType.INSERT_DATA - prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) - prepareEnvThread.start() - prepareEnvThread.join() - - tdLog.info("start to check consume 0 and 1 result") - expectRows = 2 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt-10000: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt-10000)) - tdLog.exit("tmq consume rows error!") - - tdLog.info("start consume 2 processor") - self.initConsumerInfoTable() - consumerId = 2 - ifManualCommit = 1 - self.insertConsumerInfo(consumerId, 
expectrowcnt+10000,topicList,keyList,ifcheckdata,ifManualCommit) - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start one new thread to insert data") - parameterDict['actionType'] = actionType.INSERT_DATA - prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) - prepareEnvThread.start() - prepareEnvThread.join() - - tdLog.info("start to check consume 0 and 1 and 2 result") - expectRows = 3 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt*2: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt*2)) - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicFromStb1) - - tdLog.printNoPrefix("======== test case 10 end ...... ") - - def tmqCase11(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 11: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db11', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,\ - parameterDict["dbName"],\ - parameterDict["stbName"],\ - parameterDict["ctbNum"],\ - parameterDict["rowsPerTbl"],\ - parameterDict["batchNum"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:none' - self.insertConsumerInfo(consumerId, expectrowcnt/4,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume processor") - pollDelay = 5 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start to check consume result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != 0: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0)) - tdLog.exit("tmq consume rows error!") - - self.initConsumerInfoTable() - consumerId = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:none' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("again start consume processor") - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("again check consume result") - expectRows = 2 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != 0: - 
tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0)) - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicFromStb1) - - tdLog.printNoPrefix("======== test case 11 end ...... ") - - def tmqCase12(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 12: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db12', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,\ - parameterDict["dbName"],\ - parameterDict["stbName"],\ - parameterDict["ctbNum"],\ - parameterDict["rowsPerTbl"],\ - parameterDict["batchNum"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 0 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:earliest' - self.insertConsumerInfo(consumerId, expectrowcnt/4,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume processor") - pollDelay = 5 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start to check consume result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt/4: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt/4)) - tdLog.exit("tmq consume rows error!") - - self.initConsumerInfoTable() - consumerId = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:none' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("again start consume processor") - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("again check consume result") - expectRows = 2 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt/4: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt/4)) - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicFromStb1) - - tdLog.printNoPrefix("======== test case 12 end ...... 
") - - def tmqCase13(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 13: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db13', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,\ - parameterDict["dbName"],\ - parameterDict["stbName"],\ - parameterDict["ctbNum"],\ - parameterDict["rowsPerTbl"],\ - parameterDict["batchNum"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:earliest' - self.insertConsumerInfo(consumerId, expectrowcnt/4,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume processor") - pollDelay = 5 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start to check consume result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt/4: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt/4)) - tdLog.exit("tmq consume rows error!") - - self.initConsumerInfoTable() - consumerId = 1 - ifManualCommit = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:none' - self.insertConsumerInfo(consumerId, expectrowcnt/2,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("again start consume processor") - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("again check consume result") - expectRows = 2 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt*(1/2+1/4): - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt*(1/2+1/4))) - tdLog.exit("tmq consume rows error!") - - self.initConsumerInfoTable() - consumerId = 2 - ifManualCommit = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:none' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("again start consume processor") - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("again check consume result") - expectRows = 3 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt: - tdLog.info("act 
consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicFromStb1) - - tdLog.printNoPrefix("======== test case 13 end ...... ") + tdLog.printNoPrefix("======== test case 5 end ...... ") def run(self): tdSql.prepare() @@ -1375,8 +430,6 @@ class TDTestCase: cfgPath = buildPath + "/../sim/psim/cfg" tdLog.info("cfgPath: %s" % cfgPath) - # self.tmqCase1(cfgPath, buildPath) - # self.tmqCase2(cfgPath, buildPath) self.tmqCase3(cfgPath, buildPath) self.tmqCase4(cfgPath, buildPath) self.tmqCase5(cfgPath, buildPath) diff --git a/tests/system-test/7-tmq/subscribeStb1.py b/tests/system-test/7-tmq/subscribeStb1.py index 049b297d2df0f54e7d681c6236d942340da2d19f..92347690d9a14f35e50ac11e18c51daa7fb1f716 100644 --- a/tests/system-test/7-tmq/subscribeStb1.py +++ b/tests/system-test/7-tmq/subscribeStb1.py @@ -29,8 +29,8 @@ class TDTestCase: def init(self, conn, logSql): tdLog.debug(f"start to excute {__file__}") - #tdSql.init(conn.cursor()) - tdSql.init(conn.cursor(), logSql) # output sql.txt file + tdSql.init(conn.cursor()) + #tdSql.init(conn.cursor(), logSql) # output sql.txt file def getBuildPath(self): selfPath = os.path.dirname(os.path.realpath(__file__)) @@ -183,248 +183,15 @@ class TDTestCase: return - def tmqCase1(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 1: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db1', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 0 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:earliest' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume processor") - pollDelay = 100 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - time.sleep(5) - self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,\ - parameterDict["dbName"],\ - parameterDict["stbName"],\ - parameterDict["ctbNum"],\ - parameterDict["rowsPerTbl"],\ - parameterDict["batchNum"]) - - tdLog.info("insert process end, and start to check consume result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicFromStb1) - - tdLog.printNoPrefix("======== test case 1 end ...... 
") - - def tmqCase2(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 2: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db2', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - parameterDict2 = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db2', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb2', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict2['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_stable(tdSql, parameterDict2["dbName"], parameterDict2["stbName"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 0 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:earliest' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume processor") - pollDelay = 100 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start create child tables of stb1 and stb2") - parameterDict['actionType'] = actionType.CREATE_CTABLE - parameterDict2['actionType'] = actionType.CREATE_CTABLE - - prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) - prepareEnvThread.start() - prepareEnvThread2 = threading.Thread(target=self.prepareEnv, kwargs=parameterDict2) - prepareEnvThread2.start() - - prepareEnvThread.join() - prepareEnvThread2.join() - - tdLog.info("start insert data into child tables of stb1 and stb2") - parameterDict['actionType'] = actionType.INSERT_DATA - parameterDict2['actionType'] = actionType.INSERT_DATA - - prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) - prepareEnvThread.start() - prepareEnvThread2 = threading.Thread(target=self.prepareEnv, kwargs=parameterDict2) - prepareEnvThread2.start() - - prepareEnvThread.join() - prepareEnvThread2.join() - - tdLog.info("insert process end, and start to check consume result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicFromStb1) - - tdLog.printNoPrefix("======== test case 2 end ...... 
") - - def tmqCase3(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 3: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db3', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 13, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,parameterDict["dbName"],parameterDict["stbName"],parameterDict["ctbNum"],parameterDict["rowsPerTbl"],parameterDict["batchNum"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 0 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:earliest' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume processor") - pollDelay = 5 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - time.sleep(2) - tdLog.info("drop som child table of stb1") - dropTblNum = 4 - tdSql.query("drop table if exists %s.%s_9"%(parameterDict["dbName"], parameterDict["stbName"])) - tdSql.query("drop table if exists %s.%s_8"%(parameterDict["dbName"], parameterDict["stbName"])) - tdSql.query("drop table if exists %s.%s_7"%(parameterDict["dbName"], parameterDict["stbName"])) - tdSql.query("drop table if exists %s.%s_3"%(parameterDict["dbName"], parameterDict["stbName"])) - - tdLog.info("drop some child tables, then start to check consume result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - remaindrowcnt = parameterDict["rowsPerTbl"] * (parameterDict["ctbNum"] - dropTblNum) - - if not (totalConsumeRows < expectrowcnt and totalConsumeRows > remaindrowcnt): - tdLog.info("act consume rows: %d, expect consume rows: between %d and %d"%(totalConsumeRows, remaindrowcnt, expectrowcnt)) - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicFromStb1) - - tdLog.printNoPrefix("======== test case 3 end ...... 
") - - def tmqCase4(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 4: ") + def tmqCase6(self, cfgPath, buildPath): + tdLog.printNoPrefix("======== test case 6: ") self.initConsumerTable() # create and start thread parameterDict = {'cfg': '', \ 'actionType': 0, \ - 'dbName': 'db4', \ + 'dbName': 'db6', \ 'dropFlag': 1, \ 'vgroups': 4, \ 'replica': 1, \ @@ -461,7 +228,7 @@ class TDTestCase: self.insertConsumerInfo(consumerId, expectrowcnt/4,topicList,keyList,ifcheckdata,ifManualCommit) tdLog.info("start consume processor") - pollDelay = 5 + pollDelay = 100 showMsg = 1 showRow = 1 self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) @@ -479,6 +246,10 @@ class TDTestCase: self.initConsumerInfoTable() consumerId = 1 + keyList = 'group.id:cgrp1,\ + enable.auto.commit:false,\ + auto.commit.interval.ms:6000,\ + auto.offset.reset:latest' self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) tdLog.info("again start consume processor") @@ -497,17 +268,17 @@ class TDTestCase: tdSql.query("drop topic %s"%topicFromStb1) - tdLog.printNoPrefix("======== test case 4 end ...... ") + tdLog.printNoPrefix("======== test case 6 end ...... ") - def tmqCase5(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 5: ") + def tmqCase7(self, cfgPath, buildPath): + tdLog.printNoPrefix("======== test case 7: ") self.initConsumerTable() # create and start thread parameterDict = {'cfg': '', \ 'actionType': 0, \ - 'dbName': 'db5', \ + 'dbName': 'db7', \ 'dropFlag': 1, \ 'vgroups': 4, \ 'replica': 1, \ @@ -536,15 +307,15 @@ class TDTestCase: expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] topicList = topicFromStb1 ifcheckdata = 0 - ifManualCommit = 0 + ifManualCommit = 1 keyList = 'group.id:cgrp1,\ enable.auto.commit:false,\ auto.commit.interval.ms:6000,\ - auto.offset.reset:earliest' - self.insertConsumerInfo(consumerId, expectrowcnt/4,topicList,keyList,ifcheckdata,ifManualCommit) + auto.offset.reset:latest' + self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) tdLog.info("start consume processor") - pollDelay = 5 + pollDelay = 100 showMsg = 1 showRow = 1 self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) @@ -556,8 +327,8 @@ class TDTestCase: for i in range(expectRows): totalConsumeRows += resultList[i] - if totalConsumeRows != expectrowcnt/4: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt/4)) + if totalConsumeRows != 0: + tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0)) tdLog.exit("tmq consume rows error!") self.initConsumerInfoTable() @@ -574,796 +345,14 @@ class TDTestCase: for i in range(expectRows): totalConsumeRows += resultList[i] - if totalConsumeRows != (expectrowcnt * (1 + 1/4)): - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicFromStb1) - - tdLog.printNoPrefix("======== test case 5 end ...... 
") - - def tmqCase6(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 6: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db6', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,\ - parameterDict["dbName"],\ - parameterDict["stbName"],\ - parameterDict["ctbNum"],\ - parameterDict["rowsPerTbl"],\ - parameterDict["batchNum"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:earliest' - self.insertConsumerInfo(consumerId, expectrowcnt/4,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume processor") - pollDelay = 5 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start to check consume result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt/4: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt/4)) - tdLog.exit("tmq consume rows error!") - - self.initConsumerInfoTable() - consumerId = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:latest' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("again start consume processor") - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("again check consume result") - expectRows = 2 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicFromStb1) - - tdLog.printNoPrefix("======== test case 6 end ...... 
") - - def tmqCase7(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 7: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db7', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,\ - parameterDict["dbName"],\ - parameterDict["stbName"],\ - parameterDict["ctbNum"],\ - parameterDict["rowsPerTbl"],\ - parameterDict["batchNum"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:latest' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume processor") - pollDelay = 5 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start to check consume result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != 0: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0)) - tdLog.exit("tmq consume rows error!") - - self.initConsumerInfoTable() - consumerId = 1 - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("again start consume processor") - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("again check consume result") - expectRows = 2 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != 0: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0)) + if totalConsumeRows != 0: + tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0)) tdLog.exit("tmq consume rows error!") tdSql.query("drop topic %s"%topicFromStb1) tdLog.printNoPrefix("======== test case 7 end ...... 
") - def tmqCase8(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 8: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db8', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,\ - parameterDict["dbName"],\ - parameterDict["stbName"],\ - parameterDict["ctbNum"],\ - parameterDict["rowsPerTbl"],\ - parameterDict["batchNum"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:latest' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume 0 processor") - pollDelay = 10 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start to check consume 0 result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != 0: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0)) - tdLog.exit("tmq consume rows error!") - - tdLog.info("start consume 1 processor") - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start one new thread to insert data") - parameterDict['actionType'] = actionType.INSERT_DATA - prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) - prepareEnvThread.start() - prepareEnvThread.join() - - tdLog.info("start to check consume 0 and 1 result") - expectRows = 2 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) - tdLog.exit("tmq consume rows error!") - - tdLog.info("start consume 2 processor") - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start one new thread to insert data") - parameterDict['actionType'] = actionType.INSERT_DATA - prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) - prepareEnvThread.start() - prepareEnvThread.join() - - tdLog.info("start to check consume 0 and 1 and 2 result") - expectRows = 3 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt*2: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt*2)) - tdLog.exit("tmq consume rows 
error!") - - tdSql.query("drop topic %s"%topicFromStb1) - - tdLog.printNoPrefix("======== test case 8 end ...... ") - - def tmqCase9(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 9: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db9', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,\ - parameterDict["dbName"],\ - parameterDict["stbName"],\ - parameterDict["ctbNum"],\ - parameterDict["rowsPerTbl"],\ - parameterDict["batchNum"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:latest' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume 0 processor") - pollDelay = 10 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start to check consume 0 result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != 0: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0)) - tdLog.exit("tmq consume rows error!") - - tdLog.info("start consume 1 processor") - self.initConsumerInfoTable() - consumerId = 1 - ifManualCommit = 0 - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start one new thread to insert data") - parameterDict['actionType'] = actionType.INSERT_DATA - prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) - prepareEnvThread.start() - prepareEnvThread.join() - - tdLog.info("start to check consume 0 and 1 result") - expectRows = 2 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) - tdLog.exit("tmq consume rows error!") - - tdLog.info("start consume 2 processor") - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start one new thread to insert data") - parameterDict['actionType'] = actionType.INSERT_DATA - prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) - prepareEnvThread.start() - prepareEnvThread.join() - - tdLog.info("start to check consume 0 and 1 and 2 result") - expectRows = 3 - resultList = 
self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt*2: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt*2)) - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicFromStb1) - - tdLog.printNoPrefix("======== test case 9 end ...... ") - - def tmqCase10(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 10: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db10', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,\ - parameterDict["dbName"],\ - parameterDict["stbName"],\ - parameterDict["ctbNum"],\ - parameterDict["rowsPerTbl"],\ - parameterDict["batchNum"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:latest' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume 0 processor") - pollDelay = 10 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start to check consume 0 result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != 0: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0)) - tdLog.exit("tmq consume rows error!") - - tdLog.info("start consume 1 processor") - self.initConsumerInfoTable() - consumerId = 1 - ifManualCommit = 1 - self.insertConsumerInfo(consumerId, expectrowcnt-10000,topicList,keyList,ifcheckdata,ifManualCommit) - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start one new thread to insert data") - parameterDict['actionType'] = actionType.INSERT_DATA - prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) - prepareEnvThread.start() - prepareEnvThread.join() - - tdLog.info("start to check consume 0 and 1 result") - expectRows = 2 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt-10000: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt-10000)) - tdLog.exit("tmq consume rows error!") - - tdLog.info("start consume 2 processor") - self.initConsumerInfoTable() - consumerId = 2 - ifManualCommit = 1 - self.insertConsumerInfo(consumerId, 
expectrowcnt+10000,topicList,keyList,ifcheckdata,ifManualCommit) - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start one new thread to insert data") - parameterDict['actionType'] = actionType.INSERT_DATA - prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) - prepareEnvThread.start() - prepareEnvThread.join() - - tdLog.info("start to check consume 0 and 1 and 2 result") - expectRows = 3 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt*2: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt*2)) - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicFromStb1) - - tdLog.printNoPrefix("======== test case 10 end ...... ") - - def tmqCase11(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 11: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db11', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,\ - parameterDict["dbName"],\ - parameterDict["stbName"],\ - parameterDict["ctbNum"],\ - parameterDict["rowsPerTbl"],\ - parameterDict["batchNum"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:none' - self.insertConsumerInfo(consumerId, expectrowcnt/4,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume processor") - pollDelay = 5 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start to check consume result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != 0: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0)) - tdLog.exit("tmq consume rows error!") - - self.initConsumerInfoTable() - consumerId = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:none' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("again start consume processor") - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("again check consume result") - expectRows = 2 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != 0: - 
tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0)) - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicFromStb1) - - tdLog.printNoPrefix("======== test case 11 end ...... ") - - def tmqCase12(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 12: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db12', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,\ - parameterDict["dbName"],\ - parameterDict["stbName"],\ - parameterDict["ctbNum"],\ - parameterDict["rowsPerTbl"],\ - parameterDict["batchNum"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 0 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:earliest' - self.insertConsumerInfo(consumerId, expectrowcnt/4,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume processor") - pollDelay = 5 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start to check consume result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt/4: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt/4)) - tdLog.exit("tmq consume rows error!") - - self.initConsumerInfoTable() - consumerId = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:none' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("again start consume processor") - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("again check consume result") - expectRows = 2 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt/4: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt/4)) - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicFromStb1) - - tdLog.printNoPrefix("======== test case 12 end ...... 
") - - def tmqCase13(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 13: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db13', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,\ - parameterDict["dbName"],\ - parameterDict["stbName"],\ - parameterDict["ctbNum"],\ - parameterDict["rowsPerTbl"],\ - parameterDict["batchNum"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:earliest' - self.insertConsumerInfo(consumerId, expectrowcnt/4,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume processor") - pollDelay = 5 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start to check consume result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt/4: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt/4)) - tdLog.exit("tmq consume rows error!") - - self.initConsumerInfoTable() - consumerId = 1 - ifManualCommit = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:none' - self.insertConsumerInfo(consumerId, expectrowcnt/2,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("again start consume processor") - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("again check consume result") - expectRows = 2 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt*(1/2+1/4): - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt*(1/2+1/4))) - tdLog.exit("tmq consume rows error!") - - self.initConsumerInfoTable() - consumerId = 2 - ifManualCommit = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:none' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("again start consume processor") - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("again check consume result") - expectRows = 3 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt: - tdLog.info("act 
consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicFromStb1) - - tdLog.printNoPrefix("======== test case 13 end ...... ") - def run(self): tdSql.prepare() @@ -1375,19 +364,8 @@ class TDTestCase: cfgPath = buildPath + "/../sim/psim/cfg" tdLog.info("cfgPath: %s" % cfgPath) - #self.tmqCase1(cfgPath, buildPath) - #self.tmqCase2(cfgPath, buildPath) - #self.tmqCase3(cfgPath, buildPath) - #self.tmqCase4(cfgPath, buildPath) - #self.tmqCase5(cfgPath, buildPath) self.tmqCase6(cfgPath, buildPath) self.tmqCase7(cfgPath, buildPath) - self.tmqCase8(cfgPath, buildPath) - self.tmqCase9(cfgPath, buildPath) - self.tmqCase10(cfgPath, buildPath) - self.tmqCase11(cfgPath, buildPath) - self.tmqCase12(cfgPath, buildPath) - self.tmqCase13(cfgPath, buildPath) def stop(self): tdSql.close() diff --git a/tests/system-test/7-tmq/subscribeStb2.py b/tests/system-test/7-tmq/subscribeStb2.py index e825ebd3b6ab15eaf9dcd0ab833557fef62664f9..d08adcdc8374d01a0f91dfd596b2de6521d86f84 100644 --- a/tests/system-test/7-tmq/subscribeStb2.py +++ b/tests/system-test/7-tmq/subscribeStb2.py @@ -29,8 +29,8 @@ class TDTestCase: def init(self, conn, logSql): tdLog.debug(f"start to excute {__file__}") - #tdSql.init(conn.cursor()) - tdSql.init(conn.cursor(), logSql) # output sql.txt file + tdSql.init(conn.cursor()) + #tdSql.init(conn.cursor(), logSql) # output sql.txt file def getBuildPath(self): selfPath = os.path.dirname(os.path.realpath(__file__)) @@ -183,18 +183,15 @@ class TDTestCase: return - def tmqCase1(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 1: ") + def tmqCase8(self, cfgPath, buildPath): + tdLog.printNoPrefix("======== test case 8: ") self.initConsumerTable() - auotCtbNum = 5 - auotCtbPrefix = 'autoCtb' - # create and start thread parameterDict = {'cfg': '', \ 'actionType': 0, \ - 'dbName': 'db1', \ + 'dbName': 'db8', \ 'dropFlag': 1, \ 'vgroups': 4, \ 'replica': 1, \ @@ -204,42 +201,60 @@ class TDTestCase: 'batchNum': 100, \ 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 parameterDict['cfg'] = cfgPath - + self.create_database(tdSql, parameterDict["dbName"]) self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,parameterDict["dbName"],parameterDict["stbName"],parameterDict["ctbNum"],parameterDict["rowsPerTbl"],parameterDict["batchNum"]) + self.insert_data(tdSql,\ + parameterDict["dbName"],\ + parameterDict["stbName"],\ + parameterDict["ctbNum"],\ + parameterDict["rowsPerTbl"],\ + parameterDict["batchNum"]) tdLog.info("create topics from stb1") topicFromStb1 = 'topic_stb1' tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * (auotCtbNum + parameterDict["ctbNum"]) + expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] topicList = topicFromStb1 ifcheckdata = 0 - ifManualCommit = 0 + ifManualCommit = 1 keyList = 'group.id:cgrp1,\ enable.auto.commit:false,\ auto.commit.interval.ms:6000,\ - auto.offset.reset:earliest' + auto.offset.reset:latest' self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - tdLog.info("start consume processor") + tdLog.info("start consume 0 processor") pollDelay = 100 showMsg = 1 showRow = 1 
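+        # NOTE (illustrative): pollDelay is forwarded to tmq_sim as its "-y"
+        # argument (see startTmqSimProcess); a large value such as 100
+        # presumably keeps the consumer polling long enough to observe rows
+        # inserted by the background threads started later in this case.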
self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - # add some new child tables using auto ctreating mode - time.sleep(1) - for index in range(auotCtbNum): - tdSql.query("create table %s.%s_%d using %s.%s tags(%d)"%(parameterDict["dbName"], auotCtbPrefix, index, parameterDict["dbName"], parameterDict["stbName"], index)) + tdLog.info("start to check consume 0 result") + expectRows = 1 + resultList = self.selectConsumeResult(expectRows) + totalConsumeRows = 0 + for i in range(expectRows): + totalConsumeRows += resultList[i] + + if totalConsumeRows != 0: + tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0)) + tdLog.exit("tmq consume rows error!") + + tdLog.info("start consume 1 processor") + self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - self.insert_data(tdSql,parameterDict["dbName"],auotCtbPrefix,auotCtbNum,parameterDict["rowsPerTbl"],parameterDict["batchNum"]) + tdLog.info("start one new thread to insert data") + parameterDict['actionType'] = actionType.INSERT_DATA + prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) + prepareEnvThread.start() + prepareEnvThread.join() - tdLog.info("insert process end, and start to check consume result") - expectRows = 1 + tdLog.info("start to check consume 0 and 1 result") + expectRows = 2 resultList = self.selectConsumeResult(expectRows) totalConsumeRows = 0 for i in range(expectRows): @@ -249,22 +264,39 @@ class TDTestCase: tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) tdLog.exit("tmq consume rows error!") + tdLog.info("start consume 2 processor") + self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) + + tdLog.info("start one new thread to insert data") + parameterDict['actionType'] = actionType.INSERT_DATA + prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) + prepareEnvThread.start() + prepareEnvThread.join() + + tdLog.info("start to check consume 0 and 1 and 2 result") + expectRows = 3 + resultList = self.selectConsumeResult(expectRows) + totalConsumeRows = 0 + for i in range(expectRows): + totalConsumeRows += resultList[i] + + if totalConsumeRows != expectrowcnt*2: + tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt*2)) + tdLog.exit("tmq consume rows error!") + tdSql.query("drop topic %s"%topicFromStb1) - tdLog.printNoPrefix("======== test case 1 end ...... ") + tdLog.printNoPrefix("======== test case 8 end ...... 
") - def tmqCase2(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 2: ") + def tmqCase9(self, cfgPath, buildPath): + tdLog.printNoPrefix("======== test case 9: ") self.initConsumerTable() - auotCtbNum = 10 - auotCtbPrefix = 'autoCtb' - # create and start thread parameterDict = {'cfg': '', \ 'actionType': 0, \ - 'dbName': 'db2', \ + 'dbName': 'db9', \ 'dropFlag': 1, \ 'vgroups': 4, \ 'replica': 1, \ @@ -274,46 +306,64 @@ class TDTestCase: 'batchNum': 100, \ 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 parameterDict['cfg'] = cfgPath - + self.create_database(tdSql, parameterDict["dbName"]) self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,parameterDict["dbName"],parameterDict["stbName"],parameterDict["ctbNum"],parameterDict["rowsPerTbl"],parameterDict["batchNum"]) - - self.create_stable(tdSql, parameterDict["dbName"], 'stb2') + self.insert_data(tdSql,\ + parameterDict["dbName"],\ + parameterDict["stbName"],\ + parameterDict["ctbNum"],\ + parameterDict["rowsPerTbl"],\ + parameterDict["batchNum"]) - tdLog.info("create topics from stb0/stb1") + tdLog.info("create topics from stb1") topicFromStb1 = 'topic_stb1' - topicFromStb2 = 'topic_stb2' - + tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb2, parameterDict['dbName'], 'stb2')) consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * (auotCtbNum + parameterDict["ctbNum"]) - topicList = '%s, %s'%(topicFromStb1,topicFromStb2) + expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] + topicList = topicFromStb1 ifcheckdata = 0 - ifManualCommit = 0 + ifManualCommit = 1 keyList = 'group.id:cgrp1,\ enable.auto.commit:false,\ auto.commit.interval.ms:6000,\ - auto.offset.reset:earliest' + auto.offset.reset:latest' self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - tdLog.info("start consume processor") + tdLog.info("start consume 0 processor") pollDelay = 100 showMsg = 1 showRow = 1 self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - # add some new child tables using auto ctreating mode - time.sleep(1) - for index in range(auotCtbNum): - tdSql.query("create table %s.%s_%d using %s.%s tags(%d)"%(parameterDict["dbName"], auotCtbPrefix, index, parameterDict["dbName"], 'stb2', index)) + tdLog.info("start to check consume 0 result") + expectRows = 1 + resultList = self.selectConsumeResult(expectRows) + totalConsumeRows = 0 + for i in range(expectRows): + totalConsumeRows += resultList[i] + + if totalConsumeRows != 0: + tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0)) + tdLog.exit("tmq consume rows error!") - self.insert_data(tdSql,parameterDict["dbName"],auotCtbPrefix,auotCtbNum,parameterDict["rowsPerTbl"],parameterDict["batchNum"]) + tdLog.info("start consume 1 processor") + self.initConsumerInfoTable() + consumerId = 1 + ifManualCommit = 0 + self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - tdLog.info("insert process end, and start to check consume result") - expectRows = 1 + tdLog.info("start one new thread to insert data") + 
parameterDict['actionType'] = actionType.INSERT_DATA
+        prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict)
+        prepareEnvThread.start()
+        prepareEnvThread.join()
+
+        tdLog.info("start to check consume 0 and 1 result")
+        expectRows = 2
         resultList = self.selectConsumeResult(expectRows)
         totalConsumeRows = 0
         for i in range(expectRows):
@@ -323,9 +373,29 @@ class TDTestCase:
             tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt))
             tdLog.exit("tmq consume rows error!")
 
+        tdLog.info("start consume 2 processor")
+        self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow)
+
+        tdLog.info("start one new thread to insert data")
+        parameterDict['actionType'] = actionType.INSERT_DATA
+        prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict)
+        prepareEnvThread.start()
+        prepareEnvThread.join()
+
+        tdLog.info("start to check consume 0 and 1 and 2 result")
+        expectRows = 3
+        resultList = self.selectConsumeResult(expectRows)
+        totalConsumeRows = 0
+        for i in range(expectRows):
+            totalConsumeRows += resultList[i]
+
+        if totalConsumeRows != expectrowcnt*2:
+            tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt*2))
+            tdLog.exit("tmq consume rows error!")
+
         tdSql.query("drop topic %s"%topicFromStb1)
 
-        tdLog.printNoPrefix("======== test case 2 end ...... ")
+        tdLog.printNoPrefix("======== test case 9 end ...... ")
 
     def run(self):
         tdSql.prepare()
@@ -338,8 +408,8 @@ class TDTestCase:
         cfgPath = buildPath + "/../sim/psim/cfg"
         tdLog.info("cfgPath: %s" % cfgPath)
 
-        self.tmqCase1(cfgPath, buildPath)
-        self.tmqCase2(cfgPath, buildPath)
+        self.tmqCase8(cfgPath, buildPath)
+        self.tmqCase9(cfgPath, buildPath)
 
     def stop(self):
         tdSql.close()
diff --git a/tests/system-test/7-tmq/subscribeStb3.py b/tests/system-test/7-tmq/subscribeStb3.py
new file mode 100644
index 0000000000000000000000000000000000000000..58e36911c1407add56a5ef023364f5925e2629b1
--- /dev/null
+++ b/tests/system-test/7-tmq/subscribeStb3.py
@@ -0,0 +1,607 @@
+
+import taos
+import sys
+import time
+import socket
+import os
+import threading
+from enum import Enum
+
+from util.log import *
+from util.sql import *
+from util.cases import *
+from util.dnodes import *
+
+class actionType(Enum):
+    CREATE_DATABASE = 0
+    CREATE_STABLE = 1
+    CREATE_CTABLE = 2
+    INSERT_DATA = 3
+
+class TDTestCase:
+    hostname = socket.gethostname()
+    #rpcDebugFlagVal = '143'
+    #clientCfgDict = {'serverPort': '', 'firstEp': '', 'secondEp':'', 'rpcDebugFlag':'135', 'fqdn':''}
+    #clientCfgDict["rpcDebugFlag"] = rpcDebugFlagVal
+    #updatecfgDict = {'clientCfg': {}, 'serverPort': '', 'firstEp': '', 'secondEp':'', 'rpcDebugFlag':'135', 'fqdn':''}
+    #updatecfgDict["rpcDebugFlag"] = rpcDebugFlagVal
+    #print ("===================: ", updatecfgDict)
+
+    def init(self, conn, logSql):
+        tdLog.debug(f"start to execute {__file__}")
+        tdSql.init(conn.cursor())
+        #tdSql.init(conn.cursor(), logSql) # output sql.txt file
+
+    def getBuildPath(self):
+        selfPath = os.path.dirname(os.path.realpath(__file__))
+
+        if ("community" in selfPath):
+            projPath = selfPath[:selfPath.find("community")]
+        else:
+            projPath = selfPath[:selfPath.find("tests")]
+
+        for root, dirs, files in os.walk(projPath):
+            if ("taosd" in files):
+                rootRealPath = os.path.dirname(os.path.realpath(root))
+                if ("packaging" not in rootRealPath):
+                    buildPath = root[:len(root) - len("/build/bin")]
+                    break
+        return buildPath
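+
+    # Illustrative sketch (not called by the cases below, names hypothetical):
+    # the consumer "keyList" strings used throughout these tests are plain
+    # "key:value,key:value" pairs handed to tmq_sim through the consumeinfo
+    # table. Assuming that format, such a string could be assembled from a
+    # dict like so:
+    #   def buildKeyList(cfg):
+    #       return ','.join('%s:%s' % (k, v) for k, v in cfg.items())
+    #   buildKeyList({'group.id': 'cgrp1', 'auto.offset.reset': 'latest'})
+    #   # -> 'group.id:cgrp1,auto.offset.reset:latest'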
= "taosdata" + con=taos.connect(host=host, user=user, password=password, config=cfg ,port=port) + cur=con.cursor() + print(cur) + return cur + + def initConsumerTable(self,cdbName='cdb'): + tdLog.info("create consume database, and consume info table, and consume result table") + tdSql.query("create database if not exists %s vgroups 1"%(cdbName)) + tdSql.query("drop table if exists %s.consumeinfo "%(cdbName)) + tdSql.query("drop table if exists %s.consumeresult "%(cdbName)) + + tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)"%cdbName) + tdSql.query("create table %s.consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)"%cdbName) + + def initConsumerInfoTable(self,cdbName='cdb'): + tdLog.info("drop consumeinfo table") + tdSql.query("drop table if exists %s.consumeinfo "%(cdbName)) + tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)"%cdbName) + + def insertConsumerInfo(self,consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifmanualcommit,cdbName='cdb'): + sql = "insert into %s.consumeinfo values "%cdbName + sql += "(now, %d, '%s', '%s', %d, %d, %d)"%(consumerId, topicList, keyList, expectrowcnt, ifcheckdata, ifmanualcommit) + tdLog.info("consume info sql: %s"%sql) + tdSql.query(sql) + + def selectConsumeResult(self,expectRows,cdbName='cdb'): + resultList=[] + while 1: + tdSql.query("select * from %s.consumeresult"%cdbName) + #tdLog.info("row: %d, %l64d, %l64d"%(tdSql.getData(0, 1),tdSql.getData(0, 2),tdSql.getData(0, 3)) + if tdSql.getRows() == expectRows: + break + else: + time.sleep(5) + + for i in range(expectRows): + tdLog.info ("consume id: %d, consume msgs: %d, consume rows: %d"%(tdSql.getData(i , 1), tdSql.getData(i , 2), tdSql.getData(i , 3))) + resultList.append(tdSql.getData(i , 3)) + + return resultList + + def startTmqSimProcess(self,buildPath,cfgPath,pollDelay,dbName,showMsg=1,showRow=1,cdbName='cdb',valgrind=0): + shellCmd = 'nohup ' + if valgrind == 1: + logFile = cfgPath + '/../log/valgrind-tmq.log' + shellCmd = 'nohup valgrind --log-file=' + logFile + shellCmd += '--tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all --num-callers=20 -v --workaround-gcc296-bugs=yes ' + + shellCmd += buildPath + '/build/bin/tmq_sim -c ' + cfgPath + shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName) + shellCmd += "> /dev/null 2>&1 &" + tdLog.info(shellCmd) + os.system(shellCmd) + + def create_database(self,tsql, dbName,dropFlag=1,vgroups=4,replica=1): + if dropFlag == 1: + tsql.execute("drop database if exists %s"%(dbName)) + + tsql.execute("create database if not exists %s vgroups %d replica %d"%(dbName, vgroups, replica)) + tdLog.debug("complete to create database %s"%(dbName)) + return + + def create_stable(self,tsql, dbName,stbName): + tsql.execute("create table if not exists %s.%s (ts timestamp, c1 bigint, c2 binary(16)) tags(t1 int)"%(dbName, stbName)) + tdLog.debug("complete to create %s.%s" %(dbName, stbName)) + return + + def create_ctables(self,tsql, dbName,stbName,ctbNum): + tsql.execute("use %s" %dbName) + pre_create = "create table" + sql = pre_create + #tdLog.debug("doing create one stable %s and %d child table in %s ..." 
+    def create_ctables(self,tsql, dbName,stbName,ctbNum):
+        tsql.execute("use %s" %dbName)
+        pre_create = "create table"
+        sql = pre_create
+        #tdLog.debug("doing create one stable %s and %d child table in %s ..." %(stbname, count ,dbname))
+        for i in range(ctbNum):
+            sql += " %s_%d using %s tags(%d)"%(stbName,i,stbName,i+1)
+            if (i > 0) and (i%100 == 0):
+                tsql.execute(sql)
+                sql = pre_create
+        if sql != pre_create:
+            tsql.execute(sql)
+
+        tdLog.debug("complete to create %d child tables in %s.%s" %(ctbNum, dbName, stbName))
+        return
+
+    def insert_data(self,tsql,dbName,stbName,ctbNum,rowsPerTbl,batchNum,startTs=0):
+        tdLog.debug("start to insert data ............")
+        tsql.execute("use %s" %dbName)
+        pre_insert = "insert into "
+        sql = pre_insert
+
+        if startTs == 0:
+            t = time.time()
+            startTs = int(round(t * 1000))
+
+        #tdLog.debug("doing insert data into stable:%s rows:%d ..."%(stbName, allRows))
+        rowsOfSql = 0
+        for i in range(ctbNum):
+            sql += " %s_%d values "%(stbName,i)
+            for j in range(rowsPerTbl):
+                sql += "(%d, %d, 'tmqrow_%d') "%(startTs + j, j, j)
+                rowsOfSql += 1
+                if (j > 0) and ((rowsOfSql == batchNum) or (j == rowsPerTbl - 1)):
+                    tsql.execute(sql)
+                    rowsOfSql = 0
+                    if j < rowsPerTbl - 1:
+                        sql = "insert into %s_%d values " %(stbName,i)
+                    else:
+                        sql = "insert into "
+        #end sql
+        if sql != pre_insert:
+            #print("insert sql:%s"%sql)
+            tsql.execute(sql)
+        tdLog.debug("insert data ............ [OK]")
+        return
+
+    def prepareEnv(self, **parameterDict):
+        # create new connector for my thread
+        tsql=self.newcur(parameterDict['cfg'], 'localhost', 6030)
+
+        if parameterDict["actionType"] == actionType.CREATE_DATABASE:
+            self.create_database(tsql, parameterDict["dbName"])
+        elif parameterDict["actionType"] == actionType.CREATE_STABLE:
+            self.create_stable(tsql, parameterDict["dbName"], parameterDict["stbName"])
+        elif parameterDict["actionType"] == actionType.CREATE_CTABLE:
+            self.create_ctables(tsql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"])
+        elif parameterDict["actionType"] == actionType.INSERT_DATA:
+            self.insert_data(tsql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"],\
+                             parameterDict["rowsPerTbl"],parameterDict["batchNum"])
+        else:
+            tdLog.exit("unsupported action type: %s"%(parameterDict["actionType"]))
+
+        return
+
+    def tmqCase10(self, cfgPath, buildPath):
+        tdLog.printNoPrefix("======== test case 10: ")
+
+        self.initConsumerTable()
+
+        # create and start thread
+        parameterDict = {'cfg': '', \
+                         'actionType': 0, \
+                         'dbName': 'db10', \
+                         'dropFlag': 1, \
+                         'vgroups': 4, \
+                         'replica': 1, \
+                         'stbName': 'stb1', \
+                         'ctbNum': 10, \
+                         'rowsPerTbl': 10000, \
+                         'batchNum': 100, \
+                         'startTs': 1640966400000}  # 2022-01-01 00:00:00.000
+        parameterDict['cfg'] = cfgPath
+
+        self.create_database(tdSql, parameterDict["dbName"])
+        self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"])
+        self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"])
+        self.insert_data(tdSql,\
+                         parameterDict["dbName"],\
+                         parameterDict["stbName"],\
+                         parameterDict["ctbNum"],\
+                         parameterDict["rowsPerTbl"],\
+                         parameterDict["batchNum"])
+
+        tdLog.info("create topics from stb1")
+        topicFromStb1 = 'topic_stb1'
+
+        tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName']))
+        consumerId = 0
+        expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"]
+        topicList = topicFromStb1
+        ifcheckdata = 0
+        ifManualCommit = 1
+        keyList = 'group.id:cgrp1,\
+                   enable.auto.commit:false,\
+                   auto.commit.interval.ms:6000,\
+                   auto.offset.reset:latest'
+        self.insertConsumerInfo(consumerId, 
expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("start consume 0 processor") + pollDelay = 100 + showMsg = 1 + showRow = 1 + self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) + + tdLog.info("start to check consume 0 result") + expectRows = 1 + resultList = self.selectConsumeResult(expectRows) + totalConsumeRows = 0 + for i in range(expectRows): + totalConsumeRows += resultList[i] + + if totalConsumeRows != 0: + tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0)) + tdLog.exit("tmq consume rows error!") + + tdLog.info("start consume 1 processor") + self.initConsumerInfoTable() + consumerId = 1 + ifManualCommit = 1 + self.insertConsumerInfo(consumerId, expectrowcnt-10000,topicList,keyList,ifcheckdata,ifManualCommit) + self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) + + tdLog.info("start one new thread to insert data") + parameterDict['actionType'] = actionType.INSERT_DATA + prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) + prepareEnvThread.start() + prepareEnvThread.join() + + tdLog.info("start to check consume 0 and 1 result") + expectRows = 2 + resultList = self.selectConsumeResult(expectRows) + totalConsumeRows = 0 + for i in range(expectRows): + totalConsumeRows += resultList[i] + + if totalConsumeRows != expectrowcnt-10000: + tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt-10000)) + tdLog.exit("tmq consume rows error!") + + tdLog.info("start consume 2 processor") + self.initConsumerInfoTable() + consumerId = 2 + ifManualCommit = 1 + self.insertConsumerInfo(consumerId, expectrowcnt+10000,topicList,keyList,ifcheckdata,ifManualCommit) + self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) + + tdLog.info("start one new thread to insert data") + parameterDict['actionType'] = actionType.INSERT_DATA + prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) + prepareEnvThread.start() + prepareEnvThread.join() + + tdLog.info("start to check consume 0 and 1 and 2 result") + expectRows = 3 + resultList = self.selectConsumeResult(expectRows) + totalConsumeRows = 0 + for i in range(expectRows): + totalConsumeRows += resultList[i] + + if totalConsumeRows != expectrowcnt*2: + tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt*2)) + tdLog.exit("tmq consume rows error!") + + tdSql.query("drop topic %s"%topicFromStb1) + + tdLog.printNoPrefix("======== test case 10 end ...... 
") + + def tmqCase11(self, cfgPath, buildPath): + tdLog.printNoPrefix("======== test case 11: ") + + self.initConsumerTable() + + # create and start thread + parameterDict = {'cfg': '', \ + 'actionType': 0, \ + 'dbName': 'db11', \ + 'dropFlag': 1, \ + 'vgroups': 4, \ + 'replica': 1, \ + 'stbName': 'stb1', \ + 'ctbNum': 10, \ + 'rowsPerTbl': 10000, \ + 'batchNum': 100, \ + 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 + parameterDict['cfg'] = cfgPath + + self.create_database(tdSql, parameterDict["dbName"]) + self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) + self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) + self.insert_data(tdSql,\ + parameterDict["dbName"],\ + parameterDict["stbName"],\ + parameterDict["ctbNum"],\ + parameterDict["rowsPerTbl"],\ + parameterDict["batchNum"]) + + tdLog.info("create topics from stb1") + topicFromStb1 = 'topic_stb1' + + tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) + consumerId = 0 + expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] + topicList = topicFromStb1 + ifcheckdata = 0 + ifManualCommit = 1 + keyList = 'group.id:cgrp1,\ + enable.auto.commit:false,\ + auto.commit.interval.ms:6000,\ + auto.offset.reset:none' + self.insertConsumerInfo(consumerId, expectrowcnt/4,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("start consume processor") + pollDelay = 100 + showMsg = 1 + showRow = 1 + self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) + + tdLog.info("start to check consume result") + expectRows = 1 + resultList = self.selectConsumeResult(expectRows) + totalConsumeRows = 0 + for i in range(expectRows): + totalConsumeRows += resultList[i] + + if totalConsumeRows != 0: + tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0)) + tdLog.exit("tmq consume rows error!") + + self.initConsumerInfoTable() + consumerId = 1 + keyList = 'group.id:cgrp1,\ + enable.auto.commit:false,\ + auto.commit.interval.ms:6000,\ + auto.offset.reset:none' + self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("again start consume processor") + self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) + + tdLog.info("again check consume result") + expectRows = 2 + resultList = self.selectConsumeResult(expectRows) + totalConsumeRows = 0 + for i in range(expectRows): + totalConsumeRows += resultList[i] + + if totalConsumeRows != 0: + tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0)) + tdLog.exit("tmq consume rows error!") + + tdSql.query("drop topic %s"%topicFromStb1) + + tdLog.printNoPrefix("======== test case 11 end ...... 
") + + def tmqCase12(self, cfgPath, buildPath): + tdLog.printNoPrefix("======== test case 12: ") + + self.initConsumerTable() + + # create and start thread + parameterDict = {'cfg': '', \ + 'actionType': 0, \ + 'dbName': 'db12', \ + 'dropFlag': 1, \ + 'vgroups': 4, \ + 'replica': 1, \ + 'stbName': 'stb1', \ + 'ctbNum': 10, \ + 'rowsPerTbl': 10000, \ + 'batchNum': 100, \ + 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 + parameterDict['cfg'] = cfgPath + + self.create_database(tdSql, parameterDict["dbName"]) + self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) + self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) + self.insert_data(tdSql,\ + parameterDict["dbName"],\ + parameterDict["stbName"],\ + parameterDict["ctbNum"],\ + parameterDict["rowsPerTbl"],\ + parameterDict["batchNum"]) + + tdLog.info("create topics from stb1") + topicFromStb1 = 'topic_stb1' + + tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) + consumerId = 0 + expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] + topicList = topicFromStb1 + ifcheckdata = 0 + ifManualCommit = 0 + keyList = 'group.id:cgrp1,\ + enable.auto.commit:false,\ + auto.commit.interval.ms:6000,\ + auto.offset.reset:earliest' + self.insertConsumerInfo(consumerId, expectrowcnt/4,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("start consume processor") + pollDelay = 100 + showMsg = 1 + showRow = 1 + self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) + + tdLog.info("start to check consume result") + expectRows = 1 + resultList = self.selectConsumeResult(expectRows) + totalConsumeRows = 0 + for i in range(expectRows): + totalConsumeRows += resultList[i] + + if totalConsumeRows != expectrowcnt/4: + tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt/4)) + tdLog.exit("tmq consume rows error!") + + self.initConsumerInfoTable() + consumerId = 1 + keyList = 'group.id:cgrp1,\ + enable.auto.commit:false,\ + auto.commit.interval.ms:6000,\ + auto.offset.reset:none' + self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("again start consume processor") + self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) + + tdLog.info("again check consume result") + expectRows = 2 + resultList = self.selectConsumeResult(expectRows) + totalConsumeRows = 0 + for i in range(expectRows): + totalConsumeRows += resultList[i] + + if totalConsumeRows != expectrowcnt/4: + tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt/4)) + tdLog.exit("tmq consume rows error!") + + tdSql.query("drop topic %s"%topicFromStb1) + + tdLog.printNoPrefix("======== test case 12 end ...... 
") + + def tmqCase13(self, cfgPath, buildPath): + tdLog.printNoPrefix("======== test case 13: ") + + self.initConsumerTable() + + # create and start thread + parameterDict = {'cfg': '', \ + 'actionType': 0, \ + 'dbName': 'db13', \ + 'dropFlag': 1, \ + 'vgroups': 4, \ + 'replica': 1, \ + 'stbName': 'stb1', \ + 'ctbNum': 10, \ + 'rowsPerTbl': 10000, \ + 'batchNum': 100, \ + 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 + parameterDict['cfg'] = cfgPath + + self.create_database(tdSql, parameterDict["dbName"]) + self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) + self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) + self.insert_data(tdSql,\ + parameterDict["dbName"],\ + parameterDict["stbName"],\ + parameterDict["ctbNum"],\ + parameterDict["rowsPerTbl"],\ + parameterDict["batchNum"]) + + tdLog.info("create topics from stb1") + topicFromStb1 = 'topic_stb1' + + tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) + consumerId = 0 + expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] + topicList = topicFromStb1 + ifcheckdata = 0 + ifManualCommit = 1 + keyList = 'group.id:cgrp1,\ + enable.auto.commit:false,\ + auto.commit.interval.ms:6000,\ + auto.offset.reset:earliest' + self.insertConsumerInfo(consumerId, expectrowcnt/4,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("start consume processor") + pollDelay = 100 + showMsg = 1 + showRow = 1 + self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) + + tdLog.info("start to check consume result") + expectRows = 1 + resultList = self.selectConsumeResult(expectRows) + totalConsumeRows = 0 + for i in range(expectRows): + totalConsumeRows += resultList[i] + + if totalConsumeRows != expectrowcnt/4: + tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt/4)) + tdLog.exit("tmq consume rows error!") + + self.initConsumerInfoTable() + consumerId = 1 + ifManualCommit = 1 + keyList = 'group.id:cgrp1,\ + enable.auto.commit:false,\ + auto.commit.interval.ms:6000,\ + auto.offset.reset:none' + self.insertConsumerInfo(consumerId, expectrowcnt/2,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("again start consume processor") + self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) + + tdLog.info("again check consume result") + expectRows = 2 + resultList = self.selectConsumeResult(expectRows) + totalConsumeRows = 0 + for i in range(expectRows): + totalConsumeRows += resultList[i] + + if totalConsumeRows != expectrowcnt*(1/2+1/4): + tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt*(1/2+1/4))) + tdLog.exit("tmq consume rows error!") + + self.initConsumerInfoTable() + consumerId = 2 + ifManualCommit = 1 + keyList = 'group.id:cgrp1,\ + enable.auto.commit:false,\ + auto.commit.interval.ms:6000,\ + auto.offset.reset:none' + self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("again start consume processor") + self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) + + tdLog.info("again check consume result") + expectRows = 3 + resultList = self.selectConsumeResult(expectRows) + totalConsumeRows = 0 + for i in range(expectRows): + totalConsumeRows += resultList[i] + + if totalConsumeRows != expectrowcnt: + tdLog.info("act 
diff --git a/tests/system-test/7-tmq/subscribeStb4.py b/tests/system-test/7-tmq/subscribeStb4.py new file mode 100644 index 0000000000000000000000000000000000000000..d06e14479667d172a2a7cc42f8019957d131f749 --- /dev/null +++ b/tests/system-test/7-tmq/subscribeStb4.py @@ -0,0 +1,351 @@ + +import taos +import sys +import time +import socket +import os +import threading +from enum import Enum + +from util.log import * +from util.sql import * +from util.cases import * +from util.dnodes import * + +class actionType(Enum): + CREATE_DATABASE = 0 + CREATE_STABLE = 1 + CREATE_CTABLE = 2 + INSERT_DATA = 3 + +class TDTestCase: + hostname = socket.gethostname() + #rpcDebugFlagVal = '143' + #clientCfgDict = {'serverPort': '', 'firstEp': '', 'secondEp':'', 'rpcDebugFlag':'135', 'fqdn':''} + #clientCfgDict["rpcDebugFlag"] = rpcDebugFlagVal + #updatecfgDict = {'clientCfg': {}, 'serverPort': '', 'firstEp': '', 'secondEp':'', 'rpcDebugFlag':'135', 'fqdn':''} + #updatecfgDict["rpcDebugFlag"] = rpcDebugFlagVal + #print ("===================: ", updatecfgDict) + + def init(self, conn, logSql): + tdLog.debug(f"start to execute {__file__}") + tdSql.init(conn.cursor()) + #tdSql.init(conn.cursor(), logSql) # output sql.txt file + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosd" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root) - len("/build/bin")] + break + return buildPath + + def newcur(self,cfg,host,port): + user = "root" + password = "taosdata" + con=taos.connect(host=host, user=user, password=password, config=cfg ,port=port) + cur=con.cursor() + print(cur) + return cur + + def initConsumerTable(self,cdbName='cdb'): + tdLog.info("create consume database, and consume info table, and consume result table") + tdSql.query("create database if not exists %s vgroups 1"%(cdbName)) + tdSql.query("drop table if exists %s.consumeinfo "%(cdbName)) + tdSql.query("drop table if exists %s.consumeresult "%(cdbName)) + + tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)"%cdbName) + tdSql.query("create table %s.consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)"%cdbName) + + def initConsumerInfoTable(self,cdbName='cdb'): + tdLog.info("drop consumeinfo table") + tdSql.query("drop table if exists %s.consumeinfo "%(cdbName)) + tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)"%cdbName) + + def insertConsumerInfo(self,consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifmanualcommit,cdbName='cdb'): + sql = "insert into %s.consumeinfo values "%cdbName + sql += "(now, %d, '%s', '%s', %d, %d, %d)"%(consumerId, topicList, keyList, expectrowcnt, ifcheckdata, ifmanualcommit) + tdLog.info("consume info sql: %s"%sql) + tdSql.query(sql) + + def selectConsumeResult(self,expectRows,cdbName='cdb'): + resultList=[] + while 1: + tdSql.query("select * from %s.consumeresult"%cdbName) + #tdLog.info("row: %d, %l64d, %l64d"%(tdSql.getData(0, 1),tdSql.getData(0, 2),tdSql.getData(0, 3)) + if tdSql.getRows() == expectRows: + break + else: + time.sleep(5) + + for i in range(expectRows): + tdLog.info ("consume id: %d, consume msgs: %d, consume rows: %d"%(tdSql.getData(i , 1), tdSql.getData(i , 2), tdSql.getData(i , 3))) + resultList.append(tdSql.getData(i , 3)) + + return resultList
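One caveat in the new `selectConsumeResult` helper above: it polls `consumeresult` in a bare `while 1` loop, so a consumer that crashes before reporting its result hangs the whole run. A bounded variant is easy to write if that ever becomes a problem (a sketch only; the `maxRetries` parameter is an invention of this note, not of the patch):

    def selectConsumeResultBounded(self, expectRows, cdbName='cdb', maxRetries=120):
        # Same polling protocol as selectConsumeResult, but give up after
        # maxRetries * 5 seconds instead of waiting forever.
        for _ in range(maxRetries):
            tdSql.query("select * from %s.consumeresult" % cdbName)
            if tdSql.getRows() == expectRows:
                return [tdSql.getData(i, 3) for i in range(expectRows)]
            time.sleep(5)
        tdLog.exit("timed out waiting for %d rows in %s.consumeresult" % (expectRows, cdbName))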
%s.consumeinfo "%(cdbName)) + tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)"%cdbName) + + def insertConsumerInfo(self,consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifmanualcommit,cdbName='cdb'): + sql = "insert into %s.consumeinfo values "%cdbName + sql += "(now, %d, '%s', '%s', %d, %d, %d)"%(consumerId, topicList, keyList, expectrowcnt, ifcheckdata, ifmanualcommit) + tdLog.info("consume info sql: %s"%sql) + tdSql.query(sql) + + def selectConsumeResult(self,expectRows,cdbName='cdb'): + resultList=[] + while 1: + tdSql.query("select * from %s.consumeresult"%cdbName) + #tdLog.info("row: %d, %l64d, %l64d"%(tdSql.getData(0, 1),tdSql.getData(0, 2),tdSql.getData(0, 3)) + if tdSql.getRows() == expectRows: + break + else: + time.sleep(5) + + for i in range(expectRows): + tdLog.info ("consume id: %d, consume msgs: %d, consume rows: %d"%(tdSql.getData(i , 1), tdSql.getData(i , 2), tdSql.getData(i , 3))) + resultList.append(tdSql.getData(i , 3)) + + return resultList + + def startTmqSimProcess(self,buildPath,cfgPath,pollDelay,dbName,showMsg=1,showRow=1,cdbName='cdb',valgrind=0): + shellCmd = 'nohup ' + if valgrind == 1: + logFile = cfgPath + '/../log/valgrind-tmq.log' + shellCmd = 'nohup valgrind --log-file=' + logFile + shellCmd += '--tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all --num-callers=20 -v --workaround-gcc296-bugs=yes ' + + shellCmd += buildPath + '/build/bin/tmq_sim -c ' + cfgPath + shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName) + shellCmd += "> /dev/null 2>&1 &" + tdLog.info(shellCmd) + os.system(shellCmd) + + def create_database(self,tsql, dbName,dropFlag=1,vgroups=4,replica=1): + if dropFlag == 1: + tsql.execute("drop database if exists %s"%(dbName)) + + tsql.execute("create database if not exists %s vgroups %d replica %d"%(dbName, vgroups, replica)) + tdLog.debug("complete to create database %s"%(dbName)) + return + + def create_stable(self,tsql, dbName,stbName): + tsql.execute("create table if not exists %s.%s (ts timestamp, c1 bigint, c2 binary(16)) tags(t1 int)"%(dbName, stbName)) + tdLog.debug("complete to create %s.%s" %(dbName, stbName)) + return + + def create_ctables(self,tsql, dbName,stbName,ctbNum): + tsql.execute("use %s" %dbName) + pre_create = "create table" + sql = pre_create + #tdLog.debug("doing create one stable %s and %d child table in %s ..." 
+ + def create_database(self,tsql, dbName,dropFlag=1,vgroups=4,replica=1): + if dropFlag == 1: + tsql.execute("drop database if exists %s"%(dbName)) + + tsql.execute("create database if not exists %s vgroups %d replica %d"%(dbName, vgroups, replica)) + tdLog.debug("complete to create database %s"%(dbName)) + return + + def create_stable(self,tsql, dbName,stbName): + tsql.execute("create table if not exists %s.%s (ts timestamp, c1 bigint, c2 binary(16)) tags(t1 int)"%(dbName, stbName)) + tdLog.debug("complete to create %s.%s" %(dbName, stbName)) + return + + def create_ctables(self,tsql, dbName,stbName,ctbNum): + tsql.execute("use %s" %dbName) + pre_create = "create table" + sql = pre_create + #tdLog.debug("doing create one stable %s and %d child table in %s ..." %(stbname, count ,dbname)) + for i in range(ctbNum): + sql += " %s_%d using %s tags(%d)"%(stbName,i,stbName,i+1) + if (i > 0) and (i%100 == 0): + tsql.execute(sql) + sql = pre_create + if sql != pre_create: + tsql.execute(sql) + + tdLog.debug("complete to create %d child tables in %s.%s" %(ctbNum, dbName, stbName)) + return + + def insert_data(self,tsql,dbName,stbName,ctbNum,rowsPerTbl,batchNum,startTs=0): + tdLog.debug("start to insert data ............") + tsql.execute("use %s" %dbName) + pre_insert = "insert into " + sql = pre_insert + + if startTs == 0: + t = time.time() + startTs = int(round(t * 1000)) + + #tdLog.debug("doing insert data into stable:%s rows:%d ..."%(stbName, allRows)) + rowsOfSql = 0 + for i in range(ctbNum): + sql += " %s_%d values "%(stbName,i) + for j in range(rowsPerTbl): + sql += "(%d, %d, 'tmqrow_%d') "%(startTs + j, j, j) + rowsOfSql += 1 + if (j > 0) and ((rowsOfSql == batchNum) or (j == rowsPerTbl - 1)): + tsql.execute(sql) + rowsOfSql = 0 + if j < rowsPerTbl - 1: + sql = "insert into %s_%d values " %(stbName,i) + else: + sql = "insert into " + #end sql + if sql != pre_insert: + #print("insert sql:%s"%sql) + tsql.execute(sql) + tdLog.debug("insert data ............ [OK]") + return
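`insert_data` above flushes a multi-row INSERT whenever `batchNum` rows have accumulated or a table's last row is reached. With the defaults used throughout this file that means 100 `execute()` round trips per child table and 1000 in total, which this quick check confirms (illustration only, not part of the patch):

    import math

    ctbNum, rowsPerTbl, batchNum = 10, 10000, 100
    flushesPerTable = math.ceil(rowsPerTbl / batchNum)   # 100 statements per table
    print(flushesPerTable, flushesPerTable * ctbNum)     # 100 1000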
+ + def prepareEnv(self, **parameterDict): + # create new connector for my thread + tsql=self.newcur(parameterDict['cfg'], 'localhost', 6030) + + if parameterDict["actionType"] == actionType.CREATE_DATABASE: + self.create_database(tsql, parameterDict["dbName"]) + elif parameterDict["actionType"] == actionType.CREATE_STABLE: + self.create_stable(tsql, parameterDict["dbName"], parameterDict["stbName"]) + elif parameterDict["actionType"] == actionType.CREATE_CTABLE: + self.create_ctables(tsql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) + elif parameterDict["actionType"] == actionType.INSERT_DATA: + self.insert_data(tsql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"],\ + parameterDict["rowsPerTbl"],parameterDict["batchNum"]) + else: + tdLog.exit("unsupported action: %s" % parameterDict["actionType"]) + + return + + def tmqCase1(self, cfgPath, buildPath): + tdLog.printNoPrefix("======== test case 1: ") + + self.initConsumerTable() + + autoCtbNum = 5 + autoCtbPrefix = 'autoCtb' + + # create and start thread + parameterDict = {'cfg': '', \ + 'actionType': 0, \ + 'dbName': 'db1', \ + 'dropFlag': 1, \ + 'vgroups': 4, \ + 'replica': 1, \ + 'stbName': 'stb1', \ + 'ctbNum': 10, \ + 'rowsPerTbl': 10000, \ + 'batchNum': 100, \ + 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 + parameterDict['cfg'] = cfgPath + + self.create_database(tdSql, parameterDict["dbName"]) + self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) + self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) + self.insert_data(tdSql,parameterDict["dbName"],parameterDict["stbName"],parameterDict["ctbNum"],parameterDict["rowsPerTbl"],parameterDict["batchNum"]) + + tdLog.info("create topics from stb1") + topicFromStb1 = 'topic_stb1' + + tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) + consumerId = 0 + expectrowcnt = parameterDict["rowsPerTbl"] * (autoCtbNum + parameterDict["ctbNum"]) + topicList = topicFromStb1 + ifcheckdata = 0 + ifManualCommit = 0 + keyList = 'group.id:cgrp1,\ + enable.auto.commit:false,\ + auto.commit.interval.ms:6000,\ + auto.offset.reset:earliest' + self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("start consume processor") + pollDelay = 100 + showMsg = 1 + showRow = 1 + self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) + + # add some new child tables using auto create mode + time.sleep(1) + for index in range(autoCtbNum): + tdSql.query("create table %s.%s_%d using %s.%s tags(%d)"%(parameterDict["dbName"], autoCtbPrefix, index, parameterDict["dbName"], parameterDict["stbName"], index)) + + self.insert_data(tdSql,parameterDict["dbName"],autoCtbPrefix,autoCtbNum,parameterDict["rowsPerTbl"],parameterDict["batchNum"]) + + tdLog.info("insert process end, and start to check consume result") + expectRows = 1 + resultList = self.selectConsumeResult(expectRows) + totalConsumeRows = 0 + for i in range(expectRows): + totalConsumeRows += resultList[i] + + if totalConsumeRows != expectrowcnt: + tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) + tdLog.exit("tmq consume rows error!") + + tdSql.query("drop topic %s"%topicFromStb1) + + tdLog.printNoPrefix("======== test case 1 end ...... ")
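Note how case 1 sizes its consume target: it counts the child tables created after the subscription as well, so the consumer only passes once rows from the auto-created tables are delivered too. In numbers (a check for the reader, not part of the patch):

    rowsPerTbl, ctbNum, autoCtbNum = 10000, 10, 5
    expectrowcnt = rowsPerTbl * (autoCtbNum + ctbNum)
    assert expectrowcnt == 150000

Case 2 below repeats the pattern with two topics and routes the auto-created tables under stb2.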
+ + def tmqCase2(self, cfgPath, buildPath): + tdLog.printNoPrefix("======== test case 2: ") + + self.initConsumerTable() + + autoCtbNum = 10 + autoCtbPrefix = 'autoCtb' + + # create and start thread + parameterDict = {'cfg': '', \ + 'actionType': 0, \ + 'dbName': 'db2', \ + 'dropFlag': 1, \ + 'vgroups': 4, \ + 'replica': 1, \ + 'stbName': 'stb1', \ + 'ctbNum': 10, \ + 'rowsPerTbl': 10000, \ + 'batchNum': 100, \ + 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 + parameterDict['cfg'] = cfgPath + + self.create_database(tdSql, parameterDict["dbName"]) + self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) + self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) + self.insert_data(tdSql,parameterDict["dbName"],parameterDict["stbName"],parameterDict["ctbNum"],parameterDict["rowsPerTbl"],parameterDict["batchNum"]) + + self.create_stable(tdSql, parameterDict["dbName"], 'stb2') + + tdLog.info("create topics from stb1/stb2") + topicFromStb1 = 'topic_stb1' + topicFromStb2 = 'topic_stb2' + + tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) + tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb2, parameterDict['dbName'], 'stb2')) + consumerId = 0 + expectrowcnt = parameterDict["rowsPerTbl"] * (autoCtbNum + parameterDict["ctbNum"]) + topicList = '%s, %s'%(topicFromStb1,topicFromStb2) + ifcheckdata = 0 + ifManualCommit = 0 + keyList = 'group.id:cgrp1,\ + enable.auto.commit:false,\ + auto.commit.interval.ms:6000,\ + auto.offset.reset:earliest' + self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("start consume processor") + pollDelay = 100 + showMsg = 1 + showRow = 1 + self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) + + # add some new child tables using auto create mode + time.sleep(1) + for index in range(autoCtbNum): + tdSql.query("create table %s.%s_%d using %s.%s tags(%d)"%(parameterDict["dbName"], autoCtbPrefix, index, parameterDict["dbName"], 'stb2', index)) + + self.insert_data(tdSql,parameterDict["dbName"],autoCtbPrefix,autoCtbNum,parameterDict["rowsPerTbl"],parameterDict["batchNum"]) + + tdLog.info("insert process end, and start to check consume result") + expectRows = 1 + resultList = self.selectConsumeResult(expectRows) + totalConsumeRows = 0 + for i in range(expectRows): + totalConsumeRows += resultList[i] + + if totalConsumeRows != expectrowcnt: + tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) + tdLog.exit("tmq consume rows error!") + + tdSql.query("drop topic %s"%topicFromStb1) + + tdLog.printNoPrefix("======== test case 2 end ...... ") + + def run(self): + tdSql.prepare() + + buildPath = self.getBuildPath() + if (buildPath == ""): + tdLog.exit("taosd not found!") + else: + tdLog.info("taosd found in %s" % buildPath) + cfgPath = buildPath + "/../sim/psim/cfg" + tdLog.info("cfgPath: %s" % cfgPath) + + self.tmqCase1(cfgPath, buildPath) + self.tmqCase2(cfgPath, buildPath) + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +event = threading.Event() + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/7-tmq/tmqDnode.py b/tests/system-test/7-tmq/tmqDnode.py index 4200b357a7e314720d9c5aeff8199a07dbcd45dd..bb287134b12010a6697e437622ec1ddcff11e7b9 100644 --- a/tests/system-test/7-tmq/tmqDnode.py +++ b/tests/system-test/7-tmq/tmqDnode.py @@ -29,8 +29,8 @@ class TDTestCase: def init(self, conn, logSql): tdLog.debug(f"start to excute {__file__}") - #tdSql.init(conn.cursor()) - tdSql.init(conn.cursor(), logSql) # output sql.txt file + tdSql.init(conn.cursor()) + #tdSql.init(conn.cursor(), logSql) # output sql.txt file def getBuildPath(self): selfPath = os.path.dirname(os.path.realpath(__file__)) @@ -118,13 +118,13 @@ class TDTestCase: tdLog.debug("complete to create %s.%s" %(dbName, stbName)) return - def create_ctables(self,tsql, dbName,stbName,ctbNum): + def create_ctables(self,tsql, dbName,stbName,ctbPrefix,ctbNum): tsql.execute("use %s" %dbName) pre_create = "create table" sql = pre_create #tdLog.debug("doing create one stable %s and %d child table in %s ..." 
%(stbname, count ,dbname)) for i in range(ctbNum): - sql += " %s_%d using %s tags(%d)"%(stbName,i,stbName,i+1) + sql += " %s_%d using %s tags(%d)"%(ctbPrefix,i,stbName,i+1) if (i > 0) and (i%100 == 0): tsql.execute(sql) sql = pre_create @@ -211,7 +211,7 @@ class TDTestCase: for i in range(ctbNum): sql += " %s.%s_%d using %s.%s tags (%d) values "%(dbName,ctbPrefix,i,dbName,stbName,i) for j in range(rowsPerTbl): - sql += "(%d, %d, 'tmqrow_%d') "%(startTs + j, j, j) + sql += "(%d, %d, 'autodata_%d') "%(startTs + j, j, j) rowsOfSql += 1 if (j > 0) and ((rowsOfSql == batchNum) or (j == rowsPerTbl - 1)): tsql.execute(sql) @@ -236,7 +236,7 @@ class TDTestCase: elif parameterDict["actionType"] == actionType.CREATE_STABLE: self.create_stable(tsql, parameterDict["dbName"], parameterDict["stbName"]) elif parameterDict["actionType"] == actionType.CREATE_CTABLE: - self.create_ctables(tsql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) + self.create_ctables(tsql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["stbName"], parameterDict["ctbNum"]) elif parameterDict["actionType"] == actionType.INSERT_DATA: self.insert_data(tsql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"],\ parameterDict["rowsPerTbl"],parameterDict["batchNum"]) @@ -257,16 +257,17 @@ class TDTestCase: 'dropFlag': 1, \ 'vgroups': 4, \ 'replica': 1, \ - 'stbName': 'stb1', \ + 'stbName': 'stb1', \ + 'ctbPrefix': 'stb1', \ 'ctbNum': 10, \ 'rowsPerTbl': 10000, \ - 'batchNum': 33, \ + 'batchNum': 23, \ 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 parameterDict['cfg'] = cfgPath self.create_database(tdSql, parameterDict["dbName"]) self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) + self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbPrefix"], parameterDict["ctbNum"]) self.insert_data_interlaceByMultiTbl(tdSql,parameterDict["dbName"],parameterDict["stbName"],parameterDict["ctbNum"],parameterDict["rowsPerTbl"],parameterDict["batchNum"]) tdLog.info("create topics from stb1") @@ -290,8 +291,8 @@ class TDTestCase: showRow = 1 self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - print("================= restart dnode ===========================") - time.sleep(3) + time.sleep(3) + tdLog.info("================= restart dnode ===========================") tdDnodes.stop(1) tdDnodes.start(1) time.sleep(2) @@ -323,36 +324,25 @@ class TDTestCase: 'dropFlag': 1, \ 'vgroups': 4, \ 'replica': 1, \ - 'stbName': 'stb1', \ + 'stbName': 'stb1', \ + 'ctbPrefix': 'stb1', \ 'ctbNum': 10, \ - 'rowsPerTbl': 15000, \ - 'batchNum': 100, \ + 'rowsPerTbl': 10000, \ + 'batchNum': 40, \ 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 parameterDict['cfg'] = cfgPath - parameterDict2 = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db2', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb2', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 16000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict2['cfg'] = cfgPath - self.create_database(tdSql, parameterDict["dbName"]) self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_stable(tdSql, parameterDict2["dbName"], parameterDict2["stbName"]) + self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], 
parameterDict["ctbPrefix"], parameterDict["ctbNum"]) + self.insert_data_interlaceByMultiTbl(tdSql,parameterDict["dbName"],parameterDict["stbName"],parameterDict["ctbNum"],parameterDict["rowsPerTbl"],parameterDict["batchNum"]) tdLog.info("create topics from stb1") topicFromStb1 = 'topic_stb1' tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] + expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] * 2 topicList = topicFromStb1 ifcheckdata = 0 ifManualCommit = 0 @@ -363,36 +353,16 @@ class TDTestCase: self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) tdLog.info("start consume processor") - pollDelay = 0 + pollDelay = 50 showMsg = 1 showRow = 1 self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - tdLog.info("start create child tables of stb1 and stb2") - parameterDict['actionType'] = actionType.CREATE_CTABLE - parameterDict2['actionType'] = actionType.CREATE_CTABLE - - prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) - prepareEnvThread.start() - prepareEnvThread2 = threading.Thread(target=self.prepareEnv, kwargs=parameterDict2) - prepareEnvThread2.start() - - prepareEnvThread.join() - prepareEnvThread2.join() + tdLog.info("create some new child table and insert data ") + parameterDict['batchNum'] = 100 + self.insert_data_with_autoCreateTbl(tdSql,parameterDict["dbName"],parameterDict["stbName"],"ctb",parameterDict["ctbNum"],parameterDict["rowsPerTbl"],parameterDict["batchNum"]) - tdLog.info("start insert data into child tables of stb1 and stb2") - parameterDict['actionType'] = actionType.INSERT_DATA - parameterDict2['actionType'] = actionType.INSERT_DATA - - prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) - prepareEnvThread.start() - prepareEnvThread2 = threading.Thread(target=self.prepareEnv, kwargs=parameterDict2) - prepareEnvThread2.start() - - prepareEnvThread.join() - prepareEnvThread2.join() - - print("================= restart dnode ===========================") + tdLog.info("================= restart dnode ===========================") tdDnodes.stop(1) tdDnodes.start(1) time.sleep(2) @@ -412,187 +382,35 @@ class TDTestCase: tdLog.printNoPrefix("======== test case 2 end ...... 
") - def tmqCase3(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 3: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db3', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 50000, \ - 'batchNum': 13, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,parameterDict["dbName"],parameterDict["stbName"],parameterDict["ctbNum"],parameterDict["rowsPerTbl"],parameterDict["batchNum"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 0 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:earliest' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume processor") - pollDelay = 5 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - time.sleep(5) - tdLog.info("drop som child table of stb1") - dropTblNum = 4 - tdSql.query("drop table if exists %s.%s_1"%(parameterDict["dbName"], parameterDict["stbName"])) - tdSql.query("drop table if exists %s.%s_2"%(parameterDict["dbName"], parameterDict["stbName"])) - tdSql.query("drop table if exists %s.%s_3"%(parameterDict["dbName"], parameterDict["stbName"])) - tdSql.query("drop table if exists %s.%s_4"%(parameterDict["dbName"], parameterDict["stbName"])) - - tdLog.info("drop some child tables, then start to check consume result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - remaindrowcnt = parameterDict["rowsPerTbl"] * (parameterDict["ctbNum"] - dropTblNum) - - if not (totalConsumeRows < expectrowcnt and totalConsumeRows > remaindrowcnt): - tdLog.info("act consume rows: %d, expect consume rows: between %d and %d"%(totalConsumeRows, remaindrowcnt, expectrowcnt)) - tdLog.exit("tmq consume rows error!") - tdSql.query("drop topic %s"%topicFromStb1) - tdLog.printNoPrefix("======== test case 3 end ...... 
") - def tmqCase4(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 4: ") + # 自动建表完成数据插入,启动消费 + def tmqCase3(self, cfgPath, buildPath): + tdLog.printNoPrefix("======== test case 3: ") self.initConsumerTable() # create and start thread parameterDict = {'cfg': '', \ 'actionType': 0, \ - 'dbName': 'db4', \ + 'dbName': 'db3', \ 'dropFlag': 1, \ 'vgroups': 4, \ 'replica': 1, \ 'stbName': 'stb1', \ + 'ctbPrefix': 'stb1', \ 'ctbNum': 10, \ 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ + 'batchNum': 40, \ 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 parameterDict['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,\ - parameterDict["dbName"],\ - parameterDict["stbName"],\ - parameterDict["ctbNum"],\ - parameterDict["rowsPerTbl"],\ - parameterDict["batchNum"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:earliest' - self.insertConsumerInfo(consumerId, expectrowcnt/4,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume processor") - pollDelay = 5 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start to check consume result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt/4: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt/4)) - tdLog.exit("tmq consume rows error!") - - self.initConsumerInfoTable() - consumerId = 1 - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("again start consume processor") - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("again check consume result") - expectRows = 2 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) - tdLog.exit("tmq consume rows error!") - tdSql.query("drop topic %s"%topicFromStb1) - - tdLog.printNoPrefix("======== test case 4 end ...... 
") - - def tmqCase5(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 5: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db5', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - self.create_database(tdSql, parameterDict["dbName"]) self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,\ - parameterDict["dbName"],\ - parameterDict["stbName"],\ - parameterDict["ctbNum"],\ - parameterDict["rowsPerTbl"],\ - parameterDict["batchNum"]) + #self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbPrefix"], parameterDict["ctbNum"]) + #self.insert_data_interlaceByMultiTbl(tdSql,parameterDict["dbName"],parameterDict["stbName"],parameterDict["ctbNum"],parameterDict["rowsPerTbl"],parameterDict["batchNum"]) + self.insert_data_with_autoCreateTbl(tdSql,parameterDict["dbName"],parameterDict["stbName"],"ctb",parameterDict["ctbNum"],parameterDict["rowsPerTbl"],parameterDict["batchNum"]) tdLog.info("create topics from stb1") topicFromStb1 = 'topic_stb1' @@ -607,121 +425,21 @@ class TDTestCase: enable.auto.commit:false,\ auto.commit.interval.ms:6000,\ auto.offset.reset:earliest' - self.insertConsumerInfo(consumerId, expectrowcnt/4,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume processor") - pollDelay = 5 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start to check consume result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt/4: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt/4)) - tdLog.exit("tmq consume rows error!") - - self.initConsumerInfoTable() - consumerId = 1 self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - tdLog.info("again start consume processor") - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("again check consume result") - expectRows = 2 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != (expectrowcnt * (1 + 1/4)): - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicFromStb1) - - tdLog.printNoPrefix("======== test case 5 end ...... 
") - - def tmqCase6(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 6: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db6', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,\ - parameterDict["dbName"],\ - parameterDict["stbName"],\ - parameterDict["ctbNum"],\ - parameterDict["rowsPerTbl"],\ - parameterDict["batchNum"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:earliest' - self.insertConsumerInfo(consumerId, expectrowcnt/4,topicList,keyList,ifcheckdata,ifManualCommit) - tdLog.info("start consume processor") - pollDelay = 5 + pollDelay = 10 showMsg = 1 showRow = 1 self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - tdLog.info("start to check consume result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt/4: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt/4)) - tdLog.exit("tmq consume rows error!") - - self.initConsumerInfoTable() - consumerId = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:latest' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("again start consume processor") - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) + # tdLog.info("================= restart dnode ===========================") + # tdDnodes.stop(1) + # tdDnodes.start(1) + # time.sleep(2) - tdLog.info("again check consume result") - expectRows = 2 + tdLog.info("insert process end, and start to check consume result") + expectRows = 1 resultList = self.selectConsumeResult(expectRows) totalConsumeRows = 0 for i in range(expectRows): @@ -733,702 +451,8 @@ class TDTestCase: tdSql.query("drop topic %s"%topicFromStb1) - tdLog.printNoPrefix("======== test case 6 end ...... 
") - - def tmqCase7(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 7: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db7', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,\ - parameterDict["dbName"],\ - parameterDict["stbName"],\ - parameterDict["ctbNum"],\ - parameterDict["rowsPerTbl"],\ - parameterDict["batchNum"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:latest' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume processor") - pollDelay = 5 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start to check consume result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != 0: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0)) - tdLog.exit("tmq consume rows error!") - - self.initConsumerInfoTable() - consumerId = 1 - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("again start consume processor") - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("again check consume result") - expectRows = 2 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != 0: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0)) - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicFromStb1) - - tdLog.printNoPrefix("======== test case 7 end ...... 
") - - def tmqCase8(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 8: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db8', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,\ - parameterDict["dbName"],\ - parameterDict["stbName"],\ - parameterDict["ctbNum"],\ - parameterDict["rowsPerTbl"],\ - parameterDict["batchNum"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:latest' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume 0 processor") - pollDelay = 10 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start to check consume 0 result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != 0: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0)) - tdLog.exit("tmq consume rows error!") - - tdLog.info("start consume 1 processor") - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start one new thread to insert data") - parameterDict['actionType'] = actionType.INSERT_DATA - prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) - prepareEnvThread.start() - prepareEnvThread.join() - - tdLog.info("start to check consume 0 and 1 result") - expectRows = 2 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) - tdLog.exit("tmq consume rows error!") - - tdLog.info("start consume 2 processor") - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start one new thread to insert data") - parameterDict['actionType'] = actionType.INSERT_DATA - prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) - prepareEnvThread.start() - prepareEnvThread.join() - - tdLog.info("start to check consume 0 and 1 and 2 result") - expectRows = 3 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt*2: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt*2)) - tdLog.exit("tmq consume rows 
error!") - - tdSql.query("drop topic %s"%topicFromStb1) - - tdLog.printNoPrefix("======== test case 8 end ...... ") - - def tmqCase9(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 9: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db9', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,\ - parameterDict["dbName"],\ - parameterDict["stbName"],\ - parameterDict["ctbNum"],\ - parameterDict["rowsPerTbl"],\ - parameterDict["batchNum"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:latest' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume 0 processor") - pollDelay = 10 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start to check consume 0 result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != 0: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0)) - tdLog.exit("tmq consume rows error!") - - tdLog.info("start consume 1 processor") - self.initConsumerInfoTable() - consumerId = 1 - ifManualCommit = 0 - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start one new thread to insert data") - parameterDict['actionType'] = actionType.INSERT_DATA - prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) - prepareEnvThread.start() - prepareEnvThread.join() - - tdLog.info("start to check consume 0 and 1 result") - expectRows = 2 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) - tdLog.exit("tmq consume rows error!") - - tdLog.info("start consume 2 processor") - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start one new thread to insert data") - parameterDict['actionType'] = actionType.INSERT_DATA - prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) - prepareEnvThread.start() - prepareEnvThread.join() - - tdLog.info("start to check consume 0 and 1 and 2 result") - expectRows = 3 - resultList = 
self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt*2: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt*2)) - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicFromStb1) - - tdLog.printNoPrefix("======== test case 9 end ...... ") - - def tmqCase10(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 10: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db10', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,\ - parameterDict["dbName"],\ - parameterDict["stbName"],\ - parameterDict["ctbNum"],\ - parameterDict["rowsPerTbl"],\ - parameterDict["batchNum"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:latest' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume 0 processor") - pollDelay = 10 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start to check consume 0 result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != 0: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0)) - tdLog.exit("tmq consume rows error!") - - tdLog.info("start consume 1 processor") - self.initConsumerInfoTable() - consumerId = 1 - ifManualCommit = 1 - self.insertConsumerInfo(consumerId, expectrowcnt-10000,topicList,keyList,ifcheckdata,ifManualCommit) - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start one new thread to insert data") - parameterDict['actionType'] = actionType.INSERT_DATA - prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) - prepareEnvThread.start() - prepareEnvThread.join() - - tdLog.info("start to check consume 0 and 1 result") - expectRows = 2 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt-10000: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt-10000)) - tdLog.exit("tmq consume rows error!") - - tdLog.info("start consume 2 processor") - self.initConsumerInfoTable() - consumerId = 2 - ifManualCommit = 1 - self.insertConsumerInfo(consumerId, 
expectrowcnt+10000,topicList,keyList,ifcheckdata,ifManualCommit) - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start one new thread to insert data") - parameterDict['actionType'] = actionType.INSERT_DATA - prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) - prepareEnvThread.start() - prepareEnvThread.join() - - tdLog.info("start to check consume 0 and 1 and 2 result") - expectRows = 3 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt*2: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt*2)) - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicFromStb1) - - tdLog.printNoPrefix("======== test case 10 end ...... ") - - def tmqCase11(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 11: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db11', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,\ - parameterDict["dbName"],\ - parameterDict["stbName"],\ - parameterDict["ctbNum"],\ - parameterDict["rowsPerTbl"],\ - parameterDict["batchNum"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:none' - self.insertConsumerInfo(consumerId, expectrowcnt/4,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume processor") - pollDelay = 5 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start to check consume result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != 0: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0)) - tdLog.exit("tmq consume rows error!") - - self.initConsumerInfoTable() - consumerId = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:none' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("again start consume processor") - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("again check consume result") - expectRows = 2 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != 0: - 
tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0)) - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicFromStb1) - - tdLog.printNoPrefix("======== test case 11 end ...... ") - - def tmqCase12(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 12: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db12', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,\ - parameterDict["dbName"],\ - parameterDict["stbName"],\ - parameterDict["ctbNum"],\ - parameterDict["rowsPerTbl"],\ - parameterDict["batchNum"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 0 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:earliest' - self.insertConsumerInfo(consumerId, expectrowcnt/4,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume processor") - pollDelay = 5 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start to check consume result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt/4: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt/4)) - tdLog.exit("tmq consume rows error!") - - self.initConsumerInfoTable() - consumerId = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:none' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("again start consume processor") - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("again check consume result") - expectRows = 2 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt/4: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt/4)) - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicFromStb1) - - tdLog.printNoPrefix("======== test case 12 end ...... 
") - - def tmqCase13(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 13: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db13', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,\ - parameterDict["dbName"],\ - parameterDict["stbName"],\ - parameterDict["ctbNum"],\ - parameterDict["rowsPerTbl"],\ - parameterDict["batchNum"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:earliest' - self.insertConsumerInfo(consumerId, expectrowcnt/4,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume processor") - pollDelay = 5 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start to check consume result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt/4: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt/4)) - tdLog.exit("tmq consume rows error!") - - self.initConsumerInfoTable() - consumerId = 1 - ifManualCommit = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:none' - self.insertConsumerInfo(consumerId, expectrowcnt/2,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("again start consume processor") - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("again check consume result") - expectRows = 2 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt*(1/2+1/4): - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt*(1/2+1/4))) - tdLog.exit("tmq consume rows error!") - - self.initConsumerInfoTable() - consumerId = 2 - ifManualCommit = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:none' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("again start consume processor") - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("again check consume result") - expectRows = 3 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt: - tdLog.info("act 
consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicFromStb1) + tdLog.printNoPrefix("======== test case 3 end ...... ") - tdLog.printNoPrefix("======== test case 13 end ...... ") def run(self): tdSql.prepare() @@ -1442,8 +466,8 @@ class TDTestCase: tdLog.info("cfgPath: %s" % cfgPath) # self.tmqCase1(cfgPath, buildPath) - self.tmqCase2(cfgPath, buildPath) - # self.tmqCase3(cfgPath, buildPath) + # self.tmqCase2(cfgPath, buildPath) + self.tmqCase3(cfgPath, buildPath) # self.tmqCase4(cfgPath, buildPath) # self.tmqCase5(cfgPath, buildPath) diff --git a/tests/system-test/7-tmq/tmqModule.py b/tests/system-test/7-tmq/tmqModule.py index 8e0d741040d4aa68cebe12cf20c25a81c4bd7ee2..ad5b4d70b35ba1ade92bb00c1903ce02340ebb19 100644 --- a/tests/system-test/7-tmq/tmqModule.py +++ b/tests/system-test/7-tmq/tmqModule.py @@ -29,8 +29,8 @@ class TDTestCase: def init(self, conn, logSql): tdLog.debug(f"start to excute {__file__}") - #tdSql.init(conn.cursor()) - tdSql.init(conn.cursor(), logSql) # output sql.txt file + tdSql.init(conn.cursor()) + #tdSql.init(conn.cursor(), logSql) # output sql.txt file def getBuildPath(self): selfPath = os.path.dirname(os.path.realpath(__file__)) diff --git a/tests/system-test/fulltest.bat b/tests/system-test/fulltest.bat new file mode 100644 index 0000000000000000000000000000000000000000..871c93c9824333acb6ba05474d9249fb9f8d8ed7 --- /dev/null +++ b/tests/system-test/fulltest.bat @@ -0,0 +1,4 @@ + +python3 .\test.py -f 0-others\taosShell.py +python3 .\test.py -f 0-others\taosShellError.py +python3 .\test.py -f 0-others\taosShellNetChk.py \ No newline at end of file diff --git a/tests/system-test/fulltest.sh b/tests/system-test/fulltest.sh index dd3ff510d0adabc8454cfd08e0cbaae668ea0711..8481417dbd86fc7fcbbb202eafd3701a7dbbf7e1 100755 --- a/tests/system-test/fulltest.sh +++ b/tests/system-test/fulltest.sh @@ -10,11 +10,17 @@ python3 ./test.py -f 0-others/taosdMonitor.py python3 ./test.py -f 0-others/udfTest.py python3 ./test.py -f 0-others/udf_create.py python3 ./test.py -f 0-others/udf_restart_taosd.py +python3 ./test.py -f 0-others/cachelast.py python3 ./test.py -f 0-others/user_control.py python3 ./test.py -f 0-others/fsync.py -#python3 ./test.py -f 2-query/between.py +python3 ./test.py -f 1-insert/influxdb_line_taosc_insert.py +python3 ./test.py -f 1-insert/opentsdb_telnet_line_taosc_insert.py +python3 ./test.py -f 1-insert/opentsdb_json_taosc_insert.py +#python3 ./test.py -f 1-insert/test_stmt_muti_insert_query.py + +python3 ./test.py -f 2-query/between.py python3 ./test.py -f 2-query/distinct.py python3 ./test.py -f 2-query/varchar.py python3 ./test.py -f 2-query/ltrim.py @@ -23,15 +29,19 @@ python3 ./test.py -f 2-query/length.py python3 ./test.py -f 2-query/char_length.py python3 ./test.py -f 2-query/upper.py python3 ./test.py -f 2-query/lower.py -#python3 ./test.py -f 2-query/join.py +python3 ./test.py -f 2-query/join.py +python3 ./test.py -f 2-query/join2.py python3 ./test.py -f 2-query/cast.py -#python3 ./test.py -f 2-query/concat.py -#python3 ./test.py -f 2-query/concat_ws.py +python3 ./test.py -f 2-query/union.py +python3 ./test.py -f 2-query/union1.py +python3 ./test.py -f 2-query/concat.py +python3 ./test.py -f 2-query/concat2.py +python3 ./test.py -f 2-query/concat_ws.py +python3 ./test.py -f 2-query/concat_ws2.py python3 ./test.py -f 2-query/check_tsdb.py -# python3 ./test.py -f 2-query/union.py -# python3 ./test.py -f 2-query/union2.py -# python3 ./test.py -f 
-# python3 ./test.py -f 2-query/union4.py
+python3 ./test.py -f 2-query/spread.py
+python3 ./test.py -f 2-query/hyperloglog.py
+
 
 python3 ./test.py -f 2-query/timezone.py
 python3 ./test.py -f 2-query/Now.py
@@ -40,12 +50,17 @@ python3 ./test.py -f 2-query/max.py
 python3 ./test.py -f 2-query/min.py
 python3 ./test.py -f 2-query/count.py
 python3 ./test.py -f 2-query/last.py
-#python3 ./test.py -f 2-query/To_iso8601.py
+python3 ./test.py -f 2-query/first.py
+python3 ./test.py -f 2-query/To_iso8601.py
 python3 ./test.py -f 2-query/To_unixtimestamp.py
 python3 ./test.py -f 2-query/timetruncate.py
-# python3 ./test.py -f 2-query/diff.py
+python3 ./test.py -f 2-query/diff.py
 python3 ./test.py -f 2-query/Timediff.py
+python3 ./test.py -f 2-query/top.py
+python3 ./test.py -f 2-query/bottom.py
+python3 ./test.py -f 2-query/percentile.py
+python3 ./test.py -f 2-query/apercentile.py
 
 python3 ./test.py -f 2-query/abs.py
 python3 ./test.py -f 2-query/ceil.py
 python3 ./test.py -f 2-query/floor.py
@@ -60,13 +75,30 @@ python3 ./test.py -f 2-query/arcsin.py
 python3 ./test.py -f 2-query/arccos.py
 python3 ./test.py -f 2-query/arctan.py
 python3 ./test.py -f 2-query/query_cols_tags_and_or.py
-#python3 ./test.py -f 2-query/nestedQuery.py
+# python3 ./test.py -f 2-query/nestedQuery.py
+# TD-15983: subquery outputs duplicate column names.
+# Xiangyang Guo: please update the following script.
+# python3 ./test.py -f 2-query/nestedQuery_str.py
+python3 ./test.py -f 2-query/avg.py
+python3 ./test.py -f 2-query/elapsed.py
+python3 ./test.py -f 2-query/csum.py
+python3 ./test.py -f 2-query/mavg.py
+python3 ./test.py -f 2-query/diff.py
+python3 ./test.py -f 2-query/sample.py
+python3 ./test.py -f 2-query/function_diff.py
+python3 ./test.py -f 2-query/unique.py
+python3 ./test.py -f 2-query/stateduration.py
+python3 ./test.py -f 2-query/function_stateduration.py
+python3 ./test.py -f 2-query/statecount.py
 
 python3 ./test.py -f 7-tmq/basic5.py
 python3 ./test.py -f 7-tmq/subscribeDb.py
+python3 ./test.py -f 7-tmq/subscribeDb0.py
 python3 ./test.py -f 7-tmq/subscribeDb1.py
 python3 ./test.py -f 7-tmq/subscribeStb.py
 python3 ./test.py -f 7-tmq/subscribeStb0.py
 python3 ./test.py -f 7-tmq/subscribeStb1.py
 python3 ./test.py -f 7-tmq/subscribeStb2.py
-
+python3 ./test.py -f 7-tmq/subscribeStb3.py
+python3 ./test.py -f 7-tmq/subscribeStb4.py
+python3 ./test.py -f 7-tmq/db.py
diff --git a/tests/system-test/insert.json b/tests/system-test/insert.json
deleted file mode 100644
index 5dea9eabfef35de733e70c7a7ac251b53f5c3563..0000000000000000000000000000000000000000
--- a/tests/system-test/insert.json
+++ /dev/null
@@ -1,76 +0,0 @@
-{
-    "filetype": "insert",
-    "cfgdir": "/etc/taos",
-    "host": "127.0.0.1",
-    "port": 6030,
-    "user": "root",
-    "password": "taosdata",
-    "thread_count": 16,
-    "create_table_thread_count": 1,
-    "result_file": "./insert_res.txt",
-    "confirm_parameter_prompt": "no",
-    "insert_interval": 0,
-    "interlace_rows": 0,
-    "num_of_records_per_req": 10000,
-    "prepared_rand": 10000,
-    "chinese": "no",
-    "databases": [
-        {
-            "dbinfo": {
-                "name": "db",
-                "drop": "yes",
-                "vgroups":4,
-                "replica": 1,
-                "precision": "ms"
-            },
-            "super_tables": [
-                {
-                    "name": "stb",
-                    "child_table_exists": "no",
-                    "childtable_count": 1000,
-                    "childtable_prefix": "stb_",
-                    "escape_character": "no",
-                    "auto_create_table": "no",
-                    "batch_create_tbl_num": 10,
-                    "data_source": "rand",
-                    "insert_mode": "taosc",
-                    "non_stop_mode": "no",
-                    "line_protocol": "line",
-                    "insert_rows": 100000,
-                    "interlace_rows": 0,
-                    "insert_interval": 0,
-                    "disorder_ratio": 0,
"disorder_ratio": 0, - "timestamp_step": 1, - "start_timestamp": "2020-10-01 00:00:00.000", - "use_sample_ts": "no", - "tags_file": "", - "columns": [ - { - "type": "FLOAT", - "name": "current", - "count": 4, - "max": 12, - "min": 8 - }, - { "type": "INT", "name": "voltage", "max": 225, "min": 215 }, - { "type": "FLOAT", "name": "phase", "max": 1, "min": 0 } - ], - "tags": [ - { - "type": "TINYINT", - "name": "groupid", - "max": 10, - "min": 1 - }, - { - "name": "location", - "type": "BINARY", - "len": 16, - "values": ["beijing", "shanghai"] - } - ] - } - ] - } - ] -} diff --git a/tests/system-test/test-all.bat b/tests/system-test/test-all.bat new file mode 100644 index 0000000000000000000000000000000000000000..ae6c98b06f3504b20e712630d40184b093143835 --- /dev/null +++ b/tests/system-test/test-all.bat @@ -0,0 +1,25 @@ +@echo off +SETLOCAL EnableDelayedExpansion +for /F "tokens=1,2 delims=#" %%a in ('"prompt #$H#$E# & echo on & for %%b in (1) do rem"') do ( set "DEL=%%a") +set /a a=0 +@REM echo Windows Taosd Test +@REM for /F "usebackq tokens=*" %%i in (fulltest.bat) do ( +@REM echo Processing %%i +@REM set /a a+=1 +@REM call %%i ARG1 > result_!a!.txt 2>error_!a!.txt +@REM if errorlevel 1 ( call :colorEcho 0c "failed" &echo. && exit 8 ) else ( call :colorEcho 0a "Success" &echo. ) +@REM ) +echo Linux Taosd Test +for /F "usebackq tokens=*" %%i in (fulltest.bat) do ( + echo Processing %%i + set /a a+=1 + call %%i ARG1 -m %1 > result_!a!.txt 2>error_!a!.txt + if errorlevel 1 ( call :colorEcho 0c "failed" &echo. && exit 8 ) else ( call :colorEcho 0a "Success" &echo. ) +) +exit + +:colorEcho +echo off + "%~2" +findstr /v /a:%1 /R "^$" "%~2" nul +del "%~2" > nul 2>&1i \ No newline at end of file diff --git a/tests/system-test/test.py b/tests/system-test/test.py index 31afd027ec3e53713479a402b0eb92fbf2e61db8..a11085708c42ec63672129d37636e30fb9140598 100644 --- a/tests/system-test/test.py +++ b/tests/system-test/test.py @@ -17,6 +17,9 @@ import sys import getopt import subprocess import time +import base64 +import json +import platform from distutils.log import warn as printf from fabric2 import Connection sys.path.append("../pytest") @@ -34,12 +37,17 @@ if __name__ == "__main__": masterIp = "" testCluster = False valgrind = 0 + killValgrind = 1 logSql = True stop = 0 restart = False windows = 0 - opts, args = getopt.gnu_getopt(sys.argv[1:], 'f:p:m:l:scghrw', [ - 'file=', 'path=', 'master', 'logSql', 'stop', 'cluster', 'valgrind', 'help', 'windows']) + if platform.system().lower() == 'windows': + windows = 1 + updateCfgDict = {} + execCmd = "" + opts, args = getopt.gnu_getopt(sys.argv[1:], 'f:p:m:l:scghrd:k:e:', [ + 'file=', 'path=', 'master', 'logSql', 'stop', 'cluster', 'valgrind', 'help', 'restart', 'updateCfgDict', 'killv', 'execCmd']) for key, value in opts: if key in ['-h', '--help']: tdLog.printNoPrefix( @@ -52,7 +60,9 @@ if __name__ == "__main__": tdLog.printNoPrefix('-c Test Cluster Flag') tdLog.printNoPrefix('-g valgrind Test Flag') tdLog.printNoPrefix('-r taosd restart test') - tdLog.printNoPrefix('-w taos on windows') + tdLog.printNoPrefix('-d update cfg dict, base64 json str') + tdLog.printNoPrefix('-k not kill valgrind processer') + tdLog.printNoPrefix('-e eval str to run') sys.exit(0) if key in ['-r', '--restart']: @@ -85,8 +95,27 @@ if __name__ == "__main__": if key in ['-s', '--stop']: stop = 1 - if key in ['-w', '--windows']: - windows = 1 + if key in ['-d', '--updateCfgDict']: + try: + updateCfgDict = eval(base64.b64decode(value.encode()).decode()) + except: + 
+                print('updateCfgDict convert fail.')
+                sys.exit(0)
+
+        if key in ['-k', '--killValgrind']:
+            killValgrind = 0
+
+        if key in ['-e', '--execCmd']:
+            try:
+                execCmd = base64.b64decode(value.encode()).decode()
+            except:
+                print('execCmd convert fail.')
+                sys.exit(0)
+
+    if not execCmd == "":
+        tdDnodes.init(deployPath)
+        exec(execCmd)
+        quit()
 
     if (stop != 0):
         if (valgrind == 0):
@@ -121,23 +150,52 @@ if __name__ == "__main__":
     if masterIp == "":
         host = '127.0.0.1'
     else:
-        host = masterIp
+        try:
+            config = eval(masterIp)
+            host = config["host"]
+        except Exception as r:
+            host = masterIp
 
     tdLog.info("Procedures for tdengine deployed in %s" % (host))
 
     if windows:
         tdCases.logSql(logSql)
        tdLog.info("Procedures for testing self-deployment")
-        td_clinet = TDSimClient("C:\\TDengine")
-        td_clinet.deploy()
-        remote_conn = Connection("root@%s"%host)
-        with remote_conn.cd('/var/lib/jenkins/workspace/TDinternal/community/tests/pytest'):
-            remote_conn.run("python3 ./test.py")
+        tdDnodes.init(deployPath, masterIp)
+        tdDnodes.setTestCluster(testCluster)
+        tdDnodes.setValgrind(valgrind)
+        tdDnodes.stopAll()
+        key_word = 'tdCases.addWindows'
+        is_test_framework = 0
+        try:
+            if key_word in open(fileName).read():
+                is_test_framework = 1
+        except:
+            pass
+        updateCfgDictStr = ''
+        if is_test_framework:
+            moduleName = fileName.replace(".py", "").replace(os.sep, ".")
+            uModule = importlib.import_module(moduleName)
+            try:
+                ucase = uModule.TDTestCase()
+                if ((json.dumps(updateCfgDict) == '{}') and (ucase.updatecfgDict is not None)):
+                    updateCfgDict = ucase.updatecfgDict
+                    updateCfgDictStr = "-d %s"%base64.b64encode(json.dumps(updateCfgDict).encode()).decode()
+            except:
+                pass
+        else:
+            pass
+        tdDnodes.deploy(1,updateCfgDict)
+        tdDnodes.start(1)
         conn = taos.connect(
             host="%s"%(host),
-            config=td_clinet.cfgDir)
-        tdCases.runOneWindows(conn, fileName)
+            config=tdDnodes.sim.getCfgDir())
+        if is_test_framework:
+            tdCases.runOneWindows(conn, fileName)
+        else:
+            tdCases.runAllWindows(conn)
     else:
-        tdDnodes.init(deployPath)
+        tdDnodes.setKillValgrind(killValgrind)
+        tdDnodes.init(deployPath, masterIp)
         tdDnodes.setTestCluster(testCluster)
         tdDnodes.setValgrind(valgrind)
         tdDnodes.stopAll()
@@ -153,16 +211,13 @@ if __name__ == "__main__":
             uModule = importlib.import_module(moduleName)
             try:
                 ucase = uModule.TDTestCase()
-                tdDnodes.deploy(1,ucase.updatecfgDict)
-            except :
-                tdDnodes.deploy(1,{})
-        else:
-            pass
-            tdDnodes.deploy(1,{})
+                if (json.dumps(updateCfgDict) == '{}'):
+                    updateCfgDict = ucase.updatecfgDict
+            except:
+                pass
+        tdDnodes.deploy(1,updateCfgDict)
         tdDnodes.start(1)
-
-        tdCases.logSql(logSql)
 
         if testCluster:
diff --git a/tests/test/c/CMakeLists.txt b/tests/test/c/CMakeLists.txt
index 964f9fee4abbdc244b83f50390c2b660be6b476c..505c290f2a7b45cdba530fcfbad3e42adefdea90 100644
--- a/tests/test/c/CMakeLists.txt
+++ b/tests/test/c/CMakeLists.txt
@@ -23,20 +23,18 @@ target_link_libraries(
     PUBLIC os
 )
 
-if(NOT TD_WINDOWS)
-    add_executable(sdbDump sdbDump.c)
-    target_link_libraries(
-        sdbDump
-        PUBLIC dnode
-        PUBLIC mnode
-        PUBLIC sdb
-        PUBLIC os
-    )
-    target_include_directories(
-        sdbDump
-        PUBLIC "${TD_SOURCE_DIR}/include/dnode/mnode"
-        PRIVATE "${TD_SOURCE_DIR}/source/dnode/mnode/impl/inc"
-        PRIVATE "${TD_SOURCE_DIR}/source/dnode/mnode/sdb/inc"
-        PRIVATE "${TD_SOURCE_DIR}/source/dnode/mgmt/node_mgmt/inc"
-    )
-ENDIF ()
\ No newline at end of file
+add_executable(sdbDump sdbDump.c)
+target_link_libraries(
+    sdbDump
+    PUBLIC dnode
+    PUBLIC mnode
+    PUBLIC sdb
+    PUBLIC os
+)
+target_include_directories(
+    sdbDump
+    PUBLIC "${TD_SOURCE_DIR}/include/dnode/mnode"
"${TD_SOURCE_DIR}/include/dnode/mnode" + PRIVATE "${TD_SOURCE_DIR}/source/dnode/mnode/impl/inc" + PRIVATE "${TD_SOURCE_DIR}/source/dnode/mnode/sdb/inc" + PRIVATE "${TD_SOURCE_DIR}/source/dnode/mgmt/node_mgmt/inc" +) \ No newline at end of file diff --git a/tests/test/c/sdbDump.c b/tests/test/c/sdbDump.c index 5641587c569aa2a2ea3f5a3f18c398f33979714c..e5986cf4dddaa7191e7047e76b51305baefc55c1 100644 --- a/tests/test/c/sdbDump.c +++ b/tests/test/c/sdbDump.c @@ -20,13 +20,13 @@ #include "tconfig.h" #include "tjson.h" -#define TMP_DNODE_DIR "/tmp/dumpsdb" -#define TMP_MNODE_DIR "/tmp/dumpsdb/mnode" -#define TMP_SDB_DATA_DIR "/tmp/dumpsdb/mnode/data" -#define TMP_SDB_SYNC_DIR "/tmp/dumpsdb/mnode/sync" -#define TMP_SDB_DATA_FILE "/tmp/dumpsdb/mnode/data/sdb.data" -#define TMP_SDB_RAFT_CFG_FILE "/tmp/dumpsdb/mnode/sync/raft_config.json" -#define TMP_SDB_RAFT_STORE_FILE "/tmp/dumpsdb/mnode/sync/raft_store.json" +#define TMP_DNODE_DIR TD_TMP_DIR_PATH "dumpsdb" +#define TMP_MNODE_DIR TD_TMP_DIR_PATH "dumpsdb" TD_DIRSEP "mnode" +#define TMP_SDB_DATA_DIR TD_TMP_DIR_PATH "dumpsdb" TD_DIRSEP "mnode" TD_DIRSEP "data" +#define TMP_SDB_SYNC_DIR TD_TMP_DIR_PATH "dumpsdb" TD_DIRSEP "mnode" TD_DIRSEP "sync" +#define TMP_SDB_DATA_FILE TD_TMP_DIR_PATH "dumpsdb" TD_DIRSEP "mnode" TD_DIRSEP "data" TD_DIRSEP "sdb.data" +#define TMP_SDB_RAFT_CFG_FILE TD_TMP_DIR_PATH "dumpsdb" TD_DIRSEP "mnode" TD_DIRSEP "sync" TD_DIRSEP "raft_config.json" +#define TMP_SDB_RAFT_STORE_FILE TD_TMP_DIR_PATH "dumpsdb" TD_DIRSEP "mnode" TD_DIRSEP "sync" TD_DIRSEP "raft_store.json" void reportStartup(const char *name, const char *desc) {} @@ -86,6 +86,7 @@ void dumpDb(SSdb *pSdb, SJson *json) { tjsonAddIntegerToObject(item, "cacheLastRow", pObj->cfg.cacheLastRow); tjsonAddIntegerToObject(item, "hashMethod", pObj->cfg.hashMethod); tjsonAddIntegerToObject(item, "numOfRetensions", pObj->cfg.numOfRetensions); + tjsonAddIntegerToObject(item, "schemaless", pObj->cfg.schemaless); sdbRelease(pSdb, pObj); } @@ -279,13 +280,11 @@ void dumpTrans(SSdb *pSdb, SJson *json) { tjsonAddIntegerToObject(item, "id", pObj->id); tjsonAddIntegerToObject(item, "stage", pObj->stage); tjsonAddIntegerToObject(item, "policy", pObj->policy); - tjsonAddIntegerToObject(item, "type", pObj->type); + tjsonAddIntegerToObject(item, "conflict", pObj->conflict); + tjsonAddIntegerToObject(item, "exec", pObj->exec); tjsonAddStringToObject(item, "createdTime", i642str(pObj->createdTime)); - tjsonAddStringToObject(item, "dbUid", i642str(pObj->dbUid)); tjsonAddStringToObject(item, "dbname", pObj->dbname); - tjsonAddIntegerToObject(item, "redoLogNum", taosArrayGetSize(pObj->redoLogs)); - tjsonAddIntegerToObject(item, "undoLogNum", taosArrayGetSize(pObj->undoLogs)); - tjsonAddIntegerToObject(item, "commitLogNum", taosArrayGetSize(pObj->commitLogs)); + tjsonAddIntegerToObject(item, "commitLogNum", taosArrayGetSize(pObj->commitActions)); tjsonAddIntegerToObject(item, "redoActionNum", taosArrayGetSize(pObj->redoActions)); tjsonAddIntegerToObject(item, "undoActionNum", taosArrayGetSize(pObj->undoActions)); @@ -412,13 +411,23 @@ int32_t parseArgs(int32_t argc, char *argv[]) { char dataFile[PATH_MAX] = {0}; char raftCfgFile[PATH_MAX] = {0}; char raftStoreFile[PATH_MAX] = {0}; - snprintf(dataFile, PATH_MAX, "%s/mnode/data/sdb.data", tsDataDir); - snprintf(raftCfgFile, PATH_MAX, "%s/mnode/sync/raft_config.json", tsDataDir); - snprintf(raftStoreFile, PATH_MAX, "%s/mnode/sync/raft_store.json", tsDataDir); + snprintf(dataFile, PATH_MAX, "%s" TD_DIRSEP "mnode" TD_DIRSEP "data" TD_DIRSEP 
"sdb.data", tsDataDir); + snprintf(raftCfgFile, PATH_MAX, "%s" TD_DIRSEP "mnode" TD_DIRSEP "sync" TD_DIRSEP "raft_config.json", tsDataDir); + snprintf(raftStoreFile, PATH_MAX, "%s" TD_DIRSEP "mnode" TD_DIRSEP "sync" TD_DIRSEP "raft_store.json", tsDataDir); char cmd[PATH_MAX * 2] = {0}; snprintf(cmd, sizeof(cmd), "rm -rf %s", TMP_DNODE_DIR); system(cmd); +#ifdef WINDOWS + taosMulMkDir(TMP_SDB_DATA_DIR); + taosMulMkDir(TMP_SDB_SYNC_DIR); + snprintf(cmd, sizeof(cmd), "cp %s %s 2>nul", dataFile, TMP_SDB_DATA_FILE); + system(cmd); + snprintf(cmd, sizeof(cmd), "cp %s %s 2>nul", raftCfgFile, TMP_SDB_RAFT_CFG_FILE); + system(cmd); + snprintf(cmd, sizeof(cmd), "cp %s %s 2>nul", raftStoreFile, TMP_SDB_RAFT_STORE_FILE); + system(cmd); +#else snprintf(cmd, sizeof(cmd), "mkdir -p %s", TMP_SDB_DATA_DIR); system(cmd); snprintf(cmd, sizeof(cmd), "mkdir -p %s", TMP_SDB_SYNC_DIR); @@ -429,6 +438,7 @@ int32_t parseArgs(int32_t argc, char *argv[]) { system(cmd); snprintf(cmd, sizeof(cmd), "cp %s %s 2>/dev/null", raftStoreFile, TMP_SDB_RAFT_STORE_FILE); system(cmd); +#endif strcpy(tsDataDir, TMP_DNODE_DIR); return 0; diff --git a/tests/test/c/tmqSim.c b/tests/test/c/tmqSim.c index accd1dd080ec21a33cde9d803a4c4e361cb96b16..d0b582f3758c2b37f1190f61cd9cd337ec708abb 100644 --- a/tests/test/c/tmqSim.c +++ b/tests/test/c/tmqSim.c @@ -32,6 +32,7 @@ #define MAX_SQL_STR_LEN (1024 * 1024) #define MAX_ROW_STR_LEN (16 * 1024) #define MAX_CONSUMER_THREAD_CNT (16) +#define MAX_VGROUP_CNT (32) typedef struct { TdThread thread; @@ -61,6 +62,10 @@ typedef struct { tmq_t* tmq; tmq_list_t* topicList; + + int32_t numOfVgroups; + int32_t rowsOfPerVgroups[MAX_VGROUP_CNT][2]; // [i][0]: vgroup id, [i][1]: rows of consume + int64_t ts; } SThreadInfo; @@ -69,7 +74,8 @@ typedef struct { char cdbName[32]; char dbName[32]; int32_t showMsgFlag; - int32_t showRowFlag; + int32_t showRowFlag; + int32_t saveRowFlag; int32_t consumeDelay; // unit s int32_t numOfThread; SThreadInfo stThreads[MAX_CONSUMER_THREAD_CNT]; @@ -77,6 +83,7 @@ typedef struct { static SConfInfo g_stConfInfo; TdFilePtr g_fp = NULL; +static int running = 1; // char* g_pRowValue = NULL; // TdFilePtr g_fp = NULL; @@ -93,6 +100,8 @@ static void printHelp() { printf("%s%s%s%d\n", indent, indent, "showMsgFlag, default is ", g_stConfInfo.showMsgFlag); printf("%s%s\n", indent, "-r"); printf("%s%s%s%d\n", indent, indent, "showRowFlag, default is ", g_stConfInfo.showRowFlag); + printf("%s%s\n", indent, "-s"); + printf("%s%s%s%d\n", indent, indent, "saveRowFlag, default is ", g_stConfInfo.saveRowFlag); printf("%s%s\n", indent, "-y"); printf("%s%s%s%d\n", indent, indent, "consume delay, default is s", g_stConfInfo.consumeDelay); exit(EXIT_SUCCESS); @@ -135,6 +144,7 @@ void saveConfigToLogFile() { taosFprintfFile(g_fp, "# cdbName: %s\n", g_stConfInfo.cdbName); taosFprintfFile(g_fp, "# showMsgFlag: %d\n", g_stConfInfo.showMsgFlag); taosFprintfFile(g_fp, "# showRowFlag: %d\n", g_stConfInfo.showRowFlag); + taosFprintfFile(g_fp, "# saveRowFlag: %d\n", g_stConfInfo.saveRowFlag); taosFprintfFile(g_fp, "# consumeDelay: %d\n", g_stConfInfo.consumeDelay); taosFprintfFile(g_fp, "# numOfThread: %d\n", g_stConfInfo.numOfThread); @@ -165,6 +175,7 @@ void parseArgument(int32_t argc, char* argv[]) { memset(&g_stConfInfo, 0, sizeof(SConfInfo)); g_stConfInfo.showMsgFlag = 0; g_stConfInfo.showRowFlag = 0; + g_stConfInfo.saveRowFlag = 0; g_stConfInfo.consumeDelay = 5; for (int32_t i = 1; i < argc; i++) { @@ -181,6 +192,8 @@ void parseArgument(int32_t argc, char* argv[]) { g_stConfInfo.showMsgFlag = 
     } else if (strcmp(argv[i], "-r") == 0) {
       g_stConfInfo.showRowFlag = atol(argv[++i]);
+    } else if (strcmp(argv[i], "-s") == 0) {
+      g_stConfInfo.saveRowFlag = atol(argv[++i]);
     } else if (strcmp(argv[i], "-y") == 0) {
       g_stConfInfo.consumeDelay = atol(argv[++i]);
     } else {
@@ -200,6 +213,7 @@ void parseArgument(int32_t argc, char* argv[]) {
   pPrint("%s consumeDelay:%d %s", GREEN, g_stConfInfo.consumeDelay, NC);
   pPrint("%s showMsgFlag:%d %s", GREEN, g_stConfInfo.showMsgFlag, NC);
   pPrint("%s showRowFlag:%d %s", GREEN, g_stConfInfo.showRowFlag, NC);
+  pPrint("%s saveRowFlag:%d %s", GREEN, g_stConfInfo.saveRowFlag, NC);
 #endif
 }
 
@@ -225,15 +239,64 @@ void ltrim(char* str) {
   // return str;
 }
 
-static int running = 1;
-static int32_t msg_process(TAOS_RES* msg, int64_t msgIndex, int32_t threadLable) {
+void addRowsToVgroupId(SThreadInfo* pInfo, int32_t vgroupId, int32_t rows) {
+  int32_t i;
+  for (i = 0; i < pInfo->numOfVgroups; i++) {
+    if (vgroupId == pInfo->rowsOfPerVgroups[i][0]) {
+      pInfo->rowsOfPerVgroups[i][1] += rows;
+      return;
+    }
+  }
+
+  // check the bound before appending a new entry, otherwise the array overflows
+  if (pInfo->numOfVgroups >= MAX_VGROUP_CNT) {
+    taosFprintfFile(g_fp, "====consume id %d, vgroup num %d exceeds MAX_VGROUP_CNT(%d). new vgroupId: %d\n", pInfo->consumerId, pInfo->numOfVgroups, MAX_VGROUP_CNT, vgroupId);
+    taosCloseFile(&g_fp);
+    exit(-1);
+  }
+
+  pInfo->rowsOfPerVgroups[pInfo->numOfVgroups][0] = vgroupId;
+  pInfo->rowsOfPerVgroups[pInfo->numOfVgroups][1] += rows;
+  pInfo->numOfVgroups++;
+
+  taosFprintfFile(g_fp, "consume id %d, add one new vgroup id: %d\n", pInfo->consumerId, vgroupId);
+}
+
+int32_t saveConsumeContentToTbl(SThreadInfo* pInfo, char* buf) {
+  char sqlStr[1100] = {0};
+
+  if (strlen(buf) > 1024) {
+    taosFprintfFile(g_fp, "the length of one row (%d) exceeds 1024\n", (int32_t)strlen(buf));
+    taosCloseFile(&g_fp);
+    exit(-1);
+  }
+
+  TAOS* pConn = taos_connect(NULL, "root", "taosdata", NULL, 0);
+  assert(pConn != NULL);
+
+  sprintf(sqlStr, "insert into %s.content_%d values (%"PRId64", \'%s\')", g_stConfInfo.cdbName, pInfo->consumerId, pInfo->ts++, buf);
+  TAOS_RES* pRes = taos_query(pConn, sqlStr);
+  if (taos_errno(pRes) != 0) {
+    pError("error in insert consume result, reason:%s\n", taos_errstr(pRes));
+    taosFprintfFile(g_fp, "error in insert consume result, reason:%s\n", taos_errstr(pRes));
+    taosCloseFile(&g_fp);
+    taos_free_result(pRes);
+    exit(-1);
+  }
+
+  taos_free_result(pRes);
+  taos_close(pConn);  // close the per-row connection so it is not leaked
+
+  return 0;
+}
+
+static int32_t msg_process(TAOS_RES* msg, SThreadInfo* pInfo, int64_t msgIndex) {
   char    buf[1024];
   int32_t totalRows = 0;
-
+
   // printf("topic: %s\n", tmq_get_topic_name(msg));
-  // printf("vg:%d\n", tmq_get_vgroup_id(msg));
-  taosFprintfFile(g_fp, "msg index:%" PRId64 ", threadLable: %d\n", msgIndex, threadLable);
-  taosFprintfFile(g_fp, "topic: %s, vgroupId: %d\n", tmq_get_topic_name(msg), tmq_get_vgroup_id(msg));
+  int32_t vgroupId = tmq_get_vgroup_id(msg);
+
+  taosFprintfFile(g_fp, "msg index:%" PRId64 ", consumerId: %d\n", msgIndex, pInfo->consumerId);
+  //taosFprintfFile(g_fp, "topic: %s, vgroupId: %d, tableName: %s\n", tmq_get_topic_name(msg), vgroupId, tmq_get_table_name(msg));
+  taosFprintfFile(g_fp, "topic: %s, vgroupId: %d\n", tmq_get_topic_name(msg), vgroupId);
 
   while (1) {
     TAOS_ROW row = taos_fetch_row(msg);
@@ -247,11 +310,16 @@ static int32_t msg_process(TAOS_RES* msg, SThreadInfo* pInfo, int64_t msgIndex)
 
     if (0 != g_stConfInfo.showRowFlag) {
       taosFprintfFile(g_fp, "rows[%d]: %s\n", totalRows, buf);
+      if (0 != g_stConfInfo.saveRowFlag) {
+        saveConsumeContentToTbl(pInfo, buf);
+      }
     }
 
     totalRows++;
   }
 
+  addRowsToVgroupId(pInfo, vgroupId, totalRows);
+
   return totalRows;
 }
 
@@ -344,6 +412,32 @@ int32_t saveConsumeResult(SThreadInfo* pInfo) {
 
   taos_free_result(pRes);
 
+  #if 0
+  // vgroups
+  for (i = 0; i < pInfo->numOfVgroups; i++) {
+    // schema: ts timestamp, consumerid int, consummsgcnt bigint, checkresult int
+    sprintf(sqlStr, "insert into %s.vgroup_%d values (%"PRId64", %d, %" PRId64 ", %" PRId64 ", %d)",
+            g_stConfInfo.cdbName,
+            now,
+            pInfo->consumerId,
+            pInfo->consumeMsgCnt,
+            pInfo->consumeRowCnt,
+            pInfo->checkresult);
+
+    char tmpString[128];
+    taosFprintfFile(g_fp, "%s, consume id %d result: %s\n", getCurrentTimeString(tmpString), pInfo->consumerId ,sqlStr);
+
+    TAOS_RES* pRes = taos_query(pConn, sqlStr);
+    if (taos_errno(pRes) != 0) {
+      pError("error in save consumeinfo, reason:%s\n", taos_errstr(pRes));
+      taos_free_result(pRes);
+      exit(-1);
+    }
+
+    taos_free_result(pRes);
+  }
+  #endif
+
   return 0;
 }
 
@@ -356,11 +450,13 @@ void loop_consume(SThreadInfo* pInfo) {
 
   char tmpString[128];
   taosFprintfFile(g_fp, "%s consumer id %d start to loop pull msg\n", getCurrentTimeString(tmpString), pInfo->consumerId);
 
+  pInfo->ts = taosGetTimestampMs();
+
   while (running) {
     TAOS_RES* tmqMsg = tmq_consumer_poll(pInfo->tmq, g_stConfInfo.consumeDelay * 1000);
     if (tmqMsg) {
       if (0 != g_stConfInfo.showMsgFlag) {
-        totalRows += msg_process(tmqMsg, totalMsgs, pInfo->consumerId);
+        totalRows += msg_process(tmqMsg, pInfo, totalMsgs);
       }
 
       taos_free_result(tmqMsg);
diff --git a/tools/shell/CMakeLists.txt b/tools/shell/CMakeLists.txt
index 284693795ee471ad2d631758970c3033dc8e0c6c..295fae68b30e85e787a7b8b491a354bcc3125709 100644
--- a/tools/shell/CMakeLists.txt
+++ b/tools/shell/CMakeLists.txt
@@ -1,9 +1,13 @@
 aux_source_directory(src SHELL_SRC)
 add_executable(shell ${SHELL_SRC})
 
+if(TD_WINDOWS)
+    target_link_libraries(shell PUBLIC taos_static)
+else()
+    target_link_libraries(shell PUBLIC taos)
+endif ()
 target_link_libraries(
   shell
-  PUBLIC taos
   PRIVATE os common transport util
 )
 target_include_directories(
diff --git a/tools/shell/src/shellArguments.c b/tools/shell/src/shellArguments.c
index 1639fd1ca681ab0bb980b2fd1fca8b34d58e15f3..cd6613b17a2d6d62ef4ce969b27004f1b5e7df72 100644
--- a/tools/shell/src/shellArguments.c
+++ b/tools/shell/src/shellArguments.c
@@ -36,6 +36,8 @@
 #define SHELL_VERSION "Print program version."
 #define SHELL_EMAIL ""
 
+static int32_t shellParseSingleOpt(int32_t key, char *arg);
+
 void shellPrintHelp() {
   char indent[] = "  ";
   printf("Usage: taos [OPTION...] \n\n");
@@ -90,6 +92,21 @@
   {0},
 };
 
+static error_t shellParseOpt(int32_t key, char *arg, struct argp_state *state) { return shellParseSingleOpt(key, arg); }
+
+static struct argp shellArgp = {shellOptions, shellParseOpt, "", ""};
+
+static void shellParseArgsUseArgp(int argc, char *argv[]) {
+  argp_program_version = shell.info.programVersion;
+  argp_parse(&shellArgp, argc, argv, 0, 0, &shell.args);
+}
+
+#endif
+
+#ifndef ARGP_ERR_UNKNOWN
+  #define ARGP_ERR_UNKNOWN E2BIG
+#endif
+
 static int32_t shellParseSingleOpt(int32_t key, char *arg) {
   SShellArgs *pArgs = &shell.args;
 
@@ -196,8 +213,8 @@ int32_t shellParseArgsWithoutArgp(int argc, char *argv[]) {
       }
       shellParseSingleOpt(key[1], val);
       i++;
-    } else if (key[1] == 'p' || key[1] == 'A' || key[1] == 'c' || key[1] == 'r' || key[1] == 'k' || key[1] == 't' ||
-               key[1] == 'V') {
+    } else if (key[1] == 'p' || key[1] == 'A' || key[1] == 'C' || key[1] == 'r' || key[1] == 'k' ||
+               key[1] == 't' || key[1] == 'V' || key[1] == '?' || key[1] == 1) {
       shellParseSingleOpt(key[1], NULL);
     } else {
       fprintf(stderr, "invalid option %s\n", key);
@@ -208,21 +225,10 @@ int32_t shellParseArgsWithoutArgp(int argc, char *argv[]) {
   return 0;
 }
 
-static error_t shellParseOpt(int32_t key, char *arg, struct argp_state *state) { return shellParseSingleOpt(key, arg); }
-
-static struct argp shellArgp = {shellOptions, shellParseOpt, "", ""};
-
-static void shellParseArgsUseArgp(int argc, char *argv[]) {
-  argp_program_version = shell.info.programVersion;
-  argp_parse(&shellArgp, argc, argv, 0, 0, &shell.args);
-}
-
-#endif
-
 static void shellInitArgs(int argc, char *argv[]) {
   for (int i = 1; i < argc; i++) {
     if (strncmp(argv[i], "-p", 2) == 0) {
-      printf(shell.info.clientVersion, tsOsName, taos_get_client_info());
+      // printf(shell.info.clientVersion, tsOsName, taos_get_client_info());
       if (strlen(argv[i]) == 2) {
         printf("Enter password: ");
         taosSetConsoleEcho(false);
@@ -341,7 +347,7 @@ int32_t shellParseArgs(int32_t argc, char *argv[]) {
 #if defined(_TD_WINDOWS_64) || defined(_TD_WINDOWS_32)
   shell.info.osname = "Windows";
   snprintf(shell.history.file, TSDB_FILENAME_LEN, "C:/TDengine/%s", SHELL_HISTORY_FILE);
-  // if (shellParseArgsWithoutArgp(argc, argv) != 0) return -1;
+  if (shellParseArgsWithoutArgp(argc, argv) != 0) return -1;
 #elif defined(_TD_DARWIN_64)
   shell.info.osname = "Darwin";
   snprintf(shell.history.file, TSDB_FILENAME_LEN, "%s/%s", getpwuid(getuid())->pw_dir, SHELL_HISTORY_FILE);
diff --git a/tools/shell/src/shellEngine.c b/tools/shell/src/shellEngine.c
index a866488d3ad2d239c47b3279f506e755737b88bf..851d9a2070b75f7863f8e55f5779e9bac90607db 100644
--- a/tools/shell/src/shellEngine.c
+++ b/tools/shell/src/shellEngine.c
@@ -587,6 +587,8 @@ int32_t shellCalcColWidth(TAOS_FIELD *field, int32_t precision) {
   int32_t width = (int32_t)strlen(field->name);
 
   switch (field->type) {
+    case TSDB_DATA_TYPE_NULL:
+      return TMAX(4, width);  // null
     case TSDB_DATA_TYPE_BOOL:
       return TMAX(5, width);  // 'false'
 
diff --git a/tools/taos-tools b/tools/taos-tools
index 4d83d8c62973506f760bcaa3a33f4665ed9046d0..717f5aaa5f0a1b4d92bb2ae68858fec554fb5eda 160000
--- a/tools/taos-tools
+++ b/tools/taos-tools
@@ -1 +1 @@
-Subproject commit 4d83d8c62973506f760bcaa3a33f4665ed9046d0
+Subproject commit 717f5aaa5f0a1b4d92bb2ae68858fec554fb5eda