diff --git a/CMakeLists.txt b/CMakeLists.txt
index 0436f5b25923927edaa7568ba57c7b948446f8b1..8fe6cc69e05e47b9d9ecfd1b8534d215f597e033 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -1,4 +1,4 @@
-cmake_minimum_required(VERSION 3.16)
+cmake_minimum_required(VERSION 3.0)
 
 project(
   TDengine
diff --git a/cmake/cmake.define b/cmake/cmake.define
index a8bab17aba8a412099b34c6d82c9787468bc89e8..5637c666b99294a536288741948877450cdb9eb5 100644
--- a/cmake/cmake.define
+++ b/cmake/cmake.define
@@ -1,4 +1,4 @@
-cmake_minimum_required(VERSION 3.16)
+cmake_minimum_required(VERSION 3.0)
 
 set(CMAKE_VERBOSE_MAKEFILE OFF)
 
diff --git a/cmake/cmake.platform b/cmake/cmake.platform
index 0312f92a5b4116cad03d4bb9c2e7556d7a35deb2..acf17e9427bc453e1ece67cca5cbfe45f8827337 100644
--- a/cmake/cmake.platform
+++ b/cmake/cmake.platform
@@ -1,4 +1,4 @@
-cmake_minimum_required(VERSION 3.16)
+cmake_minimum_required(VERSION 3.0)
 
 MESSAGE("Current system is ${CMAKE_SYSTEM_NAME}")
 
diff --git a/contrib/test/craft/raftMain.c b/contrib/test/craft/raftMain.c
index 12be3deb2e33aba9be9b45acd1595a749ab1b2c5..e1c66422b3b90b23ff8c6f01cf07aa8adace5983 100644
--- a/contrib/test/craft/raftMain.c
+++ b/contrib/test/craft/raftMain.c
@@ -243,7 +243,7 @@ void console(SRaftServer *pRaftServer) {
 
   } else if (strcmp(cmd, "dropnode") == 0) {
-    char host[HOST_LEN];
+    char host[HOST_LEN] = {0};
     uint32_t port;
     parseAddr(param1, host, HOST_LEN, &port);
     uint64_t rid = raftId(host, port);
@@ -258,7 +258,7 @@ void console(SRaftServer *pRaftServer) {
 
   } else if (strcmp(cmd, "put") == 0) {
-    char buf[256];
+    char buf[256] = {0};
     snprintf(buf, sizeof(buf), "%s--%s", param1, param2);
     putValue(&pRaftServer->raft, buf);
 
diff --git a/docs-cn/07-develop/03-insert-data/01-sql-writing.mdx b/docs-cn/07-develop/03-insert-data/01-sql-writing.mdx
index e63ffce6dd07366da99fe1f41d0a2a8d7a623f31..99a92573c87d0f90f699a8d1352619f4df4aef39 100644
--- a/docs-cn/07-develop/03-insert-data/01-sql-writing.mdx
+++ b/docs-cn/07-develop/03-insert-data/01-sql-writing.mdx
@@ -52,7 +52,7 @@ INSERT INTO d1001 VALUES (1538548685000, 10.3, 219, 0.31) (1538548695000, 12.6,
 
 :::info
 
-- 要提高写入效率,需要批量写入。一批写入的记录条数越多,插入效率就越高。但一条记录不能超过 16K,一条 SQL 语句总长度不能超过 1M 。
+- 要提高写入效率,需要批量写入。一批写入的记录条数越多,插入效率就越高。但一条记录不能超过 48K,一条 SQL 语句总长度不能超过 1M。
 - TDengine 支持多线程同时写入,要进一步提高写入速度,一个客户端需要打开 20 个以上的线程同时写。但线程数达到一定数量后,无法再提高,甚至还会下降,因为线程频繁切换,带来额外开销。
 
 :::
diff --git a/docs-cn/07-develop/06-subscribe.mdx b/docs-cn/07-develop/06-subscribe.mdx
index ad5561fa09087c4c562ac340506f56d756bd98b2..0f531e07c9dce7dbb03bacebf8e5cbefae82671f 100644
--- a/docs-cn/07-develop/06-subscribe.mdx
+++ b/docs-cn/07-develop/06-subscribe.mdx
@@ -145,7 +145,7 @@ void subscribe_callback(TAOS_SUB* tsub, TAOS_RES *res, void* param, int code) {
 taos_unsubscribe(tsub, keep);
 ```
 
-其第二个参数,用于决定是否在客户端保留订阅的进度信息。如果这个参数是**false**(**0**),那无论下次调用 `taos_subscribe` 时的 `restart` 参数是什么,订阅都只能重新开始。另外,进度信息的保存位置是 _{DataDir}/subscribe/_ 这个目录下,每个订阅有一个与其 `topic` 同名的文件,删掉某个文件,同样会导致下次创建其对应的订阅时只能重新开始。
+其第二个参数,用于决定是否在客户端保留订阅的进度信息。如果这个参数是**false**(**0**),那无论下次调用 `taos_subscribe` 时的 `restart` 参数是什么,订阅都只能重新开始。另外,进度信息的保存位置是 _{DataDir}/subscribe/_ 这个目录下(注:`taos.cfg` 配置文件中 `DataDir` 参数值默认为 **/var/lib/taos/**,但是 Windows 服务器上本身不存在该目录,所以需要在 Windows 的配置文件中修改 `DataDir` 参数值为相应的已存在目录),每个订阅有一个与其 `topic` 同名的文件,删掉某个文件,同样会导致下次创建其对应的订阅时只能重新开始。
 
 代码介绍完毕,我们来看一下实际的运行效果。假设:
diff --git a/docs-cn/12-taos-sql/01-data-type.md b/docs-cn/12-taos-sql/01-data-type.md
index be5c9a8cb4ed7f4ed9f9c7e11faf1b0f8f6e51b8..8ac6ee3b872bd31f616ea0aea3fd4a093abb4402 100644
--- a/docs-cn/12-taos-sql/01-data-type.md
+++ b/docs-cn/12-taos-sql/01-data-type.md
@@ -4,6 +4,8 @@ title: 支持的数据类型
 description: "TDengine 支持的数据类型: 时间戳、浮点型、JSON 类型等"
 ---
 
+## 时间戳
+
 使用 TDengine,最重要的是时间戳。创建并插入记录、查询历史记录的时候,均需要指定时间戳。时间戳有如下规则:
 
 - 时间格式为 `YYYY-MM-DD HH:mm:ss.MS`,默认时间分辨率为毫秒。比如:`2017-08-12 18:25:58.128`
@@ -12,39 +14,59 @@ description: "TDengine 支持的数据类型: 时间戳、浮点型、JSON 类
 - Epoch Time:时间戳也可以是一个长整数,表示从格林威治时间 1970-01-01 00:00:00.000 (UTC/GMT) 开始的毫秒数(相应地,如果所在 Database 的时间精度设置为“微秒”,则长整型格式的时间戳含义也就对应于从格林威治时间 1970-01-01 00:00:00.000 (UTC/GMT) 开始的微秒数;纳秒精度逻辑类似。)
 - 时间可以加减,比如 now-2h,表明查询时刻向前推 2 个小时(最近 2 小时)。数字后面的时间单位可以是 b(纳秒)、u(微秒)、a(毫秒)、s(秒)、m(分)、h(小时)、d(天)、w(周)。 比如 `select * from t1 where ts > now-2w and ts <= now-1w`,表示查询两周前整整一周的数据。在指定降采样操作(down sampling)的时间窗口(interval)时,时间单位还可以使用 n (自然月) 和 y (自然年)。
 
-TDengine 缺省的时间戳精度是毫秒,但通过在 `CREATE DATABASE` 时传递的 PRECISION 参数也可以支持微秒和纳秒。(从 2.1.5.0 版本开始支持纳秒精度)
+TDengine 缺省的时间戳精度是毫秒,但通过在 `CREATE DATABASE` 时传递的 PRECISION 参数也可以支持微秒和纳秒。
 
 ```sql
 CREATE DATABASE db_name PRECISION 'ns';
 ```
+## 数据类型
 
-在 TDengine 中,普通表的数据模型中可使用以下 10 种数据类型。
+在 TDengine 中,普通表的数据模型中可使用以下数据类型。
 
 | # | **类型** | **Bytes** | **说明** |
 | --- | :-------: | --------- | ---------------------------------------------------------------------------------------------------------- |
-| 1 | TIMESTAMP | 8 | 时间戳。缺省精度毫秒,可支持微秒和纳秒。从格林威治时间 1970-01-01 00:00:00.000 (UTC/GMT) 开始,计时不能早于该时间。(从 2.0.18.0 版本开始,已经去除了这一时间范围限制)(从 2.1.5.0 版本开始支持纳秒精度) |
-| 2 | INT | 4 | 整型,范围 [-2^31+1, 2^31-1], -2^31 用作 NULL |
-| 3 | BIGINT | 8 | 长整型,范围 [-2^63+1, 2^63-1], -2^63 用作 NULL |
-| 4 | FLOAT | 4 | 浮点型,有效位数 6-7,范围 [-3.4E38, 3.4E38] |
-| 5 | DOUBLE | 8 | 双精度浮点型,有效位数 15-16,范围 [-1.7E308, 1.7E308] |
-| 6 | BINARY | 自定义 | 记录单字节字符串,建议只用于处理 ASCII 可见字符,中文等多字节字符需使用 nchar。理论上,最长可以有 16374 字节。binary 仅支持字符串输入,字符串两端需使用单引号引用。使用时须指定大小,如 binary(20) 定义了最长为 20 个单字节字符的字符串,每个字符占 1 byte 的存储空间,总共固定占用 20 bytes 的空间,此时如果用户字符串超出 20 字节将会报错。对于字符串内的单引号,可以用转义字符反斜线加单引号来表示,即 `\’`。 |
-| 7 | SMALLINT | 2 | 短整型, 范围 [-32767, 32767], -32768 用作 NULL |
-| 8 | TINYINT | 1 | 单字节整型,范围 [-127, 127], -128 用作 NULL |
-| 9 | BOOL | 1 | 布尔型,{true, false} |
-| 10 | NCHAR | 自定义 | 记录包含多字节字符在内的字符串,如中文字符。每个 nchar 字符占用 4 bytes 的存储空间。字符串两端使用单引号引用,字符串内的单引号需用转义字符 `\’`。nchar 使用时须指定字符串大小,类型为 nchar(10) 的列表示此列的字符串最多存储 10 个 nchar 字符,会固定占用 40 bytes 的空间。如果用户字符串长度超出声明长度,将会报错。 |
-| 11 | JSON | | json 数据类型, 只有 tag 可以是 json 格式 |
-
-:::tip
-TDengine 对 SQL 语句中的英文字符不区分大小写,自动转化为小写执行。因此用户大小写敏感的字符串及密码,需要使用单引号将字符串引起来。
+| 1 | TIMESTAMP | 8 | 时间戳。缺省精度毫秒,可支持微秒和纳秒,详细说明见上节。 |
+| 2 | INT | 4 | 整型,范围 [-2^31, 2^31-1] |
+| 3 | INT UNSIGNED | 4 | 无符号整数,范围 [0, 2^32-1] |
+| 4 | BIGINT | 8 | 长整型,范围 [-2^63, 2^63-1] |
+| 5 | BIGINT UNSIGNED | 8 | 无符号长整型,范围 [0, 2^64-1] |
+| 6 | FLOAT | 4 | 浮点型,有效位数 6-7,范围 [-3.4E38, 3.4E38] |
+| 7 | DOUBLE | 8 | 双精度浮点型,有效位数 15-16,范围 [-1.7E308, 1.7E308] |
+| 8 | BINARY | 自定义 | 记录单字节字符串,建议只用于处理 ASCII 可见字符,中文等多字节字符需使用 nchar。 |
+| 9 | SMALLINT | 2 | 短整型,范围 [-32768, 32767] |
+| 10 | SMALLINT UNSIGNED | 2 | 无符号短整型,范围 [0, 65535] |
+| 11 | TINYINT | 1 | 单字节整型,范围 [-128, 127] |
+| 12 | TINYINT UNSIGNED | 1 | 无符号单字节整型,范围 [0, 255] |
+| 13 | BOOL | 1 | 布尔型,{true, false} |
+| 14 | NCHAR | 自定义 | 记录包含多字节字符在内的字符串,如中文字符。每个 nchar 字符占用 4 bytes 的存储空间。字符串两端使用单引号引用,字符串内的单引号需用转义字符 `\’`。nchar 使用时须指定字符串大小,类型为 nchar(10) 的列表示此列的字符串最多存储 10 个 nchar 字符,会固定占用 40 bytes 的空间。如果用户字符串长度超出声明长度,将会报错。 |
+| 15 | JSON | | json 数据类型, 只有 tag 可以是 json 格式 |
+| 16 | VARCHAR | 自定义 | BINARY 类型的别名 |
 
-:::
 
 :::note
-虽然 BINARY 类型在底层存储上支持字节型的二进制字符,但不同编程语言对二进制数据的处理方式并不保证一致,因此建议在 BINARY 类型中只存储 ASCII 可见字符,而避免存储不可见字符。多字节的数据,例如中文字符,则需要使用 NCHAR 类型进行保存。如果强行使用 BINARY 类型保存中文字符,虽然有时也能正常读写,但并不带有字符集信息,很容易出现数据乱码甚至数据损坏等情况。
+- TDengine 对 SQL 语句中的英文字符不区分大小写,自动转化为小写执行。因此用户大小写敏感的字符串及密码,需要使用单引号将字符串引起来。
+- 虽然 BINARY 类型在底层存储上支持字节型的二进制字符,但不同编程语言对二进制数据的处理方式并不保证一致,因此建议在 BINARY 类型中只存储 ASCII 可见字符,而避免存储不可见字符。多字节的数据,例如中文字符,则需要使用 NCHAR 类型进行保存。如果强行使用 BINARY 类型保存中文字符,虽然有时也能正常读写,但并不带有字符集信息,很容易出现数据乱码甚至数据损坏等情况。
+- BINARY 类型理论上最长可以有 16374 字节。binary 仅支持字符串输入,字符串两端需使用单引号引用。使用时须指定大小,如 binary(20) 定义了最长为 20 个单字节字符的字符串,每个字符占 1 byte 的存储空间,总共固定占用 20 bytes 的空间,此时如果用户字符串超出 20 字节将会报错。对于字符串内的单引号,可以用转义字符反斜线加单引号来表示,即 `\’`。
+- SQL 语句中的数值类型将依据是否存在小数点,或使用科学计数法表示,来判断数值类型是否为整型或者浮点型,因此在使用时要注意相应类型越界的情况。例如,9999999999999999999 会认为超过长整型的上边界而溢出,而 9999999999999999999.0 会被认为是有效的浮点数。
 
 :::
+
+## 常量
+TDengine 支持多个类型的常量,细节如下表:
+
+| # | **语法** | **类型** | **说明** |
+| --- | :-------: | --------- | -------------------------------------- |
+| 1 | [{+ \| -}]123 | BIGINT | 整型数值的字面量的类型均为 BIGINT。如果用户输入超过了 BIGINT 的表示范围,TDengine 按 BIGINT 对数值进行截断。|
+| 2 | 123.45 | DOUBLE | 浮点数值的字面量的类型均为 DOUBLE。TDengine 依据是否存在小数点,或使用科学计数法表示,来判断数值类型是否为整型或者浮点型。|
+| 3 | 1.2E3 | DOUBLE | 科学计数法的字面量的类型为 DOUBLE。|
+| 4 | 'abc' | BINARY | 单引号括住的内容为字符串字面值,其类型为 BINARY,BINARY 的 size 为实际的字符个数。对于字符串内的单引号,可以用转义字符反斜线加单引号来表示,即 \'。|
+| 5 | "abc" | BINARY | 双引号括住的内容为字符串字面值,其类型为 BINARY,BINARY 的 size 为实际的字符个数。对于字符串内的双引号,可以用转义字符反斜线加双引号来表示,即 \"。 |
+| 6 | TIMESTAMP {'literal' \| "literal"} | TIMESTAMP | TIMESTAMP 关键字表示后面的字符串字面量需要被解释为 TIMESTAMP 类型。字符串需要满足 YYYY-MM-DD HH:mm:ss.MS 格式,其时间分辨率为当前数据库的时间分辨率。 |
+| 7 | {TRUE \| FALSE} | BOOL | 布尔类型字面量。 |
+| 8 | {'' \| "" \| '\t' \| "\t" \| ' ' \| " " \| NULL } | -- | 空值字面量。可以用于任意类型。|
+
 :::note
-SQL 语句中的数值类型将依据是否存在小数点,或使用科学计数法表示,来判断数值类型是否为整型或者浮点型,因此在使用时要注意相应类型越界的情况。例如,9999999999999999999 会认为超过长整型的上边界而溢出,而 9999999999999999999.0 会被认为是有效的浮点数。
+- TDengine 依据是否存在小数点,或使用科学计数法表示,来判断数值类型是否为整型或者浮点型,因此在使用时要注意相应类型越界的情况。例如,9999999999999999999 会认为超过长整型的上边界而溢出,而 9999999999999999999.0 会被认为是有效的浮点数。
 
 :::
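上表的字面量规则可以用一个最小示意来说明(表 t1 及其列为假设示例,并非文档原有内容):

```sql
-- 整型字面量解析为 BIGINT,带小数点的解析为 DOUBLE,字符串字面量为 BINARY(注意单引号转义)
INSERT INTO t1 VALUES (1626006833639, 123, 123.45, 'it\'s a binary value');

-- TIMESTAMP 关键字后的字符串字面量按当前数据库的时间精度解释为时间戳
SELECT * FROM t1 WHERE ts > TIMESTAMP '2017-08-12 18:25:58.128';
```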
diff --git a/docs-cn/12-taos-sql/03-table.md b/docs-cn/12-taos-sql/03-table.md
index 675c157b3def0d670f771f55b767f3ca4f2a28af..d7235f312933ec46ed427d5da7e2c5a229fa2926 100644
--- a/docs-cn/12-taos-sql/03-table.md
+++ b/docs-cn/12-taos-sql/03-table.md
@@ -12,7 +12,7 @@ CREATE TABLE [IF NOT EXISTS] tb_name (timestamp_field_name TIMESTAMP, field1_nam
 
 1. 表的第一个字段必须是 TIMESTAMP,并且系统自动将其设为主键;
 2. 表名最大长度为 192;
-3. 表的每行长度不能超过 16k 个字符;(注意:每个 BINARY/NCHAR 类型的列还会额外占用 2 个字节的存储位置)
+3. 表的每行长度不能超过 48KB;(注意:每个 BINARY/NCHAR 类型的列还会额外占用 2 个字节的存储位置)
 4. 子表名只能由字母、数字和下划线组成,且不能以数字开头,不区分大小写
 5. 使用数据类型 binary 或 nchar,需指定其最长的字节数,如 binary(20),表示 20 字节;
 6. 为了兼容支持更多形式的表名,TDengine 引入新的转义符 "\`",可以让表名与关键词不冲突,同时不受限于上述表名称合法性约束检查。但是同样具有长度限制要求。使用转义字符以后,不再对转义字符中的内容进行大小写统一。
diff --git a/docs-cn/12-taos-sql/04-stable.md b/docs-cn/12-taos-sql/04-stable.md
index a3c227317c85917b64b2477994d335710610ec70..3901427736e80bc8dd0dd87b454947af6e586561 100644
--- a/docs-cn/12-taos-sql/04-stable.md
+++ b/docs-cn/12-taos-sql/04-stable.md
@@ -86,7 +86,7 @@ ALTER STABLE stb_name MODIFY COLUMN field_name data_type(length);
 ALTER STABLE stb_name ADD TAG new_tag_name tag_type;
 ```
 
-为 STable 增加一个新的标签,并指定新标签的类型。标签总数不能超过 128 个,总长度不超过 16k 个字符。
+为 STable 增加一个新的标签,并指定新标签的类型。标签总数不能超过 128 个,总长度不超过 16KB。
 
 ### 删除标签
diff --git a/docs-cn/12-taos-sql/07-function.md b/docs-cn/12-taos-sql/07-function.md
index f6e564419ddaa18931b0f0e0e4e7b5b3219a92f6..b924aad042a7dd8d3a81c01030ee587f485e8da4 100644
--- a/docs-cn/12-taos-sql/07-function.md
+++ b/docs-cn/12-taos-sql/07-function.md
@@ -261,6 +261,92 @@ taos> select hyperloglog(dbig) from shll;
 Query OK, 1 row(s) in set (0.008388s)
 ```
 
+### HISTOGRAM
+
+```
+SELECT HISTOGRAM(field_name, bin_type, bin_description, normalized) FROM tb_name [WHERE clause];
+```
+
+**功能说明**:统计数据按照用户指定区间的分布。
+
+**返回结果类型**:如归一化参数 normalized 设置为 1,返回结果为双精度浮点类型 DOUBLE,否则为长整型 INT64。
+
+**应用字段**:数值型字段。
+
+**支持的版本**:2.6.0.0 及以后的版本。
+
+**适用于**: 表和超级表。
+
+**说明**:
+1. bin_type 用户指定的分桶类型,有效输入类型为 "user_input"、"linear_bin"、"log_bin"。
+2. bin_description 描述如何生成分桶区间,针对三种桶类型,分别为以下描述格式(均为 JSON 格式字符串):
+   - "user_input": "[1, 3, 5, 7]"
+     用户指定 bin 的具体数值。
+
+   - "linear_bin": "{"start": 0.0, "width": 5.0, "count": 5, "infinity": true}"
+     "start" 表示数据起始点,"width" 表示每次 bin 偏移量, "count" 为 bin 的总数,"infinity" 表示是否添加(-inf, inf)作为区间起点跟终点,
+     生成区间为[-inf, 0.0, 5.0, 10.0, 15.0, 20.0, +inf]。
+
+   - "log_bin": "{"start":1.0, "factor": 2.0, "count": 5, "infinity": true}"
+     "start" 表示数据起始点,"factor" 表示按指数递增的因子,"count" 为 bin 的总数,"infinity" 表示是否添加(-inf, inf)作为区间起点跟终点,
+     生成区间为[-inf, 1.0, 2.0, 4.0, 8.0, 16.0, +inf]。
+3. normalized 是否将返回结果归一化到 0~1 之间。有效输入为 0 和 1。
+
+**示例**:
+
+```mysql
+taos> SELECT HISTOGRAM(voltage, "user_input", "[1,3,5,7]", 1) FROM meters;
+ histogram(voltage, "user_input", "[1,3,5,7]", 1) |
+=======================================================
+ {"lower_bin":1, "upper_bin":3, "count":0.333333} |
+ {"lower_bin":3, "upper_bin":5, "count":0.333333} |
+ {"lower_bin":5, "upper_bin":7, "count":0.333333} |
+Query OK, 3 row(s) in set (0.004273s)
+
+taos> SELECT HISTOGRAM(voltage, 'linear_bin', '{"start": 1, "width": 3, "count": 3, "infinity": false}', 0) FROM meters;
+ histogram(voltage, 'linear_bin', '{"start": 1, "width": 3, " |
+===================================================================
+ {"lower_bin":1, "upper_bin":4, "count":3} |
+ {"lower_bin":4, "upper_bin":7, "count":3} |
+ {"lower_bin":7, "upper_bin":10, "count":3} |
+Query OK, 3 row(s) in set (0.004887s)
+
+taos> SELECT HISTOGRAM(voltage, 'log_bin', '{"start": 1, "factor": 3, "count": 3, "infinity": true}', 0) FROM meters;
+ histogram(voltage, 'log_bin', '{"start": 1, "factor": 3, "count" |
+===================================================================
+ {"lower_bin":-inf, "upper_bin":1, "count":3} |
+ {"lower_bin":1, "upper_bin":3, "count":2} |
+ {"lower_bin":3, "upper_bin":9, "count":6} |
+ {"lower_bin":9, "upper_bin":27, "count":3} |
+ {"lower_bin":27, "upper_bin":inf, "count":1} |
+```
+
+### ELAPSED
+
+```mysql
+SELECT ELAPSED(field_name[, time_unit]) FROM { tb_name | stb_name } [WHERE clause] [INTERVAL(interval [, offset]) [SLIDING sliding]];
+```
+
+**功能说明**:elapsed 函数表达了统计周期内连续的时间长度,和 twa 函数配合使用可以计算统计曲线下的面积。在通过 INTERVAL 子句指定窗口的情况下,统计在给定时间范围内的每个窗口内有数据覆盖的时间范围;如果没有 INTERVAL 子句,则返回整个给定时间范围内的有数据覆盖的时间范围。注意,ELAPSED 返回的并不是时间范围的绝对值,而是绝对值除以 time_unit 所得到的单位个数。
+
+**返回结果类型**:Double
+
+**应用字段**:Timestamp 类型
+
+**支持的版本**:2.6.0.0 及以后的版本。
+
+**适用于**: 表,超级表,嵌套查询的外层查询
+
+**说明**:
+- field_name 参数只能是表的第一列,即 timestamp 主键列。
+- 按 time_unit 参数指定的时间单位返回,最小是数据库的时间分辨率。time_unit 参数未指定时,以数据库的时间分辨率为时间单位。
+- 可以和 interval 组合使用,返回每个时间窗口的时间戳差值。需要特别注意的是,除第一个时间窗口和最后一个时间窗口外,中间窗口的时间戳差值均为窗口长度。
+- order by asc/desc 不影响差值的计算结果。
+- 对于超级表,需要和 group by tbname 子句组合使用,不可以直接使用。
+- 对于普通表,不支持和 group by 子句组合使用。
+- 对于嵌套查询,仅当内层查询会输出隐式时间戳列时有效。例如 select elapsed(ts) from (select diff(value) from sub1) 语句,diff 函数会让内层查询输出隐式时间戳列,此为主键列,可以用于 elapsed 函数的第一个参数。相反,例如 select elapsed(ts) from (select * from sub1) 语句,ts 列输出到外层时已经没有了主键列的含义,无法使用 elapsed 函数。此外,elapsed 函数作为一个与时间线强依赖的函数,形如 select elapsed(ts) from (select diff(value) from st group by tbname) 尽管会返回一条计算结果,但并无实际意义,这种用法后续也将被限制。
+- 不支持与 leastsquares、diff、derivative、top、bottom、last_row、interp 等函数混合使用。
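一个最小示意(meters 超级表沿用文档示例 schema,时间范围为假设值):

```sql
-- 按 10 分钟窗口统计每个子表有数据覆盖的时长,结果以 1 分钟为单位
SELECT ELAPSED(ts, 1m) FROM meters
  WHERE ts >= '2022-03-01 00:00:00' AND ts < '2022-03-02 00:00:00'
  INTERVAL(10m) GROUP BY tbname;
```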
 
 ## 选择函数
 
 在使用所有的选择函数的时候,可以同时指定输出 ts 列或标签列(包括 tbname),这样就可以方便地知道被选出的值是源于哪个数据行的。
 
@@ -698,7 +784,7 @@ SELECT INTERP(field_name) FROM { tb_name | stb_name } WHERE ts='timestamp' [FILL
 SELECT TAIL(field_name, k, offset_val) FROM {tb_name | stb_name} [WHERE clause];
 ```
 
-**功能说明**:返回跳过最后 offset_value 个,然后取连续 k 个记录,不忽略 NULL 值。offset_val 可以不输入。此时返回最后的 k 个记录。当有 offset_val 输入的情况下,该函数功能等效于 `order by ts desc LIMIT k OFFSET offset_val`。
+**功能说明**:返回跳过最后 offset_val 个,然后取连续 k 个记录,不忽略 NULL 值。offset_val 可以不输入。此时返回最后的 k 个记录。当有 offset_val 输入的情况下,该函数功能等效于 `order by ts desc LIMIT k OFFSET offset_val`。
 
 **参数范围**:k: [1,100] offset_val: [0,100]。
 
@@ -1378,35 +1464,6 @@ SELECT ROUND(field_name) FROM { tb_name | stb_name } [WHERE clause];
 - 该函数适用于内层查询和外层查询。
 - 版本2.6.0.x后支持
 
-### 四则运算
-
-```
-SELECT field_name [+|-|*|/|%][Value|field_name] FROM { tb_name | stb_name } [WHERE clause];
-```
-
-**功能说明**:统计表/超级表中某列或多列间的值加、减、乘、除、取余计算结果。
-
-**返回数据类型**:双精度浮点数。
-
-**应用字段**:不能应用在 timestamp、binary、nchar、bool 类型字段。
-
-**适用于**:表、超级表。
-
-**使用说明**:
-
-- 支持两列或多列之间进行计算,可使用括号控制计算优先级;
-- NULL 字段不参与计算,如果参与计算的某行中包含 NULL,该行的计算结果为 NULL。
-
-```
-taos> SELECT current + voltage * phase FROM d1001;
-(current+(voltage*phase)) |
-============================
- 78.190000713 |
- 84.540003240 |
- 80.810000718 |
-Query OK, 3 row(s) in set (0.001046s)
-```
-
 ### STATECOUNT
 
 ```
@@ -1766,6 +1823,8 @@ SELECT TIMEDIFF(ts_val1 | datetime_string1 | ts_col1, ts_val2 | datetime_string2
   1u(微秒),1a(毫秒),1s(秒),1m(分),1h(小时),1d(天)。
 - 如果时间单位 time_unit 未指定, 返回的时间差值精度与当前 DATABASE 设置的时间精度一致。
 
+**支持的版本**:2.6.0.0 及以后的版本。
+
 **示例**:
 
 ```sql
diff --git a/docs-cn/12-taos-sql/09-limit.md b/docs-cn/12-taos-sql/09-limit.md
index 3c86a3862174377e6a00d046fb69627c773fe76e..7673e24a83cc1ba5335b11f29803cf9f3eae26e5 100644
--- a/docs-cn/12-taos-sql/09-limit.md
+++ b/docs-cn/12-taos-sql/09-limit.md
@@ -7,9 +7,9 @@ title: 边界限制
 
 - 数据库名最大长度为 32。
 - 表名最大长度为 192,不包括数据库名前缀和分隔符
-- 每行数据最大长度 16k 个字符, 从 2.1.7.0 版本开始,每行数据最大长度 48k 个字符(注意:数据行内每个 BINARY/NCHAR 类型的列还会额外占用 2 个字节的存储位置)。
+- 每行数据最大长度 48KB (注意:数据行内每个 BINARY/NCHAR 类型的列还会额外占用 2 个字节的存储位置)。
 - 列名最大长度为 64,最多允许 4096 列,最少需要 2 列,第一列必须是时间戳。注:从 2.1.7.0 版本(不含)以前最多允许 4096 列
-- 标签名最大长度为 64,最多允许 128 个,至少要有 1 个标签,一个表中标签值的总长度不超过 16k 个字符。
+- 标签名最大长度为 64,最多允许 128 个,至少要有 1 个标签,一个表中标签值的总长度不超过 16KB。
 - SQL 语句最大长度 1048576 个字符,也可通过客户端配置参数 maxSQLLength 修改,取值范围 65480 ~ 1048576。
 - SELECT 语句的查询结果,最多允许返回 4096 列(语句中的函数调用可能也会占用一些列空间),超限时需要显式指定较少的返回数据列,以避免语句执行报错。注: 2.1.7.0 版本(不含)之前为最多允许 1024 列
 - 库的数目,超级表的数目、表的数目,系统不做限制,仅受系统资源限制。
diff --git a/docs-cn/12-taos-sql/12-keywords/index.md b/docs-cn/12-taos-sql/12-keywords.md
similarity index 71%
rename from docs-cn/12-taos-sql/12-keywords/index.md
rename to docs-cn/12-taos-sql/12-keywords.md
index 608d4e080967cfd97072706cf0963ae669960be6..0e8e1edfee4a4aa3f05ef7bfd99ca156e44afd2e 100644
--- a/docs-cn/12-taos-sql/12-keywords/index.md
+++ b/docs-cn/12-taos-sql/12-keywords.md
@@ -23,17 +23,17 @@ title: TDengine 参数限制与保留关键字
 去掉了 `` ‘“`\ `` (单双引号、撇号、反斜杠、空格)
 
 - 数据库名:不能包含“.”以及特殊字符,不能超过 32 个字符
-- 表名:不能包含“.”以及特殊字符,与所属数据库名一起,不能超过 192 个字符,每行数据最大长度 16k 个字符
-- 表的列名:不能包含特殊字符,不能超过 64 个字符
+- 表名:不能包含“.”以及特殊字符,与所属数据库名一起,不能超过 192 个字节,每行数据最大长度 48KB
+- 表的列名:不能包含特殊字符,不能超过 64 个字节
 - 数据库名、表名、列名,都不能以数字开头,合法的可用字符集是“英文字符、数字和下划线”
 - 表的列数:不能超过 1024 列,最少需要 2 列,第一列必须是时间戳(从 2.1.7.0 版本开始,改为最多支持 4096 列)
-- 记录的最大长度:包括时间戳 8 byte,不能超过 16KB(每个 BINARY/NCHAR 类型的列还会额外占用 2 个 byte 的存储位置)
-- 单条 SQL 语句默认最大字符串长度:1048576 byte,但可通过系统配置参数 maxSQLLength 修改,取值范围 65480 ~ 1048576 byte
+- 记录的最大长度:包括时间戳 8 字节,不能超过 48KB(每个 BINARY/NCHAR 类型的列还会额外占用 2 个字节的存储位置)
+- 单条 SQL 语句默认最大字符串长度:1048576 字节,但可通过系统配置参数 maxSQLLength 修改,取值范围 65480 ~ 1048576 字节
 - 数据库副本数:不能超过 3
-- 用户名:不能超过 23 个 byte
-- 用户密码:不能超过 15 个 byte
+- 用户名:不能超过 23 个字节
+- 用户密码:不能超过 15 个字节
 - 标签(Tags)数量:不能超过 128 个,可以 0 个
-- 标签的总长度:不能超过 16K byte
+- 标签的总长度:不能超过 16KB
 - 记录条数:仅受存储空间限制
 - 表的个数:仅受节点个数限制
 - 库的个数:仅受节点个数限制
@@ -85,3 +85,44 @@ title: TDengine 参数限制与保留关键字
 | CONNECTIONS | HAVING | NOT | SOFFSET | VNODES |
 | CONNS | ID | NOTNULL | STABLE | WAL |
 | COPY | IF | NOW | STABLES | WHERE |
+| _C0 | _QSTART | _QSTOP | _QDURATION | _WSTART |
+| _WSTOP | _WDURATION | _ROWTS |
+
+## 特殊说明
+### TBNAME
+`TBNAME` 可以视为超级表中一个特殊的标签,代表子表的表名。
+
+获取一个超级表所有的子表名及相关的标签信息:
+```mysql
+SELECT TBNAME, location FROM meters;
+```
+
+统计超级表下辖子表数量:
+```mysql
+SELECT COUNT(TBNAME) FROM meters;
+```
+
+以上两个查询均只支持在 WHERE 条件子句中添加针对标签(TAGS)的过滤条件。例如:
+```mysql
+taos> SELECT TBNAME, location FROM meters;
+             tbname             |            location            |
+==================================================================
+ d1004                          | California.SanFrancisco        |
+ d1003                          | California.SanFrancisco        |
+ d1002                          | California.LosAngeles          |
+ d1001                          | California.LosAngeles          |
+Query OK, 4 row(s) in set (0.000881s)
+
+taos> SELECT COUNT(tbname) FROM meters WHERE groupId > 2;
+     count(tbname)     |
+========================
+                     2 |
+Query OK, 1 row(s) in set (0.001091s)
+```
+### _QSTART/_QSTOP/_QDURATION
+表示查询过滤窗口的起始,结束以及持续时间。
+
+### _WSTART/_WSTOP/_WDURATION
+窗口切分聚合查询(例如 interval/session window/state window)中表示每个切分窗口的起始,结束以及持续时间。
+
+### _c0/_ROWTS
+_c0 与 _ROWTS 等价,表示表或超级表的第一列。
diff --git a/docs-cn/12-taos-sql/12-keywords/_category_.yml b/docs-cn/12-taos-sql/12-keywords/_category_.yml
deleted file mode 100644
index 67738650a4564477f017542aea81767b3de72922..0000000000000000000000000000000000000000
--- a/docs-cn/12-taos-sql/12-keywords/_category_.yml
+++ /dev/null
@@ -1 +0,0 @@
-label: 参数限制与保留关键字
\ No newline at end of file
diff --git a/docs-cn/12-taos-sql/13-operators.md b/docs-cn/12-taos-sql/13-operators.md
new file mode 100644
index 0000000000000000000000000000000000000000..1ffc823044fe8ebbded98265544de39ea14e706e
--- /dev/null
+++ b/docs-cn/12-taos-sql/13-operators.md
@@ -0,0 +1,66 @@
+---
+sidebar_label: 运算符
+title: 运算符
+---
+
+## 算术运算符
+
+| # | **运算符** | **支持的类型** | **说明** |
+| --- | :--------: | -------------- | -------------------------- |
+| 1 | +, - | 数值类型 | 表达正数和负数,一元运算符 |
+| 2 | +, - | 数值类型 | 表示加法和减法,二元运算符 |
+| 3 | \*, / | 数值类型 | 表示乘法和除法,二元运算符 |
+| 4 | % | 数值类型 | 表示取余运算,二元运算符 |
+
+## 位运算符
+
+| # | **运算符** | **支持的类型** | **说明** |
+| --- | :--------: | -------------- | ------------------ |
+| 1 | & | 数值类型 | 按位与,二元运算符 |
+| 2 | \| | 数值类型 | 按位或,二元运算符 |
+
+## JSON 运算符
+
+`->` 运算符可以对 JSON 类型的列按键取值。`->` 左侧是列标识符,右侧是键的字符串常量,如 `col->'name'`,返回键 `'name'` 的值。
+
+## 集合运算符
+
+集合运算符将两个查询的结果合并为一个结果。包含集合运算符的查询称之为复合查询。复合查询中每条查询的选择列表中的相应表达式在数量上必须匹配,且结果类型以第一条查询为准,后续查询的结果类型必须可转换到第一条查询的结果类型,转换规则同 CAST 函数。
+
+TDengine 支持 `UNION ALL` 和 `UNION` 操作符。UNION ALL 将查询返回的结果集合并返回,并不去重。UNION 将查询返回的结果集合并并去重后返回。在同一个 SQL 语句中,集合操作符最多支持 100 个。
+
+## 比较运算符
+
+| # | **运算符** | **支持的类型** | **说明** |
+| --- | :---------------: | -------------------------------------------------------------------- | -------------------- |
+| 1 | = | 除 BLOB、MEDIUMBLOB 和 JSON 外的所有类型 | 相等 |
+| 2 | <\>, != | 除 BLOB、MEDIUMBLOB 和 JSON 外的所有类型,且不可以为表的时间戳主键列 | 不相等 |
+| 3 | \>, \< | 除 BLOB、MEDIUMBLOB 和 JSON 外的所有类型 | 大于,小于 |
+| 4 | \>=, \<= | 除 BLOB、MEDIUMBLOB 和 JSON 外的所有类型 | 大于等于,小于等于 |
+| 5 | IS [NOT] NULL | 所有类型 | 是否为空值 |
+| 6 | [NOT] BETWEEN AND | 除 BOOL、BLOB、MEDIUMBLOB 和 JSON 外的所有类型 | 闭区间比较 |
+| 7 | IN | 除 BLOB、MEDIUMBLOB 和 JSON 外的所有类型,且不可以为表的时间戳主键列 | 与列表内的任意值相等 |
+| 8 | LIKE | BINARY、NCHAR 和 VARCHAR | 通配符匹配 |
+| 9 | MATCH, NMATCH | BINARY、NCHAR 和 VARCHAR | 正则表达式匹配 |
+| 10 | CONTAINS | JSON | JSON 中是否存在某键 |
+
+LIKE 条件使用通配符字符串进行匹配检查,规则如下:
+
+- '%'(百分号)匹配 0 到任意个字符;'\_'(下划线)匹配单个任意 ASCII 字符。
+- 如果希望匹配字符串中原本就带有的 \_(下划线)字符,那么可以在通配符字符串中写作 \_,即加一个反斜线来进行转义。
+- 通配符字符串最长不能超过 100 字节。不建议使用太长的通配符字符串,否则将有可能严重影响 LIKE 操作的执行性能。
+
+MATCH 条件和 NMATCH 条件使用正则表达式进行匹配,规则如下:
+
+- 支持符合 POSIX 规范的正则表达式,具体规范内容可参见 Regular Expressions。
+- 只能针对子表名(即 tbname)、字符串类型的标签值进行正则表达式过滤,不支持普通列的过滤。
+- 正则匹配字符串长度不能超过 128 字节。可以通过参数 maxRegexStringLen 设置和调整最大允许的正则匹配字符串,该参数是客户端配置参数,需要重启客户端才能生效。
+
+## 逻辑运算符
+
+| # | **运算符** | **支持的类型** | **说明** |
+| --- | :--------: | -------------- | --------------------------------------------------------------------------- |
+| 1 | AND | BOOL | 逻辑与,如果两个条件均为 TRUE, 则返回 TRUE。如果任一为 FALSE,则返回 FALSE |
+| 2 | OR | BOOL | 逻辑或,如果任一条件为 TRUE, 则返回 TRUE。如果两者都是 FALSE,则返回 FALSE |
+
+TDengine 在计算逻辑条件时,会进行短路径优化,即对于 AND,第一个条件为 FALSE,则不再计算第二个条件,直接返回 FALSE;对于 OR,第一个条件为 TRUE,则不再计算第二个条件,直接返回 TRUE。
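一条示意查询,串联其中几类运算符(meters 及其 location、groupid 列沿用文档示例 schema;meters_json 及其 JSON 标签 info 为假设示例):

```sql
-- 比较运算符、LIKE 通配符与逻辑运算符组合使用
SELECT * FROM meters
  WHERE voltage >= 200 AND location LIKE 'California.%' AND groupid IN (2, 3);

-- 用 -> 运算符按键取 JSON 标签的值(meters_json 与其 info 标签为假设示例)
SELECT info->'street' FROM meters_json;
```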
diff --git a/docs-cn/14-reference/03-connector/node.mdx b/docs-cn/14-reference/03-connector/node.mdx
index 12345fa9fe995c41828df07703f0efb61a2e029d..9f2bed9e97cb33aeabfce3d69dc3774931b426c0 100644
--- a/docs-cn/14-reference/03-connector/node.mdx
+++ b/docs-cn/14-reference/03-connector/node.mdx
@@ -14,7 +14,6 @@ import NodeInfluxLine from "../../07-develop/03-insert-data/_js_line.mdx";
 import NodeOpenTSDBTelnet from "../../07-develop/03-insert-data/_js_opts_telnet.mdx";
 import NodeOpenTSDBJson from "../../07-develop/03-insert-data/_js_opts_json.mdx";
 import NodeQuery from "../../07-develop/04-query-data/_js.mdx";
-import NodeAsyncQuery from "../../07-develop/04-query-data/_js_async.mdx";
 
 `td2.0-connector` 和 `td2.0-rest-connector` 是 TDengine 的官方 Node.js 语言连接器。Node.js 开发人员可以通过它开发可以存取 TDengine 集群数据的应用软件。
 
@@ -189,14 +188,8 @@ let cursor = conn.cursor();
 
 ### 查询数据
 
-#### 同步查询
-
 <NodeQuery />
 
-#### 异步查询
-
-<NodeAsyncQuery />
-
 ## 更多示例程序
 
 | 示例程序 | 示例程序描述 |
diff --git a/docs-cn/14-reference/06-taosdump.md b/docs-cn/14-reference/06-taosdump.md
index 7131493ec9439225d8047288ed86026c887f0aac..3a9f2e9acd215be102991a1d91fba285ef6315bb 100644
--- a/docs-cn/14-reference/06-taosdump.md
+++ b/docs-cn/14-reference/06-taosdump.md
@@ -38,7 +38,7 @@ taosdump 有两种安装方式:
 
 :::tip
 - taosdump 1.4.1 之后的版本提供 `-I` 参数,用于解析 avro 文件 schema 和数据,如果指定 `-s` 参数将只解析 schema。
-- taosdump 1.4.2 之后的备份使用 `-B` 参数指定的批次数,默认值为 16384,如果在某些环境下由于网络速度或磁盘性能不足导致 "Error actual dump .. batch .." 可以通过 `-B` 参数挑战为更小的值进行尝试。
+- taosdump 1.4.2 之后的备份使用 `-B` 参数指定的批次数,默认值为 16384,如果在某些环境下由于网络速度或磁盘性能不足导致 "Error actual dump .. batch .." 可以通过 `-B` 参数调整为更小的值进行尝试。
 
 :::
diff --git a/docs-cn/14-reference/13-schemaless/13-schemaless.md b/docs-cn/14-reference/13-schemaless/13-schemaless.md
index 4de310c248d7763690acef80cdca1c50f609d63b..f2712f2814593bddd65401cb129c8c58ee55a316 100644
--- a/docs-cn/14-reference/13-schemaless/13-schemaless.md
+++ b/docs-cn/14-reference/13-schemaless/13-schemaless.md
@@ -82,7 +82,7 @@ st,t1=3,t2=4,t3=t3 c1=3i64,c3="passit",c2=false,c4=4f64 1626006833639000000
 
 :::tip
 无模式所有的处理逻辑,仍会遵循 TDengine 对数据结构的底层限制,例如每行数据的总长度不能超过
-16k 字节。这方面的具体限制约束请参见 [TAOS SQL 边界限制](/taos-sql/limit)
+48KB。这方面的具体限制约束请参见 [TAOS SQL 边界限制](/taos-sql/limit)
 
 :::
diff --git a/docs-cn/20-third-party/11-kafka.md b/docs-cn/20-third-party/11-kafka.md
index 0de5b43a396fa3bb4aba558308f95cb0d6f96bc5..8369806adcfe1b195348e7d60160609cde9150e8 100644
--- a/docs-cn/20-third-party/11-kafka.md
+++ b/docs-cn/20-third-party/11-kafka.md
@@ -7,7 +7,7 @@ TDengine Kafka Connector 包含两个插件: TDengine Source Connector 和 TDeng
 
 ## 什么是 Kafka Connect?
 
-Kafka Connect 是 Apache Kafka 的一个组件,用于使其它系统,比如数据库、云服务、文件系统等能方便地连接到 Kafka。数据既可以通过 Kafka Connect 从其它系统流向 Kafka, 也可以通过 Kafka Connect 从 Kafka 流向其它系统。从其它系统读数据的插件称为 Source Connector, 写数据到其它系统的插件称为 Sink Connector。Source Connector 和 Sink Connector 都不会直接连接 Kafka Broker,Source Connector 把数据转交给 Kafka Connect。Sink Connector 从 Kafka Connect 接收数据。
+Kafka Connect 是 [Apache Kafka](https://kafka.apache.org/) 的一个组件,用于使其它系统,比如数据库、云服务、文件系统等能方便地连接到 Kafka。数据既可以通过 Kafka Connect 从其它系统流向 Kafka, 也可以通过 Kafka Connect 从 Kafka 流向其它系统。从其它系统读数据的插件称为 Source Connector, 写数据到其它系统的插件称为 Sink Connector。Source Connector 和 Sink Connector 都不会直接连接 Kafka Broker,Source Connector 把数据转交给 Kafka Connect。Sink Connector 从 Kafka Connect 接收数据。
 
 ![TDengine Database Kafka Connector -- Kafka Connect structure](kafka/Kafka_Connect.webp)
 
@@ -17,7 +17,7 @@ TDengine Source Connector 用于把数据实时地从 TDengine 读出来发送
 
 ## 什么是 Confluent?
 
-Confluent 在 Kafka 的基础上增加很多扩展功能。包括:
+[Confluent](https://www.confluent.io/) 在 Kafka 的基础上增加很多扩展功能。包括:
 
 1. Schema Registry
 2. REST 代理
@@ -81,10 +81,10 @@ Development: false
 git clone https://github.com/taosdata/kafka-connect-tdengine.git
 cd kafka-connect-tdengine
 mvn clean package
-unzip -d $CONFLUENT_HOME/share/confluent-hub-components/ target/components/packages/taosdata-kafka-connect-tdengine-0.1.0.zip
+unzip -d $CONFLUENT_HOME/share/java/ target/components/packages/taosdata-kafka-connect-tdengine-*.zip
 ```
 
-以上脚本先 clone 项目源码,然后用 Maven 编译打包。打包完成后在 `target/components/packages/` 目录生成了插件的 zip 包。把这个 zip 包解压到安装插件的路径即可。安装插件的路径在配置文件 `$CONFLUENT_HOME/etc/kafka/connect-standalone.properties` 中。默认的路径为 `$CONFLUENT_HOME/share/confluent-hub-components/`。
+以上脚本先 clone 项目源码,然后用 Maven 编译打包。打包完成后在 `target/components/packages/` 目录生成了插件的 zip 包。把这个 zip 包解压到安装插件的路径即可。上面的示例中使用了内置的插件安装路径: `$CONFLUENT_HOME/share/java/`。
 
 ### 用 confluent-hub 安装
@@ -98,7 +98,7 @@ confluent local services start
 ```
 
 :::note
-一定要先安装插件再启动 Confluent, 否则会出现找不到类的错误。Kafka Connect 的日志(默认路径: /tmp/confluent.xxxx/connect/logs/connect.log)中会输出成功安装的插件,据此可判断插件是否安装成功。
+一定要先安装插件再启动 Confluent, 否则加载插件会失败。
 :::
 
 :::tip
@@ -125,6 +125,61 @@ Control Center is [UP]
 
 清空数据可执行 `rm -rf /tmp/confluent.106668`。
 :::
 
+### 验证各个组件是否启动成功
+
+输入命令:
+
+```
+confluent local services status
+```
+
+如果各组件都启动成功,会得到如下输出:
+
+```
+Connect is [UP]
+Control Center is [UP]
+Kafka is [UP]
+Kafka REST is [UP]
+ksqlDB Server is [UP]
+Schema Registry is [UP]
+ZooKeeper is [UP]
+```
+
+### 验证插件是否安装成功
+
+在 Kafka Connect 组件完全启动后,可用以下命令列出成功加载的插件:
+
+```
+confluent local services connect plugin list
+```
+
+如果成功安装,会输出如下:
+
+```txt {4,9}
+Available Connect Plugins:
+[
+  {
+    "class": "com.taosdata.kafka.connect.sink.TDengineSinkConnector",
+    "type": "sink",
+    "version": "1.0.0"
+  },
+  {
+    "class": "com.taosdata.kafka.connect.source.TDengineSourceConnector",
+    "type": "source",
+    "version": "1.0.0"
+  },
+......
+```
+
+如果插件安装失败,请检查 Kafka Connect 的启动日志是否有异常信息,用以下命令输出日志路径:
+```
+echo `cat /tmp/confluent.current`/connect/connect.stdout
+```
+该命令的输出类似: `/tmp/confluent.104086/connect/connect.stdout`。
+
+与日志文件 `connect.stdout` 同一目录,还有一个文件名为: `connect.properties`。在这个文件的末尾,可以看到最终生效的 `plugin.path`, 它是一系列用逗号分割的路径。如果插件安装失败,很可能是因为实际的安装路径不包含在 `plugin.path` 中。
+
+
 ## TDengine Sink Connector 的使用
 
 TDengine Sink Connector 的作用是同步指定 topic 的数据到 TDengine。用户无需提前创建数据库和超级表。可手动指定目标数据库的名字(见配置参数 connection.database), 也可按一定规则生成(见配置参数 connection.database.prefix)。
 
@@ -144,7 +199,7 @@ vi sink-demo.properties
 
 sink-demo.properties 内容如下:
 
 ```ini title="sink-demo.properties"
-name=tdengine-sink-demo
+name=TDengineSinkConnector
 connector.class=com.taosdata.kafka.connect.sink.TDengineSinkConnector
 tasks.max=1
 topics=meters
 connection.url=jdbc:TAOS://127.0.0.1:6030
 connection.user=root
 connection.password=taosdata
 connection.database=power
 db.schemaless=line
+data.precision=ns
 key.converter=org.apache.kafka.connect.storage.StringConverter
 value.converter=org.apache.kafka.connect.storage.StringConverter
 ```
@@ -179,6 +235,7 @@ confluent local services connect connector load TDengineSinkConnector --config .
   "connection.url": "jdbc:TAOS://127.0.0.1:6030",
   "connection.user": "root",
   "connector.class": "com.taosdata.kafka.connect.sink.TDengineSinkConnector",
+  "data.precision": "ns",
   "db.schemaless": "line",
   "key.converter": "org.apache.kafka.connect.storage.StringConverter",
   "tasks.max": "1",
@@ -223,10 +280,10 @@ Database changed.
 
 taos> select * from meters;
            ts            |          current          |          voltage          |           phase           | groupid |            location            |
 ===============================================================================================================================================================
- 2022-03-28 09:56:51.249000000 |             11.800000000 |             221.000000000 |               0.280000000 | 2       | California.LosAngeles          |
- 2022-03-28 09:56:51.250000000 |             13.400000000 |             223.000000000 |               0.290000000 | 2       | California.LosAngeles          |
- 2022-03-28 09:56:51.249000000 |             10.800000000 |             223.000000000 |               0.290000000 | 3       | California.LosAngeles          |
- 2022-03-28 09:56:51.250000000 |             11.300000000 |             221.000000000 |               0.350000000 | 3       | California.LosAngeles          |
+ 2022-03-28 09:56:51.249000000 |             11.800000000 |             221.000000000 |               0.280000000 | 2       | California.LosAngeles         |
+ 2022-03-28 09:56:51.250000000 |             13.400000000 |             223.000000000 |               0.290000000 | 2       | California.LosAngeles         |
+ 2022-03-28 09:56:51.249000000 |             10.800000000 |             223.000000000 |               0.290000000 | 3       | California.LosAngeles         |
+ 2022-03-28 09:56:51.250000000 |             11.300000000 |             221.000000000 |               0.350000000 | 3       | California.LosAngeles         |
 Query OK, 4 row(s) in set (0.004208s)
 ```
@@ -356,21 +413,33 @@ confluent local services connect connector unload TDengineSourceConnector
 
 2. `connection.database.prefix`: 当 connection.database 为 null 时, 目标数据库的前缀。可以包含占位符 '${topic}'。 比如 kafka_${topic}, 对于主题 'orders' 将写入数据库 'kafka_orders'。 默认 null。当为 null 时,目标数据库的名字和主题的名字是一致的。
 3. `batch.size`: 分批写入每批记录数。当 Sink Connector 一次接收到的数据大于这个值时将分批写入。
 4. `max.retries`: 发生错误时的最大重试次数。默认为 1。
-5. `retry.backoff.ms`: 发送错误时重试的时间间隔。单位毫秒,默认 3000。
-6. `db.schemaless`: 数据格式,必须指定为: line、json、telnet 中的一个。分别代表 InfluxDB 行协议格式、 OpenTSDB JSON 格式、 OpenTSDB Telnet 行协议格式。
+5. `retry.backoff.ms`: 发送错误时重试的时间间隔。单位毫秒,默认为 3000。
+6. `db.schemaless`: 数据格式,可选值为:
+   1. line :代表 InfluxDB 行协议格式
+   2. json : 代表 OpenTSDB JSON 格式
+   3. telnet :代表 OpenTSDB Telnet 行协议格式
+7. `data.precision`: 使用 InfluxDB 行协议格式时,时间戳的精度。可选值为:
+   1. ms : 表示毫秒
+   2. us : 表示微秒
+   3. ns : 表示纳秒。默认为纳秒。
 
 ### TDengine Source Connector 特有的配置
 
 1. `connection.database`: 源数据库名称,无缺省值。
 2. `topic.prefix`: 数据导入 kafka 后 topic 名称前缀。 使用 `topic.prefix` + `connection.database` 名称作为完整 topic 名。默认为空字符串 ""。
-3. `timestamp.initial`: 数据同步起始时间。格式为'yyyy-MM-dd HH:mm:ss'。默认 "1970-01-01 00:00:00"。
-4. `poll.interval.ms`: 拉取数据间隔,单位为 ms。默认 1000。
+3. `timestamp.initial`: 数据同步起始时间。格式为'yyyy-MM-dd HH:mm:ss'。默认为 "1970-01-01 00:00:00"。
+4. `poll.interval.ms`: 拉取数据间隔,单位为 ms。默认为 1000。
 5. `fetch.max.rows` : 检索数据库时最大检索条数。 默认为 100。
-6. `out.format`: 数据格式。取值 line 或 json。line 表示 InfluxDB Line 协议格式, json 表示 OpenTSDB JSON 格式。默认 line。
+6. `out.format`: 数据格式。取值 line 或 json。line 表示 InfluxDB Line 协议格式, json 表示 OpenTSDB JSON 格式。默认为 line。
+
+## 其他说明
+
+1. 插件的安装位置可以自定义,请参考官方文档:https://docs.confluent.io/home/connect/self-managed/install.html#install-connector-manually。
+2. 本教程的示例程序使用了 Confluent 平台,但是 TDengine Kafka Connector 本身同样适用于独立安装的 Kafka, 且配置方法相同。关于如何在独立安装的 Kafka 环境使用 Kafka Connect 插件, 请参考官方文档: https://kafka.apache.org/documentation/#connect。
 
 ## 问题反馈
 
-https://github.com/taosdata/kafka-connect-tdengine/issues
+无论遇到任何问题,都欢迎在本项目的 Github 仓库反馈: https://github.com/taosdata/kafka-connect-tdengine/issues。
 
 ## 参考
diff --git a/docs-cn/27-train-faq/01-faq.md b/docs-cn/27-train-faq/01-faq.md
index a657a95e8d0087eb50265adb86fb34f04d43d501..f298d7e14dec682b58a76ce1d7f1c10970ab2738 100644
--- a/docs-cn/27-train-faq/01-faq.md
+++ b/docs-cn/27-train-faq/01-faq.md
@@ -33,15 +33,15 @@ title: 常见问题及反馈
 
 ### 2. Windows 平台下 JDBCDriver 找不到动态链接库,怎么办?
 
-请看为此问题撰写的[技术博客](https://www.taosdata.com/blog/2019/12/03/950.html)。
+请看为此问题撰写的 [技术博客](https://www.taosdata.com/blog/2019/12/03/950.html)。
 
 ### 3. 创建数据表时提示 more dnodes are needed
 
-请看为此问题撰写的[技术博客](https://www.taosdata.com/blog/2019/12/03/965.html)。
+请看为此问题撰写的 [技术博客](https://www.taosdata.com/blog/2019/12/03/965.html)。
 
 ### 4. 如何让 TDengine crash 时生成 core 文件?
 
-请看为此问题撰写的[技术博客](https://www.taosdata.com/blog/2019/12/06/974.html)。
+请看为此问题撰写的 [技术博客](https://www.taosdata.com/blog/2019/12/06/974.html)。
 
 ### 5. 遇到错误“Unable to establish connection” 怎么办?
 
@@ -128,19 +128,30 @@ properties.setProperty(TSDBDriver.LOCALE_KEY, "UTF-8");
 Connection = DriverManager.getConnection(url, properties);
 ```
 
-### 13.JDBC 报错: the executed SQL is not a DML or a DDL?
+### 13. Windows 系统下客户端无法正常显示中文字符?
+
+Windows 系统中一般是采用 GBK/GB18030 存储中文字符,而 TDengine 的默认字符集为 UTF-8 ,在 Windows 系统中使用 TDengine 客户端时,客户端驱动会将字符统一转换为 UTF-8 编码后发送到服务端存储,因此在应用开发过程中,调用接口时正确配置当前的中文字符集即可。
+
+【v2.2.1.5 以后版本】在 Windows 10 环境下运行 TDengine 客户端命令行工具 taos 时,若无法正常输入、显示中文,可以对客户端 taos.cfg 做如下配置:
+
+```
+locale C
+charset UTF-8
+```
+
+### 14. JDBC 报错: the executed SQL is not a DML or a DDL?
 
 请更新至最新的 JDBC 驱动,参考 [Java 连接器](/reference/connector/java)
 
-### 14. taos connect failed, reason: invalid timestamp
+### 15. taos connect failed, reason: invalid timestamp
 
 常见原因是服务器和客户端时间没有校准,可以通过和时间服务器同步的方式(Linux 下使用 ntpdate 命令,Windows 在系统时间设置中选择自动同步)校准。
 
-### 15. 表名显示不全
+### 16. 表名显示不全
 
 由于 taos shell 在终端中显示宽度有限,有可能比较长的表名显示不全,如果按照显示的不全的表名进行相关操作会发生 Table does not exist 错误。解决方法可以是通过修改 taos.cfg 文件中的设置项 maxBinaryDisplayWidth, 或者直接输入命令 set max_binary_display_width 100。或者在命令结尾使用 \G 参数来调整结果的显示方式。
 
-### 16. 如何进行数据迁移?
+### 17. 如何进行数据迁移?
 
 TDengine 是根据 hostname 唯一标志一台机器的,在数据文件从机器 A 移动机器 B 时,注意如下两件事:
 
 - 2.0.7.0 及以后的版本,到/var/lib/taos/dnode 下,修复 dnodeEps.json 的 dnodeId 对应的 FQDN,重启。确保机器内所有机器的此文件是完全相同的。
 - 1.x 和 2.x 版本的存储结构不兼容,需要使用迁移工具或者自己开发应用导出导入数据。
 
-### 17. 如何在命令行程序 taos 中临时调整日志级别
+### 18. 如何在命令行程序 taos 中临时调整日志级别
 
 为了调试方便,从 2.0.16 版本开始,命令行程序 taos 新增了与日志记录相关的两条指令:
 
@@ -169,7 +180,7 @@ ALTER LOCAL RESETLOG;
 
-### 18. go 语言编写组件编译失败怎样解决?
+### 19. go 语言编写组件编译失败怎样解决?
 
 TDengine 2.3.0.0 及之后的版本包含一个使用 go 语言开发的 taosAdapter 独立组件,需要单独运行,取代之前 taosd 内置的 httpd ,提供包含原 httpd 功能以及支持多种其他软件(Prometheus、Telegraf、collectd、StatsD 等)的数据接入功能。
 使用最新 develop 分支代码编译需要先 `git submodule update --init --recursive` 下载 taosAdapter 仓库代码后再编译。
 
@@ -184,7 +195,7 @@ go env -w GOPROXY=https://goproxy.cn,direct
 
 如果希望继续使用之前的内置 httpd,可以关闭 taosAdapter 编译,使用 `cmake .. -DBUILD_HTTP=true` 使用原来内置的 httpd。
 
-### 19. 如何查询数据占用的存储空间大小?
+### 20. 如何查询数据占用的存储空间大小?
 
 默认情况下,TDengine 的数据文件存储在 /var/lib/taos ,日志文件存储在 /var/log/taos 。
 
@@ -193,3 +204,38 @@ go env -w GOPROXY=https://goproxy.cn,direct
 
 若想查看单个数据库占用的大小,可在命令行程序 taos 内指定要查看的数据库后执行 `show vgroups;` ,通过得到的 VGroup id 去 /var/lib/taos/vnode 下查看包含的文件夹大小。
 
 若仅仅想查看指定(超级)表的数据块分布及大小,可查看[_block_dist 函数](https://docs.taosdata.com/taos-sql/select/#_block_dist-%E5%87%BD%E6%95%B0)
+
+### 21. 客户端连接串如何保证高可用?
+
+请看为此问题撰写的 [技术博客](https://www.taosdata.com/blog/2021/04/16/2287.html)
+
+### 22. 时间戳的时区信息是怎样处理的?
+
+TDengine 中时间戳的时区总是由客户端进行处理,而与服务端无关。具体来说,客户端会对 SQL 语句中的时间戳进行时区转换,转为 UTC 时区(即 Unix 时间戳——Unix Timestamp)再交由服务端进行写入和查询;在读取数据时,服务端也是采用 UTC 时区提供原始数据,客户端收到后再根据本地设置,把时间戳转换为本地系统所要求的时区进行显示。
+
+客户端在处理时间戳字符串时,会采取如下逻辑:
+
+1. 在未做特殊设置的情况下,客户端默认使用所在操作系统的时区设置。
+2. 如果在 taos.cfg 中设置了 timezone 参数,则客户端会以这个配置文件中的设置为准。
+3. 如果在 C/C++/Java/Python 等各种编程语言的 Connector Driver 中,在建立数据库连接时显式指定了 timezone,那么会以这个指定的时区设置为准。例如 Java Connector 的 JDBC URL 中就有 timezone 参数。
+4. 在书写 SQL 语句时,也可以直接使用 Unix 时间戳(例如 `1554984068000`)或带有时区的时间戳字符串,也即以 RFC 3339 格式(例如 `2013-04-12T15:52:01.123+08:00`)或 ISO-8601 格式(例如 `2013-04-12T15:52:01.123+0800`)来书写时间戳,此时这些时间戳的取值将不再受其他时区设置的影响。下面给出一个示例。
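一个最小示意(d1001 表沿用文档示例 schema;时间值为换算示例):

```sql
-- 以下两条写法指向同一时刻:1554984068000 毫秒即 2019-04-11 20:01:08 +08:00,
-- 带时区的字符串写法不受客户端本地时区设置的影响。
INSERT INTO d1001 VALUES (1554984068000, 10.3, 219, 0.31);
INSERT INTO d1001 VALUES ('2019-04-11T20:01:08.000+08:00', 10.3, 219, 0.31);
```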
+
+### 23. TDengine 2.0 都会用到哪些网络端口?
+
+使用到的网络端口请看文档:[serverport](/reference/config/#serverport)
+
+需要注意,文档上列举的端口号都是以默认端口 6030 为前提进行说明,如果修改了配置文件中的设置,那么列举的端口都会随之出现变化,管理员可以参考上述的信息调整防火墙设置。
+
+### 24. 为什么 RESTful 接口无响应、Grafana 无法添加 TDengine 为数据源、TDengineGUI 选了 6041 端口还是无法连接成功?
+
+taosAdapter 从 TDengine 2.4.0.0 版本开始成为 TDengine 服务端软件的组成部分,是 TDengine 集群和应用程序之间的桥梁和适配器。在此之前 RESTful 接口等功能是由 taosd 内置的 HTTP 服务提供的,而如今要实现上述功能需要执行:```systemctl start taosadapter``` 命令来启动 taosAdapter 服务。
+
+需要说明的是,taosAdapter 的日志路径 path 需要单独配置,默认路径是 /var/log/taos ;日志等级 logLevel 有 8 个等级,默认等级是 info ,配置成 panic 可关闭日志输出。请注意操作系统 / 目录的空间大小,可通过命令行参数、环境变量或配置文件来修改配置,默认配置文件是 /etc/taos/taosadapter.toml 。
+
+有关 taosAdapter 组件的详细介绍请看文档:[taosAdapter](https://docs.taosdata.com/reference/taosadapter/)
+
+### 25. 发生了 OOM 怎么办?
+
+OOM 是操作系统的保护机制,当操作系统内存(包括 SWAP )不足时,会杀掉某些进程,从而保证操作系统的稳定运行。通常内存不足主要是如下两个原因导致,一是剩余内存小于 vm.min_free_kbytes ;二是程序请求的内存大于剩余内存。还有一种情况是内存充足但程序占用了特殊的内存地址,也会触发 OOM 。
+
+TDengine 会预先为每个 VNode 分配好内存,每个 Database 的 VNode 个数受 maxVgroupsPerDb 影响,每个 VNode 占用的内存大小受 Blocks 和 Cache 影响。要防止 OOM,需要在项目建设之初合理规划内存,并合理设置 SWAP ,除此之外查询过量的数据也有可能导致内存暴涨,这取决于具体的查询语句。TDengine 企业版对内存管理做了优化,采用了新的内存分配器,对稳定性有更高要求的用户可以考虑选择企业版。
diff --git a/docs-en/05-get-started/_pkg_install.mdx b/docs-en/05-get-started/_pkg_install.mdx
index af04d2b70bda7575e57cc49a5aa60f19689113e6..cf10497c96ba1d777e45340b0312d97c127b6fcb 100644
--- a/docs-en/05-get-started/_pkg_install.mdx
+++ b/docs-en/05-get-started/_pkg_install.mdx
@@ -12,6 +12,6 @@ Between two major release versions, some beta versions may be delivered for user
 
 For the details please refer to [Install and Uninstall](/operation/pkg-install).
 
-To see the details of versions, please refer to [Download List](https://www.taosdata.com/all-downloads) and [Release Notes](https://github.com/taosdata/TDengine/releases).
+To see the details of versions, please refer to [Download List](https://tdengine.com/all-downloads) and [Release Notes](https://github.com/taosdata/TDengine/releases).
 
diff --git a/docs-en/07-develop/01-connect/index.md b/docs-en/07-develop/01-connect/index.md
index 21b2149f4451e8e5d388a41f1a0a06b6adc00a96..b9217b828d0d08c4ff1eacd27406d4e3bfba8eac 100644
--- a/docs-en/07-develop/01-connect/index.md
+++ b/docs-en/07-develop/01-connect/index.md
@@ -1,6 +1,6 @@
 ---
-sidebar_label: Connection
-title: Connect to TDengine
+sidebar_label: Connect
+title: Connect
 description: "This document explains how to establish connections to TDengine, and briefly introduces how to install and use TDengine connectors."
 ---
 
diff --git a/docs-en/07-develop/03-insert-data/01-sql-writing.mdx b/docs-en/07-develop/03-insert-data/01-sql-writing.mdx
index ae170a2bef3496c49026e05d7d60399cc88e90a7..397b1a14fd76c1372c79eb88575f2bf21cb62050 100644
--- a/docs-en/07-develop/03-insert-data/01-sql-writing.mdx
+++ b/docs-en/07-develop/03-insert-data/01-sql-writing.mdx
@@ -1,5 +1,5 @@
 ---
-sidebar_label: SQL
+sidebar_label: Insert Using SQL
 title: Insert Using SQL
 ---
 
@@ -52,7 +52,7 @@ For more details about `INSERT` please refer to [INSERT](/taos-sql/insert).
 
 :::info
 
-- Inserting in batches can improve performance. Normally, the higher the batch size, the better the performance. Please note that a single row can't exceed 16K bytes and each SQL statement can't exceed 1MB.
+- Inserting in batches can improve performance. Normally, the higher the batch size, the better the performance. Please note that a single row can't exceed 48K bytes and each SQL statement can't exceed 1MB.
 - Inserting with multiple threads can also improve performance. However, depending on the system resources on the application side and the server side, when the number of inserting threads grows beyond a specific point the performance may drop instead of improving. The proper number of threads needs to be tested in a specific environment to find the best number.
 
 :::
diff --git a/docs-en/07-develop/03-insert-data/index.md b/docs-en/07-develop/03-insert-data/index.md
index ba31a951ff0805b48f90c87ddc635c04978d3cd2..1a71e719a56448e4b535632e570ce8a04d2282bb 100644
--- a/docs-en/07-develop/03-insert-data/index.md
+++ b/docs-en/07-develop/03-insert-data/index.md
@@ -1,5 +1,5 @@
 ---
-title: Insert
+title: Insert Data
 ---
 
 TDengine supports multiple protocols of inserting data, including SQL, InfluxDB Line protocol, OpenTSDB Telnet protocol, and OpenTSDB JSON protocol. Data can be inserted row by row, or in batches. Data from one or more collection points can be inserted simultaneously. Data can be inserted with multiple threads, and out of order data and historical data can be inserted as well. InfluxDB Line protocol, OpenTSDB Telnet protocol and OpenTSDB JSON protocol are the 3 kinds of schemaless insert protocols supported by TDengine. It's not necessary to create STables and tables in advance if using schemaless protocols, and the schemas can be adjusted automatically based on the data being inserted.
diff --git a/docs-en/07-develop/04-query-data/_category_.yml b/docs-en/07-develop/04-query-data/_category_.yml
index 5912a48fc31ed36235c0d34d8b0909bf3b518aaa..809db34621a63505ceace7ba182e07c698bdbddb 100644
--- a/docs-en/07-develop/04-query-data/_category_.yml
+++ b/docs-en/07-develop/04-query-data/_category_.yml
@@ -1 +1 @@
-label: Select Data
+label: Query Data
diff --git a/docs-en/07-develop/04-query-data/index.mdx b/docs-en/07-develop/04-query-data/index.mdx
index 74562c88232afc2f41fdbe5d4c34d582b0b141bd..a212fa9529215fc24c55c95a166cfc1a407359b2 100644
--- a/docs-en/07-develop/04-query-data/index.mdx
+++ b/docs-en/07-develop/04-query-data/index.mdx
@@ -1,6 +1,6 @@
 ---
-Sidebar_label: Select
-title: Select
+sidebar_label: Query data
+title: Query data
 description: "This chapter introduces major query functionalities and how to perform sync and async query using connectors."
 ---
 
diff --git a/docs-en/07-develop/06-subscribe.mdx b/docs-en/07-develop/06-subscribe.mdx
index 474841ff8932216d327f39a4f0cb39ba26e6615b..782fcdbaf221419dd231bd10958e26b8f4f856e5 100644
--- a/docs-en/07-develop/06-subscribe.mdx
+++ b/docs-en/07-develop/06-subscribe.mdx
@@ -1,5 +1,5 @@
 ---
-sidebar_label: Subscription
+sidebar_label: Data Subscription
 description: "Lightweight service for data subscription and publishing. Time series data inserted into TDengine continuously can be pushed automatically to subscribing clients."
 title: Data Subscription
 ---
 
@@ -151,7 +151,7 @@ void subscribe_callback(TAOS_SUB* tsub, TAOS_RES *res, void* param, int code) {
 taos_unsubscribe(tsub, keep);
 ```
 
-The second parameter `keep` is used to specify whether to keep the subscription progress on the client sde. If it is **false**, i.e. **0**, then subscription will be restarted from beginning regardless of the `restart` parameter's value when `taos_subscribe` is invoked again. The subscription progress information is stored in _{DataDir}/subscribe/_ , under which there is a file with the same name as `topic` for each subscription, the subscription will be restarted from the beginning if the corresponding progress file is removed.
+The second parameter `keep` is used to specify whether to keep the subscription progress on the client side. If it is **false**, i.e. **0**, then subscription will be restarted from beginning regardless of the `restart` parameter's value when `taos_subscribe` is invoked again. The subscription progress information is stored in _{DataDir}/subscribe/_ , under which there is a file with the same name as `topic` for each subscription (Note: The default value of `DataDir` in the `taos.cfg` file is **/var/lib/taos/**. However, **/var/lib/taos/** does not exist on the Windows server. So you need to change the `DataDir` value to the corresponding existing directory.), the subscription will be restarted from the beginning if the corresponding progress file is removed.
 
 Now let's see the effect of the above sample code, assuming below prerequisites have been done.
diff --git a/docs-en/07-develop/08-udf.md b/docs-en/07-develop/08-udf.md
index 0ee61740cc8b8aad7dd39707a1153b022822f0a9..49bc95bd91a4c31d42d2b21ef05d69225f1bd963 100644
--- a/docs-en/07-develop/08-udf.md
+++ b/docs-en/07-develop/08-udf.md
@@ -1,6 +1,6 @@
 ---
 sidebar_label: UDF
-title: User Defined Functions
+title: User Defined Functions (UDF)
 description: "Scalar functions and aggregate functions developed by users can be utilized by the query framework to expand query capability"
 ---
 
diff --git a/docs-en/12-taos-sql/01-data-type.md b/docs-en/12-taos-sql/01-data-type.md
index 3f5a49e3135771c6c1e62bcf158a99ee30f1ed9d..d038219c8ac66db52416001f7a79c71018e2ca33 100644
--- a/docs-en/12-taos-sql/01-data-type.md
+++ b/docs-en/12-taos-sql/01-data-type.md
@@ -3,6 +3,8 @@ title: Data Types
 description: "TDengine supports a variety of data types including timestamp, float, JSON and many others."
 ---
 
+## TIMESTAMP
+
 When using TDengine to store and query data, the most important part of the data is timestamp. Timestamp must be specified when creating and inserting data rows. Timestamp must follow the rules below:
 
 - The format must be `YYYY-MM-DD HH:mm:ss.MS`, the default time precision is millisecond (ms), for example `2017-08-12 18:25:58.128`
@@ -17,33 +19,51 @@ Time precision in TDengine can be set by the `PRECISION` parameter when executin
 CREATE DATABASE db_name PRECISION 'ns';
 ```
 
+## Data Types
+
 In TDengine, the data types below can be used when specifying a column or tag.
 
 | # | **type** | **Bytes** | **Description** |
 | --- | :-------: | --------- | ------------------------- |
 | 1 | TIMESTAMP | 8 | Default precision is millisecond, microsecond and nanosecond are also supported |
-| 2 | INT | 4 | Integer, the value range is [-2^31+1, 2^31-1], while -2^31 is treated as NULL |
-| 3 | BIGINT | 8 | Long integer, the value range is [-2^63+1, 2^63-1], while -2^63 is treated as NULL |
-| 4 | FLOAT | 4 | Floating point number, the effective number of digits is 6-7, the value range is [-3.4E38, 3.4E38] |
-| 5 | DOUBLE | 8 | Double precision floating point number, the effective number of digits is 15-16, the value range is [-1.7E308, 1.7E308] |
-| 6 | BINARY | User Defined | Single-byte string for ASCII visible characters. Length must be specified when defining a column or tag of binary type. The string length can be up to 16374 bytes. The string value must be quoted with single quotes. The literal single quote inside the string must be preceded with back slash like `\'` |
-| 7 | SMALLINT | 2 | Short integer, the value range is [-32767, 32767], while -32768 is treated as NULL |
-| 8 | TINYINT | 1 | Single-byte integer, the value range is [-127, 127], while -128 is treated as NULL |
-| 9 | BOOL | 1 | Bool, the value range is {true, false} |
-| 10 | NCHAR | User Defined| Multi-Byte string that can include multi byte characters like Chinese characters. Each character of NCHAR type consumes 4 bytes storage. The string value should be quoted with single quotes. Literal single quote inside the string must be preceded with backslash, like `\’`. The length must be specified when defining a column or tag of NCHAR type, for example nchar(10) means it can store at most 10 characters of nchar type and will consume fixed storage of 40 bytes. An error will be reported if the string value exceeds the length defined. |
-| 11 | JSON | | JSON type can only be used on tags. A tag of json type is excluded with any other tags of any other type |
-
-:::tip
-TDengine is case insensitive and treats any characters in the sql command as lower case by default, case sensitive strings must be quoted with single quotes.
-
-:::
+| 2 | INT | 4 | Integer, the value range is [-2^31, 2^31-1] |
+| 3 | INT UNSIGNED | 4 | Unsigned integer, the value range is [0, 2^32-1] |
+| 4 | BIGINT | 8 | Long integer, the value range is [-2^63, 2^63-1] |
+| 5 | BIGINT UNSIGNED | 8 | Unsigned long integer, the value range is [0, 2^64-1] |
+| 6 | FLOAT | 4 | Floating point number, the effective number of digits is 6-7, the value range is [-3.4E38, 3.4E38] |
+| 7 | DOUBLE | 8 | Double precision floating point number, the effective number of digits is 15-16, the value range is [-1.7E308, 1.7E308] |
+| 8 | BINARY | User Defined | Single-byte string for ASCII visible characters. Length must be specified when defining a column or tag of binary type. The string length can be up to 16374 bytes. The string value must be quoted with single quotes. The literal single quote inside the string must be preceded with back slash like `\'` |
+| 9 | SMALLINT | 2 | Short integer, the value range is [-32768, 32767] |
+| 10 | SMALLINT UNSIGNED | 2 | Unsigned short integer, the value range is [0, 65535] |
+| 11 | TINYINT | 1 | Single-byte integer, the value range is [-128, 127] |
+| 12 | TINYINT UNSIGNED | 1 | Unsigned single-byte integer, the value range is [0, 255] |
+| 13 | BOOL | 1 | Bool, the value range is {true, false} |
+| 14 | NCHAR | User Defined| Multi-Byte string that can include multi byte characters like Chinese characters. Each character of NCHAR type consumes 4 bytes storage. The string value should be quoted with single quotes. Literal single quote inside the string must be preceded with backslash, like `\’`. The length must be specified when defining a column or tag of NCHAR type, for example nchar(10) means it can store at most 10 characters of nchar type and will consume fixed storage of 40 bytes. An error will be reported if the string value exceeds the length defined. |
+| 15 | JSON | | JSON type can only be used on tags. A tag of json type is excluded with any other tags of any other type |
+| 16 | VARCHAR | User Defined| Alias of BINARY type |
 
 :::note
-Only ASCII visible characters are suggested to be used in a column or tag of BINARY type. Multi-byte characters must be stored in NCHAR type.
+- TDengine is case insensitive and treats any characters in the sql command as lower case by default, case sensitive strings must be quoted with single quotes.
+- Only ASCII visible characters are suggested to be used in a column or tag of BINARY type. Multi-byte characters must be stored in NCHAR type.
+- Numeric values in SQL statements will be determined as integer or float type according to whether there is decimal point or whether scientific notation is used, so attention must be paid to avoid overflow. For example, 9999999999999999999 will be considered as overflow because it exceeds the upper limit of long integer, but 9999999999999999999.0 will be considered as a legal float number.
 
 :::
 
+## Constants
+TDengine supports constants of multiple data types.
+
+| # | **Syntax** | **Type** | **Description** |
+| --- | :-------: | --------- | -------------------------------------- |
+| 1 | [{+ \| -}]123 | BIGINT | Numeric constants are treated as BIGINT type. The value will be truncated if it exceeds the range of BIGINT type. |
+| 2 | 123.45 | DOUBLE | Floating number constants are treated as DOUBLE type. TDengine determines whether it's a floating number based on if decimal point or scientific notation is used. |
+| 3 | 1.2E3 | DOUBLE | Constants in scientific notation are treated as DOUBLE type. |
+| 4 | 'abc' | BINARY | String constants enclosed by single quotes are treated as BINARY type. Its size is determined as the actual length. Single quote itself can be included by preceding backslash, i.e. `\'`, in a string constant. |
+| 5 | "abc" | BINARY | String constants enclosed by double quotes are treated as BINARY type. Its size is determined as the actual length. Double quote itself can be included by preceding backslash, i.e. `\"`, in a string constant. |
+| 6 | TIMESTAMP {'literal' \| "literal"} | TIMESTAMP | A string constant following `TIMESTAMP` keyword is treated as TIMESTAMP type. The string should be in the format of "YYYY-MM-DD HH:mm:ss.MS". Its time precision is the same as that of the current database being used. |
+| 7 | {TRUE \| FALSE} | BOOL | BOOL type constant. |
+| 8 | {'' \| "" \| '\t' \| "\t" \| ' ' \| " " \| NULL } | -- | NULL constant, it can be used for any type.|
+
 :::note
-Numeric values in SQL statements will be determined as integer or float type according to whether there is decimal point or whether scientific notation is used, so attention must be paid to avoid overflow. For example, 9999999999999999999 will be considered as overflow because it exceeds the upper limit of long integer, but 9999999999999999999.0 will be considered as a legal float number.
+- TDengine determines whether it's a floating number based on if decimal point or scientific notation is used. So whether the value is determined as overflow depends on both the value and the determined type. For example, 9999999999999999999 is determined as overflow because it exceeds the upper limit of BIGINT type, while 9999999999999999999.0 is considered as a valid floating number because it is within the range of DOUBLE type.
 
 :::
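A minimal sketch of these literal forms (the table `t1` and its columns are hypothetical, not part of this patch):

```sql
-- An integer literal is parsed as BIGINT, a value with a decimal point as DOUBLE,
-- and a quoted string as BINARY (note the escaped single quote).
INSERT INTO t1 VALUES (1626006833639, 123, 123.45, 'it\'s a binary value');

-- A string literal after the TIMESTAMP keyword is interpreted using the current database precision.
SELECT * FROM t1 WHERE ts > TIMESTAMP '2017-08-12 18:25:58.128';
```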
diff --git a/docs-en/12-taos-sql/03-table.md b/docs-en/12-taos-sql/03-table.md
index 0505787ff8cc597eafd8299292ebac3e0fd3d4ad..f065a8e2396583bb7a512446b513ed60056ad55e 100644
--- a/docs-en/12-taos-sql/03-table.md
+++ b/docs-en/12-taos-sql/03-table.md
@@ -14,7 +14,7 @@ CREATE TABLE [IF NOT EXISTS] tb_name (timestamp_field_name TIMESTAMP, field1_nam
 
 1. The first column of a table MUST be of type TIMESTAMP. It is automatically set as the primary key.
 2. The maximum length of the table name is 192 bytes.
-3. The maximum length of each row is 16k bytes, please note that the extra 2 bytes used by each BINARY/NCHAR column are also counted.
+3. The maximum length of each row is 48k bytes, please note that the extra 2 bytes used by each BINARY/NCHAR column are also counted.
 4. The name of the subtable can only consist of characters from the English alphabet, digits and underscore. Table names can't start with a digit. Table names are case insensitive.
 5. The maximum length in bytes must be specified when using BINARY or NCHAR types.
 6. Escape character "\`" can be used to avoid the conflict between table names and reserved keywords, above rules will be bypassed when using escape character on table names, but the upper limit for the name length is still valid. The table names specified using escape character are case sensitive. Only ASCII visible characters can be used with escape character.
diff --git a/docs-en/12-taos-sql/07-function.md b/docs-en/12-taos-sql/07-function.md
index 0d6e7f25649872f514dce21bcba38a3af4ba7a5d..1a0dc28fa048c6c6d9a911a1e6719cf370592fdf 100644
--- a/docs-en/12-taos-sql/07-function.md
+++ b/docs-en/12-taos-sql/07-function.md
@@ -22,8 +22,8 @@ SELECT COUNT([*|field_name]) FROM tb_name [WHERE clause];
 
 **More explanation**:
 
-- Wildcard (\*) can be used to represent all columns, it's used to get the number of all rows
-- The number of non-NULL values will be returned if this function is used on a specific column
+- Wildcard (\*) is used to represent all columns. The `COUNT` function is used to get the total number of all rows.
+- The number of non-NULL values will be returned if this function is used on a specific column.
 
 **Examples**:
 
@@ -87,7 +87,7 @@ SELECT TWA(field_name) FROM tb_name WHERE clause;
 
 **More explanations**:
 
-- From version 2.1.3.0, function TWA can be used on stable with `GROUP BY`, i.e. timelines generated by `GROUP BY tbname` on a STable.
+- Since version 2.1.3.0, function TWA can be used on a STable with `GROUP BY`, i.e. timelines generated by `GROUP BY tbname` on a STable.
 
 ### IRATE
 
@@ -105,7 +105,7 @@ SELECT IRATE(field_name) FROM tb_name WHERE clause;
 
 **More explanations**:
 
-- From version 2.1.3.0, function IRATE can be used on stble with `GROUP BY`, i.e. timelines generated by `GROUP BY tbname` on a STable.
+- Since version 2.1.3.0, function IRATE can be used on a STable with `GROUP BY`, i.e. timelines generated by `GROUP BY tbname` on a STable.
 
 ### SUM
 
@@ -149,7 +149,7 @@ SELECT STDDEV(field_name) FROM tb_name [WHERE clause];
 
 **Applicable column types**: Data types except for timestamp, binary, nchar and bool
 
-**Applicable table types**: table, STable (starting from version 2.0.15.1)
+**Applicable table types**: table, STable (since version 2.0.15.1)
 
 **Examples**:
 
@@ -193,13 +193,13 @@ SELECT MODE(field_name) FROM tb_name [WHERE clause];
 
 **Description**:The value which has the highest frequency of occurrence. NULL is returned if there are multiple values which have highest frequency of occurrence. It can't be used on timestamp column or tags.
 
-**Return value type**:Same as the data type of the column being operated
+**Return value type**:Same as the data type of the column being operated upon
 
 **Applicable column types**:Data types except for timestamp
 
 **More explanations**:Considering the number of returned result set is unpredictable, it's suggested to limit the number of unique values to 100,000, otherwise error will be returned.
-**Applicable version**:From version 2.6.0.0
+**Applicable version**: Since version 2.6.0.0

 **Examples**:

@@ -234,7 +234,7 @@ SELECT HYPERLOGLOG(field_name) FROM { tb_name | stb_name } [WHERE clause];

 **More explanations**: The benefit of using the hyperloglog algorithm is that the memory usage is under control when the data volume is huge. However, when the data volume is very small, the result may not be accurate; it's recommended to use `select count(data) from (select unique(col) as data from table)` in this case.

-**Applicable versions**:From version 2.6.0.0
+**Applicable versions**: Since version 2.6.0.0

 **Examples**:

@@ -259,6 +259,100 @@ taos> select hyperloglog(dbig) from shll;
 Query OK, 1 row(s) in set (0.008388s)
 ```

+### HISTOGRAM
+
+```
+SELECT HISTOGRAM(field_name, bin_type, bin_description, normalized) FROM tb_name [WHERE clause];
+```
+
+**Description**: Returns the count of data points in user-specified ranges.
+
+**Return value type**: Double or INT64, depending on the normalized parameter setting.
+
+**Applicable column type**: Numerical types.
+
+**Applicable versions**: Since version 2.6.0.0.
+
+**Applicable table types**: table, STable
+
+**Explanations**:
+
+1. bin_type: parameter to indicate the bucket type; valid inputs are: "user_input", "linear_bin", "log_bin".
+2. bin_description: parameter to describe how to generate buckets; it can be in the following JSON formats for each bin_type respectively:
+
+   - "user_input": "[1, 3, 5, 7]": User specified bin values.
+
+   - "linear_bin": "{"start": 0.0, "width": 5.0, "count": 5, "infinity": true}"
+     "start" - bin starting point.
+     "width" - bin width.
+     "count" - number of bins generated.
+     "infinity" - whether to add (-inf, inf) as start/end point in the generated set of bins.
+     The above "linear_bin" descriptor generates a set of bins: [-inf, 0.0, 5.0, 10.0, 15.0, 20.0, +inf].
+
+   - "log_bin": "{"start":1.0, "factor": 2.0, "count": 5, "infinity": true}"
+     "start" - bin starting point.
+     "factor" - exponential factor between consecutive bin boundaries.
+     "count" - number of bins generated.
+     "infinity" - whether to add (-inf, inf) as start/end point in the generated range of bins.
+     The above "log_bin" descriptor generates a set of bins: [-inf, 1.0, 2.0, 4.0, 8.0, 16.0, +inf].
+
+3. normalized: set to 1/0 to turn on/off result normalization.
+
+**Example**:
+
+```mysql
+taos> SELECT HISTOGRAM(voltage, "user_input", "[1,3,5,7]", 1) FROM meters;
+ histogram(voltage, "user_input", "[1,3,5,7]", 1) |
+ =======================================================
+ {"lower_bin":1, "upper_bin":3, "count":0.333333} |
+ {"lower_bin":3, "upper_bin":5, "count":0.333333} |
+ {"lower_bin":5, "upper_bin":7, "count":0.333333} |
+ Query OK, 3 row(s) in set (0.004273s)
+
+taos> SELECT HISTOGRAM(voltage, 'linear_bin', '{"start": 1, "width": 3, "count": 3, "infinity": false}', 0) FROM meters;
+ histogram(voltage, 'linear_bin', '{"start": 1, "width": 3, " |
+ ===================================================================
+ {"lower_bin":1, "upper_bin":4, "count":3} |
+ {"lower_bin":4, "upper_bin":7, "count":3} |
+ {"lower_bin":7, "upper_bin":10, "count":3} |
+ Query OK, 3 row(s) in set (0.004887s)
+
+taos> SELECT HISTOGRAM(voltage, 'log_bin', '{"start": 1, "factor": 3, "count": 3, "infinity": true}', 0) FROM meters;
+ histogram(voltage, 'log_bin', '{"start": 1, "factor": 3, "count" |
+ ===================================================================
+ {"lower_bin":-inf, "upper_bin":1, "count":3} |
+ {"lower_bin":1, "upper_bin":3, "count":2} |
+ {"lower_bin":3, "upper_bin":9, "count":6} |
+ {"lower_bin":9, "upper_bin":27, "count":3} |
+ {"lower_bin":27, "upper_bin":inf, "count":1} |
+```
+
+### ELAPSED
+
+```mysql
+SELECT ELAPSED(field_name[, time_unit]) FROM { tb_name | stb_name } [WHERE clause] [INTERVAL(interval [, offset]) [SLIDING sliding]];
+```
+
+**Description**: The `elapsed` function can be used to calculate the continuous time length in which there is valid data. If it's used with an `INTERVAL` clause, the returned result is the calculated time length within each time window. If it's used without an `INTERVAL` clause, the returned result is the calculated time length within the specified time range. Please note that the return value of `elapsed` is the number of `time_unit` in the calculated time length.
+
+**Return value type**: Double
+
+**Applicable column type**: Timestamp
+
+**Applicable versions**: Since version 2.6.0.0
+
+**Applicable tables**: table, STable, outer query in nested query
+
+**Explanations**:
+- The `field_name` parameter can only be the first column of a table, i.e. the timestamp primary key.
+- The minimum value of `time_unit` is the time precision of the database. If `time_unit` is not specified, the time precision of the database is used as the default time unit.
+- It can be used with `INTERVAL` to get the valid time length of each time window. Please note that the return value is the same as the length of the time window for all time windows except for the first and the last time window.
+- `order by asc/desc` has no effect on the result.
+- `group by tbname` must be used together when `elapsed` is used against a STable.
+- `group by` must NOT be used together when `elapsed` is used against a table or subtable.
+- When used in a nested query, it's only applicable when the inner query outputs an implicit timestamp column as the primary key. For example, `select elapsed(ts) from (select diff(value) from sub1)` is legal usage while `select elapsed(ts) from (select * from sub1)` is not.
+- It can't be used with `leastsquares`, `diff`, `derivative`, `top`, `bottom`, `last_row`, `interp`.
+
 ## Selection Functions

 When any select function is used, the timestamp column or tag columns including `tbname` can be specified to indicate which rows the selected values come from, as illustrated in the sketch below.
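+
+As a hedged illustration of the sentence above, the sketch below reuses the `meters` schema (columns `ts`, `current`, `voltage`, `phase`; tags `location`, `groupId`) that appears in examples elsewhere in these docs; it is a usage sketch, not an authoritative output reference.
+
+```sql
+-- Select the timestamp and a tag alongside a select function, to see
+-- which row the maximum came from. Schema assumed from the docs' meters example.
+SELECT MAX(current), ts, location FROM meters;
+```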
@@ -271,7 +365,7 @@ SELECT MIN(field_name) FROM {tb_name | stb_name} [WHERE clause]; **Description**: The minimum value of a specific column in a table or STable -**Return value type**: Same as the data type of the column being operated +**Return value type**: Same as the data type of the column being operated upon **Applicable column types**: Data types except for timestamp, binary, nchar and bool @@ -301,7 +395,7 @@ SELECT MAX(field_name) FROM { tb_name | stb_name } [WHERE clause]; **Description**: The maximum value of a specific column of a table or STable -**Return value type**: Same as the data type of the column being operated +**Return value type**: Same as the data type of the column being operated upon **Applicable column types**: Data types except for timestamp, binary, nchar and bool @@ -331,7 +425,7 @@ SELECT FIRST(field_name) FROM { tb_name | stb_name } [WHERE clause]; **Description**: The first non-null value of a specific column in a table or STable -**Return value type**: Same as the column being operated +**Return value type**: Same as the column being operated upon **Applicable column types**: Any data type @@ -341,7 +435,7 @@ SELECT FIRST(field_name) FROM { tb_name | stb_name } [WHERE clause]; - FIRST(\*) can be used to get the first non-null value of all columns - NULL will be returned if all the values of the specified column are all NULL -- No result will NOT be returned if all the columns in the result set are all NULL +- A result will NOT be returned if all the columns in the result set are all NULL **Examples**: @@ -367,7 +461,7 @@ SELECT LAST(field_name) FROM { tb_name | stb_name } [WHERE clause]; **Description**: The last non-NULL value of a specific column in a table or STable -**Return value type**: Same as the column being operated +**Return value type**: Same as the column being operated upon **Applicable column types**: Any data type @@ -403,7 +497,7 @@ SELECT TOP(field_name, K) FROM { tb_name | stb_name } [WHERE clause]; **Description**: The greatest _k_ values of a specific column in a table or STable. If a value has multiple occurrences in the column but counting all of them in will exceed the upper limit _k_, then a part of them will be returned randomly. -**Return value type**: Same as the column being operated +**Return value type**: Same as the column being operated upon **Applicable column types**: Data types except for timestamp, binary, nchar and bool @@ -442,7 +536,7 @@ SELECT BOTTOM(field_name, K) FROM { tb_name | stb_name } [WHERE clause]; **Description**: The least _k_ values of a specific column in a table or STable. If a value has multiple occurrences in the column but counting all of them in will exceed the upper limit _k_, then a part of them will be returned randomly. 
-**Return value type**: Same as the column being operated +**Return value type**: Same as the column being operated upon **Applicable column types**: Data types except for timestamp, binary, nchar and bool @@ -549,7 +643,7 @@ SELECT LAST_ROW(field_name) FROM { tb_name | stb_name }; **Description**: The last row of a table or STable -**Return value type**: Same as the column being operated +**Return value type**: Same as the column being operated upon **Applicable column types**: Any data type @@ -576,7 +670,7 @@ SELECT LAST_ROW(field_name) FROM { tb_name | stb_name }; Query OK, 1 row(s) in set (0.001042s) ``` -### INTERP [From version 2.3.1] +### INTERP [Since version 2.3.1] ``` SELECT INTERP(field_name) FROM { tb_name | stb_name } [WHERE where_condition] [ RANGE(timestamp1,timestamp2) ] [EVERY(interval)] [FILL ({ VALUE | PREV | NULL | LINEAR | NEXT})]; @@ -584,7 +678,7 @@ SELECT INTERP(field_name) FROM { tb_name | stb_name } [WHERE where_condition] [ **Description**: The value that matches the specified timestamp range is returned, if existing; or an interpolation value is returned. -**Return value type**: Same as the column being operated +**Return value type**: Same as the column being operated upon **Applicable column types**: Numeric data types @@ -593,7 +687,7 @@ SELECT INTERP(field_name) FROM { tb_name | stb_name } [WHERE where_condition] [ **More explanations** - `INTERP` is used to get the value that matches the specified time slice from a column. If no such value exists an interpolation value will be returned based on `FILL` parameter. -- The input data of `INTERP` is the value of the specified column, `where` can be used to filter the original data. If no `where` condition is specified then all original data is the input. +- The input data of `INTERP` is the value of the specified column and a `where` clause can be used to filter the original data. If no `where` condition is specified then all original data is the input. - The output time range of `INTERP` is specified by `RANGE(timestamp1,timestamp2)` parameter, with timestamp1<=timestamp2. timestamp1 is the starting point of the output time range and must be specified. timestamp2 is the ending point of the output time range and must be specified. If `RANGE` is not specified, then the timestamp of the first row that matches the filter condition is treated as timestamp1, the timestamp of the last row that matches the filter condition is treated as timestamp2. - The number of rows in the result set of `INTERP` is determined by the parameter `EVERY`. Starting from timestamp1, one interpolation is performed for every time interval specified `EVERY` parameter. If `EVERY` parameter is not used, the time windows will be considered as no ending timestamp, i.e. there is only one time window from timestamp1. - Interpolation is performed based on `FILL` parameter. No interpolation is performed if `FILL` is not used, that means either the original data that matches is returned or nothing is returned. 
@@ -632,7 +726,7 @@ SELECT INTERP(field_name) FROM { tb_name | stb_name } [WHERE where_condition] [
 taos> SELECT INTERP(current) FROM t1 where ts >= '2017-07-14 17:00:00' and ts <= '2017-07-14 20:00:00' RANGE('2017-7-14 18:00:00','2017-7-14 19:00:00') EVERY(5s) FILL(LINEAR);
 ```

-### INTERP [Prior to version 2.3.1]
+### INTERP [Since version 2.0.15.0]

 ```
 SELECT INTERP(field_name) FROM { tb_name | stb_name } WHERE ts='timestamp' [FILL ({ VALUE | PREV | NULL | LINEAR | NEXT})];
@@ -640,7 +734,7 @@ SELECT INTERP(field_name) FROM { tb_name | stb_name } WHERE ts='timestamp' [FILL

 **Description**: The value of a specific column that matches the specified time slice

-**Return value type**: Same as the column being operated
+**Return value type**: Same as the column being operated upon

 **Applicable column types**: Numeric data type

@@ -648,7 +742,6 @@ SELECT INTERP(field_name) FROM { tb_name | stb_name } WHERE ts='timestamp' [FILL

 **More explanations**:

-- It can be used from version 2.0.15.0
 - Time slice must be specified. If there is no data matching the specified time slice, interpolation is performed based on the `FILL` parameter. Conditions such as tags or `tbname` in the `WHERE` clause can be used to filter data.
 - The timestamp specified must be within the time range of the data rows of the table or STable. If it is beyond the valid time range, nothing is returned even with the `FILL` parameter.
 - `INTERP` can be used to query only a single time point at a time. `INTERP` can be used with `EVERY` to get the interpolation value every time interval.
@@ -696,11 +789,11 @@ SELECT TAIL(field_name, k, offset_val) FROM {tb_name | stb_name} [WHERE clause];

 **Parameter value range**: k: [1,100] offset_val: [0,100]

-**Return value type**: Same as the column being operated
+**Return value type**: Same as the column being operated upon

 **Applicable column types**: Any data type except for timestamp, i.e. the primary key

-**Applicable versions**: From version 2.6.0.0
+**Applicable versions**: Since version 2.6.0.0

 **Examples**:

@@ -732,11 +825,11 @@ SELECT UNIQUE(field_name) FROM {tb_name | stb_name} [WHERE clause];

 **Description**: The values that occur for the first time in the specified column. The effect is similar to the `distinct` keyword, but it can also be used to match tags or timestamp.

-**Return value type**: Same as the column or tag being operated
+**Return value type**: Same as the column or tag being operated upon

 **Applicable column types**: Any data types except for timestamp

-**Applicable versions**: From version 2.6.0.0
+**Applicable versions**: Since version 2.6.0.0

 **More explanations**:

@@ -780,7 +873,7 @@ SELECT {DIFF(field_name, ignore_negative) | DIFF(field_name)} FROM tb_name [WHER

 **Description**: The difference of each row with its previous row for a specific column. `ignore_negative` can be specified as 0 or 1; the default value is 1 if it's not specified. `1` means negative values are ignored.
-**Return value type**: Same as the column being operated
+**Return value type**: Same as the column being operated upon

 **Applicable column types**: Data types except for timestamp, binary, nchar and bool

@@ -789,8 +882,8 @@ SELECT {DIFF(field_name, ignore_negative) | DIFF(field_name)} FROM tb_name [WHER
 **More explanations**:

 - The number of result rows is the number of rows subtracted by one, no output for the first row
-- From version 2.1.30, `DIFF` can be used on STable with `GROUP by tbname`
-- From version 2.6.0, `ignore_negative` parameter is supported
+- Since version 2.1.30, `DIFF` can be used on STable with `GROUP by tbname`
+- Since version 2.6.0, `ignore_negative` parameter is supported

 **Examples**:

@@ -874,7 +967,7 @@ Query OK, 1 row(s) in set (0.000836s)
 SELECT CEIL(field_name) FROM { tb_name | stb_name } [WHERE clause];
 ```

-**Description**: The round up value of a specific column
+**Description**: The rounded up value of a specific column

 **Return value type**: Same as the column being used

@@ -896,9 +989,9 @@ SELECT CEIL(field_name) FROM { tb_name | stb_name } [WHERE clause];
 SELECT FLOOR(field_name) FROM { tb_name | stb_name } [WHERE clause];
 ```

-**Description**: The round down value of a specific column
+**Description**: The rounded down value of a specific column

-**More explanations**: The restrictions are same as `CEIL` function.
+**More explanations**: The restrictions are the same as those of the `CEIL` function.

 ### ROUND

@@ -906,7 +999,7 @@ SELECT FLOOR(field_name) FROM { tb_name | stb_name } [WHERE clause];
 SELECT ROUND(field_name) FROM { tb_name | stb_name } [WHERE clause];
 ```

-**Description**: The round value of a specific column.
+**Description**: The rounded value of a specific column.

 **More explanations**: The restrictions are the same as those of the `CEIL` function.

@@ -933,7 +1026,7 @@ SELECT ROUND(field_name) FROM { tb_name | stb_name } [WHERE clause];

 - Can only be used with aggregate functions
 - `Group by tbname` must be used together on a STable to force the result on a single timeline

-**Applicable versions**: From 2.3.0.x
+**Applicable versions**: Since 2.3.0.x

 ### MAVG

@@ -958,7 +1051,7 @@ SELECT ROUND(field_name) FROM { tb_name | stb_name } [WHERE clause];

 - Can't be used with aggregate functions.
 - Must be used with `GROUP BY tbname` when it's used on a STable to force the result on each single timeline.

-**Applicable versions**: From 2.3.0.x
+**Applicable versions**: Since 2.3.0.x

 ### SAMPLE

@@ -981,7 +1074,7 @@ SELECT ROUND(field_name) FROM { tb_name | stb_name } [WHERE clause];

 - Arithmetic operation can't be performed on the result of the `SAMPLE` function
 - Must be used with `Group by tbname` when it's used on a STable to force the result on each single timeline

-**Applicable versions**: From 2.3.0.x
+**Applicable versions**: Since 2.3.0.x

 ### ASIN

@@ -1444,37 +1537,6 @@ SELECT SUBSTR(str,pos[,len]) FROM { tb_name | stb_name } [WHERE clause]
 - Parameter `pos` can be a positive or negative integer; if it's positive, the starting position will be counted from the beginning of the string; if it's negative, the starting position will be counted from the end of the string.
 - If `len` is not specified, it means from `pos` to the end; see the sketch below.
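+
+A minimal hedged sketch of `SUBSTR` under the rules above; the table `t1` and the string column `name` are invented for illustration and are not taken from these docs.
+
+```sql
+-- First 3 characters of the column, then the 3 characters counted
+-- from the end of the string; t1 and name are assumed names.
+SELECT SUBSTR(name, 1, 3), SUBSTR(name, -3) FROM t1;
+```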
-### Arithmetic Operations
-
-```
-SELECT field_name [+|-|*|/|%][Value|field_name] FROM { tb_name | stb_name } [WHERE clause];
-```
-
-**Description**: The sum, difference, product, quotient, or remainder between one or more columns
-
-**Return value type**: Double precision floating point
-
-**Applicable column types**: Data types except for timestamp, binary, nchar, bool
-
-**Applicable table types**: table, STable
-
-**More explanations**:
-
-- Arithmetic operations can be performed on two or more columns, `()` can be used to control the precedence
-- NULL doesn't participate the operation, if one of the operands is NULL then result is NULL
-
-**Examples**:
-
-```
-taos> SELECT current + voltage * phase FROM d1001;
-(current+(voltage*phase)) |
-============================
- 78.190000713 |
- 84.540003240 |
- 80.810000718 |
-Query OK, 3 row(s) in set (0.001046s)
-```
-
 ### STATECOUNT

 ```
@@ -1586,7 +1648,7 @@ Query OK, 6 row(s) in set (0.002613s)

 ## Time Functions

-From version 2.6.0.0, below time related functions can be used in TDengine.
+Since version 2.6.0.0, the time related functions below can be used in TDengine.

 ### NOW

@@ -1840,6 +1902,8 @@ SELECT TIMEDIFF(ts_val1 | datetime_string1 | ts_col1, ts_val2 | datetime_string2
    1u(microsecond),1a(millisecond),1s(second),1m(minute),1h(hour),1d(day).
 - The precision of the returned timestamp is the same as the precision set for the current database in use

+**Applicable versions**: Since version 2.6.0.0
+
 **Examples**:

 ```sql
diff --git a/docs-en/12-taos-sql/08-interval.md b/docs-en/12-taos-sql/08-interval.md
index 1b5265b44b6b63f8f5472e1e8760d1f45401fc21..acfb0de0e1521fd8c6a068497a3df7a17941524c 100644
--- a/docs-en/12-taos-sql/08-interval.md
+++ b/docs-en/12-taos-sql/08-interval.md
@@ -3,36 +3,36 @@ sidebar_label: Interval
 title: Aggregate by Time Window
 ---

-Aggregate by time window is supported in TDengine. For example, each temperature sensor reports the temperature every second, the average temperature every 10 minutes can be retrieved by query with time window.
-Window related clauses are used to divide the data set to be queried into subsets and then aggregate. There are three kinds of windows, time window, status window, and session window. There are two kinds of time windows, sliding window and flip time window.
+Aggregation by time window is supported in TDengine. For example, in the case where temperature sensors report the temperature every second, the average temperature for every 10 minutes can be retrieved by performing a query with a time window.
+Window related clauses are used to divide the data set to be queried into subsets and then aggregation is performed across the subsets. There are three kinds of windows: time window, status window, and session window. There are two kinds of time windows: sliding window and flip time/tumbling window.

 ## Time Window

-`INTERVAL` clause is used to generate time windows of the same time interval, `SLIDING` is used to specify the time step for which the time window moves forward. The query is performed on one time window each time, and the time window moves forward with time. When defining continuous query both the size of time window and the step of forward sliding time need to be specified. As shown in the figure blow, [t0s, t0e] ,[t1s , t1e], [t2s, t2e] are respectively the time ranges of three time windows on which continuous queries are executed. The time step for which time window moves forward is marked by `sliding time`.
Query, filter and aggregate operations are executed on each time window respectively. When the time step specified by `SLIDING` is same as the time interval specified by `INTERVAL`, the sliding time window is actually a flip time window.
+The `INTERVAL` clause is used to generate time windows of the same time interval. The `SLIDING` parameter is used to specify the time step for which the time window moves forward. The query is performed on one time window each time, and the time window moves forward with time. When defining a continuous query, both the size of the time window and the step of forward sliding time need to be specified. As shown in the figure below, [t0s, t0e], [t1s, t1e], [t2s, t2e] are respectively the time ranges of three time windows on which continuous queries are executed. The time step for which the time window moves forward is marked by `sliding time`. Query, filter and aggregate operations are executed on each time window respectively. When the time step specified by `SLIDING` is the same as the time interval specified by `INTERVAL`, the sliding time window is actually a flip time/tumbling window.

 ![TDengine Database Time Window](./timewindow-1.webp)

-`INTERVAL` and `SLIDING` should be used with aggregate functions and select functions. Below SQL statement is illegal because no aggregate or selection function is used with `INTERVAL`.
+`INTERVAL` and `SLIDING` should be used with aggregate functions and select functions. The SQL statement below is illegal because no aggregate or selection function is used with `INTERVAL`.

 ```
 SELECT * FROM temp_tb_1 INTERVAL(1m);
 ```

-The time step specified by `SLIDING` can't exceed the time interval specified by `INTERVAL`. Below SQL statement is illegal because the time length specified by `SLIDING` exceeds that specified by `INTERVAL`.
+The time step specified by `SLIDING` cannot exceed the time interval specified by `INTERVAL`. The SQL statement below is illegal because the time length specified by `SLIDING` exceeds that specified by `INTERVAL`.

 ```
 SELECT COUNT(*) FROM temp_tb_1 INTERVAL(1m) SLIDING(2m);
 ```

-When the time length specified by `SLIDING` is the same as that specified by `INTERVAL`, the sliding window is actually a flip window. The minimum time range specified by `INTERVAL` is 10 milliseconds (10a) prior to version 2.1.5.0. From version 2.1.5.0, the minimum time range by `INTERVAL` can be 1 microsecond (1u). However, if the DB precision is millisecond, the minimum time range is 1 millisecond (1a). Please note that the `timezone` parameter should be configured to be the same value in the `taos.cfg` configuration file on client side and server side.
+When the time length specified by `SLIDING` is the same as that specified by `INTERVAL`, the sliding window is actually a flip/tumbling window. The minimum time range specified by `INTERVAL` is 10 milliseconds (10a) prior to version 2.1.5.0. Since version 2.1.5.0, the minimum time range specified by `INTERVAL` can be 1 microsecond (1u). However, if the DB precision is millisecond, the minimum time range is 1 millisecond (1a). Please note that the `timezone` parameter should be configured to be the same value in the `taos.cfg` configuration file on client side and server side.

 ## Status Window

-In case of using integer, bool, or string to represent the device status at a moment, the continuous rows with same status belong to same status window. Once the status changes, the status window closes.
As shown in the following figure, there are two status windows according to status, [2019-04-28 14:22:07,2019-04-28 14:22:10] and [2019-04-28 14:22:11,2019-04-28 14:22:12]. Status window is not applicable to STable for now.
+In case of using integer, bool, or string to represent the status of a device at any given moment, continuous rows with the same status belong to a status window. Once the status changes, the status window closes. As shown in the following figure, there are two status windows according to status, [2019-04-28 14:22:07,2019-04-28 14:22:10] and [2019-04-28 14:22:11,2019-04-28 14:22:12]. Status window is not applicable to STable for now.

 ![TDengine Database Status Window](./timewindow-3.webp)

-`STATE_WINDOW` is used to specify the column based on which to define status window, for example:
+`STATE_WINDOW` is used to specify the column on which the status window will be based. For example:

 ```
 SELECT COUNT(*), FIRST(ts), status FROM temp_tb_1 STATE_WINDOW(status);
@@ -44,7 +44,7 @@ SELECT COUNT(*), FIRST(ts), status FROM temp_tb_1 STATE_WINDOW(status);
 SELECT COUNT(*), FIRST(ts) FROM temp_tb_1 SESSION(ts, tol_val);
 ```

-The primary key, i.e. timestamp, is used to determine which session window the row belongs to. If the time interval between two adjacent rows is within the time range specified by `tol_val`, they belong to the same session window; otherwise they belong to two different time windows. As shown in the figure below, if the limit of time interval for the session window is specified as 12 seconds, then the 6 rows in the figure constitutes 2 time windows, [2019-04-28 14:22:10,2019-04-28 14:22:30] and [2019-04-28 14:23:10,2019-04-28 14:23:30], because the time difference between 2019-04-28 14:22:30 and 2019-04-28 14:23:10 is 40 seconds, which exceeds the time interval limit of 12 seconds.
+The primary key, i.e. timestamp, is used to determine which session window a row belongs to. If the time interval between two adjacent rows is within the time range specified by `tol_val`, they belong to the same session window; otherwise they belong to two different session windows. As shown in the figure below, if the limit of time interval for the session window is specified as 12 seconds, then the 6 rows in the figure constitute 2 session windows, [2019-04-28 14:22:10,2019-04-28 14:22:30] and [2019-04-28 14:23:10,2019-04-28 14:23:30], because the time difference between 2019-04-28 14:22:30 and 2019-04-28 14:23:10 is 40 seconds, which exceeds the time interval limit of 12 seconds.

 ![TDengine Database Session Window](./timewindow-2.webp)

@@ -73,7 +73,7 @@ SELECT function_list FROM stb_name

 ### Restrictions

-- Aggregate functions and select functions can be used in `function_list`, with each function having only one output, for example COUNT, AVG, SUM, STDDEV, LEASTSQUARES, PERCENTILE, MIN, MAX, FIRST, LAST. Functions having multiple output can't be used, for example DIFF or arithmetic operations.
+- Aggregate functions and select functions can be used in `function_list`, with each function having only one output, for example COUNT, AVG, SUM, STDDEV, LEASTSQUARES, PERCENTILE, MIN, MAX, FIRST, LAST. Functions having multiple outputs, such as DIFF or arithmetic operations, can't be used.
 - `LAST_ROW` can't be used together with window aggregate.
 - Scalar functions, like CEIL/FLOOR, can't be used with window aggregate.
 - `WHERE` clause can be used to specify the starting and ending time and other filter conditions
@@ -87,8 +87,8 @@ SELECT function_list FROM stb_name

 :::info
-1.
Huge volume of interpolation output may be returned using `FILL`, so it's recommended to specify the time range when using `FILL`. The maximum interpolation values that can be returned in single query is 10,000,000. -2. The result set is in ascending order of timestamp in aggregate by time window aggregate. +1. A huge volume of interpolation output may be returned using `FILL`, so it's recommended to specify the time range when using `FILL`. The maximum number of interpolation values that can be returned in a single query is 10,000,000. +2. The result set is in ascending order of timestamp when you aggregate by time window. 3. If aggregate by window is used on STable, the aggregate function is performed on all the rows matching the filter conditions. If `GROUP BY` is not used in the query, the result set will be returned in ascending order of timestamp; otherwise the result set is not exactly in the order of ascending timestamp in each group. ::: @@ -97,13 +97,13 @@ Aggregate by time window is also used in continuous query, please refer to [Cont ## Examples -The table of intelligent meters can be created by the SQL statement below: +A table of intelligent meters can be created by the SQL statement below: ```sql CREATE TABLE meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT); ``` -The average current, maximum current and median of current in every 10 minutes for the past 24 hours can be calculated using the below SQL statement, with missing values filled with the previous non-NULL values. +The average current, maximum current and median of current in every 10 minutes for the past 24 hours can be calculated using the SQL statement below, with missing values filled with the previous non-NULL values. ``` SELECT AVG(current), MAX(current), APERCENTILE(current, 50) FROM meters diff --git a/docs-en/12-taos-sql/09-limit.md b/docs-en/12-taos-sql/09-limit.md index b987cbcb7886dd35d4fbfefb945d8f36f8d4f399..db55cdd69e7bd29ca66ee15b61f28991568d9556 100644 --- a/docs-en/12-taos-sql/09-limit.md +++ b/docs-en/12-taos-sql/09-limit.md @@ -4,8 +4,8 @@ title: Limits & Restrictions ## Naming Rules -1. Only English characters, digits and underscore are allowed -2. Can't start with a digit +1. Only characters from the English alphabet, digits and underscore are allowed +2. Names cannot start with a digit 3. Case insensitive without escape character "\`" 4. Identifier with escape character "\`" To support more flexible table or column names, a new escape character "\`" is introduced. For more details please refer to [escape](/taos-sql/escape). @@ -16,38 +16,38 @@ The legal character set is `[a-zA-Z0-9!?$%^&*()_–+={[}]:;@~#|<,>.?/]`. ## General Limits -- Maximum length of database name is 32 bytes -- Maximum length of table name is 192 bytes, excluding the database name prefix and the separator -- Maximum length of each data row is 48K bytes from version 2.1.7.0 , before which the limit is 16K bytes. Please note that the upper limit includes the extra 2 bytes consumed by each column of BINARY/NCHAR type. -- Maximum of column name is 64. +- Maximum length of database name is 32 bytes. +- Maximum length of table name is 192 bytes, excluding the database name prefix and the separator. +- Maximum length of each data row is 48K bytes since version 2.1.7.0 , before which the limit was 16K bytes. Please note that the upper limit includes the extra 2 bytes consumed by each column of BINARY/NCHAR type. +- Maximum length of column name is 64. 
- Maximum number of columns is 4096. There must be at least 2 columns, and the first column must be timestamp.
 - Maximum length of tag name is 64.
 - Maximum number of tags is 128. There must be at least 1 tag. The total length of tag values should not exceed 16K bytes.
-- Maximum length of singe SQL statement is 1048576, i.e. 1 MB bytes. It can be configured in the parameter `maxSQLLength` in the client side, the applicable range is [65480, 1048576].
-- At most 4096 columns (or 1024 prior to 2.1.7.0) can be returned by `SELECT`, functions in the query statement may constitute columns. Error will be returned if the limit is exceeded.
-- Maximum numbers of databases, STables, tables are only depending on the system resources.
+- Maximum length of a single SQL statement is 1048576, i.e. 1 MB. It can be configured in the parameter `maxSQLLength` on the client side; the applicable range is [65480, 1048576].
+- At most 4096 columns (or 1024 prior to 2.1.7.0) can be returned by `SELECT`. Functions in the query statement constitute columns. An error is returned if the limit is exceeded.
+- Maximum numbers of databases, STables and tables are dependent only on the system resources.
 - Maximum of database name is 32 bytes, and it can't include "." or special characters.
-- Maximum replica number of database is 3
-- Maximum length of user name is 23 bytes
-- Maximum length of password is 15 bytes
-- Maximum number of rows depends on the storage space only.
-- Maximum number of tables depends on the number of nodes only.
-- Maximum number of databases depends on the number of nodes only.
-- Maximum number of vnodes for single database is 64.
+- Maximum number of replicas for a database is 3.
+- Maximum length of user name is 23 bytes.
+- Maximum length of password is 15 bytes.
+- Maximum number of rows depends only on the storage space.
+- Maximum number of tables depends only on the number of nodes.
+- Maximum number of databases depends only on the number of nodes.
+- Maximum number of vnodes for a single database is 64.

 ## Restrictions of `GROUP BY`

-`GROUP BY` can be performed on tags and `TBNAME`. It can be performed on data columns too, with one restriction that only one column and the number of unique values on that column is lower than 100,000. Please note that `GROUP BY` can't be performed on float or double types.
+`GROUP BY` can be performed on tags and `TBNAME`. It can be performed on data columns too, with the only restriction being that it can be performed on just one data column, and the number of unique values in that column must be lower than 100,000. Please note that `GROUP BY` cannot be performed on float or double types.

 ## Restrictions of `IS NOT NULL`

-`IS NOT NULL` can be used on any data type of columns. The non-empty string evaluation expression, i.e. `<\>""` can only be used on non-numeric data types.
+`IS NOT NULL` can be used on any data type of columns. The non-empty string evaluation expression, i.e. `< > ""`, can only be used on non-numeric data types.

 ## Restrictions of `ORDER BY`

 - Only one `order by` is allowed for normal table and subtable.
 - At most two `order by` are allowed for STable, and the second one must be `ts`.
-- `order by tag` must be used with `group by tag` on same tag, this rule is also applicable to `tbname`.
+- `order by tag` must be used with `group by tag` on the same tag. This rule is also applicable to `tbname`.
 - `order by column` must be used with `group by column` or `top/bottom` on the same column. This rule is applicable to table and STable.
- `order by ts` is applicable to table and STable.
 - If `order by ts` is used with `group by`, the result set is sorted using `ts` in each group.

@@ -56,7 +56,7 @@ The legal character set is `[a-zA-Z0-9!?$%^&*()_–+={[}]:;@~#|<,>.?/]`.

 ### Name Restrictions of Table/Column

-The name of a table or column can only be composed of ASCII characters, digits and underscore, while it can't start with a digit. The maximum length is 192 bytes. Names are case insensitive. The name mentioned in this rule doesn't include the database name prefix and the separator.
+The name of a table or column can only be composed of ASCII characters, digits and underscore, and it cannot start with a digit. The maximum length is 192 bytes. Names are case insensitive. The name mentioned in this rule doesn't include the database name prefix and the separator.

 ### Name Restrictions After Escaping

diff --git a/docs-en/12-taos-sql/10-json.md b/docs-en/12-taos-sql/10-json.md
index abe6649330618eb3df45f5bed03335a65f93a434..7460a5e0ba3ce78ee7744569cda460c477cac19c 100644
--- a/docs-en/12-taos-sql/10-json.md
+++ b/docs-en/12-taos-sql/10-json.md
@@ -4,7 +4,7 @@ title: JSON Type

 ## Syntax

-1. Tag of JSON type
+1. Tag of type JSON

   ```sql
   create STable s1 (ts timestamp, v1 int) tags (info json);
@@ -12,7 +12,7 @@ title: JSON Type
   create table s1_1 using s1 tags ('{"k1": "v1"}');
   ```

-2. -> Operator of JSON
+2. "->" Operator of JSON

   ```sql
   select * from s1 where info->'k1' = 'v1';
@@ -20,7 +20,7 @@ title: JSON Type
   select info->'k1' from s1;
   ```

-3. contains Operator of JSON
+3. "contains" Operator of JSON

   ```sql
   select * from s1 where info contains 'k2';
@@ -30,7 +30,7 @@ title: JSON Type

 ## Applicable Operations

-1. When JSON data type is used in `where`, `match/nmatch/between and/like/and/or/is null/is no null` can be used but `in` can't be used.
+1. When a JSON data type is used in `where`, `match/nmatch/between and/like/and/or/is null/is not null` can be used but `in` can't be used.

   ```sql
   select * from s1 where info->'k1' match 'v*';
@@ -42,9 +42,9 @@ title: JSON Type
   select * from s1 where info->'k1' is not null;
   ```

-2. Tag of JSON type can be used in `group by`, `order by`, `join`, `union all` and sub query, for example `group by json->'key'`
+2. A tag of JSON type can be used in `group by`, `order by`, `join`, `union all` and subquery; for example `group by json->'key'`

-3. `Distinct` can be used with tag of JSON type
+3. `Distinct` can be used with a tag of type JSON

   ```sql
   select distinct info->'k1' from s1;
@@ -52,9 +52,9 @@ title: JSON Type

 4. Tag Operations

-  The value of JSON tag can be altered. Please note that the full JSON will be overriden when doing this.
+  The value of a JSON tag can be altered. Please note that the full JSON will be overridden when doing this.

-  The name of JSON tag can be altered. A tag of JSON type can't be added or removed. The column length of a JSON tag can't be changed.
+  The name of a JSON tag can be altered. A tag of JSON type can't be added or removed. The column length of a JSON tag can't be changed.

 ## Other Restrictions

@@ -64,17 +64,17 @@ title: JSON Type

 - JSON format:

-  - The input string for JSON can be empty, i.e. "", "\t", or NULL, but can't be non-NULL string, bool or array.
-  - object can be {}, and the whole JSON is empty if so. Key can be "", and it's ignored if so.
-  - value can be int, double, string, boll or NULL, can't be array. Nesting is not allowed, that means value can't be another JSON.
+  - The input string for JSON can be empty, i.e.
"", "\t", or NULL, but it can't be non-NULL string, bool or array. + - object can be {}, and the entire JSON is empty if so. Key can be "", and it's ignored if so. + - value can be int, double, string, bool or NULL, and it can't be an array. Nesting is not allowed which means that the value of a key can't be JSON. - If one key occurs twice in JSON, only the first one is valid. - Escape characters are not allowed in JSON. -- NULL is returned if querying a key that doesn't exist in JSON. +- NULL is returned when querying a key that doesn't exist in JSON. - If a tag of JSON is the result of inner query, it can't be parsed and queried in the outer query. -For example, the below SQL statements are not supported. +For example, the SQL statements below are not supported. ```sql; select jtag->'key' from (select jtag from STable); diff --git a/docs-en/12-taos-sql/12-keywords.md b/docs-en/12-taos-sql/12-keywords.md index fa750300b71251e1172dba13f91d05822f9ac1f4..8f045f48019e419d21d3bd22f432a024551c585c 100644 --- a/docs-en/12-taos-sql/12-keywords.md +++ b/docs-en/12-taos-sql/12-keywords.md @@ -46,3 +46,44 @@ There are about 200 keywords reserved by TDengine, they can't be used as the nam | CONNECTIONS | HAVING | NOT | SOFFSET | VNODES | | CONNS | ID | NOTNULL | STable | WAL | | COPY | IF | NOW | STableS | WHERE | +| _C0 | _QSTART | _QSTOP | _QDURATION | _WSTART | +| _WSTOP | _WDURATION | _ROWTS | + +## Explanations +### TBNAME +`TBNAME` can be considered as a special tag, which represents the name of the subtable, in a STable. + +Get the table name and tag values of all subtables in a STable. +```mysql +SELECT TBNAME, location FROM meters; + +Count the number of subtables in a STable. +```mysql +SELECT COUNT(TBNAME) FROM meters; +``` + +Only filter on TAGS can be used in WHERE clause in the above two query statements. +```mysql +taos> SELECT TBNAME, location FROM meters; + tbname | location | +================================================================== + d1004 | California.SanFrancisco | + d1003 | California.SanFrancisco | + d1002 | California.LosAngeles | + d1001 | California.LosAngeles | +Query OK, 4 row(s) in set (0.000881s) + +taos> SELECT COUNT(tbname) FROM meters WHERE groupId > 2; + count(tbname) | +======================== + 2 | +Query OK, 1 row(s) in set (0.001091s) +``` +### _QSTART/_QSTOP/_QDURATION +The start, stop and duration of a query time window. + +### _WSTART/_WSTOP/_WDURATION +The start, stop and duration of aggegate query by time window, like interval, session window, state window. + +### _c0/_ROWTS +_c0 is equal to _ROWTS, it means the first column of a table or STable. 
diff --git a/docs-en/12-taos-sql/13-operators.md b/docs-en/12-taos-sql/13-operators.md
new file mode 100644
index 0000000000000000000000000000000000000000..e393c82c76a60449ee66e647adeb08ed2802c725
--- /dev/null
+++ b/docs-en/12-taos-sql/13-operators.md
@@ -0,0 +1,66 @@
+---
+sidebar_label: Operators
+title: Operators
+---
+
+## Arithmetic Operators
+
+| # | **Operator** | **Data Types** | **Description** |
+| --- | :----------: | -------------- | --------------------------------------------------------- |
+| 1 | +, - | Numeric Types | Representing positive or negative numbers, unary operator |
+| 2 | +, - | Numeric Types | Addition and subtraction, binary operator |
+| 3 | \*, / | Numeric Types | Multiplication and division, binary operator |
+| 4 | % | Numeric Types | Taking the remainder, binary operator |
+
+## Bitwise Operators
+
+| # | **Operator** | **Data Types** | **Description** |
+| --- | :----------: | -------------- | ----------------------------- |
+| 1 | & | Numeric Types | Bitwise AND, binary operator |
+| 2 | \| | Numeric Types | Bitwise OR, binary operator |
+
+## JSON Operator
+
+The `->` operator can be used to get the value of a key in a column of JSON type; the left operand is the column name and the right operand is a string constant. For example, `col->'name'` returns the value of the key `'name'`.
+
+## Set Operators
+
+Set operators are used to combine the results of two queries into a single result. A query including set operators is called a combined query. The queries in a combined query must return the same number of columns, and the data types of the result are determined by the first query; the types of the following queries' results must be convertible to those of the first query's result, with the same conversion rules as the `CAST` function.
+
+TDengine provides 2 set operators: `UNION ALL` and `UNION`. `UNION ALL` combines the results without removing duplicate data. `UNION` combines the results and removes duplicate data rows. In a single SQL statement, at most 100 set operators can be used.
+
+## Comparison Operators
+
+| # | **Operator** | **Data Types** | **Description** |
+| --- | :---------------: | ------------------------------------------------------------------- | ----------------------------------------------- |
+| 1 | = | Except for BLOB, MEDIUMBLOB and JSON | Equal |
+| 2 | <\>, != | Except for BLOB, MEDIUMBLOB, JSON and primary key of timestamp type | Not equal |
+| 3 | \>, \< | Except for BLOB, MEDIUMBLOB and JSON | Greater than, less than |
+| 4 | \>=, \<= | Except for BLOB, MEDIUMBLOB and JSON | Greater than or equal to, less than or equal to |
+| 5 | IS [NOT] NULL | Any types | Is NULL or NOT |
+| 6 | [NOT] BETWEEN AND | Except for BLOB, MEDIUMBLOB and JSON | In a value range or not |
+| 7 | IN | Except for BLOB, MEDIUMBLOB, JSON and primary key of timestamp type | In a list of values or not |
+| 8 | LIKE | BINARY, NCHAR and VARCHAR | Wildcard matching |
+| 9 | MATCH, NMATCH | BINARY, NCHAR and VARCHAR | Regular expression matching |
+| 10 | CONTAINS | JSON | Whether a key exists in JSON |
+
+The `LIKE` operator uses wildcards to match a string; the rules are:
+
+- '%' matches 0 to any number of characters; '\_' matches any single ASCII character.
+- `\_` can be used to match a literal `_` in the string, i.e. the backslash `\` acts as an escape character.
+- A wildcard string is 100 bytes at most. The longer a wildcard string is, the worse the performance of the `LIKE` operator; a usage sketch follows.
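+
+A brief hedged sketch of `LIKE` wildcard matching, reusing the `meters` table and its `location` tag from the examples in these docs; the pattern literals are invented for illustration.
+
+```sql
+-- '%' matches any run of characters; '_' matches exactly one character.
+-- The pattern values below are illustrative only.
+SELECT * FROM meters WHERE location LIKE 'California.%';
+SELECT * FROM meters WHERE tbname LIKE 'd100_';
+```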
+
+`MATCH` and `NMATCH` operators use regular expressions to match a string; the rules are:
+
+- Regular expressions of the POSIX standard are supported.
+- Only `tbname`, i.e. the table name of subtables, and tag columns of string types can be matched with a regular expression; data columns are not supported.
+- A regular expression string is 128 bytes at most, and the limit can be adjusted by setting the parameter `maxRegexStringLen`, which is a client-side configuration and requires a client restart to take effect.
+
+## Logical Operators
+
+| # | **Operator** | **Data Types** | **Description** |
+| --- | :----------: | -------------- | ---------------------------------------------------------------------------------------- |
+| 1 | AND | BOOL | Logical AND; returns TRUE if both conditions are TRUE and FALSE if either one is FALSE. |
+| 2 | OR | BOOL | Logical OR; returns TRUE if either condition is TRUE and FALSE if both are FALSE. |
+
+TDengine uses short-circuit evaluation when performing logical operations. For the AND operator, if the first condition is evaluated to FALSE, then the second one is not evaluated. For the OR operator, if the first condition is evaluated to TRUE, then the second one is not evaluated.
diff --git a/docs-en/13-operation/01-pkg-install.md b/docs-en/13-operation/01-pkg-install.md
index 8dd6de34280ee3702bc955d00dfb24fcb73e940e..c098002962d62aa0acc7a94462c052303cb2ed90 100644
--- a/docs-en/13-operation/01-pkg-install.md
+++ b/docs-en/13-operation/01-pkg-install.md
@@ -6,7 +6,7 @@ description: Install, Uninstall, Start, Stop and Upgrade
 import Tabs from "@theme/Tabs";
 import TabItem from "@theme/TabItem";

-TDengine community version provides dev and rpm packages for users to choose based on the system environment. deb supports Debian, Ubuntu and systems derived from them. rpm supports CentOS, RHEL, SUSE and systems derived from them. Furthermore, tar.gz package is provided for enterprise customers.
+TDengine community version provides deb and rpm packages for users to choose from, based on their system environment. The deb package supports Debian, Ubuntu and derivative systems. The rpm package supports CentOS, RHEL, SUSE and derivative systems. Furthermore, a tar.gz package is provided for TDengine Enterprise customers.

 ## Install

@@ -124,7 +124,7 @@ taoskeeper is installed, enable it by `systemctl enable taoskeeper`
 ```

 :::info
-Some configuration will be prompted for users to provide when install.sh is executing, the interactive mode can be disabled by executing `./install.sh -e no`. `./install -h` can show all parameters and detailed explanation.
+Users will be prompted to enter some configuration information when install.sh is executing. The interactive mode can be disabled by executing `./install.sh -e no`. `./install.sh -h` can show all parameters with detailed explanation.

 :::

@@ -132,7 +132,7 @@ Some configuration will be prompted for users to provide when install.sh is exec

 :::note

-When installing on the first node in the cluster, when "Enter FQDN:" is prompted, nothing needs to be provided. When installing on following nodes, when "Enter FQDN:" is prompted, the end point of the first dnode in the cluster can be input if it is already up; or just ignore it and configure later after installation is done.
+When installing on the first node in the cluster, at the "Enter FQDN:" prompt, nothing needs to be provided. When installing on subsequent nodes, at the "Enter FQDN:" prompt, you must enter the end point of the first dnode in the cluster if it is already up.
You can also just ignore it and configure it later after installation is finished.

 :::

@@ -181,14 +181,14 @@ taosKeeper is removed successfully!

 :::note

-- It's strongly suggested not to use multiple kinds of installation packages on a single host TDengine
-- After deb package is installed, if the installation directory is removed manually so that uninstall or reinstall can't succeed, it can be resolved by cleaning up TDengine package information as in the command below and then reinstalling.
+- We strongly recommend not using multiple kinds of TDengine installation packages on a single host.
+- After the deb package is installed, if the installation directory is removed manually, uninstall or reinstall will not work. This issue can be resolved by using the command below, which cleans up TDengine package information. You can then reinstall if needed.

 ```bash
 $ sudo rm -f /var/lib/dpkg/info/tdengine*
 ```

-- After rpm package is installed, if the installation directory is removed manually so that uninstall or reinstall can't succeed, it can be resolved by cleaning up TDengine package information as in the command below and then reinstalling.
+- After the rpm package is installed, if the installation directory is removed manually, uninstall or reinstall will not work. This issue can be resolved by using the command below, which cleans up TDengine package information. You can then reinstall if needed.

 ```bash
 $ sudo rpm -e --noscripts tdengine
 ```

@@ -219,7 +219,7 @@ lrwxrwxrwx 1 root root 13 Feb 22 09:34 log -> /var/log/taos/

 During the installation process:

 - Configuration directory, data directory, and log directory are created automatically if they don't exist
-- The default configuration file is located at /etc/taos/taos.cfg, which is a copy of /usr/local/taos/cfg/taos.cfg if not existing
+- The default configuration file is located at /etc/taos/taos.cfg, which is a copy of /usr/local/taos/cfg/taos.cfg
 - The default data directory is /var/lib/taos, which is a soft link to /usr/local/taos/data
 - The default log directory is /var/log/taos, which is a soft link to /usr/local/taos/log
 - The executables at /usr/local/taos/bin are linked to /usr/bin

@@ -228,7 +228,7 @@ During the installation process:

 :::note

-- When TDengine is uninstalled, the configuration /etc/taos/taos.cfg, data directory /var/lib/taos, log directory /var/log/taos are kept. They can be deleted manually with caution because data can't be recovered
+- When TDengine is uninstalled, the configuration /etc/taos/taos.cfg, data directory /var/lib/taos, log directory /var/log/taos are kept. They can be deleted manually with caution, because data can't be recovered. Please follow data integrity, security, backup or relevant SOPs before deleting any data.
 - When reinstalling TDengine, if the default configuration file /etc/taos/taos.cfg exists, it will be kept and the configuration file in the installation package will be renamed to taos.cfg.orig and stored at /usr/local/taos/cfg to be used as configuration sample. Otherwise the configuration file in the installation package will be installed to /etc/taos/taos.cfg and used.

 ## Start and Stop

@@ -263,18 +263,19 @@ Active: inactive (dead)

 There are two aspects in upgrade operation: upgrade installation package and upgrade a running server.

-Upgrading package should follow the steps mentioned previously to first uninstall the old version then install the new version.
+To upgrade a package, follow the steps mentioned previously to first uninstall the old version then install the new version.
-Upgrading a running server is much more complex. First please check the version number of the old version and the new version. The version number of TDengine consists of 4 sections, only if the first 3 section match can the old version be upgraded to the new version. The steps of upgrading a running server are as below:
+Upgrading a running server is much more complex. First please check the version number of the old version and the new version. The version number of TDengine consists of 4 sections; only if the first 3 sections match can the old version be upgraded to the new version. The steps of upgrading a running server are as below:

 - Stop inserting data
-- Make sure all data are persisted into disk
+- Make sure all data is persisted to disk
+- Make some simple queries (such as the total rows in STables and tables; note down the values and follow best practices and relevant SOPs)
 - Stop the cluster of TDengine
 - Uninstall old version and install new version
 - Start the cluster of TDengine
-- Make some simple queries to make sure no data loss
-- Make some simple data insertion to make sure the cluster works well
-- Restore business data
+- Execute simple queries, such as the ones executed prior to installing the new package, to make sure there is no data loss
+- Run some simple data insertion statements to make sure the cluster works well
+- Restore business services

 :::warning

diff --git a/docs-en/13-operation/02-planning.mdx b/docs-en/13-operation/02-planning.mdx
index 4b8ed1f1b893446a521425b9eb1f6ec32b112505..c1baf92dbfa8d93f83174c05c2ea631d1a469739 100644
--- a/docs-en/13-operation/02-planning.mdx
+++ b/docs-en/13-operation/02-planning.mdx
@@ -2,17 +2,17 @@ title: Resource Planning
 ---

-The computing and storage resources need to be planned if using TDengine to build an IoT platform. How to plan the CPU, memory and disk required will be described in this chapter.
+It is important to plan computing and storage resources if using TDengine to build an IoT, time-series or Big Data platform. How to plan the CPU, memory and disk resources required will be described in this chapter.

 ## Memory Requirement of Server Side

-The number of vgroups created for each database is the same as the number of CPU cores by default and can be configured by parameter `maxVgroupsPerDb`, each vnode in a vgroup stores one replica. Each vnode consumes a fixed size of memory, i.e. `blocks` \* `cache`. Besides, some memory is required for tag values associated with each table. A fixed amount of memory is required for each cluster.
+By default, the number of vgroups created for each database is the same as the number of CPU cores. This can be configured by the parameter `maxVgroupsPerDb`. Each vnode in a vgroup stores one replica. Each vnode consumes a fixed amount of memory, i.e. `blocks` \* `cache`. In addition, some memory is required for tag values associated with each table. A fixed amount of memory is required for each cluster.
So, the memory required for each DB can be calculated using the formula below:

 ```
 Database Memory Size = maxVgroupsPerDb * replica * (blocks * cache + 10MB) + numOfTables * (tagSizePerTable + 0.5KB)
 ```

-For example, assuming the default value of `maxVgroupPerDB` is 64, the default value of `cache` 16M, the default value of `blocks` is 6, there are 100,000 tables in a DB, the replica number is 1, total length of tag values is 256 bytes, the total memory required for this DB is: 64 \* 1 \* (16 \* 6 + 10) + 100000 \* (0.25 + 0.5) / 1000 = 6792M.
+For example, assuming the default value of `maxVgroupPerDB` is 64, the default value of `cache` is 16M, the default value of `blocks` is 6, there are 100,000 tables in a DB, the replica number is 1, total length of tag values is 256 bytes, the total memory required for this DB is: 64 \* 1 \* (16 \* 6 + 10) + 100000 \* (0.25 + 0.5) / 1000 = 6792M.

 In the real operation of TDengine, we are more concerned about the memory used by each TDengine server process `taosd`.

@@ -22,10 +22,10 @@ In the real operation of TDengine, we are more concerned about the memory used b

 In the above formula:

-1. "vnode_memory" of a `taosd` process is the memory used by all vnodes hosted by this `taosd` process. It can be roughly calculated by firstly adding up the total memory of all DBs whose memory usage can be derived according to the formula mentioned previously then dividing by number of dnodes and multiplying the number of replicas.
+1. "vnode_memory" of a `taosd` process is the memory used by all vnodes hosted by this `taosd` process. It can be roughly calculated by first adding up the total memory of all DBs, whose memory usage can be derived according to the formula for Database Memory Size mentioned above, then dividing by the number of dnodes and multiplying by the number of replicas.

 ```
-   vnode_memory = sum(Database memory) / number_of_dnodes * replica
+   vnode_memory = (sum(Database Memory Size) / number_of_dnodes) * replica
 ```

 2. "mnode_memory" of a `taosd` process is the memory consumed by a mnode. If there is one (and only one) mnode hosted in a `taosd` process, the memory consumed by "mnode" is "0.2KB \* the total number of tables in the cluster".

@@ -56,8 +56,8 @@ So, at least 3GB needs to be reserved for such a client.

 The CPU resources required depend on two aspects:

-- **Data Insertion** Each dnode of TDengine can process at least 10,000 insertion requests in one second, while each insertion request can have multiple rows. The computing resource consumed between inserting 1 row one time and inserting 10 rows one time is very small. So, the more the rows to insert one time, the higher the efficiency. Inserting in bach also exposes requirements for the client side which needs to cache rows and insert in batch once the cached rows reaches a threshold.
-- **Data Query** High efficiency query is provided in TDengine, but it's hard to estimate the CPU resource required because the queries used in different use cases and the frequency of queries vary significantly. It can only be verified with the query statements, query frequency, data size to be queried, etc provided by user.
+- **Data Insertion** Each dnode of TDengine can process at least 10,000 insertion requests in one second, while each insertion request can have multiple rows. The difference in computing resource consumed between inserting 1 row at a time and inserting 10 rows at a time is very small. So, the more rows that can be inserted at one time, the higher the efficiency.
Inserting in batch also imposes requirements on the client side, which needs to cache rows and insert them in a batch once the number of cached rows reaches a threshold. +- **Data Query** High efficiency query is provided in TDengine, but it's hard to estimate the CPU resource required because the queries used in different use cases and the frequency of queries vary significantly. It can only be verified with the query statements, query frequency, data size to be queried, and other requirements provided by users. In short, the CPU resource required for data insertion can be estimated but it's hard to do so for query use cases. In real operation, it's suggested to control CPU usage below 50%. If this threshold is exceeded, it's a reminder for the system operator to add more nodes in the cluster to expand resources. @@ -71,12 +71,12 @@ Raw DataSize = numOfTables * rowSizePerTable * rowsPerTable For example, there are 10,000,000 meters, while each meter collects data every 15 minutes and the data size of each collection is 128 bytes, so the raw data size of one year is: 10000000 \* 128 \* 24 \* 60 / 15 \* 365 = 44.8512(TB). Assuming compression ratio is 5, the actual disk size is: 44.851 / 5 = 8.97024(TB). -Parameter `keep` can be used to set how long the data will be kept on disk. To further reduce storage cost, multiple storage levels can be enabled in TDengine, with the coldest data stored on the cheapest storage device, and this is transparent to application programs. +Parameter `keep` can be used to set how long the data will be kept on disk. To further reduce storage cost, multiple storage levels can be enabled in TDengine, with the coldest data stored on the cheapest storage device. This is completely transparent to application programs. -To increase the performance, multiple disks can be setup for parallel data reading or data inserting. Please note that an expensive disk array is not necessary because replications are used in TDengine to provide high availability. +To increase performance, multiple disks can be set up for parallel data reading or data inserting. Please note that an expensive disk array is not necessary because replications are used in TDengine to provide high availability. ## Number of Hosts -A host can be either physical or virtual. The total memory, total CPU, total disk required can be estimated according to the formulas mentioned previously. Then, according to the system resources that a single host can provide, assuming all hosts have the same resources, the number of hosts can be derived easily. +A host can be either physical or virtual. The total memory, total CPU, total disk required can be estimated according to the formulae mentioned previously. Then, according to the system resources that a single host can provide, assuming all hosts have the same resources, the number of hosts can be derived easily. **Quick Estimation for CPU, Memory and Disk** Please refer to [Resource Estimate](https://www.taosdata.com/config/config.html). diff --git a/docs-en/13-operation/03-tolerance.md b/docs-en/13-operation/03-tolerance.md index 9f74760278cd34a50c232f528549e90842631e18..d4d48d7fcdc2c990b6ea0821e2347c70a809ed79 100644 --- a/docs-en/13-operation/03-tolerance.md +++ b/docs-en/13-operation/03-tolerance.md @@ -7,26 +7,26 @@ title: Fault Tolerance & Disaster Recovery TDengine uses **WAL**, i.e. Write Ahead Log, to achieve fault tolerance and high reliability. -When a data block is received by TDengine, the original data block is first written into WAL.
The log in WAL will be deleted only after the data has been written into data files in the database. Data can be recovered from WAL in case the server is stopped abnormally due to any reason and then restarted. +When a data block is received by TDengine, the original data block is first written into WAL. The log in WAL will be deleted only after the data has been written into data files in the database. Data can be recovered from WAL in case the server is stopped abnormally for any reason and then restarted. There are 2 configuration parameters related to WAL: - walLevel: - - 0:wal is disabled; - - 1:wal is enabled without fsync; - - 2:wal is enabled with fsync. -- fsync:only valid when walLevel is set to 2, it specifies the interval of invoking fsync. If set to 0, it means fsync is invoked immediately once WAL is written. + - 0:wal is disabled + - 1:wal is enabled without fsync + - 2:wal is enabled with fsync +- fsync:This parameter is only valid when walLevel is set to 2. It specifies the interval, in milliseconds, of invoking fsync. If set to 0, it means fsync is invoked immediately once WAL is written. -To achieve absolutely no data loss, walLevel needs to be set to 2 and fsync needs to be set to 1. The penalty is the performance of data ingestion downgrades. However, if the concurrent threads of data insertion on the client side can reach a big enough number, for example 50, the data ingestion performance would be still good enough, our verification shows that the drop is only 30% compared to fsync is set to 3,000 milliseconds. +To achieve absolutely no data loss, walLevel should be set to 2 and fsync should be set to 1. There is a performance penalty to the data ingestion rate. However, if the concurrent data insertion threads on the client side can reach a big enough number, for example 50, the data ingestion performance will still be good enough. Our verification shows that the drop is only 30% compared to when fsync is set to 3,000 milliseconds. ## Disaster Recovery -TDengine uses replications to provide high availability and disaster recovery capability. +TDengine uses replication to provide high availability and disaster recovery capability. -TDengine cluster is managed by mnode. To make sure the high availability of mnode, multiple replicas can be configured by the system parameter `numOfMnodes`. The data replication between mnode replicas is performed in a synchronous way to guarantee the metadata consistency. +A TDengine cluster is managed by mnode. To ensure the high availability of mnode, multiple replicas can be configured by the system parameter `numOfMnodes`. The data replication between mnode replicas is performed in a synchronous way to guarantee metadata consistency. -The number of replicas for the time series data in TDengine is associated with each database, there can be a lot of databases in a cluster while each database can be configured with a different number of replicas. When creating a database, parameter `replica` is used to configure the number of replications. To achieve high availability, `replica` needs to be higher than 1.
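As a concrete illustration of the `replica` parameter, the following is a minimal C sketch (not part of the original pages; the host, credentials, and the `power` database name are placeholders) that creates a database with 3 replicas through the C connector described later in this document:

```c
#include <stdio.h>
#include <taos.h>

int main() {
  // Connect to any dnode in the cluster; host/user/password are placeholders.
  TAOS *taos = taos_connect("localhost", "root", "taosdata", NULL, 6030);
  if (taos == NULL) {
    printf("failed to connect\n");
    return 1;
  }
  // REPLICA 3 asks TDengine to keep 3 copies of every vnode in this database;
  // as noted below, the cluster must have at least 3 dnodes for this to succeed.
  TAOS_RES *res = taos_query(taos, "CREATE DATABASE IF NOT EXISTS power REPLICA 3;");
  if (taos_errno(res) != 0) {
    printf("create database failed: %s\n", taos_errstr(res));
  }
  taos_free_result(res);
  taos_close(taos);
  return 0;
}
```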
The number of dnodes in a TDengine cluster must NOT be lower than the number of replicas for any database, otherwise it would fail when trying to create a table. -As long as the dnodes of a TDengine cluster are deployed on different physical machines and the replica number is set to bigger than 1, high availability can be achieved without any other assistance. If dnodes of TDengine cluster are deployed in geographically different data centers, disaster recovery can be achieved too. +As long as the dnodes of a TDengine cluster are deployed on different physical machines and the replica number is higher than 1, high availability can be achieved without any other assistance. For disaster recovery, dnodes of a TDengine cluster should be deployed in geographically different data centers. diff --git a/docs-en/13-operation/08-export.md b/docs-en/13-operation/08-export.md index fa9625a7c5f6b0e6706d726bff410cee647286bb..5780de42faeaedbc1c985ad2aa2f52fe56c76971 100644 --- a/docs-en/13-operation/08-export.md +++ b/docs-en/13-operation/08-export.md @@ -2,11 +2,13 @@ title: Data Export --- -There are two ways of exporting data from a TDengine cluster, one is SQL statement in TDengine CLI, the other one is `taosdump`. +There are two ways of exporting data from a TDengine cluster: +- Using a SQL statement in TDengine CLI +- Using the `taosdump` tool ## Export Using SQL -If you want to export the data of a table or a STable, please execute below SQL statement in TDengine CLI. +If you want to export the data of a table or a STable, please execute the SQL statement below, in the TDengine CLI. ```sql select * from <tb_name> >> data.csv; ``` @@ -16,4 +18,4 @@ The data of table or STable specified by `tb_name` will be exported into a file ## Export Using taosdump -With `taosdump`, you can choose to export the data of all databases, a database, a table or a STable, you can also choose export the data within a time range, or even only export the schema definition of a table. For the details of using `taosdump` please refer to [Tool for exporting and importing data: taosdump](/reference/taosdump). +With `taosdump`, you can choose to export the data of all databases, a database, a table or a STable, you can also choose to export the data within a time range, or even only export the schema definition of a table. For the details of using `taosdump` please refer to [Tool for exporting and importing data: taosdump](/reference/taosdump). diff --git a/docs-en/13-operation/09-status.md b/docs-en/13-operation/09-status.md index ca8974bb8f4efec4c6d7c87c60b3ca67ad35c613..51396524ea281ae665c9fdf61d2e6e6202995537 100644 --- a/docs-en/13-operation/09-status.md +++ b/docs-en/13-operation/09-status.md @@ -3,7 +3,7 @@ sidebar_label: Connections & Tasks title: Manage Connections and Query Tasks --- -A system operator can use TDengine CLI to show the connections, ongoing queries, stream computing, and can close connection or stop ongoing query task or stream computing. +A system operator can use the TDengine CLI to show connections, ongoing queries, stream computing, and can close connections or stop ongoing query tasks or stream computing. ## Show Connections ```sql SHOW CONNECTIONS; ``` One column of the output of the above SQL command is "ip:port", which is the end point of the client.
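For operators who prefer to script this check, below is a hedged C sketch (an illustration only; it assumes a `taos` handle obtained with `taos_connect()`) that runs `SHOW CONNECTIONS;` and prints each row using the C connector APIs covered later in this document:

```c
#include <stdio.h>
#include <taos.h>

// Sketch: run SHOW CONNECTIONS and print every row; error handling is minimal.
void show_connections(TAOS *taos) {
  TAOS_RES *res = taos_query(taos, "SHOW CONNECTIONS;");
  if (taos_errno(res) != 0) {
    printf("query failed: %s\n", taos_errstr(res));
    taos_free_result(res);
    return;
  }
  int         nfields = taos_num_fields(res);
  TAOS_FIELD *fields  = taos_fetch_fields(res);
  TAOS_ROW    row;
  char        line[1024];
  while ((row = taos_fetch_row(res)) != NULL) {
    // taos_print_row() formats one row using the field metadata.
    taos_print_row(line, row, fields, nfields);
    printf("%s\n", line);
  }
  taos_free_result(res);
}
```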
-## Close Connections Forcedly +## Force Close Connections ```sql KILL CONNECTION <connection-id>; ``` @@ -27,9 +27,9 @@ In the above SQL command, `connection-id` is from the first column of the output SHOW QUERIES; ``` -The first column of the output is query ID, which is composed of the corresponding connection ID and the sequence number of the current query task started on this connection, in format of "connection-id:query-no". +The first column of the output is query ID, which is composed of the corresponding connection ID and the sequence number of the current query task started on this connection. The format is "connection-id:query-no". -## Close Queries Forcedly +## Force Close Queries ```sql KILL QUERY <query-id>; ``` @@ -43,9 +43,9 @@ In the above SQL command, `query-id` is from the first column of the output of ` SHOW STREAMS; ``` -The first column of the output is stream ID, which is composed of the connection ID and the sequence number of the current stream started on this connection, in the format of "connection-id:stream-no". +The first column of the output is stream ID, which is composed of the connection ID and the sequence number of the current stream started on this connection. The format is "connection-id:stream-no". -## Close Continuous Query Forcedly +## Force Close Continuous Query ```sql KILL STREAM <stream-id>; diff --git a/docs-en/13-operation/10-monitor.md b/docs-en/13-operation/10-monitor.md index 615f79ca73f25115f5b4f19863c0f152f4fecf69..a4679983f2bc77bb4e438f5d43fa1b8beb39b120 100644 --- a/docs-en/13-operation/10-monitor.md +++ b/docs-en/13-operation/10-monitor.md @@ -2,13 +2,13 @@ title: TDengine Monitoring --- -After TDengine is started, a database named `log` for monitoring is created automatically. The information about CPU, memory, disk, bandwidth, number of requests, disk I/O speed, slow query is written into `log` database on the basis of a predefined interval. Additionally, some important system operations, like logon, create user, drop database, and alerts and warnings generated in TDengine are written into the `log` database too. A system operator can view the data in `log` database from TDengine CLI or from a web console. +After TDengine is started, a database named `log` is created automatically to help with monitoring. Information that includes CPU, memory and disk usage, bandwidth, number of requests, disk I/O speed, and slow queries is written into the `log` database at a predefined interval. Additionally, some important system operations, like logon, create user, drop database, and alerts and warnings generated in TDengine are written into the `log` database too. A system operator can view the data in `log` database from TDengine CLI or from a web console. The collection of the monitoring information is enabled by default, but can be disabled by parameter `monitor` in the configuration file. ## TDinsight -TDinsight is a complete solution which uses the monitor database `log` mentioned previously and Grafana to monitor a TDengine cluster. +TDinsight is a complete solution which uses the monitoring database `log` mentioned previously, and Grafana, to monitor a TDengine cluster. From version 2.3.3.0, more monitoring data has been added in the `log` database. Please refer to [TDinsight Grafana Dashboard](https://grafana.com/grafana/dashboards/15167) to learn more details about using TDinsight to monitor TDengine.
diff --git a/docs-en/13-operation/17-diagnose.md b/docs-en/13-operation/17-diagnose.md index 53d808ef511b72acbf7cff22dc8c0d5a5b05408e..2b474fddba4af5ba0c29103cd8ab1249d10d055b 100644 --- a/docs-en/13-operation/17-diagnose.md +++ b/docs-en/13-operation/17-diagnose.md @@ -4,13 +4,13 @@ title: Problem Diagnostics ## Network Connection Diagnostics -When the client is unable to access the server, the network connection between the client side and the server side needs to be checked to find out the root cause and resolve problems. +When a TDengine client is unable to access a TDengine server, the network connection between the client side and the server side must be checked to find the root cause and resolve problems. -The diagnostic for network connection can be executed between Linux and Linux or between Linux and Windows. +Diagnostics for network connections can be executed between Linux and Linux or between Linux and Windows. Diagnostic steps: -1. If the port range to be diagnosed are being occupied by a `taosd` server process, please first stop `taosd. +1. If the port range to be diagnosed is being occupied by a `taosd` server process, please first stop `taosd`. 2. On the server side, execute command `taos -n server -P <port> -l <pktlen>` to monitor the port range starting from the port specified by `-P` parameter with the role of "server". 3. On the client side, execute command `taos -n client -h <fqdn of server> -P <port> -l <pktlen>` to send a testing package to the specified server and port. @@ -65,13 +65,13 @@ Output of the client side for the example is below: 12/21 14:50:22.721274 0x7fc95d859200 UTL successed to test UDP port:6011 ``` -The output needs to be checked carefully for the system operator to find out the root cause and solve the problem. +The output needs to be checked carefully for the system operator to find the root cause and resolve the problem. ## Startup Status and RPC Diagnostic -`taos -n startup -h <fqdn of server>` can be used to check the startup status of a `taosd` process. This is a comman task for a system operator to do to determine whether `taosd` has been started successfully, especially in case of cluster. +`taos -n startup -h <fqdn of server>` can be used to check the startup status of a `taosd` process. This is a common task which should be performed by a system operator, especially in the case of a cluster, to determine whether `taosd` has been started successfully. -`taos -n rpc -h <fqdn of server>` can be used to check whether the port of a started `taosd` can be accessed or not. If `taosd` process doesn't respond or is working abnormally, this command can be used to initiate a rpc communication with the specified fqdn to determine whether it's a network problem or `taosd` is abnormal. +`taos -n rpc -h <fqdn of server>` can be used to check whether the port of a started `taosd` can be accessed or not. If `taosd` process doesn't respond or is working abnormally, this command can be used to initiate a rpc communication with the specified fqdn to determine whether it's a network problem or whether `taosd` is abnormal. ## Sync and Arbitrator Diagnostic @@ -80,13 +80,13 @@ taos -n sync -P 6040 -h <fqdn of server> taos -n sync -P 6042 -h <fqdn of server> ``` -The above commands can be executed on Linux Shell to check whether the port for sync is working well and whether the sync module on the server side is working well. +The above commands can be executed in a Linux shell to check whether the port for sync is working well and whether the sync module on the server side is working well.
Additionally, `-P 6042` is used to check whether the arbitrator is configured properly and is working well. ## Network Speed Diagnostic `taos -n speed -h <fqdn of server> -P 6030 -N 10 -l 10000000 -S TCP` -From version 2.2.0.0, the above command can be executed on Linux Shell to test the network speed, it sends uncompressed package to a running `taosd` server process or a simulated server process started by `taos -n server` to test the network speed. Parameters can be used when testing network speed are as below: +From version 2.2.0.0 onwards, the above command can be executed in a Linux shell to test network speed. The command sends uncompressed packages to a running `taosd` server process or a simulated server process started by `taos -n server` to test the network speed. The parameters that can be used when testing network speed are as below: -n:When set to "speed", it means testing network speed. -h:The FQDN or IP of the server process to be connected to; if not set, the FQDN configured in `taos.cfg` is used. @@ -99,23 +99,23 @@ From version 2.2.0.0, the above command can be executed on Linux Shell to test t `taos -n fqdn -h <fqdn>` -From version 2.2.0.0, the above command can be executed on Linux Shell to test the resolution speed of FQDN. It can be used to try to resolve a FQDN to an IP address and record the time spent in this process. The parameters that can be used for this purpose are as below: +From version 2.2.0.0 onward, the above command can be executed in a Linux shell to test the resolution speed of FQDN. It can be used to try to resolve a FQDN to an IP address and record the time spent in this process. The parameters that can be used for this purpose are as below: -n:When set to "fqdn", it means testing the speed of resolving FQDN. -h:The FQDN to be resolved. If not set, the `FQDN` parameter in `taos.cfg` is used by default. ## Server Log -The parameter `debugFlag` is used to control the log level of the `taosd` server process. The default value is 131, for debug purpose it needs to be escalated to 135 or 143. +The parameter `debugFlag` is used to control the log level of the `taosd` server process. The default value is 131. For debugging and tracing, it needs to be set to either 135 or 143 respectively. -Once this parameter is set to 135 or 143, the log file grows very quickly especially when there is a huge volume of data insertion and data query requests. If all the logs are stored together, some important information may be missed very easily, so on server side important information is stored at different place from other logs. +Once this parameter is set to 135 or 143, the log file grows very quickly especially when there is a huge volume of data insertion and data query requests. If all the logs are stored together, some important information may be missed very easily and so on the server side, important information is stored in a different place from other logs. - The log at level of INFO, WARNING and ERROR is stored in `taosinfo` so that it is easy to find important information - The log at level of DEBUG (135) and TRACE (143) and other information not handled by `taosinfo` are stored in `taosdlog` ## Client Log -An independent log file, named as "taoslog+<seq num>" is generated for each client program, i.e. a client process. The default value of `debugFlag` is also 131 and only logs at level of INFO/ERROR/WARNING are recorded, for debugging purposes it needs to be changed to 135 or 143 so that logs at DEBUG or TRACE level can be recorded.
+An independent log file, named "taoslog+<seq num>" is generated for each client program, i.e. a client process. The default value of `debugFlag` is also 131 and only logs at level of INFO/ERROR/WARNING are recorded. As stated above, for debugging and tracing, it needs to be changed to 135 or 143 respectively, so that logs at DEBUG or TRACE level can be recorded. The maximum length of a single log file is controlled by parameter `numOfLogLines` and only 2 log files are kept for each `taosd` server process. diff --git a/docs-en/13-operation/index.md b/docs-en/13-operation/index.md index a9801c0390f294d6b39b1219cc4055149871ef9c..c64749c40e26f091e4a25e0238827ebceff4b069 100644 --- a/docs-en/13-operation/index.md +++ b/docs-en/13-operation/index.md @@ -2,7 +2,7 @@ title: Administration --- -This chapter is mainly written for system administrators, covering download, install/uninstall, data import/export, system monitoring, user management, connection management, etc. Capacity planning and system optimization are also covered. +This chapter is mainly written for system administrators. It covers download, install/uninstall, data import/export, system monitoring, user management, connection management, capacity planning and system optimization. ```mdx-code-block import DocCardList from '@theme/DocCardList'; diff --git a/docs-en/14-reference/02-rest-api/02-rest-api.mdx b/docs-en/14-reference/02-rest-api/02-rest-api.mdx index 0edc901bc373683a49dfde061f796dc0ae79ab4f..990af861961e9daf4ac775462e21d6d9852d17c1 100644 --- a/docs-en/14-reference/02-rest-api/02-rest-api.mdx +++ b/docs-en/14-reference/02-rest-api/02-rest-api.mdx @@ -2,10 +2,10 @@ title: REST API --- -To support the development of various types of platforms, TDengine provides an API that conforms to the REST principle, namely REST API. To minimize the learning cost, different from the other database REST APIs, TDengine directly requests the SQL command contained in the request BODY through HTTP POST to operate the database and only requires a URL. +To support the development of various types of applications and platforms, TDengine provides an API that conforms to REST principles; namely REST API. To minimize the learning cost, unlike REST APIs for other database engines, TDengine allows insertion of SQL commands in the BODY of an HTTP POST request to operate the database. :::note -One difference from the native connector is that the REST interface is stateless, so the `USE db_name` command has no effect. All references to table names and super table names need to specify the database name prefix. (Since version 2.2.0.0, it is supported to specify db_name in RESTful URL. If the database name prefix is not specified in the SQL command, the `db_name` specified in the URL will be used. Since version 2.4.0.0, REST service is provided by taosAdapter by default. And it requires that the `db_name` must be specified in the URL.) +One difference from the native connector is that the REST interface is stateless and so the `USE db_name` command has no effect. All references to table names and super table names need to specify the database name in the prefix. (Since version 2.2.0.0, TDengine supports specification of the db_name in RESTful URL. If the database name prefix is not specified in the SQL command, the `db_name` specified in the URL will be used. Since version 2.4.0.0, REST service is provided by taosAdapter by default and it requires that the `db_name` must be specified in the URL.)
::: ## Installation @@ -16,9 +16,9 @@ The REST interface does not rely on any TDengine native library, so the client a If the TDengine server is already installed, it can be verified as follows: -The following is an Ubuntu environment using the `curl` tool (to confirm that it is installed) to verify that the REST interface is working. +The following example is in an Ubuntu environment and uses the `curl` tool to verify that the REST interface is working. Note that the `curl` tool may need to be installed in your environment. -The following example lists all databases, replacing `h1.taosdata.com` and `6041` (the default port) with the actual running TDengine service FQDN and port number. +The following example lists all databases on the host h1.taosdata.com. To use it in your environment, replace `h1.taosdata.com` and `6041` (the default port) with the actual running TDengine service FQDN and port number. ```html curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'show databases;' h1.taosdata.com:6041/rest/sql ``` @@ -89,7 +89,7 @@ For example, `http://h1.taos.com:6041/rest/sql/test` is a URL to `h1.taos.com:60 TDengine supports both Basic authentication and custom authentication mechanisms, and subsequent versions will provide a standard secure digital signature mechanism for authentication. -- The custom authentication information is as follows (Let's introduce token later) +- The custom authentication information is as follows. More details about "token" later. ``` Authorization: Taosd <TOKEN> ``` @@ -136,7 +136,7 @@ The return result is in JSON format, as follows: Description: -- status: tell if the operation result is success or failure. +- status: tells you whether the operation result is success or failure. - head: the definition of the table, or just one column "affected_rows" if no result set is returned. (As of version 2.0.17.0, it is recommended not to rely on the head return value to determine the data column type but rather use column_meta. In later versions, the head item may be removed from the return value.) - column_meta: this item is added to the return value to indicate the data type of each column in the data with version 2.0.17.0 and later versions. Each column is described by three values: column name, column type, and type length. For example, `["current",6,4]` means that the column name is "current", the column type is 6, which is the float type, and the type length is 4, which is the float type with 4 bytes. If the column type is binary or nchar, the type length indicates the maximum length of content stored in the column, not the length of the specific data in this return value. When the column type is nchar, the type length indicates the number of Unicode characters that can be saved, not bytes. - data: The exact data returned, presented row by row, or just [[affected_rows]] if no result set is returned. The order of the data columns in each row of data is the same as that of the data columns described in column_meta. diff --git a/docs-en/14-reference/03-connector/cpp.mdx b/docs-en/14-reference/03-connector/cpp.mdx index 4b388d32a9050645e268bb267d16e9a5b8aa4bda..d13a74384ccc99b4200f89cdba98e5ba902e41f8 100644 --- a/docs-en/14-reference/03-connector/cpp.mdx +++ b/docs-en/14-reference/03-connector/cpp.mdx @@ -4,7 +4,7 @@ sidebar_label: C/C++ title: C/C++ Connector --- -C/C++ developers can use TDengine's client driver and the C/C++ connector, to develop their applications to connect to TDengine clusters for data writing, querying, and other functions.
To use it, you need to include the TDengine header file _taos.h_, which lists the function prototypes of the provided APIs; the application also needs to link to the corresponding dynamic libraries on the platform where it is located. +C/C++ developers can use TDengine's client driver and the C/C++ connector, to develop their applications to connect to TDengine clusters for data writing, querying, and other functions. To use the C/C++ connector you must include the TDengine header file _taos.h_, which lists the function prototypes of the provided APIs. The application also needs to link to the corresponding dynamic libraries on the platform where it is located. ```c #include @@ -26,7 +26,7 @@ Please refer to [list of supported platforms](/reference/connector#supported-pla ## Supported versions -The version number of the TDengine client driver and the version number of the TDengine server require one-to-one correspondence and recommend using the same version of client driver as what the TDengine server version is. Although a lower version of the client driver is compatible to work with a higher version of the server, if the first three version numbers are the same (i.e., only the fourth version number is different), but it is not recommended. It is strongly discouraged to use a higher version of the client driver to access a lower version of the TDengine server. +The version number of the TDengine client driver and the version number of the TDengine server should be the same. A lower version of the client driver is compatible with a higher version of the server, if the first three version numbers are the same (i.e., only the fourth version number is different). For e.g. if the client version is x.y.z.1 and the server version is x.y.z.2 the client and server are compatible. But in general we do not recommend using a lower client version with a newer server version. It is also strongly discouraged to use a higher version of the client driver to access a lower version of the TDengine server. ## Installation steps @@ -55,7 +55,7 @@ In the above example code, `taos_connect()` establishes a connection to port 603 :::note -- If not specified, when the return value of the API is an integer, _0_ means success, the others are error codes representing the reason for failure, and when the return value is a pointer, _NULL_ means failure. +- If not specified, when the return value of the API is an integer, _0_ means success. All others are error codes representing the reason for failure. When the return value is a pointer, _NULL_ means failure. - All error codes and their corresponding causes are described in the `taoserror.h` file. ::: @@ -140,13 +140,12 @@ The base API is used to do things like create database connections and provide a - `void taos_cleanup()` - Clean up the runtime environment and should be called before the application exits. + Cleans up the runtime environment and should be called before the application exits. - ` int taos_options(TSDB_OPTION option, const void * arg, ...) ` Set client options, currently supports region setting (`TSDB_OPTION_LOCALE`), character set -(`TSDB_OPTION_CHARSET`), time zone -(`TSDB_OPTION_TIMEZONE`), configuration file path (`TSDB_OPTION_CONFIGDIR`) . The region setting, character set, and time zone default to the current settings of the operating system. +(`TSDB_OPTION_CHARSET`), time zone (`TSDB_OPTION_TIMEZONE`), configuration file path (`TSDB_OPTION_CONFIGDIR`). 
The region setting, character set, and time zone default to the current settings of the operating system. - `char *taos_get_client_info()` @@ -159,7 +158,7 @@ The base API is used to do things like create database connections and provide a - host: FQDN of any node in the TDengine cluster - user: user name - pass: password - - db: database name, if the user does not provide, it can also be connected correctly, the user can create a new database through this connection, if the user provides the database name, it means that the database user has already created, the default use of the database + - db: the database name. Even if the user does not provide this, the connection will still work correctly. The user can create a new database through this connection. If the user provides the database name, it means that the database has already been created and the connection can be used for regular operations on the database. - port: the port the taosd program is listening on NULL indicates a failure. The application needs to save the returned parameters for subsequent use. @@ -187,7 +186,7 @@ The APIs described in this subsection are all synchronous interfaces. After bein - `TAOS_RES* taos_query(TAOS *taos, const char *sql)` - Executes an SQL command, either a DQL, DML, or DDL statement. The `taos` parameter is a handle obtained with `taos_connect()`. You can't tell if the result failed by whether the return value is `NULL`, but by parsing the error code in the result set with the `taos_errno()` function. + Executes an SQL command, either a DQL, DML, or DDL statement. The `taos` parameter is a handle obtained with `taos_connect()`. If the return value is `NULL`, this does not necessarily indicate a failure. You can get the error code, if any, by parsing the error code in the result set with the `taos_errno()` function. - `int taos_result_precision(TAOS_RES *res)` @@ -231,7 +230,7 @@ typedef struct taosField { - ` void taos_free_result(TAOS_RES *res)` - Frees the query result set and the associated resources. Be sure to call this API to free the resources after the query is completed. Otherwise, it may lead to a memory leak in the application. However, note that the application will crash if you call a function like `taos_consume()` to get the query results after freeing the resources. + Frees the query result set and the associated resources. Be sure to call this API to free the resources after the query is completed. Failing to call this may lead to a memory leak in the application. However, note that the application will crash if you call a function like `taos_consume()` to get the query results after freeing the resources. - `char *taos_errstr(TAOS_RES *res)` @@ -242,7 +241,7 @@ typedef struct taosField { Get the reason for the last API call failure. The return value is the error code. :::note -TDengine version 2.0 and above recommends that each thread of a database application create a separate connection or a connection pool based on threads. It is not recommended to pass the connection (TAOS\*) structure to different threads for shared use in the application. Queries, writes, etc., issued based on TAOS structures are multi-thread safe, but state quantities such as "USE statement" may interfere between threads. In addition, the C connector can dynamically create new database-oriented connections on demand (this procedure is not visible to the user), and it is recommended that `taos_close()` be called only at the final exit of the program to close the connection.
+TDengine version 2.0 and above recommends that each thread of a database application create a separate connection or a connection pool based on threads. It is not recommended to pass the connection (TAOS\*) structure to different threads for shared use in the application. Queries, writes, and other operations issued that are based on TAOS structures are multi-thread safe, but state quantities such as the "USE statement" may interfere between threads. In addition, the C connector can dynamically create new database-oriented connections on demand (this procedure is not visible to the user), and it is recommended that `taos_close()` be called only at the final exit of the program to close the connection. ::: @@ -274,12 +273,12 @@ All TDengine's asynchronous APIs use a non-blocking call pattern. Applications c ### Parameter Binding API -In addition to direct calls to `taos_query()` to perform queries, TDengine also provides a set of `bind` APIs that supports parameter binding, similar in style to MySQL, and currently only supports using a question mark `?` to represent the parameter to be bound. +In addition to direct calls to `taos_query()` to perform queries, TDengine also provides a set of `bind` APIs that supports parameter binding, similar in style to MySQL. TDengine currently only supports using a question mark `?` to represent the parameter to be bound. -Starting with versions 2.1.1.0 and 2.1.2.0, TDengine has significantly improved the bind APIs to support for data writing (INSERT) scenarios. This avoids the resource consumption of SQL syntax parsing when writing data through the parameter binding interface, thus significantly improving write performance in most cases. A typical operation, in this case, is as follows. 1. call `taos_stmt_init()` to create the parameter binding object. -2. call `taos_stmt_prepare()` to parse the INSERT statement. 3. +2. call `taos_stmt_prepare()` to parse the INSERT statement. 3. call `taos_stmt_set_tbname()` to set the table name if it is reserved in the INSERT statement but not the TAGS. 4. call `taos_stmt_set_tbname_tags()` to set the table name and TAGS values if the table name and TAGS are reserved in the INSERT statement (for example, if the INSERT statement takes an automatic table build). 5. call `taos_stmt_bind_param_batch()` to set the value of VALUES in multiple columns, or call `taos_stmt_bind_param()` to set the value of VALUES in a single row. @@ -383,7 +382,7 @@ In addition to writing data using the SQL method or the parameter binding API, w **return value** TAOS_RES structure, application can get error message by using `taos_errstr()` and also error code by using `taos_errno()`. In some cases, the returned TAOS_RES is `NULL`, and it is still possible to call `taos_errno()` to safely get the error code information. - The returned TAOS_RES needs to be freed by the caller. Otherwise, a memory leak will occur. + The returned TAOS_RES needs to be freed by the caller in order to avoid memory leaks. **Description** The protocol type is enumerated and contains the following three formats.
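Of the protocol formats mentioned above, the InfluxDB line-protocol case can be exercised with the short C sketch below (an illustration, not from the original page; the `meters` measurement, its tags, and the connection settings are placeholders):

```c
#include <stdio.h>
#include <taos.h>

// Minimal schemaless-write sketch: one InfluxDB line-protocol record.
int main() {
  TAOS *taos = taos_connect("localhost", "root", "taosdata", "test", 6030);
  if (taos == NULL) return 1;

  char *lines[] = {
      "meters,location=California.SanFrancisco,groupid=2 "
      "current=11.8,voltage=221 1648432611249",
  };
  // Protocol and timestamp-precision constants are from taos.h (2.2.0.0+).
  TAOS_RES *res = taos_schemaless_insert(taos, lines, 1,
                                         TSDB_SML_LINE_PROTOCOL,
                                         TSDB_SML_TIMESTAMP_MILLI_SECONDS);
  if (taos_errno(res) != 0) {
    printf("schemaless insert failed: %s\n", taos_errstr(res));
  }
  taos_free_result(res);  // must be freed by the caller, as noted above
  taos_close(taos);
  return 0;
}
```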
@@ -416,13 +415,13 @@ The Subscription API currently supports subscribing to one or more tables and co This function is responsible for starting the subscription service, returning the subscription object on success and `NULL` on failure, with the following parameters. - - taos: the database connection that has been established - - restart: if the subscription already exists, whether to restart or continue the previous subscription - - topic: the topic of the subscription (i.e., the name). This parameter is the unique identifier of the subscription - - sql: the query statement of the subscription, this statement can only be _select_ statement, only the original data should be queried, only the data can be queried in time order - - fp: the callback function when the query result is received (the function prototype will be introduced later), only used when called asynchronously. This parameter should be passed `NULL` when called synchronously - - param: additional parameter when calling the callback function, the system API will pass it to the callback function as it is, without any processing - - interval: polling period in milliseconds. The callback function will be called periodically according to this parameter when called asynchronously. not recommended to set this parameter too small To avoid impact on system performance when called synchronously. If the interval between two calls to `taos_consume()` is less than this period, the API will block until the interval exceeds this period. + - taos: the database connection that has been established. + - restart: if the subscription already exists, whether to restart or continue the previous subscription. + - topic: the topic of the subscription (i.e., the name). This parameter is the unique identifier of the subscription. + - sql: the query statement of the subscription which can only be a _select_ statement. Only the original data should be queried, and data can only be queried in temporal order. + - fp: the callback function invoked when the query result is received; it is only used when called asynchronously. This parameter should be passed `NULL` when called synchronously. The function prototype is described below. + - param: additional parameter when calling the callback function. The system API will pass it to the callback function as is, without any processing. + - interval: polling period in milliseconds. The callback function will be called periodically according to this parameter when called asynchronously; the interval should not be set too small, to avoid impact on system performance. When called synchronously, if the interval between two calls to `taos_consume()` is less than this period, the API will block until the interval exceeds this period. - ` typedef void (*TAOS_SUBSCRIBE_CALLBACK)(TAOS_SUB* tsub, TAOS_RES *res, void* param, int code)` diff --git a/docs-en/14-reference/03-connector/csharp.mdx b/docs-en/14-reference/03-connector/csharp.mdx index 2969392a0594ff0705e88bede5be90fb9dfd646d..5eb322cf9125fe036349de22ceea5988de46e404 100644 --- a/docs-en/14-reference/03-connector/csharp.mdx +++ b/docs-en/14-reference/03-connector/csharp.mdx @@ -179,9 +179,9 @@ namespace TDengineExample 1. "Unable to establish connection", "Unable to resolve FQDN" - Usually, it cause by the FQDN configuration is incorrect, you can refer to [How to understand TDengine's FQDN (Chinese)](https://www.taosdata.com/blog/2021/07/29/2741.html) to solve it. 2. + Usually, it's caused by an incorrect FQDN configuration.
Please refer to this section in the [FAQ](https://docs.tdengine.com/2.4/train-faq/faq/#2-how-to-handle-unable-to-establish-connection) to troubleshoot. -Unhandled exception. System.DllNotFoundException: Unable to load DLL 'taos' or one of its dependencies: The specified module cannot be found. +2. Unhandled exception. System.DllNotFoundException: Unable to load DLL 'taos' or one of its dependencies: The specified module cannot be found. This is usually because the program did not find the dependent client driver. The solution is to copy `C:\TDengine\driver\taos.dll` to the `C:\Windows\System32\` directory on Windows, or, on Linux, to create the following soft link: `ln -s /usr/local/taos/driver/libtaos.so.x.x.x.x /usr/lib/libtaos.so`. diff --git a/docs-en/14-reference/03-connector/go.mdx b/docs-en/14-reference/03-connector/go.mdx index fd5930f07ff7184bd8dd5ff19cd3860f9718eaf9..c1e85ae4eb1d1d7ccfb70b2b4f38cebaf6cbf06c 100644 --- a/docs-en/14-reference/03-connector/go.mdx +++ b/docs-en/14-reference/03-connector/go.mdx @@ -15,9 +15,9 @@ import GoOpenTSDBTelnet from "../../07-develop/03-insert-data/_go_opts_telnet.md import GoOpenTSDBJson from "../../07-develop/03-insert-data/_go_opts_json.mdx" import GoQuery from "../../07-develop/04-query-data/_go.mdx" -`driver-go` is the official Go language connector for TDengine, which implements the interface to the Go language [database/sql](https://golang.org/pkg/database/sql/) package. Go developers can use it to develop applications that access TDengine cluster data. +`driver-go` is the official Go language connector for TDengine. It implements the [database/sql](https://golang.org/pkg/database/sql/) package, the generic Go language interface to SQL databases. Go developers can use it to develop applications that access TDengine cluster data. -`driver-go` provides two ways to establish connections. One is **native connection**, which connects to TDengine instances natively through the TDengine client driver (taosc), supporting data writing, querying, subscriptions, schemaless writing, and bind interface. The other is the **REST connection**, which connects to TDengine instances via the REST interface provided by taosAdapter. The set of features implemented by the REST connection differs slightly from the native connection. +`driver-go` provides two ways to establish connections. One is **native connection**, which connects to TDengine instances natively through the TDengine client driver (taosc), supporting data writing, querying, subscriptions, schemaless writing, and bind interface. The other is the **REST connection**, which connects to TDengine instances via the REST interface provided by taosAdapter. The set of features implemented by the REST connection differs slightly from those implemented by the native connection. This article describes how to install `driver-go` and connect to TDengine clusters and perform basic operations such as data query and data writing through `driver-go`. @@ -213,7 +213,7 @@ func main() { Since the REST interface is stateless, the `use db` syntax will not work. You need to put the db name into the SQL command, e.g. `create table if not exists tb1 (ts timestamp, a int)` to `create table if not exists test.tb1 (ts timestamp, a int)` otherwise it will report the error `[0x217] Database not specified or available`. -You can also put the db name in the DSN by changing `root:taosdata@http(localhost:6041)/` to `root:taosdata@http(localhost:6041)/test`. This method is supported by taosAdapter in TDengine 2.4.0.5.
Executing the `create database` statement when the specified db does not exist will not report an error while executing other queries or writing against that db will report an error. +You can also put the db name in the DSN by changing `root:taosdata@http(localhost:6041)/` to `root:taosdata@http(localhost:6041)/test`. This method is supported by taosAdapter since TDengine 2.4.0.5. Executing the `create database` statement when the specified db does not exist will not report an error, while executing other queries or writing against that db will report an error. The complete example is as follows. @@ -289,7 +289,7 @@ func main() { 6. `readBufferSize` parameter has no significant effect after being increased - If you increase `readBufferSize` will reduce the number of `syscall` calls when fetching results. If the query result is smaller, modifying this parameter will not improve significantly. If you increase the parameter value too much, the bottleneck will be parsing JSON data. If you need to optimize the query speed, you must adjust the value according to the actual situation to achieve the best query result. + Increasing `readBufferSize` will reduce the number of `syscall` calls when fetching results. If the query result is small, modifying this parameter will not improve performance significantly. If you increase the parameter value too much, the bottleneck will be parsing JSON data. If you need to optimize the query speed, you must adjust the value based on the actual situation to achieve the best query performance. 7. `disableCompression` parameter is set to `false` when the query efficiency is reduced diff --git a/docs-en/14-reference/03-connector/java.mdx b/docs-en/14-reference/03-connector/java.mdx index 1c84c0b1cacb454ca4e35266a1d362a2d2a038fb..33d715c2e218fd6db4f61882f2a7a92baa80f5a2 100644 --- a/docs-en/14-reference/03-connector/java.mdx +++ b/docs-en/14-reference/03-connector/java.mdx @@ -9,19 +9,19 @@ description: TDengine Java based on JDBC API and provide both native and REST co import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem'; -'taos-jdbcdriver' is TDengine's official Java language connector, which allows Java developers to develop applications that access the TDengine database. 'taos-jdbcdriver' implements the interface of the JDBC driver standard and provides two forms of connectors. One is to connect to a TDengine instance natively through the TDengine client driver (taosc), which supports functions including data writing, querying, subscription, schemaless writing, and bind interface. And the other is to connect to a TDengine instance through the REST interface provided by taosAdapter (2.4.0.0 and later). REST connections implement has a slight differences to compare the set of features implemented and native connections. +'taos-jdbcdriver' is TDengine's official Java language connector, which allows Java developers to develop applications that access the TDengine database. 'taos-jdbcdriver' implements the interface of the JDBC driver standard and provides two forms of connectors. One is to connect to a TDengine instance natively through the TDengine client driver (taosc), which supports functions including data writing, querying, subscription, schemaless writing, and bind interface. And the other is to connect to a TDengine instance through the REST interface provided by taosAdapter (2.4.0.0 and later).
The implementation of the REST connection and those of the native connections have slight differences in features. ![TDengine Database tdengine-connector](tdengine-jdbc-connector.webp) The preceding diagram shows two ways for a Java app to access TDengine via connector: - JDBC native connection: Java applications use TSDBDriver on physical node 1 (pnode1) to call client-driven directly (`libtaos.so` or `taos.dll`) APIs to send writing and query requests to taosd instances located on physical node 2 (pnode2). -- JDBC REST connection: The Java application encapsulates the SQL as a REST request via RestfulDriver, sends it to the REST server of physical node 2 (taosAdapter), requests TDengine server through the REST server, and returns the result. +- JDBC REST connection: The Java application encapsulates the SQL as a REST request via RestfulDriver, sends it to the REST server (taosAdapter) on physical node 2. taosAdapter forwards the request to TDengine server and returns the result. -Using REST connection, which does not rely on TDengine client drivers.It can be cross-platform more convenient and flexible but introduce about 30% lower performance than native connection. +The REST connection, which does not rely on TDengine client drivers, is more convenient and flexible, in addition to being cross-platform. However, the performance is about 30% lower than that of the native connection. :::info -TDengine's JDBC driver implementation is as consistent as possible with the relational database driver. Still, there are differences in the use scenarios and technical characteristics of TDengine and relational object databases, so 'taos-jdbcdriver' also has some differences from traditional JDBC drivers. You need to pay attention to the following points when using: +TDengine's JDBC driver implementation is as consistent as possible with the relational database driver. Still, there are differences in the use scenarios and technical characteristics of TDengine and relational object databases. So 'taos-jdbcdriver' also has some differences from traditional JDBC drivers. It is important to keep the following points in mind: - TDengine does not currently support delete operations for individual data records. - Transactional operations are not currently supported. @@ -88,7 +88,7 @@ Add following dependency in the `pom.xml` file of your Maven project: -You can build Java connector from source code after clone TDengine project: +You can build Java connector from source code after cloning the TDengine project: ```shell git clone https://github.com/taosdata/TDengine.git @@ -96,7 +96,7 @@ cd TDengine/src/connector/jdbc mvn clean install -Dmaven.test.skip=true ``` -After compilation, a jar package of taos-jdbcdriver-2.0.XX-dist .jar is generated in the target directory, and the compiled jar file is automatically placed in the local Maven repository. +After compilation, a jar package named taos-jdbcdriver-2.0.XX-dist.jar is generated in the target directory, and the compiled jar file is automatically placed in the local Maven repository. @@ -186,7 +186,7 @@ Connection conn = DriverManager.getConnection(jdbcUrl); In the above example, a RestfulDriver with a JDBC REST connection is used to establish a connection to a database named `test` with hostname `taosdemo.com` on port `6041`. The URL specifies the user name as `root` and the password as `taosdata`. -There is no dependency on the client driver when Using a JDBC REST connection. Compared to a JDBC native connection, only the following are required: 1.
+There is no dependency on the client driver when using a JDBC REST connection. Compared to a JDBC native connection, only the following are required: 1. driverClass specified as "com.taosdata.jdbc.rs.RestfulDriver". 2. jdbcUrl starting with "jdbc:TAOS-RS://". @@ -209,7 +209,7 @@ The configuration parameters in the URL are as follows. INSERT INTO test.t1 USING test.weather (ts, temperature) TAGS('California.SanFrancisco') VALUES(now, 24.6); ``` -- Starting from taos-jdbcdriver-2.0.36 and TDengine 2.2.0.0, if dbname is specified in the URL, JDBC REST connections will use `/rest/sql/dbname` as the URL for REST requests by default, and there is no need to specify dbname in SQL. For example, if the URL is `jdbc:TAOS-RS://127.0.0.1:6041/test`, then the SQL can be executed: insert into t1 using weather(ts, temperature) tags('California.SanFrancisco') values(now, 24.6); +- Starting from taos-jdbcdriver-2.0.36 and TDengine 2.2.0.0, if dbname is specified in the URL, JDBC REST connections will use `/rest/sql/dbname` as the URL for REST requests by default, and there is no need to specify dbname in SQL. For example, if the URL is `jdbc:TAOS-RS://127.0.0.1:6041/test`, then the SQL can be executed: insert into t1 using weather(ts, temperature) tags('California.SanFrancisco') values(now, 24.6); ::: @@ -271,7 +271,7 @@ If the configuration parameters are duplicated in the URL, Properties, or client 2. Properties connProps 3. the configuration file taos.cfg of the TDengine client driver when using a native connection -For example, if you specify the password as `taosdata` in the URL and specify the password as `taosdemo` in the Properties simultaneously. In this case, JDBC will use the password in the URL to establish the connection. +For example, if you specify the password as `taosdata` in the URL and specify the password as `taosdemo` in the Properties simultaneously, JDBC will use the password in the URL to establish the connection. ## Usage examples @@ -323,7 +323,7 @@ while(resultSet.next()){ } ``` -> The query is consistent with operating a relational database. When using subscripts to get the contents of the returned fields, starting from 1, it is recommended to use the field names to get them. +> The query is consistent with operating a relational database. When using subscripts to get the contents of the returned fields, you have to start from 1. However, we recommend using the field names to get the values of the fields in the result set. ### Handling exceptions @@ -623,7 +623,7 @@ public void setNString(int columnIndex, ArrayList<String> list, int size) throws ### Schemaless Writing -Starting with version 2.2.0.0, TDengine has added the ability to schemaless writing. It is compatible with InfluxDB's Line Protocol, OpenTSDB's telnet line protocol, and OpenTSDB's JSON format protocol. See [schemaless writing](/reference/schemaless/) for details. +Starting with version 2.2.0.0, TDengine has added the ability to perform schemaless writing. It is compatible with InfluxDB's Line Protocol, OpenTSDB's telnet line protocol, and OpenTSDB's JSON format protocol. See [schemaless writing](/reference/schemaless/) for details. **Note**.
@@ -666,16 +666,16 @@ The TDengine Java Connector supports subscription functionality with the followi #### Create subscriptions ```java -TSDBSubscribe sub = ((TSDBConnection)conn).subscribe("topic", "select * from meters", false); +TSDBSubscribe sub = ((TSDBConnection)conn).subscribe("topicname", "select * from meters", false); ``` The three parameters of the `subscribe()` method have the following meanings. -- topic: the subscribed topic (i.e., name). This parameter is the unique identifier of the subscription -- sql: the query statement of the subscription, this statement can only be `select` statement, only the original data should be queried, and you can query only the data in the positive time order +- topicname: the name of the subscribed topic. This parameter is the unique identifier of the subscription. +- sql: the query statement of the subscription. This statement can only be a `select` statement. Only original data can be queried, and you can query the data only in temporal order. - restart: if the subscription already exists, whether to restart or continue the previous subscription -The above example will use the SQL command `select * from meters` to create a subscription named `topic`. If the subscription exists, it will continue the progress of the previous query instead of consuming all the data from the beginning. +The above example will use the SQL command `select * from meters` to create a subscription named `topicname`. If the subscription exists, it will continue the progress of the previous query instead of consuming all the data from the beginning. #### Subscribe to consume data diff --git a/docs-en/14-reference/03-connector/node.mdx b/docs-en/14-reference/03-connector/node.mdx index 3d30148e8ed9d8f98d135fa0fa72809f1115231a..8f586acde4848af71efcb23358be1f8486cedb8e 100644 --- a/docs-en/14-reference/03-connector/node.mdx +++ b/docs-en/14-reference/03-connector/node.mdx @@ -14,7 +14,6 @@ import NodeInfluxLine from "../../07-develop/03-insert-data/_js_line.mdx"; import NodeOpenTSDBTelnet from "../../07-develop/03-insert-data/_js_opts_telnet.mdx"; import NodeOpenTSDBJson from "../../07-develop/03-insert-data/_js_opts_json.mdx"; import NodeQuery from "../../07-develop/04-query-data/_js.mdx"; -import NodeAsyncQuery from "../../07-develop/04-query-data/_js_async.mdx"; `td2.0-connector` and `td2.0-rest-connector` are the official Node.js language connectors for TDengine. Node.js developers can develop applications to access TDengine instance data. @@ -189,14 +188,8 @@ let cursor = conn.cursor(); ### Query data -#### Synchronous queries - -#### asynchronous query - - - ## More Sample Programs | Sample Programs | Sample Program Description | @@ -232,7 +225,7 @@ See [video tutorial](https://www.taosdata.com/blog/2020/11/11/1957.html) for the 2. "Unable to establish connection", "Unable to resolve FQDN" - Usually, root cause is the FQDN is not configured correctly. You can refer to [How to understand TDengine's FQDN (In Chinese)](https://www.taosdata.com/blog/2021/07/29/2741.html). + Usually, the root cause is an incorrect FQDN configuration. You can refer to this section in the [FAQ](https://docs.tdengine.com/2.4/train-faq/faq/#2-how-to-handle-unable-to-establish-connection) to troubleshoot.
## Important Updates diff --git a/docs-en/14-reference/03-connector/python.mdx index c52b4f18825c083e4bdfebe26b2e68ef2025ef8a..69eec2388d460754493d2b775f14ab4bbf129799 100644 --- a/docs-en/14-reference/03-connector/python.mdx +++ b/docs-en/14-reference/03-connector/python.mdx @@ -11,18 +11,18 @@ import TabItem from "@theme/TabItem"; `taospy` is the official Python connector for TDengine. `taospy` provides a rich set of APIs that makes it easy for Python applications to access TDengine. `taospy` wraps both the [native interface](/reference/connector/cpp) and [REST interface](/reference/rest-api) of TDengine, which correspond to the `taos` and `taosrest` modules of the `taospy` package, respectively. In addition to wrapping the native and REST interfaces, `taospy` also provides a set of programming interfaces that conforms to the [Python Data Access Specification (PEP 249)](https://peps.python.org/pep-0249/). It is easy to integrate `taospy` with many third-party tools, such as [SQLAlchemy](https://www.sqlalchemy.org/) and [pandas](https://pandas.pydata.org/). -The connection to the server directly using the native interface provided by the client driver is referred to hereinafter as a "native connection"; the connection to the server using the REST interface provided by taosAdapter is referred to hereinafter as a "REST connection". +The direct connection to the server using the native interface provided by the client driver is referred to hereinafter as a "native connection"; the connection to the server using the REST interface provided by taosAdapter is referred to hereinafter as a "REST connection". The source code for the Python connector is hosted on [GitHub](https://github.com/taosdata/taos-connector-python). ## Supported Platforms -- The native connection [supported platforms](/reference/connector/#supported-platforms) is the same as the one supported by the TDengine client. +- The [supported platforms](/reference/connector/#supported-platforms) for the native connection are the same as the ones supported by the TDengine client. - REST connections are supported on all platforms that can run Python. ## Version selection -We recommend using the latest version of `taospy`, regardless what the version of TDengine is. +We recommend using the latest version of `taospy`, regardless of the version of TDengine. ## Supported features @@ -139,7 +139,7 @@ The FQDN above can be the FQDN of any dnode in the cluster, and the PORT is the -For REST connections and making sure the cluster is up, make sure the taosAdapter component is up. This can be tested using the following `curl ` command. +For REST connections, make sure the cluster and the taosAdapter component are running. This can be tested using the following `curl` command. ``` curl -u root:taosdata http://<FQDN>:<PORT>/rest/sql -d "select server_version()" @@ -312,7 +312,7 @@ For a more detailed description of the `sql()` method, please refer to [RestClie ### Exception handling -All database operations will be thrown directly if an exception occurs. The application is responsible for exception handling. For example: +All errors from database operations are thrown directly as exceptions and the error message from the database is passed up the exception stack. The application is responsible for exception handling.
For example: ```python {{#include docs-examples/python/handle_exception.py}} diff --git a/docs-en/14-reference/03-connector/rust.mdx index 2c8fe68c1ca8b091b8d685d8e20942a02ab2c5e8..cd54f35982ec13fc3c9160145fa002fb6f1d094b 100644 --- a/docs-en/14-reference/03-connector/rust.mdx +++ b/docs-en/14-reference/03-connector/rust.mdx @@ -30,7 +30,7 @@ REST connections are supported on all platforms that can run Rust. Please refer to [version support list](/reference/connector#version-support). -The Rust Connector is still under rapid development and is not guaranteed to be backward compatible before 1.0. Recommend to use TDengine version 2.4 or higher to avoid known issues. +The Rust Connector is still under rapid development and is not guaranteed to be backward compatible before 1.0. We recommend using TDengine version 2.4 or higher to avoid known issues. ## Installation @@ -206,7 +206,7 @@ let conn: Taos = cfg.connect(); ### Connection pooling -In complex applications, recommand to enable connection pool. Connection pool for [libtaos] is implemented using [r2d2]. +In complex applications, we recommend enabling connection pools. Connection pool for [libtaos] is implemented using [r2d2]. As follows, a connection pool with default parameters can be generated. @@ -269,7 +269,7 @@ The [Taos] structure is the connection manager in [libtaos] and provides two mai Note that Rust asynchronous functions and an asynchronous runtime are required. -[Taos] provides partial Rust methodization of SQL to reduce the frequency of `format!` code blocks. +[Taos] provides a few Rust methods that encapsulate SQL to reduce the frequency of `format!` code blocks. - `.describe(table: &str)`: Executes `DESCRIBE` and returns a Rust data structure. - `.create_database(database: &str)`: Executes the `CREATE DATABASE` statement. @@ -279,7 +279,7 @@ In addition, this structure is also the entry point for [Parameter Binding](#Par ### Bind Interface -Similar to the C interface, Rust provides the bind interface's wraping. First, create a bind object [Stmt] for a SQL command from the [Taos] object. +Similar to the C interface, Rust provides the bind interface's wrapping. First, create a bind object [Stmt] for a SQL command from the [Taos] object. ```rust let mut stmt: Stmt = taos.stmt("insert into ? values(?, ?)")?; diff --git a/docs-en/14-reference/04-taosadapter.md index 55d964c14a091109d82d67f0060e846d7e513c0c..3264124655e7040e1d94b43500a0b582d95cb5a1 100644 --- a/docs-en/14-reference/04-taosadapter.md +++ b/docs-en/14-reference/04-taosadapter.md @@ -30,7 +30,7 @@ taosAdapter provides the following features. ### Install taosAdapter -taosAdapter has been part of TDengine server software since TDengine v2.4.0.0. If you use the TDengine server, you don't need additional steps to install taosAdapter. You can download taosAdapter from [TDengine official website](https://tdengine.com/all-downloads/) to download the TDengine server installation package (taosAdapter is included in v2.4.0.0 and later version). If you need to deploy taosAdapter separately on another server other than the TDengine server, you should install the full TDengine on that server to install taosAdapter. If you need to build taosAdapter from source code, you can refer to the [Building taosAdapter]( https://github.com/taosdata/taosadapter/blob/develop/BUILD.md) documentation. +taosAdapter has been part of TDengine server software since TDengine v2.4.0.0.
If you use the TDengine server, you don't need additional steps to install taosAdapter. You can visit the [TDengine official website](https://tdengine.com/all-downloads/) to download the TDengine server installation package (taosAdapter is included in v2.4.0.0 and later versions). If you need to deploy taosAdapter separately on another server other than the TDengine server, you should install the full TDengine server package on that server to install taosAdapter. If you need to build taosAdapter from source code, you can refer to the [Building taosAdapter]( https://github.com/taosdata/taosadapter/blob/develop/BUILD.md) documentation. ### Start/Stop taosAdapter @@ -38,7 +38,7 @@ On Linux systems, the taosAdapter service is managed by `systemd` by default. Yo ### Remove taosAdapter -Use the command `rmtaos` to remove the TDengine server software if you use tar.gz package or use package management command like rpm or apt to remove the TDengine server, including taosAdapter. +Use the command `rmtaos` to remove the TDengine server software if you installed it using the tar.gz package. If you installed it using a .deb or .rpm package, use the corresponding command for your package manager, such as apt or rpm, to remove the TDengine server, including taosAdapter. ### Upgrade taosAdapter @@ -240,7 +240,7 @@ node_export is an exporter of hardware and OS metrics exposed by the \*NIX kerne ## Memory usage optimization methods -taosAdapter will monitor its memory usage during operation and adjust it with two thresholds. Valid values range from -1 to 100 integers in percent of the system's physical memory. +taosAdapter will monitor its memory usage during operation and adjust it with two thresholds. Valid values are integers between 1 and 100, and represent a percentage of the system's physical memory. - pauseQueryMemoryThreshold - pauseAllMemoryThreshold @@ -276,7 +276,7 @@ Corresponding configuration parameter monitor.pauseQueryMemoryThreshold memory threshold for no more queries Environment variable `TAOS_MONITOR_PAUSE_QUERY_MEMORY_THRESHOLD` (default 70) ``` -You can adjust it according to the specific application scenario and operation strategy, and it is recommended to use operation monitoring software to monitor system memory status timely. The load balancer can also check the taosAdapter running status through this interface. +You should adjust this parameter based on your specific application scenario and operation strategy. We recommend using monitoring software to monitor system memory status. The load balancer can also check the taosAdapter running status through this interface. ## taosAdapter Monitoring Metrics @@ -325,7 +325,7 @@ You can also adjust the level of the taosAdapter log output by setting the `--lo ## How to migrate from older TDengine versions to taosAdapter -In TDengine server 2.2.x.x or earlier, the TDengine server process (taosd) contains an embedded HTTP service. As mentioned earlier, taosAdapter is a standalone software managed using `systemd` and has its process ID. And there are some configuration parameters and behaviors that are different between the two. See the following table for details. +In TDengine server 2.2.x.x or earlier, the TDengine server process (taosd) contains an embedded HTTP service. As mentioned earlier, taosAdapter is a standalone software managed using `systemd` and has its own process ID. There are some configuration parameters and behaviors that are different between the two. See the following table for details.
| **#** | **embedded httpd** | **taosAdapter** | **comment** | | ----- | ------------------- | ------------------------------------ | ------------------------------------------------------------------ ------------------------------------------------------------------------ | diff --git a/docs-en/14-reference/05-taosbenchmark.md index 1e2b0b99f652bca0d775bebe28378600470f8661..b029f3d3eea0b010354dac1eb3ffecbc872e597f 100644 --- a/docs-en/14-reference/05-taosbenchmark.md +++ b/docs-en/14-reference/05-taosbenchmark.md @@ -7,7 +7,7 @@ description: "taosBenchmark (once called taosdemo ) is a tool for testing the pe ## Introduction -taosBenchmark (formerly taosdemo ) is a tool for testing the performance of TDengine products. taosBenchmark can test the performance of TDengine's insert, query, and subscription functions and simulate large amounts of data generated by many devices. taosBenchmark can flexibly control the number and type of databases, supertables, tag columns, number and type of data columns, and sub-tables, and types of databases, super tables, the number and types of data columns, the number of sub-tables, the amount of data per sub-table, the time interval for inserting data, the number of working threads, whether and how to insert disordered data, and so on. The installer provides taosdemo as a soft link to taosBenchmark for compatibility with past users. +taosBenchmark (formerly taosdemo) is a tool for testing the performance of TDengine products. taosBenchmark can test the performance of TDengine's insert, query, and subscription functions and simulate large amounts of data generated by many devices. taosBenchmark can flexibly control the number and types of databases, super tables, tag columns, data columns, and sub-tables, as well as the amount of data per sub-table, the time interval for inserting data, the number of working threads, and whether and how to insert disordered data. The installer provides taosdemo as a soft link to taosBenchmark for compatibility and for the convenience of past users. ## Installation @@ -21,7 +21,7 @@ There are two ways to install taosBenchmark: ### Configuration and running methods -taosBenchmark supports two configuration methods: [Command-line arguments](#Command-line arguments in detailed) and [JSON configuration file](#Configuration file arguments in detailed). These two methods are mutually exclusive, and with only one command-line parameter, users can use `-f ` to specify a configuration file when using a configuration file. When running taosBenchmark with command-line arguments and controlling its behavior, users should use other parameters for configuration rather than `-f` parameter. In addition, taosBenchmark offers a special way of running without parameters. +taosBenchmark supports two configuration methods: [Command-line arguments](#Command-line arguments in detailed) and [JSON configuration file](#Configuration file arguments in detailed). These two methods are mutually exclusive. Users can use `-f <json file>` to specify a configuration file. When running taosBenchmark with command-line arguments to control its behavior, users should use other parameters for configuration, but not the `-f` parameter. In addition, taosBenchmark offers a special way of running without parameters. taosBenchmark supports complete performance testing of TDengine.
taosBenchmark supports the TDengine functions in three categories: write, query, and subscribe. These three functions are mutually exclusive, and users can select only one of them each time taosBenchmark runs. It is important to note that the type of functionality to be tested is not configurable when using the command-line configuration method, which can only test writing performance. To test the query and subscription performance of the TDengine, you must use the configuration file method and specify the function type to test via the parameter `filetype` in the configuration file. @@ -35,7 +35,7 @@ Execute the following commands to quickly experience taosBenchmark's default con taosBenchmark ``` -When run without parameters, taosBenchmark connects to the TDengine cluster specified in `/etc/taos` by default and creates a database named test in TDengine, a super table named `meters` under the test database, and 10,000 tables under the super table with 10,000 records written to each table. Note that if there is already a test database, this table is not used. Note that if there is already a test database, this command will delete it first and create a new test database. +When run without parameters, taosBenchmark connects to the TDengine cluster specified in `/etc/taos` by default and creates a database named `test`, a super table named `meters` under the test database, and 10,000 tables under the super table with 10,000 records written to each table. Note that if there is already a database named "test", this command will delete it first and create a new database. ### Run with command-line configuration parameters @@ -45,7 +45,7 @@ The `-f ` argument cannot be used when running taosBenchmark with com taosBenchmark -I stmt -n 200 -t 100 ``` -The above command, `taosBenchmark` will create a database named `test`, create a super table `meters` in it, create 100 sub-tables in the super table and insert 200 records for each sub-table using parameter binding. +Using the above command, `taosBenchmark` will create a database named `test`, create a super table `meters` in it, create 100 sub-tables in the super table and insert 200 records for each sub-table using parameter binding. ### Run with the configuration file @@ -95,10 +95,10 @@ taosBenchmark -f ## Command-line argument in detailed - **-f/--file <json file>** : - specify the configuration file to use. This file includes All parameters. And users should not use this parameter with other parameters on the command-line. There is no default value. + specify the configuration file to use. This file includes all the parameters. Users should not use this parameter with other parameters on the command-line. There is no default value. - **-c/--config-dir <config dir>** : - specify the directory where the TDengine cluster configuration file. the default path is `/etc/taos`. + specify the directory where the TDengine cluster configuration file is located. The default path is `/etc/taos`. - **-h/--host <host>** : Specify the FQDN of the TDengine server to connect to. The default value is localhost. @@ -272,13 +272,13 @@ The parameters for creating super tables are configured in `super_tables` in the - **child_table_prefix** : The prefix of the child table name, mandatory configuration item, no default value. -- **escape_character**: specify the super table and child table names containing escape characters. By default is "no". The value can be "yes" or "no". +- **escape_character**: specify whether the super table and child table names contain escape characters. The value can be "yes" or "no".
The default is "no". - **auto_create_table**: only when insert_mode is taosc, rest, stmt, and childtable_exists is "no". "yes" means taosBenchmark will automatically create non-existent tables when inserting data; "no" means that taosBenchmark will create all tables before inserting. -- **batch_create_tbl_num** : the number of tables per batch when creating sub-tables, default is 10. Note: the actual number of batches may not be the same as this value when the executed SQL statement is larger than the maximum length supported, it will be automatically truncated and re-executed to continue creating. +- **batch_create_tbl_num** : the number of tables per batch when creating sub-tables, default is 10. Note: the actual number of batches may not be the same as this value. If the executed SQL statement is larger than the maximum length supported, it will be automatically truncated and re-executed to continue creating. -- **data_source**: specify the source of data-generating. Default is taosBenchmark randomly generated. Users can configure it as "rand" and "sample". When "sample" is used, taosBenchmark will use the data in the file specified by the `sample_file` parameter. +- **data_source**: specify the source of data-generation. Default is taosBenchmark randomly generated. Users can configure it as "rand" and "sample". When "sample" is used, taosBenchmark will use the data in the file specified by the `sample_file` parameter. - **insert_mode**: insertion mode with options taosc, rest, stmt, sml, sml-rest, corresponding to normal write, restful interface write, parameter binding interface write, schemaless interface write, restful schemaless interface write (provided by taosAdapter). The default value is taosc. @@ -300,15 +300,15 @@ The parameters for creating super tables are configured in `super_tables` in the - **partial_col_num**: If this value is a positive number n, only the first n columns are written to, only if insert_mode is taosc and rest, or all columns if n is 0. -- **disorder_ratio** : Specifies the percentage probability of disordered data in the value range [0,50]. The default is 0, which means there is no disorder data. +- **disorder_ratio** : Specifies the percentage probability of disordered (i.e. out-of-order) data in the value range [0,50]. The default is 0, which means there is no disorder data. -- **disorder_range** : Specifies the timestamp fallback range for the disordered data. The generated disorder timestamp is the timestamp that should be used in the non-disorder case minus a random value in this range. Valid only if the percentage of disordered data specified by `-O/--disorder` is greater than 0. +- **disorder_range** : Specifies the timestamp fallback range for the disordered data. The disordered timestamp is generated by subtracting a random value in this range, from the timestamp that would be used in the non-disorder case. Valid only if the percentage of disordered data specified by `-O/--disorder` is greater than 0. -- **timestamp_step**: The timestamp step for inserting data in each child table, in units consistent with the `precision` of the database, the default value is 1. +- **timestamp_step**: The timestamp step for inserting data in each child table, in units consistent with the `precision` of the database. For e.g. if the `precision` is milliseconds, the timestamp step will be in milliseconds. The default value is 1. - **start_timestamp** : The timestamp start value of each sub-table, the default value is now. 
-- **sample_format**: The type of the sample data file, now only "csv" is supported. +- **sample_format**: The type of the sample data file; for now only "csv" is supported. - **sample_file**: Specify a CSV format file as the data source. It only works when data_source is a sample. If the number of rows in the CSV file is less than or equal to prepared_rand, then taosBenchmark will read the CSV file data cyclically until it is the same as prepared_rand; otherwise, taosBenchmark will read only the first prepared_rand rows. The final number of rows of data generated is the smaller of the two. @@ -341,7 +341,7 @@ The configuration parameters for specifying super table tag columns and data col - **create_table_thread_count** : The number of threads to build the table, default is 8. -- **connection_pool_size** : The number of pre-established connections to the TDengine server. If not configured, it is the same number of threads specified. +- **connection_pool_size** : The number of pre-established connections to the TDengine server. If not configured, it is the same as the number of threads specified. - **result_file** : The path to the result output file, the default value is ./output.txt. diff --git a/docs-en/14-reference/06-taosdump.md index a7e216398a183a096678d8d70c429606d4e5f809..5403e40925f633ce62795cc6037fc8c8f7aad07a 100644 --- a/docs-en/14-reference/06-taosdump.md +++ b/docs-en/14-reference/06-taosdump.md @@ -1,16 +1,17 @@ --- title: taosdump -description: "taosdump is a tool application that supports backing up data from a running TDengine cluster and restoring the backed up data to the same or another running TDengine cluster." +description: "taosdump is a tool that supports backing up data from a running TDengine cluster and restoring the backed up data to the same, or another running TDengine cluster." --- ## Introduction -taosdump is a tool application that supports backing up data from a running TDengine cluster and restoring the backed up data to the same or another running TDengine cluster. +taosdump is a tool that supports backing up data from a running TDengine cluster and restoring the backed up data to the same, or another running TDengine cluster. taosdump can back up a database, a super table, or a normal table as a logical data unit or backup data records in the database, super tables, and normal tables. When using taosdump, you can specify the directory path for data backup. If you do not specify a directory, taosdump will back up the data to the current directory by default. -Suppose the specified location already has data files. In that case, taosdump will prompt the user and exit immediately to avoid data overwriting which means that the same path can only be used for one backup. -Please be careful if you see a prompt for this. +If the specified location already has data files, taosdump will prompt the user and exit immediately to avoid data overwriting. This means that the same path can only be used for one backup. + +Please be careful if you see this prompt, and ensure that you follow best practices and relevant SOPs for data integrity, backup, and data security. Users should not use taosdump to back up raw data, environment settings, hardware information, server configuration, or cluster topology. taosdump uses [Apache AVRO](https://avro.apache.org/) as the data file format to store backup data. @@ -30,7 +31,7 @@ There are two ways to install taosdump: 2.
backup multiple specified databases: use `-D db1,db2,... ` parameters; 3. back up some super or normal tables in the specified database: use `dbname stbname1 stbname2 tbname1 tbname2 ...` parameters. Note that the first parameter of this input sequence is the database name, and only one database is supported. The second and subsequent parameters are the names of super or normal tables in that database, separated by spaces. 4. back up the system log database: TDengine clusters usually contain a system database named `log`. The data in this database is the data that TDengine runs itself, and taosdump will not back up the log database by default. If users need to back up the log database, users can use the `-a` or `--allow-sys` command-line parameter. -5. Loose mode backup: taosdump version 1.4.1 onwards provides `-n` and `-L` parameters for backing up data without using escape characters and "loose" mode, which can reduce the number of backups if table names, column names, tag names do not use This can reduce the backup data time and backup data footprint if table names, column names, and tag names do not use `escape character`. If you are unsure about using `-n` and `-L` conditions, please use the default parameters for "strict" mode backup. See the [official documentation](/taos-sql/escape) for a description of escaped characters. +5. Loose mode backup: taosdump version 1.4.1 onwards provides `-n` and `-L` parameters for backing up data without using escape characters and "loose" mode, which can reduce the number of backups if table names, column names, tag names do not use escape characters. This can also reduce the backup data time and backup data footprint. If you are unsure about using `-n` and `-L` conditions, please use the default parameters for "strict" mode backup. See the [official documentation](/taos-sql/escape) for a description of escaped characters. :::tip - taosdump versions after 1.4.1 provide the `-I` argument for parsing Avro file schema and data. If users specify `-s`, then taosdump will only parse the schema. @@ -58,7 +59,7 @@ Usage: taosdump [OPTION...] dbname [tbname ...] or: taosdump [OPTION...] -i inpath or: taosdump [OPTION...] -o outpath - -h, --host=HOST Server host dumping data from. Default is + -h, --host=HOST Server host from which to dump data. Default is localhost. -p, --password User password to connect to server. Default is taosdata. @@ -71,10 +72,10 @@ Usage: taosdump [OPTION...] dbname [tbname ...] -r, --resultFile=RESULTFILE DumpOut/In Result file path and name. -a, --allow-sys Allow to dump system database -A, --all-databases Dump all databases. - -D, --databases=DATABASES Dump inputted databases. Use comma to separate - databases' name. + -D, --databases=DATABASES Dump listed databases. Use comma to separate + database names. -N, --without-property Dump database without its properties. - -s, --schemaonly Only dump tables' schema. + -s, --schemaonly Only dump table schemas. -y, --answer-yes Input yes for prompt. It will skip data file checking! -d, --avro-codec=snappy Choose an avro codec among null, deflate, snappy, @@ -97,7 +98,7 @@ Usage: taosdump [OPTION...] dbname [tbname ...] and try. The workable value is related to the length of the row and type of table schema. -I, --inspect inspect avro file content and print on screen - -L, --loose-mode Using loose mode if the table name and column name + -L, --loose-mode Use loose mode if the table name and column name use letter and number only. Default is NOT. -n, --no-escape No escape char '`'.
Default is using it. -T, --thread-num=THREAD_NUM Number of thread for dump in file. Default is diff --git a/docs-en/14-reference/07-tdinsight/index.md index 16bae615c04ab92e4934418d6c0a3aaf1e1ccde8..cebfafa225e6e8de75ff84bb51fa664784177910 100644 --- a/docs-en/14-reference/07-tdinsight/index.md +++ b/docs-en/14-reference/07-tdinsight/index.md @@ -5,11 +5,11 @@ sidebar_label: TDinsight TDinsight is a solution for monitoring TDengine using the builtin native monitoring database and [Grafana]. -After TDengine starts, it will automatically create a monitoring database `log`. TDengine will automatically write many metrics in specific intervals into the `log` database. The metrics may include the server's CPU, memory, hard disk space, network bandwidth, number of requests, disk read/write speed, slow queries, other information like important system operations (user login, database creation, database deletion, etc.), and error alarms. With [Grafana] and [TDengine Data Source Plugin](https://github.com/taosdata/grafanaplugin/releases), TDinsight can visualize cluster status, node information, insertion and query requests, resource usage, etc., and also vnode, dnode, and mnode status, and exception alerts. Developers monitoring TDengine cluster operation status in real-time can be very convinient. This article will guide users to install the Grafana server, automatically install the TDengine data source plug-in, and deploy the TDinsight visualization panel through `TDinsight.sh` installation script. +After TDengine starts, it will automatically create a monitoring database `log`. TDengine will automatically write many metrics in specific intervals into the `log` database. The metrics may include the server's CPU, memory, hard disk space, network bandwidth, number of requests, disk read/write speed, slow queries, other information like important system operations (user login, database creation, database deletion, etc.), and error alarms. With [Grafana] and [TDengine Data Source Plugin](https://github.com/taosdata/grafanaplugin/releases), TDinsight can visualize cluster status, node information, insertion and query requests, resource usage, vnode, dnode, and mnode status, exception alerts and many other metrics. This is very convenient for developers who want to monitor TDengine cluster status in real-time. This article will guide users to install the Grafana server, automatically install the TDengine data source plug-in, and deploy the TDinsight visualization panel using the `TDinsight.sh` installation script. ## System Requirements -To deploy TDinsight, a single-node TDengine server or a multi-nodes TDengine cluster and a [Grafana] server are required. This dashboard requires TDengine 2.3.3.0 and above, with the `log` database enabled (`monitor = 1`). +To deploy TDinsight, a single-node TDengine server or a multi-node TDengine cluster and a [Grafana] server are required. This dashboard requires TDengine 2.3.3.0 and above, with the `log` database enabled (`monitor = 1`). ## Installing Grafana @@ -17,7 +17,7 @@ We recommend using the latest [Grafana] version 7 or 8 here. You can install Gra ### Installing Grafana on Debian or Ubuntu -For Debian or Ubuntu operating systems, we recommend the Grafana image repository and Use the following command to install from scratch. +For Debian or Ubuntu operating systems, we recommend using the Grafana image repository and the following command to install from scratch.
```bash sudo apt-get install -y apt-transport-https @@ -71,7 +71,7 @@ chmod +x TDinsight.sh ./TDinsight.sh ``` -This script will automatically download the latest [Grafana TDengine data source plugin](https://github.com/taosdata/grafanaplugin/releases/latest) and [TDinsight dashboard](https://grafana.com/grafana/dashboards/15167) with configurable parameters from the command-line options to the [Grafana Provisioning](https://grafana.com/docs/grafana/latest/administration/provisioning/) configuration file to automate deployment and updates, etc. With the alert setting options provided by this script, you can also get built-in support for AliCloud SMS alert notifications. +This script will automatically download the latest [Grafana TDengine data source plugin](https://github.com/taosdata/grafanaplugin/releases/latest) and [TDinsight dashboard](https://grafana.com/grafana/dashboards/15167) with configurable parameters for command-line options to the [Grafana Provisioning](https://grafana.com/docs/grafana/latest/administration/provisioning/) configuration file to automate deployment and updates, etc. With the alert setting options provided by this script, you can also get built-in support for AliCloud SMS alert notifications. Assume you use TDengine and Grafana's default services on the same host. Run `./TDinsight.sh` and open the Grafana browser window to see the TDinsight dashboard. diff --git a/docs-en/14-reference/08-taos-shell.md index 9bb5178300931e4b3808716badf06c85a4bbf396..002b515093258152e85dd9d7437e424dfa98c874 100644 --- a/docs-en/14-reference/08-taos-shell.md +++ b/docs-en/14-reference/08-taos-shell.md @@ -1,10 +1,10 @@ --- -title: TDengine Command Line (CLI) -sidebar_label: TDengine CLI +title: TDengine Command Line Interface (CLI) +sidebar_label: Command Line Interface description: Instructions and tips for using the TDengine CLI --- -The TDengine command-line application (hereafter referred to as `TDengine CLI`) is the simplest way for users to manipulate and interact with TDengine instances. +The TDengine command-line interface (hereafter referred to as `TDengine CLI`) is the simplest way for users to manipulate and interact with TDengine instances. ## Installation diff --git a/docs-en/14-reference/11-docker/index.md index f532a263d88def21bd8b0fe9c59adaf982ee2404..b7e60ab3e7f04a6078950977a563382a3524ebaa 100644 --- a/docs-en/14-reference/11-docker/index.md +++ b/docs-en/14-reference/11-docker/index.md @@ -13,7 +13,7 @@ The TDengine image starts with the HTTP service activated by default, using the docker run -d --name tdengine -p 6041:6041 tdengine/tdengine ``` -The above command starts a container named "tdengine" and maps the HTTP service end 6041 to the host port 6041. You can verify that the HTTP service provided in this container is available using the following command. +The above command starts a container named "tdengine" and maps the HTTP service port 6041 to the host port 6041. You can verify that the HTTP service provided in this container is available using the following command. ```shell curl -u root:taosdata -d "show databases" localhost:6041/rest/sql @@ -34,7 +34,7 @@ taos> show databases; Query OK, 1 row(s) in set (0.002843s) ``` -The TDengine server running in the container uses the container's hostname to establish a connection. Using TDengine CLI or various connectors (such as JDBC-JNI) to access the TDengine inside the container from outside the container is more complicated.
So the above is the simplest way to access the TDengine service in the container and is suitable for some simple scenarios. Please refer to the next section if you want to access the TDengine service in the container from containerized using TDengine CLI or various connectors in some complex scenarios. +The TDengine server running in the container uses the container's hostname to establish a connection. Using TDengine CLI or various connectors (such as JDBC-JNI) to access the TDengine inside the container from outside the container is more complicated. So the above is the simplest way to access the TDengine service in the container and is suitable for some simple scenarios. Please refer to the next section if you want to access the TDengine service in the container from outside the container using TDengine CLI or various connectors for complex scenarios. ## Start TDengine on the host network @@ -42,7 +42,7 @@ The TDengine server running in the container uses the container's hostname to es docker run -d --name tdengine --network host tdengine/tdengine ``` -The above command starts TDengine on the host network and uses the host's FQDN to establish a connection instead of the container's hostname. It works too, like using `systemctl` to start TDengine on the host. If the TDengine client is already installed on the host, you can access it directly with the following command. +The above command starts TDengine on the host network and uses the host's FQDN to establish a connection instead of the container's hostname. It is the equivalent of using `systemctl` to start TDengine on the host. If the TDengine client is already installed on the host, you can access it directly with the following command. ```shell $ taos @@ -382,7 +382,7 @@ password: taosdata Suppose you want to deploy multiple taosAdapters to improve throughput and provide high availability. In that case, the recommended configuration method uses a reverse proxy such as Nginx to offer a unified access entry. For specific configuration methods, please refer to the official documentation of Nginx. Here is an example: ```docker - ersion: "3" + version: "3" networks: inter: diff --git a/docs-en/14-reference/12-config/index.md index 10e23bbdb85c1aa65ffa021d3d7a7fdaf7b77b09..8ad9a474a02c5cc52559ccdc5910ad9d7b6264ae 100644 --- a/docs-en/14-reference/12-config/index.md +++ b/docs-en/14-reference/12-config/index.md @@ -78,7 +78,7 @@ taos --dump-config | Note | REST service is provided by `taosd` before 2.4.0.0 but by `taosAdapter` after 2.4.0.0, the default port of REST service is 6041 | :::note -TDengine uses continuous 13 ports, both TCP and UDP, from the port specified by `serverPort`. These ports need to be kept open if firewall is enabled. Below table describes the ports used by TDengine in details. +TDengine uses 13 continuous ports, both TCP and UDP, starting with the port specified by `serverPort`. You should ensure, in your firewall rules, that these ports are kept open. The table below describes the ports used by TDengine in detail. ::: @@ -197,7 +197,7 @@ TDengine uses continuous 13 ports, both TCP and UDP, from the port specified by | Default Value | TimeZone configured in the host | :::info -To handle the data insertion and data query from multiple timezones, Unix Timestamp is used and stored TDengine. The timestamp generated from any timezones at same time is same in Unix timestamp.
To make sure the time on client side can be converted to Unix timestamp correctly, the timezone must be set properly. +To handle the data insertion and data query from multiple timezones, Unix Timestamp is used and stored in TDengine. A timestamp generated at the same moment in any timezone has the same Unix timestamp value. To make sure the time on client side can be converted to Unix timestamp correctly, the timezone must be set properly. On Linux system, TDengine clients automatically obtain timezone from the host. Alternatively, the timezone can be configured explicitly in configuration file `taos.cfg` like below. @@ -209,7 +209,7 @@ timezone Asia/Shanghai The above examples are all proper configuration for the timezone of UTC+8. On Windows system, however, `timezone Asia/Shanghai` is not supported, it must be set as `timezone UTC-8`. -The setting for timezone impacts the strings not in Unix timestamp, keywords or functions related to date/time, for example +The setting for timezone impacts strings that are not in Unix timestamp format and keywords or functions related to date/time. For example: ```sql SELECT count(*) FROM table_name WHERE TS<'2019-04-11 12:01:08'; @@ -227,7 +227,7 @@ If the timezone is UTC, it's equal to SELECT count(*) FROM table_name WHERE TS<1554984068000; ``` -To avoid the problems of using time strings, Unix timestamp can be used directly. Furthermore, time strings with timezone can be used in SQL statement, for example "2013-04-12T15:52:01.123+08:00" in RFC3339 format or "2013-04-12T15:52:01.123+0800" in ISO-8601 format, they are not influenced by timezone setting when converted to Unix timestamp. +To avoid the problems of using time strings, Unix timestamp can be used directly. Furthermore, time strings with timezone can be used in SQL statements. For example "2013-04-12T15:52:01.123+08:00" in RFC3339 format or "2013-04-12T15:52:01.123+0800" in ISO-8601 format are not influenced by timezone setting when converted to Unix timestamp. ::: @@ -244,7 +244,7 @@ A specific type "nchar" is provided in TDengine to store non-ASCII characters su The characters input on the client side are encoded using the default system encoding, which is UTF-8 on Linux, or GB18030 or GBK on some systems in Chinese, POSIX in docker, CP936 on Windows in Chinese. The encoding of the operating system in use must be set correctly so that the characters in nchar type can be converted to UCS4-LE. -The locale definition standard on Linux is: `<language>_<region>.<charset>`, for example, in "zh_CN.UTF-8", "zh" means Chinese, "CN" means China mainland, "UTF-8" means charset. On Linux andMac OSX, the charset can be set by locale in the system. On Windows system another configuration parameter `charset` must be used to configure charset because the locale used on Windows is not POSIX standard. Of course, `charset` can also be used on Linux to specify the charset. +The locale definition standard on Linux is: `<language>_<region>.<charset>`, for example, in "zh_CN.UTF-8", "zh" means Chinese, "CN" means China mainland, "UTF-8" means charset. On Linux and Mac OSX, the charset can be set by locale in the system. On Windows system another configuration parameter `charset` must be used to configure charset because the locale used on Windows is not POSIX standard. Of course, `charset` can also be used on Linux to specify the charset.
::: @@ -263,7 +263,7 @@ On Linux, if `charset` is not set in `taos.cfg`, when `taos` is started, the cha locale zh_CN.UTF-8 ``` -Besides, on Linux system, if the charset contained in `locale` is not consistent with that set by `charset`, the one who comes later in the configuration file is used. +On a Linux system, if the charset contained in `locale` is not consistent with that set by `charset`, the later setting in the configuration file takes precedence. ```title="Effective charset is GBK" locale zh_CN.UTF-8 @@ -778,7 +778,7 @@ To prevent system resource from being exhausted by multiple concurrent streams, ## HTTP Parameters :::note -HTTP server had been provided by `taosd` prior to version 2.4.0.0, now is provided by `taosAdapter` after version 2.4.0.0. +HTTP service was provided by `taosd` prior to version 2.4.0.0 and is provided by `taosAdapter` after version 2.4.0.0. The parameters described in this section are only applicable to versions prior to 2.4.0.0. If you are using any version from 2.4.0.0, please refer to [taosAdapter](/reference/taosadapter/). ::: diff --git a/docs-en/14-reference/13-schemaless/13-schemaless.md index ff0b2c51bd433788c593b6e20d4c341a9af7e921..acbbb1cd3c5a7c50e226644f2de9e0e77274c6dd 100644 --- a/docs-en/14-reference/13-schemaless/13-schemaless.md +++ b/docs-en/14-reference/13-schemaless/13-schemaless.md @@ -1,11 +1,11 @@ --- title: Schemaless Writing -description: "The Schemaless write method eliminates the need to create super tables/sub tables in advance and automatically creates the storage structure corresponding to the data as it is written to the interface." +description: "The Schemaless write method eliminates the need to create super tables/sub tables in advance and automatically creates the storage structure corresponding to the data, as it is written to the interface." --- -In IoT applications, many data items are often collected for intelligent control, business analysis, device monitoring, etc. Due to the version upgrades of the application logic, or the hardware adjustment of the devices themselves, the data collection items may change frequently. To facilitate the data logging work in such cases, TDengine starting from version 2.2.0.0 provides a series of interfaces to the schemaless writing method, which eliminate the need to create super tables and subtables in advance by automatically creating the storage structure corresponding to the data as the data is written to the interface. And when necessary, schemaless writing will automatically add the required columns to ensure that the data written by the user is stored correctly. +In IoT applications, data is collected for many purposes such as intelligent control, business analysis, device monitoring and so on. Due to changes in business or functional requirements or changes in device hardware, the application logic and even the data collected may change. To provide the flexibility needed in such cases and in a rapidly changing IoT landscape, TDengine starting from version 2.2.0.0, provides a series of interfaces for the schemaless writing method. These interfaces eliminate the need to create super tables and subtables in advance by automatically creating the storage structure corresponding to the data as the data is written to the interface. When necessary, schemaless writing will automatically add the required columns to ensure that the data written by the user is stored correctly.
-The schemaless writing method creates super tables and their corresponding subtables completely indistinguishable from the super tables and subtables created directly via SQL. You can write data directly to them via SQL statements. Note that the names of tables created by schemaless writing are based on fixed mapping rules for tag values, so they are not explicitly ideographic and lack readability. +The schemaless writing method creates super tables and their corresponding subtables. These are completely indistinguishable from the super tables and subtables created directly via SQL. You can write data directly to them via SQL statements. Note that the names of tables created by schemaless writing are based on fixed mapping rules for tag values, so they are not explicitly ideographic and they lack readability. ## Schemaless Writing Line Protocol @@ -76,8 +76,7 @@ If the subtable obtained by the parse line protocol does not exist, Schemaless c 8. Errors encountered throughout the processing will interrupt the writing process and return an error code. :::tip -All processing logic of schemaless will still follow TDengine's underlying restrictions on data structures, such as the total length of each row of data cannot exceed -16k bytes. See [TAOS SQL Boundary Limits](/taos-sql/limit) for specific constraints in this area. +All processing logic of schemaless will still follow TDengine's underlying restrictions on data structures, such as the total length of each row of data cannot exceed 48k bytes. See [TAOS SQL Boundary Limits](/taos-sql/limit) for specific constraints in this area. ::: ## Time resolution recognition @@ -87,7 +86,7 @@ Three specified modes are supported in the schemaless writing process, as follow | **Serial** | **Value** | **Description** | | -------- | ------------------- | ------------------------------- | | 1 | SML_LINE_PROTOCOL | InfluxDB Line Protocol | -| 2 | SML_TELNET_PROTOCOL | OpenTSDB Text Line Protocol | | 2 | SML_TELNET_PROTOCOL | OpenTSDB Text Line Protocol +| 2 | SML_TELNET_PROTOCOL | OpenTSDB Text Line Protocol | | 3 | SML_JSON_PROTOCOL | JSON protocol format | In the SML_LINE_PROTOCOL parsing mode, the user is required to specify the time resolution of the input timestamp. The available time resolutions are shown in the following table. @@ -106,8 +105,11 @@ In SML_TELNET_PROTOCOL and SML_JSON_PROTOCOL modes, the time precision is determ ## Data schema mapping rules -This section describes how data for line protocols are mapped to data with a schema. The data measurement in each line protocol is mapped to -The tag name in tag_set is the name of the tag in the data schema, and the name in field_set is the column's name. The following data is used as an example to illustrate the mapping rules. +This section describes how data for line protocols are mapped to data with a schema. The data measurement in each line protocol is mapped to the super table name. In addition: +- The tag name in tag_set is the name of the tag in the data schema +- The name in field_set is the column's name. + +The following data is used as an example to illustrate the mapping rules. ```json st,t1=3,t2=4,t3=t3 c1=3i64,c3="passit",c2=false,c4=4f64 1626006833639000000 ``` @@ -139,7 +141,7 @@ st,t1=3,t2=4,t3=t3 c1=3i64,c5="pass" 1626006833639000000 st,t1=3,t2=4,t3=t3 c1=3i64,c5="passit" 1626006833640000000 ``` -The first line of the line protocol parsing will declare column c5 is a BINARY(4) field, the second line data write will extract column c5 is still a BINARY column.
Still, its width is 6, then you need to increase the width of the BINARY field to be able to accommodate the new string. +The first line of the line protocol parsing will declare column c5 is a BINARY(4) field. The second line data write will parse column c5 as a BINARY column. But in the second line, c5's width is 6 so you need to increase the width of the BINARY field to be able to accommodate the new string. ```json st,t1=3,t2=4,t3=t3 c1=3i64 1626006833639000000 diff --git a/docs-en/14-reference/_collectd.mdx index 1f57d883eec9feadc3cc460bf968b0dd43fedfe8..ce88328098a181de48dcaa080ef45f228b20bf1c 100644 --- a/docs-en/14-reference/_collectd.mdx +++ b/docs-en/14-reference/_collectd.mdx @@ -25,7 +25,7 @@ The default database name written by taosAdapter is `collectd`. You can also mod #collectd collectd uses a plugin mechanism to write the collected monitoring data to different data storage software in various forms. TDengine supports both direct collection plugins and write_tsdb plugins. -#### is configured to receive data from the direct collection plugin +#### Configure the direct collection plugin Modify the relevant configuration items in the collectd configuration file (default location /etc/collectd/collectd.conf). @@ -62,7 +62,7 @@ LoadPlugin write_tsdb ``` -Where fills in the server's domain name or IP address running taosAdapter. Fill in the data that taosAdapter uses to receive the collectd write_tsdb plugin (default is 6047). +Where `<host>` is the domain name or IP address of the server running taosAdapter, and `<port>` is the port that taosAdapter uses to receive collectd write_tsdb plugin data (default is 6047). ```text LoadPlugin write_tsdb diff --git a/docs-en/14-reference/_tcollector.mdx index 85794d54007b70acf205b1bbc897cec1d0c4f824..42b021410e3862c4fa328d8dae40dcac1456e929 100644 --- a/docs-en/14-reference/_tcollector.mdx +++ b/docs-en/14-reference/_tcollector.mdx @@ -17,7 +17,7 @@ password = "taosdata" ... ``` -The taosAdapter writes to the database with the default name `tcollector`. You can also modify the taosAdapter configuration file dbs entry to specify a different name. user and password fill in the actual TDengine configuration values. After changing the configuration file, you need to restart the taosAdapter. +The taosAdapter writes to the database with the default name `tcollector`. You can also modify the taosAdapter configuration file dbs entry to specify a different name. Fill in the actual user and password for TDengine. After changing the configuration file, you need to restart the taosAdapter. - You can also enable taosAdapter to receive tcollector data by using the taosAdapter command-line parameters or setting environment variables. @@ -25,7 +25,7 @@ The taosAdapter writes to the database with the default name `tcollector`. You c To use TCollector, you need to download its [source code](https://github.com/OpenTSDB/tcollector). Its configuration items are in its source code. Note: TCollector differs significantly from version to version, so here is an example of the latest code for the current master branch (git commit: 37ae920). -Modify the contents of the `collectors/etc/config.py` and `tcollector.py` files. Change the address of the OpenTSDB host to the domain name or IP address of the server where taosAdapter is deployed, and change the port to the port that taosAdapter supports TCollector on (default is 6049). +Modify the contents of the `collectors/etc/config.py` and `tcollector.py` files.
Change the address of the OpenTSDB host to the domain name or IP address of the server where taosAdapter is deployed, and change the port to the port on which taosAdapter supports TCollector (default is 6049). Example of git diff output of source code changes. diff --git a/docs-en/14-reference/index.md index 89f675902d01ba2d2c1b322408c372429d6bda1c..f350eebfc1a1ca2feaedc18c4b4fa798742e31b4 100644 --- a/docs-en/14-reference/index.md +++ b/docs-en/14-reference/index.md @@ -2,11 +2,11 @@ title: Reference --- -The reference guide is the detailed introduction to TDengine, various TDengine's connectors in different languages, and the tools that come with it. +The reference guide is a detailed introduction to TDengine, including various TDengine connectors in different languages, and the tools that come with TDengine. ```mdx-code-block import DocCardList from '@theme/DocCardList'; import {useCurrentSidebarCategory} from '@docusaurus/theme-common'; -``` \ No newline at end of file +``` diff --git a/docs-en/20-third-party/01-grafana.mdx index dc2033ae6f789908d4d9f9ecd96c9396748c4400..b3cab6271001feb56714f808906cb78ba1098593 100644 --- a/docs-en/20-third-party/01-grafana.mdx +++ b/docs-en/20-third-party/01-grafana.mdx @@ -3,13 +3,13 @@ sidebar_label: Grafana title: Grafana --- -TDengine can be quickly integrated with the open-source data visualization system [Grafana](https://www.grafana.com/) to build a data monitoring and alerting system. The whole process does not require any code development. And you can visualize the contents of the data tables in TDengine on a DashBoard. +TDengine can be quickly integrated with the open-source data visualization system [Grafana](https://www.grafana.com/) to build a data monitoring and alerting system. The whole process does not require any code development. And you can visualize the contents of the data tables in TDengine on a dashboard. You can learn more about using the TDengine plugin on [GitHub](https://github.com/taosdata/grafanaplugin/blob/master/README.md). ## Prerequisites -In order for Grafana to add the TDengine data source successfully, the following preparations are required: +In order for Grafana to add the TDengine data source successfully, the following preparation is required: 1. The TDengine cluster is deployed and functioning properly 2. taosAdapter is installed and running properly. Please refer to the taosAdapter manual for details. @@ -36,7 +36,7 @@ GF_VERSION=3.1.4 wget https://github.com/taosdata/grafanaplugin/releases/download/v$GF_VERSION/tdengine-datasource-$GF_VERSION.zip ``` -Take CentOS 7.2 for example, extract the plugin package to /var/lib/grafana/plugins directory, and restart grafana. +In CentOS 7.2, for example, extract the plugin package to the /var/lib/grafana/plugins directory, and restart grafana. ```bash sudo unzip tdengine-datasource-$GF_VERSION.zip -d /var/lib/grafana/plugins/ @@ -76,13 +76,13 @@ Enter the datasource configuration page, and follow the default prompts to modif - User: TDengine user name. - Password: TDengine user password. -Click `Save & Test` to test. Follows are a success. +Click `Save & Test` to test. You should see a success message if the test worked.
![TDengine Database TDinsight plugin add database 4](./grafana/add_datasource4.webp) ### Create Dashboard -Go back to the main interface to create the Dashboard, click Add Query to enter the panel query page: +Go back to the main interface to create a dashboard and click Add Query to enter the panel query page: ![TDengine Database TDinsight plugin create dashboard 1](./grafana/create_dashboard1.webp) diff --git a/docs-en/20-third-party/03-telegraf.md index 0d563c9ff36268ac27e18e21fefed789789dc1a7..6a7aac322f9def880f58d7ed0adcc4a8f3687ed1 100644 --- a/docs-en/20-third-party/03-telegraf.md +++ b/docs-en/20-third-party/03-telegraf.md @@ -5,7 +5,7 @@ title: Telegraf writing import Telegraf from "../14-reference/_telegraf.mdx" -Telegraf is a viral metrics collection open-source software. Telegraf can collect the operation information of various components without writing any scripts to collect regularly, reducing the difficulty of data acquisition. +Telegraf is a popular open-source metrics collection software. Telegraf can collect the operation information of various components without having to write any scripts to collect regularly, reducing the difficulty of data acquisition. Telegraf's data can be written to TDengine by simply adding the output configuration of Telegraf to the URL corresponding to taosAdapter and modifying several configuration items. The presence of Telegraf data in TDengine can take advantage of TDengine's efficient storage query performance and clustering capabilities for time-series data. diff --git a/docs-en/20-third-party/05-collectd.md index 609e55842ab35cdc2d394663f5450f908e49f7f7..db62f2ecd1afb4936466ca0243a7e14ff294f8b6 100644 --- a/docs-en/20-third-party/05-collectd.md +++ b/docs-en/20-third-party/05-collectd.md @@ -6,7 +6,7 @@ title: collectd writing import CollectD from "../14-reference/_collectd.mdx" -collectd is a daemon used to collect system performance metric data. collectd provides various storage mechanisms to store different values. It periodically counts system performance statistics number while the system is running and storing information. You can use this information to help identify current system performance bottlenecks and predict future system load. +collectd is a daemon used to collect system performance metric data. collectd provides various storage mechanisms to store different values. It periodically counts system performance statistics while the system is running and stores the information. You can use this information to help identify current system performance bottlenecks and predict future system load. You can write the data collected by collectd to TDengine by simply modifying the configuration of collectd to the domain name (or IP address) and corresponding port of the server running taosAdapter. It can take full advantage of TDengine's efficient storage query performance and clustering capability for time-series data. diff --git a/docs-en/20-third-party/06-statsd.md index bf4b6c7ab5dac4114cad0d650b2aeb026a67581c..40e927b9fd1d2eca9d454a987ac51d533eb75005 100644 --- a/docs-en/20-third-party/06-statsd.md +++ b/docs-en/20-third-party/06-statsd.md @@ -7,7 +7,7 @@ import StatsD from "../14-reference/_statsd.mdx" StatsD is a simple daemon for aggregating application metrics, which has evolved rapidly in recent years into a unified protocol for collecting application performance metrics.
-You can write StatsD data to TDengine by simply modifying in the configuration file of StatsD with the domain name (or IP address) of the server running taosAdapter and the corresponding port. It can take full advantage of TDengine's efficient storage query performance and clustering capabilities for time-series data.
+You can write StatsD data to TDengine by simply modifying the configuration file of StatsD with the domain name (or IP address) of the server running taosAdapter and the corresponding port. It can take full advantage of TDengine's efficient storage query performance and clustering capabilities for time-series data.
## Prerequisites
diff --git a/docs-en/20-third-party/07-icinga2.md b/docs-en/20-third-party/07-icinga2.md
index ba9cde8cea7504ac9df871d5f6aa42cc5c94d895..b27196dfe313b468eeb73ff4b114d9d955618c3e 100644
--- a/docs-en/20-third-party/07-icinga2.md
+++ b/docs-en/20-third-party/07-icinga2.md
@@ -5,7 +5,7 @@ title: icinga2 writing
import Icinga2 from "../14-reference/_icinga2.mdx"
-icinga2 is an open-source software monitoring host and network initially developed from the Nagios network monitoring application. Currently, icinga2 is distributed under the GNU GPL v2 license.
+icinga2 is an open-source host and network monitoring software, initially developed from the Nagios network monitoring application. Currently, icinga2 is distributed under the GNU GPL v2 license.
You can write the data collected by icinga2 to TDengine by simply modifying the icinga2 configuration to point to the taosAdapter server and the corresponding port, taking advantage of TDengine's efficient storage and query performance and clustering capabilities for time-series data.
diff --git a/docs-en/20-third-party/09-emq-broker.md b/docs-en/20-third-party/09-emq-broker.md
index 738372cabd736c0be47b4080cc2c984e5110236c..d3eafebc14e8ddc29b03abf8785a6c0a013ef014 100644
--- a/docs-en/20-third-party/09-emq-broker.md
+++ b/docs-en/20-third-party/09-emq-broker.md
@@ -3,7 +3,7 @@ sidebar_label: EMQX Broker
title: EMQX Broker writing
---
-MQTT is a popular IoT data transfer protocol, [EMQX](https://github.com/emqx/emqx) is an open-source MQTT Broker software, you can write MQTT data directly to TDengine without any code, you only need to use "rules" in EMQX Dashboard to create a simple configuration. EMQX supports saving data to TDengine by sending it to web services and provides a native TDengine driver for direct saving in the Enterprise Edition. Please refer to the [EMQX official documentation](https://www.emqx.io/docs/en/v4.4/rule/rule-engine.html) for details on how to use it.).
+MQTT is a popular IoT data transfer protocol. [EMQX](https://github.com/emqx/emqx) is an open-source MQTT Broker software. You can write MQTT data directly to TDengine without any code. You only need to set up "rules" in the EMQX Dashboard to create a simple configuration. EMQX supports saving data to TDengine by sending data to a web service and provides a native TDengine driver for direct saving in the Enterprise Edition. Please refer to the [EMQX official documentation](https://www.emqx.io/docs/en/v4.4/rule/rule-engine.html) for details on how to use it.
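For the web-service path mentioned above, an EMQX rule ultimately issues an HTTP request against taosAdapter's REST interface on port 6041. A rough sketch of the request such a rule boils down to (the `test.sensor_data` table is hypothetical, and `root:taosdata` is the default TDengine account):

```bash
# An EMQX rule's web-service action amounts to an authenticated
# HTTP POST of a SQL statement to taosAdapter's REST endpoint.
curl -u root:taosdata \
  -d "INSERT INTO test.sensor_data VALUES (now, 23.5)" \
  http://localhost:6041/rest/sql
```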
## Prerequisites
diff --git a/docs-en/20-third-party/11-kafka.md b/docs-en/20-third-party/11-kafka.md
index 9c78a6645a0578d3b8d494d1fa60831eb88b3c81..6720af8bf81ea2f4fce415a54847453f578ababf 100644
--- a/docs-en/20-third-party/11-kafka.md
+++ b/docs-en/20-third-party/11-kafka.md
@@ -7,7 +7,7 @@ TDengine Kafka Connector contains two plugins: TDengine Source Connector and TDe
## What is Kafka Connect?
-Kafka Connect is a component of Apache Kafka that enables other systems, such as databases, cloud services, file systems, etc., to connect to Kafka easily. Data can flow from other software to Kafka via Kafka Connect and Kafka to other systems via Kafka Connect. Plugins that read data from other software are called Source Connectors, and plugins that write data to other software are called Sink Connectors. Neither Source Connector nor Sink Connector will directly connect to Kafka Broker, and Source Connector transfers data to Kafka Connect. Sink Connector receives data from Kafka Connect.
+Kafka Connect is a component of [Apache Kafka](https://kafka.apache.org/) that enables other systems, such as databases, cloud services, file systems, etc., to connect to Kafka easily. Data can flow from other software to Kafka via Kafka Connect and Kafka to other systems via Kafka Connect. Plugins that read data from other software are called Source Connectors, and plugins that write data to other software are called Sink Connectors. Neither Source Connector nor Sink Connector will directly connect to Kafka Broker, and Source Connector transfers data to Kafka Connect. Sink Connector receives data from Kafka Connect.
![TDengine Database Kafka Connector -- Kafka Connect](kafka/Kafka_Connect.webp)
@@ -17,7 +17,7 @@ TDengine Source Connector is used to read data from TDengine in real-time and se
## What is Confluent?
-Confluent adds many extensions to Kafka. include:
+[Confluent](https://www.confluent.io/) adds many extensions to Kafka. These include:
1. Schema Registry
2. REST Proxy
@@ -79,10 +79,10 @@ Development: false
git clone https://github.com:taosdata/kafka-connect-tdengine.git
cd kafka-connect-tdengine
mvn clean package
-unzip -d $CONFLUENT_HOME/share/confluent-hub-components/ target/components/packages/taosdata-kafka-connect-tdengine-0.1.0.zip
+unzip -d $CONFLUENT_HOME/share/java/ target/components/packages/taosdata-kafka-connect-tdengine-*.zip
```
-The above script first clones the project source code and then compiles and packages it with Maven. After the package is complete, the zip package of the plugin is generated in the `target/components/packages/` directory. Unzip this zip package to the path where the plugin is installed. The path to install the plugin is in the configuration file `$CONFLUENT_HOME/etc/kafka/connect-standalone.properties`. The default path is `$CONFLUENT_HOME/share/confluent-hub-components/`.
+The above script first clones the project source code and then compiles and packages it with Maven. After the package is complete, the zip package of the plugin is generated in the `target/components/packages/` directory. Unzip this zip package to the plugin path. We used `$CONFLUENT_HOME/share/java/` above because it's a built-in plugin path.
### Install with confluent-hub
@@ -96,7 +96,7 @@ confluent local services start
```
:::note
-Be sure to install the plugin before starting Confluent. Otherwise, there will be a class not found error.
The log of Kafka Connect (default path: /tmp/confluent.xxxx/connect/logs/connect.log) will output the successfully installed plugin, which users can use to determine whether the plugin is installed successfully.
+Be sure to install the plugin before starting Confluent. Otherwise, Kafka Connect will fail to discover the plugins.
:::
:::tip
@@ -123,6 +123,59 @@ Control Center is [UP]
To clear data, execute `rm -rf /tmp/confluent.106668`.
:::
+### Check Confluent Services Status
+
+Use the command below to check the status of all services:
+
+```
+confluent local services status
+```
+
+The expected output is:
+```
+Connect is [UP]
+Control Center is [UP]
+Kafka is [UP]
+Kafka REST is [UP]
+ksqlDB Server is [UP]
+Schema Registry is [UP]
+ZooKeeper is [UP]
+```
+
+### Check Successfully Loaded Plugins
+
+After Kafka Connect has started completely, you can use the command below to check whether the plugins are installed successfully:
+```
+confluent local services connect plugin list
+```
+
+The output should contain `TDengineSinkConnector` and `TDengineSourceConnector` as below:
+
+```
+Available Connect Plugins:
+[
+  {
+    "class": "com.taosdata.kafka.connect.sink.TDengineSinkConnector",
+    "type": "sink",
+    "version": "1.0.0"
+  },
+  {
+    "class": "com.taosdata.kafka.connect.source.TDengineSourceConnector",
+    "type": "source",
+    "version": "1.0.0"
+  },
+......
+```
+
+If not, please check the log file of Kafka Connect. To view the log file path, please execute:
+
+```
+echo `cat /tmp/confluent.current`/connect/connect.stdout
+```
+It should produce a path like: `/tmp/confluent.104086/connect/connect.stdout`
+
+Besides the log file `connect.stdout`, there is a file named `connect.properties`. At the end of this file you can see the effective `plugin.path`, which is a series of paths joined by commas. If Kafka Connect did not find the plugins, it is probably because the installation path is not included in `plugin.path`.
+
## The use of TDengine Sink Connector
The role of the TDengine Sink Connector is to synchronize the data of the specified topic to TDengine. Users do not need to create databases and super tables in advance. The name of the target database can be specified manually (see the configuration parameter connection.database), or it can be generated according to specific rules (see the configuration parameter connection.database.prefix).
@@ -142,7 +195,7 @@ vi sink-demo.properties
sink-demo.properties' content is following:
```ini title="sink-demo.properties"
-name=tdengine-sink-demo
+name=TDengineSinkConnector
connector.class=com.taosdata.kafka.connect.sink.TDengineSinkConnector
tasks.max=1
topics=meters
@@ -151,6 +204,7 @@ connection.user=root
connection.password=taosdata
connection.database=power
db.schemaless=line
+data.precision=ns
key.converter=org.apache.kafka.connect.storage.StringConverter
value.converter=org.apache.kafka.connect.storage.StringConverter
```
@@ -177,6 +231,7 @@ If the above command is executed successfully, the output is as follows:
"connection.url": "jdbc:TAOS://127.0.0.1:6030",
"connection.user": "root",
"connector.class": "com.taosdata.kafka.connect.sink.TDengineSinkConnector",
+ "data.precision": "ns",
"db.schemaless": "line",
"key.converter": "org.apache.kafka.connect.storage.StringConverter",
"tasks.max": "1",
@@ -221,10 +276,10 @@ Database changed.
taos> select * from meters;
ts | current | voltage | phase | groupid | location |
===============================================================================================================================================================
- 2022-03-28 09:56:51.249000000 | 11.800000000 | 221.000000000 | 0.280000000 | 2 | California.LoSangeles |
- 2022-03-28 09:56:51.250000000 | 13.400000000 | 223.000000000 | 0.290000000 | 2 | California.LoSangeles |
- 2022-03-28 09:56:51.249000000 | 10.800000000 | 223.000000000 | 0.290000000 | 3 | California.LoSangeles |
- 2022-03-28 09:56:51.250000000 | 11.300000000 | 221.000000000 | 0.350000000 | 3 | California.LoSangeles |
+ 2022-03-28 09:56:51.249000000 | 11.800000000 | 221.000000000 | 0.280000000 | 2 | California.LosAngeles |
+ 2022-03-28 09:56:51.250000000 | 13.400000000 | 223.000000000 | 0.290000000 | 2 | California.LosAngeles |
+ 2022-03-28 09:56:51.249000000 | 10.800000000 | 223.000000000 | 0.290000000 | 3 | California.LosAngeles |
+ 2022-03-28 09:56:51.250000000 | 11.300000000 | 221.000000000 | 0.350000000 | 3 | California.LosAngeles |
Query OK, 4 row(s) in set (0.004208s)
```
@@ -356,6 +411,7 @@ The following configuration items apply to TDengine Sink Connector and TDengine
4. `max.retries`: The maximum number of retries when an error occurs. Defaults to 1.
5. `retry.backoff.ms`: The time interval for retry when sending an error. The unit is milliseconds. The default is 3000.
6. `db.schemaless`: Data format, could be one of `line`, `json`, and `telnet`. Represent InfluxDB line protocol format, OpenTSDB JSON format, and OpenTSDB Telnet line protocol format.
+7. `data.precision`: The time precision used with InfluxDB line protocol format data; could be one of `ms`, `us` and `ns`. The default is `ns`.
### TDengine Source Connector specific configuration
@@ -366,7 +422,13 @@ The following configuration items apply to TDengine Sink Connector and TDengine
5. `fetch.max.rows`: The maximum number of rows retrieved when retrieving the database. Default is 100.
6. `out.format`: The data format. The value could be line or json. The line represents the InfluxDB Line protocol format, and json represents the OpenTSDB JSON format. Default is `line`.
-## feedback
+
+## Other notes
+
+1. To install the plugin to a customized location, refer to https://docs.confluent.io/home/connect/self-managed/install.html#install-connector-manually.
+2. To use Kafka Connect without Confluent, refer to https://kafka.apache.org/documentation/#connect.
+
+## Feedback
https://github.com/taosdata/kafka-connect-tdengine/issues
diff --git a/docs-en/21-tdinternal/01-arch.md b/docs-en/21-tdinternal/01-arch.md
index 16d4b7afe26107e251a542ee24b644c1d372def0..4d8bed4d2d6b3a0404e10213aeab599767325cc2 100644
--- a/docs-en/21-tdinternal/01-arch.md
+++ b/docs-en/21-tdinternal/01-arch.md
@@ -5,11 +5,11 @@ title: Architecture
## Cluster and Primary Logic Unit
-The design of TDengine is based on the assumption that any hardware or software system is not 100% reliable and that no single node can provide sufficient computing and storage resources to process massive data. Therefore, TDengine has been designed in a distributed and high-reliability architecture since day one of the development, so that hardware failure or software failure of any single even multiple servers will not affect the availability and reliability of the system.
At the same time, through node virtualization and automatic load-balancing technology, TDengine can make the most efficient use of computing and storage resources in heterogeneous clusters to reduce hardware resources significantly.
+The design of TDengine is based on the assumption that any hardware or software system is not 100% reliable and that no single node can provide sufficient computing and storage resources to process massive data. Therefore, since day one, TDengine has been designed as a natively distributed system, with high-reliability architecture. Hardware failure or software failure of a single, or even multiple, servers will not affect the availability and reliability of the system. At the same time, through node virtualization and automatic load-balancing technology, TDengine can make the most efficient use of computing and storage resources in heterogeneous clusters to significantly reduce hardware resource needs.
### Primary Logic Unit
-Logical structure diagram of TDengine distributed architecture as following:
+The logical structure diagram of TDengine's distributed architecture is as follows:
![TDengine Database architecture diagram](structure.webp)
Figure 1: TDengine architecture diagram
@@ -18,25 +18,25 @@ A complete TDengine system runs on one or more physical nodes. Logically, it inc **Physical node (pnode)**: A pnode is a computer that runs independently and has its own computing, storage and network capabilities. It can be a physical machine, virtual machine, or Docker container installed with OS. The physical node is identified by its configured FQDN (Fully Qualified Domain Name). TDengine relies entirely on FQDN for network communication. If you don't know about FQDN, please check [wikipedia](https://en.wikipedia.org/wiki/Fully_qualified_domain_name). -**Data node (dnode):** A dnode is a running instance of the TDengine server-side execution code taosd on a physical node. A working system must have at least one data node. A dnode contains zero to multiple logical virtual nodes (VNODE), zero or at most one logical management node (mnode). The unique identification of a dnode in the system is determined by the instance's End Point (EP). EP is a combination of FQDN (Fully Qualified Domain Name) of the physical node where the dnode is located and the network port number (Port) configured by the system. By configuring different ports, a physical node (a physical machine, virtual machine or container) can run multiple instances or have multiple data nodes. +**Data node (dnode):** A dnode is a running instance of the TDengine server-side execution code taosd on a physical node (pnode). A working system must have at least one data node. A dnode contains zero to multiple logical virtual nodes (VNODE) and zero or at most one logical management node (mnode). The unique identification of a dnode in the system is determined by the instance's End Point (EP). EP is a combination of FQDN (Fully Qualified Domain Name) of the physical node where the dnode is located and the network port number (Port) configured by the system. By configuring different ports, a physical node (a physical machine, virtual machine or container) can run multiple instances or have multiple data nodes. -**Virtual node (vnode)**: To better support data sharding, load balancing and prevent data from overheating or skewing, data nodes are virtualized into multiple virtual nodes (vnode, V2, V3, V4, etc. in the figure). Each vnode is a relatively independent work unit, which is the basic unit of time-series data storage and has independent running threads, memory space and persistent storage path. A vnode contains a certain number of tables (data collection points). When a new table is created, the system checks whether a new vnode needs to be created. The number of vnodes that can be created on a data node depends on the hardware capacities of the physical node where the data node is located. A vnode belongs to only one DB, but a DB can have multiple vnodes. In addition to the stored time-series data, a vnode also stores the schema and tag values of the included tables. A virtual node is uniquely identified in the system by the EP of the data node and the VGroup ID to which it belongs and is created and managed by the management node. +**Virtual node (vnode)**: To better support data sharding, load balancing and prevent data from overheating or skewing, data nodes are virtualized into multiple virtual nodes (vnode, V2, V3, V4, etc. in the figure). Each vnode is a relatively independent work unit, which is the basic unit of time-series data storage and has independent running threads, memory space and persistent storage path. A vnode contains a certain number of tables (data collection points). 
When a new table is created, the system checks whether a new vnode needs to be created. The number of vnodes that can be created on a data node depends on the capacity of the hardware of the physical node where the data node is located. A vnode belongs to only one DB, but a DB can have multiple vnodes. In addition to the stored time-series data, a vnode also stores the schema and tag values of the included tables. A virtual node is uniquely identified in the system by the EP of the data node and the VGroup ID to which it belongs and is created and managed by the management node. -**Management node (mnode)**: A virtual logical unit responsible for monitoring and maintaining the running status of all data nodes and load balancing among nodes (M in the figure). At the same time, the management node is also responsible for the storage and management of metadata (including users, databases, tables, static tags, etc.), so it is also called Meta Node. Multiple (up to 5) mnodes can be configured in a TDengine cluster, and they are automatically constructed into a virtual management node group (M0, M1, M2 in the figure). The master/slave mechanism is adopted for the mnode group and the data synchronization is carried out in a strongly consistent way. Any data update operation can only be executed on the master. The creation of mnode cluster is completed automatically by the system without manual intervention. There is at most one mnode on each dnode, which is uniquely identified by the EP of the data node to which it belongs. Each dnode automatically obtains the EP of the dnode where all mnodes in the whole cluster are located through internal messaging interaction. +**Management node (mnode)**: A virtual logical unit responsible for monitoring and maintaining the running status of all data nodes and load balancing among nodes (M in the figure). At the same time, the management node is also responsible for the storage and management of metadata (including users, databases, tables, static tags, etc.), so it is also called Meta Node. Multiple (up to 5) mnodes can be configured in a TDengine cluster, and they are automatically constructed into a virtual management node group (M0, M1, M2 in the figure). The master/slave mechanism is adopted for the mnode group and the data synchronization is carried out in a strongly consistent way. Any data update operation can only be executed on the master. The creation of mnode cluster is completed automatically by the system without manual intervention. There is at most one mnode on each dnode, which is uniquely identified by the EP of the data node to which it belongs. Each dnode automatically obtains the EP of the dnode where all mnodes in the whole cluster are located, through internal messaging interaction. -**Virtual node group (VGroup)**: Vnodes on different data nodes can form a virtual node group to ensure the high availability of the system. The virtual node group is managed in a master/slave mechanism. Write operations can only be performed on the master vnode, and then replicated to slave vnodes, thus ensuring that one single replica of data is copied on multiple physical nodes. The number of virtual nodes in a vgroup equals the number of data replicas. If the number of replicas of a DB is N, the system must have at least N data nodes. The number of replicas can be specified by the parameter `“replica”` when creating DB, and the default is 1. 
Using the multi-replication feature of TDengine, the same high data reliability can be achieved without the need for expensive storage devices such as disk arrays. Virtual node group is created and managed by the management node, and the management node assigns a system unique ID, aka VGroup ID. If two virtual nodes have the same vnode group ID, means that they belong to the same group and the data is backed up to each other. The number of virtual nodes in a virtual node group can be dynamically changed, allowing only one, that is, no data replication. VGroup ID is never changed. Even if a virtual node group is deleted, its ID will not be reused. +**Virtual node group (VGroup)**: Vnodes on different data nodes can form a virtual node group to ensure the high availability of the system. The virtual node group is managed in a master/slave mechanism. Write operations can only be performed on the master vnode, and then replicated to slave vnodes, thus ensuring that one single replica of data is copied on multiple physical nodes. The number of virtual nodes in a vgroup equals the number of data replicas. If the number of replicas of a DB is N, the system must have at least N data nodes. The number of replicas can be specified by the parameter `“replica”` when creating a DB, and the default is 1. Using the multi-replication feature of TDengine, the same high data reliability can be achieved without the need for expensive storage devices such as disk arrays. Virtual node groups are created and managed by the management node, and the management node assigns a system unique ID, aka VGroup ID. If two virtual nodes have the same vnode group ID, it means that they belong to the same group and the data is backed up to each other. The number of virtual nodes in a virtual node group can be dynamically changed, allowing only one, that is, no data replication. VGroup ID is never changed. Even if a virtual node group is deleted, its ID will not be reused. -**TAOSC**: TAOSC is the driver provided by TDengine to applications, which is responsible for dealing with the interaction between application and cluster, and provides the native interface of C/C++ language, which is embedded in JDBC, C #, Python, Go, Node.js language connection libraries. Applications interact with the whole cluster through TAOSC instead of directly connecting to data nodes in the cluster. This module is responsible for obtaining and caching metadata; forwarding requests for insertion, query, etc. to the correct data node; when returning the results to the application, TAOSC also needs to be responsible for the final level of aggregation, sorting, filtering and other operations. For JDBC, C/C++/C #/Python/Go/Node.js interfaces, this module runs on the physical node where the application is located. At the same time, in order to support the fully distributed RESTful interface, TAOSC has a running instance on each dnode of TDengine cluster. +**TAOSC**: TAOSC is the driver provided by TDengine to applications. It is responsible for dealing with the interaction between application and cluster, and provides the native interface for the C/C++ language. It is also embedded in the JDBC, C #, Python, Go, Node.js language connection libraries. Applications interact with the whole cluster through TAOSC instead of directly connecting to data nodes in the cluster. This module is responsible for obtaining and caching metadata; forwarding requests for insertion, query, etc. 
to the correct data node; when returning the results to the application, TAOSC also needs to be responsible for the final level of aggregation, sorting, filtering and other operations. For JDBC, C/C++/C#/Python/Go/Node.js interfaces, this module runs on the physical node where the application is located. At the same time, in order to support the fully distributed RESTful interface, TAOSC has a running instance on each dnode of TDengine cluster. ### Node Communication -**Communication mode**: The communication among each data node of TDengine system, and among the client driver and each data node is carried out through TCP/UDP. Considering an IoT scenario, the data writing packets are generally not large, so TDengine uses UDP in addition to TCP for transmission, because UDP is more efficient and is not limited by the number of connections. TDengine implements its own timeout, retransmission, confirmation and other mechanisms to ensure reliable transmission of UDP. For packets with a data volume of less than 15K, UDP is adopted for transmission, and TCP is automatically adopted for transmission of packets with a data volume of more than 15K or query operations. At the same time, TDengine will automatically compress/decompress the data, digital sign/authenticate the data according to the configuration and data packet. For data replication among data nodes, only TCP is used for data transportation. +**Communication mode**: The communication among each data node of TDengine system, and among the client driver and each data node is carried out through TCP/UDP. Considering an IoT scenario, the data writing packets are generally not large, so TDengine uses UDP in addition to TCP for transmission, because UDP is more efficient and is not limited by the number of connections. TDengine implements its own timeout, retransmission, confirmation and other mechanisms to ensure reliable transmission of UDP. For packets with a data volume of less than 15K, UDP is adopted for transmission, and TCP is automatically adopted for transmission of packets with a data volume of more than 15K or query operations. At the same time, TDengine will automatically compress/decompress the data, digitally sign/authenticate the data according to the configuration and data packet. For data replication among data nodes, only TCP is used for data transportation. **FQDN configuration:** A data node has one or more FQDNs, which can be specified in the system configuration file taos.cfg with the parameter “fqdn”. If it is not specified, the system will automatically use the hostname of the computer as its FQDN. If the node is not configured with FQDN, you can directly set the configuration parameter “fqdn” of the node to its IP address. However, IP is not recommended because IP address may be changed, and once it changes, the cluster will not work properly. The EP (End Point) of a data node consists of FQDN + Port. With FQDN, it is necessary to ensure the DNS service is running, or hosts files on nodes are configured properly. **Port configuration**: The external port of a data node is determined by the system configuration parameter “serverPort” in TDengine, and the port for internal communication of cluster is serverPort+5. The data replication operation among data nodes in the cluster also occupies a TCP port, which is serverPort+10. In order to support multithreading and efficient processing of UDP data, each internal and external UDP connection needs to occupy 5 consecutive ports. 
Therefore, the total port range of a data node will be serverPort to serverPort + 10, for a total of 11 TCP/UDP ports. To run the system, make sure that the firewall keeps these ports open. Each data node can be configured with a different serverPort. -**Cluster external connection**: TDengine cluster can accommodate one single, multiple or even thousands of data nodes. The application only needs to initiate a connection to any data node in the cluster. The network parameter required for connection is the End Point (FQDN plus configured port number) of a data node. When starting the application taos through CLI, the FQDN of the data node can be specified through the option `-h`, and the configured port number can be specified through `-p`. If the port is not configured, the system configuration parameter “serverPort” of TDengine will be adopted. +**Cluster external connection**: TDengine cluster can accommodate a single, multiple or even thousands of data nodes. The application only needs to initiate a connection to any data node in the cluster. The network parameter required for connection is the End Point (FQDN plus configured port number) of a data node. When starting the application taos through CLI, the FQDN of the data node can be specified through the option `-h`, and the configured port number can be specified through `-p`. If the port is not configured, the system configuration parameter “serverPort” of TDengine will be adopted. **Inter-cluster communication**: Data nodes connect with each other through TCP/UDP. When a data node starts, it will obtain the EP information of the dnode where the mnode is located, and then establish a connection with the mnode in the system to exchange information. There are three steps to obtain EP information of the mnode: @@ -44,11 +44,13 @@ A complete TDengine system runs on one or more physical nodes. Logically, it inc 2. Check the system configuration file taos.cfg to obtain node configuration parameters “firstEp” and “secondEp” (the node specified by these two parameters can be a normal node without mnode, in this case, the node will try to redirect to the mnode node when connected). If these two configuration parameters do not exist or do not exist in taos.cfg, or are invalid, skip to the third step; 3. Set your own EP as a mnode EP and run it independently. After obtaining the mnode EP list, the data node initiates the connection. It will successfully join the working cluster after connection. If not successful, it will try the next item in the mnode EP list. If all attempts are made, but the connection still fails, sleep for a few seconds before trying again. -**The choice of MNODE**: TDengine logically has a management node, but there is no separated execution code. The server-side only has a set of execution code taosd. So which data node will be the management node? This is determined automatically by the system without any manual intervention. The principle is as follows: when a data node starts, it will check its End Point and compare it with the obtained mnode EP List. If its EP exists in it, the data node shall start the mnode module and become a mnode. If your own EP is not in the mnode EP List, the mnode module will not start. During the system operation, due to load balancing, downtime and other reasons, mnode may migrate to the new dnode, while totally transparent without manual intervention. The modification of configuration parameters is the decision made by mnode itself according to resources usage. 
+**The choice of MNODE**: TDengine logically has a management node, but there is no separate execution code. The server-side only has one set of execution code, taosd. So which data node will be the management node? This is determined automatically by the system without any manual intervention. The principle is as follows: when a data node starts, it will check its End Point and compare it with the obtained mnode EP List. If its EP exists in it, the data node shall start the mnode module and become an mnode. If its own EP is not in the mnode EP List, the mnode module will not start. During the system operation, due to load balancing, downtime and other reasons, mnode may migrate to a new dnode, totally transparently and without manual intervention. The modification of configuration parameters is the decision made by mnode itself according to resources usage.
-**Add new data nodes:** After the system has a data node, it has become a working system. There are two steps to add a new node into the cluster. Step1: Connect to the existing working data node using TDengine CLI, and then add the End Point of the new data node with the command "create dnode"; Step 2: In the system configuration parameter file taos.cfg of the new data node, set the “firstEp” and “secondEp” parameters to the EP of any two data nodes in the existing cluster. Please refer to the detailed user tutorial for detailed steps. In this way, the cluster will be established step by step.
+**Add new data nodes:** After the system has a data node, it has become a working system. There are two steps to add a new node into the cluster.
+- Step 1: Connect to the existing working data node using TDengine CLI, and then add the End Point of the new data node with the command "create dnode"
+- Step 2: In the system configuration parameter file taos.cfg of the new data node, set the “firstEp” and “secondEp” parameters to the EP of any two data nodes in the existing cluster. Please refer to the user tutorial for detailed steps. In this way, the cluster will be established step by step.
-**Redirection**: No matter about dnode or TAOSC, the connection to the mnode shall be initiated first, but the mnode is automatically created and maintained by the system, so the user does not know which dnode is running the mnode. TDengine only requires a connection to any working dnode in the system. Because any running dnode maintains the currently running mnode EP List, when receiving a connecting request from the newly started dnode or TAOSC, if it’s not a mnode by self, it will reply to the mnode EP List back. After receiving this list, TAOSC or the newly started dnode will try to establish the connection again. When the mnode EP List changes, each data node quickly obtains the latest list and notifies TAOSC through messaging interaction among nodes.
+**Redirection**: Regardless of dnode or TAOSC, the connection to the mnode is initiated first. The mnode is automatically created and maintained by the system, so the user does not know which dnode is running the mnode. TDengine only requires a connection to any working dnode in the system. Because any running dnode maintains the currently running mnode EP List, when receiving a connecting request from the newly started dnode or TAOSC, if it’s not an mnode itself, it will reply with the mnode EP List. After receiving this list, TAOSC or the newly started dnode will try to establish the connection again.
When the mnode EP List changes, each data node quickly obtains the latest list and notifies TAOSC through messaging interaction among nodes. ### A Typical Data Writing Process @@ -58,17 +60,17 @@ To explain the relationship between vnode, mnode, TAOSC and application and thei
Figure 2: Typical process of TDengine
1. Application initiates a request to insert data through JDBC, ODBC, or other APIs.
-2. TAOSC checks if meta data existing for the table in the cache. If so, go straight to Step 4. If not, TAOSC sends a get meta-data request to mnode.
+2. TAOSC checks the cache to see if meta data exists for the table. If it does, it goes straight to Step 4. If not, TAOSC sends a get meta-data request to mnode.
3. Mnode returns the meta-data of the table to TAOSC. Meta-data contains the schema of the table, and also the vgroup information to which the table belongs (the vnode ID and the End Point of the dnode where the table belongs. If the number of replicas is N, there will be N groups of End Points). If TAOSC does not receive a response from the mnode for a long time, and there are multiple mnodes, TAOSC will send a request to the next mnode.
4. TAOSC initiates an insert request to master vnode.
5. After vnode inserts the data, it gives a reply to TAOSC, indicating that the insertion is successful. If TAOSC doesn't get a response from vnode for a long time, TAOSC will treat this node as offline. In this case, if there are multiple replicas of the inserted database, TAOSC will issue an insert request to the next vnode in vgroup.
6. TAOSC notifies APP that writing is successful.
-For Step 2 and 3, when TAOSC starts, it does not know the End Point of mnode, so it will directly initiate a request to the configured serving End Point of the cluster. If the dnode that receives the request does not have a mnode configured, it will inform the mnode EP list in a reply message, so that TAOSC will re-issue a request to obtain meta-data to the EP of another new mnode.
+For Step 2 and 3, when TAOSC starts, it does not know the End Point of mnode, so it will directly initiate a request to the configured serving End Point of the cluster. If the dnode that receives the request does not have a mnode configured, it will reply with the mnode EP list, so that TAOSC will re-issue a request to obtain meta-data to the EP of another mnode.
-For Step 4 and 5, without caching, TAOSC can't recognize the master in the virtual node group, so assumes that the first vnode is the master and sends a request to it. If this vnode is not the master, it will reply to the actual master as a new target where TAOSC shall send a request to. Once the reply of successful insertion is obtained, TAOSC will cache the information of master node.
+For Step 4 and 5, without caching, TAOSC can't recognize the master in the virtual node group, so it assumes that the first vnode is the master and sends a request to it. If this vnode is not the master, it will reply to the actual master as a new target to which TAOSC shall send a request. Once a response of successful insertion is obtained, TAOSC will cache the information of master node.
-The above is the process of inserting data, and the processes of querying and computing are the same. TAOSC encapsulates and hides all these complicated processes, and it is transparent to applications.
+The above describes the process of inserting data. The processes of querying and computing are the same. TAOSC encapsulates and hides all these complicated processes, and it is transparent to applications.
Through TAOSC caching mechanism, mnode needs to be accessed only when a table is accessed for the first time, so mnode will not become a system bottleneck. However, because schema and vgroup may change (such as load balancing), TAOSC will interact with mnode regularly to automatically update the cache.
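All of this routing is hidden behind a single call: from the application side, the whole sequence above is triggered by nothing more than an ordinary INSERT statement. A sketch using the TDengine CLI (`power.d1001` is a hypothetical table and `dnode1` a hypothetical host; any working dnode can be given with `-h`):

```bash
# TAOSC resolves the vgroup, locates the master vnode and handles redirects
# and retries internally; the application only supplies the SQL statement.
taos -h dnode1 -s "INSERT INTO power.d1001 VALUES (now, 10.3, 219, 0.31);"
```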
@@ -76,24 +78,24 @@ Through TAOSC caching mechanism, mnode needs to be accessed only when a table is ### Storage Model -The data stored by TDengine include collected time-series data, metadata related to database and tables, tag data, etc. These data are specifically divided into three parts: +The data stored by TDengine includes collected time-series data, metadata related to database and tables, tag data, etc. All of the data is specifically divided into three parts: -- Time-series data: stored in vnode and composed of data, head and last files. The amount of data is large and query amount depends on the application scenario. Out-of-order writing is allowed, but delete operation is not supported for the time being, and update operation is only allowed when database “update” parameter is set to 1. By adopting the model with **one table for each data collection point**, the data of a given time period is continuously stored, and the writing against one single table is a simple appending operation. Multiple records can be read at one time, thus ensuring the insert and query operation of a single data collection point with the best performance. -- Tag data: meta files stored in vnode. Four standard operations of create, read, update and delete are supported. The amount of data is not large. If there are N tables, there are N records, so all can be stored in memory. To make tag filtering efficient, TDengine supports multi-core and multi-threaded concurrent queries. As long as the computing resources are sufficient, even in face of millions of tables, the tag filtering results will return in milliseconds. -- Metadata: stored in mnode, including system node, user, DB, Table Schema and other information. Four standard operations of create, delete, update and read are supported. The amount of these data are not large and can be stored in memory, moreover, the query amount is not large because of the client cache. Therefore, TDengine uses centralized storage management, however, there will be no performance bottleneck. +- Time-series data: stored in vnode and composed of data, head and last files. The amount of data is large and query amount depends on the application scenario. Out-of-order writing is allowed, but delete operation is not supported for the time being, and update operation is only allowed when database “update” parameter is set to 1. By adopting the model with **one table for each data collection point**, the data of a given time period is continuously stored, and the writing against one single table is a simple appending operation. Multiple records can be read at one time, thus ensuring the best performance for both insert and query operations of a single data collection point. +- Tag data: meta files stored in vnode. Four standard operations of create, read, update and delete are supported. The amount of data is not large. If there are N tables, there are N records, so all can be stored in memory. To make tag filtering efficient, TDengine supports multi-core and multi-threaded concurrent queries. As long as the computing resources are sufficient, even with millions of tables, the tag filtering results will return in milliseconds. +- Metadata: stored in mnode and includes system node, user, DB, table schema and other information. Four standard operations of create, delete, update and read are supported. The amount of this data is not large and can be stored in memory. Moreover, the number of queries is not large because of client cache. 
Even though TDengine uses centralized storage management, because of the architecture, there is no performance bottleneck.
-Compared with the typical NoSQL storage model, TDengine stores tag data and time-series data completely separately, which has two major advantages:
+Compared with the typical NoSQL storage model, TDengine stores tag data and time-series data completely separately. This has two major advantages:
-- Reduce the redundancy of tag data storage significantly: general NoSQL database or time-series database adopts K-V storage, in which Key includes a timestamp, a device ID and various tags. Each record carries these duplicated tags, so storage space is wasted. Moreover, if the application needs to add, modify or delete tags on historical data, it has to traverse the data and rewrite them again, which is extremely expensive to operate.
-- Aggregate data efficiently between multiple tables: when aggregating data between multiple tables, it first finds out the tables which satisfy the filtering conditions, and then find out the corresponding data blocks of these tables to greatly reduce the data sets to be scanned, thus greatly improving the aggregation efficiency. Moreover, tag data is managed and maintained in a full-memory structure, and tag data queries in tens of millions can return in milliseconds.
+- Reduces the redundancy of tag data storage significantly. General NoSQL database or time-series database adopts K-V (key-value) storage, in which the key includes a timestamp, a device ID and various tags. Each record carries these duplicated tags, so storage space is wasted. Moreover, if the application needs to add, modify or delete tags on historical data, it has to traverse the data and rewrite them again, which is an extremely expensive operation.
+- Aggregates data efficiently between multiple tables: when aggregating data between multiple tables, it first finds the tables which satisfy the filtering conditions, and then finds the corresponding data blocks of these tables. This greatly reduces the data sets to be scanned, which in turn improves the aggregation efficiency. Moreover, tag data is managed and maintained in a full-memory structure, and tag data queries in tens of millions can return in milliseconds.
### Data Sharding
-For large-scale data management, to achieve scale-out, it is generally necessary to adopt the Partitioning or Sharding strategy. TDengine implements data sharding via vnode, and time-series data partitioning via one data file for a time range.
+For large-scale data management, to achieve scale-out, it is generally necessary to adopt a Partitioning or Sharding strategy. TDengine implements data sharding via vnode, and time-series data partitioning via one data file for a time range.
VNode (Virtual Data Node) is responsible for providing writing, query and computing functions for collected time-series data. To facilitate load balancing, data recovery and support heterogeneous environments, TDengine splits a data node into multiple vnodes according to its computing and storage resources. The management of these vnodes is done automatically by TDengine and is completely transparent to the application.
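The tag/time-series separation and the one-table-per-collection-point model that a vnode stores can be seen directly in the schema DDL. A sketch reusing the `meters`/`d1001` names that appear elsewhere in these docs (the `power` database name is assumed):

```bash
# Tags (groupid, location) are declared once per table as metadata rather than
# repeated in every row; the system assigns d1001 to a vnode automatically.
taos -s "CREATE DATABASE IF NOT EXISTS power;"
taos -s "CREATE STABLE power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (groupid INT, location BINARY(24));"
taos -s "CREATE TABLE power.d1001 USING power.meters TAGS (2, 'California.LosAngeles');"
```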
-For a single data collection point, regardless of the amount of data, a vnode (or vnode group, if the number of replicas is greater than 1) has enough computing resource and storage resource to process (if a 16-byte record is generated per second, the original data generated in one year will be less than 0.5 G), so TDengine stores all the data of a table (a data collection point) in one vnode instead of distributing the data to two or more dnodes. Moreover, a vnode can store data from multiple data collection points (tables), and the upper limit of the tables’ quantity for a vnode is one million. By design, all tables in a vnode belong to the same DB. On a data node, unless specially configured, the number of vnodes owned by a DB will not exceed the number of system cores. +For a single data collection point, regardless of the amount of data, a vnode (or vnode group, if the number of replicas is greater than 1) has enough computing resource and storage resource to process (if a 16-byte record is generated per second, the original data generated in one year will be less than 0.5 G). So TDengine stores all the data of a table (a data collection point) in one vnode instead of distributing the data to two or more dnodes. Moreover, a vnode can store data from multiple data collection points (tables), and the upper limit of the tables’ quantity for a vnode is one million. By design, all tables in a vnode belong to the same DB. On a data node, unless specially configured, the number of vnodes owned by a DB will not exceed the number of system cores. When creating a DB, the system does not allocate resources immediately. However, when creating a table, the system will check if there is an allocated vnode with free tablespace. If so, the table will be created in the vacant vnode immediately. If not, the system will create a new vnode on a dnode from the cluster according to the current workload, and then a table. If there are multiple replicas of a DB, the system does not create only one vnode, but a vgroup (virtual data node group). The system has no limit on the number of vnodes, which is just limited by the computing and storage resources of physical nodes. @@ -101,23 +103,23 @@ The meta data of each table (including schema, tags, etc.) is also stored in vno ### Data Partitioning -In addition to vnode sharding, TDengine partitions the time-series data by time range. Each data file contains only one time range of time-series data, and the length of the time range is determined by DB's configuration parameter `“days”`. This method of partitioning by time rang is also convenient to efficiently implement the data retention policy. As long as the data file exceeds the specified number of days (system configuration parameter `“keep”`), it will be automatically deleted. Moreover, different time ranges can be stored in different paths and storage media, so as to facilitate the tiered-storage. Cold/hot data can be stored in different storage media to reduce the storage cost. +In addition to vnode sharding, TDengine partitions the time-series data by time range. Each data file contains only one time range of time-series data, and the length of the time range is determined by the database configuration parameter `“days”`. This method of partitioning by time range is also convenient to efficiently implement data retention policies. As long as the data file exceeds the specified number of days (system configuration parameter `“keep”`), it will be automatically deleted. 
Moreover, different time ranges can be stored in different paths and storage media, so as to facilitate tiered-storage. Cold/hot data can be stored in different storage media to significantly reduce storage costs. In general, **TDengine splits big data by vnode and time range in two dimensions** to manage the data efficiently with horizontal scalability. ### Load Balancing -Each dnode regularly reports its status (including hard disk space, memory size, CPU, network, number of virtual nodes, etc.) to the mnode (virtual management node), so mnode knows the status of the entire cluster. Based on the overall status, when the mnode finds a dnode is overloaded, it will migrate one or more vnodes to other dnodes. During the process, TDengine services keep running and the data insertion, query and computing operations are not affected. +Each dnode regularly reports its status (including hard disk space, memory size, CPU, network, number of virtual nodes, etc.) to the mnode (virtual management node) so that the mnode knows the status of the entire cluster. Based on the overall status, when the mnode finds a dnode is overloaded, it will migrate one or more vnodes to other dnodes. During the process, TDengine services keep running and the data insertion, query and computing operations are not affected. -If the mnode has not received the dnode status for a period of time, the dnode will be treated as offline. When offline lasts a certain period of time (configured by parameter `“offlineThreshold”`), the dnode will be forcibly removed from the cluster by mnode. If the number of replicas of vnodes on this dnode is greater than one, the system will automatically create new replicas on other dnodes to ensure the replica number. If there are other mnodes on this dnode and the number of mnodes replicas is greater than one, the system will automatically create new mnodes on other dnodes to ensure the replica number. +If the mnode has not received the dnode status for a period of time, the dnode will be treated as offline. If the dnode stays offline beyond the time configured by parameter `“offlineThreshold”`, the dnode will be forcibly removed from the cluster by mnode. If the number of replicas of vnodes on this dnode is greater than one, the system will automatically create new replicas on other dnodes to ensure the replica number. If there are other mnodes on this dnode and the number of mnodes replicas is greater than one, the system will automatically create new mnodes on other dnodes to ensure the replica number. -When new data nodes are added to the cluster, with new computing and storage resources are added, the system will automatically start the load balancing process. +When new data nodes are added to the cluster, with new computing and storage resources, the system will automatically start the load balancing process. The load balancing process does not require any manual intervention, and it is transparent to the application. **Note: load balancing is controlled by parameter “balance”, which determines to turn on/off automatic load balancing.** ## Data Writing and Replication Process -If a database has N replicas, thus a virtual node group has N virtual nodes, but only one as Master and all others are slaves. When the application writes a new record to system, only the Master vnode can accept the writing request. If a slave vnode receives a writing request, the system will notifies TAOSC to redirect. +If a database has N replicas, a virtual node group has N virtual nodes. 
But only one is the Master and all others are slaves. When the application writes a new record to the system, only the Master vnode can accept the writing request. If a slave vnode receives a writing request, the system will notify TAOSC to redirect.
### Master vnode Writing Process
@@ -130,7 +132,7 @@ Master Vnode uses a writing process as follows:
2. If the system configuration parameter `“walLevel”` is greater than 0, vnode will write the original request packet into database log file WAL. If walLevel is set to 2 and fsync is set to 0, TDengine will make WAL data written immediately to ensure that even system goes down, all data can be recovered from database log file;
3. If there are multiple replicas, vnode will forward data packet to slave vnodes in the same virtual node group, and the forwarded packet has a version number with data;
4. Write into memory and add the record to “skip list”;
-5. Master vnode returns a confirmation message to the application, indicating a successful writing.
+5. Master vnode returns a confirmation message to the application, indicating a successful write.
6. If any of Step 2, 3 or 4 fails, the error will directly return to the application.
### Slave vnode Writing Process
@@ -146,19 +148,19 @@ For a slave vnode, the write process as follows:
Compared with Master vnode, slave vnode has no forwarding or reply confirmation step, means two steps less. But writing into memory and WAL is exactly the same.
-### Remote Disaster Recovery and IDC Migration
-As above Master and Slave processes discussed, TDengine adopts asynchronous replication for data synchronization. This method can greatly improve the writing performance, with no obvious impact from network delay. By configuring IDC and rack number for each physical node, it can be ensured that for a virtual node group, virtual nodes are composed of physical nodes from different IDC and different racks, thus implementing remote disaster recovery without other tools.
+### Remote Disaster Recovery and IDC (Internet Data Center) Migration
+As discussed above, TDengine writes using Master and Slave processes. TDengine adopts asynchronous replication for data synchronization. This method can greatly improve write performance, with no obvious impact from network delay. By configuring IDC and rack number for each physical node, it can be ensured that for a virtual node group, virtual nodes are composed of physical nodes from different IDC and different racks, thus implementing remote disaster recovery without other tools.
-On the other hand, TDengine supports dynamic modification of the replicas number. Once the number of replicas increases, the newly added virtual nodes will immediately enter the data synchronization process. After synchronization completed, added virtual nodes can provide services. In the synchronization process, master and other synchronized virtual nodes keep serving. With this feature, TDengine can provide IDC migration without service interruption. It is only necessary to add new physical nodes to the existing IDC cluster, and then remove old physical nodes after the data synchronization is completed.
+On the other hand, TDengine supports dynamic modification of the replica number. Once the number of replicas increases, the newly added virtual nodes will immediately enter the data synchronization process. After synchronization is complete, added virtual nodes can provide services. In the synchronization process, master and other synchronized virtual nodes keep serving.
With this feature, TDengine can provide IDC migration without service interruption. It is only necessary to add new physical nodes to the existing IDC cluster, and then remove old physical nodes after the data synchronization is completed.
-However, the asynchronous replication has a tiny time window where data can be lost. The specific scenario is as follows:
+However, the asynchronous replication has a very low probability scenario where data may be lost. The specific scenario is as follows:
-1. Master vnode has finished its 5-step operations, confirmed the success of writing to APP, and then went down;
+1. Master vnode has finished its 5-step operations, confirmed the success of writing to APP, and then goes down;
2. Slave vnode receives the write request, then processing fails before writing to the log in Step 2;
3. Slave vnode will become the new master, thus losing one record.
-In theory, for asynchronous replication, there is no guarantee to prevent data loss. However, this window is extremely small, only if mater and slave fail at the same time, and just confirm the successful write to the application before.
+In theory, for asynchronous replication, there is no guarantee to prevent data loss. However, this is an extremely low probability scenario as described above.
Note: Remote disaster recovery and no-downtime IDC migration are only supported by Enterprise Edition. **Hint: This function is not available yet**
@@ -171,43 +173,43 @@ When a vnode starts, the roles (master, slave) are uncertain, and the data is in
1. If there's only one replica, it's always master
2. When all replicas are online, the one with latest version is master
3. Over half of online nodes are virtual nodes, and some virtual node is slave, it will automatically become master
-4. For 2 and 3, if multiple virtual nodes meet the requirement, the first vnode in virtual node group list will be selected as master
+4. For 2 and 3, if multiple virtual nodes meet the requirement, the first vnode in virtual node group list will be selected as master.
### Synchronous Replication
For scenarios with strong data consistency requirements, asynchronous data replication is not applicable, because there is a small probability of data loss. So, TDengine provides a synchronous replication mechanism for users. When creating a database, in addition to specifying the number of replicas, user also needs to specify a new parameter “quorum”. If quorum is greater than one, it means that every time the Master forwards a message to the replica, it needs to wait for “quorum-1” reply confirms before informing the application that data has been successfully written in slave. If “quorum-1” reply confirms are not received within a certain period of time, the master vnode will return an error to the application.
-With synchronous replication, performance of system will decrease and latency will increase. Because metadata needs strong consistent, the default for data synchronization between mnodes is synchronous replication.
+With synchronous replication, the performance of the system will decrease and latency will increase. Because metadata needs strong consistency, the default for data synchronization between mnodes is synchronous replication.
## Caching and Persistence
### Caching
-TDengine adopts a time-driven cache management strategy (First-In-First-Out, FIFO), also known as a Write-driven Cache Management Mechanism.
This strategy is different from the read-driven data caching mode (Least-Recent-Used, LRU), which directly put the most recently written data in the system buffer. When the buffer reaches a threshold, the earliest data are written to disk in batches. Generally speaking, for the use of IoT data, users are most concerned about the newly generated data, that is, the current status. TDengine takes full advantage of this feature to put the most recently arrived (current state) data in the buffer. +TDengine adopts a time-driven cache management strategy (First-In-First-Out, FIFO), also known as a Write-driven Cache Management Mechanism. This strategy is different from the read-driven data caching mode (Least-Recent-Used, LRU), which directly puts the most recently written data in the system buffer. When the buffer reaches a threshold, the earliest data are written to disk in batches. Generally speaking, for the use of IoT data, users are most concerned about the most recently generated data, that is, the current status. TDengine takes full advantage of this feature to put the most recently arrived (current state) data in the buffer. -TDengine provides millisecond-level data collecting capability to users through query functions. Putting the recently arrived data directly in the buffer can respond to users' analysis query for the latest piece or batch of data more quickly, and provide faster database query response capability as a whole. In this sense, **TDengine can be used as a data cache by setting appropriate configuration parameters without deploying Redis or other additional cache systems**, which can effectively simplify the system architecture and reduce the operation costs. It should be noted that after the TDengine is restarted, the buffer of the system will be emptied, the previously cached data will be written to disk in batches, and the previously cached data will not be reloaded into the buffer as so in a proprietary key-value cache system. +TDengine provides millisecond-level data retrieval capability to users through query functions. Putting the recently arrived data directly in the buffer allows the system to respond to users' analysis queries for the latest piece or batch of data more quickly, and provides faster database query response capability as a whole. In this sense, **TDengine can be used as a data cache by setting appropriate configuration parameters without deploying Redis or other additional cache systems**. This can effectively simplify the system architecture and reduce operational costs. It should be noted that after TDengine is restarted, the buffer of the system will be emptied, the previously cached data will be written to disk in batches, and the previously cached data will not be reloaded into the buffer. In this sense, TDengine's cache differs from proprietary key-value cache systems. Each vnode has its own independent memory, composed of multiple memory blocks of fixed size, and different vnodes are completely isolated. When writing data, similar to the writing of logs, data is sequentially added to memory, but each vnode maintains its own skip list for quick search. When more than one third of the memory blocks are used, the disk writing operation will start, and the subsequent writing operation is carried out in a new memory block. By this design, one third of the memory blocks in a vnode keep the latest data, so as to achieve the purpose of caching and quick search. 
The number of memory blocks of a vnode is determined by the configuration parameter “blocks”, and the size of memory blocks is determined by the configuration parameter “cache”. ### Persistent Storage -TDengine uses a data-driven method to write the data from buffer into hard disk for persistent storage. When the cached data in vnode reaches a certain volume, TDengine will also pull up the disk-writing thread to write the cached data into persistent storage in order not to block subsequent data writing. TDengine will open a new database log file when the data is written, and delete the old database log file after written successfully to avoid unlimited log growth. +TDengine uses a data-driven method to write the data from buffer into hard disk for persistent storage. When the cached data in vnode reaches a certain volume, TDengine will pull up the disk-writing thread to write the cached data into persistent storage so that subsequent data writing is not blocked. TDengine will open a new database log file when the data is written, and delete the old database log file after successful persistence, to avoid unlimited log growth. -To make full use of the characteristics of time-series data, TDengine splits the data stored in persistent storage by a vnode into multiple files, each file only saves data for a fixed number of days, which is determined by the system configuration parameter `“days”`. By so, for the given start and end date of a query, you can locate the data files to open immediately without any index, thus greatly speeding up reading operations. +To make full use of the characteristics of time-series data, TDengine splits the data stored in persistent storage by a vnode into multiple files, each file only saves data for a fixed number of days, which is determined by the system configuration parameter `“days”`. Thus, for the given start and end dates of a query, you can locate the data files to open immediately without any index. This greatly speeds up read operations. For time-series data, there is generally a retention policy, which is determined by the system configuration parameter `“keep”`. Data files exceeding this set number of days will be automatically deleted by the system to free up storage space. Given the “days” and “keep” parameters, the total number of data files in a vnode is: keep/days. The total number of data files should not be too large or too small. 10 to 100 is appropriate. Based on this principle, a reasonable value for “days” can be set. In the current version, parameter “keep” can be modified, but parameter “days” cannot be modified once it is set. -In each data file, the data of a table is stored by blocks. A table can have one or more data file blocks. In a file block, data is stored in columns, occupying a continuous storage space, thus greatly improving the reading speed. The size of file block is determined by the system parameter `“maxRows”` (the maximum number of records per block), and the default value is 4096. This value should not be too large or too small. If it is too large, the data locating in search will cost longer; if too small, the index of data block is too large, and the compression efficiency will be low with slower reading speed. +In each data file, the data of a table is stored in blocks. A table can have one or more data file blocks. In a file block, data is stored in columns, occupying a continuous storage space, thus greatly improving the reading speed. 
The size of a file block is determined by the system parameter `“maxRows”` (the maximum number of records per block), and the default value is 4096. This value should not be too large or too small. If it is too large, data location for queries will take a longer time. If it is too small, the index of the data block is too large, and the compression efficiency will be low with slower reading speed. -Each data file (with a .data postfix) has a corresponding index file (with a .head postfix). The index file has summary information of a data block for each table, recording the offset of each data block in the data file, start and end time of data and other information, so as to lead system quickly locate the data to be found. Each data file also has a corresponding last file (with a .last postfix), which is designed to prevent data block fragmentation when written in disk. If the number of written records from a table does not reach the system configuration parameter `“minRows”` (minimum number of records per block), it will be stored in the last file first. When write to disk next time, the newly written records will be merged with the records in last file and then written into data file. +Each data file (with a .data postfix) has a corresponding index file (with a .head postfix). The index file has summary information of a data block for each table, recording the offset of each data block in the data file, start and end time of data and other information which allows the system to locate the data to be found very quickly. Each data file also has a corresponding last file (with a .last postfix), which is designed to prevent data block fragmentation when writing to disk. If the number of written records from a table does not reach the system configuration parameter `“minRows”` (minimum number of records per block), it will be stored in the last file first. At the next write operation to the disk, the newly written records will be merged with the records in the last file and then written into the data file. -When data is written to disk, it is decided whether to compress the data according to system configuration parameter `“comp”`. TDengine provides three compression options: no compression, one-stage compression and two-stage compression, corresponding to comp values of 0, 1 and 2 respectively. One-stage compression is carried out according to the type of data. Compression algorithms include delta-delta coding, simple 8B method, zig-zag coding, LZ4 and other algorithms. Two-stage compression is based on one-stage compression and compressed by general compression algorithm, which has higher compression ratio. +When data is written to disk, the system decides whether to compress the data based on the system configuration parameter `“comp”`. TDengine provides three compression options: no compression, one-stage compression and two-stage compression, corresponding to comp values of 0, 1 and 2 respectively. One-stage compression is carried out according to the type of data. Compression algorithms include delta-delta coding, simple 8B method, zig-zag coding, LZ4 and other algorithms. Two-stage compression is based on one-stage compression and applies a general compression algorithm on top, which achieves a higher compression ratio. ### Tiered Storage -By default, TDengine saves all data in /var/lib/taos directory, and the data files of each vnode are saved in a different directory under this directory. 
In order to expand the storage space, minimize the bottleneck of file reading and improve the data throughput rate, TDengine can configure the system parameter “dataDir” to allow multiple mounted hard disks to be used by system at the same time. In addition, TDengine also provides the function of tiered data storage, i.e. storage on different storage media according to the time stamps of data files. For example, the latest data is stored on SSD, the data for more than one week is stored on local hard disk, and the data for more than four weeks is stored on network storage device, thus reducing the storage cost and ensuring efficient data access. The movement of data on different storage media is automatically done by the system and completely transparent to applications. Tiered storage of data is also configured through the system parameter “dataDir”. +By default, TDengine saves all data in the /var/lib/taos directory, and the data files of each vnode are saved in a different directory under this directory. In order to expand the storage space, minimize the bottleneck of file reading and improve the data throughput rate, TDengine can configure the system parameter “dataDir” to allow multiple mounted hard disks to be used by the system at the same time. In addition, TDengine also provides the function of tiered data storage, i.e. storage on different storage media according to the time stamps of data files. For example, the latest data is stored on SSD, the data older than a week is stored on local hard disk, and data older than four weeks is stored on network storage device. This reduces storage costs and ensures efficient data access. The movement of data on different storage media is automatically done by the system and is completely transparent to applications. Tiered storage of data is also configured through the system parameter “dataDir”. dataDir format is as follows: ``` @@ -216,7 +218,7 @@ dataDir data_path [tier_level] Where data_path is the folder path of the mount point and tier_level is the media storage-tier. The higher the media storage-tier, the older the data file. Multiple hard disks can be mounted at the same storage-tier, and data files on the same storage-tier are distributed on all hard disks within the tier. TDengine supports up to 3 tiers of storage, so tier_level values are 0, 1, and 2. When configuring dataDir, there must be only one mount path without specifying tier_level, which is called the special mount disk (path). The mount path defaults to level 0 storage media and contains special file links, which cannot be removed, otherwise it will have a devastating impact on the written data. -Suppose a physical node with six mountable hard disks/mnt/disk1,/mnt/disk2, …,/mnt/disk6, where disk1 and disk2 need to be designated as level 0 storage media, disk3 and disk4 are level 1 storage media, and disk5 and disk6 are level 2 storage media. Disk1 is a special mount disk, you can configure it in/etc/taos/taos.cfg as follows: +Suppose there is a physical node with six mountable hard disks /mnt/disk1, /mnt/disk2, …, /mnt/disk6, where disk1 and disk2 need to be designated as level 0 storage media, disk3 and disk4 are level 1 storage media, and disk5 and disk6 are level 2 storage media. Disk1 is a special mount disk; you can configure it in /etc/taos/taos.cfg as follows: ``` dataDir /mnt/disk1/taos @@ -233,11 +235,11 @@ Note: Tiered Storage is only supported in Enterprise Edition ## Data Query -TDengine provides a variety of query processing functions for tables and STables. 
In addition to common aggregation queries, TDengine also provides window queries and statistical aggregation functions for time-series data. The query processing of TDengine needs the collaboration of client, vnode and mnode. +TDengine provides a variety of query processing functions for tables and STables. In addition to common aggregation queries, TDengine also provides window queries and statistical aggregation functions for time-series data. Query processing in TDengine needs the collaboration of client, vnode and mnode. ### Single Table Query -The parsing and verification of SQL statements are completed on the client side. SQL statements are parsed and generate an Abstract Syntax Tree (AST), which is then checksummed. Then request metadata information (table metadata) for the table specified in the query from management node (mnode). +The parsing and verification of SQL statements are completed on the client side. SQL statements are parsed and generate an Abstract Syntax Tree (AST), which is then checksummed. Then metadata information (table metadata) for the table specified in the query is requested from the management node (mnode). According to the End Point information in the metadata, the query request is serialized and sent to the data node (dnode) where the table is located. After receiving the query, the dnode identifies the virtual node (vnode) pointed to and forwards the message to the query execution queue of the vnode. The query execution thread of vnode establishes the basic query execution environment, immediately returns the query request and starts executing the query at the same time. @@ -245,9 +247,9 @@ When client obtains query result, the worker thread in query execution queue of ### Aggregation by Time Axis, Downsampling, Interpolation -The remarkable feature that time-series data is different from ordinary data is that each record has a timestamp, so aggregating data with timestamps on the time axis is an important and distinct feature from common databases. From this point of view, it is similar to the window query of stream computing engine. +Time-series data is different from ordinary data in that each record has a timestamp. So aggregating data by timestamps on the time axis is an important and distinct feature of time-series databases that sets them apart from common databases. It is similar to the window query of stream computing engines. -The keyword `interval` is introduced into TDengine to split fixed length time windows on time axis, and the data are aggregated based on time windows, and the data within window range are aggregated as needed. For example: +The keyword `interval` is introduced into TDengine to split fixed length time windows on the time axis. The data is aggregated based on time windows, and the data within time window ranges is aggregated as needed. For example: ```mysql select count(*) from d1001 interval(1h); @@ -265,7 +267,7 @@ For the data collected by device D1001, the number of records per hour is counte ### Multi-table Aggregation Query -TDengine creates a separate table for each data collection point, but in practical applications, it is often necessary to aggregate data from different data collection points. In order to perform aggregation operations efficiently, TDengine introduces the concept of STable. STable is used to represent a specific type of data collection point. It is a table set containing multiple tables. The schema of each table in the set is the same, but each table has its own static tag. 
The tags can be multiple and be added, deleted and modified at any time. Applications can aggregate or statistically operate all or a subset of tables under a STABLE by specifying tag filters, thus greatly simplifying the development of applications. The process is shown in the following figure: +TDengine creates a separate table for each data collection point, but in practical applications, it is often necessary to aggregate data from different data collection points. In order to perform aggregation operations efficiently, TDengine introduces the concept of STable (super table). STable is used to represent a specific type of data collection point. It is a table set containing multiple tables. The schema of each table in the set is the same, but each table has its own static tag. There can be multiple tags, which can be added, deleted and modified at any time. Applications can aggregate or statistically operate on all or a subset of tables under a STABLE by specifying tag filters. This greatly simplifies the development of applications. The process is shown in the following figure: ![TDengine Database Diagram of multi-table aggregation query](multi_tables.webp)
Figure 5: Diagram of multi-table aggregation query
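As a minimal hypothetical sketch of such a tag-filtered STable aggregation (the `meters` super table, its `current` and `voltage` columns, and the `location` tag are all assumed purely for illustration):

```mysql
-- aggregate over every sub-table of the assumed STable whose tag matches the filter
select avg(current), max(voltage) from meters where location = 'beijing' group by location;
```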
@@ -274,12 +276,12 @@ TDengine creates a separate table for each data collection point, but in practic 2. TAOSC sends the STable name to Meta Node (management node); 3. Management node sends the vnode list owned by the STable back to TAOSC; 4. TAOSC sends the computing request together with tag filters to multiple data nodes corresponding to these vnodes; -5. Each vnode first finds out the set of tables within its own node that meet the tag filters from memory, then scans the stored time-series data, completes corresponding aggregation calculations, and returns result to TAOSC; +5. Each vnode first finds the set of tables within its own node that meet the tag filters from memory, then scans the stored time-series data, completes corresponding aggregation calculations, and returns the result to TAOSC; 6. TAOSC finally aggregates the results returned by multiple data nodes and sends them back to the application. -Since TDengine stores tag data and time-series data separately in vnode, by filtering tag data in memory, the set of tables that need to participate in aggregation operation is first found, which greatly reduces the volume of data scanned and improves aggregation speed. At the same time, because the data is distributed in multiple vnodes/dnodes, the aggregation operation is carried out concurrently in multiple vnodes, which further improves the aggregation speed. Aggregation functions for ordinary tables and most operations are applicable to STables. The syntax is exactly the same. Please see TAOS SQL for details. +Since TDengine stores tag data and time-series data separately in vnode, by filtering tag data in memory, the set of tables that need to participate in the aggregation operation is first found, which reduces the volume of data to be scanned and improves aggregation speed. At the same time, because the data is distributed in multiple vnodes/dnodes, the aggregation operation is carried out concurrently in multiple vnodes, which further improves the aggregation speed. Aggregation functions for ordinary tables and most operations are applicable to STables. The syntax is exactly the same. Please see TAOS SQL for details. ### Precomputation -In order to effectively improve the performance of query processing, based-on the unchangeable feature of IoT data, statistical information of data stored in data block is recorded in the head of data block, including max value, min value, and sum. We call it a precomputing unit. If the query processing involves all the data of a whole data block, the pre-calculated results are directly used, and no need to read the data block contents at all. Since the amount of pre-calculated data is much smaller than the actual size of data block stored on disk, for query processing with disk IO as bottleneck, the use of pre-calculated results can greatly reduce the pressure of reading IO and accelerate the query process. The precomputation mechanism is similar to the index BRIN (Block Range Index) of PostgreSQL. +In order to effectively improve the performance of query processing, and based on the immutable nature of IoT data, statistical information about the data stored in a data block is recorded in the head of the data block, including max value, min value, and sum. We call it a precomputing unit. If the query processing involves all the data of a whole data block, the pre-calculated results are used directly, and there is no need to read the data block contents at all. 
Since the amount of pre-calculated data is much smaller than the actual size of the data blocks stored on disk, for query processing with disk IO as the bottleneck, the use of pre-calculated results can greatly reduce the pressure of read IO and accelerate the query process. The precomputation mechanism is similar to the BRIN (Block Range Index) of PostgreSQL. diff --git a/docs-en/25-application/01-telegraf.md b/docs-en/25-application/01-telegraf.md index 6a57145cd3d82ca5ec1ab828bfc7b6270bbe9d47..d30a23fe1b942e1411e8b5f1320e1c54ae2b407f 100644 --- a/docs-en/25-application/01-telegraf.md +++ b/docs-en/25-application/01-telegraf.md @@ -5,16 +5,16 @@ title: Quickly Build IT DevOps Visualization System with TDengine + Telegraf + G ## Background -TDengine is a big data platform designed and optimized for IoT (Internet of Things), Vehicle Telematics, Industrial Internet, IT DevOps, etc. by TAOSData. Since it opened its source code in July 2019, it has won the favor of a large number of time-series data developers with its innovative data modeling design, convenient installation, easy-to-use programming interface, and powerful data writing and query performance. +TDengine is a big data platform designed and optimized for IoT (Internet of Things), Vehicle Telemetry, Industrial Internet, IT DevOps and other applications. Since it was open-sourced in July 2019, it has won the favor of a large number of time-series data developers with its innovative data modeling design, convenient installation, easy-to-use programming interface, and powerful data writing and query performance. IT DevOps metric data are usually time-sensitive, for example: - System resource metrics: CPU, memory, IO, bandwidth, etc. - Software system metrics: health status, number of connections, number of requests, number of timeouts, number of errors, response time, service type, and other business-related metrics. -Current mainstream IT DevOps system usually include a data collection module, a data persistent module, and a visualization module; Telegraf and Grafana are one of the most popular data collection modules and visualization modules, respectively. The data persistent module is available in a wide range of options, with OpenTSDB or InfluxDB being the most popular. TDengine, as an emerging time-series big data platform, has the advantages of high performance, high reliability, easy management and easy maintenance. +Current mainstream IT DevOps systems usually include a data collection module, a data persistence module, and a visualization module; Telegraf and Grafana are among the most popular data collection and visualization tools, respectively. The data persistence module is available in a wide range of options, with OpenTSDB or InfluxDB being the most popular. TDengine, as an emerging time-series big data platform, has the advantages of high performance, high reliability, easy management and easy maintenance. -This article introduces how to quickly build a TDengine + Telegraf + Grafana based IT DevOps visualization system without writing even a single line of code and by simply modifying a few lines of configuration files. The architecture is as follows. +This article introduces how to quickly build a TDengine + Telegraf + Grafana based IT DevOps visualization system without writing even a single line of code and by simply modifying a few lines in configuration files. The architecture is as follows. 
![TDengine Database IT-DevOps-Solutions-Telegraf](./IT-DevOps-Solutions-Telegraf.webp) @@ -79,5 +79,5 @@ Click on the plus icon on the left and select `Import` to get the data from `htt ## Wrap-up -The above demonstrates how to quickly build a IT DevOps visualization system. Thanks to the new schemaless protocol parsing feature in TDengine version 2.4.0.0 and the powerful ecological software adaptation capability, users can build an efficient and easy-to-use IT DevOps visualization system in just a few minutes. +The above demonstrates how to quickly build an IT DevOps visualization system. Thanks to the new schemaless protocol parsing feature in TDengine version 2.4.0.0 and the ability to integrate easily with a large software ecosystem, users can build an efficient and easy-to-use IT DevOps visualization system in just a few minutes. Please refer to the official documentation and product implementation cases for other features. diff --git a/docs-en/25-application/02-collectd.md b/docs-en/25-application/02-collectd.md index 963881eafa6e5085eab951c1b1ab54faeba1fa7b..1733ed1b1af8c9375c3773d1ca86831396499a78 100644 --- a/docs-en/25-application/02-collectd.md +++ b/docs-en/25-application/02-collectd.md @@ -5,17 +5,17 @@ title: Quickly build an IT DevOps visualization system using TDengine + collectd ## Background -TDengine is a big data platform designed and optimized for IoT (Internet of Things), Vehicle Telematics, Industrial Internet, IT DevOps, etc. by TAOSData. Since it opened its source code in July 2019, it has won the favor of a large number of time-series data developers with its innovative data modeling design, convenient installation, easy-to-use programming interface, and powerful data writing and query performance. +TDengine is a big data platform designed and optimized for IoT (Internet of Things), Vehicle Telemetry, Industrial Internet, IT DevOps and other applications. Since it was open-sourced in July 2019, it has won the favor of a large number of time-series data developers with its innovative data modeling design, convenient installation, easy-to-use programming interface, and powerful data writing and query performance. IT DevOps metric data are usually time-sensitive, for example: - System resource metrics: CPU, memory, IO, bandwidth, etc. - Software system metrics: health status, number of connections, number of requests, number of timeouts, number of errors, response time, service type, and other business-related metrics. -The current mainstream IT DevOps visualization system usually contains a data collection module, a data persistent module, and a visual display module. collectd/StatsD, as an old-fashion open source data collection tool, has a wide user base. However, collectd/StatsD has limited functionality, and often needs to be combined with Telegraf, Grafana, and a time-series database to build a complete monitoring system. +The current mainstream IT DevOps visualization system usually contains a data collection module, a data persistence module, and a visual display module. collectd/StatsD, as an old-fashioned open source data collection tool, has a wide user base. However, collectd/StatsD has limited functionality, and often needs to be combined with Telegraf, Grafana, and a time-series database to build a complete monitoring system. The new version of TDengine supports multiple data protocols and can accept data from collectd and StatsD directly, and provides a Grafana dashboard for graphical display. 
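To make the ingestion path concrete: what typically travels over the wire from collectd's write_tsdb plugin (or a comparable StatsD backend) is an OpenTSDB-telnet-style `put` line that taosAdapter parses directly. The metric name, timestamp, value and tags below are invented purely for illustration:

```
put cpu.usage.user 1648432611249 13.5 host=web01 cpu=0
```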
-This article introduces how to quickly build an IT DevOps visualization system based on TDengine + collectd / StatsD + Grafana without writing even a single line of code but by simply modifying a few lines of configuration files. The architecture is shown in the following figure. +This article introduces how to quickly build an IT DevOps visualization system based on TDengine + collectd / StatsD + Grafana without writing even a single line of code but by simply modifying a few lines in configuration files. The architecture is shown in the following figure. ![TDengine Database IT-DevOps-Solutions-Collectd-StatsD](./IT-DevOps-Solutions-Collectd-StatsD.webp) @@ -99,6 +99,6 @@ Download the dashboard json from `https://github.com/taosdata/grafanaplugin/blob ## Wrap-up -TDengine, as an emerging time-series big data platform, has the advantages of high performance, high reliability, easy management and easy maintenance. Thanks to the new schemaless protocol parsing function in TDengine version 2.4.0.0 and the powerful ecological software adaptation capability, users can build an efficient and easy-to-use IT DevOps visualization system or adapt to an existing system in just a few minutes. +TDengine, as an emerging time-series big data platform, has the advantages of high performance, high reliability, easy management and easy maintenance. Thanks to the new schemaless protocol parsing feature in TDengine version 2.4.0.0 and the ability to integrate easily with a large software ecosystem, users can build an efficient and easy-to-use IT DevOps visualization system, or adapt an existing system, in just a few minutes. For TDengine's powerful data writing and querying performance and other features, please refer to the official documentation and successful product implementation cases. diff --git a/docs-en/25-application/03-immigrate.md b/docs-en/25-application/03-immigrate.md index 69166bf78b66a23af35af726f2e5c477195a3595..4d47aec1d76014ba63f6be91004abcc3934769f7 100644 --- a/docs-en/25-application/03-immigrate.md +++ b/docs-en/25-application/03-immigrate.md @@ -3,10 +3,9 @@ sidebar_label: OpenTSDB Migration to TDengine title: Best Practices for Migrating OpenTSDB Applications to TDengine --- -As a distributed, scalable, HBase-based distributed time-series database software, thanks to its first-mover advantage, OpenTSDB has been introduced and widely used in DevOps by people. However, using new technologies like cloud computing, microservices, and containerization technology with rapid development. Enterprise-level services are becoming more and more diverse. The architecture is becoming more complex. +As a distributed, scalable time-series database platform based on HBase, and thanks to its first-mover advantage, OpenTSDB is widely used for monitoring in DevOps. However, as new technologies like cloud computing, microservices, and containerization have developed rapidly, enterprise-level services are becoming more and more diverse and the architecture is becoming more complex. -From this situation, it increasingly plagues to use of OpenTSDB as a DevOps backend storage for monitoring by performance issues and delayed feature upgrades. The resulting increase in application deployment costs and reduced operational efficiency. -These problems are becoming increasingly severe as the system scales up. +As a result, as a DevOps backend for monitoring, OpenTSDB is plagued by performance issues and delayed feature upgrades. 
This has resulted in increased application deployment costs and reduced operational efficiency. These problems become increasingly severe as the system tries to scale up. To meet the fast-growing IoT big data market and technical needs, TAOSData developed an innovative big-data processing product, **TDengine**. @@ -14,14 +13,14 @@ After learning the advantages of many traditional relational databases and NoSQL Compared with OpenTSDB, TDengine has the following distinctive features. -- Performance of data writing and querying far exceeds that of OpenTSDB. -- Efficient compression mechanism for time-series data, which compresses less than 1/5 of the storage space on disk. -- The installation and deployment are straightforward. A single installation package can complete the installation and deployment and does not rely on other third-party software. The entire installation and deployment process in a few seconds; -- The built-in functions cover all of OpenTSDB's query functions. And support more time-series data query functions, scalar functions, and aggregation functions. And support advanced query functions such as multiple time-window aggregations, join query, expression operation, multiple group aggregation, user-defined sorting, and user-defined functions. Adopting SQL-like syntax rules is more straightforward and has no learning cost. +- Data writing and querying performance far exceeds that of OpenTSDB. +- Efficient compression mechanism for time-series data, which compresses data to less than 1/5 of the original storage space on disk. +- The installation and deployment are straightforward. A single installation package can complete the installation and deployment and does not rely on other third-party software. The entire installation and deployment process takes a few seconds. +- The built-in functions cover all of OpenTSDB's query functions and TDengine supports more time-series data query functions, scalar functions, and aggregation functions. TDengine also supports advanced query functions such as multiple time-window aggregations, join query, expression operation, multiple group aggregation, user-defined sorting, and user-defined functions. With a SQL-like query language, querying is more straightforward and has no learning cost. - Supports up to 128 tags, with a total tag length of 16 KB. - In addition to the REST interface, it also provides interfaces to Java, Python, C, Rust, Go, C# and other languages. It supports a variety of enterprise-class standard connector protocols such as JDBC. -If we migrate the applications originally running on OpenTSDB to TDengine, we will effectively reduce the compute and storage resource consumption and the number of deployed servers. And will also significantly reduce the operation and maintenance costs, making operation and maintenance management more straightforward and more accessible, and considerably reducing the total cost of ownership. Like OpenTSDB, TDengine has also been open-sourced, including the stand-alone version and the cluster version source code. So there is no need to be concerned about the vendor-lock problem. +Migrating applications originally running on OpenTSDB to TDengine effectively reduces compute and storage resource consumption and the number of deployed servers. It also significantly reduces operation and maintenance costs, makes operation and maintenance management more straightforward and more accessible, and considerably reduces the total cost of ownership. Like OpenTSDB, TDengine has also been open-sourced. 
Both the stand-alone version and the cluster version are open-sourced and there is no need to be concerned about the vendor lock-in problem. We will explain how to migrate OpenTSDB applications to TDengine quickly, securely, and reliably without coding, using the most typical DevOps scenarios. Subsequent chapters will go into more depth to facilitate migration for non-DevOps systems. @@ -34,7 +33,7 @@ The following figure (Figure 1) shows the system's overall architecture for a ty **Figure 1. Typical architecture in a DevOps scenario** ![TDengine Database IT-DevOps-Solutions-Immigrate-OpenTSDB-Arch](./IT-DevOps-Solutions-Immigrate-OpenTSDB-Arch.webp "Figure 1. Typical architecture in a DevOps scenario") -In this application scenario, there are Agent tools deployed in the application environment to collect machine metrics, network metrics, and application metrics. Data collectors to aggregate information collected by agents, systems for persistent data storage and management, and tools for monitoring data visualization (e.g., Grafana, etc.). +In this application scenario, there are Agent tools deployed in the application environment to collect machine metrics, network metrics, and application metrics. There are also data collectors to aggregate information collected by agents, systems for persistent data storage and management, and tools for data visualization (e.g., Grafana, etc.). The agents deployed in the application nodes are responsible for providing operational metrics from different sources to collectd/StatsD. collectd/StatsD is responsible for pushing the aggregated data to the OpenTSDB cluster system and then visualizing the data using the visualization kanban board software, Grafana. @@ -44,15 +43,15 @@ The agents deployed in the application nodes are responsible for providing opera First of all, please install TDengine. Download the latest stable version of TDengine from the official website and install it. For help with using various installation packages, please refer to the blog ["Installation and Uninstallation of TDengine Multiple Installation Packages"](https://www.taosdata.com/blog/2019/08/09/566.html). -Note that once the installation is complete, do not start the `taosd` service immediately, but after properly configuring the parameters. +Note that once the installation is complete, do not start the `taosd` service before properly configuring the parameters. - **Adjusting the data collector configuration** TDengine version 2.4 and later versions include `taosAdapter`. taosAdapter is a stateless, rapidly elastic, and scalable component. taosAdapter supports InfluxDB's Line Protocol and OpenTSDB's telnet/JSON writing protocol specification, providing rich data access capabilities, effectively saving user migration costs and reducing the difficulty of user migration. -Users can flexibly deploy taosAdapter instances according to their requirements to rapidly improve the throughput of data writes in conjunction with the needs of scenarios and provide guarantees for data writes in different application scenarios. +Users can flexibly deploy taosAdapter instances, based on their requirements, to improve data writing throughput and provide guarantees for data writes in different application scenarios. -Through taosAdapter, users can directly push the data collected by `collectd` or `StatsD` to TDengine to achieve seamless migration of application scenarios, which is very easy and convenient. taosAdapter also supports Telegraf, Icinga, TCollector, and node_exporter data. 
For more details, please refer to [taosAdapter](/reference/taosadapter/). +Through taosAdapter, users can directly write the data collected by `collectd` or `StatsD` to TDengine to achieve easy, convenient and seamless migration in application scenarios. taosAdapter also supports Telegraf, Icinga, TCollector, and node_exporter data. For more details, please refer to [taosAdapter](/reference/taosadapter/). If using collectd, modify the configuration file in its default location `/etc/collectd/collectd.conf` to point to the IP address and port of the node where taosAdapter is deployed. For example, assuming the taosAdapter IP address is 192.168.1.130 and port 6046, configure it as follows (a minimal sketch of this configuration is shown a little further below). @@ -66,56 +65,55 @@ LoadPlugin write_tsdb ``` -You can use collectd and push the data to taosAdapter utilizing the push to OpenTSDB plugin. taosAdapter will call the API to write the data to TDengine, thus completing the writing of the data. If you are using StatsD, adjust the profile information accordingly. +You can use collectd and push the data to taosAdapter utilizing the write_tsdb plugin. taosAdapter will call the API to write the data to TDengine. If you are using StatsD, adjust the profile information accordingly. - **Tuning the Dashboard system** -After writing the data to TDengine properly, you can adapt Grafana to visualize the data written to TDengine. To obtain and use the Grafana plugin provided by TDengine, please refer to [Links to other tools](/third-party/grafana). +After writing the data to TDengine, you can configure Grafana to visualize the data written to TDengine. To obtain and use the Grafana plugin provided by TDengine, please refer to [Links to other tools](/third-party/grafana). TDengine provides two sets of Dashboard templates by default, and users only need to import the templates from the Grafana directory into Grafana to activate their use. **Importing Grafana Templates** Figure 2. ![TDengine Database IT-DevOps-Solutions-Immigrate-OpenTSDB-Dashboard](./IT-DevOps-Solutions-Immigrate-OpenTSDB-Dashboard.webp "Figure 2. Importing a Grafana Template") -After the above steps, you completed the migration to replace OpenTSDB with TDengine. You can see that the whole process is straightforward, there is no need to write any code, and only some configuration files need to be adjusted to meet the migration work. +With the above steps completed, you have finished replacing OpenTSDB with TDengine. You can see that the whole process is straightforward: there is no need to write any code, and only some configuration files need to be changed. ### 3. Post-migration architecture -After completing the migration, the figure below (Figure 3) shows the system's overall architecture. The whole process of the acquisition side, the data writing, and the monitoring and presentation side are all kept stable, except for a few configuration adjustments, which do not involve any critical changes or alterations. OpenTSDB to TDengine migration action, using TDengine more powerful processing power and query performance. +After completing the migration, the figure below (Figure 3) shows the system's overall architecture. The whole process of the acquisition side, the data writing, and the monitoring and presentation side are all kept stable. There are a few configuration adjustments, which do not involve any critical changes or alterations. Migrating from OpenTSDB to TDengine provides more powerful processing and query performance. 
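Picking up the collectd configuration referenced above, here is a minimal sketch of a write_tsdb section pointing collectd at taosAdapter. The host and port are the assumed values from the example (192.168.1.130:6046); option names follow collectd's write_tsdb plugin documentation and should be verified against your collectd version:

```
LoadPlugin write_tsdb
<Plugin write_tsdb>
        <Node>
                # assumed taosAdapter address and port from the example above
                Host "192.168.1.130"
                Port "6046"
        </Node>
</Plugin>
```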
-In most DevOps scenarios, if you have a small OpenTSDB cluster (3 or fewer nodes) for providing the storage layer of DevOps and rely on OpenTSDB to give a data persistence layer and query capabilities, you can safely replace OpenTSDB with TDengine. TDengine will save more compute and storage resources. With the same compute resource allocation, a single TDengine can meet the service capacity provided by 3 to 5 OpenTSDB nodes. If the scale is more prominent, then TDengine clustering is required. - -Suppose your application is particularly complex, or the application domain is not a DevOps scenario. You can continue reading subsequent chapters for a more comprehensive and in-depth look at the advanced topics of migrating an OpenTSDB application to TDengine. +In most DevOps scenarios, if you have a small OpenTSDB cluster (3 or fewer nodes) which provides a storage and data persistence layer in addition to query capability, you can safely replace OpenTSDB with TDengine. TDengine will save compute and storage resources. With the same compute resource allocation, a single TDengine instance can meet the service capacity provided by 3 to 5 OpenTSDB nodes. TDengine clustering may be required depending on the scale of the application. **Figure 3. System architecture after migration** ![TDengine Database IT-DevOps-Solutions-Immigrate-TDengine-Arch](./IT-DevOps-Solutions-Immigrate-TDengine-Arch.webp "Figure 3. System architecture after migration completion") +The following chapters provide a more comprehensive and in-depth look at the advanced topics of migrating an OpenTSDB application to TDengine. This will be useful if your application is particularly complex and is not a DevOps application. + ## Migration evaluation and strategy for other scenarios ### 1. Differences between TDengine and OpenTSDB This chapter describes the differences between OpenTSDB and TDengine at the system functionality level. After reading this chapter, you can fully evaluate whether you can migrate some complex OpenTSDB-based applications to TDengine, and what you should pay attention to after migration. -TDengine currently only supports Grafana for visual kanban rendering, so if your application uses front-end kanban boards other than Grafana (e.g., [TSDash](https://github.com/facebook/tsdash), [Status Wolf](https://github.com/box/StatusWolf), etc.). You cannot directly migrate those front-end kanbans to TDengine, and the front-end kanban will need to be ported to Grafana to work correctly. +TDengine currently only supports Grafana for visual kanban rendering, so if your application uses front-end kanban boards other than Grafana (e.g., [TSDash](https://github.com/facebook/tsdash), [Status Wolf](https://github.com/box/StatusWolf), etc.) you cannot directly migrate those front-end kanbans to TDengine. The front-end kanban will need to be ported to Grafana to work correctly. -TDengine version 2.3.0.x only supports collectd and StatsD as data collection aggregation software but will provide more data collection aggregation software in the future. +TDengine version 2.3.0.x only supports collectd and StatsD as data collection and aggregation software but future versions will provide support for more data collection and aggregation software. 
If you use other data aggregators on the collection side, your application needs to be ported to these two data aggregation systems to write data correctly. In addition to the protocols of the two data aggregators mentioned above, TDengine also supports writing data directly via InfluxDB's line protocol and OpenTSDB's data writing protocol (JSON format). You can rewrite the logic on the data push side to write data using the line protocols supported by TDengine. In addition, if your application uses the following features of OpenTSDB, you need to take into account the following considerations before migrating your application to TDengine. 1. `/api/stats`: If your application uses this feature to monitor the service status of OpenTSDB, and you have built the relevant logic to link the processing in your application, then this part of the status reading and fetching logic needs to be re-adapted to TDengine. TDengine provides a new mechanism for handling cluster state monitoring to meet the monitoring and maintenance needs of your application. -2. `/api/tree`: If you rely on this feature of OpenTSDB for the hierarchical organization and maintenance of timelines, you cannot migrate it directly to TDengine, which uses a database -> super table -> sub-table hierarchy to organize and maintain timelines, with all timelines belonging to the same super table in the same system hierarchy, but it is possible to simulate a logical multi-level structure of the application through the unique construction of different tag values. +2. `/api/tree`: If you rely on this feature of OpenTSDB for the hierarchical organization and maintenance of timelines, you cannot migrate it directly to TDengine, which uses a database -> super table -> sub-table hierarchy to organize and maintain timelines, with all timelines belonging to the same super table in the same system hierarchy. 
But it is possible to simulate a logical multi-level structure of the application through the unique construction of different tag values. +3. `Rollup And PreAggregates`: The use of Rollup and PreAggregates requires the application to decide where to access the Rollup results and, in some scenarios, to access the actual results. The opacity of this structure makes the application processing logic extraordinarily complex and not portable at all. While TDengine does not currently support automatic downsampling of multiple timelines and preaggregation (for a range of periods), thanks to its high-performance query processing logic, it can provide very high-performance query responses without relying on Rollup and preaggregation (for a range of periods). This makes your application query processing logic straightforward and simple. +4. `Rate`: TDengine provides two functions to calculate the rate of change of values, namely `Derivative` (the result is consistent with the Derivative behavior of InfluxDB) and `IRate` (the result is compatible with the IRate function in Prometheus). However, the results of these two functions are slightly different from that of Rate, but the TDengine functions are more powerful overall. In addition, TDengine supports all the calculation functions provided by OpenTSDB. TDengine's query functions are much more powerful than those supported by OpenTSDB, which can significantly simplify the processing logic of your application (a short sketch of these two functions appears at the end of this chapter). -Through the above introduction, I believe you should be able to understand the changes brought about by the migration of OpenTSDB to TDengine. And this information will also help you correctly determine whether you would migrate your application to TDengine to experience the powerful and convenient time-series data processing capability provided by TDengine. +With the above introduction, we believe you should be able to understand the changes brought about by the migration of OpenTSDB to TDengine. And this information will also help you correctly determine whether you should migrate your application to TDengine to experience the powerful and convenient time-series data processing capability provided by TDengine. ### 2. Migration strategy suggestion -First, the OpenTSDB-based system migration involves data schema design, system scale estimation, and data write end transformation, data streaming, and application adaptation; after that, the two systems will run in parallel for a while and then migrate the historical data to TDengine. Of course, if your application has some functions that strongly depend on the above OpenTSDB features and you do not want to stop using them, you can migrate the historical data to TDengine. -You can consider keeping the original OpenTSDB system running while starting TDengine to provide the primary services. +OpenTSDB-based system migration involves data schema design, system scale estimation, data write transformation, data streaming, and application changes. The two systems should run in parallel for a while, after which the historical data should be migrated to TDengine. If your application has some functions that strongly depend on the above OpenTSDB features and you do not want to stop using them, you can also consider keeping the original OpenTSDB system running while using TDengine to provide the primary services. 
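As promised in point 4 above, a minimal sketch of the two rate functions; the sub-table `cpu_usage_001` and its `value` column are hypothetical names invented for this example:

```mysql
-- per-second rate of change, ignoring negative deltas (third argument set to 1)
select derivative(value, 1s, 1) from cpu_usage_001;
-- instantaneous rate of change computed from the last two samples
select irate(value) from cpu_usage_001;
```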
## Data model design @@ -129,16 +127,19 @@ Let us now assume a DevOps scenario where we use collectd to collect the underly | 2 | swap | value | double | host | swap_type | swap_type_instance | source | n/a | | 3 | disk | value | double | host | disk_point | disk_instance | disk_type | source | -TDengine requires the data stored to have a data schema, i.e., you need to create a super table and specify the schema of the super table before writing the data. For data schema creation, you have two ways to do this: 1) Take advantage of TDengine's native data writing support for OpenTSDB by calling the TDengine API to write (text line or JSON format) -and automate the creation of single-value models. This approach does not require significant adjustments to the data writing application, nor does it require converting the written data format. +TDengine requires the data stored to have a data schema, i.e., you need to create a super table and specify the schema of the super table before writing the data. For data schema creation, you have two ways to do this: +1) Take advantage of TDengine's native data writing support for OpenTSDB by calling the TDengine API to write (text line or JSON format) and automate the creation of single-value models. This approach does not require significant adjustments to the data writing application, nor does it require converting the written data format. At the C level, TDengine provides the `taos_schemaless_insert()` function to write data in OpenTSDB format directly (in early versions this function was named `taos_insert_lines()`). Please refer to the sample code `schemaless.c` in the installation package directory as reference. -(2) based on a complete understanding of TDengine's data model, to establish the mapping relationship between OpenTSDB and TDengine's data model adjustment manually. Considering that OpenTSDB is a single-value mapping model, recommended using the single-value model in TDengine. TDengine can support both multi-value and single-value models. +2) Based on a thorough understanding of TDengine's data model, establish a mapping between OpenTSDB and TDengine's data model. Considering that OpenTSDB is a single-value mapping model, we recommend using the single-value model in TDengine for simplicity. But keep in mind that TDengine supports both multi-value and single-value models. - **Single-valued model**. -The steps are as follows: use the name of the metrics as the name of the TDengine super table, which build with two basic data columns - timestamp and value, and the label of the super table is equivalent to the label information of the metrics, and the number of labels is equal to the number of labels of the metrics. The names of sub-tables are named with fixed rules: `metric + '_' + tags1_value + '_' + tag2_value + '_' + tag3_value ...` as the sub-table name. +The steps are as follows: +- Use the name of the metric as the name of the TDengine super table +- Build with two basic data columns - timestamp and value. The label of the super table is equivalent to the label information of the metric, and the number of labels is equal to the number of labels of the metric. +- Sub-tables are named using a fixed rule: `metric + '_' + tag1_value + '_' + tag2_value + '_' + tag3_value ...`. Create 3 super tables in TDengine. @@ -158,13 +159,13 @@ The final system will have about 340 sub-tables and three super-tables. 
Note tha - **Multi-value model** -Suppose you want to take advantage of TDengine's multi-value modeling capabilities. In that case, you need first to meet the requirements that different collection quantities have the same collection frequency and can reach the **data write side simultaneously via a message queue**, thus ensuring writing multiple metrics at once using SQL statements. The metric's name is used as the name of the super table to create a multi-column model of data that has the same collection frequency and can arrive simultaneously. The names of the sub-tables are named using a fixed rule. Each of the above metrics contains only one measurement value, so converting it into a multi-value model is impossible. +Ideally you should take advantage of TDengine's multi-value modeling capabilities. In that case, you first need to meet the requirement that different collection quantities have the same collection frequency and can reach the **data write side simultaneously via a message queue**, thus ensuring writing multiple metrics at once, using SQL statements. The metric's name is used as the name of the super table to create a multi-column model of data that has the same collection frequency and can arrive simultaneously. The sub-tables are named using a fixed rule. Each of the above metrics contains only one measurement value, so converting it into a multi-value model is impossible. ## Data triage and application adaptation -Subscribe data from the message queue and start the adapted writer to write the data. +Subscribe to the message queue and start writing data to TDengine. -After writing the data starts for a while, you can use SQL statements to check whether the amount of data written meets the expected writing requirements. Use the following SQL statement to count the amount of data. +After data has been written for a while, you can use SQL statements to check whether the amount of data written meets the expected writing requirements. Use the following SQL statement to count the amount of data. ```sql select count(*) from memory @@ -184,7 +185,7 @@ To facilitate historical data migration, we provide a plug-in for the data synch For the specific usage of DataX and how to use DataX to write data to TDengine, please refer to [DataX-based TDengine Data Migration Tool](https://www.taosdata.com/blog/2021/10/26/3156.html). -After migrating via DataX, we found that we can significantly improve the efficiency of migrating historical data by starting multiple processes and migrating numerous metrics simultaneously. The following are some records of the migration process. I wish to use these for application migration as a reference. +After migrating via DataX, we found that we can significantly improve the efficiency of migrating historical data by starting multiple processes and migrating numerous metrics simultaneously. The following are some records of the migration process. We provide these as a reference for application migration. | Number of datax instances (number of concurrent processes) | Migration record speed (records/second) | | ----------------------------------------------------------- | --------------------------------------- | @@ -202,13 +203,13 @@ Suppose you need to use the multi-value model for data writing. In that case, yo Manual migration of data requires attention to the following two issues: -1) When storing the exported data on the disk, the disk needs to have enough storage space to accommodate the exported data files fully. 
-1) When storing the exported data on the disk, the disk needs to have enough storage space to accommodate the exported data files fully. Adopting the partial import mode to avoid the shortage of disk file storage after the total amount of data is exported. Preferentially export the timelines belonging to the same super table. Then the exported data files are imported into the TDengine system.
+1) When storing the exported data on the disk, the disk needs to have enough storage space to accommodate the exported data files fully. To avoid running out of disk space, you can adopt a partial import mode in which you preferentially export the timelines belonging to the same super table and then import only those files into TDengine.
 
-2) Under the full load of the system, if there are enough remaining computing and IO resources, establish a multi-threaded importing to maximize the efficiency of data migration. Considering the vast load that data parsing brings to the CPU, it is necessary to control the maximum number of parallel tasks to avoid the overall overload of the system triggered by importing historical data.
+2) If the system still has sufficient computing and IO resources under full load, you can establish a multi-threaded import to maximize the efficiency of data migration. Considering the heavy load that data parsing places on the CPU, it is necessary to control the maximum number of parallel tasks to avoid overloading the system when importing historical data.
 
 Due to the ease of operation of TDengine itself, there is no need to perform index maintenance and data format change processing in the entire process. The whole process only needs to be executed sequentially.
 
-When wholly importing the historical data into TDengine, the two systems run simultaneously and then switch the query request to TDengine to achieve seamless application switching.
+While importing historical data into TDengine, the two systems should run simultaneously. Once all the data is migrated, switch the query requests to TDengine to achieve seamless application switching.
 
 ## Appendix 1: OpenTSDB query function correspondence table
 
@@ -222,12 +223,12 @@ Example:
 
 SELECT avg(val) FROM (SELECT first(val) FROM super_table WHERE ts >= startTime and ts <= endTime INTERVAL(20s) Fill(linear)) INTERVAL(20s)
 ```
-Remark:
+Remarks:
 
 1. The value in Interval needs to be the same as the interval value in the outer query.
-2. The interpolation processing in TDengine needs to use subqueries to assist in the completion. As shown above, it is enough to specify the interpolation type in the inner query. Since the interpolation of the values in OpenTSDB uses linear interpolation, use fill( in the interpolation clause. linear) to declare the interpolation type. The following functions with the exact interpolation calculation requirements are processed by this method.
-3. The parameter 20s in Interval indicates that the inner query will generate results according to a time window of 20 seconds. In an actual query, it needs to adjust to the time interval between different records. It ensures that producing interpolation results equivalent to the original data.
-4. Due to the particular interpolation strategy and mechanism of OpenTSDB, the method of the first interpolation and then calculation in the aggregate query (Aggregate) makes the calculation results impossible to be utterly consistent with TDengine. But in the case of downsampling (Downsample), TDengine and OpenTSDB can obtain consistent results (since OpenTSDB performs aggregation and downsampling queries).
+2. Interpolation processing in TDengine relies on subqueries. As shown above, it is enough to specify the interpolation type in the inner query. Since OpenTSDB uses linear interpolation, use `fill(linear)` to declare the interpolation type in TDengine. Some of the functions mentioned below have exactly the same interpolation calculation requirements.
+3. The parameter 20s in Interval indicates that the inner query will generate results according to a time window of 20 seconds. In an actual query, it needs to be adjusted to the time interval between records so that the interpolation results are equivalent to the original data.
+4. Due to the particular interpolation strategy and mechanism of OpenTSDB, i.e. interpolation followed by aggregate calculation, it is impossible for the results to be completely consistent with those of TDengine. But in the case of downsampling (Downsample), TDengine and OpenTSDB can obtain consistent results (since OpenTSDB performs aggregation and downsampling queries).
 
 ### Count
 
@@ -261,7 +262,7 @@ Select apercentile(col1, 50, “t-digest”) from table_name
 
 Remark:
 
-1. During the approximate query processing, OpenTSDB uses the t-digest algorithm by default, so in order to obtain the same calculation result, the algorithm used needs to be specified in the `apercentile()` function. TDengine can support two different approximation processing algorithms, declared by "default" and "t-digest" respectively.
+1. When calculating approximate percentiles, OpenTSDB uses the t-digest algorithm by default. In order to obtain the same calculation results in TDengine, the algorithm used needs to be specified in the `apercentile()` function. TDengine supports two different percentile calculation algorithms, named "default" and "t-digest" respectively.
 
 ### First
 
@@ -379,35 +380,34 @@ We still use the hypothetical environment from Chapter 4. There are three measur
 
 ### Storage resource estimation
 
 Assuming that the number of sensor devices that generate data and need to be stored is `n`, the frequency of data generation is `t` per second, and the length of each record is `L` bytes, the scale of data generated per day is `n * t * L` bytes. Assuming the compression ratio is `C`, the daily data size is `(n * t * L)/C` bytes. The storage resources are estimated to accommodate the data scale for 1.5 years. In the production environment, the compression ratio C of TDengine is generally between 5 and 7.
-With additional 20% redundancy, you can calculate the required storage resources:
+With additional 20% redundancy, you can calculate the required storage resources:
 
 ```matlab
 (n * t * L) * (365 * 1.5) * (1+20%)/C
 ```
-
-Combined with the above calculation formula, bring the parameters into the formula, and the raw data scale generated every year is 11.8TB without considering the label information. Note that since tag information is associated with each timeline in TDengine, not every record. The scale of the amount of data to be recorded is somewhat reduced relative to the generated data, and this part of label data can be ignored as a whole. Assuming a compression ratio of 5, the size of the retained data ends up being 2.56 TB.
+Substituting the parameters into the above formula, the raw data generated every year is 11.8TB without considering the label information. Note that tag information is associated with each timeline in TDengine, not with every record, so the amount of data to be recorded is somewhat reduced relative to the generated data, and label data can be ignored as a whole. Assuming a compression ratio of 5, the size of the retained data ends up being 2.56 TB.
 
 ### Storage Device Selection Considerations
 
-The hard disk should be capable of better random read performance. Considering using an SSD as much as possible is a better choice. A disk with better random read performance is a great help to improve the system's query performance and improve the query response performance as a whole system. To obtain better query performance, the performance index of the single-threaded random read IOPS of the hard disk device should not be lower than 1000, and it is better to reach 5000 IOPS or more. Recommend to use `fio` utility software to evaluate the running performance (please refer to Appendix 1 for specific usage) for the random IO read of the current device to confirm whether it can meet the requirements of random read of large files.
+A disk with good random read performance, such as an SSD, improves the query performance of the whole system. To obtain good query performance, the single-threaded random read IOPS of the hard disk device should not be lower than 1000, and it is better to reach 5000 IOPS or more. We recommend using the `fio` utility (please refer to Appendix 1 for specific usage) to evaluate the random read performance of the current device and confirm whether it can meet the requirements of random reads of large files.
 
 Hard disk writing performance has little effect on TDengine. The TDengine writing process adopts the append write mode, so as long as it has good sequential write performance, both SAS hard disks and SSDs in the general sense can well meet TDengine's requirements for disk write performance.
 
 ### Computational resource estimates
 
-Due to the particularity of IoT data, after the frequency of data generation is consistent, the writing process of TDengine maintains a relatively fixed amount of resource consumption (computing and storage). According to the [TDengine Operation and Maintenance Guide](/operation/) description, the system consumes less than 1 CPU core at 22,000 writes per second.
+Due to the characteristics of IoT data, when the frequency of data generation is consistent, the writing process of TDengine maintains a relatively fixed amount of resource consumption (computing and storage). According to the [TDengine Operation and Maintenance Guide](/operation/) description, the system consumes less than 1 CPU core at 22,000 writes per second.
 
-In estimating the CPU resources consumed by the query, assuming that the application requires the database to provide 10,000 QPS, the CPU time consumed by each query is about 1 ms. The query provided by each core per second is 1,000 QPS, which satisfies 10,000 QPS. The query request requires at least 10 cores. For the system as a whole system to have less than 50% CPU load, the entire cluster needs twice as many as 10 cores or 20 cores.
+To estimate the CPU resources consumed by queries, assume that the application requires the database to provide 10,000 QPS and that the CPU time consumed by each query is about 1 ms. Each core then provides 1,000 QPS, so satisfying 10,000 QPS requires at least 10 cores. For the system as a whole to run at less than 50% CPU load, the entire cluster needs twice that many cores, i.e. 20 cores.
 
 ### Memory resource estimation
 
-The database allocates 16MB\*3 buffer memory for each Vnode by default. If the cluster system includes 22 CPU cores, TDengine will create 22 Vnodes (virtual nodes) by default. Each Vnode contains 1000 tables, which can accommodate all the tables. Then it takes about 1.5 hours to write a block, which triggers the drop, and no adjustment is required. A total of 22 Vnodes require about 1GB of memory cache. Considering the memory needed for the query, assuming that the memory overhead of each query is about 50MB, the memory required for 500 queries concurrently is about 25GB.
+The database allocates 16MB\*3 buffer memory for each Vnode by default. If the cluster system includes 22 CPU cores, TDengine will create 22 Vnodes (virtual nodes) by default. Each Vnode contains 1000 tables, which is more than enough to accommodate all the tables in our hypothetical scenario. It then takes about 1.5 hours to fill a block, which triggers persistence to disk without requiring any adjustment. A total of 22 Vnodes require about 1GB of memory cache. Considering the memory needed for queries, assuming that the memory overhead of each query is about 50MB, the memory required for 500 concurrent queries is about 25GB.
 
 In summary, using a single 16-core 32GB machine or a cluster of 2 8-core 16GB machines is enough.
 
 ## Appendix 3: Cluster Deployment and Startup
 
-TDengine provides a wealth of help documents to explain many aspects of cluster installation and deployment. Here is the list of corresponding document for your reference.
+TDengine provides a wealth of help documents explaining many aspects of cluster installation and deployment. Here is a list of those documents for your reference.
 
 ### Cluster Deployment
 
@@ -421,7 +421,7 @@ To ensure that the system can obtain the necessary information for regular opera
 
 FQDN, firstEp, secondEP, dataDir, logDir, tmpDir, serverPort. For the specific meaning and setting requirements of each parameter, please refer to the document "[TDengine Cluster Installation and Management](/cluster/)"
 
-Follow the same steps to set parameters on the nodes that need running, start the taosd service, and then add Dnodes to the cluster.
+Follow the same steps to set parameters on the other nodes, start the taosd service, and then add Dnodes to the cluster.
 
 Finally, start `taos` and execute the `show dnodes` command. If you can see all the nodes that have joined the cluster, the cluster building process was successfully completed. For specific operation procedures and precautions, please refer to the document "[TDengine Cluster Installation and Management](/cluster/)".
diff --git a/docs-en/27-train-faq/01-faq.md b/docs-en/27-train-faq/01-faq.md
index 439775170937ef11fc964914232b2739d688b26f..e182e25b9e98bad11b9c90146400e3720605489e 100644
--- a/docs-en/27-train-faq/01-faq.md
+++ b/docs-en/27-train-faq/01-faq.md
@@ -5,38 +5,38 @@ title: Frequently Asked Questions
 
 ## Submit an Issue
 
-If the tips in FAQ don't help much, please submit an issue on [GitHub](https://github.com/taosdata/TDengine) to describe your problem description, including TDengine version, hardware and OS information, the steps to reproduce the problem, etc. It would be very helpful if you package the contents in `/var/log/taos` and `/etc/taos` and upload. These two are the default directories used by TDengine, if they have been changed in your configuration, please use according to the actual configuration. It's recommended to firstly set `debugFlag` to 135 in `taos.cfg`, restart `taosd`, then reproduce the problem and collect logs. If you don't want to restart, an alternative way of setting `debugFlag` is executing `alter dnode debugFlag 135` command in TDengine CLI `taos`. During normal running, however, please make sure `debugFlag` is set to 131.
+If the tips in FAQ don't help much, please submit an issue on [GitHub](https://github.com/taosdata/TDengine) to describe your problem. In your description please include the TDengine version, hardware and OS information, the steps to reproduce the problem and any other relevant information. It would be very helpful if you can package the contents of `/var/log/taos` and `/etc/taos` and upload them. These two are the default directories used by TDengine. If you have changed the default directories in your configuration, please package the files in your configured directories. We recommend setting `debugFlag` to 135 in `taos.cfg`, restarting `taosd`, then reproducing the problem and collecting the logs. If you don't want to restart, an alternative way of setting `debugFlag` is executing the `alter dnode debugFlag 135` command in the TDengine CLI `taos`. During normal operation, however, please make sure `debugFlag` is set to 131.
 
 ## Frequently Asked Questions
 
 ### 1. How to upgrade to TDengine 2.0 from older version?
 
-version 2.x is not compatible with version 1.x regarding configuration file and data file, please do following before upgrading:
+Version 2.x is not compatible with version 1.x with regard to the configuration and data files; please perform the following steps before upgrading. Please follow your data integrity, security, backup and other relevant SOPs and best practices before removing or deleting any data.
 
-1. Delete configuration files: `sudo rm -rf /etc/taos/taos.cfg`
+1. Delete configuration files: `sudo rm -rf /etc/taos/taos.cfg`
 2. Delete log files: `sudo rm -rf /var/log/taos/`
 3. Delete data files if the data doesn't need to be kept: `sudo rm -rf /var/lib/taos/`
-4. Install latests 2.x version
-5. If the data needs to be kept and migrated to newer version, please contact professional service of TDengine for assistance
+4. Install the latest 2.x version
+5. If the data needs to be kept and migrated to the newer version, please contact TDengine's professional services for assistance.
 
 ### 2. How to handle "Unable to establish connection"?
 
-When the client is unable to connect to the server, you can try following ways to find out why.
+When the client is unable to connect to the server, you can try the following ways to troubleshoot and resolve the problem.
 
 1. Check the network
 
   - Check if the hosts where the client and server are running can be accessible to each other, for example by `ping` command.
   - Check if the TCP/UDP on port 6030-6042 are open for access if firewall is enabled. It's better to firstly disable firewall for diagnostics.
   - Check if the FQDN and serverPort are configured correctly in `taos.cfg` used by the server side
   - Check if the `firstEp` is set properly in the `taos.cfg` used by the client side
+  - Check if the hosts where the client and server are running are accessible to each other, for example by the `ping` command.
+  - Check if TCP/UDP ports 6030-6042 are open for access if a firewall is enabled. If possible, disable the firewall for diagnostics, but please ensure that you are following security and other relevant protocols.
+  - Check if the FQDN and serverPort are configured correctly in the `taos.cfg` used by the server side.
+  - Check if the `firstEp` is set properly in the `taos.cfg` used by the client side.
 
 2. Make sure the client version and server version are the same.
 
 3. On server side, check the running status of `taosd` by executing `systemctl status taosd` . If your server is started using another way instead of `systemctl`, use the proper method to check whether the server process is running normally.
 
-4. If using connector of Python, Java, Go, Rust, C#, node.JS on Linux to connect toe the server, please make sure `libtaos.so` is in directory `/usr/local/taos/driver` and `/usr/local/taos/driver` is in system lib search environment variable `LD_LIBRARY_PATH`.
+4. If using a connector of Python, Java, Go, Rust, C# or node.JS on Linux to connect to the server, please make sure `libtaos.so` is in the directory `/usr/local/taos/driver` and `/usr/local/taos/driver` is in the system lib search environment variable `LD_LIBRARY_PATH`.
 
-5. If using connector on Windows, please make sure `C:\TDengine\driver\taos.dll` is in your system lib search path, it's suggested to put `taos.dll` under `C:\Windows\System32`.
+5. If using a connector on Windows, please make sure `C:\TDengine\driver\taos.dll` is in your system lib search path. We recommend putting `taos.dll` under `C:\Windows\System32`.
 
 6. Some advanced network diagnostics tools
 
@@ -45,7 +45,7 @@ When the client is unable to connect to the server, you can try following ways t
 
    Check whether a TCP port on server side is open: `nc -l {port}`
   Check whether a TCP port on client side is open: `nc {hostIP} {port}`
 
-  - On Windows system `Net-TestConnection -ComputerName {fqdn} -Port {port}` on PowerShell can be used to check whether the port on serer side is open for access.
+  - On Windows systems, `Test-NetConnection -ComputerName {fqdn} -Port {port}` in PowerShell can be used to check whether the port on the server side is open for access.
 
 7. TDengine CLI `taos` can also be used to check network, please refer to [TDengine CLI](/reference/taos-shell).
diff --git a/docs-en/27-train-faq/03-docker.md b/docs-en/27-train-faq/03-docker.md
index 8f27c35d7945043d39ad83626ceccee941ad135e..afee13c1377b0b4331d6f7ec20251d1aa2db81a1 100644
--- a/docs-en/27-train-faq/03-docker.md
+++ b/docs-en/27-train-faq/03-docker.md
@@ -3,15 +3,15 @@ sidebar_label: TDengine in Docker
 title: Deploy TDengine in Docker
 ---
 
-Even though it's not recommended to deploy TDengine using docker in production system, docker is still very useful in development environment, especially when your host is not Linux. From version 2.0.14.0, the official image of TDengine can support X86-64, X86, arm64, and rm32 .
+We do not recommend deploying TDengine using Docker in a production system. However, Docker is still very useful in a development environment, especially when your host is not Linux. From version 2.0.14.0, the official image of TDengine supports X86-64, X86, arm64, and arm32.
 
-In this chapter a simple step by step guide of using TDengine in docker is introduced.
+In this chapter we introduce a simple step-by-step guide to using TDengine in Docker.
 
 ## Install Docker
 
-The installation of docker please refer to [Get Docker](https://docs.docker.com/get-docker/).
+To install Docker please refer to [Get Docker](https://docs.docker.com/get-docker/).
 
-After docker is installed, you can check whether Docker is installed properly by displaying Docker version.
+After Docker is installed, you can check whether Docker is installed properly by displaying the Docker version.
 ```bash
 $ docker -v
 
@@ -27,7 +27,7 @@ $ docker run -d -p 6030-6049:6030-6049 -p 6030-6049:6030-6049/udp tdengine/tdeng
 526aa188da767ae94b244226a2b2eec2b5f17dd8eff592893d9ec0cd0f3a1ccd
 ```
 
-In the above command, a docker container is started to run TDengine server, the port range 6030-6049 of the container is mapped to host port range 6030-6049. If port range 6030-6049 has been occupied on the host, please change to an available host port range. Regarding the requirements about ports on the host, please refer to [Port Configuration](/reference/config/#serverport).
+In the above command, a docker container is started to run the TDengine server; the port range 6030-6049 of the container is mapped to host port range 6030-6049. If port range 6030-6049 is already occupied on the host, please change to an available host port range. For port requirements on the host, please refer to [Port Configuration](/reference/config/#serverport).
 
 - **docker run**: Launch a docker container
 - **-d**: the container will run in background mode
@@ -95,7 +95,7 @@ In TDengine CLI, SQL commands can be executed to create/drop databases, tables,
 
 ### Access TDengine from host
 
-If `-p` used to map ports properly between host and container, it's also able to access TDengine in container from the host as long as `firstEp` is configured correctly for the client on host.
+If option `-p` is used to map ports properly between host and container, you can also access TDengine in the container from the host, as long as `firstEp` is configured correctly for the client on the host.
 
 ```
 $ taos
 
@@ -271,7 +271,7 @@ Below is an example output:
 
 ### Access TDengine from 3rd party tools
 
-A lot of 3rd party tools can be used to write data into TDengine through `taosAdapter` , for details please refer to [3rd party tools](/third-party/).
+A lot of 3rd party tools can be used to write data into TDengine through `taosAdapter`; for details please refer to [3rd party tools](/third-party/).
 
 There is nothing different from the 3rd party side to access TDengine server inside a container, as long as the end point is specified correctly, the end point should be the FQDN and the mapped port of the host.
diff --git a/docs-examples/go/connect/cgoexample/main.go b/docs-examples/go/connect/cgoexample/main.go
index 8b9aba4ce4217c00605bc8796c788f3dd52805e6..ba7ed0f728a1cd546dbc3199ce4c0dc854ebee91 100644
--- a/docs-examples/go/connect/cgoexample/main.go
+++ b/docs-examples/go/connect/cgoexample/main.go
@@ -20,4 +20,4 @@ func main() {
 
 // use
 // var taosDSN = "root:taosdata@tcp(localhost:6030)/dbName"
-// if you want to connect to a default database.
+// if you want to connect to a specified database named "dbName".
diff --git a/docs-examples/go/connect/restexample/main.go b/docs-examples/go/connect/restexample/main.go
index 9c05e7eed80dee4ae7e6b20637d265f388d7438d..1efc98b988c183c4c680884057bf2a72a9dd19e9 100644
--- a/docs-examples/go/connect/restexample/main.go
+++ b/docs-examples/go/connect/restexample/main.go
@@ -18,6 +18,6 @@ func main() {
 	defer taos.Close()
 }
 
-// use
+// use
 // var taosDSN = "root:taosdata@http(localhost:6041)/dbName"
-// if you want to connect to a default database.
+// if you want to connect to a specified database named "dbName".
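The Go DSN comments above (and the Java JDBC comment just below) all make the same point: naming a database in the connection string selects it at connect time. For completeness, a minimal hedged sketch of the C-API equivalent; the endpoint, credentials, and `dbName` are illustrative placeholders, not part of this patch:

```c
// Minimal sketch (not part of this patch): the C equivalent of the DSN
// comments above. The fourth argument of taos_connect() names the database
// to use for the connection; host, user, password and "dbName" here are
// illustrative placeholders.
#include <stdio.h>
#include "taos.h"

int main() {
  TAOS *conn = taos_connect("localhost", "root", "taosdata", "dbName", 6030);
  if (conn == NULL) {
    printf("failed to connect to server\n");
    return 1;
  }
  printf("connected, default database is dbName\n");
  taos_close(conn);
  return 0;
}
```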
diff --git a/docs-examples/java/src/main/java/com/taos/example/JNIConnectExample.java b/docs-examples/java/src/main/java/com/taos/example/JNIConnectExample.java
index c6ce2ef9785a010daa55ad29415f81711760cd57..84292f7e8682dbb8171c807da74a603f4ae8256e 100644
--- a/docs-examples/java/src/main/java/com/taos/example/JNIConnectExample.java
+++ b/docs-examples/java/src/main/java/com/taos/example/JNIConnectExample.java
@@ -22,4 +22,4 @@ public class JNIConnectExample {
 
 // use
 // String jdbcUrl = "jdbc:TAOS://localhost:6030/dbName?user=root&password=taosdata";
-// if you want to connect to a default database.
\ No newline at end of file
+// if you want to connect to a specified database named "dbName".
\ No newline at end of file
diff --git a/example/src/tmq.c b/example/src/tmq.c
index 913096ee90294cf65ba81d605ed3e7d4f2fa803c..7e4de21f2eeeedde3b252bc9eae407fd3f1cc7d9 100644
--- a/example/src/tmq.c
+++ b/example/src/tmq.c
@@ -106,7 +106,7 @@ int32_t create_topic() {
   }
   taos_free_result(pRes);
 
-  /*pRes = taos_query(pConn, "create topic topic_ctb_column as abc1");*/
+  /*pRes = taos_query(pConn, "create topic topic_ctb_column as database abc1");*/
   pRes = taos_query(pConn, "create topic topic_ctb_column as select ts, c1, c2, c3 from st1");
   if (taos_errno(pRes) != 0) {
     printf("failed to create topic topic_ctb_column, reason:%s\n", taos_errstr(pRes));
diff --git a/include/common/tmsg.h b/include/common/tmsg.h
index faf4addb4b5cc242c6cd1014fb64d7f004aabe43..07605c735c8ff7225bc02d3270903f3e9d3a3500 100644
--- a/include/common/tmsg.h
+++ b/include/common/tmsg.h
@@ -1439,8 +1439,10 @@ typedef struct {
   int32_t code;
 } STaskDropRsp;
 
-#define STREAM_TRIGGER_AT_ONCE      1
-#define STREAM_TRIGGER_WINDOW_CLOSE 2
+#define STREAM_TRIGGER_AT_ONCE_SMA      0
+#define STREAM_TRIGGER_AT_ONCE          1
+#define STREAM_TRIGGER_WINDOW_CLOSE     2
+#define STREAM_TRIGGER_WINDOW_CLOSE_SMA 3
 
 typedef struct {
   char name[TSDB_TABLE_FNAME_LEN];
@@ -1472,15 +1474,22 @@ typedef struct {
   int64_t streamId;
 } SMVCreateStreamRsp, SMSCreateStreamRsp;
 
+enum {
+  TOPIC_SUB_TYPE__DB = 1,
+  TOPIC_SUB_TYPE__TABLE,
+  TOPIC_SUB_TYPE__COLUMN,
+};
+
 typedef struct {
   char   name[TSDB_TOPIC_FNAME_LEN];  // account.topic
   int8_t igExists;
-  int8_t withTbName;
-  int8_t withSchema;
-  int8_t withTag;
+  int8_t subType;
   char*  sql;
-  char*  ast;
-  char   subscribeDbName[TSDB_DB_NAME_LEN];
+  char   subDbName[TSDB_DB_FNAME_LEN];
+  union {
+    char* ast;
+    char  subStbName[TSDB_TABLE_FNAME_LEN];
+  };
 } SCMCreateTopicReq;
 
 int32_t tSerializeSCMCreateTopicReq(void* buf, int32_t bufLen, const SCMCreateTopicReq* pReq);
@@ -2144,11 +2153,6 @@ static FORCE_INLINE void* taosDecodeSMqMsg(void* buf, SMqHbMsg* pMsg) {
   return buf;
 }
 
-enum {
-  TOPIC_SUB_TYPE__DB = 1,
-  TOPIC_SUB_TYPE__TABLE,
-};
-
 typedef struct {
   SMsgHead head;
   int64_t  leftForVer;
@@ -2168,10 +2172,10 @@ typedef struct {
   int64_t newConsumerId;
   char    subKey[TSDB_SUBSCRIBE_KEY_LEN];
   int8_t  subType;
-  int8_t  withTbName;
-  int8_t  withSchema;
-  int8_t  withTag;
-  char*   qmsg;
+  // int8_t  withTbName;
+  // int8_t  withSchema;
+  // int8_t  withTag;
+  char* qmsg;
 } SMqRebVgReq;
 
 static FORCE_INLINE int32_t tEncodeSMqRebVgReq(void** buf, const SMqRebVgReq* pReq) {
@@ -2182,10 +2186,10 @@ static FORCE_INLINE int32_t tEncodeSMqRebVgReq(void** buf, const SMqRebVgReq* pR
   tlen += taosEncodeFixedI64(buf, pReq->newConsumerId);
   tlen += taosEncodeString(buf, pReq->subKey);
   tlen += taosEncodeFixedI8(buf, pReq->subType);
-  tlen += taosEncodeFixedI8(buf, 
pReq->withTag); - if (pReq->subType == TOPIC_SUB_TYPE__TABLE) { + // tlen += taosEncodeFixedI8(buf, pReq->withTbName); + // tlen += taosEncodeFixedI8(buf, pReq->withSchema); + // tlen += taosEncodeFixedI8(buf, pReq->withTag); + if (pReq->subType == TOPIC_SUB_TYPE__COLUMN) { tlen += taosEncodeString(buf, pReq->qmsg); } return tlen; @@ -2198,10 +2202,10 @@ static FORCE_INLINE void* tDecodeSMqRebVgReq(const void* buf, SMqRebVgReq* pReq) buf = taosDecodeFixedI64(buf, &pReq->newConsumerId); buf = taosDecodeStringTo(buf, pReq->subKey); buf = taosDecodeFixedI8(buf, &pReq->subType); - buf = taosDecodeFixedI8(buf, &pReq->withTbName); - buf = taosDecodeFixedI8(buf, &pReq->withSchema); - buf = taosDecodeFixedI8(buf, &pReq->withTag); - if (pReq->subType == TOPIC_SUB_TYPE__TABLE) { + // buf = taosDecodeFixedI8(buf, &pReq->withTbName); + // buf = taosDecodeFixedI8(buf, &pReq->withSchema); + // buf = taosDecodeFixedI8(buf, &pReq->withTag); + if (pReq->subType == TOPIC_SUB_TYPE__COLUMN) { buf = taosDecodeString(buf, &pReq->qmsg); } return (void*)buf; diff --git a/include/common/ttokendef.h b/include/common/ttokendef.h index 2fc524eeac39eefba6ce87c39d7bf4746fd83de1..c3b0e54f3da416ccc2e2b2dbd6c05ec356a50a30 100644 --- a/include/common/ttokendef.h +++ b/include/common/ttokendef.h @@ -127,134 +127,131 @@ #define TK_BLOB 109 #define TK_VARBINARY 110 #define TK_DECIMAL 111 -#define TK_DELAY 112 -#define TK_FILE_FACTOR 113 -#define TK_NK_FLOAT 114 -#define TK_ROLLUP 115 -#define TK_TTL 116 -#define TK_SMA 117 -#define TK_SHOW 118 -#define TK_DATABASES 119 -#define TK_TABLES 120 -#define TK_STABLES 121 -#define TK_MNODES 122 -#define TK_MODULES 123 -#define TK_QNODES 124 -#define TK_FUNCTIONS 125 -#define TK_INDEXES 126 -#define TK_ACCOUNTS 127 -#define TK_APPS 128 -#define TK_CONNECTIONS 129 -#define TK_LICENCE 130 -#define TK_GRANTS 131 -#define TK_QUERIES 132 -#define TK_SCORES 133 -#define TK_TOPICS 134 -#define TK_VARIABLES 135 -#define TK_BNODES 136 -#define TK_SNODES 137 -#define TK_CLUSTER 138 -#define TK_TRANSACTIONS 139 -#define TK_LIKE 140 -#define TK_INDEX 141 -#define TK_FULLTEXT 142 -#define TK_FUNCTION 143 -#define TK_INTERVAL 144 -#define TK_TOPIC 145 -#define TK_AS 146 -#define TK_CGROUP 147 -#define TK_WITH 148 -#define TK_SCHEMA 149 -#define TK_DESC 150 -#define TK_DESCRIBE 151 -#define TK_RESET 152 -#define TK_QUERY 153 -#define TK_CACHE 154 -#define TK_EXPLAIN 155 -#define TK_ANALYZE 156 -#define TK_VERBOSE 157 -#define TK_NK_BOOL 158 -#define TK_RATIO 159 -#define TK_COMPACT 160 -#define TK_VNODES 161 -#define TK_IN 162 -#define TK_OUTPUTTYPE 163 -#define TK_AGGREGATE 164 -#define TK_BUFSIZE 165 -#define TK_STREAM 166 -#define TK_INTO 167 -#define TK_TRIGGER 168 -#define TK_AT_ONCE 169 -#define TK_WINDOW_CLOSE 170 -#define TK_WATERMARK 171 -#define TK_KILL 172 -#define TK_CONNECTION 173 -#define TK_TRANSACTION 174 -#define TK_MERGE 175 -#define TK_VGROUP 176 -#define TK_REDISTRIBUTE 177 -#define TK_SPLIT 178 -#define TK_SYNCDB 179 -#define TK_NULL 180 -#define TK_NK_QUESTION 181 -#define TK_NK_ARROW 182 -#define TK_ROWTS 183 -#define TK_TBNAME 184 -#define TK_QSTARTTS 185 -#define TK_QENDTS 186 -#define TK_WSTARTTS 187 -#define TK_WENDTS 188 -#define TK_WDURATION 189 -#define TK_CAST 190 -#define TK_NOW 191 -#define TK_TODAY 192 -#define TK_TIMEZONE 193 -#define TK_COUNT 194 -#define TK_FIRST 195 -#define TK_LAST 196 -#define TK_LAST_ROW 197 -#define TK_BETWEEN 198 -#define TK_IS 199 -#define TK_NK_LT 200 -#define TK_NK_GT 201 -#define TK_NK_LE 202 -#define TK_NK_GE 203 -#define TK_NK_NE 204 
-#define TK_MATCH 205 -#define TK_NMATCH 206 -#define TK_CONTAINS 207 -#define TK_JOIN 208 -#define TK_INNER 209 -#define TK_SELECT 210 -#define TK_DISTINCT 211 -#define TK_WHERE 212 -#define TK_PARTITION 213 -#define TK_BY 214 -#define TK_SESSION 215 -#define TK_STATE_WINDOW 216 -#define TK_SLIDING 217 -#define TK_FILL 218 -#define TK_VALUE 219 -#define TK_NONE 220 -#define TK_PREV 221 -#define TK_LINEAR 222 -#define TK_NEXT 223 -#define TK_GROUP 224 -#define TK_HAVING 225 -#define TK_ORDER 226 -#define TK_SLIMIT 227 -#define TK_SOFFSET 228 -#define TK_LIMIT 229 -#define TK_OFFSET 230 -#define TK_ASC 231 -#define TK_NULLS 232 -#define TK_ID 233 -#define TK_NK_BITNOT 234 -#define TK_INSERT 235 -#define TK_VALUES 236 -#define TK_IMPORT 237 -#define TK_NK_SEMI 238 -#define TK_FILE 239 +#define TK_FILE_FACTOR 112 +#define TK_NK_FLOAT 113 +#define TK_ROLLUP 114 +#define TK_TTL 115 +#define TK_SMA 116 +#define TK_SHOW 117 +#define TK_DATABASES 118 +#define TK_TABLES 119 +#define TK_STABLES 120 +#define TK_MNODES 121 +#define TK_MODULES 122 +#define TK_QNODES 123 +#define TK_FUNCTIONS 124 +#define TK_INDEXES 125 +#define TK_ACCOUNTS 126 +#define TK_APPS 127 +#define TK_CONNECTIONS 128 +#define TK_LICENCE 129 +#define TK_GRANTS 130 +#define TK_QUERIES 131 +#define TK_SCORES 132 +#define TK_TOPICS 133 +#define TK_VARIABLES 134 +#define TK_BNODES 135 +#define TK_SNODES 136 +#define TK_CLUSTER 137 +#define TK_TRANSACTIONS 138 +#define TK_LIKE 139 +#define TK_INDEX 140 +#define TK_FULLTEXT 141 +#define TK_FUNCTION 142 +#define TK_INTERVAL 143 +#define TK_TOPIC 144 +#define TK_AS 145 +#define TK_CONSUMER 146 +#define TK_GROUP 147 +#define TK_DESC 148 +#define TK_DESCRIBE 149 +#define TK_RESET 150 +#define TK_QUERY 151 +#define TK_CACHE 152 +#define TK_EXPLAIN 153 +#define TK_ANALYZE 154 +#define TK_VERBOSE 155 +#define TK_NK_BOOL 156 +#define TK_RATIO 157 +#define TK_COMPACT 158 +#define TK_VNODES 159 +#define TK_IN 160 +#define TK_OUTPUTTYPE 161 +#define TK_AGGREGATE 162 +#define TK_BUFSIZE 163 +#define TK_STREAM 164 +#define TK_INTO 165 +#define TK_TRIGGER 166 +#define TK_AT_ONCE 167 +#define TK_WINDOW_CLOSE 168 +#define TK_WATERMARK 169 +#define TK_KILL 170 +#define TK_CONNECTION 171 +#define TK_TRANSACTION 172 +#define TK_MERGE 173 +#define TK_VGROUP 174 +#define TK_REDISTRIBUTE 175 +#define TK_SPLIT 176 +#define TK_SYNCDB 177 +#define TK_NULL 178 +#define TK_NK_QUESTION 179 +#define TK_NK_ARROW 180 +#define TK_ROWTS 181 +#define TK_TBNAME 182 +#define TK_QSTARTTS 183 +#define TK_QENDTS 184 +#define TK_WSTARTTS 185 +#define TK_WENDTS 186 +#define TK_WDURATION 187 +#define TK_CAST 188 +#define TK_NOW 189 +#define TK_TODAY 190 +#define TK_TIMEZONE 191 +#define TK_COUNT 192 +#define TK_FIRST 193 +#define TK_LAST 194 +#define TK_LAST_ROW 195 +#define TK_BETWEEN 196 +#define TK_IS 197 +#define TK_NK_LT 198 +#define TK_NK_GT 199 +#define TK_NK_LE 200 +#define TK_NK_GE 201 +#define TK_NK_NE 202 +#define TK_MATCH 203 +#define TK_NMATCH 204 +#define TK_CONTAINS 205 +#define TK_JOIN 206 +#define TK_INNER 207 +#define TK_SELECT 208 +#define TK_DISTINCT 209 +#define TK_WHERE 210 +#define TK_PARTITION 211 +#define TK_BY 212 +#define TK_SESSION 213 +#define TK_STATE_WINDOW 214 +#define TK_SLIDING 215 +#define TK_FILL 216 +#define TK_VALUE 217 +#define TK_NONE 218 +#define TK_PREV 219 +#define TK_LINEAR 220 +#define TK_NEXT 221 +#define TK_HAVING 222 +#define TK_ORDER 223 +#define TK_SLIMIT 224 +#define TK_SOFFSET 225 +#define TK_LIMIT 226 +#define TK_OFFSET 227 +#define TK_ASC 228 +#define TK_NULLS 229 +#define 
TK_ID 230 +#define TK_NK_BITNOT 231 +#define TK_INSERT 232 +#define TK_VALUES 233 +#define TK_IMPORT 234 +#define TK_NK_SEMI 235 +#define TK_FILE 236 #define TK_NK_SPACE 300 #define TK_NK_COMMENT 301 diff --git a/include/dnode/qnode/qnode.h b/include/dnode/qnode/qnode.h index 1ab101f705ac3f71fad134c200a22f903e4a8e86..90a952939577fc9cd945d0dc9fd8bde8d906667f 100644 --- a/include/dnode/qnode/qnode.h +++ b/include/dnode/qnode/qnode.h @@ -26,14 +26,17 @@ extern "C" { typedef struct SQnode SQnode; typedef struct { - int64_t numOfStartTask; - int64_t numOfStopTask; - int64_t numOfRecvedFetch; - int64_t numOfSentHb; - int64_t numOfSentFetch; - int64_t numOfTaskInQueue; + int64_t numOfProcessedQuery; + int64_t numOfProcessedCQuery; + int64_t numOfProcessedFetch; + int64_t numOfProcessedDrop; + int64_t memSizeInCache; + int64_t dataSizeSend; + int64_t dataSizeRecv; + int64_t numOfQueryInQueue; int64_t numOfFetchInQueue; - int64_t numOfErrors; + int64_t waitTimeInQueryQUeue; + int64_t waitTimeInFetchQUeue; } SQnodeLoad; typedef struct { @@ -71,10 +74,10 @@ int32_t qndGetLoad(SQnode *pQnode, SQnodeLoad *pLoad); * @param pQnode The qnode object. * @param pMsg The request message */ -int32_t qndProcessQueryMsg(SQnode *pQnode, SRpcMsg *pMsg); +int32_t qndProcessQueryMsg(SQnode *pQnode, int64_t ts, SRpcMsg *pMsg); #ifdef __cplusplus } #endif -#endif /*_TD_QNODE_H_*/ \ No newline at end of file +#endif /*_TD_QNODE_H_*/ diff --git a/include/libs/nodes/cmdnodes.h b/include/libs/nodes/cmdnodes.h index 7bd3a40c7199f204bd14e5af3231e59d5b7383be..82924bef3f206911b803ace70ea15435dc29e882 100644 --- a/include/libs/nodes/cmdnodes.h +++ b/include/libs/nodes/cmdnodes.h @@ -80,8 +80,7 @@ typedef struct SAlterDatabaseStmt { typedef struct STableOptions { ENodeType type; char comment[TSDB_TB_COMMENT_LEN]; - int32_t delay; - float filesFactor; + double filesFactor; SNodeList* pRollupFuncs; int32_t ttl; SNodeList* pSma; @@ -239,20 +238,13 @@ typedef struct SDropComponentNodeStmt { int32_t dnodeId; } SDropComponentNodeStmt; -typedef struct STopicOptions { - ENodeType type; - bool withTable; - bool withSchema; - bool withTag; -} STopicOptions; - typedef struct SCreateTopicStmt { - ENodeType type; - char topicName[TSDB_TABLE_NAME_LEN]; - char subscribeDbName[TSDB_DB_NAME_LEN]; - bool ignoreExists; - SNode* pQuery; - STopicOptions* pOptions; + ENodeType type; + char topicName[TSDB_TABLE_NAME_LEN]; + char subDbName[TSDB_DB_NAME_LEN]; + char subSTbName[TSDB_TABLE_NAME_LEN]; + bool ignoreExists; + SNode* pQuery; } SCreateTopicStmt; typedef struct SDropTopicStmt { diff --git a/include/libs/nodes/nodes.h b/include/libs/nodes/nodes.h index 38602667252e429eb9840c75d2c23b98139df184..d960ccbd65d835dd4fc098777b2adb08b7361cfe 100644 --- a/include/libs/nodes/nodes.h +++ b/include/libs/nodes/nodes.h @@ -95,7 +95,6 @@ typedef enum ENodeType { QUERY_NODE_INDEX_OPTIONS, QUERY_NODE_EXPLAIN_OPTIONS, QUERY_NODE_STREAM_OPTIONS, - QUERY_NODE_TOPIC_OPTIONS, QUERY_NODE_LEFT_VALUE, // Statement nodes are used in parser and planner module. 
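Taken together, the `tmsg.h` and `cmdnodes.h` changes above replace the three `with*` flags with a single `subType` discriminant, and the union in `SCMCreateTopicReq` is only meaningful relative to that tag. Below is a hedged sketch of how a reader of the struct is expected to branch; `topicSource` is a hypothetical helper for illustration, not a function in this patch:

```c
// Hypothetical helper (not in this patch) illustrating how the subType tag
// selects the active member of the SCMCreateTopicReq union declared in
// include/common/tmsg.h above.
const char* topicSource(const SCMCreateTopicReq* pReq) {
  switch (pReq->subType) {
    case TOPIC_SUB_TYPE__DB:
      return pReq->subDbName;   // subscribe to every table in a database
    case TOPIC_SUB_TYPE__TABLE:
      return pReq->subStbName;  // subscribe to one super table
    case TOPIC_SUB_TYPE__COLUMN:
      return pReq->ast;         // subscribe to a column projection (query AST)
    default:
      return NULL;              // unknown subType: union must not be read
  }
}
```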
diff --git a/include/libs/nodes/plannodes.h b/include/libs/nodes/plannodes.h index 2648a468dd3fa82fe91825d60b739387d9255bd7..44e7295b6939ff65b7dfc0310eddd486c21917a7 100644 --- a/include/libs/nodes/plannodes.h +++ b/include/libs/nodes/plannodes.h @@ -59,6 +59,7 @@ typedef struct SScanLogicNode { int8_t triggerType; int64_t watermark; int16_t tsColId; + double filesFactor; } SScanLogicNode; typedef struct SJoinLogicNode { @@ -113,6 +114,7 @@ typedef struct SWindowLogicNode { SNode* pStateExpr; int8_t triggerType; int64_t watermark; + double filesFactor; } SWindowLogicNode; typedef struct SFillLogicNode { @@ -222,6 +224,7 @@ typedef struct STableScanPhysiNode { int8_t triggerType; int64_t watermark; int16_t tsColId; + double filesFactor; } STableScanPhysiNode; typedef STableScanPhysiNode STableSeqScanPhysiNode; @@ -272,6 +275,7 @@ typedef struct SWinodwPhysiNode { SNode* pTspk; // timestamp primary key int8_t triggerType; int64_t watermark; + double filesFactor; } SWinodwPhysiNode; typedef struct SIntervalPhysiNode { diff --git a/include/libs/parser/parser.h b/include/libs/parser/parser.h index 1466bb7400950b9f80b998063ebf67a7163589bc..06272b81514cec2a294da513ec2a57447ad74ef1 100644 --- a/include/libs/parser/parser.h +++ b/include/libs/parser/parser.h @@ -55,9 +55,9 @@ int32_t qParseSql(SParseContext* pCxt, SQuery** pQuery); bool qIsInsertSql(const char* pStr, size_t length); // for async mode -int32_t qSyntaxParseSql(SParseContext* pCxt, SQuery** pQuery, struct SCatalogReq* pCatalogReq); -int32_t qSemanticAnalysisSql(SParseContext* pCxt, const struct SCatalogReq* pCatalogReq, - const struct SMetaData* pMetaData, SQuery* pQuery); +int32_t qParseSqlSyntax(SParseContext* pCxt, SQuery** pQuery, struct SCatalogReq* pCatalogReq); +int32_t qAnalyseSqlSemantic(SParseContext* pCxt, const struct SCatalogReq* pCatalogReq, + const struct SMetaData* pMetaData, SQuery* pQuery); void qDestroyQuery(SQuery* pQueryNode); diff --git a/include/libs/planner/planner.h b/include/libs/planner/planner.h index c4f71e57a8174c62cf331e4afec35604786282a0..6e14445ac7abc12eb4ccca7a9f08f3e7ff43ddb8 100644 --- a/include/libs/planner/planner.h +++ b/include/libs/planner/planner.h @@ -36,6 +36,7 @@ typedef struct SPlanContext { int64_t watermark; char* pMsg; int32_t msgLen; + double filesFactor; } SPlanContext; // Create the physical plan for the query, according to the AST. 
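The renames in `parser.h` above make the two halves of the async parse path read as a pair. A minimal sketch of the intended call order follows, assuming the caller resolves the catalog request between the two phases; the metadata fetch itself is elided, and `SCatalogReq`/`SMetaData` are assumed to be complete types pulled in from the catalog headers:

```c
// Hedged caller sketch (not in this patch) for the async two-phase parse API
// renamed above in include/libs/parser/parser.h.
int32_t parseSqlAsync(SParseContext* pCxt, SQuery** ppQuery) {
  SCatalogReq catalogReq = {0};
  // Phase 1: syntax-only parse; also records which metadata the query needs.
  int32_t code = qParseSqlSyntax(pCxt, ppQuery, &catalogReq);
  if (code != 0) return code;

  SMetaData metaData = {0};
  // Assumed step: ask the catalog module to fill metaData per catalogReq.
  // ... metadata fetch elided ...

  // Phase 2: semantic analysis against the fetched metadata.
  return qAnalyseSqlSemantic(pCxt, &catalogReq, &metaData, *ppQuery);
}
```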
diff --git a/include/libs/qworker/qworker.h b/include/libs/qworker/qworker.h index 9e3b318019e6a689ed8b976870659f4890bcec44..5942d00cb212002d5309cec4cba253dc7e3d7388 100644 --- a/include/libs/qworker/qworker.h +++ b/include/libs/qworker/qworker.h @@ -52,22 +52,24 @@ typedef struct { int32_t qWorkerInit(int8_t nodeType, int32_t nodeId, SQWorkerCfg *cfg, void **qWorkerMgmt, const SMsgCb *pMsgCb); -int32_t qWorkerProcessQueryMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg); +int32_t qWorkerProcessQueryMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, int64_t ts); -int32_t qWorkerProcessCQueryMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg); +int32_t qWorkerProcessCQueryMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, int64_t ts); -int32_t qWorkerProcessFetchMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg); +int32_t qWorkerProcessFetchMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, int64_t ts); -int32_t qWorkerProcessFetchRsp(void *node, void *qWorkerMgmt, SRpcMsg *pMsg); +int32_t qWorkerProcessFetchRsp(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, int64_t ts); -int32_t qWorkerProcessCancelMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg); +int32_t qWorkerProcessCancelMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, int64_t ts); -int32_t qWorkerProcessDropMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg); +int32_t qWorkerProcessDropMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, int64_t ts); -int32_t qWorkerProcessHbMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg); +int32_t qWorkerProcessHbMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, int64_t ts); void qWorkerDestroy(void **qWorkerMgmt); +int64_t qWorkerGetWaitTimeInQueue(void *qWorkerMgmt, EQueueType type); + #ifdef __cplusplus } #endif diff --git a/include/libs/sync/sync.h b/include/libs/sync/sync.h index 2e04afdbdc8d06029808da29398392f481832d75..a587ad6ef22fb80538147a61980ae4cdadd8ec03 100644 --- a/include/libs/sync/sync.h +++ b/include/libs/sync/sync.h @@ -66,12 +66,6 @@ typedef struct SSyncCfg { SNodeInfo nodeInfo[TSDB_MAX_REPLICA]; } SSyncCfg; -typedef struct SSnapshot { - void* data; - SyncIndex lastApplyIndex; - SyncTerm lastApplyTerm; -} SSnapshot; - typedef struct SFsmCbMeta { SyncIndex index; bool isWeak; @@ -93,6 +87,12 @@ typedef struct SReConfigCbMeta { uint64_t flag; } SReConfigCbMeta; +typedef struct SSnapshot { + void *data; + SyncIndex lastApplyIndex; + SyncTerm lastApplyTerm; +} SSnapshot; + typedef struct SSyncFSM { void* data; @@ -101,23 +101,17 @@ typedef struct SSyncFSM { void (*FpRollBackCb)(struct SSyncFSM* pFsm, const SRpcMsg* pMsg, SFsmCbMeta cbMeta); void (*FpRestoreFinishCb)(struct SSyncFSM* pFsm); - int32_t (*FpGetSnapshot)(struct SSyncFSM* pFsm, SSnapshot* pSnapshot); - - // if (*ppIter == NULL) - // *ppIter = new iter; - // else - // *ppIter.next(); - // - // if success, return 0. 
else return error code - int32_t (*FpSnapshotRead)(struct SSyncFSM* pFsm, const SSnapshot* pSnapshot, void** ppIter, char** ppBuf, - int32_t* len); + void (*FpReConfigCb)(struct SSyncFSM* pFsm, SSyncCfg newCfg, SReConfigCbMeta cbMeta); - // apply data into fsm - int32_t (*FpSnapshotApply)(struct SSyncFSM* pFsm, const SSnapshot* pSnapshot, char* pBuf, int32_t len); + int32_t (*FpGetSnapshot)(struct SSyncFSM* pFsm, SSnapshot* pSnapshot); - void (*FpReConfigCb)(struct SSyncFSM* pFsm, SSyncCfg newCfg, SReConfigCbMeta cbMeta); + int32_t (*FpSnapshotStartRead)(struct SSyncFSM* pFsm, void** ppReader); + int32_t (*FpSnapshotStopRead)(struct SSyncFSM* pFsm, void* pReader); + int32_t (*FpSnapshotDoRead)(struct SSyncFSM* pFsm, void* pReader, void** ppBuf, int32_t* len); - // int32_t (*FpRestoreSnapshot)(struct SSyncFSM* pFsm, const SSnapshot* snapshot); + int32_t (*FpSnapshotStartWrite)(struct SSyncFSM* pFsm, void** ppWriter); + int32_t (*FpSnapshotStopWrite)(struct SSyncFSM* pFsm, void* pWriter, bool isApply); + int32_t (*FpSnapshotDoWrite)(struct SSyncFSM* pFsm, void* pWriter, void* pBuf, int32_t len); } SSyncFSM; diff --git a/include/util/taoserror.h b/include/util/taoserror.h index b0bd1dc31982042857c3fd203e71b45a87ad77aa..65cfe8de0be9e387cecba70141c0bab513d6fc63 100644 --- a/include/util/taoserror.h +++ b/include/util/taoserror.h @@ -69,6 +69,7 @@ int32_t* taosGetErrno(); #define TSDB_CODE_DUP_KEY TAOS_DEF_ERROR_CODE(0, 0x0027) #define TSDB_CODE_NEED_RETRY TAOS_DEF_ERROR_CODE(0, 0x0028) #define TSDB_CODE_OUT_OF_RPC_MEMORY_QUEUE TAOS_DEF_ERROR_CODE(0, 0x0029) +#define TSDB_CODE_INVALID_TIMESTAMP TAOS_DEF_ERROR_CODE(0, 0x0030) #define TSDB_CODE_REF_NO_MEMORY TAOS_DEF_ERROR_CODE(0, 0x0040) #define TSDB_CODE_REF_FULL TAOS_DEF_ERROR_CODE(0, 0x0041) diff --git a/include/util/tdef.h b/include/util/tdef.h index ad7206f7bb1d5840a779cfcddbff6680538ad8d8..839fd0b26318a9cf6db6b4fd5178a51a7ec6d0ed 100644 --- a/include/util/tdef.h +++ b/include/util/tdef.h @@ -254,6 +254,7 @@ typedef enum ELogicConditionType { #define TSDB_TRANS_STAGE_LEN 12 #define TSDB_TRANS_TYPE_LEN 16 #define TSDB_TRANS_ERROR_LEN 64 +#define TSDB_TRANS_DESC_LEN 128 #define TSDB_STEP_NAME_LEN 32 #define TSDB_STEP_DESC_LEN 128 @@ -344,9 +345,6 @@ typedef enum ELogicConditionType { #define TSDB_MIN_ROLLUP_FILE_FACTOR 0 #define TSDB_MAX_ROLLUP_FILE_FACTOR 1 #define TSDB_DEFAULT_ROLLUP_FILE_FACTOR 0.1 -#define TSDB_MIN_ROLLUP_DELAY 1 -#define TSDB_MAX_ROLLUP_DELAY 10 -#define TSDB_DEFAULT_ROLLUP_DELAY 2 #define TSDB_MIN_TABLE_TTL 0 #define TSDB_DEFAULT_TABLE_TTL 0 @@ -368,7 +366,11 @@ typedef enum ELogicConditionType { #define PRIMARYKEY_TIMESTAMP_COL_ID 1 #define COL_REACH_END(colId, maxColId) ((colId) > (maxColId)) +#ifdef WINDOWS +#define TSDB_MAX_RPC_THREADS 4 // windows pipe only support 4 connections. 
+#else #define TSDB_MAX_RPC_THREADS 5 +#endif #define TSDB_QUERY_TYPE_NON_TYPE 0x00u // none type #define TSDB_QUERY_TYPE_FREE_RESOURCE 0x01u // free qhandle at vnode diff --git a/include/util/tqueue.h b/include/util/tqueue.h index dbc4d03177e4c489240c04aac37710ce995102d4..466c577c0079d07774722ff2efdd30bf207e0fc3 100644 --- a/include/util/tqueue.h +++ b/include/util/tqueue.h @@ -46,6 +46,7 @@ typedef struct { void *ahandle; int32_t workerId; int32_t threadNum; + int64_t timestamp; } SQueueInfo; typedef enum { @@ -80,7 +81,7 @@ int32_t taosAddIntoQset(STaosQset *qset, STaosQueue *queue, void *ahandle); void taosRemoveFromQset(STaosQset *qset, STaosQueue *queue); int32_t taosGetQueueNumber(STaosQset *qset); -int32_t taosReadQitemFromQset(STaosQset *qset, void **ppItem, void **ahandle, FItem *itemFp); +int32_t taosReadQitemFromQset(STaosQset *qset, void **ppItem, int64_t *ts, void **ahandle, FItem *itemFp); int32_t taosReadAllQitemsFromQset(STaosQset *qset, STaosQall *qall, void **ahandle, FItems *itemsFp); void taosResetQsetThread(STaosQset *qset, void *pItem); diff --git a/packaging/docker/Dockerfile b/packaging/docker/Dockerfile index 26349e257676d99d0ea81e03509c8b09c20a2248..35bea0e65ccc5070fe9d4e82adadc7132ae7cc81 100644 --- a/packaging/docker/Dockerfile +++ b/packaging/docker/Dockerfile @@ -1,32 +1,25 @@ -FROM ubuntu:18.04 - -WORKDIR /root - -ARG pkgFile -ARG dirName -ARG cpuType -RUN echo ${pkgFile} && echo ${dirName} - -COPY ${pkgFile} /root/ -RUN tar -zxf ${pkgFile} -WORKDIR /root/ -RUN cd /root/${dirName}/ && /bin/bash install.sh -e no && cd /root -RUN rm /root/${pkgFile} -RUN rm -rf /root/${dirName} - -ENV DEBIAN_FRONTEND=noninteractive -RUN apt-get clean && apt-get update && apt-get install -y locales tzdata netcat && locale-gen en_US.UTF-8 -ENV LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/usr/lib" \ - LC_CTYPE=en_US.UTF-8 \ - LANG=en_US.UTF-8 \ - LC_ALL=en_US.UTF-8 - -COPY ./bin/* /usr/bin/ - -ENV TINI_VERSION v0.19.0 -RUN bash -c 'echo -e "Downloading tini-${cpuType} ..."' -ADD https://github.com/krallin/tini/releases/download/${TINI_VERSION}/tini-${cpuType} /tini -RUN chmod +x /tini -ENTRYPOINT ["/tini", "--", "/usr/bin/entrypoint.sh"] -CMD ["taosd"] -VOLUME [ "/var/lib/taos", "/var/log/taos", "/corefile" ] +FROM ubuntu:18.04 + +WORKDIR /root + +ARG pkgFile +ARG dirName +ARG cpuType +RUN echo ${pkgFile} && echo ${dirName} + +COPY ${pkgFile} /root/ +ENV TINI_VERSION v0.19.0 +ADD https://github.com/krallin/tini/releases/download/${TINI_VERSION}/tini-${cpuType} /tini +ENV DEBIAN_FRONTEND=noninteractive +WORKDIR /root/ +RUN tar -zxf ${pkgFile} && cd /root/${dirName}/ && /bin/bash install.sh -e no && cd /root && rm /root/${pkgFile} && rm -rf /root/${dirName} && apt-get update && apt-get install -y locales tzdata netcat && locale-gen en_US.UTF-8 && apt-get clean && rm -rf /var/lib/apt/lists/ && chmod +x /tini + +ENV LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/usr/lib" \ + LC_CTYPE=en_US.UTF-8 \ + LANG=en_US.UTF-8 \ + LC_ALL=en_US.UTF-8 +COPY ./bin/* /usr/bin/ + +ENTRYPOINT ["/tini", "--", "/usr/bin/entrypoint.sh"] +CMD ["taosd"] +VOLUME [ "/var/lib/taos", "/var/log/taos", "/corefile" ] diff --git a/packaging/docker/bin/entrypoint.sh b/packaging/docker/bin/entrypoint.sh index 5fb441004d8b454de1039eb3f4b23eb51f32be64..f4be349c0de0ea0df382fc6fee033120c5c48007 100644 --- a/packaging/docker/bin/entrypoint.sh +++ b/packaging/docker/bin/entrypoint.sh @@ -11,39 +11,22 @@ DISABLE_ADAPTER=${TAOS_DISABLE_ADAPTER:-0} unset TAOS_DISABLE_ADAPTER # to get mnodeEpSet from data dir 
-DATA_DIR=${TAOS_DATA_DIR:-/var/lib/taos}
+DATA_DIR=$(taosd -C|grep -E 'dataDir.*(\S+)' -o |head -n1|sed 's/dataDir *//')
+DATA_DIR=${DATA_DIR:-/var/lib/taos}
 
-# append env to custom taos.cfg
-CFG_DIR=/tmp/taos
-CFG_FILE=$CFG_DIR/taos.cfg
-
-mkdir -p $CFG_DIR >/dev/null 2>&1
-
-[ -f /etc/taos/taos.cfg ] && cat /etc/taos/taos.cfg | grep -E -v "^#|^\s*$" >$CFG_FILE
-env-to-cfg >>$CFG_FILE
-
-FQDN=$(cat $CFG_FILE | grep -E -v "^#|^$" | grep fqdn | tail -n1 | sed -E 's/.*fqdn\s+//')
+FQDN=$(taosd -C|grep -E 'fqdn.*(\S+)' -o |head -n1|sed 's/fqdn *//')
 # ensure the fqdn is resolved as localhost
 grep "$FQDN" /etc/hosts >/dev/null || echo "127.0.0.1 $FQDN" >>/etc/hosts
-
+FIRST_EP=$(taosd -C|grep -E 'firstEp.*(\S+)' -o |head -n1|sed 's/firstEp *//')
 # parse first ep host and port
-FIRST_EP_HOST=${TAOS_FIRST_EP%:*}
-FIRST_EP_PORT=${TAOS_FIRST_EP#*:}
+FIRST_EP_HOST=${FIRST_EP%:*}
+FIRST_EP_PORT=${FIRST_EP#*:}
 
 # in case of custom server port
-SERVER_PORT=$(cat $CFG_FILE | grep -E -v "^#|^$" | grep serverPort | tail -n1 | sed -E 's/.*serverPort\s+//')
+SERVER_PORT=$(taosd -C|grep -E 'serverPort.*(\S+)' -o |head -n1|sed 's/serverPort *//')
 SERVER_PORT=${SERVER_PORT:-6030}
 
-# for other binaries like interpreters
-if echo $1 | grep -E "taosd$" - >/dev/null; then
-    true # will run taosd
-else
-    cp -f $CFG_FILE /etc/taos/taos.cfg || true
-    $@
-    exit $?
-fi
-
 set +e
 ulimit -c unlimited
 # set core files pattern, maybe failed
@@ -62,22 +45,23 @@ fi
 
 # if has mnode ep set or the host is first ep or not for cluster, just start.
 if [ -f "$DATA_DIR/dnode/mnodeEpSet.json" ] || [ "$TAOS_FQDN" = "$FIRST_EP_HOST" ]; then
-    $@ -c $CFG_DIR
+    $@
 # others will first wait the first ep ready.
 else
     if [ "$TAOS_FIRST_EP" = "" ]; then
         echo "run TDengine with single node."
-        $@ -c $CFG_DIR
+        $@
         exit $?
     fi
     while true; do
-        es=0
-        taos -h $FIRST_EP_HOST -P $FIRST_EP_PORT -n startup >/dev/null || es=$?
- if [ "$es" -eq 0 ]; then + es=$(taos -h $FIRST_EP_HOST -P $FIRST_EP_PORT --check) + echo ${es} + if [ "${es%%:*}" -eq 2 ]; then + echo "execute create dnode" taos -h $FIRST_EP_HOST -P $FIRST_EP_PORT -s "create dnode \"$FQDN:$SERVER_PORT\";" break fi sleep 1s done - $@ -c $CFG_DIR + $@ fi diff --git a/packaging/docker/bin/taos-check b/packaging/docker/bin/taos-check new file mode 100644 index 0000000000000000000000000000000000000000..5dc06b6018b93b627610b446ca6363773fd0fd72 --- /dev/null +++ b/packaging/docker/bin/taos-check @@ -0,0 +1,8 @@ +#!/bin/sh +es=$(taos --check) +code=${es%%:*} +if [ "$code" -ne "0" ] && [ "$code" -ne "4" ]; then + exit 0 +fi +echo $es +exit 1 diff --git a/source/client/src/clientImpl.c b/source/client/src/clientImpl.c index 0c5c72906b9963fe4847b5ef3c6b1c42c29cea0f..eb4c4cb59feac8c8a0db6cd85f45f3482b31e96f 100644 --- a/source/client/src/clientImpl.c +++ b/source/client/src/clientImpl.c @@ -1249,6 +1249,8 @@ void resetConnectDB(STscObj* pTscObj) { int32_t setQueryResultFromRsp(SReqResultInfo* pResultInfo, const SRetrieveTableRsp* pRsp, bool convertUcs4) { assert(pResultInfo != NULL && pRsp != NULL); + taosMemoryFreeClear(pResultInfo->pRspMsg); + pResultInfo->pRspMsg = (const char*)pRsp; pResultInfo->pData = (void*)pRsp->data; pResultInfo->numOfRows = htonl(pRsp->numOfRows); diff --git a/source/common/src/tdatablock.c b/source/common/src/tdatablock.c index 33dee016d6ce5835975e0c2a89337628b1cec0c3..349564ae28b8601565d57533ae57b5a1b49e51f5 100644 --- a/source/common/src/tdatablock.c +++ b/source/common/src/tdatablock.c @@ -611,6 +611,7 @@ int32_t blockDataFromBuf1(SSDataBlock* pBlock, const char* buf, size_t capacity) for (int32_t i = 0; i < numOfCols; ++i) { SColumnInfoData* pCol = taosArrayGet(pBlock->pDataBlock, i); + pCol->hasNull = true; if (IS_VAR_DATA_TYPE(pCol->info.type)) { size_t metaSize = capacity * sizeof(int32_t); @@ -1292,8 +1293,8 @@ static void doShiftBitmap(char* nullBitmap, size_t n, size_t total) { static void colDataTrimFirstNRows(SColumnInfoData* pColInfoData, size_t n, size_t total) { if (IS_VAR_DATA_TYPE(pColInfoData->info.type)) { - memmove(pColInfoData->varmeta.offset, &pColInfoData->varmeta.offset[n], (total - n)); - memset(&pColInfoData->varmeta.offset[total - n - 1], 0, n); + memmove(pColInfoData->varmeta.offset, &pColInfoData->varmeta.offset[n], (total - n) * sizeof(int32_t)); + memset(&pColInfoData->varmeta.offset[total - n], 0, n); } else { int32_t bytes = pColInfoData->info.bytes; memmove(pColInfoData->pData, ((char*)pColInfoData->pData + n * bytes), (total - n) * bytes); @@ -1462,7 +1463,7 @@ static char* formatTimestamp(char* buf, int64_t val, int precision) { } void blockDebugShowData(const SArray* dataBlocks) { - char pBuf[128]; + char pBuf[128] = {0}; int32_t sz = taosArrayGetSize(dataBlocks); for (int32_t i = 0; i < sz; i++) { SSDataBlock* pDataBlock = taosArrayGet(dataBlocks, i); diff --git a/source/common/src/tmsg.c b/source/common/src/tmsg.c index 7615f7b070ca350a27cc7d6a05520386fcaa6759..751c990ca4f907cc28bd0b81f43395083e4b3eb4 100644 --- a/source/common/src/tmsg.c +++ b/source/common/src/tmsg.c @@ -2668,25 +2668,23 @@ int32_t tDeserializeSMDropCgroupReq(void *buf, int32_t bufLen, SMDropCgroupReq * } int32_t tSerializeSCMCreateTopicReq(void *buf, int32_t bufLen, const SCMCreateTopicReq *pReq) { - int32_t sqlLen = 0; - int32_t astLen = 0; - if (pReq->sql != NULL) sqlLen = (int32_t)strlen(pReq->sql); - if (pReq->ast != NULL) astLen = (int32_t)strlen(pReq->ast); - SEncoder encoder = {0}; tEncoderInit(&encoder, buf, bufLen); if 
(tStartEncode(&encoder) < 0) return -1; if (tEncodeCStr(&encoder, pReq->name) < 0) return -1; if (tEncodeI8(&encoder, pReq->igExists) < 0) return -1; - if (tEncodeI8(&encoder, pReq->withTbName) < 0) return -1; - if (tEncodeI8(&encoder, pReq->withSchema) < 0) return -1; - if (tEncodeI8(&encoder, pReq->withTag) < 0) return -1; - if (tEncodeCStr(&encoder, pReq->subscribeDbName) < 0) return -1; - if (tEncodeI32(&encoder, sqlLen) < 0) return -1; - if (tEncodeI32(&encoder, astLen) < 0) return -1; - if (sqlLen > 0 && tEncodeCStr(&encoder, pReq->sql) < 0) return -1; - if (astLen > 0 && tEncodeCStr(&encoder, pReq->ast) < 0) return -1; + if (tEncodeI8(&encoder, pReq->subType) < 0) return -1; + if (tEncodeCStr(&encoder, pReq->subDbName) < 0) return -1; + if (TOPIC_SUB_TYPE__DB == pReq->subType) { + } else if (TOPIC_SUB_TYPE__TABLE == pReq->subType) { + if (tEncodeCStr(&encoder, pReq->subStbName) < 0) return -1; + } else { + if (tEncodeI32(&encoder, strlen(pReq->ast)) < 0) return -1; + if (tEncodeCStr(&encoder, pReq->ast) < 0) return -1; + } + if (tEncodeI32(&encoder, strlen(pReq->sql)) < 0) return -1; + if (tEncodeCStr(&encoder, pReq->sql) < 0) return -1; tEndEncode(&encoder); @@ -2705,26 +2703,26 @@ int32_t tDeserializeSCMCreateTopicReq(void *buf, int32_t bufLen, SCMCreateTopicR if (tStartDecode(&decoder) < 0) return -1; if (tDecodeCStrTo(&decoder, pReq->name) < 0) return -1; if (tDecodeI8(&decoder, &pReq->igExists) < 0) return -1; - if (tDecodeI8(&decoder, &pReq->withTbName) < 0) return -1; - if (tDecodeI8(&decoder, &pReq->withSchema) < 0) return -1; - if (tDecodeI8(&decoder, &pReq->withTag) < 0) return -1; - if (tDecodeCStrTo(&decoder, pReq->subscribeDbName) < 0) return -1; + if (tDecodeI8(&decoder, &pReq->subType) < 0) return -1; + if (tDecodeCStrTo(&decoder, pReq->subDbName) < 0) return -1; + if (TOPIC_SUB_TYPE__DB == pReq->subType) { + } else if (TOPIC_SUB_TYPE__TABLE == pReq->subType) { + if (tDecodeCStrTo(&decoder, pReq->subStbName) < 0) return -1; + } else { + if (tDecodeI32(&decoder, &astLen) < 0) return -1; + if (astLen > 0) { + pReq->ast = taosMemoryCalloc(1, astLen + 1); + if (pReq->ast == NULL) return -1; + if (tDecodeCStrTo(&decoder, pReq->ast) < 0) return -1; + } + } if (tDecodeI32(&decoder, &sqlLen) < 0) return -1; - if (tDecodeI32(&decoder, &astLen) < 0) return -1; - if (sqlLen > 0) { pReq->sql = taosMemoryCalloc(1, sqlLen + 1); if (pReq->sql == NULL) return -1; if (tDecodeCStrTo(&decoder, pReq->sql) < 0) return -1; } - if (astLen > 0) { - pReq->ast = taosMemoryCalloc(1, astLen + 1); - if (pReq->ast == NULL) return -1; - if (tDecodeCStrTo(&decoder, pReq->ast) < 0) return -1; - } else { - } - tEndDecode(&decoder); tDecoderClear(&decoder); @@ -2733,7 +2731,9 @@ int32_t tDeserializeSCMCreateTopicReq(void *buf, int32_t bufLen, SCMCreateTopicR void tFreeSCMCreateTopicReq(SCMCreateTopicReq *pReq) { taosMemoryFreeClear(pReq->sql); - taosMemoryFreeClear(pReq->ast); + if (TOPIC_SUB_TYPE__COLUMN == pReq->subType) { + taosMemoryFreeClear(pReq->ast); + } } int32_t tSerializeSCMCreateTopicRsp(void *buf, int32_t bufLen, const SCMCreateTopicRsp *pRsp) { diff --git a/source/common/src/trow.c b/source/common/src/trow.c index cc18240325ffa95aba75b4c7123d4d5749694035..c8a28d7f28f747b65fae3802bc392ac6163e5e1e 100644 --- a/source/common/src/trow.c +++ b/source/common/src/trow.c @@ -605,6 +605,10 @@ static int32_t tdAppendKvRowToDataCol(STSRow *pRow, STSchema *pSchema, SDataCols * @param pCols */ int32_t tdAppendSTSRowToDataCol(STSRow *pRow, STSchema *pSchema, SDataCols *pCols, bool isMerge) { +#ifdef 
TD_DEBUG_PRINT_TSDB_LOAD_DCOLS + printf("%s:%d ts: %" PRIi64 " sver:%d maxCols:%" PRIi16 " nCols:%" PRIi16 ", nRows:%d\n", __func__, __LINE__, + TD_ROW_KEY(pRow), TD_ROW_SVER(pRow), pCols->maxCols, pCols->numOfCols, pCols->numOfRows); +#endif if (TD_IS_TP_ROW(pRow)) { return tdAppendTpRowToDataCol(pRow, pSchema, pCols, isMerge); } else if (TD_IS_KV_ROW(pRow)) { diff --git a/source/common/src/ttime.c b/source/common/src/ttime.c index 685ee168939925a05309006e2475ab3a9c22b2d3..10ba58af298c59306badc2e299e588e3ec46874f 100644 --- a/source/common/src/ttime.c +++ b/source/common/src/ttime.c @@ -521,10 +521,10 @@ int32_t convertStringToTimestamp(int16_t type, char *inputData, int64_t timePrec if (type == TSDB_DATA_TYPE_BINARY || type == TSDB_DATA_TYPE_VARBINARY) { newColData = taosMemoryCalloc(1, charLen + 1); memcpy(newColData, varDataVal(inputData), charLen); - bool ret = taosParseTime(newColData, timeVal, charLen, (int32_t)timePrec, tsDaylight); + int32_t ret = taosParseTime(newColData, timeVal, charLen, (int32_t)timePrec, tsDaylight); if (ret != TSDB_CODE_SUCCESS) { taosMemoryFree(newColData); - return ret; + return TSDB_CODE_INVALID_TIMESTAMP; } taosMemoryFree(newColData); } else if (type == TSDB_DATA_TYPE_NCHAR) { @@ -783,7 +783,7 @@ int64_t taosTimeTruncate(int64_t t, const SInterval* pInterval, int32_t precisio // 2020-07-03 17:48:42 // and the parameter can also be a variable. const char* fmtts(int64_t ts) { - static char buf[96]; + static char buf[96] = {0}; size_t pos = 0; struct tm tm; diff --git a/source/dnode/mgmt/mgmt_qnode/src/qmHandle.c b/source/dnode/mgmt/mgmt_qnode/src/qmHandle.c index 916973b4ca596ce7b6ee9d5bd89a4840161c6b86..65794b7b8136f0d6314880399ac08a195eecd22a 100644 --- a/source/dnode/mgmt/mgmt_qnode/src/qmHandle.c +++ b/source/dnode/mgmt/mgmt_qnode/src/qmHandle.c @@ -16,7 +16,11 @@ #define _DEFAULT_SOURCE #include "qmInt.h" -void qmGetMonitorInfo(SQnodeMgmt *pMgmt, SMonQmInfo *qmInfo) {} +void qmGetMonitorInfo(SQnodeMgmt *pMgmt, SMonQmInfo *qmInfo) { + SQnodeLoad qload = {0}; + qndGetLoad(pMgmt->pQnode, &qload); + +} int32_t qmProcessGetMonitorInfoReq(SQnodeMgmt *pMgmt, SRpcMsg *pMsg) { SMonQmInfo qmInfo = {0}; diff --git a/source/dnode/mgmt/mgmt_qnode/src/qmWorker.c b/source/dnode/mgmt/mgmt_qnode/src/qmWorker.c index 35c94b7fbe786434cfb59191c8899949099d0325..e7fc261b67a8a6416cdbafae07552a5c9576bc22 100644 --- a/source/dnode/mgmt/mgmt_qnode/src/qmWorker.c +++ b/source/dnode/mgmt/mgmt_qnode/src/qmWorker.c @@ -36,7 +36,7 @@ static void qmProcessQueue(SQueueInfo *pInfo, SRpcMsg *pMsg) { code = qmProcessGetMonitorInfoReq(pMgmt, pMsg); break; default: - code = qndProcessQueryMsg(pMgmt->pQnode, pMsg); + code = qndProcessQueryMsg(pMgmt->pQnode, pInfo->timestamp, pMsg); break; } diff --git a/source/dnode/mgmt/node_mgmt/src/dmTransport.c b/source/dnode/mgmt/node_mgmt/src/dmTransport.c index c7509eb9d8a7e1ed47bbc65f8b8e1e2d15364ebc..e5893fd94740fa20fa244bd1957a02a50e39bf08 100644 --- a/source/dnode/mgmt/node_mgmt/src/dmTransport.c +++ b/source/dnode/mgmt/node_mgmt/src/dmTransport.c @@ -62,7 +62,7 @@ static void dmProcessRpcMsg(SDnode *pDnode, SRpcMsg *pRpc, SEpSet *pEpSet) { dmProcessNetTestReq(pDnode, pRpc); return; } else if (pRpc->msgType == TDMT_MND_SYSTABLE_RETRIEVE_RSP || pRpc->msgType == TDMT_VND_FETCH_RSP) { - qWorkerProcessFetchRsp(NULL, NULL, pRpc); + qWorkerProcessFetchRsp(NULL, NULL, pRpc, 0); return; } else if (pRpc->msgType == TDMT_MND_STATUS_RSP && pEpSet != NULL) { dmSetMnodeEpSet(&pDnode->data, pEpSet); @@ -130,7 +130,7 @@ static void dmProcessRpcMsg(SDnode 
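Review note on the ttime.c hunk: with `bool ret`, the failure check happened to work (any nonzero code collapses to true), but `return ret` then propagated the boolean 1 instead of a meaningful error code. Widening to int32_t keeps the real status, and the hunk deliberately maps any failure to TSDB_CODE_INVALID_TIMESTAMP for the caller. A toy illustration of the truncation (parseStub and its error code are invented):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Stub that follows the 0-on-success, nonzero-error-code convention. */
    static int32_t parseStub(void) { return -1000; /* made-up code */ }

    int main(void) {
      bool    asBool = parseStub(); /* any nonzero code becomes 1 (true) */
      int32_t asCode = parseStub(); /* the actual code survives intact */
      printf("bool=%d int32=%d\n", asBool, asCode);
      return 0;
    }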
*pDnode, SRpcMsg *pRpc, SEpSet *pEpSet) { _OVER: if (code != 0) { - dError("msg:%p, failed to process since %s", pMsg, terrstr()); + dTrace("msg:%p, failed to process since %s, type:%s", pMsg, terrstr(), TMSG_INFO(pRpc->msgType)); if (terrno != 0) code = terrno; if (IsReq(pRpc)) { diff --git a/source/dnode/mnode/impl/inc/mndDef.h b/source/dnode/mnode/impl/inc/mndDef.h index 9e036d7b2533332c23b70cabb0e75326fbc1edd5..4ecfb13a2ca9c3c2e87214efe376de92e20f5953 100644 --- a/source/dnode/mnode/impl/inc/mndDef.h +++ b/source/dnode/mnode/impl/inc/mndDef.h @@ -53,16 +53,19 @@ typedef enum { MND_AUTH_MAX } EAuthOp; +typedef enum { + TRN_STEP_LOG = 1, + TRN_STEP_ACTION = 2, +} ETrnStep; + typedef enum { TRN_STAGE_PREPARE = 0, - TRN_STAGE_REDO_LOG = 1, - TRN_STAGE_REDO_ACTION = 2, - TRN_STAGE_ROLLBACK = 3, - TRN_STAGE_UNDO_ACTION = 4, - TRN_STAGE_UNDO_LOG = 5, - TRN_STAGE_COMMIT = 6, - TRN_STAGE_COMMIT_LOG = 7, - TRN_STAGE_FINISHED = 8 + TRN_STAGE_REDO_ACTION = 1, + TRN_STAGE_ROLLBACK = 2, + TRN_STAGE_UNDO_ACTION = 3, + TRN_STAGE_COMMIT = 4, + TRN_STAGE_COMMIT_ACTION = 5, + TRN_STAGE_FINISHED = 6 } ETrnStage; typedef enum { @@ -126,7 +129,7 @@ typedef enum { typedef enum { TRN_EXEC_PARALLEL = 0, - TRN_EXEC_ONE_BY_ONE = 1, + TRN_EXEC_NO_PARALLEL = 1, } ETrnExecType; typedef enum { @@ -163,16 +166,16 @@ typedef struct { SRpcHandleInfo rpcInfo; void* rpcRsp; int32_t rpcRspLen; - SArray* redoLogs; - SArray* undoLogs; - SArray* commitLogs; + int32_t redoActionPos; SArray* redoActions; SArray* undoActions; + SArray* commitActions; int64_t createdTime; int64_t lastExecTime; int64_t dbUid; char dbname[TSDB_DB_FNAME_LEN]; char lastError[TSDB_TRANS_ERROR_LEN]; + char desc[TSDB_TRANS_DESC_LEN]; int32_t startFunc; int32_t stopFunc; int32_t paramLen; @@ -449,17 +452,17 @@ int32_t tEncodeSMqOffsetObj(void** buf, const SMqOffsetObj* pOffset); void* tDecodeSMqOffsetObj(void* buf, SMqOffsetObj* pOffset); typedef struct { - char name[TSDB_TOPIC_FNAME_LEN]; - char db[TSDB_DB_FNAME_LEN]; - int64_t createTime; - int64_t updateTime; - int64_t uid; - int64_t dbUid; - int32_t version; - int8_t subType; // db or table - int8_t withTbName; - int8_t withSchema; - int8_t withTag; + char name[TSDB_TOPIC_FNAME_LEN]; + char db[TSDB_DB_FNAME_LEN]; + int64_t createTime; + int64_t updateTime; + int64_t uid; + int64_t dbUid; + int32_t version; + int8_t subType; // column, db or stable + // int8_t withTbName; + // int8_t withSchema; + // int8_t withTag; SRWLatch lock; int32_t consumerCnt; int32_t sqlLen; @@ -468,7 +471,7 @@ typedef struct { char* ast; char* physicalPlan; SSchemaWrapper schema; - int32_t refConsumerCnt; + // int32_t refConsumerCnt; } SMqTopicObj; typedef struct { @@ -522,14 +525,14 @@ int32_t tEncodeSMqConsumerEp(void** buf, const SMqConsumerEp* pEp); void* tDecodeSMqConsumerEp(const void* buf, SMqConsumerEp* pEp); typedef struct { - char key[TSDB_SUBSCRIBE_KEY_LEN]; - SRWLatch lock; - int64_t dbUid; - int32_t vgNum; - int8_t subType; - int8_t withTbName; - int8_t withSchema; - int8_t withTag; + char key[TSDB_SUBSCRIBE_KEY_LEN]; + SRWLatch lock; + int64_t dbUid; + int32_t vgNum; + int8_t subType; + // int8_t withTbName; + // int8_t withSchema; + // int8_t withTag; SHashObj* consumerHash; // consumerId -> SMqConsumerEp SArray* unassignedVgs; // SArray } SMqSubscribeObj; diff --git a/source/dnode/mnode/impl/inc/mndScheduler.h b/source/dnode/mnode/impl/inc/mndScheduler.h index 9f4e377dd17dffc94ab04366e2c1ba61e170b92f..05aea3f68c4023ceb68e16ad875f59c666f63171 100644 --- a/source/dnode/mnode/impl/inc/mndScheduler.h +++ 
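Review note: the mndDef.h and mndTrans.h hunks are the core of this change set. The three log arrays (redoLogs/undoLogs/commitLogs) disappear; every step becomes an action, and STransAction doubles as either a local sdb record write or a tracked RPC, with isRaw as the discriminator and redoActionPos recording how far execution got so it can resume. A reduced model of that layout, with invented names (the real struct keeps both member sets side by side, exactly as the header hunk shows):

    #include <stdbool.h>
    #include <stdint.h>

    typedef struct {
      int32_t id;
      bool    isRaw;        /* discriminator between the two action kinds */
      void   *pRaw;         /* isRaw: encoded sdb record, applied locally */
      int16_t msgType;      /* !isRaw: RPC sent to a dnode */
      void   *pCont;
      int32_t contLen;
      bool    rawWritten;   /* progress flags let a restarted mnode resume */
      bool    msgSent;
      bool    msgReceived;
    } MiniAction;

    static int32_t execAction(MiniAction *a) {
      if (a->isRaw) {
        /* apply a->pRaw to the sdb, then mark progress */
        a->rawWritten = true;
        return 0;
      }
      /* otherwise send {msgType, pCont, contLen} and wait for the rsp */
      a->msgSent = true;
      return 0;
    }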
b/source/dnode/mnode/impl/inc/mndScheduler.h @@ -30,7 +30,7 @@ int32_t mndSchedInitSubEp(SMnode* pMnode, const SMqTopicObj* pTopic, SMqSubscrib int32_t mndScheduleStream(SMnode* pMnode, STrans* pTrans, SStreamObj* pStream); int32_t mndConvertRSmaTask(const char* ast, int64_t uid, int8_t triggerType, int64_t watermark, char** pStr, - int32_t* pLen); + int32_t* pLen, double filesFactor); #ifdef __cplusplus } diff --git a/source/dnode/mnode/impl/inc/mndTrans.h b/source/dnode/mnode/impl/inc/mndTrans.h index 84bed13e2e99239536a7a1c5f7f88a4c4c2d089c..ba6f5faf1ec74580576b18dd13e620368d0541c4 100644 --- a/source/dnode/mnode/impl/inc/mndTrans.h +++ b/source/dnode/mnode/impl/inc/mndTrans.h @@ -22,24 +22,29 @@ extern "C" { #endif +typedef enum { + TRANS_START_FUNC_TEST = 1, + TRANS_STOP_FUNC_TEST = 2, + TRANS_START_FUNC_MQ_REB = 3, + TRANS_STOP_FUNC_MQ_REB = 4, +} ETrnFunc; + typedef struct { - SEpSet epSet; - tmsg_t msgType; - int8_t msgSent; - int8_t msgReceived; - int32_t errCode; - int32_t acceptableCode; - int32_t contLen; - void *pCont; + int32_t id; + int32_t errCode; + int32_t acceptableCode; + int8_t stage; + int8_t isRaw; + int8_t rawWritten; + int8_t msgSent; + int8_t msgReceived; + tmsg_t msgType; + SEpSet epSet; + int32_t contLen; + void *pCont; + SSdbRaw *pRaw; } STransAction; -typedef enum { - TEST_TRANS_START_FUNC = 1, - TEST_TRANS_STOP_FUNC = 2, - MQ_REB_TRANS_START_FUNC = 3, - MQ_REB_TRANS_STOP_FUNC = 4, -} ETrnFuncType; - typedef void (*TransCbFp)(SMnode *pMnode, void *param, int32_t paramLen); int32_t mndInitTrans(SMnode *pMnode); @@ -55,9 +60,9 @@ int32_t mndTransAppendCommitlog(STrans *pTrans, SSdbRaw *pRaw); int32_t mndTransAppendRedoAction(STrans *pTrans, STransAction *pAction); int32_t mndTransAppendUndoAction(STrans *pTrans, STransAction *pAction); void mndTransSetRpcRsp(STrans *pTrans, void *pCont, int32_t contLen); -void mndTransSetCb(STrans *pTrans, ETrnFuncType startFunc, ETrnFuncType stopFunc, void *param, int32_t paramLen); +void mndTransSetCb(STrans *pTrans, ETrnFunc startFunc, ETrnFunc stopFunc, void *param, int32_t paramLen); void mndTransSetDbInfo(STrans *pTrans, SDbObj *pDb); -void mndTransSetExecOneByOne(STrans *pTrans); +void mndTransSetNoParallel(STrans *pTrans); int32_t mndTransPrepare(SMnode *pMnode, STrans *pTrans); void mndTransProcessRsp(SRpcMsg *pRsp); diff --git a/source/dnode/mnode/impl/src/mndAcct.c b/source/dnode/mnode/impl/src/mndAcct.c index a4fde4b70670952dbf14554aa0fce15f77cb49f5..f3ec3a421b6290dbe00997ba13707d62459dccff 100644 --- a/source/dnode/mnode/impl/src/mndAcct.c +++ b/source/dnode/mnode/impl/src/mndAcct.c @@ -78,10 +78,8 @@ static int32_t mndCreateDefaultAcct(SMnode *pMnode) { if (pRaw == NULL) return -1; sdbSetRawStatus(pRaw, SDB_STATUS_READY); - mDebug("acct:%s, will be created while deploy sdb, raw:%p", acctObj.acct, pRaw); -#if 0 - return sdbWrite(pMnode->pSdb, pRaw); -#else + mDebug("acct:%s, will be created when deploying, raw:%p", acctObj.acct, pRaw); + STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_TYPE_CREATE_ACCT, NULL); if (pTrans == NULL) { mError("acct:%s, failed to create since %s", acctObj.acct, terrstr()); @@ -94,7 +92,6 @@ static int32_t mndCreateDefaultAcct(SMnode *pMnode) { mndTransDrop(pTrans); return -1; } - sdbSetRawStatus(pRaw, SDB_STATUS_READY); if (mndTransPrepare(pMnode, pTrans) != 0) { mError("trans:%d, failed to prepare since %s", pTrans->id, terrstr()); @@ -104,7 +101,6 @@ static int32_t mndCreateDefaultAcct(SMnode *pMnode) { mndTransDrop(pTrans); return 0; -#endif } static SSdbRaw 
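Review note: the mndAcct.c hunk deletes the #if 0 shortcut, so even the deploy-time default account now goes through a transaction; the redundant second sdbSetRawStatus call disappears along the way. Condensed, the surviving shape is as follows (this mirrors the hunk with logging trimmed, rather than adding behavior):

    static int32_t createDefaultObjViaTrans(SMnode *pMnode, SSdbRaw *pRaw) {
      STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_TYPE_CREATE_ACCT, NULL);
      if (pTrans == NULL) return -1;
      /* the encoded row rides the transaction as a commit record instead
       * of being sdbWrite()n directly */
      if (mndTransAppendCommitlog(pTrans, pRaw) != 0 ||
          mndTransPrepare(pMnode, pTrans) != 0) {
        mndTransDrop(pTrans);
        return -1;
      }
      mndTransDrop(pTrans);
      return 0;
    }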
*mndAcctActionEncode(SAcctObj *pAcct) { diff --git a/source/dnode/mnode/impl/src/mndCluster.c b/source/dnode/mnode/impl/src/mndCluster.c index a421be5c062a709bdd1e74f583a95142da2aac82..76c8acf407762cbb4d6d455f2bd552f055ecd0f4 100644 --- a/source/dnode/mnode/impl/src/mndCluster.c +++ b/source/dnode/mnode/impl/src/mndCluster.c @@ -172,13 +172,13 @@ static int32_t mndCreateDefaultCluster(SMnode *pMnode) { clusterObj.id = mndGenerateUid(clusterObj.name, TSDB_CLUSTER_ID_LEN); clusterObj.id = (clusterObj.id >= 0 ? clusterObj.id : -clusterObj.id); pMnode->clusterId = clusterObj.id; - mDebug("cluster:%" PRId64 ", name is %s", clusterObj.id, clusterObj.name); + mInfo("cluster:%" PRId64 ", name is %s", clusterObj.id, clusterObj.name); SSdbRaw *pRaw = mndClusterActionEncode(&clusterObj); if (pRaw == NULL) return -1; sdbSetRawStatus(pRaw, SDB_STATUS_READY); - mDebug("cluster:%" PRId64 ", will be created while deploy sdb, raw:%p", clusterObj.id, pRaw); + mDebug("cluster:%" PRId64 ", will be created when deploying, raw:%p", clusterObj.id, pRaw); #if 0 return sdbWrite(pMnode->pSdb, pRaw); #else diff --git a/source/dnode/mnode/impl/src/mndConsumer.c b/source/dnode/mnode/impl/src/mndConsumer.c index 7cebeb35f5bb9e3f2b363c438a1ce70ad3296717..c3eaeb73b2e21a7d26c7b260a7ebf43c87d707d1 100644 --- a/source/dnode/mnode/impl/src/mndConsumer.c +++ b/source/dnode/mnode/impl/src/mndConsumer.c @@ -414,6 +414,7 @@ static int32_t mndProcessSubscribeReq(SRpcMsg *pMsg) { goto SUBSCRIBE_OVER; } +#if 0 // ref topic to prevent drop // TODO make topic complete SMqTopicObj topicObj = {0}; @@ -422,6 +423,7 @@ static int32_t mndProcessSubscribeReq(SRpcMsg *pMsg) { mInfo("subscribe topic %s by consumer %ld cgroup %s, refcnt %d", pTopic->name, consumerId, cgroup, topicObj.refConsumerCnt); if (mndSetTopicCommitLogs(pMnode, pTrans, &topicObj) != 0) goto SUBSCRIBE_OVER; +#endif mndReleaseTopic(pMnode, pTopic); } diff --git a/source/dnode/mnode/impl/src/mndDb.c b/source/dnode/mnode/impl/src/mndDb.c index 95d3383ee10e378c4c5a66e9d16de4fda90db9ed..a0d940c049384b2c19ad6f36e62f8f5460bd62ed 100644 --- a/source/dnode/mnode/impl/src/mndDb.c +++ b/source/dnode/mnode/impl/src/mndDb.c @@ -1044,9 +1044,9 @@ static int32_t mndDropDb(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pDb) { if (mndSetDropDbRedoLogs(pMnode, pTrans, pDb) != 0) goto _OVER; if (mndSetDropDbCommitLogs(pMnode, pTrans, pDb) != 0) goto _OVER; - /*if (mndDropOffsetByDB(pMnode, pTrans, pDb) != 0) goto _OVER;*/ - /*if (mndDropSubByDB(pMnode, pTrans, pDb) != 0) goto _OVER;*/ - /*if (mndDropTopicByDB(pMnode, pTrans, pDb) != 0) goto _OVER;*/ + if (mndDropOffsetByDB(pMnode, pTrans, pDb) != 0) goto _OVER; + if (mndDropSubByDB(pMnode, pTrans, pDb) != 0) goto _OVER; + if (mndDropTopicByDB(pMnode, pTrans, pDb) != 0) goto _OVER; if (mndSetDropDbRedoActions(pMnode, pTrans, pDb) != 0) goto _OVER; SUserObj *pUser = mndAcquireUser(pMnode, pDb->createUser); @@ -1314,7 +1314,7 @@ int32_t mndValidateDbInfo(SMnode *pMnode, SDbVgVersion *pDbs, int32_t numOfDbs, SDbObj *pDb = mndAcquireDb(pMnode, pDbVgVersion->dbFName); if (pDb == NULL) { - mDebug("db:%s, no exist", pDbVgVersion->dbFName); + mTrace("db:%s, no exist", pDbVgVersion->dbFName); memcpy(usedbRsp.db, pDbVgVersion->dbFName, TSDB_DB_FNAME_LEN); usedbRsp.uid = pDbVgVersion->dbId; usedbRsp.vgVersion = -1; diff --git a/source/dnode/mnode/impl/src/mndDef.c b/source/dnode/mnode/impl/src/mndDef.c index 35ba25acd54abf39351e51cfea42152f41b57b9e..b45b6f9ee9927ed3688463bfd3820c8d3d7fef6f 100644 --- a/source/dnode/mnode/impl/src/mndDef.c +++ 
b/source/dnode/mnode/impl/src/mndDef.c @@ -396,9 +396,9 @@ SMqSubscribeObj *tCloneSubscribeObj(const SMqSubscribeObj *pSub) { pSubNew->dbUid = pSub->dbUid; pSubNew->subType = pSub->subType; - pSubNew->withTbName = pSub->withTbName; - pSubNew->withSchema = pSub->withSchema; - pSubNew->withTag = pSub->withTag; + /*pSubNew->withTbName = pSub->withTbName;*/ + /*pSubNew->withSchema = pSub->withSchema;*/ + /*pSubNew->withTag = pSub->withTag;*/ pSubNew->vgNum = pSub->vgNum; pSubNew->consumerHash = taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK); @@ -431,9 +431,9 @@ int32_t tEncodeSubscribeObj(void **buf, const SMqSubscribeObj *pSub) { tlen += taosEncodeFixedI64(buf, pSub->dbUid); tlen += taosEncodeFixedI32(buf, pSub->vgNum); tlen += taosEncodeFixedI8(buf, pSub->subType); - tlen += taosEncodeFixedI8(buf, pSub->withTbName); - tlen += taosEncodeFixedI8(buf, pSub->withSchema); - tlen += taosEncodeFixedI8(buf, pSub->withTag); + /*tlen += taosEncodeFixedI8(buf, pSub->withTbName);*/ + /*tlen += taosEncodeFixedI8(buf, pSub->withSchema);*/ + /*tlen += taosEncodeFixedI8(buf, pSub->withTag);*/ void *pIter = NULL; int32_t sz = taosHashGetSize(pSub->consumerHash); @@ -458,9 +458,9 @@ void *tDecodeSubscribeObj(const void *buf, SMqSubscribeObj *pSub) { buf = taosDecodeFixedI64(buf, &pSub->dbUid); buf = taosDecodeFixedI32(buf, &pSub->vgNum); buf = taosDecodeFixedI8(buf, &pSub->subType); - buf = taosDecodeFixedI8(buf, &pSub->withTbName); - buf = taosDecodeFixedI8(buf, &pSub->withSchema); - buf = taosDecodeFixedI8(buf, &pSub->withTag); + /*buf = taosDecodeFixedI8(buf, &pSub->withTbName);*/ + /*buf = taosDecodeFixedI8(buf, &pSub->withSchema);*/ + /*buf = taosDecodeFixedI8(buf, &pSub->withTag);*/ int32_t sz; buf = taosDecodeFixedI32(buf, &sz); diff --git a/source/dnode/mnode/impl/src/mndDnode.c b/source/dnode/mnode/impl/src/mndDnode.c index 22f858c60bdbfd56652570195b89cbf3f207651a..8e06139c8c041672a292672fc9712bbabfcb74fd 100644 --- a/source/dnode/mnode/impl/src/mndDnode.c +++ b/source/dnode/mnode/impl/src/mndDnode.c @@ -98,7 +98,7 @@ static int32_t mndCreateDefaultDnode(SMnode *pMnode) { if (pRaw == NULL) return -1; if (sdbSetRawStatus(pRaw, SDB_STATUS_READY) != 0) return -1; - mDebug("dnode:%d, will be created while deploy sdb, raw:%p", dnodeObj.id, pRaw); + mDebug("dnode:%d, will be created when deploying, raw:%p", dnodeObj.id, pRaw); #if 0 return sdbWrite(pMnode->pSdb, pRaw); @@ -388,9 +388,10 @@ static int32_t mndProcessStatusReq(SRpcMsg *pReq) { mndReleaseMnode(pMnode, pObj); } + int64_t dnodeVer = sdbGetTableVer(pMnode->pSdb, SDB_DNODE) + sdbGetTableVer(pMnode->pSdb, SDB_MNODE); int64_t curMs = taosGetTimestampMs(); bool online = mndIsDnodeOnline(pMnode, pDnode, curMs); - bool dnodeChanged = (statusReq.dnodeVer != sdbGetTableVer(pMnode->pSdb, SDB_DNODE)); + bool dnodeChanged = (statusReq.dnodeVer != dnodeVer); bool reboot = (pDnode->rebootTime != statusReq.rebootTime); bool needCheck = !online || dnodeChanged || reboot; @@ -433,7 +434,8 @@ static int32_t mndProcessStatusReq(SRpcMsg *pReq) { if (!online) { mInfo("dnode:%d, from offline to online", pDnode->id); } else { - mDebug("dnode:%d, send dnode eps", pDnode->id); + mDebug("dnode:%d, send dnode epset, online:%d ver:% " PRId64 ":%" PRId64 " reboot:%d", pDnode->id, online, + statusReq.dnodeVer, dnodeVer, reboot); } pDnode->rebootTime = statusReq.rebootTime; @@ -441,7 +443,7 @@ static int32_t mndProcessStatusReq(SRpcMsg *pReq) { pDnode->numOfSupportVnodes = statusReq.numOfSupportVnodes; SStatusRsp statusRsp = {0}; - 
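Review note on the mndDnode.c hunk: the change-detection version is now the sum of the dnode and mnode table versions, so an mnode membership change also invalidates a dnode's cached epset. The sum is a safe fingerprint only because sdb table versions are append-only counters; two monotone counters cannot alias (one going up while the other goes down) the way arbitrary values could. In sketch form:

    /* computed once, used for both the staleness check and the reply */
    int64_t dnodeVer = sdbGetTableVer(pMnode->pSdb, SDB_DNODE) +
                       sdbGetTableVer(pMnode->pSdb, SDB_MNODE);
    bool    dnodeChanged = (statusReq.dnodeVer != dnodeVer);
    /* later: statusRsp.dnodeVer = dnodeVer, echoed so the dnode re-caches */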
statusRsp.dnodeVer = sdbGetTableVer(pMnode->pSdb, SDB_DNODE) + sdbGetTableVer(pMnode->pSdb, SDB_MNODE); + statusRsp.dnodeVer = dnodeVer; statusRsp.dnodeCfg.dnodeId = pDnode->id; statusRsp.dnodeCfg.clusterId = pMnode->clusterId; statusRsp.pDnodeEps = taosArrayInit(mndGetDnodeSize(pMnode), sizeof(SDnodeEp)); diff --git a/source/dnode/mnode/impl/src/mndMain.c b/source/dnode/mnode/impl/src/mndMain.c index 995fe83cc5a69502a99a1b807ceffb6c4ec80a52..2a2a45a45d5759dfb8fbe8f0dc4662cfab8fcd14 100644 --- a/source/dnode/mnode/impl/src/mndMain.c +++ b/source/dnode/mnode/impl/src/mndMain.c @@ -369,8 +369,8 @@ int32_t mndProcessSyncMsg(SRpcMsg *pMsg) { mError("failed to process sync msg:%p type:%s since %s", pMsg, TMSG_INFO(pMsg->msgType), terrstr()); return TAOS_SYNC_PROPOSE_OTHER_ERROR; } - - char logBuf[512]; + + char logBuf[512] = {0}; char *syncNodeStr = sync2SimpleStr(pMgmt->sync); snprintf(logBuf, sizeof(logBuf), "==vnodeProcessSyncReq== msgType:%d, syncNode: %s", pMsg->msgType, syncNodeStr); syncRpcMsgLog2(logBuf, pMsg); @@ -472,7 +472,7 @@ int32_t mndProcessRpcMsg(SRpcMsg *pMsg) { } else if (code == 0) { mTrace("msg:%p, successfully processed and response", pMsg); } else { - mError("msg:%p, failed to process since %s, app:%p type:%s", pMsg, terrstr(), pMsg->info.ahandle, + mDebug("msg:%p, failed to process since %s, app:%p type:%s", pMsg, terrstr(), pMsg->info.ahandle, TMSG_INFO(pMsg->msgType)); } diff --git a/source/dnode/mnode/impl/src/mndMnode.c b/source/dnode/mnode/impl/src/mndMnode.c index 23634be77b3ca6f21f31019275353de12ab6c83b..5b8ba6deaa2f768154d90af5b774c098f81c6434 100644 --- a/source/dnode/mnode/impl/src/mndMnode.c +++ b/source/dnode/mnode/impl/src/mndMnode.c @@ -90,7 +90,7 @@ static int32_t mndCreateDefaultMnode(SMnode *pMnode) { if (pRaw == NULL) return -1; sdbSetRawStatus(pRaw, SDB_STATUS_READY); - mDebug("mnode:%d, will be created while deploy sdb, raw:%p", mnodeObj.id, pRaw); + mDebug("mnode:%d, will be created when deploying, raw:%p", mnodeObj.id, pRaw); #if 0 return sdbWrite(pMnode->pSdb, pRaw); @@ -367,7 +367,7 @@ static int32_t mndCreateMnode(SMnode *pMnode, SRpcMsg *pReq, SDnodeObj *pDnode, if (pTrans == NULL) goto _OVER; mDebug("trans:%d, used to create mnode:%d", pTrans->id, pCreate->dnodeId); - mndTransSetExecOneByOne(pTrans); + mndTransSetNoParallel(pTrans); if (mndSetCreateMnodeRedoLogs(pMnode, pTrans, &mnodeObj) != 0) goto _OVER; if (mndSetCreateMnodeCommitLogs(pMnode, pTrans, &mnodeObj) != 0) goto _OVER; if (mndSetCreateMnodeRedoActions(pMnode, pTrans, pDnode, &mnodeObj) != 0) goto _OVER; @@ -539,7 +539,7 @@ static int32_t mndDropMnode(SMnode *pMnode, SRpcMsg *pReq, SMnodeObj *pObj) { if (pTrans == NULL) goto _OVER; mDebug("trans:%d, used to drop mnode:%d", pTrans->id, pObj->id); - mndTransSetExecOneByOne(pTrans); + mndTransSetNoParallel(pTrans); if (mndSetDropMnodeRedoLogs(pMnode, pTrans, pObj) != 0) goto _OVER; if (mndSetDropMnodeCommitLogs(pMnode, pTrans, pObj) != 0) goto _OVER; if (mndSetDropMnodeRedoActions(pMnode, pTrans, pObj->pDnode, pObj) != 0) goto _OVER; diff --git a/source/dnode/mnode/impl/src/mndOffset.c b/source/dnode/mnode/impl/src/mndOffset.c index 01516d03f28f168b71ea5272bf983c181a059bcd..6cbaca3c07818304417f05baed75fdeab70da5ca 100644 --- a/source/dnode/mnode/impl/src/mndOffset.c +++ b/source/dnode/mnode/impl/src/mndOffset.c @@ -21,6 +21,7 @@ #include "mndMnode.h" #include "mndShow.h" #include "mndStb.h" +#include "mndTopic.h" #include "mndTrans.h" #include "mndUser.h" #include "mndVgroup.h" @@ -188,7 +189,15 @@ static int32_t 
mndProcessCommitOffsetReq(SRpcMsg *pMsg) { bool create = false; SMqOffsetObj *pOffsetObj = mndAcquireOffset(pMnode, key); if (pOffsetObj == NULL) { + SMqTopicObj *pTopic = mndAcquireTopic(pMnode, pOffset->topicName); + if (pTopic == NULL) { + terrno = TSDB_CODE_MND_TOPIC_NOT_EXIST; + mError("submit offset to topic %s failed since %s", pOffset->topicName, terrstr()); + continue; + } pOffsetObj = taosMemoryMalloc(sizeof(SMqOffsetObj)); + pOffsetObj->dbUid = pTopic->dbUid; + mndReleaseTopic(pMnode, pTopic); memcpy(pOffsetObj->key, key, TSDB_PARTITION_KEY_LEN); create = true; } diff --git a/source/dnode/mnode/impl/src/mndQuery.c b/source/dnode/mnode/impl/src/mndQuery.c index 78b70c9a74133b859b4175b195d4a939c37ebccc..97594f2b913334ac17e2bd5e6c8fc95e19a03e9e 100644 --- a/source/dnode/mnode/impl/src/mndQuery.c +++ b/source/dnode/mnode/impl/src/mndQuery.c @@ -26,19 +26,19 @@ int32_t mndProcessQueryMsg(SRpcMsg *pMsg) { mTrace("msg:%p, in query queue is processing", pMsg); switch (pMsg->msgType) { case TDMT_VND_QUERY: - code = qWorkerProcessQueryMsg(&handle, pMnode->pQuery, pMsg); + code = qWorkerProcessQueryMsg(&handle, pMnode->pQuery, pMsg, 0); break; case TDMT_VND_QUERY_CONTINUE: - code = qWorkerProcessCQueryMsg(&handle, pMnode->pQuery, pMsg); + code = qWorkerProcessCQueryMsg(&handle, pMnode->pQuery, pMsg, 0); break; case TDMT_VND_FETCH: - code = qWorkerProcessFetchMsg(pMnode, pMnode->pQuery, pMsg); + code = qWorkerProcessFetchMsg(pMnode, pMnode->pQuery, pMsg, 0); break; case TDMT_VND_DROP_TASK: - code = qWorkerProcessDropMsg(pMnode, pMnode->pQuery, pMsg); + code = qWorkerProcessDropMsg(pMnode, pMnode->pQuery, pMsg, 0); break; case TDMT_VND_QUERY_HEARTBEAT: - code = qWorkerProcessHbMsg(pMnode, pMnode->pQuery, pMsg); + code = qWorkerProcessHbMsg(pMnode, pMnode->pQuery, pMsg, 0); break; default: terrno = TSDB_CODE_VND_APP_ERROR; diff --git a/source/dnode/mnode/impl/src/mndScheduler.c b/source/dnode/mnode/impl/src/mndScheduler.c index 58b51e4c548106a393b65ab3142064cd0c249481..b390a7fe4a37bcb057fcc19837a58eb08d277799 100644 --- a/source/dnode/mnode/impl/src/mndScheduler.c +++ b/source/dnode/mnode/impl/src/mndScheduler.c @@ -36,7 +36,7 @@ extern bool tsStreamSchedV; int32_t mndConvertRSmaTask(const char* ast, int64_t uid, int8_t triggerType, int64_t watermark, char** pStr, - int32_t* pLen) { + int32_t* pLen, double filesFactor) { SNode* pAst = NULL; SQueryPlan* pPlan = NULL; terrno = TSDB_CODE_SUCCESS; @@ -58,6 +58,7 @@ int32_t mndConvertRSmaTask(const char* ast, int64_t uid, int8_t triggerType, int .rSmaQuery = true, .triggerType = triggerType, .watermark = watermark, + .filesFactor = filesFactor, }; if (qCreateQueryPlan(&cxt, &pPlan, NULL) < 0) { terrno = TSDB_CODE_QRY_INVALID_INPUT; @@ -286,7 +287,7 @@ int32_t mndScheduleStream(SMnode* pMnode, STrans* pTrans, SStreamObj* pStream) { pStream->tasks = taosArrayInit(totLevel, sizeof(void*)); bool hasExtraSink = false; - if (totLevel == 2) { + if (totLevel == 2 || strcmp(pStream->sourceDb, pStream->targetDb) != 0) { SArray* taskOneLevel = taosArrayInit(0, sizeof(void*)); taosArrayPush(pStream->tasks, &taskOneLevel); // add extra sink @@ -407,7 +408,7 @@ int32_t mndScheduleStream(SMnode* pMnode, STrans* pTrans, SStreamObj* pStream) { /*pTask->dispatchMsgType = TDMT_VND_TASK_WRITE_EXEC;*/ pTask->dispatchMsgType = TDMT_VND_TASK_DISPATCH; - SDbObj* pDb = mndAcquireDb(pMnode, pStream->sourceDb); + SDbObj* pDb = mndAcquireDb(pMnode, pStream->targetDb); ASSERT(pDb); if (mndExtractDbInfo(pMnode, pDb, &pTask->shuffleDispatcher.dbInfo, NULL) < 0) { 
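Review note on the mndOffset.c hunk above: acquiring the topic to stamp dbUid on a fresh offset object is right, but the taosMemoryMalloc result is still unchecked, so an allocation failure would be dereferenced on the very next line. A guarded variant might read (assuming TSDB_CODE_OUT_OF_MEMORY is the appropriate code here):

    pOffsetObj = taosMemoryMalloc(sizeof(SMqOffsetObj));
    if (pOffsetObj == NULL) {
      terrno = TSDB_CODE_OUT_OF_MEMORY;
      mndReleaseTopic(pMnode, pTopic); /* keep the acquire/release paired */
      continue;
    }
    pOffsetObj->dbUid = pTopic->dbUid;
    mndReleaseTopic(pMnode, pTopic);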
sdbRelease(pSdb, pDb); @@ -506,7 +507,7 @@ int32_t mndSchedInitSubEp(SMnode* pMnode, const SMqTopicObj* pTopic, SMqSubscrib SQueryPlan* pPlan = NULL; SSubplan* plan = NULL; - if (pTopic->subType == TOPIC_SUB_TYPE__TABLE) { + if (pTopic->subType == TOPIC_SUB_TYPE__COLUMN) { pPlan = qStringToQueryPlan(pTopic->physicalPlan); if (pPlan == NULL) { terrno = TSDB_CODE_QRY_INVALID_INPUT; @@ -552,7 +553,7 @@ int32_t mndSchedInitSubEp(SMnode* pMnode, const SMqTopicObj* pTopic, SMqSubscrib mDebug("init subscription %s, assign vg: %d", pSub->key, pVgEp->vgId); - if (pTopic->subType == TOPIC_SUB_TYPE__TABLE) { + if (pTopic->subType == TOPIC_SUB_TYPE__COLUMN) { int32_t msgLen; plan->execNode.epSet = pVgEp->epSet; diff --git a/source/dnode/mnode/impl/src/mndSma.c b/source/dnode/mnode/impl/src/mndSma.c index 7b5d1b6c32eaf802eb0683a385acd85498b8a88d..0493b00d3330724a6d27c25c80c767482bfcb2ac 100644 --- a/source/dnode/mnode/impl/src/mndSma.c +++ b/source/dnode/mnode/impl/src/mndSma.c @@ -507,7 +507,7 @@ static int32_t mndCreateSma(SMnode *pMnode, SRpcMsg *pReq, SMCreateSmaReq *pCrea mDebug("trans:%d, used to create sma:%s", pTrans->id, pCreate->name); mndTransSetDbInfo(pTrans, pDb); - mndTransSetExecOneByOne(pTrans); + mndTransSetNoParallel(pTrans); if (mndSetCreateSmaRedoLogs(pMnode, pTrans, &smaObj) != 0) goto _OVER; if (mndSetCreateSmaVgroupRedoLogs(pMnode, pTrans, &streamObj.fixedSinkVg) != 0) goto _OVER; diff --git a/source/dnode/mnode/impl/src/mndStb.c b/source/dnode/mnode/impl/src/mndStb.c index b33c09a0f9d0a4740a3b0b9ce9fb06dd5ea878ae..53befd731c8b214c3b4feb87040b0aa5fd11d605 100644 --- a/source/dnode/mnode/impl/src/mndStb.c +++ b/source/dnode/mnode/impl/src/mndStb.c @@ -397,13 +397,13 @@ static void *mndBuildVCreateStbReq(SMnode *pMnode, SVgObj *pVgroup, SStbObj *pSt req.pRSmaParam.xFilesFactor = pStb->xFilesFactor; req.pRSmaParam.delay = pStb->delay; if (pStb->ast1Len > 0) { - if (mndConvertRSmaTask(pStb->pAst1, pStb->uid, 0, 0, &req.pRSmaParam.qmsg1, &req.pRSmaParam.qmsg1Len) != + if (mndConvertRSmaTask(pStb->pAst1, pStb->uid, 0, 0, &req.pRSmaParam.qmsg1, &req.pRSmaParam.qmsg1Len, req.pRSmaParam.xFilesFactor) != TSDB_CODE_SUCCESS) { return NULL; } } if (pStb->ast2Len > 0) { - if (mndConvertRSmaTask(pStb->pAst2, pStb->uid, 0, 0, &req.pRSmaParam.qmsg2, &req.pRSmaParam.qmsg2Len) != + if (mndConvertRSmaTask(pStb->pAst2, pStb->uid, 0, 0, &req.pRSmaParam.qmsg2, &req.pRSmaParam.qmsg2Len, req.pRSmaParam.xFilesFactor) != TSDB_CODE_SUCCESS) { return NULL; } @@ -1597,7 +1597,7 @@ static int32_t mndProcessTableMetaReq(SRpcMsg *pReq) { pReq->info.rspLen = rspLen; code = 0; - mDebug("stb:%s.%s, meta is retrieved", infoReq.dbFName, infoReq.tbName); + mTrace("%s.%s, meta is retrieved", infoReq.dbFName, infoReq.tbName); _OVER: if (code != 0) { diff --git a/source/dnode/mnode/impl/src/mndStream.c b/source/dnode/mnode/impl/src/mndStream.c index cbef1facdcd5c1a680c90b3f11936316e12a2a4f..13071b5c538a45f9339f4bc97fce9d9e3239a0f6 100644 --- a/source/dnode/mnode/impl/src/mndStream.c +++ b/source/dnode/mnode/impl/src/mndStream.c @@ -393,6 +393,15 @@ static int32_t mndCreateStream(SMnode *pMnode, SRpcMsg *pReq, SCMCreateStreamReq streamObj.trigger = pCreate->triggerType; streamObj.waterMark = pCreate->watermark; + if (streamObj.targetSTbName[0]) { + pDb = mndAcquireDbByStb(pMnode, streamObj.targetSTbName); + if (pDb == NULL) { + terrno = TSDB_CODE_MND_DB_NOT_SELECTED; + return -1; + } + tstrncpy(streamObj.targetDb, pDb->name, TSDB_DB_FNAME_LEN); + } + STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, 
TRN_TYPE_CREATE_STREAM, pReq); if (pTrans == NULL) { mError("stream:%s, failed to create since %s", pCreate->name, terrstr()); diff --git a/source/dnode/mnode/impl/src/mndSubscribe.c b/source/dnode/mnode/impl/src/mndSubscribe.c index 3f3f4f5b5d70dbb70f88f395b86d84833010c873..2f0e2cdd8f4b900738a7266ead582b83a2e6e79a 100644 --- a/source/dnode/mnode/impl/src/mndSubscribe.c +++ b/source/dnode/mnode/impl/src/mndSubscribe.c @@ -93,9 +93,9 @@ static SMqSubscribeObj *mndCreateSub(SMnode *pMnode, const SMqTopicObj *pTopic, } pSub->dbUid = pTopic->dbUid; pSub->subType = pTopic->subType; - pSub->withTbName = pTopic->withTbName; - pSub->withSchema = pTopic->withSchema; - pSub->withTag = pTopic->withTag; + /*pSub->withTbName = pTopic->withTbName;*/ + /*pSub->withSchema = pTopic->withSchema;*/ + /*pSub->withTag = pTopic->withTag;*/ ASSERT(pSub->unassignedVgs->size == 0); ASSERT(taosHashGetSize(pSub->consumerHash) == 0); @@ -120,9 +120,9 @@ static int32_t mndBuildSubChangeReq(void **pBuf, int32_t *pLen, const SMqSubscri req.vgId = pRebVg->pVgEp->vgId; req.qmsg = pRebVg->pVgEp->qmsg; req.subType = pSub->subType; - req.withTbName = pSub->withTbName; - req.withSchema = pSub->withSchema; - req.withTag = pSub->withTag; + /*req.withTbName = pSub->withTbName;*/ + /*req.withSchema = pSub->withSchema;*/ + /*req.withTag = pSub->withTag;*/ strncpy(req.subKey, pSub->key, TSDB_SUBSCRIBE_KEY_LEN); int32_t tlen = sizeof(SMsgHead) + tEncodeSMqRebVgReq(NULL, &req); @@ -157,6 +157,7 @@ static int32_t mndPersistSubChangeVgReq(SMnode *pMnode, STrans *pTrans, const SM int32_t vgId = pRebVg->pVgEp->vgId; SVgObj *pVgObj = mndAcquireVgroup(pMnode, vgId); if (pVgObj == NULL) { + ASSERT(0); taosMemoryFree(buf); return -1; } @@ -451,6 +452,7 @@ static int32_t mndPersistRebResult(SMnode *pMnode, SRpcMsg *pMsg, const SMqRebOu taosArrayPush(pConsumerNew->rebNewTopics, &topic); mndReleaseConsumer(pMnode, pConsumerOld); if (mndSetConsumerCommitLogs(pMnode, pTrans, pConsumerNew) != 0) { + ASSERT(0); goto REB_FAIL; } } @@ -469,9 +471,11 @@ static int32_t mndPersistRebResult(SMnode *pMnode, SRpcMsg *pMsg, const SMqRebOu taosArrayPush(pConsumerNew->rebRemovedTopics, &topic); mndReleaseConsumer(pMnode, pConsumerOld); if (mndSetConsumerCommitLogs(pMnode, pTrans, pConsumerNew) != 0) { + ASSERT(0); goto REB_FAIL; } } +#if 0 if (consumerNum) { char topic[TSDB_TOPIC_FNAME_LEN]; char cgroup[TSDB_CGROUP_LEN]; @@ -486,17 +490,24 @@ static int32_t mndPersistRebResult(SMnode *pMnode, SRpcMsg *pMsg, const SMqRebOu pTopic->refConsumerCnt = topicObj.refConsumerCnt; mInfo("subscribe topic %s unref %d consumer cgroup %s, refcnt %d", pTopic->name, consumerNum, cgroup, topicObj.refConsumerCnt); - if (mndSetTopicCommitLogs(pMnode, pTrans, &topicObj) != 0) goto REB_FAIL; + if (mndSetTopicCommitLogs(pMnode, pTrans, &topicObj) != 0) { + ASSERT(0); + goto REB_FAIL; + } } } +#endif // 4. TODO commit log: modification log // 5. set cb - mndTransSetCb(pTrans, MQ_REB_TRANS_START_FUNC, MQ_REB_TRANS_STOP_FUNC, NULL, 0); + mndTransSetCb(pTrans, TRANS_START_FUNC_MQ_REB, TRANS_STOP_FUNC_MQ_REB, NULL, 0); // 6. 
execution - if (mndTransPrepare(pMnode, pTrans) != 0) goto REB_FAIL; + if (mndTransPrepare(pMnode, pTrans) != 0) { + ASSERT(0); + goto REB_FAIL; + } mndTransDrop(pTrans); return 0; diff --git a/source/dnode/mnode/impl/src/mndSync.c b/source/dnode/mnode/impl/src/mndSync.c index 39f28c8dea9746f65b3926db13b0a289856e754a..245f0938b906300af29bf3f6caf71c834877eaa1 100644 --- a/source/dnode/mnode/impl/src/mndSync.c +++ b/source/dnode/mnode/impl/src/mndSync.c @@ -65,40 +65,14 @@ int32_t mndSyncGetSnapshot(struct SSyncFSM *pFsm, SSnapshot *pSnapshot) { void mndRestoreFinish(struct SSyncFSM *pFsm) { SMnode *pMnode = pFsm->data; if (!pMnode->deploy) { - mInfo("mnode sync restore finished"); + mInfo("mnode sync restore finished, and will handle outstanding transactions"); mndTransPullup(pMnode); mndSetRestore(pMnode, true); + } else { + mInfo("mnode sync restore finished, and will set ready after first deploy"); } } -int32_t mndSnapshotRead(struct SSyncFSM *pFsm, const SSnapshot *pSnapshot, void **ppIter, char **ppBuf, int32_t *len) { - SMnode *pMnode = pFsm->data; - mInfo("start to read snapshot from sdb"); - - // sdbStartRead - // sdbDoRead - // sdbStopRead - - return 0; -} - -int32_t mndSnapshotApply(struct SSyncFSM *pFsm, const SSnapshot *pSnapshot, char *pBuf, int32_t len) { - SMnode *pMnode = pFsm->data; - - // sdbStartWrite - // sdbDoWrite - - mndSetRestore(pMnode, false); - mInfo("start to apply snapshot to sdb"); - - // sdbStopWrite - mInfo("successfully to apply snapshot to sdb"); - mndSetRestore(pMnode, true); - - // taosMemoryFree(pBuf); - return 0; -} - void mndReConfig(struct SSyncFSM *pFsm, SSyncCfg newCfg, SReConfigCbMeta cbMeta) { SMnode *pMnode = pFsm->data; SSyncMgmt *pMgmt = &pMnode->syncMgmt; @@ -115,20 +89,55 @@ void mndReConfig(struct SSyncFSM *pFsm, SSyncCfg newCfg, SReConfigCbMeta cbMeta) } } +int32_t mndSnapshotStartRead(struct SSyncFSM *pFsm, void **ppReader) { + mInfo("start to read snapshot from sdb"); + SMnode *pMnode = pFsm->data; + return sdbStartRead(pMnode->pSdb, (SSdbIter **)ppReader); +} + +int32_t mndSnapshotStopRead(struct SSyncFSM *pFsm, void *pReader) { + mInfo("stop to read snapshot from sdb"); + SMnode *pMnode = pFsm->data; + return sdbStopRead(pMnode->pSdb, pReader); +} + +int32_t mndSnapshotDoRead(struct SSyncFSM *pFsm, void *pReader, void **ppBuf, int32_t *len) { + SMnode *pMnode = pFsm->data; + return sdbDoRead(pMnode->pSdb, pReader, ppBuf, len); +} + +int32_t mndSnapshotStartWrite(struct SSyncFSM *pFsm, void **ppWriter) { + mInfo("start to apply snapshot to sdb"); + SMnode *pMnode = pFsm->data; + return sdbStartWrite(pMnode->pSdb, (SSdbIter **)ppWriter); +} + +int32_t mndSnapshotStopWrite(struct SSyncFSM *pFsm, void *pWriter, bool isApply) { + mInfo("stop to apply snapshot to sdb, apply:%d", isApply); + SMnode *pMnode = pFsm->data; + return sdbStopWrite(pMnode->pSdb, pWriter, isApply); +} + +int32_t mndSnapshotDoWrite(struct SSyncFSM *pFsm, void *pWriter, void *pBuf, int32_t len) { + SMnode *pMnode = pFsm->data; + return sdbDoWrite(pMnode->pSdb, pWriter, pBuf, len); +} + SSyncFSM *mndSyncMakeFsm(SMnode *pMnode) { SSyncFSM *pFsm = taosMemoryCalloc(1, sizeof(SSyncFSM)); pFsm->data = pMnode; - pFsm->FpCommitCb = mndSyncCommitMsg; pFsm->FpPreCommitCb = NULL; pFsm->FpRollBackCb = NULL; - - pFsm->FpGetSnapshot = mndSyncGetSnapshot; pFsm->FpRestoreFinishCb = mndRestoreFinish; - pFsm->FpSnapshotRead = mndSnapshotRead; - pFsm->FpSnapshotApply = mndSnapshotApply; pFsm->FpReConfigCb = mndReConfig; - + pFsm->FpGetSnapshot = mndSyncGetSnapshot; + 
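Review note: the mndSync.c rework drops the stubbed one-shot FpSnapshotRead/FpSnapshotApply pair in favor of a start/do/stop lifecycle on each side, with every callback forwarding straight to the new sdb iterator API (sdbStartRead/sdbDoRead/sdbStopRead and the write mirror). A sketch of how the sync layer could pump the read side when replicating to a lagging follower; the len == 0 end-of-stream convention and the transport are my assumptions, not part of the hunk:

    static int32_t shipSnapshot(SSyncFSM *pFsm) {
      void *pReader = NULL;
      if (pFsm->FpSnapshotStartRead(pFsm, &pReader) != 0) return -1;
      while (1) {
        void   *pBuf = NULL;
        int32_t len  = 0;
        if (pFsm->FpSnapshotDoRead(pFsm, pReader, &pBuf, &len) != 0) break;
        if (len == 0) break;             /* assumed: no more chunks */
        /* hand (pBuf, len) to the transport here, then release pBuf */
      }
      return pFsm->FpSnapshotStopRead(pFsm, pReader);
    }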
pFsm->FpSnapshotStartRead = mndSnapshotStartRead; + pFsm->FpSnapshotStopRead = mndSnapshotStopRead; + pFsm->FpSnapshotDoRead = mndSnapshotDoRead; + pFsm->FpSnapshotStartWrite = mndSnapshotStartWrite; + pFsm->FpSnapshotStopWrite = mndSnapshotStopWrite; + pFsm->FpSnapshotDoWrite = mndSnapshotDoWrite; return pFsm; } @@ -235,7 +244,7 @@ void mndSyncStart(SMnode *pMnode) { } else { syncStart(pMgmt->sync); } - mDebug("sync:%" PRId64 " is started, standby:%d", pMgmt->sync, pMgmt->standby); + mDebug("mnode sync started, id:%" PRId64 " standby:%d", pMgmt->sync, pMgmt->standby); } void mndSyncStop(SMnode *pMnode) {} diff --git a/source/dnode/mnode/impl/src/mndTopic.c b/source/dnode/mnode/impl/src/mndTopic.c index 2048c798475062055520fe25e0249f411615b81f..02f06a0de81a5f2ae332ec7c5f77faad1136edd8 100644 --- a/source/dnode/mnode/impl/src/mndTopic.c +++ b/source/dnode/mnode/impl/src/mndTopic.c @@ -15,6 +15,7 @@ #include "mndTopic.h" #include "mndAuth.h" +#include "mndConsumer.h" #include "mndDb.h" #include "mndDnode.h" #include "mndMnode.h" @@ -95,9 +96,9 @@ SSdbRaw *mndTopicActionEncode(SMqTopicObj *pTopic) { SDB_SET_INT64(pRaw, dataPos, pTopic->dbUid, TOPIC_ENCODE_OVER); SDB_SET_INT32(pRaw, dataPos, pTopic->version, TOPIC_ENCODE_OVER); SDB_SET_INT8(pRaw, dataPos, pTopic->subType, TOPIC_ENCODE_OVER); - SDB_SET_INT8(pRaw, dataPos, pTopic->withTbName, TOPIC_ENCODE_OVER); - SDB_SET_INT8(pRaw, dataPos, pTopic->withSchema, TOPIC_ENCODE_OVER); - SDB_SET_INT8(pRaw, dataPos, pTopic->withTag, TOPIC_ENCODE_OVER); + /*SDB_SET_INT8(pRaw, dataPos, pTopic->withTbName, TOPIC_ENCODE_OVER);*/ + /*SDB_SET_INT8(pRaw, dataPos, pTopic->withSchema, TOPIC_ENCODE_OVER);*/ + /*SDB_SET_INT8(pRaw, dataPos, pTopic->withTag, TOPIC_ENCODE_OVER);*/ SDB_SET_INT32(pRaw, dataPos, pTopic->consumerCnt, TOPIC_ENCODE_OVER); SDB_SET_INT32(pRaw, dataPos, pTopic->sqlLen, TOPIC_ENCODE_OVER); @@ -121,7 +122,7 @@ SSdbRaw *mndTopicActionEncode(SMqTopicObj *pTopic) { SDB_SET_BINARY(pRaw, dataPos, swBuf, schemaLen, TOPIC_ENCODE_OVER); } - SDB_SET_INT32(pRaw, dataPos, pTopic->refConsumerCnt, TOPIC_ENCODE_OVER); + /*SDB_SET_INT32(pRaw, dataPos, pTopic->refConsumerCnt, TOPIC_ENCODE_OVER);*/ SDB_SET_RESERVE(pRaw, dataPos, MND_TOPIC_RESERVE_SIZE, TOPIC_ENCODE_OVER); SDB_SET_DATALEN(pRaw, dataPos, TOPIC_ENCODE_OVER); @@ -167,9 +168,9 @@ SSdbRow *mndTopicActionDecode(SSdbRaw *pRaw) { SDB_GET_INT64(pRaw, dataPos, &pTopic->dbUid, TOPIC_DECODE_OVER); SDB_GET_INT32(pRaw, dataPos, &pTopic->version, TOPIC_DECODE_OVER); SDB_GET_INT8(pRaw, dataPos, &pTopic->subType, TOPIC_DECODE_OVER); - SDB_GET_INT8(pRaw, dataPos, &pTopic->withTbName, TOPIC_DECODE_OVER); - SDB_GET_INT8(pRaw, dataPos, &pTopic->withSchema, TOPIC_DECODE_OVER); - SDB_GET_INT8(pRaw, dataPos, &pTopic->withTag, TOPIC_DECODE_OVER); + /*SDB_GET_INT8(pRaw, dataPos, &pTopic->withTbName, TOPIC_DECODE_OVER);*/ + /*SDB_GET_INT8(pRaw, dataPos, &pTopic->withSchema, TOPIC_DECODE_OVER);*/ + /*SDB_GET_INT8(pRaw, dataPos, &pTopic->withTag, TOPIC_DECODE_OVER);*/ SDB_GET_INT32(pRaw, dataPos, &pTopic->consumerCnt, TOPIC_DECODE_OVER); @@ -221,7 +222,7 @@ SSdbRow *mndTopicActionDecode(SSdbRaw *pRaw) { pTopic->schema.pSchema = NULL; } - SDB_GET_INT32(pRaw, dataPos, &pTopic->refConsumerCnt, TOPIC_DECODE_OVER); + /*SDB_GET_INT32(pRaw, dataPos, &pTopic->refConsumerCnt, TOPIC_DECODE_OVER);*/ SDB_GET_RESERVE(pRaw, dataPos, MND_TOPIC_RESERVE_SIZE, TOPIC_DECODE_OVER); @@ -253,7 +254,7 @@ static int32_t mndTopicActionUpdate(SSdb *pSdb, SMqTopicObj *pOldTopic, SMqTopic atomic_exchange_64(&pOldTopic->updateTime, 
pNewTopic->updateTime); atomic_exchange_32(&pOldTopic->version, pNewTopic->version); - atomic_store_32(&pOldTopic->refConsumerCnt, pNewTopic->refConsumerCnt); + /*atomic_store_32(&pOldTopic->refConsumerCnt, pNewTopic->refConsumerCnt);*/ /*taosWLockLatch(&pOldTopic->lock);*/ @@ -307,11 +308,19 @@ static SDDropTopicReq *mndBuildDropTopicMsg(SMnode *pMnode, SVgObj *pVgroup, SMq } static int32_t mndCheckCreateTopicReq(SCMCreateTopicReq *pCreate) { - if (pCreate->name[0] == 0 || pCreate->sql == NULL || pCreate->sql[0] == 0 || pCreate->subscribeDbName[0] == 0) { - terrno = TSDB_CODE_MND_INVALID_TOPIC; - return -1; + terrno = TSDB_CODE_MND_INVALID_TOPIC; + + if (pCreate->sql == NULL) return -1; + + if (pCreate->subType == TOPIC_SUB_TYPE__COLUMN) { + if (pCreate->ast == NULL || pCreate->ast[0] == 0) return -1; + } else if (pCreate->subType == TOPIC_SUB_TYPE__TABLE) { + if (pCreate->subStbName[0] == 0) return -1; + } else if (pCreate->subType == TOPIC_SUB_TYPE__DB) { + if (pCreate->subDbName[0] == 0) return -1; } + terrno = TSDB_CODE_SUCCESS; return 0; } @@ -327,14 +336,13 @@ static int32_t mndCreateTopic(SMnode *pMnode, SRpcMsg *pReq, SCMCreateTopicReq * topicObj.version = 1; topicObj.sql = strdup(pCreate->sql); topicObj.sqlLen = strlen(pCreate->sql) + 1; - topicObj.refConsumerCnt = 0; + topicObj.subType = pCreate->subType; - if (pCreate->ast && pCreate->ast[0]) { + if (pCreate->subType == TOPIC_SUB_TYPE__COLUMN) { topicObj.ast = strdup(pCreate->ast); topicObj.astLen = strlen(pCreate->ast) + 1; - topicObj.subType = TOPIC_SUB_TYPE__TABLE; - topicObj.withTbName = pCreate->withTbName; - topicObj.withSchema = pCreate->withSchema; + /*topicObj.withTbName = pCreate->withTbName;*/ + /*topicObj.withSchema = pCreate->withSchema;*/ SNode *pAst = NULL; if (nodesStringToNode(pCreate->ast, &pAst) != 0) { @@ -367,13 +375,12 @@ static int32_t mndCreateTopic(SMnode *pMnode, SRpcMsg *pReq, SCMCreateTopicReq * taosMemoryFree(topicObj.sql); return -1; } - } else { - topicObj.ast = NULL; - topicObj.astLen = 0; - topicObj.physicalPlan = NULL; - topicObj.subType = TOPIC_SUB_TYPE__DB; - topicObj.withTbName = 1; - topicObj.withSchema = 1; + /*} else if (pCreate->subType == TOPIC_SUB_TYPE__DB) {*/ + /*topicObj.ast = NULL;*/ + /*topicObj.astLen = 0;*/ + /*topicObj.physicalPlan = NULL;*/ + /*topicObj.withTbName = 1;*/ + /*topicObj.withSchema = 1;*/ } STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_TYPE_CREATE_TOPIC, pReq); @@ -441,7 +448,7 @@ static int32_t mndProcessCreateTopicReq(SRpcMsg *pReq) { goto CREATE_TOPIC_OVER; } - pDb = mndAcquireDb(pMnode, createTopicReq.subscribeDbName); + pDb = mndAcquireDb(pMnode, createTopicReq.subDbName); if (pDb == NULL) { terrno = TSDB_CODE_MND_DB_NOT_SELECTED; goto CREATE_TOPIC_OVER; @@ -492,8 +499,8 @@ static int32_t mndDropTopic(SMnode *pMnode, STrans *pTrans, SRpcMsg *pReq, SMqTo } static int32_t mndProcessDropTopicReq(SRpcMsg *pReq) { - SMnode *pMnode = pReq->info.node; - /*SSdb *pSdb = pMnode->pSdb;*/ + SMnode *pMnode = pReq->info.node; + SSdb *pSdb = pMnode->pSdb; SMDropTopicReq dropReq = {0}; if (tDeserializeSMDropTopicReq(pReq->pCont, pReq->contLen, &dropReq) != 0) { @@ -513,12 +520,36 @@ static int32_t mndProcessDropTopicReq(SRpcMsg *pReq) { } } + void *pIter = NULL; + SMqConsumerObj *pConsumer; + while (1) { + pIter = sdbFetch(pSdb, SDB_CONSUMER, pIter, (void **)&pConsumer); + if (pIter == NULL) break; + + if (pConsumer->status == MQ_CONSUMER_STATUS__LOST_REBD) continue; + int32_t sz = taosArrayGetSize(pConsumer->assignedTopics); + for (int32_t i = 0; i < sz; 
i++) { + char *name = taosArrayGetP(pConsumer->assignedTopics, i); + if (strcmp(name, pTopic->name) == 0) { + mndReleaseConsumer(pMnode, pConsumer); + mndReleaseTopic(pMnode, pTopic); + terrno = TSDB_CODE_MND_TOPIC_SUBSCRIBED; + mError("topic:%s, failed to drop since subscribed by consumer %ld from cgroup %s", dropReq.name, + pConsumer->consumerId, pConsumer->cgroup); + return -1; + } + } + sdbRelease(pSdb, pConsumer); + } + +#if 0 if (pTopic->refConsumerCnt != 0) { mndReleaseTopic(pMnode, pTopic); terrno = TSDB_CODE_MND_TOPIC_SUBSCRIBED; mError("topic:%s, failed to drop since %s", dropReq.name, terrstr()); return -1; } +#endif STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_TYPE_DROP_TOPIC, pReq); if (pTrans == NULL) { diff --git a/source/dnode/mnode/impl/src/mndTrans.c b/source/dnode/mnode/impl/src/mndTrans.c index a8e78ddafeae7861456c7271ec978097c56ef9f5..ad6388c585139b537832b0c4fe9d1f61d5569e27 100644 --- a/source/dnode/mnode/impl/src/mndTrans.c +++ b/source/dnode/mnode/impl/src/mndTrans.c @@ -37,19 +37,18 @@ static int32_t mndTransAppendAction(SArray *pArray, STransAction *pAction); static void mndTransDropLogs(SArray *pArray); static void mndTransDropActions(SArray *pArray); static void mndTransDropData(STrans *pTrans); -static int32_t mndTransExecuteLogs(SMnode *pMnode, SArray *pArray); static int32_t mndTransExecuteActions(SMnode *pMnode, STrans *pTrans, SArray *pArray); static int32_t mndTransExecuteRedoLogs(SMnode *pMnode, STrans *pTrans); static int32_t mndTransExecuteUndoLogs(SMnode *pMnode, STrans *pTrans); static int32_t mndTransExecuteRedoActions(SMnode *pMnode, STrans *pTrans); static int32_t mndTransExecuteUndoActions(SMnode *pMnode, STrans *pTrans); -static int32_t mndTransExecuteCommitLogs(SMnode *pMnode, STrans *pTrans); +static int32_t mndTransExecuteCommitActions(SMnode *pMnode, STrans *pTrans); static bool mndTransPerformPrepareStage(SMnode *pMnode, STrans *pTrans); static bool mndTransPerformRedoLogStage(SMnode *pMnode, STrans *pTrans); static bool mndTransPerformRedoActionStage(SMnode *pMnode, STrans *pTrans); static bool mndTransPerformUndoLogStage(SMnode *pMnode, STrans *pTrans); static bool mndTransPerformUndoActionStage(SMnode *pMnode, STrans *pTrans); -static bool mndTransPerformCommitLogStage(SMnode *pMnode, STrans *pTrans); +static bool mndTransPerformCommitActionStage(SMnode *pMnode, STrans *pTrans); static bool mndTransPerformCommitStage(SMnode *pMnode, STrans *pTrans); static bool mndTransPerformRollbackStage(SMnode *pMnode, STrans *pTrans); static bool mndTransPerfromFinishedStage(SMnode *pMnode, STrans *pTrans); @@ -83,40 +82,30 @@ int32_t mndInitTrans(SMnode *pMnode) { void mndCleanupTrans(SMnode *pMnode) {} -static SSdbRaw *mndTransActionEncode(STrans *pTrans) { - terrno = TSDB_CODE_OUT_OF_MEMORY; - - int32_t rawDataLen = sizeof(STrans) + TRANS_RESERVE_SIZE; - int32_t redoLogNum = taosArrayGetSize(pTrans->redoLogs); - int32_t undoLogNum = taosArrayGetSize(pTrans->undoLogs); - int32_t commitLogNum = taosArrayGetSize(pTrans->commitLogs); - int32_t redoActionNum = taosArrayGetSize(pTrans->redoActions); - int32_t undoActionNum = taosArrayGetSize(pTrans->undoActions); - - for (int32_t i = 0; i < redoLogNum; ++i) { - SSdbRaw *pTmp = taosArrayGetP(pTrans->redoLogs, i); - rawDataLen += (sdbGetRawTotalSize(pTmp) + sizeof(int32_t)); - } +static int32_t mndTransGetActionsSize(SArray *pArray) { + int32_t actionNum = taosArrayGetSize(pArray); + int32_t rawDataLen = 0; - for (int32_t i = 0; i < undoLogNum; ++i) { - SSdbRaw *pTmp = 
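Review note on the drop-topic scan in mndTopic.c above: elsewhere in the mnode every sdbFetch is paired with an sdbRelease, so if fetch pins the row, the LOST_REBD skip path leaks a reference, and the early return -1 on a subscribed match abandons the iterator without (what I assume is) the usual sdbCancelFetch. If those semantics hold, the skip should look like:

    if (pConsumer->status == MQ_CONSUMER_STATUS__LOST_REBD) {
      sdbRelease(pSdb, pConsumer); /* drop the ref taken by sdbFetch */
      continue;
    }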
taosArrayGetP(pTrans->undoLogs, i); - rawDataLen += (sdbGetRawTotalSize(pTmp) + sizeof(int32_t)); + for (int32_t i = 0; i < actionNum; ++i) { + STransAction *pAction = taosArrayGet(pArray, i); + if (pAction->isRaw) { + rawDataLen += (sdbGetRawTotalSize(pAction->pRaw) + sizeof(int32_t)); + } else { + rawDataLen += (sizeof(STransAction) + pAction->contLen); + } + rawDataLen += sizeof(pAction->isRaw); } - for (int32_t i = 0; i < commitLogNum; ++i) { - SSdbRaw *pTmp = taosArrayGetP(pTrans->commitLogs, i); - rawDataLen += (sdbGetRawTotalSize(pTmp) + sizeof(int32_t)); - } + return rawDataLen; +} - for (int32_t i = 0; i < redoActionNum; ++i) { - STransAction *pAction = taosArrayGet(pTrans->redoActions, i); - rawDataLen += (sizeof(STransAction) + pAction->contLen); - } +static SSdbRaw *mndTransActionEncode(STrans *pTrans) { + terrno = TSDB_CODE_OUT_OF_MEMORY; - for (int32_t i = 0; i < undoActionNum; ++i) { - STransAction *pAction = taosArrayGet(pTrans->undoActions, i); - rawDataLen += (sizeof(STransAction) + pAction->contLen); - } + int32_t rawDataLen = sizeof(STrans) + TRANS_RESERVE_SIZE; + rawDataLen += mndTransGetActionsSize(pTrans->redoActions); + rawDataLen += mndTransGetActionsSize(pTrans->undoActions); + rawDataLen += mndTransGetActionsSize(pTrans->commitActions); SSdbRaw *pRaw = sdbAllocRaw(SDB_TRANS, TRANS_VER_NUMBER, rawDataLen); if (pRaw == NULL) { @@ -126,67 +115,85 @@ static SSdbRaw *mndTransActionEncode(STrans *pTrans) { int32_t dataPos = 0; SDB_SET_INT32(pRaw, dataPos, pTrans->id, _OVER) - - ETrnStage stage = pTrans->stage; - if (stage == TRN_STAGE_REDO_LOG || stage == TRN_STAGE_REDO_ACTION) { - stage = TRN_STAGE_PREPARE; - } else if (stage == TRN_STAGE_UNDO_ACTION || stage == TRN_STAGE_UNDO_LOG) { - stage = TRN_STAGE_ROLLBACK; - } else if (stage == TRN_STAGE_COMMIT_LOG || stage == TRN_STAGE_FINISHED) { - stage = TRN_STAGE_COMMIT; - } else { - } - - SDB_SET_INT16(pRaw, dataPos, stage, _OVER) + SDB_SET_INT16(pRaw, dataPos, pTrans->stage, _OVER) SDB_SET_INT16(pRaw, dataPos, pTrans->policy, _OVER) SDB_SET_INT16(pRaw, dataPos, pTrans->type, _OVER) SDB_SET_INT16(pRaw, dataPos, pTrans->parallel, _OVER) SDB_SET_INT64(pRaw, dataPos, pTrans->createdTime, _OVER) - SDB_SET_INT64(pRaw, dataPos, pTrans->dbUid, _OVER) SDB_SET_BINARY(pRaw, dataPos, pTrans->dbname, TSDB_DB_FNAME_LEN, _OVER) - SDB_SET_INT32(pRaw, dataPos, redoLogNum, _OVER) - SDB_SET_INT32(pRaw, dataPos, undoLogNum, _OVER) - SDB_SET_INT32(pRaw, dataPos, commitLogNum, _OVER) + SDB_SET_INT32(pRaw, dataPos, pTrans->redoActionPos, _OVER) + + int32_t redoActionNum = taosArrayGetSize(pTrans->redoActions); + int32_t undoActionNum = taosArrayGetSize(pTrans->undoActions); + int32_t commitActionNum = taosArrayGetSize(pTrans->commitActions); SDB_SET_INT32(pRaw, dataPos, redoActionNum, _OVER) SDB_SET_INT32(pRaw, dataPos, undoActionNum, _OVER) - - for (int32_t i = 0; i < redoLogNum; ++i) { - SSdbRaw *pTmp = taosArrayGetP(pTrans->redoLogs, i); - int32_t len = sdbGetRawTotalSize(pTmp); - SDB_SET_INT32(pRaw, dataPos, len, _OVER) - SDB_SET_BINARY(pRaw, dataPos, (void *)pTmp, len, _OVER) - } - - for (int32_t i = 0; i < undoLogNum; ++i) { - SSdbRaw *pTmp = taosArrayGetP(pTrans->undoLogs, i); - int32_t len = sdbGetRawTotalSize(pTmp); - SDB_SET_INT32(pRaw, dataPos, len, _OVER) - SDB_SET_BINARY(pRaw, dataPos, (void *)pTmp, len, _OVER) - } - - for (int32_t i = 0; i < commitLogNum; ++i) { - SSdbRaw *pTmp = taosArrayGetP(pTrans->commitLogs, i); - int32_t len = sdbGetRawTotalSize(pTmp); - SDB_SET_INT32(pRaw, dataPos, len, _OVER) - SDB_SET_BINARY(pRaw, 
dataPos, (void *)pTmp, len, _OVER) - } + SDB_SET_INT32(pRaw, dataPos, commitActionNum, _OVER) for (int32_t i = 0; i < redoActionNum; ++i) { STransAction *pAction = taosArrayGet(pTrans->redoActions, i); - SDB_SET_BINARY(pRaw, dataPos, (void *)&pAction->epSet, sizeof(SEpSet), _OVER) - SDB_SET_INT16(pRaw, dataPos, pAction->msgType, _OVER) + SDB_SET_INT32(pRaw, dataPos, pAction->id, _OVER) + SDB_SET_INT32(pRaw, dataPos, pAction->errCode, _OVER) SDB_SET_INT32(pRaw, dataPos, pAction->acceptableCode, _OVER) - SDB_SET_INT32(pRaw, dataPos, pAction->contLen, _OVER) - SDB_SET_BINARY(pRaw, dataPos, pAction->pCont, pAction->contLen, _OVER) + SDB_SET_INT8(pRaw, dataPos, pAction->isRaw, _OVER) + SDB_SET_INT8(pRaw, dataPos, pAction->stage, _OVER) + if (pAction->isRaw) { + int32_t len = sdbGetRawTotalSize(pAction->pRaw); + SDB_SET_INT8(pRaw, dataPos, pAction->rawWritten, _OVER) + SDB_SET_INT32(pRaw, dataPos, len, _OVER) + SDB_SET_BINARY(pRaw, dataPos, (void *)pAction->pRaw, len, _OVER) + } else { + SDB_SET_BINARY(pRaw, dataPos, (void *)&pAction->epSet, sizeof(SEpSet), _OVER) + SDB_SET_INT16(pRaw, dataPos, pAction->msgType, _OVER) + SDB_SET_INT8(pRaw, dataPos, pAction->msgSent, _OVER) + SDB_SET_INT8(pRaw, dataPos, pAction->msgReceived, _OVER) + SDB_SET_INT32(pRaw, dataPos, pAction->contLen, _OVER) + SDB_SET_BINARY(pRaw, dataPos, pAction->pCont, pAction->contLen, _OVER) + } } for (int32_t i = 0; i < undoActionNum; ++i) { STransAction *pAction = taosArrayGet(pTrans->undoActions, i); - SDB_SET_BINARY(pRaw, dataPos, (void *)&pAction->epSet, sizeof(SEpSet), _OVER) - SDB_SET_INT16(pRaw, dataPos, pAction->msgType, _OVER) + SDB_SET_INT32(pRaw, dataPos, pAction->id, _OVER) + SDB_SET_INT32(pRaw, dataPos, pAction->errCode, _OVER) + SDB_SET_INT32(pRaw, dataPos, pAction->acceptableCode, _OVER) + SDB_SET_INT8(pRaw, dataPos, pAction->isRaw, _OVER) + SDB_SET_INT8(pRaw, dataPos, pAction->stage, _OVER) + if (pAction->isRaw) { + int32_t len = sdbGetRawTotalSize(pAction->pRaw); + SDB_SET_INT8(pRaw, dataPos, pAction->rawWritten, _OVER) + SDB_SET_INT32(pRaw, dataPos, len, _OVER) + SDB_SET_BINARY(pRaw, dataPos, (void *)pAction->pRaw, len, _OVER) + } else { + SDB_SET_BINARY(pRaw, dataPos, (void *)&pAction->epSet, sizeof(SEpSet), _OVER) + SDB_SET_INT16(pRaw, dataPos, pAction->msgType, _OVER) + SDB_SET_INT8(pRaw, dataPos, pAction->msgSent, _OVER) + SDB_SET_INT8(pRaw, dataPos, pAction->msgReceived, _OVER) + SDB_SET_INT32(pRaw, dataPos, pAction->contLen, _OVER) + SDB_SET_BINARY(pRaw, dataPos, pAction->pCont, pAction->contLen, _OVER) + } + } + + for (int32_t i = 0; i < commitActionNum; ++i) { + STransAction *pAction = taosArrayGet(pTrans->commitActions, i); + SDB_SET_INT32(pRaw, dataPos, pAction->id, _OVER) + SDB_SET_INT32(pRaw, dataPos, pAction->errCode, _OVER) SDB_SET_INT32(pRaw, dataPos, pAction->acceptableCode, _OVER) - SDB_SET_INT32(pRaw, dataPos, pAction->contLen, _OVER) - SDB_SET_BINARY(pRaw, dataPos, (void *)pAction->pCont, pAction->contLen, _OVER) + SDB_SET_INT8(pRaw, dataPos, pAction->isRaw, _OVER) + SDB_SET_INT8(pRaw, dataPos, pAction->stage, _OVER) + if (pAction->isRaw) { + int32_t len = sdbGetRawTotalSize(pAction->pRaw); + SDB_SET_INT8(pRaw, dataPos, pAction->rawWritten, _OVER) + SDB_SET_INT32(pRaw, dataPos, len, _OVER) + SDB_SET_BINARY(pRaw, dataPos, (void *)pAction->pRaw, len, _OVER) + } else { + SDB_SET_BINARY(pRaw, dataPos, (void *)&pAction->epSet, sizeof(SEpSet), _OVER) + SDB_SET_INT16(pRaw, dataPos, pAction->msgType, _OVER) + SDB_SET_INT8(pRaw, dataPos, pAction->msgSent, _OVER) + SDB_SET_INT8(pRaw, dataPos, 
pAction->msgReceived, _OVER) + SDB_SET_INT32(pRaw, dataPos, pAction->contLen, _OVER) + SDB_SET_BINARY(pRaw, dataPos, pAction->pCont, pAction->contLen, _OVER) + } } SDB_SET_INT32(pRaw, dataPos, pTrans->startFunc, _OVER) @@ -220,11 +227,9 @@ static SSdbRow *mndTransActionDecode(SSdbRaw *pRaw) { char *pData = NULL; int32_t dataLen = 0; int8_t sver = 0; - int32_t redoLogNum = 0; - int32_t undoLogNum = 0; - int32_t commitLogNum = 0; int32_t redoActionNum = 0; int32_t undoActionNum = 0; + int32_t commitActionNum = 0; int32_t dataPos = 0; STransAction action = {0}; @@ -256,78 +261,105 @@ static SSdbRow *mndTransActionDecode(SSdbRaw *pRaw) { pTrans->type = type; pTrans->parallel = parallel; SDB_GET_INT64(pRaw, dataPos, &pTrans->createdTime, _OVER) - SDB_GET_INT64(pRaw, dataPos, &pTrans->dbUid, _OVER) SDB_GET_BINARY(pRaw, dataPos, pTrans->dbname, TSDB_DB_FNAME_LEN, _OVER) - SDB_GET_INT32(pRaw, dataPos, &redoLogNum, _OVER) - SDB_GET_INT32(pRaw, dataPos, &undoLogNum, _OVER) - SDB_GET_INT32(pRaw, dataPos, &commitLogNum, _OVER) + SDB_GET_INT32(pRaw, dataPos, &pTrans->redoActionPos, _OVER) SDB_GET_INT32(pRaw, dataPos, &redoActionNum, _OVER) SDB_GET_INT32(pRaw, dataPos, &undoActionNum, _OVER) + SDB_GET_INT32(pRaw, dataPos, &commitActionNum, _OVER) - pTrans->redoLogs = taosArrayInit(redoLogNum, sizeof(void *)); - pTrans->undoLogs = taosArrayInit(undoLogNum, sizeof(void *)); - pTrans->commitLogs = taosArrayInit(commitLogNum, sizeof(void *)); pTrans->redoActions = taosArrayInit(redoActionNum, sizeof(STransAction)); pTrans->undoActions = taosArrayInit(undoActionNum, sizeof(STransAction)); + pTrans->commitActions = taosArrayInit(commitActionNum, sizeof(STransAction)); - if (pTrans->redoLogs == NULL) goto _OVER; - if (pTrans->undoLogs == NULL) goto _OVER; - if (pTrans->commitLogs == NULL) goto _OVER; if (pTrans->redoActions == NULL) goto _OVER; if (pTrans->undoActions == NULL) goto _OVER; - - for (int32_t i = 0; i < redoLogNum; ++i) { - SDB_GET_INT32(pRaw, dataPos, &dataLen, _OVER) - pData = taosMemoryMalloc(dataLen); - if (pData == NULL) goto _OVER; - mTrace("raw:%p, is created", pData); - SDB_GET_BINARY(pRaw, dataPos, pData, dataLen, _OVER); - if (taosArrayPush(pTrans->redoLogs, &pData) == NULL) goto _OVER; - pData = NULL; - } - - for (int32_t i = 0; i < undoLogNum; ++i) { - SDB_GET_INT32(pRaw, dataPos, &dataLen, _OVER) - pData = taosMemoryMalloc(dataLen); - if (pData == NULL) goto _OVER; - mTrace("raw:%p, is created", pData); - SDB_GET_BINARY(pRaw, dataPos, pData, dataLen, _OVER); - if (taosArrayPush(pTrans->undoLogs, &pData) == NULL) goto _OVER; - pData = NULL; - } - - for (int32_t i = 0; i < commitLogNum; ++i) { - SDB_GET_INT32(pRaw, dataPos, &dataLen, _OVER) - pData = taosMemoryMalloc(dataLen); - if (pData == NULL) goto _OVER; - mTrace("raw:%p, is created", pData); - SDB_GET_BINARY(pRaw, dataPos, pData, dataLen, _OVER); - if (taosArrayPush(pTrans->commitLogs, &pData) == NULL) goto _OVER; - pData = NULL; - } + if (pTrans->commitActions == NULL) goto _OVER; for (int32_t i = 0; i < redoActionNum; ++i) { - SDB_GET_BINARY(pRaw, dataPos, (void *)&action.epSet, sizeof(SEpSet), _OVER); - SDB_GET_INT16(pRaw, dataPos, &action.msgType, _OVER) + SDB_GET_INT32(pRaw, dataPos, &action.id, _OVER) + SDB_GET_INT32(pRaw, dataPos, &action.errCode, _OVER) SDB_GET_INT32(pRaw, dataPos, &action.acceptableCode, _OVER) - SDB_GET_INT32(pRaw, dataPos, &action.contLen, _OVER) - action.pCont = taosMemoryMalloc(action.contLen); - if (action.pCont == NULL) goto _OVER; - SDB_GET_BINARY(pRaw, dataPos, action.pCont, action.contLen, 
_OVER); - if (taosArrayPush(pTrans->redoActions, &action) == NULL) goto _OVER; - action.pCont = NULL; + SDB_GET_INT8(pRaw, dataPos, &action.isRaw, _OVER) + SDB_GET_INT8(pRaw, dataPos, &action.stage, _OVER) + if (action.isRaw) { + SDB_GET_INT8(pRaw, dataPos, &action.rawWritten, _OVER) + SDB_GET_INT32(pRaw, dataPos, &dataLen, _OVER) + action.pRaw = taosMemoryMalloc(dataLen); + if (action.pRaw == NULL) goto _OVER; + mTrace("raw:%p, is created", pData); + SDB_GET_BINARY(pRaw, dataPos, (void *)action.pRaw, dataLen, _OVER); + if (taosArrayPush(pTrans->redoActions, &action) == NULL) goto _OVER; + action.pRaw = NULL; + } else { + SDB_GET_BINARY(pRaw, dataPos, (void *)&action.epSet, sizeof(SEpSet), _OVER); + SDB_GET_INT16(pRaw, dataPos, &action.msgType, _OVER) + SDB_GET_INT8(pRaw, dataPos, &action.msgSent, _OVER) + SDB_GET_INT8(pRaw, dataPos, &action.msgReceived, _OVER) + SDB_GET_INT32(pRaw, dataPos, &action.contLen, _OVER) + action.pCont = taosMemoryMalloc(action.contLen); + if (action.pCont == NULL) goto _OVER; + SDB_GET_BINARY(pRaw, dataPos, action.pCont, action.contLen, _OVER); + if (taosArrayPush(pTrans->redoActions, &action) == NULL) goto _OVER; + action.pCont = NULL; + } } for (int32_t i = 0; i < undoActionNum; ++i) { - SDB_GET_BINARY(pRaw, dataPos, (void *)&action.epSet, sizeof(SEpSet), _OVER); - SDB_GET_INT16(pRaw, dataPos, &action.msgType, _OVER) + SDB_GET_INT32(pRaw, dataPos, &action.id, _OVER) + SDB_GET_INT32(pRaw, dataPos, &action.errCode, _OVER) + SDB_GET_INT32(pRaw, dataPos, &action.acceptableCode, _OVER) + SDB_GET_INT8(pRaw, dataPos, &action.isRaw, _OVER) + SDB_GET_INT8(pRaw, dataPos, &action.stage, _OVER) + if (action.isRaw) { + SDB_GET_INT8(pRaw, dataPos, &action.rawWritten, _OVER) + SDB_GET_INT32(pRaw, dataPos, &dataLen, _OVER) + action.pRaw = taosMemoryMalloc(dataLen); + if (action.pRaw == NULL) goto _OVER; + mTrace("raw:%p, is created", pData); + SDB_GET_BINARY(pRaw, dataPos, (void *)action.pRaw, dataLen, _OVER); + if (taosArrayPush(pTrans->undoActions, &action) == NULL) goto _OVER; + action.pRaw = NULL; + } else { + SDB_GET_BINARY(pRaw, dataPos, (void *)&action.epSet, sizeof(SEpSet), _OVER); + SDB_GET_INT16(pRaw, dataPos, &action.msgType, _OVER) + SDB_GET_INT8(pRaw, dataPos, &action.msgSent, _OVER) + SDB_GET_INT8(pRaw, dataPos, &action.msgReceived, _OVER) + SDB_GET_INT32(pRaw, dataPos, &action.contLen, _OVER) + action.pCont = taosMemoryMalloc(action.contLen); + if (action.pCont == NULL) goto _OVER; + SDB_GET_BINARY(pRaw, dataPos, action.pCont, action.contLen, _OVER); + if (taosArrayPush(pTrans->undoActions, &action) == NULL) goto _OVER; + action.pCont = NULL; + } + } + + for (int32_t i = 0; i < commitActionNum; ++i) { + SDB_GET_INT32(pRaw, dataPos, &action.id, _OVER) + SDB_GET_INT32(pRaw, dataPos, &action.errCode, _OVER) SDB_GET_INT32(pRaw, dataPos, &action.acceptableCode, _OVER) - SDB_GET_INT32(pRaw, dataPos, &action.contLen, _OVER) - action.pCont = taosMemoryMalloc(action.contLen); - if (action.pCont == NULL) goto _OVER; - SDB_GET_BINARY(pRaw, dataPos, action.pCont, action.contLen, _OVER); - if (taosArrayPush(pTrans->undoActions, &action) == NULL) goto _OVER; - action.pCont = NULL; + SDB_GET_INT8(pRaw, dataPos, &action.isRaw, _OVER) + SDB_GET_INT8(pRaw, dataPos, &action.stage, _OVER) + if (action.isRaw) { + SDB_GET_INT8(pRaw, dataPos, &action.rawWritten, _OVER) + SDB_GET_INT32(pRaw, dataPos, &dataLen, _OVER) + action.pRaw = taosMemoryMalloc(dataLen); + if (action.pRaw == NULL) goto _OVER; + mTrace("raw:%p, is created", pData); + SDB_GET_BINARY(pRaw, dataPos, (void 
*)action.pRaw, dataLen, _OVER); + if (taosArrayPush(pTrans->commitActions, &action) == NULL) goto _OVER; + action.pRaw = NULL; + } else { + SDB_GET_BINARY(pRaw, dataPos, (void *)&action.epSet, sizeof(SEpSet), _OVER); + SDB_GET_INT16(pRaw, dataPos, &action.msgType, _OVER) + SDB_GET_INT8(pRaw, dataPos, &action.msgSent, _OVER) + SDB_GET_INT8(pRaw, dataPos, &action.msgReceived, _OVER) + SDB_GET_INT32(pRaw, dataPos, &action.contLen, _OVER) + action.pCont = taosMemoryMalloc(action.contLen); + if (action.pCont == NULL) goto _OVER; + SDB_GET_BINARY(pRaw, dataPos, action.pCont, action.contLen, _OVER); + if (taosArrayPush(pTrans->commitActions, &action) == NULL) goto _OVER; + action.pCont = NULL; + } } SDB_GET_INT32(pRaw, dataPos, &pTrans->startFunc, _OVER) @@ -347,7 +379,7 @@ _OVER: mError("trans:%d, failed to parse from raw:%p since %s", pTrans->id, pRaw, terrstr()); mndTransDropData(pTrans); taosMemoryFreeClear(pRow); - taosMemoryFreeClear(pData); + taosMemoryFreeClear(action.pRaw); taosMemoryFreeClear(action.pCont); return NULL; } @@ -360,20 +391,16 @@ static const char *mndTransStr(ETrnStage stage) { switch (stage) { case TRN_STAGE_PREPARE: return "prepare"; - case TRN_STAGE_REDO_LOG: - return "redoLog"; case TRN_STAGE_REDO_ACTION: return "redoAction"; - case TRN_STAGE_COMMIT: - return "commit"; - case TRN_STAGE_COMMIT_LOG: - return "commitLog"; - case TRN_STAGE_UNDO_ACTION: - return "undoAction"; - case TRN_STAGE_UNDO_LOG: - return "undoLog"; case TRN_STAGE_ROLLBACK: return "rollback"; + case TRN_STAGE_UNDO_ACTION: + return "undoAction"; + case TRN_STAGE_COMMIT: + return "commit"; + case TRN_STAGE_COMMIT_ACTION: + return "commitAction"; case TRN_STAGE_FINISHED: return "finished"; default: @@ -464,15 +491,15 @@ static void mndTransTestStopFunc(SMnode *pMnode, void *param, int32_t paramLen) mInfo("test trans stop, param:%s, len:%d", (char *)param, paramLen); } -static TransCbFp mndTransGetCbFp(ETrnFuncType ftype) { +static TransCbFp mndTransGetCbFp(ETrnFunc ftype) { switch (ftype) { - case TEST_TRANS_START_FUNC: + case TRANS_START_FUNC_TEST: return mndTransTestStartFunc; - case TEST_TRANS_STOP_FUNC: + case TRANS_STOP_FUNC_TEST: return mndTransTestStopFunc; - case MQ_REB_TRANS_START_FUNC: + case TRANS_START_FUNC_MQ_REB: return mndRebCntInc; - case MQ_REB_TRANS_STOP_FUNC: + case TRANS_STOP_FUNC_MQ_REB: return mndRebCntDec; default: return NULL; @@ -493,11 +520,9 @@ static int32_t mndTransActionInsert(SSdb *pSdb, STrans *pTrans) { } static void mndTransDropData(STrans *pTrans) { - mndTransDropLogs(pTrans->redoLogs); - mndTransDropLogs(pTrans->undoLogs); - mndTransDropLogs(pTrans->commitLogs); mndTransDropActions(pTrans->redoActions); mndTransDropActions(pTrans->undoActions); + mndTransDropActions(pTrans->commitActions); if (pTrans->rpcRsp != NULL) { taosMemoryFree(pTrans->rpcRsp); pTrans->rpcRsp = NULL; @@ -511,7 +536,7 @@ static void mndTransDropData(STrans *pTrans) { } static int32_t mndTransActionDelete(SSdb *pSdb, STrans *pTrans, bool callFunc) { - mDebug("trans:%d, perform delete action, row:%p stage:%s callfunc:%d", pTrans->id, pTrans, mndTransStr(pTrans->stage), + mTrace("trans:%d, perform delete action, row:%p stage:%s callfunc:%d", pTrans->id, pTrans, mndTransStr(pTrans->stage), callFunc); if (pTrans->stopFunc > 0 && callFunc) { TransCbFp fp = mndTransGetCbFp(pTrans->stopFunc); @@ -524,20 +549,35 @@ static int32_t mndTransActionDelete(SSdb *pSdb, STrans *pTrans, bool callFunc) { return 0; } -static int32_t mndTransActionUpdate(SSdb *pSdb, STrans *pOld, STrans *pNew) { - if (pNew->stage == TRN_STAGE_COMMIT) { - pNew->stage = 
TRN_STAGE_COMMIT_LOG; - mTrace("trans:%d, stage from %s to %s", pNew->id, mndTransStr(TRN_STAGE_COMMIT), mndTransStr(TRN_STAGE_COMMIT_LOG)); - } - - if (pNew->stage == TRN_STAGE_ROLLBACK) { - pNew->stage = TRN_STAGE_FINISHED; - mTrace("trans:%d, stage from %s to %s", pNew->id, mndTransStr(TRN_STAGE_ROLLBACK), mndTransStr(TRN_STAGE_FINISHED)); +static void mndTransUpdateActions(SArray *pOldArray, SArray *pNewArray) { + for (int32_t i = 0; i < taosArrayGetSize(pOldArray); ++i) { + STransAction *pOldAction = taosArrayGet(pOldArray, i); + STransAction *pNewAction = taosArrayGet(pNewArray, i); + pOldAction->rawWritten = pNewAction->rawWritten; + pOldAction->msgSent = pNewAction->msgSent; + pOldAction->msgReceived = pNewAction->msgReceived; + pOldAction->errCode = pNewAction->errCode; } +} +static int32_t mndTransActionUpdate(SSdb *pSdb, STrans *pOld, STrans *pNew) { mTrace("trans:%d, perform update action, old row:%p stage:%s, new row:%p stage:%s", pOld->id, pOld, mndTransStr(pOld->stage), pNew, mndTransStr(pNew->stage)); + mndTransUpdateActions(pOld->redoActions, pNew->redoActions); + mndTransUpdateActions(pOld->undoActions, pNew->undoActions); + mndTransUpdateActions(pOld->commitActions, pNew->commitActions); pOld->stage = pNew->stage; + pOld->redoActionPos = pNew->redoActionPos; + + if (pOld->stage == TRN_STAGE_COMMIT) { + pOld->stage = TRN_STAGE_COMMIT_ACTION; + mTrace("trans:%d, stage from commit to commitAction", pNew->id); + } + + if (pOld->stage == TRN_STAGE_ROLLBACK) { + pOld->stage = TRN_STAGE_FINISHED; + mTrace("trans:%d, stage from rollback to finished", pNew->id); + } return 0; } @@ -566,40 +606,32 @@ STrans *mndTransCreate(SMnode *pMnode, ETrnPolicy policy, ETrnType type, const S pTrans->stage = TRN_STAGE_PREPARE; pTrans->policy = policy; pTrans->type = type; + pTrans->parallel = TRN_EXEC_PARALLEL; pTrans->createdTime = taosGetTimestampMs(); - if (pReq != NULL) pTrans->rpcInfo = pReq->info; - pTrans->redoLogs = taosArrayInit(TRANS_ARRAY_SIZE, sizeof(void *)); - pTrans->undoLogs = taosArrayInit(TRANS_ARRAY_SIZE, sizeof(void *)); - pTrans->commitLogs = taosArrayInit(TRANS_ARRAY_SIZE, sizeof(void *)); pTrans->redoActions = taosArrayInit(TRANS_ARRAY_SIZE, sizeof(STransAction)); pTrans->undoActions = taosArrayInit(TRANS_ARRAY_SIZE, sizeof(STransAction)); + pTrans->commitActions = taosArrayInit(TRANS_ARRAY_SIZE, sizeof(STransAction)); - if (pTrans->redoLogs == NULL || pTrans->undoLogs == NULL || pTrans->commitLogs == NULL || - pTrans->redoActions == NULL || pTrans->undoActions == NULL) { + if (pTrans->redoActions == NULL || pTrans->undoActions == NULL || pTrans->commitActions == NULL) { terrno = TSDB_CODE_OUT_OF_MEMORY; mError("failed to create transaction since %s", terrstr()); return NULL; } - mDebug("trans:%d, local object is created, data:%p", pTrans->id, pTrans); + if (pReq != NULL) pTrans->rpcInfo = pReq->info; + mTrace("trans:%d, local object is created, data:%p", pTrans->id, pTrans); return pTrans; } -static void mndTransDropLogs(SArray *pArray) { - int32_t size = taosArrayGetSize(pArray); - for (int32_t i = 0; i < size; ++i) { - SSdbRaw *pRaw = taosArrayGetP(pArray, i); - sdbFreeRaw(pRaw); - } - - taosArrayDestroy(pArray); -} - static void mndTransDropActions(SArray *pArray) { int32_t size = taosArrayGetSize(pArray); for (int32_t i = 0; i < size; ++i) { STransAction *pAction = taosArrayGet(pArray, i); - taosMemoryFreeClear(pAction->pCont); + if (pAction->isRaw) { + taosMemoryFreeClear(pAction->pRaw); + } else { + taosMemoryFreeClear(pAction->pCont); + } } 
taosArrayDestroy(pArray); @@ -608,18 +640,15 @@ static void mndTransDropActions(SArray *pArray) { void mndTransDrop(STrans *pTrans) { if (pTrans != NULL) { mndTransDropData(pTrans); - mDebug("trans:%d, local object is freed, data:%p", pTrans->id, pTrans); + mTrace("trans:%d, local object is freed, data:%p", pTrans->id, pTrans); taosMemoryFreeClear(pTrans); } } -static int32_t mndTransAppendLog(SArray *pArray, SSdbRaw *pRaw) { - if (pArray == NULL || pRaw == NULL) { - terrno = TSDB_CODE_INVALID_PARA; - return -1; - } +static int32_t mndTransAppendAction(SArray *pArray, STransAction *pAction) { + pAction->id = taosArrayGetSize(pArray); - void *ptr = taosArrayPush(pArray, &pRaw); + void *ptr = taosArrayPush(pArray, pAction); if (ptr == NULL) { terrno = TSDB_CODE_OUT_OF_MEMORY; return -1; @@ -628,27 +657,28 @@ static int32_t mndTransAppendLog(SArray *pArray, SSdbRaw *pRaw) { return 0; } -int32_t mndTransAppendRedolog(STrans *pTrans, SSdbRaw *pRaw) { return mndTransAppendLog(pTrans->redoLogs, pRaw); } - -int32_t mndTransAppendUndolog(STrans *pTrans, SSdbRaw *pRaw) { return mndTransAppendLog(pTrans->undoLogs, pRaw); } - -int32_t mndTransAppendCommitlog(STrans *pTrans, SSdbRaw *pRaw) { return mndTransAppendLog(pTrans->commitLogs, pRaw); } +int32_t mndTransAppendRedolog(STrans *pTrans, SSdbRaw *pRaw) { + STransAction action = {.stage = TRN_STAGE_REDO_ACTION, .isRaw = true, .pRaw = pRaw}; + return mndTransAppendAction(pTrans->redoActions, &action); +} -static int32_t mndTransAppendAction(SArray *pArray, STransAction *pAction) { - void *ptr = taosArrayPush(pArray, pAction); - if (ptr == NULL) { - terrno = TSDB_CODE_OUT_OF_MEMORY; - return -1; - } +int32_t mndTransAppendUndolog(STrans *pTrans, SSdbRaw *pRaw) { + STransAction action = {.stage = TRN_STAGE_UNDO_ACTION, .isRaw = true, .pRaw = pRaw}; + return mndTransAppendAction(pTrans->undoActions, &action); +} - return 0; +int32_t mndTransAppendCommitlog(STrans *pTrans, SSdbRaw *pRaw) { + STransAction action = {.stage = TRN_STAGE_COMMIT_ACTION, .isRaw = true, .pRaw = pRaw}; + return mndTransAppendAction(pTrans->commitActions, &action); } int32_t mndTransAppendRedoAction(STrans *pTrans, STransAction *pAction) { + pAction->stage = TRN_STAGE_REDO_ACTION; return mndTransAppendAction(pTrans->redoActions, pAction); } int32_t mndTransAppendUndoAction(STrans *pTrans, STransAction *pAction) { + pAction->stage = TRN_STAGE_UNDO_ACTION; return mndTransAppendAction(pTrans->undoActions, pAction); } @@ -657,7 +687,7 @@ void mndTransSetRpcRsp(STrans *pTrans, void *pCont, int32_t contLen) { pTrans->rpcRspLen = contLen; } -void mndTransSetCb(STrans *pTrans, ETrnFuncType startFunc, ETrnFuncType stopFunc, void *param, int32_t paramLen) { +void mndTransSetCb(STrans *pTrans, ETrnFunc startFunc, ETrnFunc stopFunc, void *param, int32_t paramLen) { pTrans->startFunc = startFunc; pTrans->stopFunc = stopFunc; pTrans->param = param; @@ -665,11 +695,10 @@ void mndTransSetCb(STrans *pTrans, ETrnFuncType startFunc, ETrnFuncType stopFunc } void mndTransSetDbInfo(STrans *pTrans, SDbObj *pDb) { - pTrans->dbUid = pDb->uid; memcpy(pTrans->dbname, pDb->name, TSDB_DB_FNAME_LEN); } -void mndTransSetExecOneByOne(STrans *pTrans) { pTrans->parallel = TRN_EXEC_ONE_BY_ONE; } +void mndTransSetNoParallel(STrans *pTrans) { pTrans->parallel = TRN_EXEC_NO_PARALLEL; } static int32_t mndTransSync(SMnode *pMnode, STrans *pTrans) { SSdbRaw *pRaw = mndTransActionEncode(pTrans); @@ -679,7 +708,7 @@ static int32_t mndTransSync(SMnode *pMnode, STrans *pTrans) { } sdbSetRawStatus(pRaw, SDB_STATUS_READY); - 
mDebug("trans:%d, sync to other nodes", pTrans->id); + mDebug("trans:%d, sync to other mnodes", pTrans->id); int32_t code = mndSyncPropose(pMnode, pRaw, pTrans->id); if (code != 0) { mError("trans:%d, failed to sync since %s", pTrans->id, terrstr()); @@ -732,7 +761,7 @@ static bool mndCheckTransConflict(SMnode *pMnode, STrans *pNewTrans) { mError("trans:%d, can't execute since trans:%d in progress", pNewTrans->id, pTrans->id); conflict = true; } else if (mndIsDbTrans(pTrans) || mndIsStbTrans(pTrans)) { - if (pNewTrans->dbUid == pTrans->dbUid) { + if (strcmp(pNewTrans->dbname, pTrans->dbname) == 0) { mError("trans:%d, can't execute since trans:%d in progress db:%s", pNewTrans->id, pTrans->id, pTrans->dbname); conflict = true; } @@ -745,7 +774,7 @@ static bool mndCheckTransConflict(SMnode *pMnode, STrans *pNewTrans) { mError("trans:%d, can't execute since trans:%d in progress", pNewTrans->id, pTrans->id); conflict = true; } else if (mndIsDbTrans(pTrans)) { - if (pNewTrans->dbUid == pTrans->dbUid) { + if (strcmp(pNewTrans->dbname, pTrans->dbname) == 0) { mError("trans:%d, can't execute since trans:%d in progress db:%s", pNewTrans->id, pTrans->id, pTrans->dbname); conflict = true; } @@ -768,7 +797,7 @@ int32_t mndTransPrepare(SMnode *pMnode, STrans *pTrans) { return -1; } - if (taosArrayGetSize(pTrans->commitLogs) <= 0) { + if (taosArrayGetSize(pTrans->commitActions) <= 0) { terrno = TSDB_CODE_MND_TRANS_CLOG_IS_NULL; mError("trans:%d, failed to prepare since %s", pTrans->id, terrstr()); return -1; @@ -799,8 +828,6 @@ int32_t mndTransPrepare(SMnode *pMnode, STrans *pTrans) { } static int32_t mndTransCommit(SMnode *pMnode, STrans *pTrans) { - if (taosArrayGetSize(pTrans->commitLogs) == 0 && taosArrayGetSize(pTrans->redoActions) == 0) return 0; - mDebug("trans:%d, commit transaction", pTrans->id); if (mndTransSync(pMnode, pTrans) != 0) { mError("trans:%d, failed to commit since %s", pTrans->id, terrstr()); @@ -829,8 +856,7 @@ static void mndTransSendRpcRsp(SMnode *pMnode, STrans *pTrans) { } if (pTrans->policy == TRN_POLICY_ROLLBACK) { - if (pTrans->stage == TRN_STAGE_UNDO_LOG || pTrans->stage == TRN_STAGE_UNDO_ACTION || - pTrans->stage == TRN_STAGE_ROLLBACK) { + if (pTrans->stage == TRN_STAGE_UNDO_ACTION || pTrans->stage == TRN_STAGE_ROLLBACK) { if (code == 0) code = TSDB_CODE_MND_TRANS_UNKNOW_ERROR; sendRsp = true; } @@ -848,13 +874,9 @@ static void mndTransSendRpcRsp(SMnode *pMnode, STrans *pTrans) { } taosMemoryFree(pTrans->rpcRsp); - mDebug("trans:%d, send rsp, code:0x%x stage:%d app:%p", pTrans->id, code, pTrans->stage, pTrans->rpcInfo.ahandle); - SRpcMsg rspMsg = { - .code = code, - .pCont = rpcCont, - .contLen = pTrans->rpcRspLen, - .info = pTrans->rpcInfo, - }; + mDebug("trans:%d, send rsp, code:0x%x stage:%s app:%p", pTrans->id, code, mndTransStr(pTrans->stage), + pTrans->rpcInfo.ahandle); + SRpcMsg rspMsg = {.code = code, .pCont = rpcCont, .contLen = pTrans->rpcRspLen, .info = pTrans->rpcInfo}; tmsgSendRsp(&rspMsg); pTrans->rpcInfo.handle = NULL; pTrans->rpcRsp = NULL; @@ -904,146 +926,125 @@ void mndTransProcessRsp(SRpcMsg *pRsp) { } } - mDebug("trans:%d, action:%d response is received, code:0x%x, accept:0x%04x", transId, action, pRsp->code, - pAction->acceptableCode); + mDebug("trans:%d, %s:%d response is received, code:0x%x, accept:0x%x", transId, mndTransStr(pAction->stage), action, + pRsp->code, pAction->acceptableCode); mndTransExecute(pMnode, pTrans); _OVER: mndReleaseTrans(pMnode, pTrans); } -static int32_t mndTransExecuteLogs(SMnode *pMnode, SArray *pArray) { - SSdb *pSdb = 
pMnode->pSdb; - int32_t arraySize = taosArrayGetSize(pArray); +static void mndTransResetActions(SMnode *pMnode, STrans *pTrans, SArray *pArray) { + int32_t numOfActions = taosArrayGetSize(pArray); - if (arraySize == 0) return 0; + for (int32_t action = 0; action < numOfActions; ++action) { + STransAction *pAction = taosArrayGet(pArray, action); + if (pAction->msgSent && pAction->msgReceived && + (pAction->errCode == 0 || pAction->errCode == pAction->acceptableCode)) + continue; + if (pAction->rawWritten && (pAction->errCode == 0 || pAction->errCode == pAction->acceptableCode)) continue; - int32_t code = 0; - for (int32_t i = 0; i < arraySize; ++i) { - SSdbRaw *pRaw = taosArrayGetP(pArray, i); - if (sdbWriteWithoutFree(pSdb, pRaw) != 0) { - code = ((terrno != 0) ? terrno : -1); - } + pAction->rawWritten = 0; + pAction->msgSent = 0; + pAction->msgReceived = 0; + pAction->errCode = 0; + mDebug("trans:%d, %s:%d execute status is reset", pTrans->id, mndTransStr(pAction->stage), action); } - - terrno = code; - return code; } -static int32_t mndTransExecuteRedoLogs(SMnode *pMnode, STrans *pTrans) { - int32_t code = mndTransExecuteLogs(pMnode, pTrans->redoLogs); - if (code != 0) { - mError("failed to execute redoLogs since %s", terrstr()); - } - return code; -} +static int32_t mndTransWriteSingleLog(SMnode *pMnode, STrans *pTrans, STransAction *pAction) { + if (pAction->rawWritten) return 0; -static int32_t mndTransExecuteUndoLogs(SMnode *pMnode, STrans *pTrans) { - int32_t code = mndTransExecuteLogs(pMnode, pTrans->undoLogs); - if (code != 0) { - mError("failed to execute undoLogs since %s, return success", terrstr()); + int32_t code = sdbWriteWithoutFree(pMnode->pSdb, pAction->pRaw); + if (code == 0 || terrno == TSDB_CODE_SDB_OBJ_NOT_THERE) { + pAction->rawWritten = true; + pAction->errCode = 0; + code = 0; + mDebug("trans:%d, %s:%d write to sdb", pTrans->id, mndTransStr(pAction->stage), pAction->id); + } else { + pAction->errCode = (terrno != 0) ? 
terrno : code; + mError("trans:%d, %s:%d failed to write sdb since %s", pTrans->id, mndTransStr(pAction->stage), pAction->id, + terrstr()); } - return 0; // return success in any case -} - -static int32_t mndTransExecuteCommitLogs(SMnode *pMnode, STrans *pTrans) { - int32_t code = mndTransExecuteLogs(pMnode, pTrans->commitLogs); - if (code != 0) { - mError("failed to execute commitLogs since %s", terrstr()); - } return code; } -static void mndTransResetActions(SMnode *pMnode, STrans *pTrans, SArray *pArray) { - int32_t numOfActions = taosArrayGetSize(pArray); +static int32_t mndTransSendSingleMsg(SMnode *pMnode, STrans *pTrans, STransAction *pAction) { + if (pAction->msgSent) return 0; + if (!pMnode->deploy && !mndIsMaster(pMnode)) return -1; - for (int32_t action = 0; action < numOfActions; ++action) { - STransAction *pAction = taosArrayGet(pArray, action); - if (pAction == NULL) continue; - if (pAction->msgSent && pAction->msgReceived && pAction->errCode == 0) continue; + int64_t signature = pTrans->id; + signature = (signature << 32); + signature += pAction->id; - pAction->msgSent = 0; + SRpcMsg rpcMsg = {.msgType = pAction->msgType, .contLen = pAction->contLen, .info.ahandle = (void *)signature}; + rpcMsg.pCont = rpcMallocCont(pAction->contLen); + if (rpcMsg.pCont == NULL) { + terrno = TSDB_CODE_OUT_OF_MEMORY; + return -1; + } + memcpy(rpcMsg.pCont, pAction->pCont, pAction->contLen); + + int32_t code = tmsgSendReq(&pAction->epSet, &rpcMsg); + if (code == 0) { + pAction->msgSent = 1; pAction->msgReceived = 0; pAction->errCode = 0; - mDebug("trans:%d, action:%d execute status is reset", pTrans->id, action); + mDebug("trans:%d, %s:%d is sent to %s:%u", pTrans->id, mndTransStr(pAction->stage), pAction->id, + pAction->epSet.eps[pAction->epSet.inUse].fqdn, pAction->epSet.eps[pAction->epSet.inUse].port); + } else { + pAction->msgSent = 0; + pAction->msgReceived = 0; + pAction->errCode = (terrno != 0) ? 
terrno : code; + mError("trans:%d, %s:%d not send since %s", pTrans->id, mndTransStr(pAction->stage), pAction->id, terrstr()); + } + + return code; +} + +static int32_t mndTransExecSingleAction(SMnode *pMnode, STrans *pTrans, STransAction *pAction) { + if (pAction->isRaw) { + return mndTransWriteSingleLog(pMnode, pTrans, pAction); + } else { + return mndTransSendSingleMsg(pMnode, pTrans, pAction); } } -static int32_t mndTransSendActionMsg(SMnode *pMnode, STrans *pTrans, SArray *pArray) { +static int32_t mndTransExecSingleActions(SMnode *pMnode, STrans *pTrans, SArray *pArray) { int32_t numOfActions = taosArrayGetSize(pArray); + int32_t code = 0; for (int32_t action = 0; action < numOfActions; ++action) { STransAction *pAction = taosArrayGet(pArray, action); - if (pAction == NULL) continue; - - if (pAction->msgSent) { - if (pAction->msgReceived) { - continue; - } else { - if (pTrans->parallel == TRN_EXEC_ONE_BY_ONE) { - break; - } else { - continue; - } - } - } - - int64_t signature = pTrans->id; - signature = (signature << 32); - signature += action; - - SRpcMsg rpcMsg = {.msgType = pAction->msgType, .contLen = pAction->contLen, .info.ahandle = (void *)signature}; - rpcMsg.pCont = rpcMallocCont(pAction->contLen); - if (rpcMsg.pCont == NULL) { - terrno = TSDB_CODE_OUT_OF_MEMORY; - return -1; - } - memcpy(rpcMsg.pCont, pAction->pCont, pAction->contLen); - - if (tmsgSendReq(&pAction->epSet, &rpcMsg) == 0) { - mDebug("trans:%d, action:%d is sent to %s:%u", pTrans->id, action, pAction->epSet.eps[pAction->epSet.inUse].fqdn, - pAction->epSet.eps[pAction->epSet.inUse].port); - pAction->msgSent = 1; - pAction->msgReceived = 0; - pAction->errCode = 0; - if (pTrans->parallel == TRN_EXEC_ONE_BY_ONE) { - break; - } - } else { - pAction->msgSent = 0; - pAction->msgReceived = 0; - pAction->errCode = terrno; - mError("trans:%d, action:%d not send since %s", pTrans->id, action, terrstr()); - return -1; - } + code = mndTransExecSingleAction(pMnode, pTrans, pAction); + if (code != 0) break; } - return 0; + return code; } static int32_t mndTransExecuteActions(SMnode *pMnode, STrans *pTrans, SArray *pArray) { int32_t numOfActions = taosArrayGetSize(pArray); if (numOfActions == 0) return 0; - if (mndTransSendActionMsg(pMnode, pTrans, pArray) != 0) { + if (mndTransExecSingleActions(pMnode, pTrans, pArray) != 0) { return -1; } - int32_t numOfReceived = 0; + int32_t numOfExecuted = 0; int32_t errCode = 0; for (int32_t action = 0; action < numOfActions; ++action) { STransAction *pAction = taosArrayGet(pArray, action); - if (pAction == NULL) continue; - if (pAction->msgSent && pAction->msgReceived) { - numOfReceived++; + if (pAction->msgReceived || pAction->rawWritten) { + numOfExecuted++; if (pAction->errCode != 0 && pAction->errCode != pAction->acceptableCode) { errCode = pAction->errCode; } } } - if (numOfReceived == numOfActions) { + if (numOfExecuted == numOfActions) { if (errCode == 0) { mDebug("trans:%d, all %d actions execute successfully", pTrans->id, numOfActions); return 0; @@ -1054,7 +1055,7 @@ static int32_t mndTransExecuteActions(SMnode *pMnode, STrans *pTrans, SArray *pA return errCode; } } else { - mDebug("trans:%d, %d of %d actions executed", pTrans->id, numOfReceived, numOfActions); + mDebug("trans:%d, %d of %d actions executed", pTrans->id, numOfExecuted, numOfActions); return TSDB_CODE_ACTION_IN_PROGRESS; } } @@ -1075,35 +1076,79 @@ static int32_t mndTransExecuteUndoActions(SMnode *pMnode, STrans *pTrans) { return code; } -static bool mndTransPerformPrepareStage(SMnode *pMnode, STrans *pTrans) { 
- bool continueExec = true; - pTrans->stage = TRN_STAGE_REDO_LOG; - mDebug("trans:%d, stage from prepare to redoLog", pTrans->id); - return continueExec; +static int32_t mndTransExecuteCommitActions(SMnode *pMnode, STrans *pTrans) { + int32_t code = mndTransExecuteActions(pMnode, pTrans, pTrans->commitActions); + if (code != 0 && code != TSDB_CODE_ACTION_IN_PROGRESS) { + mError("failed to execute commitActions since %s", terrstr()); + } + return code; } -static bool mndTransPerformRedoLogStage(SMnode *pMnode, STrans *pTrans) { - bool continueExec = true; - int32_t code = mndTransExecuteRedoLogs(pMnode, pTrans); +static int32_t mndTransExecuteRedoActionsNoParallel(SMnode *pMnode, STrans *pTrans) { + int32_t code = 0; + int32_t numOfActions = taosArrayGetSize(pTrans->redoActions); + if (numOfActions == 0) return code; + if (pTrans->redoActionPos >= numOfActions) return code; + + for (int32_t action = pTrans->redoActionPos; action < numOfActions; ++action) { + STransAction *pAction = taosArrayGet(pTrans->redoActions, pTrans->redoActionPos); + + code = mndTransExecSingleAction(pMnode, pTrans, pAction); + if (code == 0) { + if (pAction->msgSent) { + if (pAction->msgReceived) { + if (pAction->errCode != 0 && pAction->errCode != pAction->acceptableCode) { + code = pAction->errCode; + } + } else { + code = TSDB_CODE_ACTION_IN_PROGRESS; + } + } + if (pAction->rawWritten) { + if (pAction->errCode != 0 && pAction->errCode != pAction->acceptableCode) { + code = pAction->errCode; + } + } + } - if (code == 0) { - pTrans->code = 0; - pTrans->stage = TRN_STAGE_REDO_ACTION; - mDebug("trans:%d, stage from redoLog to redoAction", pTrans->id); - } else { - pTrans->code = terrno; - pTrans->stage = TRN_STAGE_UNDO_LOG; - mError("trans:%d, stage from redoLog to undoLog since %s", pTrans->id, terrstr()); + if (code == 0) { + pTrans->redoActionPos++; + mDebug("trans:%d, %s:%d is executed and needs to be synced to other mnodes", pTrans->id, mndTransStr(pAction->stage), + pAction->id); + code = mndTransSync(pMnode, pTrans); + if (code != 0) { + mError("trans:%d, failed to sync redoActionPos since %s", pTrans->id, terrstr()); + break; + } + } else if (code == TSDB_CODE_ACTION_IN_PROGRESS) { + mDebug("trans:%d, %s:%d is in progress and waiting for it to finish", pTrans->id, mndTransStr(pAction->stage), pAction->id); + break; + } else { + mError("trans:%d, %s:%d failed to execute since %s", pTrans->id, mndTransStr(pAction->stage), pAction->id, + terrstr()); + break; + } } + return code; +} + +static bool mndTransPerformPrepareStage(SMnode *pMnode, STrans *pTrans) { + bool continueExec = true; + pTrans->stage = TRN_STAGE_REDO_ACTION; + mDebug("trans:%d, stage from prepare to redoAction", pTrans->id); return continueExec; } static bool mndTransPerformRedoActionStage(SMnode *pMnode, STrans *pTrans) { - if (!pMnode->deploy && !mndIsMaster(pMnode)) return false; - bool continueExec = true; - int32_t code = mndTransExecuteRedoActions(pMnode, pTrans); + int32_t code = 0; + + if (pTrans->parallel == TRN_EXEC_NO_PARALLEL) { + code = mndTransExecuteRedoActionsNoParallel(pMnode, pTrans); + } else { + code = mndTransExecuteRedoActions(pMnode, pTrans); + } if (code == 0) { pTrans->code = 0; @@ -1135,8 +1180,8 @@ static bool mndTransPerformCommitStage(SMnode *pMnode, STrans *pTrans) { if (code == 0) { pTrans->code = 0; - pTrans->stage = TRN_STAGE_COMMIT_LOG; - mDebug("trans:%d, stage from commit to commitLog", pTrans->id); + pTrans->stage = TRN_STAGE_COMMIT_ACTION; + mDebug("trans:%d, stage from commit to commitAction", pTrans->id); continueExec = 
true; } else { pTrans->code = terrno; @@ -1155,35 +1200,19 @@ static bool mndTransPerformCommitStage(SMnode *pMnode, STrans *pTrans) { return continueExec; } -static bool mndTransPerformCommitLogStage(SMnode *pMnode, STrans *pTrans) { +static bool mndTransPerformCommitActionStage(SMnode *pMnode, STrans *pTrans) { bool continueExec = true; - int32_t code = mndTransExecuteCommitLogs(pMnode, pTrans); + int32_t code = mndTransExecuteCommitActions(pMnode, pTrans); if (code == 0) { pTrans->code = 0; pTrans->stage = TRN_STAGE_FINISHED; - mDebug("trans:%d, stage from commitLog to finished", pTrans->id); + mDebug("trans:%d, stage from commitAction to finished", pTrans->id); continueExec = true; } else { pTrans->code = terrno; pTrans->failedTimes++; - mError("trans:%d, stage keep on commitLog since %s, failedTimes:%d", pTrans->id, terrstr(), pTrans->failedTimes); - continueExec = false; - } - - return continueExec; -} - -static bool mndTransPerformUndoLogStage(SMnode *pMnode, STrans *pTrans) { - bool continueExec = true; - int32_t code = mndTransExecuteUndoLogs(pMnode, pTrans); - - if (code == 0) { - pTrans->stage = TRN_STAGE_ROLLBACK; - mDebug("trans:%d, stage from undoLog to rollback", pTrans->id); - continueExec = true; - } else { - mError("trans:%d, stage keep on undoLog since %s", pTrans->id, terrstr()); + mError("trans:%d, stage keep on commitAction since %s, failedTimes:%d", pTrans->id, terrstr(), pTrans->failedTimes); continueExec = false; } @@ -1191,14 +1220,12 @@ static bool mndTransPerformUndoLogStage(SMnode *pMnode, STrans *pTrans) { } static bool mndTransPerformUndoActionStage(SMnode *pMnode, STrans *pTrans) { - if (!pMnode->deploy && !mndIsMaster(pMnode)) return false; - bool continueExec = true; int32_t code = mndTransExecuteUndoActions(pMnode, pTrans); if (code == 0) { - pTrans->stage = TRN_STAGE_UNDO_LOG; - mDebug("trans:%d, stage from undoAction to undoLog", pTrans->id); + pTrans->stage = TRN_STAGE_ROLLBACK; + mDebug("trans:%d, stage from undoAction to rollback", pTrans->id); continueExec = true; } else if (code == TSDB_CODE_ACTION_IN_PROGRESS) { mDebug("trans:%d, stage keep on undoAction since %s", pTrans->id, tstrerror(code)); @@ -1243,8 +1270,7 @@ static bool mndTransPerfromFinishedStage(SMnode *pMnode, STrans *pTrans) { mError("trans:%d, failed to write sdb since %s", pTrans->id, terrstr()); } - mDebug("trans:%d, finished, code:0x%x, failedTimes:%d", pTrans->id, pTrans->code, pTrans->failedTimes); - + mDebug("trans:%d, execute finished, code:0x%x, failedTimes:%d", pTrans->id, pTrans->code, pTrans->failedTimes); return continueExec; } @@ -1257,24 +1283,18 @@ static void mndTransExecute(SMnode *pMnode, STrans *pTrans) { case TRN_STAGE_PREPARE: continueExec = mndTransPerformPrepareStage(pMnode, pTrans); break; - case TRN_STAGE_REDO_LOG: - continueExec = mndTransPerformRedoLogStage(pMnode, pTrans); - break; case TRN_STAGE_REDO_ACTION: continueExec = mndTransPerformRedoActionStage(pMnode, pTrans); break; - case TRN_STAGE_UNDO_LOG: - continueExec = mndTransPerformUndoLogStage(pMnode, pTrans); + case TRN_STAGE_COMMIT: + continueExec = mndTransPerformCommitStage(pMnode, pTrans); + break; + case TRN_STAGE_COMMIT_ACTION: + continueExec = mndTransPerformCommitActionStage(pMnode, pTrans); break; case TRN_STAGE_UNDO_ACTION: continueExec = mndTransPerformUndoActionStage(pMnode, pTrans); break; - case TRN_STAGE_COMMIT_LOG: - continueExec = mndTransPerformCommitLogStage(pMnode, pTrans); - break; - case TRN_STAGE_COMMIT: - continueExec = mndTransPerformCommitStage(pMnode, pTrans); - break; case 
TRN_STAGE_ROLLBACK: continueExec = mndTransPerformRollbackStage(pMnode, pTrans); break; @@ -1313,15 +1333,15 @@ int32_t mndKillTrans(SMnode *pMnode, STrans *pTrans) { if (pAction == NULL) continue; if (pAction->msgReceived == 0) { - mInfo("trans:%d, action:%d set processed for kill msg received", pTrans->id, i); + mInfo("trans:%d, %s:%d set processed for kill msg received", pTrans->id, mndTransStr(pAction->stage), i); pAction->msgSent = 1; pAction->msgReceived = 1; pAction->errCode = 0; } if (pAction->errCode != 0) { - mInfo("trans:%d, action:%d set processed for kill msg received, errCode from %s to success", pTrans->id, i, - tstrerror(pAction->errCode)); + mInfo("trans:%d, %s:%d set processed for kill msg received, errCode from %s to success", pTrans->id, + mndTransStr(pAction->stage), i, tstrerror(pAction->errCode)); pAction->msgSent = 1; pAction->msgReceived = 1; pAction->errCode = 0; diff --git a/source/dnode/mnode/impl/src/mndUser.c b/source/dnode/mnode/impl/src/mndUser.c index cc6364c4571b7b56b096d282c4f8f29a7b624dca..83d00c86e3eb8797f5b40fa58243df429e905d5a 100644 --- a/source/dnode/mnode/impl/src/mndUser.c +++ b/source/dnode/mnode/impl/src/mndUser.c @@ -77,7 +77,7 @@ static int32_t mndCreateDefaultUser(SMnode *pMnode, char *acct, char *user, char if (pRaw == NULL) return -1; sdbSetRawStatus(pRaw, SDB_STATUS_READY); - mDebug("user:%s, will be created while deploy sdb, raw:%p", userObj.user, pRaw); + mDebug("user:%s, will be created when deploying, raw:%p", userObj.user, pRaw); #if 0 return sdbWrite(pMnode->pSdb, pRaw); diff --git a/source/dnode/mnode/impl/src/mndVgroup.c b/source/dnode/mnode/impl/src/mndVgroup.c index e05b38a7c0345293eb53caeab2eb680f6d113651..161fc5379c45a166ab331bce7b6cbf0f6e2f8c98 100644 --- a/source/dnode/mnode/impl/src/mndVgroup.c +++ b/source/dnode/mnode/impl/src/mndVgroup.c @@ -501,7 +501,7 @@ int32_t mndAllocVgroup(SMnode *pMnode, SDbObj *pDb, SVgObj **ppVgroups) { *ppVgroups = pVgroups; code = 0; - mInfo("db:%s, %d vgroups is alloced, replica:%d", pDb->name, pDb->cfg.numOfVgroups, pDb->cfg.replications); + mInfo("db:%s, total %d vgroups is alloced, replica:%d", pDb->name, pDb->cfg.numOfVgroups, pDb->cfg.replications); _OVER: if (code != 0) taosMemoryFree(pVgroups); @@ -539,7 +539,7 @@ int32_t mndAddVnodeToVgroup(SMnode *pMnode, SVgObj *pVgroup, SArray *pArray) { pVgid->role = TAOS_SYNC_STATE_FOLLOWER; pDnode->numOfVnodes++; - mInfo("db:%s, vgId:%d, vn:%d dnode:%d is added", pVgroup->dbName, pVgroup->vgId, maxPos, pVgid->dnodeId); + mInfo("db:%s, vgId:%d, vnode_index:%d dnode:%d is added", pVgroup->dbName, pVgroup->vgId, maxPos, pVgid->dnodeId); maxPos++; if (maxPos == 3) return 0; } diff --git a/source/dnode/mnode/impl/test/db/CMakeLists.txt b/source/dnode/mnode/impl/test/db/CMakeLists.txt index 3f6a80835ffa7b2a0a6fcdcff21e1cfd39a02c5f..e28cdd4f61824c04f62513868a9010113140fd31 100644 --- a/source/dnode/mnode/impl/test/db/CMakeLists.txt +++ b/source/dnode/mnode/impl/test/db/CMakeLists.txt @@ -5,7 +5,9 @@ target_link_libraries( PUBLIC sut ) -add_test( - NAME dbTest - COMMAND dbTest -) +if(NOT TD_WINDOWS) + add_test( + NAME dbTest + COMMAND dbTest + ) +endif(NOT TD_WINDOWS) diff --git a/source/dnode/mnode/impl/test/sma/CMakeLists.txt b/source/dnode/mnode/impl/test/sma/CMakeLists.txt index 3f9ec123a80e88371a98fa54c99342726831372d..fd596c5021674bb9d4ec185924129b0fd3bbade8 100644 --- a/source/dnode/mnode/impl/test/sma/CMakeLists.txt +++ b/source/dnode/mnode/impl/test/sma/CMakeLists.txt @@ -5,7 +5,9 @@ target_link_libraries( PUBLIC sut ) -add_test( - NAME smaTest 
- COMMAND smaTest -) +if(NOT TD_WINDOWS) + add_test( + NAME smaTest + COMMAND smaTest + ) +endif(NOT TD_WINDOWS) diff --git a/source/dnode/mnode/impl/test/stb/CMakeLists.txt b/source/dnode/mnode/impl/test/stb/CMakeLists.txt index d2fe3879979f4f52a215a3d44e25e912be3abb90..857c404c1c299767685fa1572a7f5a0b6463c939 100644 --- a/source/dnode/mnode/impl/test/stb/CMakeLists.txt +++ b/source/dnode/mnode/impl/test/stb/CMakeLists.txt @@ -5,7 +5,9 @@ target_link_libraries( PUBLIC sut ) -add_test( - NAME stbTest - COMMAND stbTest -) +if(NOT TD_WINDOWS) + add_test( + NAME stbTest + COMMAND stbTest + ) +endif(NOT TD_WINDOWS) \ No newline at end of file diff --git a/source/dnode/mnode/impl/test/trans/trans2.cpp b/source/dnode/mnode/impl/test/trans/trans2.cpp index b78f1c7021ef44313a2a6393ecc58294921f2a18..cfcfc2490e022092386b64f859befe4b1b922c80 100644 --- a/source/dnode/mnode/impl/test/trans/trans2.cpp +++ b/source/dnode/mnode/impl/test/trans/trans2.cpp @@ -123,7 +123,7 @@ class MndTestTrans2 : public ::testing::Test { sdbSetRawStatus(pUndoRaw, SDB_STATUS_DROPPED); char *param = strdup("====> test log <====="); - mndTransSetCb(pTrans, TEST_TRANS_START_FUNC, TEST_TRANS_STOP_FUNC, param, strlen(param) + 1); + mndTransSetCb(pTrans, TRANS_START_FUNC_TEST, TRANS_STOP_FUNC_TEST, param, strlen(param) + 1); if (pDb != NULL) { mndTransSetDbInfo(pTrans, pDb); @@ -156,7 +156,7 @@ class MndTestTrans2 : public ::testing::Test { sdbSetRawStatus(pUndoRaw, SDB_STATUS_DROPPED); char *param = strdup("====> test action <====="); - mndTransSetCb(pTrans, TEST_TRANS_START_FUNC, TEST_TRANS_STOP_FUNC, param, strlen(param) + 1); + mndTransSetCb(pTrans, TRANS_START_FUNC_TEST, TRANS_STOP_FUNC_TEST, param, strlen(param) + 1); { STransAction action = {0}; @@ -228,7 +228,7 @@ class MndTestTrans2 : public ::testing::Test { sdbSetRawStatus(pUndoRaw, SDB_STATUS_DROPPED); char *param = strdup("====> test log <====="); - mndTransSetCb(pTrans, TEST_TRANS_START_FUNC, TEST_TRANS_STOP_FUNC, param, strlen(param) + 1); + mndTransSetCb(pTrans, TRANS_START_FUNC_TEST, TRANS_STOP_FUNC_TEST, param, strlen(param) + 1); int32_t code = mndTransPrepare(pMnode, pTrans); mndTransDrop(pTrans); diff --git a/source/dnode/mnode/sdb/inc/sdb.h b/source/dnode/mnode/sdb/inc/sdb.h index c66b47a24b13f0c9efd55dc965743416737177ea..1fd0260d0d22849dafcd1698c89e98392fbe31b0 100644 --- a/source/dnode/mnode/sdb/inc/sdb.h +++ b/source/dnode/mnode/sdb/inc/sdb.h @@ -168,6 +168,7 @@ typedef struct SSdb { char *currDir; char *tmpDir; int64_t lastCommitVer; + int64_t lastCommitTerm; int64_t curVer; int64_t curTerm; int64_t tableVer[SDB_MAX]; diff --git a/source/dnode/mnode/sdb/src/sdb.c b/source/dnode/mnode/sdb/src/sdb.c index 485b729deb52ffcdf4c5b76c1999124a5157f5b2..0526ea5c2d65cee2b57d6312b92b90830bad0b8b 100644 --- a/source/dnode/mnode/sdb/src/sdb.c +++ b/source/dnode/mnode/sdb/src/sdb.c @@ -55,6 +55,7 @@ SSdb *sdbInit(SSdbOpt *pOption) { pSdb->curVer = -1; pSdb->curTerm = -1; pSdb->lastCommitVer = -1; + pSdb->lastCommitTerm = -1; pSdb->pMnode = pOption->pMnode; taosThreadMutexInit(&pSdb->filelock, NULL); mDebug("sdb init successfully"); diff --git a/source/dnode/mnode/sdb/src/sdbFile.c b/source/dnode/mnode/sdb/src/sdbFile.c index 1ca5097ce62738fd0b5c5cf8ccc5f8bdf482314d..83135491a993e5f8106ed05409255951342c0ac7 100644 --- a/source/dnode/mnode/sdb/src/sdbFile.c +++ b/source/dnode/mnode/sdb/src/sdbFile.c @@ -70,6 +70,7 @@ static void sdbResetData(SSdb *pSdb) { pSdb->curVer = -1; pSdb->curTerm = -1; pSdb->lastCommitVer = -1; + pSdb->lastCommitTerm = -1; mDebug("sdb reset 
successfully"); } @@ -211,12 +212,12 @@ static int32_t sdbReadFileImp(SSdb *pSdb) { char file[PATH_MAX] = {0}; snprintf(file, sizeof(file), "%s%ssdb.data", pSdb->currDir, TD_DIRSEP); - mDebug("start to read file:%s", file); + mDebug("start to read sdb file:%s", file); SSdbRaw *pRaw = taosMemoryMalloc(WAL_MAX_SIZE + 100); if (pRaw == NULL) { terrno = TSDB_CODE_OUT_OF_MEMORY; - mError("failed read file since %s", terrstr()); + mError("failed read sdb file since %s", terrstr()); return -1; } @@ -224,12 +225,12 @@ static int32_t sdbReadFileImp(SSdb *pSdb) { if (pFile == NULL) { taosMemoryFree(pRaw); terrno = TAOS_SYSTEM_ERROR(errno); - mError("failed to read file:%s since %s", file, terrstr()); + mError("failed to read sdb file:%s since %s", file, terrstr()); return 0; } if (sdbReadFileHead(pSdb, pFile) != 0) { - mError("failed to read file:%s head since %s", file, terrstr()); + mError("failed to read sdb file:%s head since %s", file, terrstr()); taosMemoryFree(pRaw); taosCloseFile(&pFile); return -1; @@ -245,13 +246,13 @@ static int32_t sdbReadFileImp(SSdb *pSdb) { if (ret < 0) { code = TAOS_SYSTEM_ERROR(errno); - mError("failed to read file:%s since %s", file, tstrerror(code)); + mError("failed to read sdb file:%s since %s", file, tstrerror(code)); break; } if (ret != readLen) { code = TSDB_CODE_FILE_CORRUPTED; - mError("failed to read file:%s since %s", file, tstrerror(code)); + mError("failed to read sdb file:%s since %s", file, tstrerror(code)); break; } @@ -259,34 +260,36 @@ static int32_t sdbReadFileImp(SSdb *pSdb) { ret = taosReadFile(pFile, pRaw->pData, readLen); if (ret < 0) { code = TAOS_SYSTEM_ERROR(errno); - mError("failed to read file:%s since %s", file, tstrerror(code)); + mError("failed to read sdb file:%s since %s", file, tstrerror(code)); break; } if (ret != readLen) { code = TSDB_CODE_FILE_CORRUPTED; - mError("failed to read file:%s since %s", file, tstrerror(code)); + mError("failed to read sdb file:%s since %s", file, tstrerror(code)); break; } int32_t totalLen = sizeof(SSdbRaw) + pRaw->dataLen + sizeof(int32_t); if ((!taosCheckChecksumWhole((const uint8_t *)pRaw, totalLen)) != 0) { code = TSDB_CODE_CHECKSUM_ERROR; - mError("failed to read file:%s since %s", file, tstrerror(code)); + mError("failed to read sdb file:%s since %s", file, tstrerror(code)); break; } code = sdbWriteWithoutFree(pSdb, pRaw); if (code != 0) { - mError("failed to read file:%s since %s", file, terrstr()); + mError("failed to read sdb file:%s since %s", file, terrstr()); goto _OVER; } } code = 0; pSdb->lastCommitVer = pSdb->curVer; + pSdb->lastCommitTerm = pSdb->curTerm; memcpy(pSdb->tableVer, tableVer, sizeof(tableVer)); - mDebug("read file:%s successfully, ver:%" PRId64, file, pSdb->lastCommitVer); + mDebug("read sdb file:%s successfully, ver:%" PRId64 " term:%" PRId64, file, pSdb->lastCommitVer, + pSdb->lastCommitTerm); _OVER: taosCloseFile(&pFile); @@ -302,7 +305,7 @@ int32_t sdbReadFile(SSdb *pSdb) { sdbResetData(pSdb); int32_t code = sdbReadFileImp(pSdb); if (code != 0) { - mError("failed to read sdb since %s", terrstr()); + mError("failed to read sdb file since %s", terrstr()); sdbResetData(pSdb); } @@ -318,18 +321,19 @@ static int32_t sdbWriteFileImp(SSdb *pSdb) { char curfile[PATH_MAX] = {0}; snprintf(curfile, sizeof(curfile), "%s%ssdb.data", pSdb->currDir, TD_DIRSEP); - mDebug("start to write file:%s, current ver:%" PRId64 " term:%" PRId64 ", commit ver:%" PRId64, curfile, pSdb->curVer, - pSdb->curTerm, pSdb->lastCommitVer); + mDebug("start to write sdb file, current ver:%" PRId64 " term:%" 
PRId64 ", commit ver:%" PRId64 " term:%" PRId64 + " file:%s", + pSdb->curVer, pSdb->curTerm, pSdb->lastCommitVer, pSdb->lastCommitTerm, curfile); TdFilePtr pFile = taosOpenFile(tmpfile, TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_TRUNC); if (pFile == NULL) { terrno = TAOS_SYSTEM_ERROR(errno); - mError("failed to open file:%s for write since %s", tmpfile, terrstr()); + mError("failed to open sdb file:%s for write since %s", tmpfile, terrstr()); return -1; } if (sdbWriteFileHead(pSdb, pFile) != 0) { - mError("failed to write file:%s head since %s", tmpfile, terrstr()); + mError("failed to write sdb file:%s head since %s", tmpfile, terrstr()); taosCloseFile(&pFile); return -1; } @@ -338,7 +342,7 @@ static int32_t sdbWriteFileImp(SSdb *pSdb) { SdbEncodeFp encodeFp = pSdb->encodeFps[i]; if (encodeFp == NULL) continue; - mTrace("write %s to file, total %d rows", sdbTableName(i), sdbGetSize(pSdb, i)); + mTrace("write %s to sdb file, total %d rows", sdbTableName(i), sdbGetSize(pSdb, i)); SHashObj *hash = pSdb->hashObjs[i]; TdThreadRwlock *pLock = &pSdb->locks[i]; @@ -394,7 +398,7 @@ static int32_t sdbWriteFileImp(SSdb *pSdb) { code = taosFsyncFile(pFile); if (code != 0) { code = TAOS_SYSTEM_ERROR(errno); - mError("failed to sync file:%s since %s", tmpfile, tstrerror(code)); + mError("failed to sync sdb file:%s since %s", tmpfile, tstrerror(code)); } } @@ -404,15 +408,17 @@ static int32_t sdbWriteFileImp(SSdb *pSdb) { code = taosRenameFile(tmpfile, curfile); if (code != 0) { code = TAOS_SYSTEM_ERROR(errno); - mError("failed to write file:%s since %s", curfile, tstrerror(code)); + mError("failed to write sdb file:%s since %s", curfile, tstrerror(code)); } } if (code != 0) { - mError("failed to write file:%s since %s", curfile, tstrerror(code)); + mError("failed to write sdb file:%s since %s", curfile, tstrerror(code)); } else { pSdb->lastCommitVer = pSdb->curVer; - mDebug("write file:%s successfully, ver:%" PRId64 " term:%" PRId64, curfile, pSdb->lastCommitVer, pSdb->curTerm); + pSdb->lastCommitTerm = pSdb->curTerm; + mDebug("write sdb file successfully, ver:%" PRId64 " term:%" PRId64 " file:%s", pSdb->lastCommitVer, + pSdb->lastCommitTerm, curfile); } terrno = code; @@ -427,7 +433,7 @@ int32_t sdbWriteFile(SSdb *pSdb) { taosThreadMutexLock(&pSdb->filelock); int32_t code = sdbWriteFileImp(pSdb); if (code != 0) { - mError("failed to write sdb since %s", terrstr()); + mError("failed to write sdb file since %s", terrstr()); } taosThreadMutexUnlock(&pSdb->filelock); return code; @@ -493,7 +499,7 @@ int32_t sdbStartRead(SSdb *pSdb, SSdbIter **ppIter) { if (taosCopyFile(datafile, pIter->name) < 0) { taosThreadMutexUnlock(&pSdb->filelock); terrno = TAOS_SYSTEM_ERROR(errno); - mError("failed to copy file %s to %s since %s", datafile, pIter->name, terrstr()); + mError("failed to copy sdb file %s to %s since %s", datafile, pIter->name, terrstr()); sdbCloseIter(pIter); return -1; } @@ -502,7 +508,7 @@ int32_t sdbStartRead(SSdb *pSdb, SSdbIter **ppIter) { pIter->file = taosOpenFile(pIter->name, TD_FILE_READ); if (pIter->file == NULL) { terrno = TAOS_SYSTEM_ERROR(errno); - mError("failed to open file:%s since %s", pIter->name, terrstr()); + mError("failed to open sdb file:%s since %s", pIter->name, terrstr()); sdbCloseIter(pIter); return -1; } @@ -522,7 +528,6 @@ int32_t sdbDoRead(SSdb *pSdb, SSdbIter *pIter, void **ppBuf, int32_t *len) { void *pBuf = taosMemoryCalloc(1, maxlen); if (pBuf == NULL) { terrno = TSDB_CODE_OUT_OF_MEMORY; - sdbCloseIter(pIter); return -1; } diff --git a/source/dnode/qnode/src/qnode.c 
b/source/dnode/qnode/src/qnode.c index 6d31e20d9bc03908025b100dd135c7e706a0b647..40aa572a56709a97e454cdc82cb7e97852356b27 100644 --- a/source/dnode/qnode/src/qnode.c +++ b/source/dnode/qnode/src/qnode.c @@ -40,37 +40,46 @@ void qndClose(SQnode *pQnode) { taosMemoryFree(pQnode); } -int32_t qndGetLoad(SQnode *pQnode, SQnodeLoad *pLoad) { return 0; } +int32_t qndGetLoad(SQnode *pQnode, SQnodeLoad *pLoad) { + SMsgCb* pCb = &pQnode->msgCb; -int32_t qndProcessQueryMsg(SQnode *pQnode, SRpcMsg *pMsg) { + pLoad->numOfQueryInQueue = pCb->qsizeFp(pCb->mgmt, pQnode->qndId, QUERY_QUEUE); + pLoad->numOfFetchInQueue = pCb->qsizeFp(pCb->mgmt, pQnode->qndId, FETCH_QUEUE); + pLoad->waitTimeInQueryQUeue = qWorkerGetWaitTimeInQueue(pQnode->pQuery, QUERY_QUEUE); + pLoad->waitTimeInFetchQUeue = qWorkerGetWaitTimeInQueue(pQnode->pQuery, FETCH_QUEUE); + + return 0; +} + +int32_t qndProcessQueryMsg(SQnode *pQnode, int64_t ts, SRpcMsg *pMsg) { int32_t code = -1; SReadHandle handle = {.pMsgCb = &pQnode->msgCb}; qTrace("message in qnode queue is processing"); switch (pMsg->msgType) { case TDMT_VND_QUERY: - code = qWorkerProcessQueryMsg(&handle, pQnode->pQuery, pMsg); + code = qWorkerProcessQueryMsg(&handle, pQnode->pQuery, pMsg, ts); break; case TDMT_VND_QUERY_CONTINUE: - code = qWorkerProcessCQueryMsg(&handle, pQnode->pQuery, pMsg); + code = qWorkerProcessCQueryMsg(&handle, pQnode->pQuery, pMsg, ts); break; case TDMT_VND_FETCH: - code = qWorkerProcessFetchMsg(pQnode, pQnode->pQuery, pMsg); + code = qWorkerProcessFetchMsg(pQnode, pQnode->pQuery, pMsg, ts); break; case TDMT_VND_FETCH_RSP: - code = qWorkerProcessFetchRsp(pQnode, pQnode->pQuery, pMsg); + code = qWorkerProcessFetchRsp(pQnode, pQnode->pQuery, pMsg, ts); break; case TDMT_VND_CANCEL_TASK: - code = qWorkerProcessCancelMsg(pQnode, pQnode->pQuery, pMsg); + code = qWorkerProcessCancelMsg(pQnode, pQnode->pQuery, pMsg, ts); break; case TDMT_VND_DROP_TASK: - code = qWorkerProcessDropMsg(pQnode, pQnode->pQuery, pMsg); + code = qWorkerProcessDropMsg(pQnode, pQnode->pQuery, pMsg, ts); break; case TDMT_VND_CONSUME: // code = tqProcessConsumeReq(pQnode->pTq, pMsg); // break; case TDMT_VND_QUERY_HEARTBEAT: - code = qWorkerProcessHbMsg(pQnode, pQnode->pQuery, pMsg); + code = qWorkerProcessHbMsg(pQnode, pQnode->pQuery, pMsg, ts); break; default: qError("unknown msg type:%d in qnode queue", pMsg->msgType); diff --git a/source/dnode/vnode/CMakeLists.txt b/source/dnode/vnode/CMakeLists.txt index d988f97188b9330e1229368554b0f75a5713025b..17445b7abe6872f038a5931d926cb9af6a95ce2d 100644 --- a/source/dnode/vnode/CMakeLists.txt +++ b/source/dnode/vnode/CMakeLists.txt @@ -51,6 +51,7 @@ target_sources( # tq "src/tq/tq.c" + "src/tq/tqExec.c" "src/tq/tqCommit.c" "src/tq/tqOffset.c" "src/tq/tqPush.c" diff --git a/source/dnode/vnode/src/inc/tq.h b/source/dnode/vnode/src/inc/tq.h index 06ff6329e0b3ddc69cc50ec1becc9541e3939ca5..72138926aa2c73d1a4bf4ea780665df3ad39d9ed 100644 --- a/source/dnode/vnode/src/inc/tq.h +++ b/source/dnode/vnode/src/inc/tq.h @@ -44,21 +44,27 @@ extern "C" { typedef struct STqOffsetCfg STqOffsetCfg; typedef struct STqOffsetStore STqOffsetStore; +// tqRead + struct STqReadHandle { int64_t ver; - SHashObj* tbIdHash; const SSubmitReq* pMsg; SSubmitBlk* pBlock; SSubmitMsgIter msgIter; SSubmitBlkIter blkIter; - SMeta* pVnodeMeta; - SArray* pColIdList; // SArray - int32_t sver; - int64_t cachedSchemaUid; - SSchemaWrapper* pSchemaWrapper; - STSchema* pSchema; + + SMeta* pVnodeMeta; + SHashObj* tbIdHash; + SArray* pColIdList; // SArray + + int32_t cachedSchemaVer; + 
int64_t cachedSchemaUid; + SSchemaWrapper* pSchemaWrapper; + STSchema* pSchema; }; +// tqPush + typedef struct { int64_t consumerId; int32_t epoch; @@ -68,14 +74,15 @@ typedef struct { SRpcMsg* handle; } STqPushHandle; +#if 0 typedef struct { - char subKey[TSDB_SUBSCRIBE_KEY_LEN]; - int64_t consumerId; - int32_t epoch; - int8_t subType; - int8_t withTbName; - int8_t withSchema; - int8_t withTag; + char subKey[TSDB_SUBSCRIBE_KEY_LEN]; + int64_t consumerId; + int32_t epoch; + int8_t subType; + // int8_t withTbName; + // int8_t withSchema; + // int8_t withTag; char* qmsg; SHashObj* pDropTbUid; STqPushHandle pushHandle; @@ -85,15 +92,55 @@ typedef struct { STqReadHandle* pExecReader[5]; qTaskInfo_t task[5]; } STqExec; +#endif + +// tqExec + +typedef struct { + char* qmsg; + qTaskInfo_t task[5]; +} STqExecCol; + +typedef struct { + int64_t suid; +} STqExecTb; + +typedef struct { + SHashObj* pFilterOutTbUid; +} STqExecDb; + +typedef struct { + int8_t subType; + + STqReadHandle* pExecReader[5]; + union { + STqExecCol execCol; + STqExecTb execTb; + STqExecDb execDb; + } exec; +} STqExecHandle; -int32_t tEncodeSTqExec(SEncoder* pEncoder, const STqExec* pExec); -int32_t tDecodeSTqExec(SDecoder* pDecoder, STqExec* pExec); +typedef struct { + // info + char subKey[TSDB_SUBSCRIBE_KEY_LEN]; + int64_t consumerId; + int32_t epoch; + + // reader + SWalReadHandle* pWalReader; + + // push + STqPushHandle pushHandle; + + // exec + STqExecHandle execHandle; +} STqHandle; struct STQ { char* path; - SHashObj* pushMgr; // consumerId -> STqExec* - SHashObj* execs; // subKey -> STqExec - SHashObj* pStreamTasks; + SHashObj* pushMgr; // consumerId -> STqHandle* + SHashObj* handles; // subKey -> STqHandle + SHashObj* pStreamTasks; // taskId -> SStreamTask SVnode* pVnode; SWal* pWal; TDB* pMetaStore; @@ -111,6 +158,16 @@ static STqMgmt tqMgmt = {0}; int tqInit(); void tqCleanUp(); +// int32_t tEncodeSTqExec(SEncoder* pEncoder, const STqExec* pExec); +// int32_t tDecodeSTqExec(SDecoder* pDecoder, STqExec* pExec); + +int32_t tEncodeSTqHandle(SEncoder* pEncoder, const STqHandle* pHandle); +int32_t tDecodeSTqHandle(SDecoder* pDecoder, STqHandle* pHandle); + +int64_t tqFetchLog(STQ* pTq, STqHandle* pHandle, int64_t* fetchOffset, SWalHead** pHeadWithCkSum); + +int32_t tqDataExec(STQ* pTq, STqExecHandle* pExec, SSubmitReq* pReq, SMqDataBlkRsp* pRsp, int32_t workerId); + // tqOffset STqOffsetStore* STqOffsetOpen(STqOffsetCfg*); void STqOffsetClose(STqOffsetStore*); diff --git a/source/dnode/vnode/src/inc/tsdb.h b/source/dnode/vnode/src/inc/tsdb.h index 6d3d23cc208092d779c0067348de8279db337f75..2e4ff6a4abd8315afa06e9a881955947af9144c6 100644 --- a/source/dnode/vnode/src/inc/tsdb.h +++ b/source/dnode/vnode/src/inc/tsdb.h @@ -79,13 +79,14 @@ struct STsdb { struct STable { uint64_t tid; uint64_t uid; - STSchema *pSchema; + STSchema *pSchema; // latest schema + STSchema *pCacheSchema; // cached schema }; #define TABLE_TID(t) (t)->tid #define TABLE_UID(t) (t)->uid -int tsdbPrepareCommit(STsdb *pTsdb); +int tsdbPrepareCommit(STsdb *pTsdb); typedef enum { TSDB_FILE_HEAD = 0, // .head TSDB_FILE_DATA, // .data @@ -181,13 +182,15 @@ int tsdbUnlockRepo(STsdb *pTsdb); static FORCE_INLINE STSchema *tsdbGetTableSchemaImpl(STsdb *pTsdb, STable *pTable, bool lock, bool copy, int32_t version) { - - if ((version != -1) && (schemaVersion(pTable->pSchema) != version)) { - taosMemoryFreeClear(pTable->pSchema); - pTable->pSchema = metaGetTbTSchema(REPO_META(pTsdb), pTable->uid, version); + if ((version < 0) || (schemaVersion(pTable->pSchema) == 
version)) { + return pTable->pSchema; } - return pTable->pSchema; + if (!pTable->pCacheSchema || (schemaVersion(pTable->pCacheSchema) != version)) { + taosMemoryFreeClear(pTable->pCacheSchema); + pTable->pCacheSchema = metaGetTbTSchema(REPO_META(pTsdb), pTable->uid, version); + } + return pTable->pCacheSchema; } // tsdbMemTable.h diff --git a/source/dnode/vnode/src/inc/vnodeInt.h b/source/dnode/vnode/src/inc/vnodeInt.h index 0e67d9e426f1b708e927d986f7c9d797acc8759d..e3a0c94ccc8d210a04a60ded6dd8bbd79d203767 100644 --- a/source/dnode/vnode/src/inc/vnodeInt.h +++ b/source/dnode/vnode/src/inc/vnodeInt.h @@ -149,7 +149,7 @@ int32_t tdUpdateExpireWindow(SSma* pSma, SSubmitReq* pMsg, int64_t version); int32_t tdProcessTSmaCreate(SSma* pSma, int64_t version, const char* msg); int32_t tdProcessTSmaInsert(SSma* pSma, int64_t indexUid, const char* msg); -int32_t tdProcessRSmaCreate(SSma* pSma, SMeta* pMeta, SVCreateStbReq* pReq, SMsgCb* pMsgCb); +int32_t tdProcessRSmaCreate(SVnode *pVnode, SVCreateStbReq* pReq); int32_t tdProcessRSmaSubmit(SSma* pSma, void* pMsg, int32_t inputType); int32_t tdFetchTbUidList(SSma* pSma, STbUidStore** ppStore, tb_uid_t suid, tb_uid_t uid); int32_t tdUpdateTbUidList(SSma* pSma, STbUidStore* pUidStore); diff --git a/source/dnode/vnode/src/meta/metaQuery.c b/source/dnode/vnode/src/meta/metaQuery.c index 71429ff1767e02a9b91391ab912ebaa2b6f105de..8d9a1afefc752728d9a58bee02429f8e9ea99e09 100644 --- a/source/dnode/vnode/src/meta/metaQuery.c +++ b/source/dnode/vnode/src/meta/metaQuery.c @@ -178,6 +178,7 @@ SSchemaWrapper *metaGetTableSchema(SMeta *pMeta, tb_uid_t uid, int32_t sver, boo if (me.type == TSDB_SUPER_TABLE) { pSchema = tCloneSSchemaWrapper(&me.stbEntry.schemaRow); } else if (me.type == TSDB_NORMAL_TABLE) { + pSchema = tCloneSSchemaWrapper(&me.ntbEntry.schemaRow); } else { ASSERT(0); } @@ -299,7 +300,7 @@ STSchema *metaGetTbTSchema(SMeta *pMeta, tb_uid_t uid, int32_t sver) { pSW = metaGetTableSchema(pMeta, quid, sver, 0); if (!pSW) return NULL; - tdInitTSchemaBuilder(&sb, sver); + tdInitTSchemaBuilder(&sb, pSW->version); for (int i = 0; i < pSW->nCols; i++) { pSchema = pSW->pSchema + i; tdAddColToSchema(&sb, pSchema->type, pSchema->flags, pSchema->colId, pSchema->bytes); diff --git a/source/dnode/vnode/src/sma/smaRollup.c b/source/dnode/vnode/src/sma/smaRollup.c index 731ef2e36061494ec2440cda9f4818edbc0291cd..a1e397f11d8454b1f0a61afcfde2eeac65c361ea 100644 --- a/source/dnode/vnode/src/sma/smaRollup.c +++ b/source/dnode/vnode/src/sma/smaRollup.c @@ -165,7 +165,10 @@ int32_t tdFetchTbUidList(SSma *pSma, STbUidStore **ppStore, tb_uid_t suid, tb_ui * @param pReq * @return int32_t */ -int32_t tdProcessRSmaCreate(SSma *pSma, SMeta *pMeta, SVCreateStbReq *pReq, SMsgCb *pMsgCb) { +int32_t tdProcessRSmaCreate(SVnode *pVnode, SVCreateStbReq *pReq) { + SSma *pSma = pVnode->pSma; + SMeta *pMeta = pVnode->pMeta; + SMsgCb *pMsgCb = &pVnode->msgCb; if (!pReq->rollup) { smaTrace("vgId:%d return directly since no rollup for stable %s %" PRIi64, SMA_VID(pSma), pReq->name, pReq->suid); return TSDB_CODE_SUCCESS; @@ -210,6 +213,7 @@ int32_t tdProcessRSmaCreate(SSma *pSma, SMeta *pMeta, SVCreateStbReq *pReq, SMsg .reader = pReadHandle, .meta = pMeta, .pMsgCb = pMsgCb, + .vnode = pVnode, }; if (param->qmsg1) { @@ -441,7 +445,7 @@ static int32_t tdExecuteRSma(SSma *pSma, const void *pMsg, int32_t inputType, tb if (inputType == STREAM_DATA_TYPE_SUBMIT_BLOCK) { // TODO: use the proper schema instead of 0, and cache STSchema in cache - STSchema *pTSchema = metaGetTbTSchema(SMA_META(pSma), 
suid, 1); + STSchema *pTSchema = metaGetTbTSchema(SMA_META(pSma), suid, -1); if (!pTSchema) { terrno = TSDB_CODE_TDB_IVD_TB_SCHEMA_VERSION; return TSDB_CODE_FAILED; diff --git a/source/dnode/vnode/src/tq/tq.c b/source/dnode/vnode/src/tq/tq.c index 96ce6e8eeeeaf17243d8e29baa733c369437c931..b4747f2264abdfcd78b77cad4aa4c9c14731ee79 100644 --- a/source/dnode/vnode/src/tq/tq.c +++ b/source/dnode/vnode/src/tq/tq.c @@ -51,10 +51,10 @@ int tqExecKeyCompare(const void* pKey1, int32_t kLen1, const void* pKey2, int32_ return strcmp(pKey1, pKey2); } -int32_t tqStoreExec(STQ* pTq, const char* key, const STqExec* pExec) { +int32_t tqStoreHandle(STQ* pTq, const char* key, const STqHandle* pHandle) { int32_t code; int32_t vlen; - tEncodeSize(tEncodeSTqExec, pExec, vlen, code); + tEncodeSize(tEncodeSTqHandle, pHandle, vlen, code); ASSERT(code == 0); void* buf = taosMemoryCalloc(1, vlen); @@ -65,7 +65,7 @@ int32_t tqStoreExec(STQ* pTq, const char* key, const STqExec* pExec) { SEncoder encoder; tEncoderInit(&encoder, buf, vlen); - if (tEncodeSTqExec(&encoder, pExec) < 0) { + if (tEncodeSTqHandle(&encoder, pHandle) < 0) { ASSERT(0); } @@ -102,7 +102,7 @@ STQ* tqOpen(const char* path, SVnode* pVnode, SWal* pWal) { pTq->pVnode = pVnode; pTq->pWal = pWal; - pTq->execs = taosHashInit(64, MurmurHash3_32, true, HASH_ENTRY_LOCK); + pTq->handles = taosHashInit(64, MurmurHash3_32, true, HASH_ENTRY_LOCK); pTq->pStreamTasks = taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_NO_LOCK); @@ -112,7 +112,7 @@ STQ* tqOpen(const char* path, SVnode* pVnode, SWal* pWal) { ASSERT(0); } - if (tdbTbOpen("exec", -1, -1, tqExecKeyCompare, pTq->pMetaStore, &pTq->pExecStore) < 0) { + if (tdbTbOpen("handles", -1, -1, tqExecKeyCompare, pTq->pMetaStore, &pTq->pExecStore) < 0) { ASSERT(0); } @@ -122,10 +122,6 @@ STQ* tqOpen(const char* path, SVnode* pVnode, SWal* pWal) { ASSERT(0); } - /*if (tdbBegin(pTq->pMetaStore, &txn) < 0) {*/ - /*ASSERT(0);*/ - /*}*/ - TBC* pCur; if (tdbTbcOpen(pTq->pExecStore, &pCur, &txn) < 0) { ASSERT(0); @@ -138,30 +134,31 @@ STQ* tqOpen(const char* path, SVnode* pVnode, SWal* pWal) { tdbTbcMoveToFirst(pCur); SDecoder decoder; + while (tdbTbcNext(pCur, &pKey, &kLen, &pVal, &vLen) == 0) { - STqExec exec; + STqHandle handle; tDecoderInit(&decoder, (uint8_t*)pVal, vLen); - tDecodeSTqExec(&decoder, &exec); - exec.pWalReader = walOpenReadHandle(pTq->pVnode->pWal); - if (exec.subType == TOPIC_SUB_TYPE__TABLE) { + tDecodeSTqHandle(&decoder, &handle); + handle.pWalReader = walOpenReadHandle(pTq->pVnode->pWal); + for (int32_t i = 0; i < 5; i++) { + handle.execHandle.pExecReader[i] = tqInitSubmitMsgScanner(pTq->pVnode->pMeta); + } + if (handle.execHandle.subType == TOPIC_SUB_TYPE__COLUMN) { for (int32_t i = 0; i < 5; i++) { - exec.pExecReader[i] = tqInitSubmitMsgScanner(pTq->pVnode->pMeta); - - SReadHandle handle = { - .reader = exec.pExecReader[i], + SReadHandle reader = { + .reader = handle.execHandle.pExecReader[i], .meta = pTq->pVnode->pMeta, .pMsgCb = &pTq->pVnode->msgCb, }; - exec.task[i] = qCreateStreamExecTaskInfo(exec.qmsg, &handle); - ASSERT(exec.task[i]); + handle.execHandle.exec.execCol.task[i] = + qCreateStreamExecTaskInfo(handle.execHandle.exec.execCol.qmsg, &reader); + ASSERT(handle.execHandle.exec.execCol.task[i]); } } else { - for (int32_t i = 0; i < 5; i++) { - exec.pExecReader[i] = tqInitSubmitMsgScanner(pTq->pVnode->pMeta); - } - exec.pDropTbUid = taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK); + 
handle.execHandle.exec.execDb.pFilterOutTbUid = + taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK); } - taosHashPut(pTq->execs, pKey, kLen, &exec, sizeof(STqExec)); + taosHashPut(pTq->handles, pKey, kLen, &handle, sizeof(STqHandle)); } if (tdbTxnClose(&txn) < 0) { @@ -174,7 +171,7 @@ STQ* tqOpen(const char* path, SVnode* pVnode, SWal* pWal) { void tqClose(STQ* pTq) { if (pTq) { taosMemoryFreeClear(pTq->path); - taosHashCleanup(pTq->execs); + taosHashCleanup(pTq->handles); taosHashCleanup(pTq->pStreamTasks); taosHashCleanup(pTq->pushMgr); tdbClose(pTq->pMetaStore); @@ -183,16 +180,17 @@ void tqClose(STQ* pTq) { // TODO } +#if 0 int32_t tEncodeSTqExec(SEncoder* pEncoder, const STqExec* pExec) { if (tStartEncode(pEncoder) < 0) return -1; if (tEncodeCStr(pEncoder, pExec->subKey) < 0) return -1; if (tEncodeI64(pEncoder, pExec->consumerId) < 0) return -1; if (tEncodeI32(pEncoder, pExec->epoch) < 0) return -1; if (tEncodeI8(pEncoder, pExec->subType) < 0) return -1; - if (tEncodeI8(pEncoder, pExec->withTbName) < 0) return -1; - if (tEncodeI8(pEncoder, pExec->withSchema) < 0) return -1; - if (tEncodeI8(pEncoder, pExec->withTag) < 0) return -1; - if (pExec->subType == TOPIC_SUB_TYPE__TABLE) { + /*if (tEncodeI8(pEncoder, pExec->withTbName) < 0) return -1;*/ + /*if (tEncodeI8(pEncoder, pExec->withSchema) < 0) return -1;*/ + /*if (tEncodeI8(pEncoder, pExec->withTag) < 0) return -1;*/ + if (pExec->subType == TOPIC_SUB_TYPE__COLUMN) { if (tEncodeCStr(pEncoder, pExec->qmsg) < 0) return -1; } tEndEncode(pEncoder); @@ -205,34 +203,64 @@ int32_t tDecodeSTqExec(SDecoder* pDecoder, STqExec* pExec) { if (tDecodeI64(pDecoder, &pExec->consumerId) < 0) return -1; if (tDecodeI32(pDecoder, &pExec->epoch) < 0) return -1; if (tDecodeI8(pDecoder, &pExec->subType) < 0) return -1; - if (tDecodeI8(pDecoder, &pExec->withTbName) < 0) return -1; - if (tDecodeI8(pDecoder, &pExec->withSchema) < 0) return -1; - if (tDecodeI8(pDecoder, &pExec->withTag) < 0) return -1; - if (pExec->subType == TOPIC_SUB_TYPE__TABLE) { + /*if (tDecodeI8(pDecoder, &pExec->withTbName) < 0) return -1;*/ + /*if (tDecodeI8(pDecoder, &pExec->withSchema) < 0) return -1;*/ + /*if (tDecodeI8(pDecoder, &pExec->withTag) < 0) return -1;*/ + if (pExec->subType == TOPIC_SUB_TYPE__COLUMN) { if (tDecodeCStrAlloc(pDecoder, &pExec->qmsg) < 0) return -1; } tEndDecode(pDecoder); return 0; } +#endif + +int32_t tEncodeSTqHandle(SEncoder* pEncoder, const STqHandle* pHandle) { + if (tStartEncode(pEncoder) < 0) return -1; + if (tEncodeCStr(pEncoder, pHandle->subKey) < 0) return -1; + if (tEncodeI64(pEncoder, pHandle->consumerId) < 0) return -1; + if (tEncodeI32(pEncoder, pHandle->epoch) < 0) return -1; + if (tEncodeI8(pEncoder, pHandle->execHandle.subType) < 0) return -1; + if (pHandle->execHandle.subType == TOPIC_SUB_TYPE__COLUMN) { + if (tEncodeCStr(pEncoder, pHandle->execHandle.exec.execCol.qmsg) < 0) return -1; + } + tEndEncode(pEncoder); + return pEncoder->pos; +} + +int32_t tDecodeSTqHandle(SDecoder* pDecoder, STqHandle* pHandle) { + if (tStartDecode(pDecoder) < 0) return -1; + if (tDecodeCStrTo(pDecoder, pHandle->subKey) < 0) return -1; + if (tDecodeI64(pDecoder, &pHandle->consumerId) < 0) return -1; + if (tDecodeI32(pDecoder, &pHandle->epoch) < 0) return -1; + if (tDecodeI8(pDecoder, &pHandle->execHandle.subType) < 0) return -1; + if (pHandle->execHandle.subType == TOPIC_SUB_TYPE__COLUMN) { + if (tDecodeCStrAlloc(pDecoder, &pHandle->execHandle.exec.execCol.qmsg) < 0) return -1; + } + tEndDecode(pDecoder); + return 0; +} + 
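The STqHandle codec above intentionally persists only the durable identity of a subscription: subKey, consumerId, epoch, subType and, for column subscriptions, the serialized query qmsg. Everything else (the WAL reader, the submit-message scanners, the executor tasks) is rebuilt at restore time, as the tqOpen loop shows. A hedged round-trip sketch using only calls visible in this patch (tEncodeSize, tEncoderInit, tDecoderInit, tDecoderClear); buffer sizing and error checks are simplified:

// Editor's sketch: persist and restore one STqHandle, mirroring
// tqStoreHandle and the tqOpen restore loop.
STqHandle handle;              // assume: a fully initialized subscription handle
int32_t   code = 0, vlen = 0;
tEncodeSize(tEncodeSTqHandle, &handle, vlen, code);   // pass 1: compute size
void* buf = taosMemoryCalloc(1, vlen);

SEncoder encoder;
tEncoderInit(&encoder, buf, vlen);
tEncodeSTqHandle(&encoder, &handle);                  // pass 2: serialize

STqHandle restored = {0};
SDecoder decoder;
tDecoderInit(&decoder, (uint8_t*)buf, vlen);
tDecodeSTqHandle(&decoder, &restored);                // qmsg comes back via tDecodeCStrAlloc
tDecoderClear(&decoder);
taosMemoryFree(buf);
// Runtime state is deliberately absent from the blob; the caller reopens it:
// restored.pWalReader via walOpenReadHandle, tasks via qCreateStreamExecTaskInfo.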
int32_t tqUpdateTbUidList(STQ* pTq, const SArray* tbUidList, bool isAdd) { void* pIter = NULL; while (1) { - pIter = taosHashIterate(pTq->execs, pIter); + pIter = taosHashIterate(pTq->handles, pIter); if (pIter == NULL) break; - STqExec* pExec = (STqExec*)pIter; - if (pExec->subType == TOPIC_SUB_TYPE__DB) { + STqHandle* pExec = (STqHandle*)pIter; + if (pExec->execHandle.subType == TOPIC_SUB_TYPE__COLUMN) { + for (int32_t i = 0; i < 5; i++) { + int32_t code = qUpdateQualifiedTableId(pExec->execHandle.exec.execCol.task[i], tbUidList, isAdd); + ASSERT(code == 0); + } + } else if (pExec->execHandle.subType == TOPIC_SUB_TYPE__DB) { if (!isAdd) { int32_t sz = taosArrayGetSize(tbUidList); for (int32_t i = 0; i < sz; i++) { int64_t tbUid = *(int64_t*)taosArrayGet(tbUidList, i); - taosHashPut(pExec->pDropTbUid, &tbUid, sizeof(int64_t), NULL, 0); + taosHashPut(pExec->execHandle.exec.execDb.pFilterOutTbUid, &tbUid, sizeof(int64_t), NULL, 0); } } } else { - for (int32_t i = 0; i < 5; i++) { - int32_t code = qUpdateQualifiedTableId(pExec->task[i], tbUidList, isAdd); - ASSERT(code == 0); - } + // tq update id } } while (1) { @@ -250,7 +278,7 @@ int32_t tqUpdateTbUidList(STQ* pTq, const SArray* tbUidList, bool isAdd) { int32_t tqPushMsgNew(STQ* pTq, void* msg, int32_t msgLen, tmsg_t msgType, int64_t ver, SRpcHandleInfo handleInfo) { if (msgType != TDMT_VND_SUBMIT) return 0; void* pIter = NULL; - STqExec* pExec = NULL; + STqHandle* pHandle = NULL; SSubmitReq* pReq = (SSubmitReq*)msg; int32_t workerId = 4; int64_t fetchOffset = ver; @@ -258,84 +286,27 @@ int32_t tqPushMsgNew(STQ* pTq, void* msg, int32_t msgLen, tmsg_t msgType, int64_ while (1) { pIter = taosHashIterate(pTq->pushMgr, pIter); if (pIter == NULL) break; - pExec = *(STqExec**)pIter; + pHandle = *(STqHandle**)pIter; - taosWLockLatch(&pExec->pushHandle.lock); + taosWLockLatch(&pHandle->pushHandle.lock); - SRpcMsg* pMsg = atomic_load_ptr(&pExec->pushHandle.handle); + SRpcMsg* pMsg = atomic_load_ptr(&pHandle->pushHandle.handle); ASSERT(pMsg); SMqDataBlkRsp rsp = {0}; - rsp.reqOffset = pExec->pushHandle.reqOffset; + rsp.reqOffset = pHandle->pushHandle.reqOffset; rsp.blockData = taosArrayInit(0, sizeof(void*)); rsp.blockDataLen = taosArrayInit(0, sizeof(int32_t)); - if (pExec->subType == TOPIC_SUB_TYPE__TABLE) { - qTaskInfo_t task = pExec->task[workerId]; - ASSERT(task); - qSetStreamInput(task, pReq, STREAM_DATA_TYPE_SUBMIT_BLOCK, false); - while (1) { - SSDataBlock* pDataBlock = NULL; - uint64_t ts = 0; - if (qExecTask(task, &pDataBlock, &ts) < 0) { - ASSERT(0); - } - if (pDataBlock == NULL) break; - - ASSERT(pDataBlock->info.rows != 0); - ASSERT(pDataBlock->info.numOfCols != 0); - - int32_t dataStrLen = sizeof(SRetrieveTableRsp) + blockGetEncodeSize(pDataBlock); - void* buf = taosMemoryCalloc(1, dataStrLen); - SRetrieveTableRsp* pRetrieve = (SRetrieveTableRsp*)buf; - pRetrieve->useconds = ts; - pRetrieve->precision = TSDB_DEFAULT_PRECISION; - pRetrieve->compressed = 0; - pRetrieve->completed = 1; - pRetrieve->numOfRows = htonl(pDataBlock->info.rows); - - // TODO enable compress - int32_t actualLen = 0; - blockCompressEncode(pDataBlock, pRetrieve->data, &actualLen, pDataBlock->info.numOfCols, false); - actualLen += sizeof(SRetrieveTableRsp); - ASSERT(actualLen <= dataStrLen); - taosArrayPush(rsp.blockDataLen, &actualLen); - taosArrayPush(rsp.blockData, &buf); - rsp.blockNum++; - } - } else if (pExec->subType == TOPIC_SUB_TYPE__DB) { - STqReadHandle* pReader = pExec->pExecReader[workerId]; - tqReadHandleSetMsg(pReader, pReq, 0); - while 
(tqNextDataBlock(pReader)) { - SSDataBlock block = {0}; - if (tqRetrieveDataBlock(&block.pDataBlock, pReader, &block.info.groupId, &block.info.uid, &block.info.rows, - &block.info.numOfCols) < 0) { - ASSERT(0); - } - int32_t dataStrLen = sizeof(SRetrieveTableRsp) + blockGetEncodeSize(&block); - void* buf = taosMemoryCalloc(1, dataStrLen); - SRetrieveTableRsp* pRetrieve = (SRetrieveTableRsp*)buf; - /*pRetrieve->useconds = 0;*/ - pRetrieve->precision = TSDB_DEFAULT_PRECISION; - pRetrieve->compressed = 0; - pRetrieve->completed = 1; - pRetrieve->numOfRows = htonl(block.info.rows); - - // TODO enable compress - int32_t actualLen = 0; - blockCompressEncode(&block, pRetrieve->data, &actualLen, block.info.numOfCols, false); - actualLen += sizeof(SRetrieveTableRsp); - ASSERT(actualLen <= dataStrLen); - taosArrayPush(rsp.blockDataLen, &actualLen); - taosArrayPush(rsp.blockData, &buf); - rsp.blockNum++; - } + if (msgType == TDMT_VND_SUBMIT) { + tqDataExec(pTq, &pHandle->execHandle, pReq, &rsp, workerId); } else { + // TODO ASSERT(0); } if (rsp.blockNum == 0) { - taosWUnLockLatch(&pExec->pushHandle.lock); + taosWUnLockLatch(&pHandle->pushHandle.lock); continue; } @@ -352,8 +323,8 @@ int32_t tqPushMsgNew(STQ* pTq, void* msg, int32_t msgLen, tmsg_t msgType, int64_ } ((SMqRspHead*)buf)->mqMsgType = TMQ_MSG_TYPE__POLL_RSP; - ((SMqRspHead*)buf)->epoch = pExec->pushHandle.epoch; - ((SMqRspHead*)buf)->consumerId = pExec->pushHandle.consumerId; + ((SMqRspHead*)buf)->epoch = pHandle->pushHandle.epoch; + ((SMqRspHead*)buf)->consumerId = pHandle->pushHandle.consumerId; void* abuf = POINTER_SHIFT(buf, sizeof(SMqRspHead)); tEncodeSMqDataBlkRsp(&abuf, &rsp); @@ -361,11 +332,11 @@ int32_t tqPushMsgNew(STQ* pTq, void* msg, int32_t msgLen, tmsg_t msgType, int64_ SRpcMsg resp = {.info = handleInfo, .pCont = buf, .contLen = tlen, .code = 0}; tmsgSendRsp(&resp); - atomic_store_ptr(&pExec->pushHandle.handle, NULL); - taosWUnLockLatch(&pExec->pushHandle.lock); + atomic_store_ptr(&pHandle->pushHandle.handle, NULL); + taosWUnLockLatch(&pHandle->pushHandle.lock); tqDebug("vg %d offset %ld from consumer %ld (epoch %d) send rsp, block num: %d, reqOffset: %ld, rspOffset: %ld", - TD_VID(pTq->pVnode), fetchOffset, pExec->pushHandle.consumerId, pExec->pushHandle.epoch, rsp.blockNum, + TD_VID(pTq->pVnode), fetchOffset, pHandle->pushHandle.consumerId, pHandle->pushHandle.epoch, rsp.blockNum, rsp.reqOffset, rsp.rspOffset); // TODO destroy @@ -419,12 +390,12 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg, int32_t workerId) { tqDebug("tmq poll: consumer %ld (epoch %d) recv poll req in vg %d, req %ld %ld", consumerId, pReq->epoch, TD_VID(pTq->pVnode), pReq->currentOffset, fetchOffset); - STqExec* pExec = taosHashGet(pTq->execs, pReq->subKey, strlen(pReq->subKey)); - ASSERT(pExec); + STqHandle* pHandle = taosHashGet(pTq->handles, pReq->subKey, strlen(pReq->subKey)); + ASSERT(pHandle); - int32_t consumerEpoch = atomic_load_32(&pExec->epoch); + int32_t consumerEpoch = atomic_load_32(&pHandle->epoch); while (consumerEpoch < reqEpoch) { - consumerEpoch = atomic_val_compare_exchange_32(&pExec->epoch, consumerEpoch, reqEpoch); + consumerEpoch = atomic_val_compare_exchange_32(&pHandle->epoch, consumerEpoch, reqEpoch); } SWalHead* pHeadWithCkSum = taosMemoryMalloc(sizeof(SWalHead) + 2048); @@ -432,50 +403,46 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg, int32_t workerId) { return -1; } - walSetReaderCapacity(pExec->pWalReader, 2048); + walSetReaderCapacity(pHandle->pWalReader, 2048); SMqDataBlkRsp rsp = {0}; rsp.reqOffset = 
pReq->currentOffset; - rsp.withSchema = pExec->withSchema; rsp.blockData = taosArrayInit(0, sizeof(void*)); rsp.blockDataLen = taosArrayInit(0, sizeof(int32_t)); rsp.blockSchema = taosArrayInit(0, sizeof(void*)); rsp.blockTbName = taosArrayInit(0, sizeof(void*)); - int8_t withTbName = pExec->withTbName; - if (pReq->withTbName != -1) { - withTbName = pReq->withTbName; + rsp.withTbName = pReq->withTbName; + if (pHandle->execHandle.subType == TOPIC_SUB_TYPE__COLUMN) { + rsp.withSchema = false; + rsp.withTag = false; + } else { + rsp.withSchema = true; + rsp.withTag = false; } - rsp.withTbName = withTbName; + + /*int8_t withTbName = pExec->withTbName;*/ + /*if (pReq->withTbName != -1) {*/ + /*withTbName = pReq->withTbName;*/ + /*}*/ + /*rsp.withTbName = withTbName;*/ while (1) { - consumerEpoch = atomic_load_32(&pExec->epoch); + consumerEpoch = atomic_load_32(&pHandle->epoch); if (consumerEpoch > reqEpoch) { tqDebug("tmq poll: consumer %ld (epoch %d) vg %d offset %ld, found new consumer epoch %d discard req epoch %d", consumerId, pReq->epoch, TD_VID(pTq->pVnode), fetchOffset, consumerEpoch, reqEpoch); break; } - taosThreadMutexLock(&pExec->pWalReader->mutex); - - if (walFetchHead(pExec->pWalReader, fetchOffset, pHeadWithCkSum) < 0) { - tqDebug("tmq poll: consumer %ld (epoch %d) vg %d offset %ld, no more log to return", consumerId, pReq->epoch, - TD_VID(pTq->pVnode), fetchOffset); - taosThreadMutexUnlock(&pExec->pWalReader->mutex); + if (tqFetchLog(pTq, pHandle, &fetchOffset, &pHeadWithCkSum) < 0) { + // TODO add push mgr break; } - if (pHeadWithCkSum->head.msgType != TDMT_VND_SUBMIT) { - ASSERT(walSkipFetchBody(pExec->pWalReader, pHeadWithCkSum) == 0); - } else { - ASSERT(walFetchBody(pExec->pWalReader, &pHeadWithCkSum) == 0); - } - SWalReadHead* pHead = &pHeadWithCkSum->head; - taosThreadMutexUnlock(&pExec->pWalReader->mutex); - #if 0 SWalReadHead* pHead; if (walReadWithHandle_s(pExec->pWalReader, fetchOffset, &pHead) < 0) { @@ -515,122 +482,28 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg, int32_t workerId) { if (pHead->msgType == TDMT_VND_SUBMIT) { SSubmitReq* pCont = (SSubmitReq*)&pHead->body; - // table subscribe - if (pExec->subType == TOPIC_SUB_TYPE__TABLE) { - qTaskInfo_t task = pExec->task[workerId]; - ASSERT(task); - qSetStreamInput(task, pCont, STREAM_DATA_TYPE_SUBMIT_BLOCK, false); - while (1) { - SSDataBlock* pDataBlock = NULL; - uint64_t ts = 0; - if (qExecTask(task, &pDataBlock, &ts) < 0) { - ASSERT(0); - } - if (pDataBlock == NULL) break; - - ASSERT(pDataBlock->info.rows != 0); - ASSERT(pDataBlock->info.numOfCols != 0); - - int32_t dataStrLen = sizeof(SRetrieveTableRsp) + blockGetEncodeSize(pDataBlock); - void* buf = taosMemoryCalloc(1, dataStrLen); - SRetrieveTableRsp* pRetrieve = (SRetrieveTableRsp*)buf; - pRetrieve->useconds = ts; - pRetrieve->precision = TSDB_DEFAULT_PRECISION; - pRetrieve->compressed = 0; - pRetrieve->completed = 1; - pRetrieve->numOfRows = htonl(pDataBlock->info.rows); - - // TODO enable compress - int32_t actualLen = 0; - blockCompressEncode(pDataBlock, pRetrieve->data, &actualLen, pDataBlock->info.numOfCols, false); - actualLen += sizeof(SRetrieveTableRsp); - ASSERT(actualLen <= dataStrLen); - taosArrayPush(rsp.blockDataLen, &actualLen); - taosArrayPush(rsp.blockData, &buf); - - if (pExec->withSchema) { - SSchemaWrapper* pSW = tCloneSSchemaWrapper(pExec->pExecReader[workerId]->pSchemaWrapper); - taosArrayPush(rsp.blockSchema, &pSW); - } - - if (withTbName) { - SMetaReader mr = {0}; - metaReaderInit(&mr, pTq->pVnode->pMeta, 0); - int64_t uid = 
pExec->pExecReader[workerId]->msgIter.uid; - if (metaGetTableEntryByUid(&mr, uid) < 0) { - ASSERT(0); - } - char* tbName = strdup(mr.me.name); - taosArrayPush(rsp.blockTbName, &tbName); - metaReaderClear(&mr); - } - - rsp.blockNum++; - } - // db subscribe - } else if (pExec->subType == TOPIC_SUB_TYPE__DB) { - rsp.withSchema = 1; - STqReadHandle* pReader = pExec->pExecReader[workerId]; - tqReadHandleSetMsg(pReader, pCont, 0); - while (tqNextDataBlockFilterOut(pReader, pExec->pDropTbUid)) { - SSDataBlock block = {0}; - if (tqRetrieveDataBlock(&block.pDataBlock, pReader, &block.info.groupId, &block.info.uid, &block.info.rows, - &block.info.numOfCols) < 0) { - if (terrno == TSDB_CODE_TQ_TABLE_SCHEMA_NOT_FOUND) continue; - ASSERT(0); - } - int32_t dataStrLen = sizeof(SRetrieveTableRsp) + blockGetEncodeSize(&block); - void* buf = taosMemoryCalloc(1, dataStrLen); - SRetrieveTableRsp* pRetrieve = (SRetrieveTableRsp*)buf; - /*pRetrieve->useconds = 0;*/ - pRetrieve->precision = TSDB_DEFAULT_PRECISION; - pRetrieve->compressed = 0; - pRetrieve->completed = 1; - pRetrieve->numOfRows = htonl(block.info.rows); - - // TODO enable compress - int32_t actualLen = 0; - blockCompressEncode(&block, pRetrieve->data, &actualLen, block.info.numOfCols, false); - actualLen += sizeof(SRetrieveTableRsp); - ASSERT(actualLen <= dataStrLen); - taosArrayPush(rsp.blockDataLen, &actualLen); - taosArrayPush(rsp.blockData, &buf); - if (withTbName) { - SMetaReader mr = {0}; - metaReaderInit(&mr, pTq->pVnode->pMeta, 0); - if (metaGetTableEntryByUid(&mr, block.info.uid) < 0) { - ASSERT(0); - } - char* tbName = strdup(mr.me.name); - taosArrayPush(rsp.blockTbName, &tbName); - metaReaderClear(&mr); - } - - SSchemaWrapper* pSW = tCloneSSchemaWrapper(pExec->pExecReader[workerId]->pSchemaWrapper); - taosArrayPush(rsp.blockSchema, &pSW); - - rsp.blockNum++; - } - } else { - ASSERT(0); - } + + tqDataExec(pTq, &pHandle->execHandle, pCont, &rsp, workerId); + } else { + // TODO + ASSERT(0); } // TODO batch optimization: // TODO continue scan until meeting batch requirement - if (rsp.blockNum != 0) break; - rsp.skipLogNum++; - fetchOffset++; + if (rsp.blockNum > 0 /* threshold */) { + break; + } else { + fetchOffset++; + } } taosMemoryFree(pHeadWithCkSum); + ASSERT(taosArrayGetSize(rsp.blockData) == rsp.blockNum); ASSERT(taosArrayGetSize(rsp.blockDataLen) == rsp.blockNum); - if (rsp.blockNum != 0) - rsp.rspOffset = fetchOffset; - else - rsp.rspOffset = fetchOffset - 1; + rsp.rspOffset = fetchOffset; int32_t tlen = sizeof(SMqRspHead) + tEncodeSMqDataBlkRsp(NULL, &rsp); void* buf = rpcMallocCont(tlen); @@ -646,13 +519,18 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg, int32_t workerId) { void* abuf = POINTER_SHIFT(buf, sizeof(SMqRspHead)); tEncodeSMqDataBlkRsp(&abuf, &rsp); - SRpcMsg resp = {.info = pMsg->info, .pCont = buf, .contLen = tlen, .code = 0}; + SRpcMsg resp = { + .info = pMsg->info, + .pCont = buf, + .contLen = tlen, + .code = 0, + }; tmsgSendRsp(&resp); tqDebug("vg %d offset %ld from consumer %ld (epoch %d) send rsp, block num: %d, reqOffset: %ld, rspOffset: %ld", TD_VID(pTq->pVnode), fetchOffset, consumerId, pReq->epoch, rsp.blockNum, rsp.reqOffset, rsp.rspOffset); - // TODO destroy + // TODO wrap in destroy func taosArrayDestroy(rsp.blockData); taosArrayDestroy(rsp.blockDataLen); taosArrayDestroyP(rsp.blockSchema, (FDelete)tDeleteSSchemaWrapper); @@ -664,7 +542,7 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg, int32_t workerId) { int32_t tqProcessVgDeleteReq(STQ* pTq, char* msg, int32_t msgLen) { SMqVDeleteReq* pReq = 
(SMqVDeleteReq*)msg; - int32_t code = taosHashRemove(pTq->execs, pReq->subKey, strlen(pReq->subKey)); + int32_t code = taosHashRemove(pTq->handles, pReq->subKey, strlen(pReq->subKey)); ASSERT(code == 0); TXN txn; @@ -693,63 +571,59 @@ int32_t tqProcessVgChangeReq(STQ* pTq, char* msg, int32_t msgLen) { SMqRebVgReq req = {0}; tDecodeSMqRebVgReq(msg, &req); // todo lock - STqExec* pExec = taosHashGet(pTq->execs, req.subKey, strlen(req.subKey)); - if (pExec == NULL) { + STqHandle* pHandle = taosHashGet(pTq->handles, req.subKey, strlen(req.subKey)); + if (pHandle == NULL) { ASSERT(req.oldConsumerId == -1); ASSERT(req.newConsumerId != -1); - STqExec exec = {0}; - pExec = &exec; + STqHandle tqHandle = {0}; + pHandle = &tqHandle; /*taosInitRWLatch(&pExec->lock);*/ - memcpy(pExec->subKey, req.subKey, TSDB_SUBSCRIBE_KEY_LEN); - pExec->consumerId = req.newConsumerId; - pExec->epoch = -1; + memcpy(pHandle->subKey, req.subKey, TSDB_SUBSCRIBE_KEY_LEN); + pHandle->consumerId = req.newConsumerId; + pHandle->epoch = -1; - pExec->subType = req.subType; - pExec->withTbName = req.withTbName; - pExec->withSchema = req.withSchema; - pExec->withTag = req.withTag; + pHandle->execHandle.subType = req.subType; + /*pExec->withTbName = req.withTbName;*/ + /*pExec->withSchema = req.withSchema;*/ + /*pExec->withTag = req.withTag;*/ - pExec->qmsg = req.qmsg; + pHandle->execHandle.exec.execCol.qmsg = req.qmsg; req.qmsg = NULL; - pExec->pWalReader = walOpenReadHandle(pTq->pVnode->pWal); - if (pExec->subType == TOPIC_SUB_TYPE__TABLE) { + pHandle->pWalReader = walOpenReadHandle(pTq->pVnode->pWal); + for (int32_t i = 0; i < 5; i++) { + pHandle->execHandle.pExecReader[i] = tqInitSubmitMsgScanner(pTq->pVnode->pMeta); + } + if (pHandle->execHandle.subType == TOPIC_SUB_TYPE__COLUMN) { for (int32_t i = 0; i < 5; i++) { - pExec->pExecReader[i] = tqInitSubmitMsgScanner(pTq->pVnode->pMeta); - SReadHandle handle = { - .reader = pExec->pExecReader[i], + .reader = pHandle->execHandle.pExecReader[i], .meta = pTq->pVnode->pMeta, .pMsgCb = &pTq->pVnode->msgCb, }; - pExec->task[i] = qCreateStreamExecTaskInfo(pExec->qmsg, &handle); - ASSERT(pExec->task[i]); + pHandle->execHandle.exec.execCol.task[i] = + qCreateStreamExecTaskInfo(pHandle->execHandle.exec.execCol.qmsg, &handle); + ASSERT(pHandle->execHandle.exec.execCol.task[i]); } - } else { - for (int32_t i = 0; i < 5; i++) { - pExec->pExecReader[i] = tqInitSubmitMsgScanner(pTq->pVnode->pMeta); - } - pExec->pDropTbUid = taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK); - } - taosHashPut(pTq->execs, req.subKey, strlen(req.subKey), pExec, sizeof(STqExec)); - - if (tqStoreExec(pTq, req.subKey, pExec) < 0) { - // TODO + } else if (pHandle->execHandle.subType == TOPIC_SUB_TYPE__DB) { + pHandle->execHandle.exec.execDb.pFilterOutTbUid = + taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK); + } else if (pHandle->execHandle.subType == TOPIC_SUB_TYPE__TABLE) { } - return 0; + taosHashPut(pTq->handles, req.subKey, strlen(req.subKey), pHandle, sizeof(STqHandle)); } else { /*ASSERT(pExec->consumerId == req.oldConsumerId);*/ // TODO handle qmsg and exec modification - atomic_store_32(&pExec->epoch, -1); - atomic_store_64(&pExec->consumerId, req.newConsumerId); - atomic_add_fetch_32(&pExec->epoch, 1); + atomic_store_32(&pHandle->epoch, -1); + atomic_store_64(&pHandle->consumerId, req.newConsumerId); + atomic_add_fetch_32(&pHandle->epoch, 1); + } - if (tqStoreExec(pTq, req.subKey, pExec) < 0) { - // TODO - } - return 0; + if 
(tqStoreHandle(pTq, req.subKey, pHandle) < 0) { + // TODO } + return 0; } void tqTableSink(SStreamTask* pTask, void* vnode, int64_t ver, void* data) { diff --git a/source/dnode/vnode/src/tq/tqExec.c b/source/dnode/vnode/src/tq/tqExec.c new file mode 100644 index 0000000000000000000000000000000000000000..b8fec34b57f49ed732f3a2f3820ec50b367937fb --- /dev/null +++ b/source/dnode/vnode/src/tq/tqExec.c @@ -0,0 +1,124 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#include "tq.h" + +static int32_t tqAddBlockDataToRsp(const SSDataBlock* pBlock, SMqDataBlkRsp* pRsp) { + int32_t dataStrLen = sizeof(SRetrieveTableRsp) + blockGetEncodeSize(pBlock); + void* buf = taosMemoryCalloc(1, dataStrLen); + if (buf == NULL) return -1; + + SRetrieveTableRsp* pRetrieve = (SRetrieveTableRsp*)buf; + pRetrieve->useconds = 0; + pRetrieve->precision = TSDB_DEFAULT_PRECISION; + pRetrieve->compressed = 0; + pRetrieve->completed = 1; + pRetrieve->numOfRows = htonl(pBlock->info.rows); + + // TODO enable compress + int32_t actualLen = 0; + blockCompressEncode(pBlock, pRetrieve->data, &actualLen, pBlock->info.numOfCols, false); + actualLen += sizeof(SRetrieveTableRsp); + ASSERT(actualLen <= dataStrLen); + taosArrayPush(pRsp->blockDataLen, &actualLen); + taosArrayPush(pRsp->blockData, &buf); + return 0; +} + +static int32_t tqAddBlockSchemaToRsp(const STqExecHandle* pExec, int32_t workerId, SMqDataBlkRsp* pRsp) { + SSchemaWrapper* pSW = tCloneSSchemaWrapper(pExec->pExecReader[workerId]->pSchemaWrapper); + taosArrayPush(pRsp->blockSchema, &pSW); + return 0; +} + +static int32_t tqAddTbNameToRsp(const STQ* pTq, const STqExecHandle* pExec, SMqDataBlkRsp* pRsp, int32_t workerId) { + SMetaReader mr = {0}; + metaReaderInit(&mr, pTq->pVnode->pMeta, 0); + int64_t uid = pExec->pExecReader[workerId]->msgIter.uid; + if (metaGetTableEntryByUid(&mr, uid) < 0) { + ASSERT(0); + return -1; + } + char* tbName = strdup(mr.me.name); + taosArrayPush(pRsp->blockTbName, &tbName); + metaReaderClear(&mr); + return 0; +} + +int32_t tqDataExec(STQ* pTq, STqExecHandle* pExec, SSubmitReq* pReq, SMqDataBlkRsp* pRsp, int32_t workerId) { + if (pExec->subType == TOPIC_SUB_TYPE__COLUMN) { + qTaskInfo_t task = pExec->exec.execCol.task[workerId]; + ASSERT(task); + qSetStreamInput(task, pReq, STREAM_DATA_TYPE_SUBMIT_BLOCK, false); + while (1) { + SSDataBlock* pDataBlock = NULL; + uint64_t ts = 0; + if (qExecTask(task, &pDataBlock, &ts) < 0) { + ASSERT(0); + } + if (pDataBlock == NULL) break; + + ASSERT(pDataBlock->info.rows != 0); + ASSERT(pDataBlock->info.numOfCols != 0); + + tqAddBlockDataToRsp(pDataBlock, pRsp); + if (pRsp->withTbName) { + tqAddTbNameToRsp(pTq, pExec, pRsp, workerId); + } + pRsp->blockNum++; + } + } else if (pExec->subType == TOPIC_SUB_TYPE__TABLE) { + pRsp->withSchema = 1; + STqReadHandle* pReader = pExec->pExecReader[workerId]; + tqReadHandleSetMsg(pReader, pReq, 0); + while (tqNextDataBlock(pReader)) { + SSDataBlock block = {0}; + if (tqRetrieveDataBlock(&block.pDataBlock, pReader, 
&block.info.groupId, &block.info.uid, &block.info.rows, + &block.info.numOfCols) < 0) { + if (terrno == TSDB_CODE_TQ_TABLE_SCHEMA_NOT_FOUND) continue; + ASSERT(0); + } + tqAddBlockDataToRsp(&block, pRsp); + if (pRsp->withTbName) { + tqAddTbNameToRsp(pTq, pExec, pRsp, workerId); + } + tqAddBlockSchemaToRsp(pExec, workerId, pRsp); + pRsp->blockNum++; + } + } else if (pExec->subType == TOPIC_SUB_TYPE__DB) { + pRsp->withSchema = 1; + STqReadHandle* pReader = pExec->pExecReader[workerId]; + tqReadHandleSetMsg(pReader, pReq, 0); + while (tqNextDataBlockFilterOut(pReader, pExec->exec.execDb.pFilterOutTbUid)) { + SSDataBlock block = {0}; + if (tqRetrieveDataBlock(&block.pDataBlock, pReader, &block.info.groupId, &block.info.uid, &block.info.rows, + &block.info.numOfCols) < 0) { + if (terrno == TSDB_CODE_TQ_TABLE_SCHEMA_NOT_FOUND) continue; + ASSERT(0); + } + tqAddBlockDataToRsp(&block, pRsp); + if (pRsp->withTbName) { + tqAddTbNameToRsp(pTq, pExec, pRsp, workerId); + } + tqAddBlockSchemaToRsp(pExec, workerId, pRsp); + pRsp->blockNum++; + } + } + if (pRsp->blockNum == 0) { + pRsp->skipLogNum++; + return -1; + } + return 0; +} diff --git a/source/dnode/vnode/src/tq/tqMeta.c b/source/dnode/vnode/src/tq/tqMeta.c new file mode 100644 index 0000000000000000000000000000000000000000..f2f48bbc8a69a022d0fc6b8a88c5a9a55d0b4ad6 --- /dev/null +++ b/source/dnode/vnode/src/tq/tqMeta.c @@ -0,0 +1,14 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */
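tqRead.c below adds tqFetchLog, which centralizes the locked WAL scan that tqProcessPollReq used to inline: starting from the requested offset it fetches head and body only for TDMT_VND_SUBMIT entries, skips everything else, and reports the offset it stopped at through the in/out parameter. A condensed sketch of the intended call pattern, lifted from the tqProcessPollReq hunk earlier in this patch; capacity constant as in the original, everything else simplified and 'reqOffset' standing for the offset carried by the consumer's poll request:

// Editor's sketch of a poll loop driving tqFetchLog.
SWalHead* pHeadWithCkSum = taosMemoryMalloc(sizeof(SWalHead) + 2048);
walSetReaderCapacity(pHandle->pWalReader, 2048);

int64_t fetchOffset = reqOffset;
while (1) {
  if (tqFetchLog(pTq, pHandle, &fetchOffset, &pHeadWithCkSum) < 0) {
    break;  // no more readable log; tqFetchLog rolled fetchOffset back by one
  }
  // Success means head AND body of a TDMT_VND_SUBMIT entry are loaded.
  SSubmitReq* pCont = (SSubmitReq*)&pHeadWithCkSum->head.body;
  // ... hand pCont to tqDataExec(pTq, &pHandle->execHandle, pCont, &rsp, workerId) ...
  fetchOffset++;  // advance past the consumed entry
}
taosMemoryFree(pHeadWithCkSum);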
diff --git a/source/dnode/vnode/src/tq/tqRead.c b/source/dnode/vnode/src/tq/tqRead.c index 9f4c5fc81e05f7a39cd76612af0809f42f01700e..1f5d3b7f53cdc9f260905d41a39b84cf4c3a75e2 100644 --- a/source/dnode/vnode/src/tq/tqRead.c +++ b/source/dnode/vnode/src/tq/tqRead.c @@ -15,6 +15,48 @@ #include "tq.h" +int64_t tqFetchLog(STQ* pTq, STqHandle* pHandle, int64_t* fetchOffset, SWalHead** ppHeadWithCkSum) { + int32_t code = 0; + taosThreadMutexLock(&pHandle->pWalReader->mutex); + int64_t offset = *fetchOffset; + + while (1) { + if (walFetchHead(pHandle->pWalReader, offset, *ppHeadWithCkSum) < 0) { + tqDebug("tmq poll: consumer %ld (epoch %d) vg %d offset %ld, no more log to return", pHandle->consumerId, + pHandle->epoch, TD_VID(pTq->pVnode), offset); + *fetchOffset = offset - 1; + code = -1; + goto END; + } + + if ((*ppHeadWithCkSum)->head.msgType == TDMT_VND_SUBMIT) { + code = walFetchBody(pHandle->pWalReader, ppHeadWithCkSum); + + if (code < 0) { + ASSERT(0); + *fetchOffset = offset; + code = -1; + goto END; + } + *fetchOffset = offset; + code = 0; + goto END; + } else { + code = walSkipFetchBody(pHandle->pWalReader, *ppHeadWithCkSum); + if (code < 0) { + ASSERT(0); + *fetchOffset = offset; + code = -1; + goto END; + } + offset++; + } + } +END: + taosThreadMutexUnlock(&pHandle->pWalReader->mutex); + return code; +} + STqReadHandle* tqInitSubmitMsgScanner(SMeta* pMeta) { STqReadHandle* pReadHandle = taosMemoryMalloc(sizeof(STqReadHandle)); if (pReadHandle == NULL) { @@ -24,7 +66,7 @@ STqReadHandle* tqInitSubmitMsgScanner(SMeta* pMeta) { pReadHandle->pMsg = NULL; pReadHandle->ver = -1; pReadHandle->pColIdList = NULL; - pReadHandle->sver = -1; + pReadHandle->cachedSchemaVer = -1; pReadHandle->cachedSchemaUid = -1; pReadHandle->pSchema = NULL; pReadHandle->pSchemaWrapper = NULL; @@ -88,11 +130,11 @@ int32_t tqRetrieveDataBlock(SArray** ppCols, STqReadHandle* pHandle, uint64_t* p // TODO set to real sversion /*int32_t sversion = 1;*/ int32_t sversion = htonl(pHandle->pBlock->sversion); - if (pHandle->sver != sversion || pHandle->cachedSchemaUid != pHandle->msgIter.suid) { + if (pHandle->cachedSchemaVer != sversion || pHandle->cachedSchemaUid != pHandle->msgIter.suid) { pHandle->pSchema = metaGetTbTSchema(pHandle->pVnodeMeta, pHandle->msgIter.uid, sversion); if (pHandle->pSchema == NULL) { tqWarn("cannot found tsschema for table: uid: %ld (suid: %ld), version %d, possibly dropped table", - pHandle->msgIter.uid, pHandle->msgIter.suid, pHandle->sver); + pHandle->msgIter.uid, pHandle->msgIter.suid, pHandle->cachedSchemaVer); /*ASSERT(0);*/ terrno = TSDB_CODE_TQ_TABLE_SCHEMA_NOT_FOUND; return -1; @@ -102,12 +144,12 @@ int32_t tqRetrieveDataBlock(SArray** ppCols, STqReadHandle* pHandle, uint64_t* p pHandle->pSchemaWrapper = metaGetTableSchema(pHandle->pVnodeMeta, pHandle->msgIter.suid, sversion, true); if (pHandle->pSchemaWrapper == NULL) { tqWarn("cannot found schema wrapper for table: suid: %ld, version %d, possibly dropped table", - pHandle->msgIter.suid, pHandle->sver); + pHandle->msgIter.suid, pHandle->cachedSchemaVer); /*ASSERT(0);*/ terrno = TSDB_CODE_TQ_TABLE_SCHEMA_NOT_FOUND; return -1; } - pHandle->sver = sversion; + pHandle->cachedSchemaVer = sversion; pHandle->cachedSchemaUid = pHandle->msgIter.suid; } diff --git a/source/dnode/vnode/src/tsdb/tsdbCommit.c b/source/dnode/vnode/src/tsdb/tsdbCommit.c index d462b7e046c0ace1f869ab5e0d0788ab43b9a915..88d8ee9f9250f0139c19f3f9e2b0f8a553dc0520 100644 --- a/source/dnode/vnode/src/tsdb/tsdbCommit.c +++ b/source/dnode/vnode/src/tsdb/tsdbCommit.c @@ -84,8 
+84,8 @@ static int tsdbMergeBlockData(SCommitH *pCommith, SCommitIter *pIter, SDataCols static void tsdbResetCommitTable(SCommitH *pCommith); static void tsdbCloseCommitFile(SCommitH *pCommith, bool hasError); static bool tsdbCanAddSubBlock(SCommitH *pCommith, SBlock *pBlock, SMergeInfo *pInfo); -static void tsdbLoadAndMergeFromCache(STsdb *pTsdb, SDataCols *pDataCols, int *iter, SCommitIter *pCommitIter, SDataCols *pTarget, - TSKEY maxKey, int maxRows, int8_t update); +static void tsdbLoadAndMergeFromCache(STsdb *pTsdb, SDataCols *pDataCols, int *iter, SCommitIter *pCommitIter, + SDataCols *pTarget, TSKEY maxKey, int maxRows, int8_t update); int tsdbWriteBlockIdx(SDFile *pHeadf, SArray *pIdxA, void **ppBuf); int tsdbApplyRtnOnFSet(STsdb *pRepo, SDFileSet *pSet, SRtn *pRtn) { @@ -466,7 +466,7 @@ static int tsdbCreateCommitIters(SCommitH *pCommith) { pTbData = (STbData *)pNode->pData; pCommitIter = pCommith->iters + i; - pTSchema = metaGetTbTSchema(REPO_META(pRepo), pTbData->uid, 1); // TODO: schema version + pTSchema = metaGetTbTSchema(REPO_META(pRepo), pTbData->uid, -1); if (pTSchema) { pCommitIter->pIter = tSkipListCreateIter(pTbData->pData); @@ -475,7 +475,8 @@ static int tsdbCreateCommitIters(SCommitH *pCommith) { pCommitIter->pTable = (STable *)taosMemoryMalloc(sizeof(STable)); pCommitIter->pTable->uid = pTbData->uid; pCommitIter->pTable->tid = pTbData->uid; - pCommitIter->pTable->pSchema = pTSchema; // metaGetTbTSchema(REPO_META(pRepo), pTbData->uid, 0); + pCommitIter->pTable->pSchema = pTSchema; + pCommitIter->pTable->pCacheSchema = NULL; } } tSkipListDestroyIter(pSlIter); @@ -490,6 +491,7 @@ static void tsdbDestroyCommitIters(SCommitH *pCommith) { tSkipListDestroyIter(pCommith->iters[i].pIter); if (pCommith->iters[i].pTable) { tdFreeSchema(pCommith->iters[i].pTable->pSchema); + tdFreeSchema(pCommith->iters[i].pTable->pCacheSchema); taosMemoryFreeClear(pCommith->iters[i].pTable); } } @@ -914,7 +916,7 @@ static int tsdbMoveBlkIdx(SCommitH *pCommith, SBlockIdx *pIdx) { while (bidx < nBlocks) { if (!pTSchema && !tsdbCommitIsSameFile(pCommith, bidx)) { // Set commit table - pTSchema = metaGetTbTSchema(REPO_META(pTsdb), pIdx->uid, 1); // TODO: schema version + pTSchema = metaGetTbTSchema(REPO_META(pTsdb), pIdx->uid, -1); // TODO: schema version if (!pTSchema) { terrno = TSDB_CODE_OUT_OF_MEMORY; return -1; @@ -948,7 +950,7 @@ static int tsdbMoveBlkIdx(SCommitH *pCommith, SBlockIdx *pIdx) { } static int tsdbSetCommitTable(SCommitH *pCommith, STable *pTable) { - STSchema *pSchema = tsdbGetTableSchemaImpl(TSDB_COMMIT_REPO(pCommith),pTable, false, false, -1); + STSchema *pSchema = tsdbGetTableSchemaImpl(TSDB_COMMIT_REPO(pCommith), pTable, false, false, -1); pCommith->pTable = pTable; @@ -1422,8 +1424,8 @@ static int tsdbMergeBlockData(SCommitH *pCommith, SCommitIter *pIter, SDataCols int biter = 0; while (true) { - tsdbLoadAndMergeFromCache(TSDB_COMMIT_REPO(pCommith), pCommith->readh.pDCols[0], &biter, pIter, pCommith->pDataCols, keyLimit, defaultRows, - pCfg->update); + tsdbLoadAndMergeFromCache(TSDB_COMMIT_REPO(pCommith), pCommith->readh.pDCols[0], &biter, pIter, pCommith->pDataCols, + keyLimit, defaultRows, pCfg->update); if (pCommith->pDataCols->numOfRows == 0) break; @@ -1447,8 +1449,8 @@ static int tsdbMergeBlockData(SCommitH *pCommith, SCommitIter *pIter, SDataCols return 0; } -static void tsdbLoadAndMergeFromCache(STsdb *pTsdb, SDataCols *pDataCols, int *iter, SCommitIter *pCommitIter, SDataCols *pTarget, - TSKEY maxKey, int maxRows, int8_t update) { +static void 
tsdbLoadAndMergeFromCache(STsdb *pTsdb, SDataCols *pDataCols, int *iter, SCommitIter *pCommitIter, + SDataCols *pTarget, TSKEY maxKey, int maxRows, int8_t update) { TSKEY key1 = INT64_MAX; TSKEY key2 = INT64_MAX; TSKEY lastKey = TSKEY_INITIAL_VAL; diff --git a/source/dnode/vnode/src/tsdb/tsdbRead.c b/source/dnode/vnode/src/tsdb/tsdbRead.c index fbfa70c1176a163ef3a3995ab48fe6010762dc77..62125b6dc7a5dadcbe534c4dab64b07e9964c3f2 100644 --- a/source/dnode/vnode/src/tsdb/tsdbRead.c +++ b/source/dnode/vnode/src/tsdb/tsdbRead.c @@ -1395,7 +1395,7 @@ static int32_t handleDataMergeIfNeeded(STsdbReadHandle* pTsdbReadHandle, SBlock* } if (pTsdbReadHandle->outputCapacity >= binfo.rows) { - ASSERT(cur->blockCompleted); + ASSERT(cur->blockCompleted || cur->mixBlock); } if (cur->rows == binfo.rows) { diff --git a/source/dnode/vnode/src/vnd/vnodeSvr.c b/source/dnode/vnode/src/vnd/vnodeSvr.c index 621ba1187205a0dea417e02aa0f832e772605133..4b237bc7031ac39220e2a9464cf874222af9a79c 100644 --- a/source/dnode/vnode/src/vnd/vnodeSvr.c +++ b/source/dnode/vnode/src/vnd/vnodeSvr.c @@ -191,9 +191,9 @@ int vnodeProcessQueryMsg(SVnode *pVnode, SRpcMsg *pMsg) { SReadHandle handle = {.meta = pVnode->pMeta, .config = &pVnode->config, .vnode = pVnode, .pMsgCb = &pVnode->msgCb}; switch (pMsg->msgType) { case TDMT_VND_QUERY: - return qWorkerProcessQueryMsg(&handle, pVnode->pQuery, pMsg); + return qWorkerProcessQueryMsg(&handle, pVnode->pQuery, pMsg, 0); case TDMT_VND_QUERY_CONTINUE: - return qWorkerProcessCQueryMsg(&handle, pVnode->pQuery, pMsg); + return qWorkerProcessCQueryMsg(&handle, pVnode->pQuery, pMsg, 0); default: vError("unknown msg type:%d in query queue", pMsg->msgType); return TSDB_CODE_VND_APP_ERROR; @@ -206,13 +206,16 @@ int vnodeProcessFetchMsg(SVnode *pVnode, SRpcMsg *pMsg, SQueueInfo *pInfo) { int32_t msgLen = pMsg->contLen - sizeof(SMsgHead); switch (pMsg->msgType) { case TDMT_VND_FETCH: - return qWorkerProcessFetchMsg(pVnode, pVnode->pQuery, pMsg); + return qWorkerProcessFetchMsg(pVnode, pVnode->pQuery, pMsg, 0); case TDMT_VND_FETCH_RSP: - return qWorkerProcessFetchRsp(pVnode, pVnode->pQuery, pMsg); + return qWorkerProcessFetchRsp(pVnode, pVnode->pQuery, pMsg, 0); case TDMT_VND_CANCEL_TASK: - return qWorkerProcessCancelMsg(pVnode, pVnode->pQuery, pMsg); + return qWorkerProcessCancelMsg(pVnode, pVnode->pQuery, pMsg, 0); case TDMT_VND_DROP_TASK: - return qWorkerProcessDropMsg(pVnode, pVnode->pQuery, pMsg); + return qWorkerProcessDropMsg(pVnode, pVnode->pQuery, pMsg, 0); + case TDMT_VND_QUERY_HEARTBEAT: + return qWorkerProcessHbMsg(pVnode, pVnode->pQuery, pMsg, 0); + case TDMT_VND_TABLE_META: return vnodeGetTableMeta(pVnode, pMsg); case TDMT_VND_CONSUME: @@ -231,9 +234,6 @@ int vnodeProcessFetchMsg(SVnode *pVnode, SRpcMsg *pMsg, SQueueInfo *pInfo) { return tqProcessTaskDispatchRsp(pVnode->pTq, pMsg); case TDMT_VND_TASK_RECOVER_RSP: return tqProcessTaskRecoverRsp(pVnode->pTq, pMsg); - - case TDMT_VND_QUERY_HEARTBEAT: - return qWorkerProcessHbMsg(pVnode, pVnode->pQuery, pMsg); default: vError("unknown msg type:%d in fetch queue", pMsg->msgType); return TSDB_CODE_VND_APP_ERROR; @@ -260,7 +260,7 @@ int vnodeProcessSyncReq(SVnode *pVnode, SRpcMsg *pMsg, SRpcMsg **pRsp) { SMsgHead *pHead = pMsg->pCont; - char logBuf[512]; + char logBuf[512] = {0}; char *syncNodeStr = sync2SimpleStr(pVnode->sync); snprintf(logBuf, sizeof(logBuf), "==vnodeProcessSyncReq== msgType:%d, syncNode: %s", pMsg->msgType, syncNodeStr); syncRpcMsgLog2(logBuf, pMsg); @@ -360,7 +360,7 @@ static int vnodeProcessCreateStbReq(SVnode *pVnode, 
int64_t version, void *pReq, goto _err; } - tdProcessRSmaCreate(pVnode->pSma, pVnode->pMeta, &req, &pVnode->msgCb); + tdProcessRSmaCreate(pVnode, &req); tDecoderClear(&coder); return 0; diff --git a/source/dnode/vnode/src/vnd/vnodeSync.c b/source/dnode/vnode/src/vnd/vnodeSync.c index d8f3110a16fbd118e966a34d2d8d8d8c58519f54..d1468778531d08cb8f2744c7e953b452a28df810 100644 --- a/source/dnode/vnode/src/vnd/vnodeSync.c +++ b/source/dnode/vnode/src/vnd/vnodeSync.c @@ -80,7 +80,7 @@ void vnodeSyncCommitMsg(SSyncFSM *pFsm, const SRpcMsg *pMsg, SFsmCbMeta cbMeta) } if (cbMeta.index > beginIndex) { - char logBuf[256]; + char logBuf[256] = {0}; snprintf( logBuf, sizeof(logBuf), "==callback== ==CommitCb== execute, pFsm:%p, index:%ld, isWeak:%d, code:%d, state:%d %s, beginIndex :%ld\n", @@ -115,7 +115,7 @@ void vnodeSyncCommitMsg(SSyncFSM *pFsm, const SRpcMsg *pMsg, SFsmCbMeta cbMeta) tmsgPutToQueue(&(pVnode->msgCb), APPLY_QUEUE, &applyMsg); } else { - char logBuf[256]; + char logBuf[256] = {0}; snprintf(logBuf, sizeof(logBuf), "==callback== ==CommitCb== do not execute, pFsm:%p, index:%ld, isWeak:%d, code:%d, state:%d %s, " "beginIndex :%ld\n", @@ -126,7 +126,7 @@ void vnodeSyncCommitMsg(SSyncFSM *pFsm, const SRpcMsg *pMsg, SFsmCbMeta cbMeta) } void vnodeSyncPreCommitMsg(SSyncFSM *pFsm, const SRpcMsg *pMsg, SFsmCbMeta cbMeta) { - char logBuf[256]; + char logBuf[256] = {0}; snprintf(logBuf, sizeof(logBuf), "==callback== ==PreCommitCb== pFsm:%p, index:%ld, isWeak:%d, code:%d, state:%d %s \n", pFsm, cbMeta.index, cbMeta.isWeak, cbMeta.code, cbMeta.state, syncUtilState2String(cbMeta.state)); @@ -134,7 +134,7 @@ void vnodeSyncPreCommitMsg(SSyncFSM *pFsm, const SRpcMsg *pMsg, SFsmCbMeta cbMet } void vnodeSyncRollBackMsg(SSyncFSM *pFsm, const SRpcMsg *pMsg, SFsmCbMeta cbMeta) { - char logBuf[256]; + char logBuf[256] = {0}; snprintf(logBuf, sizeof(logBuf), "==callback== ==RollBackCb== pFsm:%p, index:%ld, isWeak:%d, code:%d, state:%d %s \n", pFsm, cbMeta.index, cbMeta.isWeak, cbMeta.code, cbMeta.state, syncUtilState2String(cbMeta.state)); syncRpcMsgLog2(logBuf, (SRpcMsg *)pMsg); @@ -142,14 +142,13 @@ void vnodeSyncRollBackMsg(SSyncFSM *pFsm, const SRpcMsg *pMsg, SFsmCbMeta cbMeta SSyncFSM *vnodeSyncMakeFsm(SVnode *pVnode) { SSyncFSM *pFsm = taosMemoryCalloc(1, sizeof(SSyncFSM)); + memset(pFsm, 0, sizeof(*pFsm)); pFsm->data = pVnode; pFsm->FpCommitCb = vnodeSyncCommitMsg; pFsm->FpPreCommitCb = vnodeSyncPreCommitMsg; pFsm->FpRollBackCb = vnodeSyncRollBackMsg; pFsm->FpGetSnapshot = vnodeSyncGetSnapshot; pFsm->FpRestoreFinishCb = NULL; - pFsm->FpSnapshotRead = NULL; - pFsm->FpSnapshotApply = NULL; pFsm->FpReConfigCb = NULL; return pFsm; diff --git a/source/libs/catalog/src/ctgDbg.c b/source/libs/catalog/src/ctgDbg.c index 849c66fd126dcbb0b0bdee1de1ec54ea8bd3697c..fdab50db0f65fd67d16d6f5b134f847dc0f882bc 100644 --- a/source/libs/catalog/src/ctgDbg.c +++ b/source/libs/catalog/src/ctgDbg.c @@ -71,6 +71,16 @@ void ctgdUserCallback(SMetaData* pResult, void* param, int32_t code) { qDebug("empty db vgroup"); } + if (pResult->pDbInfo && taosArrayGetSize(pResult->pDbInfo) > 0) { + num = taosArrayGetSize(pResult->pDbInfo); + for (int32_t i = 0; i < num; ++i) { + SDbInfo *pDb = taosArrayGet(pResult->pDbInfo, i); + qDebug("db %d dbInfo: vgVer:%d, tbNum:%d, dbId:%" PRIx64, i, pDb->vgVer, pDb->tbNum, pDb->dbId); + } + } else { + qDebug("empty db info"); + } + if (pResult->pTableHash && taosArrayGetSize(pResult->pTableHash) > 0) { num = taosArrayGetSize(pResult->pTableHash); for (int32_t i = 0; i < num; ++i) { @@ -127,6 +137,7 
@@ int32_t ctgdLaunchAsyncCall(SCatalog* pCtg, void *pTrans, const SEpSet* pMgmtEps SCatalogReq req = {0}; req.pTableMeta = taosArrayInit(2, sizeof(SName)); req.pDbVgroup = taosArrayInit(2, TSDB_DB_FNAME_LEN); + req.pDbInfo = taosArrayInit(2, TSDB_DB_FNAME_LEN); req.pTableHash = taosArrayInit(2, sizeof(SName)); req.pUdf = taosArrayInit(2, TSDB_FUNC_NAME_LEN); req.pDbCfg = taosArrayInit(2, TSDB_DB_FNAME_LEN); @@ -149,9 +160,11 @@ int32_t ctgdLaunchAsyncCall(SCatalog* pCtg, void *pTrans, const SEpSet* pMgmtEps strcpy(dbFName, "1.db1"); taosArrayPush(req.pDbVgroup, dbFName); taosArrayPush(req.pDbCfg, dbFName); + taosArrayPush(req.pDbInfo, dbFName); strcpy(dbFName, "1.db2"); taosArrayPush(req.pDbVgroup, dbFName); taosArrayPush(req.pDbCfg, dbFName); + taosArrayPush(req.pDbInfo, dbFName); strcpy(funcName, "udf1"); taosArrayPush(req.pUdf, funcName); diff --git a/source/libs/command/src/explain.c b/source/libs/command/src/explain.c index 26a0f3bf6cf85bfe4d81a0ab5d8913d7e1767eeb..831b7017b2632a3e52e3050c08b2c29ffa463eeb 100644 --- a/source/libs/command/src/explain.c +++ b/source/libs/command/src/explain.c @@ -560,8 +560,10 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i QRY_ERR_RET(qExplainBufAppendExecInfo(pResNode->pExecInfo, tbuf, &tlen)); EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); } - EXPLAIN_ROW_APPEND(EXPLAIN_FUNCTIONS_FORMAT, pAggNode->pAggFuncs->length); - EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); + if (pAggNode->pAggFuncs) { + EXPLAIN_ROW_APPEND(EXPLAIN_FUNCTIONS_FORMAT, pAggNode->pAggFuncs->length); + EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); + } EXPLAIN_ROW_APPEND(EXPLAIN_WIDTH_FORMAT, pAggNode->node.pOutputDataBlockDesc->totalRowSize); if (pAggNode->pGroupKeys) { EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); diff --git a/source/libs/executor/inc/executorimpl.h b/source/libs/executor/inc/executorimpl.h index 9ea327636df5813c8af193cf805a311645c6fd2f..8139e71f63dfa2a58be128fbc2ced05ab81466df 100644 --- a/source/libs/executor/inc/executorimpl.h +++ b/source/libs/executor/inc/executorimpl.h @@ -156,13 +156,11 @@ typedef struct STaskAttr { } STaskAttr; struct SOperatorInfo; -struct SAggSupporter; -struct SOptrBasicInfo; +//struct SAggSupporter; +//struct SOptrBasicInfo; -typedef void (*__optr_encode_fn_t)(struct SOperatorInfo* pOperator, struct SAggSupporter* pSup, - struct SOptrBasicInfo* pInfo, char** result, int32_t* length); -typedef bool (*__optr_decode_fn_t)(struct SOperatorInfo* pOperator, struct SAggSupporter* pSup, - struct SOptrBasicInfo* pInfo, char* result, int32_t length); +typedef int32_t (*__optr_encode_fn_t)(struct SOperatorInfo* pOperator, char** result, int32_t* length); +typedef int32_t (*__optr_decode_fn_t)(struct SOperatorInfo* pOperator, char* result); typedef int32_t (*__optr_open_fn_t)(struct SOperatorInfo* pOptr); typedef SSDataBlock* (*__optr_fn_t)(struct SOperatorInfo* pOptr); @@ -442,17 +440,20 @@ typedef struct STimeWindowSupp { int64_t waterMark; TSKEY maxTs; SColumnInfoData timeWindowData; // query time window info for scalar function execution. + SHashObj *winMap; } STimeWindowAggSupp; typedef struct SIntervalAggOperatorInfo { + // SOptrBasicInfo should be first, SAggSupporter should be second for stream encode SOptrBasicInfo binfo; // basic info + SAggSupporter aggSup; // aggregate supporter + SGroupResInfo groupResInfo; // multiple results build supporter SInterval interval; // interval info int32_t primaryTsIndex; // primary time stamp slot id from result of downstream operator. 
STimeWindow win; // query time range bool timeWindowInterpo; // interpolation needed or not char** pRow; // previous row/tuple of already processed datablock - SAggSupporter aggSup; // aggregate supporter STableQueryInfo* pCurrent; // current tableQueryInfo struct int32_t order; // current SSDataBlock scan order EOPTR_EXEC_MODEL execModel; // operator execution model [batch model|stream model] @@ -463,19 +464,23 @@ typedef struct SIntervalAggOperatorInfo { } SIntervalAggOperatorInfo; typedef struct SStreamFinalIntervalOperatorInfo { + // SOptrBasicInfo should be first, SAggSupporter should be second for stream encode SOptrBasicInfo binfo; // basic info + SAggSupporter aggSup; // aggregate supporter + SGroupResInfo groupResInfo; // multiple results build supporter SInterval interval; // interval info int32_t primaryTsIndex; // primary time stamp slot id from result of downstream operator. - SAggSupporter aggSup; // aggregate supporter int32_t order; // current SSDataBlock scan order STimeWindowAggSupp twAggSup; SArray* pChildren; } SStreamFinalIntervalOperatorInfo; typedef struct SAggOperatorInfo { + // SOptrBasicInfo should be first, SAggSupporter should be second for stream encode SOptrBasicInfo binfo; SAggSupporter aggSup; + STableQueryInfo *current; uint64_t groupId; SGroupResInfo groupResInfo; @@ -488,8 +493,10 @@ typedef struct SAggOperatorInfo { } SAggOperatorInfo; typedef struct SProjectOperatorInfo { + // SOptrBasicInfo should be first, SAggSupporter should be second for stream encode SOptrBasicInfo binfo; SAggSupporter aggSup; + SSDataBlock* existDataBlock; SArray* pPseudoColInfo; SLimit limit; @@ -513,7 +520,10 @@ typedef struct SFillOperatorInfo { } SFillOperatorInfo; typedef struct SGroupbyOperatorInfo { + // SOptrBasicInfo should be first, SAggSupporter should be second for stream encode SOptrBasicInfo binfo; + SAggSupporter aggSup; + SArray* pGroupCols; // group by columns, SArray SArray* pGroupColVals; // current group column values, SArray SNode* pCondition; @@ -521,7 +531,6 @@ typedef struct SGroupbyOperatorInfo { char* keyBuf; // group by keys for hash int32_t groupKeyLen; // total group by column width SGroupResInfo groupResInfo; - SAggSupporter aggSup; SExprInfo* pScalarExprInfo; int32_t numOfScalarExpr; // the number of scalar expression in group operator SqlFunctionCtx* pScalarFuncCtx; @@ -558,8 +567,10 @@ typedef struct SWindowRowsSup { } SWindowRowsSup; typedef struct SSessionAggOperatorInfo { + // SOptrBasicInfo should be first, SAggSupporter should be second for stream encode SOptrBasicInfo binfo; SAggSupporter aggSup; + SGroupResInfo groupResInfo; SWindowRowsSup winSup; bool reptScan; // next round scan @@ -598,8 +609,10 @@ typedef struct STimeSliceOperatorInfo { } STimeSliceOperatorInfo; typedef struct SStateWindowOperatorInfo { + // SOptrBasicInfo should be first, SAggSupporter should be second for stream encode SOptrBasicInfo binfo; SAggSupporter aggSup; + SGroupResInfo groupResInfo; SWindowRowsSup winSup; SColumn stateCol; // start row index @@ -611,8 +624,10 @@ typedef struct SStateWindowOperatorInfo { } SStateWindowOperatorInfo; typedef struct SSortedMergeOperatorInfo { - + // SOptrBasicInfo should be first, SAggSupporter should be second for stream encode SOptrBasicInfo binfo; + SAggSupporter aggSup; + SArray* pSortInfo; int32_t numOfSources; SSortHandle *pSortHandle; @@ -624,7 +639,6 @@ typedef struct SSortedMergeOperatorInfo { int32_t numOfResPerPage; char** groupVal; SArray *groupInfo; - SAggSupporter aggSup; } SSortedMergeOperatorInfo; typedef struct 
SSortOperatorInfo { @@ -745,7 +759,7 @@ SOperatorInfo* createDataBlockInfoScanOperator(void* dataReader, SExecTaskInfo* SOperatorInfo* createStreamScanOperatorInfo(void* pDataReader, SReadHandle* pHandle, SArray* pTableIdList, STableScanPhysiNode* pTableScanNode, SExecTaskInfo* pTaskInfo, - STimeWindowAggSupp* pTwSup, int16_t tsColId); + STimeWindowAggSupp* pTwSup); SOperatorInfo* createFillOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExpr, int32_t numOfCols, @@ -786,16 +800,31 @@ void queryCostStatis(SExecTaskInfo* pTaskInfo); void doDestroyTask(SExecTaskInfo* pTaskInfo); int32_t getMaximumIdleDurationSec(); +/* + * ops: root operator + * data: *data save the result of encode, need to be freed by caller + * length: *length save the length of *data + * return: result code, 0 means success + */ +int32_t encodeOperator(SOperatorInfo* ops, char** data, int32_t *length); + +/* + * ops: root operator, created by caller + * data: save the result of decode + * length: the length of data + * return: result code, 0 means success + */ +int32_t decodeOperator(SOperatorInfo* ops, char* data, int32_t length); + void setTaskStatus(SExecTaskInfo* pTaskInfo, int8_t status); int32_t createExecTaskInfoImpl(SSubplan* pPlan, SExecTaskInfo** pTaskInfo, SReadHandle* pHandle, uint64_t taskId, EOPTR_EXEC_MODEL model); int32_t getOperatorExplainExecInfo(SOperatorInfo* operatorInfo, SExplainExecInfo** pRes, int32_t* capacity, int32_t* resNum); -bool aggDecodeResultRow(SOperatorInfo* pOperator, SAggSupporter* pSup, SOptrBasicInfo* pInfo, char* result, - int32_t length); -void aggEncodeResultRow(SOperatorInfo* pOperator, SAggSupporter* pSup, SOptrBasicInfo* pInfo, char** result, - int32_t* length); +int32_t aggDecodeResultRow(SOperatorInfo* pOperator, char* result); +int32_t aggEncodeResultRow(SOperatorInfo* pOperator, char** result, int32_t* length); + STimeWindow getActiveTimeWindow(SDiskbasedBuf* pBuf, SResultRowInfo* pResultRowInfo, int64_t ts, SInterval* pInterval, int32_t precision, STimeWindow* win); int32_t getNumOfRowsInTimeWindow(SDataBlockInfo* pDataBlockInfo, TSKEY* pPrimaryColumn, @@ -809,6 +838,8 @@ SResultWindowInfo* getSessionTimeWindow(SArray* pWinInfos, TSKEY ts, int64_t gap int32_t updateSessionWindowInfo(SResultWindowInfo* pWinInfo, TSKEY* pTs, int32_t rows, int32_t start, int64_t gap, SHashObj* pStDeleted); bool functionNeedToExecute(SqlFunctionCtx* pCtx); +int64_t getSmaWaterMark(int64_t interval, double filesFactor); +bool isSmaStream(int8_t triggerType); int32_t compareTimeWindow(const void* p1, const void* p2, const void* param); #ifdef __cplusplus diff --git a/source/libs/executor/src/executorimpl.c b/source/libs/executor/src/executorimpl.c index c36b3415461b5a2c5f0a1520e171385e556bb9ee..6f2be4e14ef0aee31912a20640f70faa94e73ae1 100644 --- a/source/libs/executor/src/executorimpl.c +++ b/source/libs/executor/src/executorimpl.c @@ -2601,6 +2601,7 @@ int32_t setSDataBlockFromFetchRsp(SSDataBlock* pRes, SLoadRemoteDataInfo* pLoadI pStart += sizeof(int32_t) * numOfRows; if (colLen[i] > 0) { + taosMemoryFreeClear(pColInfoData->pData); pColInfoData->pData = taosMemoryMalloc(colLen[i]); } } else { @@ -2758,6 +2759,7 @@ static SSDataBlock* concurrentlyLoadRemoteDataImpl(SOperatorInfo* pOperator, SEx pExchangeInfo->loadInfo.totalRows); pDataInfo->status = EX_SOURCE_DATA_EXHAUSTED; completed += 1; + taosMemoryFreeClear(pDataInfo->pRsp); continue; } @@ -2765,6 +2767,7 @@ static SSDataBlock* concurrentlyLoadRemoteDataImpl(SOperatorInfo* pOperator, SEx code = 
setSDataBlockFromFetchRsp(pExchangeInfo->pResult, pLoadInfo, pTableRsp->numOfRows, pTableRsp->data, pTableRsp->compLen, pTableRsp->numOfCols, startTs, &pDataInfo->totalRows, NULL); if (code != 0) { + taosMemoryFreeClear(pDataInfo->pRsp); goto _error; } @@ -2785,10 +2788,12 @@ static SSDataBlock* concurrentlyLoadRemoteDataImpl(SOperatorInfo* pOperator, SEx pDataInfo->status = EX_SOURCE_DATA_NOT_READY; code = doSendFetchDataRequest(pExchangeInfo, pTaskInfo, i); if (code != TSDB_CODE_SUCCESS) { + taosMemoryFreeClear(pDataInfo->pRsp); goto _error; } } + taosMemoryFreeClear(pDataInfo->pRsp); return pExchangeInfo->pResult; } @@ -2891,6 +2896,7 @@ static SSDataBlock* seqLoadRemoteData(SOperatorInfo* pOperator) { pDataInfo->status = EX_SOURCE_DATA_EXHAUSTED; pExchangeInfo->current += 1; + taosMemoryFreeClear(pDataInfo->pRsp); continue; } @@ -2916,6 +2922,7 @@ static SSDataBlock* seqLoadRemoteData(SOperatorInfo* pOperator) { } pOperator->resultInfo.totalRows += pRes->info.rows; + taosMemoryFreeClear(pDataInfo->pRsp); return pExchangeInfo->pResult; } } @@ -3377,7 +3384,7 @@ int32_t getTableScanInfo(SOperatorInfo* pOperator, int32_t* order, int32_t* scan // todo add more information about exchange operation int32_t type = pOperator->operatorType; if (type == QUERY_NODE_PHYSICAL_PLAN_EXCHANGE || type == QUERY_NODE_PHYSICAL_PLAN_SYSTABLE_SCAN || - type == QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN) { + type == QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN || type == QUERY_NODE_PHYSICAL_PLAN_TAG_SCAN) { *order = TSDB_ORDER_ASC; *scanFlag = MAIN_SCAN; return TSDB_CODE_SUCCESS; @@ -3441,14 +3448,14 @@ static int32_t doOpenAggregateOptr(SOperatorInfo* pOperator) { } #if 0 // test for encode/decode result info - if(pOperator->encodeResultRow){ + if(pOperator->fpSet.encodeResultRow){ char *result = NULL; int32_t length = 0; - SAggSupporter *pSup = &pAggInfo->aggSup; - pOperator->encodeResultRow(pOperator, pSup, pInfo, &result, &length); + pOperator->fpSet.encodeResultRow(pOperator, &result, &length); + SAggSupporter* pSup = &pAggInfo->aggSup; taosHashClear(pSup->pResultRowHashTable); pInfo->resultRowInfo.size = 0; - pOperator->decodeResultRow(pOperator, pSup, pInfo, result, length); + pOperator->fpSet.decodeResultRow(pOperator, result); if(result){ taosMemoryFree(result); } @@ -3491,17 +3498,25 @@ static SSDataBlock* getAggregateResult(SOperatorInfo* pOperator) { return (rows == 0) ? NULL : pInfo->pRes; }
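The aggEncodeResultRow/aggDecodeResultRow rewrite below drops the explicit SAggSupporter/SOptrBasicInfo arguments and instead relies on the layout convention annotated in executorimpl.h above (SOptrBasicInfo first, SAggSupporter second), recovering both from pOperator->info via POINTER_SHIFT. Reading the code, the buffer the pair exchanges appears to be laid out as follows; this is an editor's annotation, not text from the patch:

// Encoded result-row buffer as written by aggEncodeResultRow below:
//
//   int32_t totalLen;            // whole buffer length, back-patched last
//   int32_t count;               // number of rows in pResultRowHashTable
//   repeated count times:
//     int32_t keyLen;
//     char    key[keyLen];       // begins with the uint64_t tableGroupId
//     int32_t valueLen;          // must equal pSup->resultRowSize on decode
//     char    value[valueLen];   // the SResultRow payload
//
// encodeOperator (added further down in executorimpl.c) prefixes one more
// int32_t grand total when it concatenates the per-operator buffers of an
// operator tree, which is the pointer arithmetic decodeOperator undoes.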
-void aggEncodeResultRow(SOperatorInfo* pOperator, SAggSupporter* pSup, SOptrBasicInfo* pInfo, char** result, - int32_t* length) { - int32_t size = taosHashGetSize(pSup->pResultRowHashTable); - size_t keyLen = sizeof(uint64_t) * 2; // estimate the key length - int32_t totalSize = sizeof(int32_t) + size * (sizeof(int32_t) + keyLen + sizeof(int32_t) + pSup->resultRowSize); - *result = taosMemoryCalloc(1, totalSize); +int32_t aggEncodeResultRow(SOperatorInfo* pOperator, char** result, int32_t* length) { + if (result == NULL || length == NULL) { + return TSDB_CODE_TSC_INVALID_INPUT; + } + SOptrBasicInfo* pInfo = (SOptrBasicInfo*)(pOperator->info); + SAggSupporter* pSup = (SAggSupporter*)POINTER_SHIFT(pOperator->info, sizeof(SOptrBasicInfo)); + int32_t size = taosHashGetSize(pSup->pResultRowHashTable); + size_t keyLen = sizeof(uint64_t) * 2; // estimate the key length + int32_t totalSize = + sizeof(int32_t) + sizeof(int32_t) + size * (sizeof(int32_t) + keyLen + sizeof(int32_t) + pSup->resultRowSize); + + *result = (char*)taosMemoryCalloc(1, totalSize); if (*result == NULL) { - longjmp(pOperator->pTaskInfo->env, TSDB_CODE_OUT_OF_MEMORY); + return TSDB_CODE_OUT_OF_MEMORY; } - *(int32_t*)(*result) = size; + int32_t offset = sizeof(int32_t); + *(int32_t*)(*result + offset) = size; + offset += sizeof(int32_t); // prepare memory SResultRowPosition* pos = &pInfo->resultRowInfo.cur; @@ -3523,12 +3538,11 @@ void aggEncodeResultRow(SOperatorInfo* pOperator, SAggSupporter* pSup, SOptrBasi // recalculate the result size int32_t realTotalSize = offset + sizeof(int32_t) + keyLen + sizeof(int32_t) + pSup->resultRowSize; if (realTotalSize > totalSize) { - char* tmp = taosMemoryRealloc(*result, realTotalSize); + char* tmp = (char*)taosMemoryRealloc(*result, realTotalSize); if (tmp == NULL) { - terrno = TSDB_CODE_OUT_OF_MEMORY; taosMemoryFree(*result); *result = NULL; - longjmp(pOperator->pTaskInfo->env, TSDB_CODE_OUT_OF_MEMORY); + return TSDB_CODE_OUT_OF_MEMORY; } else { *result = tmp; } @@ -3548,22 +3562,26 @@ void aggEncodeResultRow(SOperatorInfo* pOperator, SAggSupporter* pSup, SOptrBasi pIter = taosHashIterate(pSup->pResultRowHashTable, pIter); } - if (length) { - *length = offset; - } - return; + *(int32_t*)(*result) = offset; + *length = offset; + + return TDB_CODE_SUCCESS; } -bool aggDecodeResultRow(SOperatorInfo* pOperator, SAggSupporter* pSup, SOptrBasicInfo* pInfo, char* result, - int32_t length) { - if (!result || length <= 0) { - return false; +int32_t aggDecodeResultRow(SOperatorInfo* pOperator, char* result) { + if (result == NULL) { + return TSDB_CODE_TSC_INVALID_INPUT; } + SOptrBasicInfo* pInfo = (SOptrBasicInfo*)(pOperator->info); + SAggSupporter* pSup = (SAggSupporter*)POINTER_SHIFT(pOperator->info, sizeof(SOptrBasicInfo)); // int32_t size = taosHashGetSize(pSup->pResultRowHashTable); - int32_t count = *(int32_t*)(result); - + int32_t length = *(int32_t*)(result); int32_t offset = sizeof(int32_t); + + int32_t count = *(int32_t*)(result + offset); + offset += sizeof(int32_t); + while (count-- > 0 && length > offset) { int32_t keyLen = *(int32_t*)(result + offset); offset += sizeof(int32_t); @@ -3571,7 +3589,7 @@ bool aggDecodeResultRow(SOperatorInfo* pOperator, SAggSupporter* pSup, SOptrBasi uint64_t tableGroupId = *(uint64_t*)(result + offset); SResultRow* resultRow = getNewResultRow_rv(pSup->pResultBuf, tableGroupId, pSup->resultRowSize); if (!resultRow) { - longjmp(pOperator->pTaskInfo->env, TSDB_CODE_TSC_INVALID_INPUT); + return TSDB_CODE_TSC_INVALID_INPUT; } // add a 
-bool aggDecodeResultRow(SOperatorInfo* pOperator, SAggSupporter* pSup, SOptrBasicInfo* pInfo, char* result,
-                        int32_t length) {
-  if (!result || length <= 0) {
-    return false;
+int32_t aggDecodeResultRow(SOperatorInfo* pOperator, char* result) {
+  if (result == NULL) {
+    return TSDB_CODE_TSC_INVALID_INPUT;
   }
+  SOptrBasicInfo* pInfo = (SOptrBasicInfo*)(pOperator->info);
+  SAggSupporter*  pSup = (SAggSupporter*)POINTER_SHIFT(pOperator->info, sizeof(SOptrBasicInfo));
 
   // int32_t size = taosHashGetSize(pSup->pResultRowHashTable);
-  int32_t count = *(int32_t*)(result);
-
+  int32_t length = *(int32_t*)(result);
   int32_t offset = sizeof(int32_t);
+
+  int32_t count = *(int32_t*)(result + offset);
+  offset += sizeof(int32_t);
+
   while (count-- > 0 && length > offset) {
     int32_t keyLen = *(int32_t*)(result + offset);
     offset += sizeof(int32_t);
@@ -3571,7 +3589,7 @@ bool aggDecodeResultRow(SOperatorInfo* pOperator, SAggSupporter* pSup, SOptrBasi
     uint64_t    tableGroupId = *(uint64_t*)(result + offset);
     SResultRow* resultRow = getNewResultRow_rv(pSup->pResultBuf, tableGroupId, pSup->resultRowSize);
     if (!resultRow) {
-      longjmp(pOperator->pTaskInfo->env, TSDB_CODE_TSC_INVALID_INPUT);
+      return TSDB_CODE_TSC_INVALID_INPUT;
     }
     // add a new result set for a new group
@@ -3581,7 +3599,7 @@ bool aggDecodeResultRow(SOperatorInfo* pOperator, SAggSupporter* pSup, SOptrBasi
     offset += keyLen;
     int32_t valueLen = *(int32_t*)(result + offset);
     if (valueLen != pSup->resultRowSize) {
-      longjmp(pOperator->pTaskInfo->env, TSDB_CODE_TSC_INVALID_INPUT);
+      return TSDB_CODE_TSC_INVALID_INPUT;
     }
     offset += sizeof(int32_t);
     int32_t pageId = resultRow->pageId;
@@ -3600,9 +3618,9 @@
   }
 
   if (offset != length) {
-    longjmp(pOperator->pTaskInfo->env, TSDB_CODE_TSC_INVALID_INPUT);
+    return TSDB_CODE_TSC_INVALID_INPUT;
   }
-  return true;
+  return TSDB_CODE_SUCCESS;
 }
 
 enum {
@@ -4496,7 +4514,7 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo
       SScanPhysiNode*      pScanPhyNode = (SScanPhysiNode*)pPhyNode;  // simple child table.
       STableScanPhysiNode* pTableScanNode = (STableScanPhysiNode*)pPhyNode;
       STimeWindowAggSupp   twSup = {
-          .waterMark = pTableScanNode->watermark, .calTrigger = pTableScanNode->triggerType, .maxTs = INT64_MIN};
+        .waterMark = pTableScanNode->watermark, .calTrigger = pTableScanNode->triggerType, .maxTs = INT64_MIN};
       tsdbReaderT pDataReader = NULL;
       if (pHandle->vnode) {
         pDataReader = doCreateDataReader(pTableScanNode, pHandle, pTableListInfo, (uint64_t)queryId, taskId, pTagCond);
@@ -4511,8 +4529,9 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo
         qDebug("%s pDataReader is not NULL", GET_TASKID(pTaskInfo));
       }
       SArray* tableIdList = extractTableIdList(pTableListInfo);
-      SOperatorInfo* pOperator = createStreamScanOperatorInfo(pDataReader, pHandle, tableIdList, pTableScanNode,
-                                                              pTaskInfo, &twSup, pTableScanNode->tsColId);
+
+      SOperatorInfo* pOperator = createStreamScanOperatorInfo(pDataReader, pHandle,
+          tableIdList, pTableScanNode, pTaskInfo, &twSup);
       taosArrayDestroy(tableIdList);
       return pOperator;
@@ -4613,7 +4632,19 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo
       STimeWindowAggSupp as = {.waterMark = pIntervalPhyNode->window.watermark,
                                .calTrigger = pIntervalPhyNode->window.triggerType,
-                               .maxTs = INT64_MIN};
+                               .maxTs = INT64_MIN,
+                               .winMap = NULL,};
+      if (isSmaStream(pIntervalPhyNode->window.triggerType)) {
+        if (FLT_LESS(pIntervalPhyNode->window.filesFactor, 1.000000)) {
+          as.calTrigger = STREAM_TRIGGER_AT_ONCE_SMA;
+        } else {
+          _hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_TIMESTAMP);
+          as.winMap = taosHashInit(64, hashFn, true, HASH_NO_LOCK);
+          as.waterMark = getSmaWaterMark(interval.interval,
+              pIntervalPhyNode->window.filesFactor);
+          as.calTrigger = STREAM_TRIGGER_WINDOW_CLOSE_SMA;
+        }
+      }
       int32_t tsSlotId = ((SColumnNode*)pIntervalPhyNode->window.pTspk)->slotId;
 
       pOptr = createIntervalOperatorInfo(ops[0], pExprInfo, num, pResBlock, &interval, tsSlotId, &as, pTaskInfo);
@@ -4979,6 +5010,93 @@ _error:
   return NULL;
 }
 
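/* Editor's note (hedged summary, not part of the patch): the SMA branch above
 * picks the stream trigger from the rollup file factor. Restated as a
 * hypothetical helper, with the behaviour inferred only from the two branches
 * shown above (names reused from the patch):
 */
#if 0
static int8_t chooseSmaTrigger(int8_t triggerType, double filesFactor) {
  if (!isSmaStream(triggerType)) {
    return triggerType;                    // plain streams keep their configured trigger
  }
  if (FLT_LESS(filesFactor, 1.000000)) {
    return STREAM_TRIGGER_AT_ONCE_SMA;     // emit results eagerly
  }
  return STREAM_TRIGGER_WINDOW_CLOSE_SMA;  // emit on window close; winMap then tracks
                                           // windows that have already been flushed
}
#endif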
+int32_t encodeOperator(SOperatorInfo* ops, char** result, int32_t* length) {
+  int32_t code = TSDB_CODE_SUCCESS;
+  char*   pCurrent = NULL;
+  int32_t currLength = 0;
+  if (ops->fpSet.encodeResultRow) {
+    if (result == NULL || length == NULL) {
+      return TSDB_CODE_TSC_INVALID_INPUT;
+    }
+    code = ops->fpSet.encodeResultRow(ops, &pCurrent, &currLength);
+
+    if (code != TSDB_CODE_SUCCESS) {
+      if (*result != NULL) {
+        taosMemoryFree(*result);
+        *result = NULL;
+      }
+      return code;
+    }
+
+    if (*result == NULL) {
+      *result = (char*)taosMemoryCalloc(1, currLength + sizeof(int32_t));
+      if (*result == NULL) {
+        taosMemoryFree(pCurrent);
+        return TSDB_CODE_OUT_OF_MEMORY;
+      }
+      memcpy(*result + sizeof(int32_t), pCurrent, currLength);
+      *(int32_t*)(*result) = currLength + sizeof(int32_t);
+    } else {
+      int32_t sizePre = *(int32_t*)(*result);
+      char*   tmp = (char*)taosMemoryRealloc(*result, sizePre + currLength);
+      if (tmp == NULL) {
+        taosMemoryFree(pCurrent);
+        taosMemoryFree(*result);
+        *result = NULL;
+        return TSDB_CODE_OUT_OF_MEMORY;
+      }
+      *result = tmp;
+      memcpy(*result + sizePre, pCurrent, currLength);
+      *(int32_t*)(*result) += currLength;
+    }
+    taosMemoryFree(pCurrent);
+    *length = *(int32_t*)(*result);
+  }
+
+  for (int32_t i = 0; i < ops->numOfDownstream; ++i) {
+    code = encodeOperator(ops->pDownstream[i], result, length);
+    if (code != TSDB_CODE_SUCCESS) {
+      return code;
+    }
+  }
+  return TSDB_CODE_SUCCESS;
+}
+
+int32_t decodeOperator(SOperatorInfo* ops, char* result, int32_t length) {
+  int32_t code = TSDB_CODE_SUCCESS;
+  if (ops->fpSet.decodeResultRow) {
+    if (result == NULL) {
+      return TSDB_CODE_TSC_INVALID_INPUT;
+    }
+    ASSERT(length == *(int32_t*)result);
+    char* data = result + sizeof(int32_t);
+    code = ops->fpSet.decodeResultRow(ops, data);
+    if (code != TSDB_CODE_SUCCESS) {
+      return code;
+    }
+
+    int32_t totalLength = *(int32_t*)result;
+    int32_t dataLength = *(int32_t*)data;
+
+    if (totalLength == dataLength + sizeof(int32_t)) {  // the last data
+      result = NULL;
+      length = 0;
+    } else {
+      result += dataLength;
+      *(int32_t*)(result) = totalLength - dataLength;
+      length = totalLength - dataLength;
+    }
+  }
+
+  for (int32_t i = 0; i < ops->numOfDownstream; ++i) {
+    code = decodeOperator(ops->pDownstream[i], result, length);
+    if (code != TSDB_CODE_SUCCESS) {
+      return code;
+    }
+  }
+  return TSDB_CODE_SUCCESS;
+}
+
 int32_t createExecTaskInfoImpl(SSubplan* pPlan, SExecTaskInfo** pTaskInfo, SReadHandle* pHandle, uint64_t taskId,
                                EOPTR_EXEC_MODEL model) {
   uint64_t queryId = pPlan->id.queryId;
@@ -5195,3 +5313,18 @@ int32_t initStreamAggSupporter(SStreamAggSupporter* pSup, const char* pKey) {
   }
   return createDiskbasedBuf(&pSup->pResultBuf, pageSize, bufSize, pKey, TD_TMP_DIR_PATH);
 }
+
+int64_t getSmaWaterMark(int64_t interval, double filesFactor) {
+  int64_t waterMark = 0;
+  ASSERT(FLT_GREATEREQUAL(filesFactor, 0.000000));
+  waterMark = -1 * filesFactor;
+  return waterMark;
+}
+
+bool isSmaStream(int8_t triggerType) {
+  if (triggerType == STREAM_TRIGGER_AT_ONCE ||
+      triggerType == STREAM_TRIGGER_WINDOW_CLOSE) {
+    return false;
+  }
+  return true;
+}
diff --git a/source/libs/executor/src/groupoperator.c b/source/libs/executor/src/groupoperator.c
index ef770e8afc2b5f657187682cc9adf33235dedf48..d388b802f3ae22c7c0cf135c9eb70111be1d79a4 100644
--- a/source/libs/executor/src/groupoperator.c
+++ b/source/libs/executor/src/groupoperator.c
@@ -318,7 +318,20 @@ static SSDataBlock* hashGroupbyAggregate(SOperatorInfo* pOperator) {
   //     updateNumOfRowsInResultRows(pInfo->binfo.pCtx, pOperator->numOfExprs, &pInfo->binfo.resultRowInfo,
   //                                 pInfo->binfo.rowCellInfoOffset);
   //   }
-
+#if 0
+  if(pOperator->fpSet.encodeResultRow){
+    char *result = NULL;
+    int32_t length = 0;
+    pOperator->fpSet.encodeResultRow(pOperator, &result, &length);
+    SAggSupporter* pSup = &pInfo->aggSup;
+    taosHashClear(pSup->pResultRowHashTable);
+    pInfo->binfo.resultRowInfo.size = 0;
+    pOperator->fpSet.decodeResultRow(pOperator, result);
+    if(result){
+      taosMemoryFree(result);
+    }
+  }
+#endif
   blockDataEnsureCapacity(pRes, pOperator->resultInfo.capacity);
   initGroupedResultInfo(&pInfo->groupResInfo, pInfo->aggSup.pResultRowHashTable, 0);
 
diff --git 
a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index b954eb3a221a187bc4fe96a3088125e149304ece..3e14589faae90de9fd03396ba525fd4b4a7154d9 100644 --- a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -875,7 +875,7 @@ static SSDataBlock* doStreamBlockScan(SOperatorInfo* pOperator) { if (rows == 0) { pOperator->status = OP_EXEC_DONE; } else if (pInfo->pUpdateInfo) { - SSDataBlock* upRes = getUpdateDataBlock(pInfo, true); // TODO(liuyao) get invertible from plan + SSDataBlock* upRes = getUpdateDataBlock(pInfo, true); if (upRes) { pInfo->pUpdateRes = upRes; if (upRes->info.type == STREAM_REPROCESS) { @@ -894,7 +894,7 @@ static SSDataBlock* doStreamBlockScan(SOperatorInfo* pOperator) { SOperatorInfo* createStreamScanOperatorInfo(void* pDataReader, SReadHandle* pHandle, SArray* pTableIdList, STableScanPhysiNode* pTableScanNode, SExecTaskInfo* pTaskInfo, - STimeWindowAggSupp* pTwSup, int16_t tsColId) { + STimeWindowAggSupp* pTwSup) { SStreamBlockScanInfo* pInfo = taosMemoryCalloc(1, sizeof(SStreamBlockScanInfo)); SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo)); if (pInfo == NULL || pOperator == NULL) { @@ -939,8 +939,12 @@ SOperatorInfo* createStreamScanOperatorInfo(void* pDataReader, SReadHandle* pHan goto _error; } - pInfo->primaryTsIndex = tsColId; - if (pSTInfo->interval.interval > 0) { + if (isSmaStream(pTableScanNode->triggerType)) { + pTwSup->waterMark = getSmaWaterMark(pSTInfo->interval.interval, + pTableScanNode->filesFactor); + } + pInfo->primaryTsIndex = 0; // pTableScanNode->tsColId; + if (pSTInfo->interval.interval > 0 && pDataReader) { pInfo->pUpdateInfo = updateInfoInitP(&pSTInfo->interval, pTwSup->waterMark); } else { pInfo->pUpdateInfo = NULL; diff --git a/source/libs/executor/src/timewindowoperator.c b/source/libs/executor/src/timewindowoperator.c index 829968d37f9a8a97cf1f256b493035ad0129f71a..14344daf8150799683bb8d5fac605347d611819e 100644 --- a/source/libs/executor/src/timewindowoperator.c +++ b/source/libs/executor/src/timewindowoperator.c @@ -748,10 +748,14 @@ static void hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pResul longjmp(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY); } - if (pInfo->execModel == OPTR_EXEC_MODEL_STREAM && - (pInfo->twAggSup.calTrigger == STREAM_TRIGGER_AT_ONCE || - pInfo->twAggSup.calTrigger == 0) ) { - saveResult(pResult, tableGroupId, pUpdated); + if (pInfo->execModel == OPTR_EXEC_MODEL_STREAM) { + if (pInfo->twAggSup.calTrigger == STREAM_TRIGGER_AT_ONCE || + pInfo->twAggSup.calTrigger == STREAM_TRIGGER_AT_ONCE_SMA) { + saveResult(pResult, tableGroupId, pUpdated); + } + if (pInfo->twAggSup.winMap) { + taosHashRemove(pInfo->twAggSup.winMap, &win.skey, sizeof(TSKEY)); + } } int32_t forwardStep = 0; @@ -824,10 +828,14 @@ static void hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pResul longjmp(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY); } - if (pInfo->execModel == OPTR_EXEC_MODEL_STREAM && - (pInfo->twAggSup.calTrigger == STREAM_TRIGGER_AT_ONCE || - pInfo->twAggSup.calTrigger == 0) ) { - saveResult(pResult, tableGroupId, pUpdated); + if (pInfo->execModel == OPTR_EXEC_MODEL_STREAM) { + if (pInfo->twAggSup.calTrigger == STREAM_TRIGGER_AT_ONCE || + pInfo->twAggSup.calTrigger == STREAM_TRIGGER_AT_ONCE_SMA) { + saveResult(pResult, tableGroupId, pUpdated); + } + if (pInfo->twAggSup.winMap) { + taosHashRemove(pInfo->twAggSup.winMap, &win.skey, sizeof(TSKEY)); + } } ekey = ascScan? 
nextWin.ekey:nextWin.skey; @@ -880,14 +888,14 @@ static int32_t doOpenIntervalAgg(SOperatorInfo* pOperator) { hashIntervalAgg(pOperator, &pInfo->binfo.resultRowInfo, pBlock, pBlock->info.groupId, NULL); #if 0 // test for encode/decode result info - if(pOperator->encodeResultRow){ + if(pOperator->fpSet.encodeResultRow){ char *result = NULL; int32_t length = 0; SAggSupporter *pSup = &pInfo->aggSup; - pOperator->encodeResultRow(pOperator, pSup, &pInfo->binfo, &result, &length); + pOperator->fpSet.encodeResultRow(pOperator, &result, &length); taosHashClear(pSup->pResultRowHashTable); pInfo->binfo.resultRowInfo.size = 0; - pOperator->decodeResultRow(pOperator, pSup, &pInfo->binfo, result, length); + pOperator->fpSet.decodeResultRow(pOperator, result); if(result){ taosMemoryFree(result); } @@ -1172,15 +1180,23 @@ static int32_t closeIntervalWindow(SHashObj *pHashMap, STimeWindowAggSupp *pSup, void* key = taosHashGetKey(pIte, &keyLen); uint64_t groupId = *(uint64_t*) key; ASSERT(keyLen == GET_RES_WINDOW_KEY_LEN(sizeof(TSKEY))); - TSKEY ts = *(uint64_t*) ((char*)key + sizeof(uint64_t)); + TSKEY ts = *(int64_t*) ((char*)key + sizeof(uint64_t)); SResultRowInfo dumyInfo; dumyInfo.cur.pageId = -1; STimeWindow win = getActiveTimeWindow(NULL, &dumyInfo, ts, pInterval, pInterval->precision, NULL); if (win.ekey < pSup->maxTs - pSup->waterMark) { + if (pSup->calTrigger == STREAM_TRIGGER_WINDOW_CLOSE_SMA) { + if (taosHashGet(pSup->winMap, &win.skey, sizeof(TSKEY))) { + continue; + } + } char keyBuf[GET_RES_WINDOW_KEY_LEN(sizeof(TSKEY))]; SET_RES_WINDOW_KEY(keyBuf, &ts, sizeof(TSKEY), groupId); - taosHashRemove(pHashMap, keyBuf, keyLen); + if (pSup->calTrigger != STREAM_TRIGGER_AT_ONCE_SMA && + pSup->calTrigger != STREAM_TRIGGER_WINDOW_CLOSE_SMA) { + taosHashRemove(pHashMap, keyBuf, keyLen); + } SResKeyPos* pos = taosMemoryMalloc(sizeof(SResKeyPos) + sizeof(uint64_t)); if (pos == NULL) { return TSDB_CODE_OUT_OF_MEMORY; @@ -1192,6 +1208,7 @@ static int32_t closeIntervalWindow(SHashObj *pHashMap, STimeWindowAggSupp *pSup, taosMemoryFree(pos); return TSDB_CODE_OUT_OF_MEMORY; } + taosHashPut(pSup->winMap, &win.skey, sizeof(TSKEY), NULL, 0); } } return TSDB_CODE_SUCCESS; @@ -1248,7 +1265,8 @@ static SSDataBlock* doStreamIntervalAgg(SOperatorInfo* pOperator) { &pInfo->interval, pClosed); finalizeUpdatedResult(pOperator->numOfExprs, pInfo->aggSup.pResultBuf, pClosed, pInfo->binfo.rowCellInfoOffset); - if (pInfo->twAggSup.calTrigger == STREAM_TRIGGER__WINDOW_CLOSE) { + if (pInfo->twAggSup.calTrigger == STREAM_TRIGGER_WINDOW_CLOSE || + pInfo->twAggSup.calTrigger == STREAM_TRIGGER_WINDOW_CLOSE_SMA) { taosArrayAddAll(pUpdated, pClosed); } taosArrayDestroy(pClosed); @@ -2412,7 +2430,7 @@ int32_t closeSessionWindow(SArray *pWins, STimeWindowAggSupp *pTwSup, SArray *pC return TSDB_CODE_OUT_OF_MEMORY; } pSeWin->isClosed = true; - if (calTrigger == STREAM_TRIGGER__WINDOW_CLOSE) { + if (calTrigger == STREAM_TRIGGER_WINDOW_CLOSE) { pSeWin->isOutput = true; } } @@ -2486,7 +2504,7 @@ static SSDataBlock* doStreamSessionWindowAgg(SOperatorInfo* pOperator) { SArray* pUpdated = taosArrayInit(16, POINTER_BYTES); copyUpdateResult(pStUpdated, pUpdated, pBInfo->pRes->info.groupId); taosHashCleanup(pStUpdated); - if (pInfo->twAggSup.calTrigger == STREAM_TRIGGER__WINDOW_CLOSE) { + if (pInfo->twAggSup.calTrigger == STREAM_TRIGGER_WINDOW_CLOSE) { taosArrayAddAll(pUpdated, pClosed); } diff --git a/source/libs/function/src/builtins.c b/source/libs/function/src/builtins.c index 
24a781855ac2337863a381e7d01d22159ee78937..add94cb83ca106675f82839d7cff677c062c3cf4 100644
--- a/source/libs/function/src/builtins.c
+++ b/source/libs/function/src/builtins.c
@@ -183,7 +183,7 @@ static int32_t translatePercentile(SFunctionNode* pFunc, char* pErrBuf, int32_t
   return TSDB_CODE_SUCCESS;
 }
 
-static bool validAperventileAlgo(const SValueNode* pVal) {
+static bool validateApercentileAlgo(const SValueNode* pVal) {
   if (TSDB_DATA_TYPE_BINARY != pVal->node.resType.type) {
     return false;
   }
@@ -231,7 +231,7 @@ static int32_t translateApercentile(SFunctionNode* pFunc, char* pErrBuf, int32_t
   }
 
   SNode* pParamNode2 = nodesListGetNode(pFunc->pParameterList, 2);
-  if (QUERY_NODE_VALUE != nodeType(pParamNode2) || !validAperventileAlgo((SValueNode*)pParamNode2)) {
+  if (QUERY_NODE_VALUE != nodeType(pParamNode2) || !validateApercentileAlgo((SValueNode*)pParamNode2)) {
     return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR,
                            "Third parameter algorithm of apercentile must be 'default' or 't-digest'");
   }
@@ -438,6 +438,18 @@ static int32_t translateHLL(SFunctionNode* pFunc, char* pErrBuf, int32_t len) {
   return TSDB_CODE_SUCCESS;
 }
 
+static bool validateStateOper(const SValueNode* pVal) {
+  if (TSDB_DATA_TYPE_BINARY != pVal->node.resType.type) {
+    return false;
+  }
+  return (0 == strcasecmp(varDataVal(pVal->datum.p), "GT") ||
+          0 == strcasecmp(varDataVal(pVal->datum.p), "GE") ||
+          0 == strcasecmp(varDataVal(pVal->datum.p), "LT") ||
+          0 == strcasecmp(varDataVal(pVal->datum.p), "LE") ||
+          0 == strcasecmp(varDataVal(pVal->datum.p), "EQ") ||
+          0 == strcasecmp(varDataVal(pVal->datum.p), "NE"));
+}
+
 static int32_t translateStateCount(SFunctionNode* pFunc, char* pErrBuf, int32_t len) {
   int32_t numOfParams = LIST_LENGTH(pFunc->pParameterList);
   if (3 != numOfParams) {
@@ -445,6 +457,11 @@ static int32_t translateStateCount(SFunctionNode* pFunc, char* pErrBuf, int32_t
   }
 
   // param0
+  SNode* pParaNode0 = nodesListGetNode(pFunc->pParameterList, 0);
+  if (QUERY_NODE_COLUMN != nodeType(pParaNode0)) {
+    return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR,
+                           "The input parameter of STATECOUNT function can only be column");
+  }
   uint8_t colType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type;
   if (!IS_NUMERIC_TYPE(colType)) {
     return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
@@ -459,6 +476,12 @@ static int32_t translateStateCount(SFunctionNode* pFunc, char* pErrBuf, int32_t
 
     SValueNode* pValue = (SValueNode*)pParamNode;
 
+    if (i == 1 && !validateStateOper(pValue)) {
+      return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR,
+                             "Second parameter of STATECOUNT function "
+                             "must be one of the following: 'GE', 'GT', 'LE', 'LT', 'EQ', 'NE'");
+    }
+
     pValue->notReserved = true;
   }
 
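/* Editor's note (illustrative, not part of the patch): with the checks added
 * above, the second argument of statecount must be a string naming one of the
 * six comparison operators, and validateStateOper() matches it with
 * strcasecmp(), so case does not matter. Hypothetical examples (table tb and
 * column c1 are made up):
 *
 *   SELECT STATECOUNT(c1, 'GT', 10) FROM tb;   -- accepted
 *   SELECT statecount(c1, 'lt', 10) FROM tb;   -- accepted, match is case-insensitive
 *   SELECT STATECOUNT(c1, '>',  10) FROM tb;   -- rejected by validateStateOper()
 */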
@@ -480,6 +503,11 @@ static int32_t translateStateDuration(SFunctionNode* pFunc, char* pErrBuf, int32
   }
 
   // param0
+  SNode* pParaNode0 = nodesListGetNode(pFunc->pParameterList, 0);
+  if (QUERY_NODE_COLUMN != nodeType(pParaNode0)) {
+    return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR,
+                           "The input parameter of STATEDURATION function can only be column");
+  }
   uint8_t colType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type;
   if (!IS_NUMERIC_TYPE(colType)) {
     return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
@@ -494,6 +522,16 @@ static int32_t translateStateDuration(SFunctionNode* pFunc, char* pErrBuf, int32
 
     SValueNode* pValue = (SValueNode*)pParamNode;
 
+    if (i == 1 && !validateStateOper(pValue)) {
+      return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR,
+                             "Second parameter of STATEDURATION function "
+                             "must be one of the following: 'GE', 'GT', 'LE', 'LT', 'EQ', 'NE'");
+    } else if (i == 3 && pValue->datum.i == 0) {
+      return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR,
+                             "STATEDURATION function time unit parameter should be greater than db precision");
+    }
+
     pValue->notReserved = true;
   }
 
@@ -693,7 +731,7 @@ static int32_t translateFirstLast(SFunctionNode* pFunc, char* pErrBuf, int32_t l
 
 static int32_t translateUnique(SFunctionNode* pFunc, char* pErrBuf, int32_t len) {
   if (1 != LIST_LENGTH(pFunc->pParameterList)) {
-    return TSDB_CODE_SUCCESS;
+    return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName);
   }
 
   SNode* pPara = nodesListGetNode(pFunc->pParameterList, 0);
@@ -1181,7 +1219,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
     .finalizeFunc = functionFinalize
   },
   {
-    .name = "state_count",
+    .name = "statecount",
     .type = FUNCTION_TYPE_STATE_COUNT,
     .classification = FUNC_MGT_INDEFINITE_ROWS_FUNC,
     .translateFunc = translateStateCount,
@@ -1191,7 +1229,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
     .finalizeFunc = NULL
   },
   {
-    .name = "state_duration",
+    .name = "stateduration",
     .type = FUNCTION_TYPE_STATE_DURATION,
     .classification = FUNC_MGT_INDEFINITE_ROWS_FUNC | FUNC_MGT_TIMELINE_FUNC,
     .translateFunc = translateStateDuration,
diff --git a/source/libs/function/src/builtinsimpl.c b/source/libs/function/src/builtinsimpl.c
index 068d06fc31a9e73c05e84308a0b137386c69b060..8707946253e0fcaf634ff99a16ca26dba4626abc 100644
--- a/source/libs/function/src/builtinsimpl.c
+++ b/source/libs/function/src/builtinsimpl.c
@@ -3776,6 +3776,7 @@ static void tailAssignResult(STailItem* pItem, char *data, int32_t colBytes, TSK
   if (isNull) {
     pItem->isNull = true;
   } else {
+    pItem->isNull = false;
     memcpy(pItem->data, data, colBytes);
   }
 }
diff --git a/source/libs/index/test/CMakeLists.txt b/source/libs/index/test/CMakeLists.txt
index d7ae3aacb95d8cd41d62d702b59898c796080d99..2835084a81b87e358916c20ce0e6c70cf6884021 100644
--- a/source/libs/index/test/CMakeLists.txt
+++ b/source/libs/index/test/CMakeLists.txt
@@ -97,11 +97,11 @@ if(NOT TD_WINDOWS)
     NAME idxtest
     COMMAND idxTest
   )
+  add_test(
+    NAME idxJsonUT
+    COMMAND idxJsonUT
+  )
 endif(NOT TD_WINDOWS)
-add_test(
-  NAME idxJsonUT
-  COMMAND idxJsonUT
-)
 add_test(
   NAME idxUtilUT
   COMMAND idxUtilUT
diff --git a/source/libs/nodes/src/nodesCloneFuncs.c b/source/libs/nodes/src/nodesCloneFuncs.c
index 68d3741b482105d02d4751847f01f3fbdc32986f..cb4a4f104c1d72fcd367ef268060e1e10e486d0b 100644
--- a/source/libs/nodes/src/nodesCloneFuncs.c
+++ b/source/libs/nodes/src/nodesCloneFuncs.c
@@ -305,6 +305,7 @@ static SNode* logicNodeCopy(const SLogicNode* pSrc, SLogicNode* pDst) {
   CLONE_NODE_FIELD(pConditions);
   CLONE_NODE_LIST_FIELD(pChildren);
   COPY_SCALAR_FIELD(optimizedFlag);
+  COPY_SCALAR_FIELD(precision);
   return (SNode*)pDst;
 }
@@ -328,6 +329,10 @@ static SNode* logicScanCopy(const SScanLogicNode* pSrc, SScanLogicNode* pDst) {
   COPY_SCALAR_FIELD(intervalUnit);
   COPY_SCALAR_FIELD(slidingUnit);
   CLONE_NODE_FIELD(pTagCond);
+  COPY_SCALAR_FIELD(triggerType);
+  COPY_SCALAR_FIELD(watermark);
+  COPY_SCALAR_FIELD(tsColId);
+  COPY_SCALAR_FIELD(filesFactor);
   return (SNode*)pDst;
 }
@@ -384,6 +389,7 @@ static SNode* logicWindowCopy(const SWindowLogicNode* pSrc, SWindowLogicNode* pD
   CLONE_NODE_FIELD(pStateExpr);
   COPY_SCALAR_FIELD(triggerType);
   COPY_SCALAR_FIELD(watermark);
+  COPY_SCALAR_FIELD(filesFactor);
   return (SNode*)pDst;
 }
diff --git 
a/source/libs/nodes/src/nodesCodeFuncs.c b/source/libs/nodes/src/nodesCodeFuncs.c index 78710569cbe6718c6fa899448a1cab11edebaab3..c0c8168eb1d6fb41632003c3c7a21c7c29406e13 100644 --- a/source/libs/nodes/src/nodesCodeFuncs.c +++ b/source/libs/nodes/src/nodesCodeFuncs.c @@ -1133,6 +1133,7 @@ static const char* jkTableScanPhysiPlanSlidingUnit = "slidingUnit"; static const char* jkTableScanPhysiPlanTriggerType = "triggerType"; static const char* jkTableScanPhysiPlanWatermark = "watermark"; static const char* jkTableScanPhysiPlanTsColId = "tsColId"; +static const char* jkTableScanPhysiPlanFilesFactor = "FilesFactor"; static int32_t physiTableScanNodeToJson(const void* pObj, SJson* pJson) { const STableScanPhysiNode* pNode = (const STableScanPhysiNode*)pObj; @@ -1183,6 +1184,9 @@ static int32_t physiTableScanNodeToJson(const void* pObj, SJson* pJson) { if (TSDB_CODE_SUCCESS == code) { code = tjsonAddIntegerToObject(pJson, jkTableScanPhysiPlanTsColId, pNode->tsColId); } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddDoubleToObject(pJson, jkTableScanPhysiPlanFilesFactor, pNode->filesFactor); + } return code; } @@ -1242,7 +1246,9 @@ static int32_t jsonToPhysiTableScanNode(const SJson* pJson, void* pObj) { if (TSDB_CODE_SUCCESS == code) { tjsonGetNumberValue(pJson, jkTableScanPhysiPlanTsColId, pNode->tsColId, code); } - + if (TSDB_CODE_SUCCESS == code) { + code = tjsonGetDoubleValue(pJson, jkTableScanPhysiPlanFilesFactor, &pNode->filesFactor); + } return code; } @@ -1496,6 +1502,7 @@ static const char* jkWindowPhysiPlanFuncs = "Funcs"; static const char* jkWindowPhysiPlanTsPk = "TsPk"; static const char* jkWindowPhysiPlanTriggerType = "TriggerType"; static const char* jkWindowPhysiPlanWatermark = "Watermark"; +static const char* jkWindowPhysiPlanFilesFactor = "FilesFactor"; static int32_t physiWindowNodeToJson(const void* pObj, SJson* pJson) { const SWinodwPhysiNode* pNode = (const SWinodwPhysiNode*)pObj; @@ -1516,6 +1523,9 @@ static int32_t physiWindowNodeToJson(const void* pObj, SJson* pJson) { if (TSDB_CODE_SUCCESS == code) { code = tjsonAddIntegerToObject(pJson, jkWindowPhysiPlanWatermark, pNode->watermark); } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddDoubleToObject(pJson, jkWindowPhysiPlanFilesFactor, pNode->filesFactor); + } return code; } @@ -1541,6 +1551,9 @@ static int32_t jsonToPhysiWindowNode(const SJson* pJson, void* pObj) { tjsonGetNumberValue(pJson, jkWindowPhysiPlanWatermark, pNode->watermark, code); ; } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonGetDoubleValue(pJson, jkWindowPhysiPlanFilesFactor, &pNode->filesFactor); + } return code; } @@ -3276,7 +3289,7 @@ static int32_t createTopicStmtToJson(const void* pObj, SJson* pJson) { int32_t code = tjsonAddStringToObject(pJson, jkCreateTopicStmtTopicName, pNode->topicName); if (TSDB_CODE_SUCCESS == code) { - code = tjsonAddStringToObject(pJson, jkCreateTopicStmtSubscribeDbName, pNode->subscribeDbName); + code = tjsonAddStringToObject(pJson, jkCreateTopicStmtSubscribeDbName, pNode->subDbName); } if (TSDB_CODE_SUCCESS == code) { code = tjsonAddBoolToObject(pJson, jkCreateTopicStmtIgnoreExists, pNode->ignoreExists); @@ -3293,7 +3306,7 @@ static int32_t jsonToCreateTopicStmt(const SJson* pJson, void* pObj) { int32_t code = tjsonGetStringValue(pJson, jkCreateTopicStmtTopicName, pNode->topicName); if (TSDB_CODE_SUCCESS == code) { - code = tjsonGetStringValue(pJson, jkCreateTopicStmtSubscribeDbName, pNode->subscribeDbName); + code = tjsonGetStringValue(pJson, jkCreateTopicStmtSubscribeDbName, pNode->subDbName); } if 
(TSDB_CODE_SUCCESS == code) { code = tjsonGetBoolValue(pJson, jkCreateTopicStmtIgnoreExists, &pNode->ignoreExists); diff --git a/source/libs/nodes/src/nodesUtilFuncs.c b/source/libs/nodes/src/nodesUtilFuncs.c index e28844f2e16f07c57232b073f0052411d60a2d0f..d29e89d266d7578d6e80dede38220a94163e7681 100644 --- a/source/libs/nodes/src/nodesUtilFuncs.c +++ b/source/libs/nodes/src/nodesUtilFuncs.c @@ -86,8 +86,6 @@ SNodeptr nodesMakeNode(ENodeType type) { return makeNode(type, sizeof(SExplainOptions)); case QUERY_NODE_STREAM_OPTIONS: return makeNode(type, sizeof(SStreamOptions)); - case QUERY_NODE_TOPIC_OPTIONS: - return makeNode(type, sizeof(STopicOptions)); case QUERY_NODE_LEFT_VALUE: return makeNode(type, sizeof(SLeftValueNode)); case QUERY_NODE_SET_OPERATOR: diff --git a/source/libs/parser/inc/parAst.h b/source/libs/parser/inc/parAst.h index a1c304118bfcdc5078bf0a19b73a8bde17e3c0cf..7dd0ef2616bf3fda27192fa7099906348753c163 100644 --- a/source/libs/parser/inc/parAst.h +++ b/source/libs/parser/inc/parAst.h @@ -59,7 +59,6 @@ typedef enum EDatabaseOptionType { typedef enum ETableOptionType { TABLE_OPTION_COMMENT = 1, - TABLE_OPTION_DELAY, TABLE_OPTION_FILE_FACTOR, TABLE_OPTION_ROLLUP, TABLE_OPTION_TTL, @@ -168,7 +167,7 @@ SNode* createCreateComponentNodeStmt(SAstCreateContext* pCxt, ENodeType type, co SNode* createDropComponentNodeStmt(SAstCreateContext* pCxt, ENodeType type, const SToken* pDnodeId); SNode* createTopicOptions(SAstCreateContext* pCxt); SNode* createCreateTopicStmt(SAstCreateContext* pCxt, bool ignoreExists, const SToken* pTopicName, SNode* pQuery, - const SToken* pSubscribeDbName, SNode* pOptions); + const SToken* pSubDbName, SNode* pRealTable); SNode* createDropTopicStmt(SAstCreateContext* pCxt, bool ignoreNotExists, const SToken* pTopicName); SNode* createDropCGroupStmt(SAstCreateContext* pCxt, bool ignoreNotExists, const SToken* pCGroupId, const SToken* pTopicName); diff --git a/source/libs/parser/inc/parInt.h b/source/libs/parser/inc/parInt.h index 184ed7d8b243ed2ec97e4c38b1f1e31de9e3f2c2..3efe6700d2339b2234d33f868c0b42fa993d1b64 100644 --- a/source/libs/parser/inc/parInt.h +++ b/source/libs/parser/inc/parInt.h @@ -24,6 +24,7 @@ extern "C" { #include "parUtil.h" #include "parser.h" +int32_t parseInsertSyntax(SParseContext* pContext, SQuery** pQuery); int32_t parseInsertSql(SParseContext* pContext, SQuery** pQuery); int32_t parse(SParseContext* pParseCxt, SQuery** pQuery); int32_t collectMetaKey(SParseContext* pParseCxt, SQuery* pQuery); diff --git a/source/libs/parser/inc/parUtil.h b/source/libs/parser/inc/parUtil.h index 80288dbc448a0cd35212da5e672b6b59bc021313..2468f7d75bd16acb21c2bb45dd74ae46c1b669d7 100644 --- a/source/libs/parser/inc/parUtil.h +++ b/source/libs/parser/inc/parUtil.h @@ -65,12 +65,15 @@ int32_t trimString(const char* src, int32_t len, char* dst, int32_t dlen); int32_t buildCatalogReq(const SParseMetaCache* pMetaCache, SCatalogReq* pCatalogReq); int32_t putMetaDataToCache(const SCatalogReq* pCatalogReq, const SMetaData* pMetaData, SParseMetaCache* pMetaCache); int32_t reserveTableMetaInCache(int32_t acctId, const char* pDb, const char* pTable, SParseMetaCache* pMetaCache); +int32_t reserveTableMetaInCacheExt(const SName* pName, SParseMetaCache* pMetaCache); int32_t reserveDbVgInfoInCache(int32_t acctId, const char* pDb, SParseMetaCache* pMetaCache); int32_t reserveTableVgroupInCache(int32_t acctId, const char* pDb, const char* pTable, SParseMetaCache* pMetaCache); +int32_t reserveTableVgroupInCacheExt(const SName* pName, SParseMetaCache* pMetaCache); 
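/* Editor's note (hedged sketch): the *Ext variants declared above take an
 * SName instead of (acctId, db, table) but, judging from parUtil.c in this
 * patch, they reserve the same "acctId.db.table" keys via
 * tNameExtractFullName(). Assumed usage, with `name` filled in elsewhere
 * (e.g. by createSName()):
 */
#if 0
  SName   name;  /* hypothetical; populated before this point */
  int32_t code = reserveTableMetaInCacheExt(&name, pMetaCache);
  if (TSDB_CODE_SUCCESS == code) {
    code = reserveTableVgroupInCacheExt(&name, pMetaCache);  /* same key space as the acctId/db/table variants */
  }
#endif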
int32_t reserveDbVgVersionInCache(int32_t acctId, const char* pDb, SParseMetaCache* pMetaCache); int32_t reserveDbCfgInCache(int32_t acctId, const char* pDb, SParseMetaCache* pMetaCache); int32_t reserveUserAuthInCache(int32_t acctId, const char* pUser, const char* pDb, AUTH_TYPE type, SParseMetaCache* pMetaCache); +int32_t reserveUserAuthInCacheExt(const char* pUser, const SName* pName, AUTH_TYPE type, SParseMetaCache* pMetaCache); int32_t reserveUdfInCache(const char* pFunc, SParseMetaCache* pMetaCache); int32_t getTableMetaFromCache(SParseMetaCache* pMetaCache, const SName* pName, STableMeta** pMeta); int32_t getDbVgInfoFromCache(SParseMetaCache* pMetaCache, const char* pDbFName, SArray** pVgInfo); @@ -78,7 +81,7 @@ int32_t getTableVgroupFromCache(SParseMetaCache* pMetaCache, const SName* pName, int32_t getDbVgVersionFromCache(SParseMetaCache* pMetaCache, const char* pDbFName, int32_t* pVersion, int64_t* pDbId, int32_t* pTableNum); int32_t getDbCfgFromCache(SParseMetaCache* pMetaCache, const char* pDbFName, SDbCfgInfo* pInfo); -int32_t getUserAuthFromCache(SParseMetaCache* pMetaCache, const char* pUser, const char* pDb, AUTH_TYPE type, +int32_t getUserAuthFromCache(SParseMetaCache* pMetaCache, const char* pUser, const char* pDbFName, AUTH_TYPE type, bool* pPass); int32_t getUdfInfoFromCache(SParseMetaCache* pMetaCache, const char* pFunc, SFuncInfo* pInfo); diff --git a/source/libs/parser/inc/sql.y b/source/libs/parser/inc/sql.y index 1fb60f83a5a822e627f8cbdf54b3a1e42c4daa5d..75eceedb1b1113b5b186fa8083a2b6899d1b7ac3 100644 --- a/source/libs/parser/inc/sql.y +++ b/source/libs/parser/inc/sql.y @@ -313,7 +313,6 @@ tags_def(A) ::= TAGS NK_LP column_def_list(B) NK_RP. table_options(A) ::= . { A = createDefaultTableOptions(pCxt); } table_options(A) ::= table_options(B) COMMENT NK_STRING(C). { A = setTableOption(pCxt, B, TABLE_OPTION_COMMENT, &C); } -table_options(A) ::= table_options(B) DELAY NK_INTEGER(C). { A = setTableOption(pCxt, B, TABLE_OPTION_DELAY, &C); } table_options(A) ::= table_options(B) FILE_FACTOR NK_FLOAT(C). { A = setTableOption(pCxt, B, TABLE_OPTION_FILE_FACTOR, &C); } table_options(A) ::= table_options(B) ROLLUP NK_LP func_name_list(C) NK_RP. { A = setTableOption(pCxt, B, TABLE_OPTION_ROLLUP, C); } table_options(A) ::= table_options(B) TTL NK_INTEGER(C). { A = setTableOption(pCxt, B, TABLE_OPTION_TTL, &C); } @@ -403,17 +402,12 @@ func_list(A) ::= func_list(B) NK_COMMA func(C). func(A) ::= function_name(B) NK_LP expression_list(C) NK_RP. { A = createFunctionNode(pCxt, &B, C); } /************************************************ create/drop topic ***************************************************/ -cmd ::= CREATE TOPIC not_exists_opt(A) - topic_name(B) topic_options(D) AS query_expression(C). { pCxt->pRootNode = createCreateTopicStmt(pCxt, A, &B, C, NULL, D); } -cmd ::= CREATE TOPIC not_exists_opt(A) - topic_name(B) topic_options(D) AS db_name(C). { pCxt->pRootNode = createCreateTopicStmt(pCxt, A, &B, NULL, &C, D); } +cmd ::= CREATE TOPIC not_exists_opt(A) topic_name(B) AS query_expression(C). { pCxt->pRootNode = createCreateTopicStmt(pCxt, A, &B, C, NULL, NULL); } +cmd ::= CREATE TOPIC not_exists_opt(A) topic_name(B) AS DATABASE db_name(C). { pCxt->pRootNode = createCreateTopicStmt(pCxt, A, &B, NULL, &C, NULL); } +cmd ::= CREATE TOPIC not_exists_opt(A) topic_name(B) + AS STABLE full_table_name(C). { pCxt->pRootNode = createCreateTopicStmt(pCxt, A, &B, NULL, NULL, C); } cmd ::= DROP TOPIC exists_opt(A) topic_name(B). 
{ pCxt->pRootNode = createDropTopicStmt(pCxt, A, &B); } -cmd ::= DROP CGROUP exists_opt(A) cgroup_name(B) ON topic_name(C). { pCxt->pRootNode = createDropCGroupStmt(pCxt, A, &B, &C); } - -topic_options(A) ::= . { A = createTopicOptions(pCxt); } -topic_options(A) ::= topic_options(B) WITH TABLE. { ((STopicOptions*)B)->withTable = true; A = B; } -topic_options(A) ::= topic_options(B) WITH SCHEMA. { ((STopicOptions*)B)->withSchema = true; A = B; } -topic_options(A) ::= topic_options(B) WITH TAG. { ((STopicOptions*)B)->withTag = true; A = B; } +cmd ::= DROP CONSUMER GROUP exists_opt(A) cgroup_name(B) ON topic_name(C). { pCxt->pRootNode = createDropCGroupStmt(pCxt, A, &B, &C); } /************************************************ desc/describe *******************************************************/ cmd ::= DESC full_table_name(A). { pCxt->pRootNode = createDescribeStmt(pCxt, A); } diff --git a/source/libs/parser/src/parAstCreater.c b/source/libs/parser/src/parAstCreater.c index 836a0cb520684e264cecb3cd6425ae3c7688de68..72a88548d2270d6d4776e2614bf05fc8c2b7ebf6 100644 --- a/source/libs/parser/src/parAstCreater.c +++ b/source/libs/parser/src/parAstCreater.c @@ -857,7 +857,6 @@ SNode* createDefaultTableOptions(SAstCreateContext* pCxt) { CHECK_PARSER_STATUS(pCxt); STableOptions* pOptions = nodesMakeNode(QUERY_NODE_TABLE_OPTIONS); CHECK_OUT_OF_MEM(pOptions); - pOptions->delay = TSDB_DEFAULT_ROLLUP_DELAY; pOptions->filesFactor = TSDB_DEFAULT_ROLLUP_FILE_FACTOR; pOptions->ttl = TSDB_DEFAULT_TABLE_TTL; return (SNode*)pOptions; @@ -867,7 +866,6 @@ SNode* createAlterTableOptions(SAstCreateContext* pCxt) { CHECK_PARSER_STATUS(pCxt); STableOptions* pOptions = nodesMakeNode(QUERY_NODE_TABLE_OPTIONS); CHECK_OUT_OF_MEM(pOptions); - pOptions->delay = -1; pOptions->filesFactor = -1; pOptions->ttl = -1; return (SNode*)pOptions; @@ -882,11 +880,8 @@ SNode* setTableOption(SAstCreateContext* pCxt, SNode* pOptions, ETableOptionType sizeof(((STableOptions*)pOptions)->comment)); } break; - case TABLE_OPTION_DELAY: - ((STableOptions*)pOptions)->delay = taosStr2Int32(((SToken*)pVal)->z, NULL, 10); - break; case TABLE_OPTION_FILE_FACTOR: - ((STableOptions*)pOptions)->filesFactor = taosStr2Float(((SToken*)pVal)->z, NULL); + ((STableOptions*)pOptions)->filesFactor = taosStr2Double(((SToken*)pVal)->z, NULL); break; case TABLE_OPTION_ROLLUP: ((STableOptions*)pOptions)->pRollupFuncs = pVal; @@ -1266,28 +1261,22 @@ SNode* createDropComponentNodeStmt(SAstCreateContext* pCxt, ENodeType type, cons return (SNode*)pStmt; } -SNode* createTopicOptions(SAstCreateContext* pCxt) { - CHECK_PARSER_STATUS(pCxt); - STopicOptions* pOptions = nodesMakeNode(QUERY_NODE_TOPIC_OPTIONS); - CHECK_OUT_OF_MEM(pOptions); - pOptions->withTable = false; - pOptions->withSchema = false; - pOptions->withTag = false; - return (SNode*)pOptions; -} - SNode* createCreateTopicStmt(SAstCreateContext* pCxt, bool ignoreExists, const SToken* pTopicName, SNode* pQuery, - const SToken* pSubscribeDbName, SNode* pOptions) { + const SToken* pSubDbName, SNode* pRealTable) { CHECK_PARSER_STATUS(pCxt); SCreateTopicStmt* pStmt = nodesMakeNode(QUERY_NODE_CREATE_TOPIC_STMT); CHECK_OUT_OF_MEM(pStmt); strncpy(pStmt->topicName, pTopicName->z, pTopicName->n); pStmt->ignoreExists = ignoreExists; - pStmt->pQuery = pQuery; - if (NULL != pSubscribeDbName) { - strncpy(pStmt->subscribeDbName, pSubscribeDbName->z, pSubscribeDbName->n); + if (NULL != pRealTable) { + strcpy(pStmt->subDbName, ((SRealTableNode*)pRealTable)->table.dbName); + strcpy(pStmt->subSTbName, 
((SRealTableNode*)pRealTable)->table.tableName); + nodesDestroyNode(pRealTable); + } else if (NULL != pSubDbName) { + strncpy(pStmt->subDbName, pSubDbName->z, pSubDbName->n); + } else { + pStmt->pQuery = pQuery; } - pStmt->pOptions = (STopicOptions*)pOptions; return (SNode*)pStmt; } diff --git a/source/libs/parser/src/parAstParser.c b/source/libs/parser/src/parAstParser.c index 5d65a0b80bebc98a02e21458ae558661c4e5439b..68c9684c97ac8eba986a339b7618e51bc02d7d79 100644 --- a/source/libs/parser/src/parAstParser.c +++ b/source/libs/parser/src/parAstParser.c @@ -333,68 +333,22 @@ static int32_t collectMetaKeyFromQuery(SCollectMetaKeyCxt* pCxt, SNode* pStmt) { return collectMetaKeyFromSetOperator(pCxt, (SSetOperator*)pStmt); case QUERY_NODE_SELECT_STMT: return collectMetaKeyFromSelect(pCxt, (SSelectStmt*)pStmt); - case QUERY_NODE_VNODE_MODIF_STMT: - case QUERY_NODE_CREATE_DATABASE_STMT: - case QUERY_NODE_DROP_DATABASE_STMT: - case QUERY_NODE_ALTER_DATABASE_STMT: - break; case QUERY_NODE_CREATE_TABLE_STMT: return collectMetaKeyFromCreateTable(pCxt, (SCreateTableStmt*)pStmt); - case QUERY_NODE_CREATE_SUBTABLE_CLAUSE: - break; case QUERY_NODE_CREATE_MULTI_TABLE_STMT: return collectMetaKeyFromCreateMultiTable(pCxt, (SCreateMultiTableStmt*)pStmt); - case QUERY_NODE_DROP_TABLE_CLAUSE: - case QUERY_NODE_DROP_TABLE_STMT: - case QUERY_NODE_DROP_SUPER_TABLE_STMT: - break; case QUERY_NODE_ALTER_TABLE_STMT: return collectMetaKeyFromAlterTable(pCxt, (SAlterTableStmt*)pStmt); - case QUERY_NODE_CREATE_USER_STMT: - case QUERY_NODE_ALTER_USER_STMT: - case QUERY_NODE_DROP_USER_STMT: - break; case QUERY_NODE_USE_DATABASE_STMT: return collectMetaKeyFromUseDatabase(pCxt, (SUseDatabaseStmt*)pStmt); - case QUERY_NODE_CREATE_DNODE_STMT: - case QUERY_NODE_DROP_DNODE_STMT: - case QUERY_NODE_ALTER_DNODE_STMT: - break; case QUERY_NODE_CREATE_INDEX_STMT: return collectMetaKeyFromCreateIndex(pCxt, (SCreateIndexStmt*)pStmt); - case QUERY_NODE_DROP_INDEX_STMT: - case QUERY_NODE_CREATE_QNODE_STMT: - case QUERY_NODE_DROP_QNODE_STMT: - case QUERY_NODE_CREATE_BNODE_STMT: - case QUERY_NODE_DROP_BNODE_STMT: - case QUERY_NODE_CREATE_SNODE_STMT: - case QUERY_NODE_DROP_SNODE_STMT: - case QUERY_NODE_CREATE_MNODE_STMT: - case QUERY_NODE_DROP_MNODE_STMT: - break; case QUERY_NODE_CREATE_TOPIC_STMT: return collectMetaKeyFromCreateTopic(pCxt, (SCreateTopicStmt*)pStmt); - case QUERY_NODE_DROP_TOPIC_STMT: - case QUERY_NODE_DROP_CGROUP_STMT: - case QUERY_NODE_ALTER_LOCAL_STMT: - break; case QUERY_NODE_EXPLAIN_STMT: return collectMetaKeyFromExplain(pCxt, (SExplainStmt*)pStmt); - case QUERY_NODE_DESCRIBE_STMT: - case QUERY_NODE_RESET_QUERY_CACHE_STMT: - case QUERY_NODE_COMPACT_STMT: - case QUERY_NODE_CREATE_FUNCTION_STMT: - case QUERY_NODE_DROP_FUNCTION_STMT: - break; case QUERY_NODE_CREATE_STREAM_STMT: return collectMetaKeyFromCreateStream(pCxt, (SCreateStreamStmt*)pStmt); - case QUERY_NODE_DROP_STREAM_STMT: - case QUERY_NODE_MERGE_VGROUP_STMT: - case QUERY_NODE_REDISTRIBUTE_VGROUP_STMT: - case QUERY_NODE_SPLIT_VGROUP_STMT: - case QUERY_NODE_SYNCDB_STMT: - case QUERY_NODE_GRANT_STMT: - case QUERY_NODE_REVOKE_STMT: case QUERY_NODE_SHOW_DNODES_STMT: return collectMetaKeyFromShowDnodes(pCxt, (SShowStmt*)pStmt); case QUERY_NODE_SHOW_MNODES_STMT: @@ -407,8 +361,6 @@ static int32_t collectMetaKeyFromQuery(SCollectMetaKeyCxt* pCxt, SNode* pStmt) { return collectMetaKeyFromShowSnodes(pCxt, (SShowStmt*)pStmt); case QUERY_NODE_SHOW_BNODES_STMT: return collectMetaKeyFromShowBnodes(pCxt, (SShowStmt*)pStmt); - case QUERY_NODE_SHOW_CLUSTER_STMT: - break; case 
QUERY_NODE_SHOW_DATABASES_STMT: return collectMetaKeyFromShowDatabases(pCxt, (SShowStmt*)pStmt); case QUERY_NODE_SHOW_FUNCTIONS_STMT: @@ -429,25 +381,8 @@ static int32_t collectMetaKeyFromQuery(SCollectMetaKeyCxt* pCxt, SNode* pStmt) { return collectMetaKeyFromShowVgroups(pCxt, (SShowStmt*)pStmt); case QUERY_NODE_SHOW_TOPICS_STMT: return collectMetaKeyFromShowTopics(pCxt, (SShowStmt*)pStmt); - case QUERY_NODE_SHOW_CONSUMERS_STMT: - case QUERY_NODE_SHOW_SUBSCRIBES_STMT: - case QUERY_NODE_SHOW_SMAS_STMT: - case QUERY_NODE_SHOW_CONFIGS_STMT: - case QUERY_NODE_SHOW_CONNECTIONS_STMT: - case QUERY_NODE_SHOW_QUERIES_STMT: - case QUERY_NODE_SHOW_VNODES_STMT: - case QUERY_NODE_SHOW_APPS_STMT: - case QUERY_NODE_SHOW_SCORES_STMT: - case QUERY_NODE_SHOW_VARIABLE_STMT: - case QUERY_NODE_SHOW_CREATE_DATABASE_STMT: - case QUERY_NODE_SHOW_CREATE_TABLE_STMT: - case QUERY_NODE_SHOW_CREATE_STABLE_STMT: - break; case QUERY_NODE_SHOW_TRANSACTIONS_STMT: return collectMetaKeyFromShowTransactions(pCxt, (SShowStmt*)pStmt); - case QUERY_NODE_KILL_CONNECTION_STMT: - case QUERY_NODE_KILL_QUERY_STMT: - case QUERY_NODE_KILL_TRANSACTION_STMT: default: break; } diff --git a/source/libs/parser/src/parInsert.c b/source/libs/parser/src/parInsert.c index 58a6d1483f097e0db75a59c84d8e469d9deec4c1..047c2d15045f667d41319b6d7c14c475cd6273a1 100644 --- a/source/libs/parser/src/parInsert.c +++ b/source/libs/parser/src/parInsert.c @@ -64,6 +64,7 @@ typedef struct SInsertParseContext { int32_t totalNum; SVnodeModifOpStmt* pOutput; SStmtCallback* pStmtCb; + SParseMetaCache* pMetaCache; } SInsertParseContext; typedef int32_t (*_row_append_fn_t)(SMsgBuf* pMsgBuf, const void* value, int32_t len, void* param); @@ -92,15 +93,15 @@ typedef struct SMemParam { } \ } while (0) -static int32_t skipInsertInto(SInsertParseContext* pCxt) { +static int32_t skipInsertInto(char** pSql, SMsgBuf* pMsg) { SToken sToken; - NEXT_TOKEN(pCxt->pSql, sToken); + NEXT_TOKEN(*pSql, sToken); if (TK_INSERT != sToken.type) { - return buildSyntaxErrMsg(&pCxt->msg, "keyword INSERT is expected", sToken.z); + return buildSyntaxErrMsg(pMsg, "keyword INSERT is expected", sToken.z); } - NEXT_TOKEN(pCxt->pSql, sToken); + NEXT_TOKEN(*pSql, sToken); if (TK_INTO != sToken.type) { - return buildSyntaxErrMsg(&pCxt->msg, "keyword INTO is expected", sToken.z); + return buildSyntaxErrMsg(pMsg, "keyword INTO is expected", sToken.z); } return TSDB_CODE_SUCCESS; } @@ -212,7 +213,7 @@ static int32_t createSName(SName* pName, SToken* pTableName, int32_t acctId, con return buildInvalidOperationMsg(pMsgBuf, msg4); } - char tbname[TSDB_TABLE_FNAME_LEN] = {0}; + char tbname[TSDB_TABLE_FNAME_LEN] = {0}; strncpy(tbname, p + 1, tbLen); /*tbLen = */ strdequote(tbname); @@ -250,25 +251,46 @@ static int32_t createSName(SName* pName, SToken* pTableName, int32_t acctId, con return code; } -static int32_t getTableMetaImpl(SInsertParseContext* pCxt, SName* name, char* dbFname, bool isStb) { +static int32_t checkAuth(SInsertParseContext* pCxt, char* pDbFname, bool* pPass) { + SParseContext* pBasicCtx = pCxt->pComCxt; + if (NULL != pCxt->pMetaCache) { + return getUserAuthFromCache(pCxt->pMetaCache, pBasicCtx->pUser, pDbFname, AUTH_TYPE_WRITE, pPass); + } + return catalogChkAuth(pBasicCtx->pCatalog, pBasicCtx->pTransporter, &pBasicCtx->mgmtEpSet, pBasicCtx->pUser, pDbFname, + AUTH_TYPE_WRITE, pPass); +} + +static int32_t getTableSchema(SInsertParseContext* pCxt, SName* pTbName, bool isStb, STableMeta** pTableMeta) { + SParseContext* pBasicCtx = pCxt->pComCxt; + if (NULL != pCxt->pMetaCache) { + return 
getTableMetaFromCache(pCxt->pMetaCache, pTbName, pTableMeta); + } + if (isStb) { + return catalogGetSTableMeta(pBasicCtx->pCatalog, pBasicCtx->pTransporter, &pBasicCtx->mgmtEpSet, pTbName, + pTableMeta); + } + return catalogGetTableMeta(pBasicCtx->pCatalog, pBasicCtx->pTransporter, &pBasicCtx->mgmtEpSet, pTbName, pTableMeta); +} + +static int32_t getTableVgroup(SInsertParseContext* pCxt, SName* pTbName, SVgroupInfo* pVg) { SParseContext* pBasicCtx = pCxt->pComCxt; + if (NULL != pCxt->pMetaCache) { + return getTableVgroupFromCache(pCxt->pMetaCache, pTbName, pVg); + } + return catalogGetTableHashVgroup(pBasicCtx->pCatalog, pBasicCtx->pTransporter, &pBasicCtx->mgmtEpSet, pTbName, pVg); +} +static int32_t getTableMetaImpl(SInsertParseContext* pCxt, SName* name, char* dbFname, bool isStb) { bool pass = false; - CHECK_CODE(catalogChkAuth(pBasicCtx->pCatalog, pBasicCtx->pTransporter, &pBasicCtx->mgmtEpSet, pBasicCtx->pUser, - dbFname, AUTH_TYPE_WRITE, &pass)); + CHECK_CODE(checkAuth(pCxt, dbFname, &pass)); if (!pass) { return TSDB_CODE_PAR_PERMISSION_DENIED; } - if (isStb) { - CHECK_CODE(catalogGetSTableMeta(pBasicCtx->pCatalog, pBasicCtx->pTransporter, &pBasicCtx->mgmtEpSet, name, - &pCxt->pTableMeta)); - } else { - CHECK_CODE(catalogGetTableMeta(pBasicCtx->pCatalog, pBasicCtx->pTransporter, &pBasicCtx->mgmtEpSet, name, - &pCxt->pTableMeta)); - ASSERT(pCxt->pTableMeta->tableInfo.rowSize > 0); + + CHECK_CODE(getTableSchema(pCxt, name, isStb, &pCxt->pTableMeta)); + if (!isStb) { SVgroupInfo vg; - CHECK_CODE( - catalogGetTableHashVgroup(pBasicCtx->pCatalog, pBasicCtx->pTransporter, &pBasicCtx->mgmtEpSet, name, &vg)); + CHECK_CODE(getTableVgroup(pCxt, name, &vg)); CHECK_CODE(taosHashPut(pCxt->pVgroupsHashObj, (const char*)&vg.vgId, sizeof(vg.vgId), (char*)&vg, sizeof(vg))); } return TSDB_CODE_SUCCESS; @@ -777,7 +799,7 @@ static int32_t KvRowAppend(SMsgBuf* pMsgBuf, const void* value, int32_t len, voi if (errno == E2BIG) { return generateSyntaxErrMsg(pMsgBuf, TSDB_CODE_PAR_VALUE_TOO_LONG, pa->schema->name); } - + char buf[512] = {0}; snprintf(buf, tListLen(buf), " taosMbsToUcs4 error:%s", strerror(errno)); return buildSyntaxErrMsg(pMsgBuf, buf, value); @@ -857,10 +879,8 @@ static int32_t cloneTableMeta(STableMeta* pSrc, STableMeta** pDst) { static int32_t storeTableMeta(SInsertParseContext* pCxt, SHashObj* pHash, SName* pTableName, const char* pName, int32_t len, STableMeta* pMeta) { - SVgroupInfo vg; - SParseContext* pBasicCtx = pCxt->pComCxt; - CHECK_CODE( - catalogGetTableHashVgroup(pBasicCtx->pCatalog, pBasicCtx->pTransporter, &pBasicCtx->mgmtEpSet, pTableName, &vg)); + SVgroupInfo vg; + CHECK_CODE(getTableVgroup(pCxt, pTableName, &vg)); CHECK_CODE(taosHashPut(pCxt->pVgroupsHashObj, (const char*)&vg.vgId, sizeof(vg.vgId), (char*)&vg, sizeof(vg))); pMeta->uid = 0; @@ -1082,9 +1102,9 @@ static void destroyInsertParseContext(SInsertParseContext* pCxt) { // VALUES (field1_value, ...) [(field1_value2, ...) ...] 
|   FILE csv_file_path
 // [...];
 static int32_t parseInsertBody(SInsertParseContext* pCxt) {
-  int32_t tbNum = 0;
-  char tbFName[TSDB_TABLE_FNAME_LEN];
-  bool autoCreateTbl = false;
+  int32_t tbNum = 0;
+  char    tbFName[TSDB_TABLE_FNAME_LEN];
+  bool    autoCreateTbl = false;
 
   // for each table
   while (1) {
@@ -1186,8 +1206,8 @@ static int32_t parseInsertBody(SInsertParseContext* pCxt) {
         return TSDB_CODE_TSC_OUT_OF_MEMORY;
       }
       memcpy(tags, &pCxt->tags, sizeof(pCxt->tags));
-      (*pCxt->pStmtCb->setInfoFn)(pCxt->pStmtCb->pStmt, pCxt->pTableMeta, tags, tbFName, autoCreateTbl, pCxt->pVgroupsHashObj,
-                                  pCxt->pTableBlockHashObj);
+      (*pCxt->pStmtCb->setInfoFn)(pCxt->pStmtCb->pStmt, pCxt->pTableMeta, tags, tbFName, autoCreateTbl,
+                                  pCxt->pVgroupsHashObj, pCxt->pTableBlockHashObj);
 
       memset(&pCxt->tags, 0, sizeof(pCxt->tags));
       pCxt->pVgroupsHashObj = NULL;
@@ -1245,12 +1265,11 @@ int32_t parseInsertSql(SParseContext* pContext, SQuery** pQuery) {
     if (NULL == *pQuery) {
       return TSDB_CODE_OUT_OF_MEMORY;
     }
-
-    (*pQuery)->execMode = QUERY_EXEC_MODE_SCHEDULE;
-    (*pQuery)->haveResultSet = false;
-    (*pQuery)->msgType = TDMT_VND_SUBMIT;
-    (*pQuery)->pRoot = (SNode*)context.pOutput;
   }
+  (*pQuery)->execMode = QUERY_EXEC_MODE_SCHEDULE;
+  (*pQuery)->haveResultSet = false;
+  (*pQuery)->msgType = TDMT_VND_SUBMIT;
+  (*pQuery)->pRoot = (SNode*)context.pOutput;
 
   if (NULL == (*pQuery)->pTableList) {
     (*pQuery)->pTableList = taosArrayInit(taosHashGetSize(context.pTableNameHashObj), sizeof(SName));
@@ -1261,7 +1280,7 @@ int32_t parseInsertSql(SParseContext* pContext, SQuery** pQuery) {
 
   context.pOutput->payloadType = PAYLOAD_TYPE_KV;
 
-  int32_t code = skipInsertInto(&context);
+  int32_t code = skipInsertInto(&context.pSql, &context.msg);
   if (TSDB_CODE_SUCCESS == code) {
     code = parseInsertBody(&context);
   }
@@ -1276,6 +1295,171 @@ int32_t parseInsertSql(SParseContext* pContext, SQuery** pQuery) {
   return code;
 }
 
+typedef struct SInsertParseSyntaxCxt {
+  SParseContext*   pComCxt;
+  char*            pSql;
+  SMsgBuf          msg;
+  SParseMetaCache* pMetaCache;
+} SInsertParseSyntaxCxt;
+
+static int32_t skipParentheses(SInsertParseSyntaxCxt* pCxt) {
+  SToken sToken;
+  while (1) {
+    NEXT_TOKEN(pCxt->pSql, sToken);
+    if (TK_NK_RP == sToken.type) {
+      break;
+    }
+    if (0 == sToken.n) {
+      return buildSyntaxErrMsg(&pCxt->msg, ") expected", NULL);
+    }
+  }
+  return TSDB_CODE_SUCCESS;
+}
+
+static int32_t skipBoundColumns(SInsertParseSyntaxCxt* pCxt) { return skipParentheses(pCxt); }
+
+// pSql -> (field1_value, ...) [(field1_value2, ...) ...]
+static int32_t skipValuesClause(SInsertParseSyntaxCxt* pCxt) {
+  int32_t numOfRows = 0;
+  SToken  sToken;
+  while (1) {
+    int32_t index = 0;
+    NEXT_TOKEN_KEEP_SQL(pCxt->pSql, sToken, index);
+    if (TK_NK_LP != sToken.type) {
+      break;
+    }
+    pCxt->pSql += index;
+
+    CHECK_CODE(skipParentheses(pCxt));
+    ++numOfRows;
+  }
+  if (0 == numOfRows) {
+    return buildSyntaxErrMsg(&pCxt->msg, "no data points", NULL);
+  }
+  return TSDB_CODE_SUCCESS;
+}
+
+static int32_t skipTagsClause(SInsertParseSyntaxCxt* pCxt) { return skipParentheses(pCxt); }
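/* Editor's note (illustrative, not part of the patch): these skip* helpers do
 * not build an AST; they only advance pCxt->pSql so that parseInsertBodySyntax()
 * below can record which tables, and therefore which metadata, the statement
 * needs. A made-up helper in the same style, reusing only macros and functions
 * from this file:
 */
#if 0
static int32_t skipOneParenGroup(SInsertParseSyntaxCxt* pCxt) {
  SToken sToken;
  NEXT_TOKEN(pCxt->pSql, sToken);  // expect an opening parenthesis
  if (TK_NK_LP != sToken.type) {
    return buildSyntaxErrMsg(&pCxt->msg, "( is expected", sToken.z);
  }
  return skipParentheses(pCxt);    // consume everything up to the matching ')'
}
#endif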
+// pSql -> [(tag1_name, ...)] TAGS (tag1_value, ...)
+static int32_t skipUsingClause(SInsertParseSyntaxCxt* pCxt) {
+  SToken sToken;
+  NEXT_TOKEN(pCxt->pSql, sToken);
+  if (TK_NK_LP == sToken.type) {
+    CHECK_CODE(skipBoundColumns(pCxt));
+    NEXT_TOKEN(pCxt->pSql, sToken);
+  }
+
+  if (TK_TAGS != sToken.type) {
+    return buildSyntaxErrMsg(&pCxt->msg, "TAGS is expected", sToken.z);
+  }
+  // pSql -> (tag1_value, ...)
+  NEXT_TOKEN(pCxt->pSql, sToken);
+  if (TK_NK_LP != sToken.type) {
+    return buildSyntaxErrMsg(&pCxt->msg, "( is expected", sToken.z);
+  }
+  CHECK_CODE(skipTagsClause(pCxt));
+
+  return TSDB_CODE_SUCCESS;
+}
+
+static int32_t collectTableMetaKey(SInsertParseSyntaxCxt* pCxt, SToken* pTbToken) {
+  SName name;
+  CHECK_CODE(createSName(&name, pTbToken, pCxt->pComCxt->acctId, pCxt->pComCxt->db, &pCxt->msg));
+  CHECK_CODE(reserveUserAuthInCacheExt(pCxt->pComCxt->pUser, &name, AUTH_TYPE_WRITE, pCxt->pMetaCache));
+  CHECK_CODE(reserveTableMetaInCacheExt(&name, pCxt->pMetaCache));
+  CHECK_CODE(reserveTableVgroupInCacheExt(&name, pCxt->pMetaCache));
+  return TSDB_CODE_SUCCESS;
+}
+
+static int32_t parseInsertBodySyntax(SInsertParseSyntaxCxt* pCxt) {
+  bool hasData = false;
+  // for each table
+  while (1) {
+    SToken sToken;
+
+    // pSql -> tb_name ...
+    NEXT_TOKEN(pCxt->pSql, sToken);
+
+    // no data in the sql string anymore.
+    if (sToken.n == 0) {
+      if (sToken.type && pCxt->pSql[0]) {
+        return buildSyntaxErrMsg(&pCxt->msg, "invalid character in SQL", sToken.z);
+      }
+
+      if (!hasData) {
+        return buildInvalidOperationMsg(&pCxt->msg, "no data in sql");
+      }
+      break;
+    }
+
+    hasData = false;
+
+    SToken tbnameToken = sToken;
+    NEXT_TOKEN(pCxt->pSql, sToken);
+
+    // USING clause
+    if (TK_USING == sToken.type) {
+      NEXT_TOKEN(pCxt->pSql, sToken);
+      CHECK_CODE(collectTableMetaKey(pCxt, &sToken));
+      CHECK_CODE(skipUsingClause(pCxt));
+      NEXT_TOKEN(pCxt->pSql, sToken);
+    } else {
+      CHECK_CODE(collectTableMetaKey(pCxt, &tbnameToken));
+    }
+
+    if (TK_NK_LP == sToken.type) {
+      // pSql -> field1_name, ...)
+      CHECK_CODE(skipBoundColumns(pCxt));
+      NEXT_TOKEN(pCxt->pSql, sToken);
+    }
+
+    if (TK_VALUES == sToken.type) {
+      // pSql -> (field1_value, ...) [(field1_value2, ...) ...]
+      CHECK_CODE(skipValuesClause(pCxt));
+      hasData = true;
+      continue;
+    }
+
+    // FILE csv_file_path
+    if (TK_FILE == sToken.type) {
+      // pSql -> csv_file_path
+      NEXT_TOKEN(pCxt->pSql, sToken);
+      if (0 == sToken.n || (TK_NK_STRING != sToken.type && TK_NK_ID != sToken.type)) {
+        return buildSyntaxErrMsg(&pCxt->msg, "file path is required following keyword FILE", sToken.z);
+      }
+      hasData = true;
+      continue;
+    }
+
+    return buildSyntaxErrMsg(&pCxt->msg, "keyword VALUES or FILE is expected", sToken.z);
+  }
+
+  return TSDB_CODE_SUCCESS;
+}
+
+int32_t parseInsertSyntax(SParseContext* pContext, SQuery** pQuery) {
+  SInsertParseSyntaxCxt context = {.pComCxt = pContext,
+                                   .pSql = (char*)pContext->pSql,
+                                   .msg = {.buf = pContext->pMsg, .len = pContext->msgLen},
+                                   .pMetaCache = taosMemoryCalloc(1, sizeof(SParseMetaCache))};
+  if (NULL == context.pMetaCache) {
+    return TSDB_CODE_OUT_OF_MEMORY;
+  }
+  int32_t code = skipInsertInto(&context.pSql, &context.msg);
+  if (TSDB_CODE_SUCCESS == code) {
+    code = parseInsertBodySyntax(&context);
+  }
+  if (TSDB_CODE_SUCCESS == code) {
+    *pQuery = taosMemoryCalloc(1, sizeof(SQuery));
+    if (NULL == *pQuery) {
+      return TSDB_CODE_OUT_OF_MEMORY;
+    }
+    TSWAP((*pQuery)->pMetaCache, context.pMetaCache);
+  }
+  return code;
+}
+
 int32_t qCreateSName(SName* pName, const char* pTableName, int32_t acctId, char* dbName, char* msgBuf,
                      int32_t msgBufLen) {
   SMsgBuf msg = {.buf = msgBuf, .len = msgBufLen};
diff --git a/source/libs/parser/src/parTokenizer.c b/source/libs/parser/src/parTokenizer.c
index 540de2d639be9e69e798316e04bb4a46ff9dd58e..e9539073583c6d21a100efa5b33516eb9db18393 100644
--- a/source/libs/parser/src/parTokenizer.c
+++ b/source/libs/parser/src/parTokenizer.c
@@ -53,7 +53,6 @@ static SKeyword keywordTable[] = {
     {"CACHE", 
TK_CACHE}, {"CACHELAST", TK_CACHELAST}, {"CAST", TK_CAST}, - {"CGROUP", TK_CGROUP}, {"CLUSTER", TK_CLUSTER}, {"COLUMN", TK_COLUMN}, {"COMMENT", TK_COMMENT}, @@ -62,13 +61,13 @@ static SKeyword keywordTable[] = { {"CONNS", TK_CONNS}, {"CONNECTION", TK_CONNECTION}, {"CONNECTIONS", TK_CONNECTIONS}, + {"CONSUMER", TK_CONSUMER}, {"COUNT", TK_COUNT}, {"CREATE", TK_CREATE}, {"DATABASE", TK_DATABASE}, {"DATABASES", TK_DATABASES}, {"DAYS", TK_DAYS}, {"DBS", TK_DBS}, - {"DELAY", TK_DELAY}, {"DESC", TK_DESC}, {"DESCRIBE", TK_DESCRIBE}, {"DISTINCT", TK_DISTINCT}, @@ -156,7 +155,6 @@ static SKeyword keywordTable[] = { {"RETENTIONS", TK_RETENTIONS}, {"REVOKE", TK_REVOKE}, {"ROLLUP", TK_ROLLUP}, - {"SCHEMA", TK_SCHEMA}, {"SCHEMALESS", TK_SCHEMALESS}, {"SCORES", TK_SCORES}, {"SELECT", TK_SELECT}, @@ -214,7 +212,6 @@ static SKeyword keywordTable[] = { {"WATERMARK", TK_WATERMARK}, {"WHERE", TK_WHERE}, {"WINDOW_CLOSE", TK_WINDOW_CLOSE}, - {"WITH", TK_WITH}, {"WRITE", TK_WRITE}, {"_C0", TK_ROWTS}, {"_QENDTS", TK_QENDTS}, diff --git a/source/libs/parser/src/parTranslater.c b/source/libs/parser/src/parTranslater.c index d84b005f7f0cd8bd91a3f9bbd17e9a8e7fa81a78..66cc24e39cf88f4c69ce4a2ba7b88b4a1850fff3 100644 --- a/source/libs/parser/src/parTranslater.c +++ b/source/libs/parser/src/parTranslater.c @@ -465,20 +465,22 @@ static bool isPrimaryKey(STempTableNode* pTable, SNode* pExpr) { return isPrimaryKeyImpl(pTable, pExpr); } -static bool findAndSetColumn(SColumnNode** pColRef, const STableNode* pTable) { +static int32_t findAndSetColumn(STranslateContext* pCxt, SColumnNode** pColRef, const STableNode* pTable, + bool* pFound) { SColumnNode* pCol = *pColRef; - bool found = false; + *pFound = false; if (QUERY_NODE_REAL_TABLE == nodeType(pTable)) { const STableMeta* pMeta = ((SRealTableNode*)pTable)->pMeta; if (isInternalPrimaryKey(pCol)) { setColumnInfoBySchema((SRealTableNode*)pTable, pMeta->schema, false, pCol); - return true; + *pFound = true; + return TSDB_CODE_SUCCESS; } int32_t nums = pMeta->tableInfo.numOfTags + pMeta->tableInfo.numOfColumns; for (int32_t i = 0; i < nums; ++i) { if (0 == strcmp(pCol->colName, pMeta->schema[i].name)) { setColumnInfoBySchema((SRealTableNode*)pTable, pMeta->schema + i, (i >= pMeta->tableInfo.numOfColumns), pCol); - found = true; + *pFound = true; break; } } @@ -489,13 +491,15 @@ static bool findAndSetColumn(SColumnNode** pColRef, const STableNode* pTable) { SExprNode* pExpr = (SExprNode*)pNode; if (0 == strcmp(pCol->colName, pExpr->aliasName) || (isPrimaryKey((STempTableNode*)pTable, pNode) && isInternalPrimaryKey(pCol))) { + if (*pFound) { + return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_AMBIGUOUS_COLUMN, pCol->colName); + } setColumnInfoByExpr(pTable, pExpr, pColRef); - found = true; - break; + *pFound = true; } } } - return found; + return TSDB_CODE_SUCCESS; } static EDealRes translateColumnWithPrefix(STranslateContext* pCxt, SColumnNode** pCol) { @@ -506,7 +510,12 @@ static EDealRes translateColumnWithPrefix(STranslateContext* pCxt, SColumnNode** STableNode* pTable = taosArrayGetP(pTables, i); if (belongTable(pCxt->pParseCxt->db, (*pCol), pTable)) { foundTable = true; - if (findAndSetColumn(pCol, pTable)) { + bool foundCol = false; + pCxt->errCode = findAndSetColumn(pCxt, pCol, pTable, &foundCol); + if (TSDB_CODE_SUCCESS != pCxt->errCode) { + return DEAL_RES_ERROR; + } + if (foundCol) { break; } return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_INVALID_COLUMN, (*pCol)->colName); @@ -525,14 +534,19 @@ static EDealRes translateColumnWithoutPrefix(STranslateContext* 
pCxt, SColumnNod bool isInternalPk = isInternalPrimaryKey(*pCol); for (size_t i = 0; i < nums; ++i) { STableNode* pTable = taosArrayGetP(pTables, i); - if (findAndSetColumn(pCol, pTable)) { + bool foundCol = false; + pCxt->errCode = findAndSetColumn(pCxt, pCol, pTable, &foundCol); + if (TSDB_CODE_SUCCESS != pCxt->errCode) { + return DEAL_RES_ERROR; + } + if (foundCol) { if (found) { return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_AMBIGUOUS_COLUMN, (*pCol)->colName); } found = true; - if (isInternalPk) { - break; - } + } + if (isInternalPk) { + break; } } if (!found) { @@ -752,18 +766,30 @@ static bool isMultiResFunc(SNode* pNode) { return (QUERY_NODE_COLUMN == nodeType(pParam) ? 0 == strcmp(((SColumnNode*)pParam)->colName, "*") : false); } -static EDealRes translateUnaryOperator(STranslateContext* pCxt, SOperatorNode* pOp) { +static int32_t rewriteNegativeOperator(SNode** pOp) { + SNode* pRes = NULL; + int32_t code = scalarCalculateConstants(*pOp, &pRes); + if (TSDB_CODE_SUCCESS == code) { + *pOp = pRes; + } + return code; +} + +static EDealRes translateUnaryOperator(STranslateContext* pCxt, SOperatorNode** pOpRef) { + SOperatorNode* pOp = *pOpRef; if (OP_TYPE_MINUS == pOp->opType) { if (!IS_MATHABLE_TYPE(((SExprNode*)(pOp->pLeft))->resType.type)) { return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_WRONG_VALUE_TYPE, ((SExprNode*)(pOp->pLeft))->aliasName); } pOp->node.resType.type = TSDB_DATA_TYPE_DOUBLE; pOp->node.resType.bytes = tDataTypes[TSDB_DATA_TYPE_DOUBLE].bytes; + + pCxt->errCode = rewriteNegativeOperator((SNode**)pOpRef); } else { pOp->node.resType.type = TSDB_DATA_TYPE_BOOL; pOp->node.resType.bytes = tDataTypes[TSDB_DATA_TYPE_BOOL].bytes; } - return DEAL_RES_CONTINUE; + return TSDB_CODE_SUCCESS == pCxt->errCode ? DEAL_RES_CONTINUE : DEAL_RES_ERROR; } static EDealRes translateArithmeticOperator(STranslateContext* pCxt, SOperatorNode* pOp) { @@ -824,7 +850,9 @@ static EDealRes translateJsonOperator(STranslateContext* pCxt, SOperatorNode* pO return DEAL_RES_CONTINUE; } -static EDealRes translateOperator(STranslateContext* pCxt, SOperatorNode* pOp) { +static EDealRes translateOperator(STranslateContext* pCxt, SOperatorNode** pOpRef) { + SOperatorNode* pOp = *pOpRef; + if (isMultiResFunc(pOp->pLeft)) { return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_WRONG_VALUE_TYPE, ((SExprNode*)(pOp->pLeft))->aliasName); } @@ -833,7 +861,7 @@ static EDealRes translateOperator(STranslateContext* pCxt, SOperatorNode* pOp) { } if (nodesIsUnaryOp(pOp)) { - return translateUnaryOperator(pCxt, pOp); + return translateUnaryOperator(pCxt, pOpRef); } else if (nodesIsArithmeticOp(pOp)) { return translateArithmeticOperator(pCxt, pOp); } else if (nodesIsComparisonOp(pOp)) { @@ -992,7 +1020,7 @@ static EDealRes doTranslateExpr(SNode** pNode, void* pContext) { case QUERY_NODE_VALUE: return translateValue(pCxt, (SValueNode*)*pNode); case QUERY_NODE_OPERATOR: - return translateOperator(pCxt, (SOperatorNode*)*pNode); + return translateOperator(pCxt, (SOperatorNode**)pNode); case QUERY_NODE_FUNCTION: return translateFunction(pCxt, (SFunctionNode*)*pNode); case QUERY_NODE_LOGIC_CONDITION: @@ -1891,9 +1919,9 @@ static int32_t translatePartitionBy(STranslateContext* pCxt, SNodeList* pPartiti return translateExprList(pCxt, pPartitionByList); } -static int32_t translateWhere(STranslateContext* pCxt, SNode* pWhere) { +static int32_t translateWhere(STranslateContext* pCxt, SNode** pWhere) { pCxt->currClause = SQL_CLAUSE_WHERE; - return translateExpr(pCxt, &pWhere); + return translateExpr(pCxt, pWhere); } static int32_t 
 static int32_t translateFrom(STranslateContext* pCxt, SSelectStmt* pSelect) {
@@ -1925,7 +1953,9 @@ static int32_t createPrimaryKeyColByTable(STranslateContext* pCxt, STableNode* p
   }
   pCol->colId = PRIMARYKEY_TIMESTAMP_COL_ID;
   strcpy(pCol->colName, PK_TS_COL_INTERNAL_NAME);
-  if (!findAndSetColumn(&pCol, pTable)) {
+  bool    found = false;
+  int32_t code = findAndSetColumn(pCxt, &pCol, pTable, &found);
+  if (TSDB_CODE_SUCCESS != code || !found) {
     return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_TIMELINE_FUNC);
   }
   *pPrimaryKey = (SNode*)pCol;
@@ -1964,7 +1994,7 @@ static int32_t translateSelect(STranslateContext* pCxt, SSelectStmt* pSelect) {
   pCxt->pCurrStmt = pSelect;
   int32_t code = translateFrom(pCxt, pSelect);
   if (TSDB_CODE_SUCCESS == code) {
-    code = translateWhere(pCxt, pSelect->pWhere);
+    code = translateWhere(pCxt, &pSelect->pWhere);
   }
   if (TSDB_CODE_SUCCESS == code) {
     code = translatePartitionBy(pCxt, pSelect->pPartitionByList);
   }
@@ -2603,10 +2633,7 @@ static int32_t checkTableSchema(STranslateContext* pCxt, SCreateTableStmt* pStmt
 }
 
 static int32_t checkCreateTable(STranslateContext* pCxt, SCreateTableStmt* pStmt) {
-  int32_t code = checkRangeOption(pCxt, "delay", pStmt->pOptions->delay, TSDB_MIN_ROLLUP_DELAY, TSDB_MAX_ROLLUP_DELAY);
-  if (TSDB_CODE_SUCCESS == code) {
-    code = checTableFactorOption(pCxt, pStmt->pOptions->filesFactor);
-  }
+  int32_t code = checTableFactorOption(pCxt, pStmt->pOptions->filesFactor);
   if (TSDB_CODE_SUCCESS == code) {
     code = checkTableRollupOption(pCxt, pStmt->pOptions->pRollupFuncs);
   }
@@ -2847,7 +2874,6 @@ static int32_t buildRollupAst(STranslateContext* pCxt, SCreateTableStmt* pStmt,
 static int32_t buildCreateStbReq(STranslateContext* pCxt, SCreateTableStmt* pStmt, SMCreateStbReq* pReq) {
   pReq->igExists = pStmt->ignoreExists;
   pReq->xFilesFactor = pStmt->pOptions->filesFactor;
-  pReq->delay = pStmt->pOptions->delay;
   pReq->ttl = pStmt->pOptions->ttl;
   columnDefNodeToField(pStmt->pCols, &pReq->pColumns);
   columnDefNodeToField(pStmt->pTags, &pReq->pTags);
@@ -3273,9 +3299,6 @@ static int32_t buildCreateTopicReq(STranslateContext* pCxt, SCreateTopicStmt* pS
   tNameSetDbName(&name, pCxt->pParseCxt->acctId, pStmt->topicName, strlen(pStmt->topicName));
   tNameGetFullDbName(&name, pReq->name);
   pReq->igExists = pStmt->ignoreExists;
-  pReq->withTbName = pStmt->pOptions->withTable;
-  pReq->withSchema = pStmt->pOptions->withSchema;
-  pReq->withTag = pStmt->pOptions->withTag;
 
   pReq->sql = strdup(pCxt->pParseCxt->pSql);
   if (NULL == pReq->sql) {
@@ -3284,19 +3307,26 @@
   int32_t code = TSDB_CODE_SUCCESS;
 
-  const char* dbName;
-  if (NULL != pStmt->pQuery) {
-    dbName = ((SRealTableNode*)(((SSelectStmt*)pStmt->pQuery)->pFromTable))->table.dbName;
+  if ('\0' != pStmt->subSTbName[0]) {
+    pReq->subType = TOPIC_SUB_TYPE__TABLE;
+    toName(pCxt->pParseCxt->acctId, pStmt->subDbName, pStmt->subSTbName, &name);
+    tNameGetFullDbName(&name, pReq->subDbName);
+    tNameExtractFullName(&name, pReq->subStbName);
+  } else if ('\0' != pStmt->subDbName[0]) {
+    pReq->subType = TOPIC_SUB_TYPE__DB;
+    tNameSetDbName(&name, pCxt->pParseCxt->acctId, pStmt->subDbName, strlen(pStmt->subDbName));
+    tNameGetFullDbName(&name, pReq->subDbName);
+  } else {
+    pReq->subType = TOPIC_SUB_TYPE__COLUMN;
+    char* dbName = ((SRealTableNode*)(((SSelectStmt*)pStmt->pQuery)->pFromTable))->table.dbName;
+    tNameSetDbName(&name, pCxt->pParseCxt->acctId, dbName, strlen(dbName));
+    tNameGetFullDbName(&name, pReq->subDbName);
     pCxt->pParseCxt->topicQuery = true;
     code = translateQuery(pCxt, pStmt->pQuery);
     if (TSDB_CODE_SUCCESS == code) {
       code = nodesNodeToString(pStmt->pQuery, false, &pReq->ast, NULL);
     }
-  } else {
-    dbName = pStmt->subscribeDbName;
   }
-  tNameSetDbName(&name, pCxt->pParseCxt->acctId, dbName, strlen(dbName));
-  tNameGetFullDbName(&name, pReq->subscribeDbName);
 
   return code;
 }
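The rewritten buildCreateTopicReq classifies a topic by its most specific subscription target: a named super table becomes TOPIC_SUB_TYPE__TABLE, a bare database TOPIC_SUB_TYPE__DB, and the SELECT-based form falls through to TOPIC_SUB_TYPE__COLUMN. A toy sketch of that first-nonempty-field precedence, with simplified stand-ins for the statement struct and enum (not TDengine's definitions):

#include <stdio.h>
#include <string.h>

typedef enum { SUB_COLUMN = 0, SUB_DB = 1, SUB_TABLE = 2 } ESubType;

typedef struct {
  char subDbName[64];   /* set for DB and TABLE subscriptions */
  char subSTbName[64];  /* set only for TABLE subscriptions   */
} STopicStmt;

static ESubType classifyTopic(const STopicStmt *pStmt) {
  if (pStmt->subSTbName[0] != '\0') return SUB_TABLE;  /* most specific first */
  if (pStmt->subDbName[0] != '\0') return SUB_DB;
  return SUB_COLUMN;                                   /* SELECT-based topic  */
}

int main(void) {
  STopicStmt stmt = {0};
  strcpy(stmt.subDbName, "power");
  printf("subType = %d\n", classifyTopic(&stmt)); /* prints 1 (SUB_DB) */
  return 0;
}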
diff --git a/source/libs/parser/src/parUtil.c b/source/libs/parser/src/parUtil.c
index 34b01991545cdfdea46203b6edc73098e273fd39..9882440bbb632e2d989da9f8a5f2be880bb37eab 100644
--- a/source/libs/parser/src/parUtil.c
+++ b/source/libs/parser/src/parUtil.c
@@ -671,22 +671,32 @@ int32_t putMetaDataToCache(const SCatalogReq* pCatalogReq, const SMetaData* pMet
   return code;
 }
 
-static int32_t reserveTableReqInCache(int32_t acctId, const char* pDb, const char* pTable, SHashObj** pTables) {
+static int32_t reserveTableReqInCacheImpl(const char* pTbFName, int32_t len, SHashObj** pTables) {
   if (NULL == *pTables) {
     *pTables = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK);
     if (NULL == *pTables) {
       return TSDB_CODE_OUT_OF_MEMORY;
     }
   }
+  return taosHashPut(*pTables, pTbFName, len, &pTables, POINTER_BYTES);
+}
+
+static int32_t reserveTableReqInCache(int32_t acctId, const char* pDb, const char* pTable, SHashObj** pTables) {
   char    fullName[TSDB_TABLE_FNAME_LEN];
   int32_t len = snprintf(fullName, sizeof(fullName), "%d.%s.%s", acctId, pDb, pTable);
-  return taosHashPut(*pTables, fullName, len, &pTables, POINTER_BYTES);
+  return reserveTableReqInCacheImpl(fullName, len, pTables);
 }
 
 int32_t reserveTableMetaInCache(int32_t acctId, const char* pDb, const char* pTable, SParseMetaCache* pMetaCache) {
   return reserveTableReqInCache(acctId, pDb, pTable, &pMetaCache->pTableMeta);
 }
 
+int32_t reserveTableMetaInCacheExt(const SName* pName, SParseMetaCache* pMetaCache) {
+  char fullName[TSDB_TABLE_FNAME_LEN];
+  tNameExtractFullName(pName, fullName);
+  return reserveTableReqInCacheImpl(fullName, strlen(fullName), &pMetaCache->pTableMeta);
+}
+
 int32_t getTableMetaFromCache(SParseMetaCache* pMetaCache, const SName* pName, STableMeta** pMeta) {
   char fullName[TSDB_TABLE_FNAME_LEN];
   tNameExtractFullName(pName, fullName);
@@ -736,6 +746,12 @@ int32_t reserveTableVgroupInCache(int32_t acctId, const char* pDb, const char* p
   return reserveTableReqInCache(acctId, pDb, pTable, &pMetaCache->pTableVgroup);
 }
 
+int32_t reserveTableVgroupInCacheExt(const SName* pName, SParseMetaCache* pMetaCache) {
+  char fullName[TSDB_TABLE_FNAME_LEN];
+  tNameExtractFullName(pName, fullName);
+  return reserveTableReqInCacheImpl(fullName, strlen(fullName), &pMetaCache->pTableVgroup);
+}
+
 int32_t getTableVgroupFromCache(SParseMetaCache* pMetaCache, const SName* pName, SVgroupInfo* pVgroup) {
   char fullName[TSDB_TABLE_FNAME_LEN];
   tNameExtractFullName(pName, fullName);
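The refactor above pulls the lazy hash-table creation plus the insert into reserveTableReqInCacheImpl, so the name-component entry point and the SName-based *Ext entry points differ only in how they build the key. A compact sketch of the same lazy-init impl-plus-wrappers shape, using a toy KeySet in place of SHashObj (all names here are illustrative, not TDengine's):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct {
  char **keys;
  int    count;
  int    cap;
} KeySet;

/* Shared impl: creates the container on first use, then records the key. */
static int reserveKeyImpl(const char *key, KeySet **ppSet) {
  if (*ppSet == NULL) {
    *ppSet = calloc(1, sizeof(KeySet));
    if (*ppSet == NULL) return -1;
  }
  KeySet *s = *ppSet;
  if (s->count == s->cap) {
    int    newCap = s->cap ? s->cap * 2 : 4;
    char **grown = realloc(s->keys, newCap * sizeof(char *));
    if (grown == NULL) return -1;
    s->keys = grown;
    s->cap = newCap;
  }
  s->keys[s->count++] = strdup(key);
  return 0;
}

/* Wrapper 1: key assembled from components (cf. "acctId.db.table"). */
static int reserveByParts(int acctId, const char *db, const char *tb, KeySet **ppSet) {
  char key[192];
  snprintf(key, sizeof(key), "%d.%s.%s", acctId, db, tb);
  return reserveKeyImpl(key, ppSet);
}

/* Wrapper 2: key supplied already fully qualified (cf. the *Ext variants). */
static int reserveByFullName(const char *fullName, KeySet **ppSet) {
  return reserveKeyImpl(fullName, ppSet);
}

int main(void) {
  KeySet *set = NULL;
  reserveByParts(1, "power", "meters", &set);
  reserveByFullName("1.power.meters_detail", &set);
  printf("reserved %d keys, first: %s\n", set->count, set->keys[0]);
  return 0;
}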
@@ -776,18 +792,30 @@ int32_t getDbCfgFromCache(SParseMetaCache* pMetaCache, const char* pDbFName, SDb
   return TSDB_CODE_SUCCESS;
 }
 
-int32_t reserveUserAuthInCache(int32_t acctId, const char* pUser, const char* pDb, AUTH_TYPE type,
-                               SParseMetaCache* pMetaCache) {
+static int32_t reserveUserAuthInCacheImpl(const char* pKey, int32_t len, SParseMetaCache* pMetaCache) {
   if (NULL == pMetaCache->pUserAuth) {
     pMetaCache->pUserAuth = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK);
     if (NULL == pMetaCache->pUserAuth) {
       return TSDB_CODE_OUT_OF_MEMORY;
     }
   }
+  bool pass = false;
+  return taosHashPut(pMetaCache->pUserAuth, pKey, len, &pass, sizeof(pass));
+}
+
+int32_t reserveUserAuthInCache(int32_t acctId, const char* pUser, const char* pDb, AUTH_TYPE type,
+                               SParseMetaCache* pMetaCache) {
   char    key[USER_AUTH_KEY_MAX_LEN] = {0};
   int32_t len = userAuthToString(acctId, pUser, pDb, type, key);
-  bool pass = false;
-  return taosHashPut(pMetaCache->pUserAuth, key, len, &pass, sizeof(pass));
+  return reserveUserAuthInCacheImpl(key, len, pMetaCache);
+}
+
+int32_t reserveUserAuthInCacheExt(const char* pUser, const SName* pName, AUTH_TYPE type, SParseMetaCache* pMetaCache) {
+  char    dbFName[TSDB_DB_FNAME_LEN] = {0};
+  tNameGetFullDbName(pName, dbFName);
+  char    key[USER_AUTH_KEY_MAX_LEN] = {0};
+  int32_t len = userAuthToStringExt(pUser, dbFName, type, key);
+  return reserveUserAuthInCacheImpl(key, len, pMetaCache);
 }
 
 int32_t getUserAuthFromCache(SParseMetaCache* pMetaCache, const char* pUser, const char* pDbFName, AUTH_TYPE type,
diff --git a/source/libs/parser/src/parser.c b/source/libs/parser/src/parser.c
index deef477b5da5fe45748d030f28c67e9403cfb998..bb70458f983832533fe8fa18ab58b58ca38558a6 100644
--- a/source/libs/parser/src/parser.c
+++ b/source/libs/parser/src/parser.c
@@ -34,7 +34,7 @@ bool qIsInsertSql(const char* pStr, size_t length) {
   } while (1);
 }
 
-static int32_t semanticAnalysis(SParseContext* pCxt, SQuery* pQuery) {
+static int32_t analyseSemantic(SParseContext* pCxt, SQuery* pQuery) {
   int32_t code = authenticate(pCxt, pQuery);
 
   if (TSDB_CODE_SUCCESS == code && pQuery->placeholderNum > 0) {
@@ -54,12 +54,12 @@ static int32_t semanticAnalysis(SParseContext* pCxt, SQuery* pQuery) {
 static int32_t parseSqlIntoAst(SParseContext* pCxt, SQuery** pQuery) {
   int32_t code = parse(pCxt, pQuery);
   if (TSDB_CODE_SUCCESS == code) {
-    code = semanticAnalysis(pCxt, *pQuery);
+    code = analyseSemantic(pCxt, *pQuery);
   }
   return code;
 }
 
-static int32_t syntaxParseSql(SParseContext* pCxt, SQuery** pQuery) {
+static int32_t parseSqlSyntax(SParseContext* pCxt, SQuery** pQuery) {
   int32_t code = parse(pCxt, pQuery);
   if (TSDB_CODE_SUCCESS == code) {
     code = collectMetaKey(pCxt, *pQuery);
@@ -106,6 +106,7 @@ static int32_t setValueByBindParam(SValueNode* pVal, TAOS_MULTI_BIND* pParam) {
       }
       varDataSetLen(pVal->datum.p, pVal->node.resType.bytes);
       strncpy(varDataVal(pVal->datum.p), (const char*)pParam->buffer, pVal->node.resType.bytes);
+      pVal->node.resType.bytes += VARSTR_HEADER_SIZE;
       break;
     case TSDB_DATA_TYPE_NCHAR: {
       pVal->node.resType.bytes *= TSDB_NCHAR_SIZE;
@@ -120,7 +121,7 @@ static int32_t setValueByBindParam(SValueNode* pVal, TAOS_MULTI_BIND* pParam) {
         return errno;
       }
       varDataSetLen(pVal->datum.p, output);
-      pVal->node.resType.bytes = output;
+      pVal->node.resType.bytes = output + VARSTR_HEADER_SIZE;
       break;
     }
     case TSDB_DATA_TYPE_TIMESTAMP:
@@ -191,12 +192,12 @@ int32_t qParseSql(SParseContext* pCxt, SQuery** pQuery) {
   return code;
 }
 
-int32_t qSyntaxParseSql(SParseContext* pCxt, SQuery** pQuery, struct SCatalogReq* pCatalogReq) {
+int32_t qParseSqlSyntax(SParseContext* pCxt, SQuery** pQuery, struct SCatalogReq* pCatalogReq) {
   int32_t code = TSDB_CODE_SUCCESS;
   if (qIsInsertSql(pCxt->pSql, pCxt->sqlLen)) {
-    // todo insert sql
+    code = parseInsertSyntax(pCxt, pQuery);
   } else {
-    code = syntaxParseSql(pCxt, pQuery);
+    code = parseSqlSyntax(pCxt, pQuery);
   }
   if (TSDB_CODE_SUCCESS == code) {
     code = buildCatalogReq((*pQuery)->pMetaCache, pCatalogReq);
@@ -205,13 +206,13 @@ int32_t qSyntaxParseSql(SParseContext* pCxt, SQuery** pQuery, struct SCatalogReq
   return code;
 }
 
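Taken together, qParseSqlSyntax and qAnalyseSqlSemantic split parsing into two phases around one batched catalog round-trip: phase one parses the syntax and records which table, vgroup and auth metadata will be needed (inserts go through parseInsertSyntax), the caller fetches that metadata, and phase two completes semantic analysis (or parseInsertSql) against the fetched SMetaData. A sketch of how a caller might drive the flow, assuming the parser headers are in scope; fetchMetaFromCatalog is a hypothetical placeholder for the caller's catalog lookup, not a real TDengine function:

/* Two-phase parse, error paths shortened; every identifier except
 * fetchMetaFromCatalog() comes from the signatures in this patch. */
static int32_t parseWithCatalog(SParseContext *pCxt, SQuery **ppQuery) {
  SCatalogReq req = {0};
  SMetaData   meta = {0};

  /* Phase 1: syntax only; also fills req with the metadata keys needed. */
  int32_t code = qParseSqlSyntax(pCxt, ppQuery, &req);
  if (TSDB_CODE_SUCCESS != code) return code;

  /* One batched round-trip to resolve the requested metadata. */
  code = fetchMetaFromCatalog(&req, &meta);  /* hypothetical helper */
  if (TSDB_CODE_SUCCESS != code) return code;

  /* Phase 2: semantic analysis against the fetched metadata. */
  return qAnalyseSqlSemantic(pCxt, &req, &meta, *ppQuery);
}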
-int32_t qSemanticAnalysisSql(SParseContext* pCxt, const struct SCatalogReq* pCatalogReq,
-                             const struct SMetaData* pMetaData, SQuery* pQuery) {
+int32_t qAnalyseSqlSemantic(SParseContext* pCxt, const struct SCatalogReq* pCatalogReq,
+                            const struct SMetaData* pMetaData, SQuery* pQuery) {
   int32_t code = putMetaDataToCache(pCatalogReq, pMetaData, pQuery->pMetaCache);
 
   if (NULL == pQuery->pRoot) {
-    // todo insert sql
+    return parseInsertSql(pCxt, &pQuery);
   }
 
-  return semanticAnalysis(pCxt, pQuery);
+  return analyseSemantic(pCxt, pQuery);
 }
 
 void qDestroyQuery(SQuery* pQueryNode) { nodesDestroyNode(pQueryNode); }
diff --git a/source/libs/parser/src/sql.c b/source/libs/parser/src/sql.c
index 262abac54bbd1c1ea9847c05507bb13fdedb0462..7fb89bdd7c820e2046d71287b331602172a2b315 100644
--- a/source/libs/parser/src/sql.c
+++ b/source/libs/parser/src/sql.c
@@ -100,25 +100,25 @@
 #endif
 /************* Begin control #defines *****************************************/
 #define YYCODETYPE unsigned short int
-#define YYNOCODE 361
+#define YYNOCODE 357
 #define YYACTIONTYPE unsigned short int
 #define ParseTOKENTYPE  SToken
 typedef union {
   int yyinit;
   ParseTOKENTYPE yy0;
-  EFillMode yy18;
-  SAlterOption yy25;
-  SToken yy53;
-  EOperatorType yy136;
-  int32_t yy158;
-  ENullOrder yy185;
-  SNodeList* yy236;
-  EJoinType yy342;
-  EOrder yy430;
-  int64_t yy435;
-  SDataType yy450;
-  bool yy603;
-  SNode* yy636;
+  SAlterOption yy53;
+  ENullOrder yy109;
+  SToken yy113;
+  EJoinType yy120;
+  int64_t yy123;
+  bool yy131;
+  EOrder yy428;
+  SDataType yy490;
+  EFillMode yy522;
+  int32_t yy550;
+  EOperatorType yy632;
+  SNodeList* yy670;
+  SNode* yy686;
 } YYMINORTYPE;
 #ifndef YYSTACKDEPTH
 #define YYSTACKDEPTH 100
@@ -134,17 +134,17 @@ typedef union {
 #define ParseCTX_FETCH
 #define ParseCTX_STORE
 #define YYFALLBACK 1
-#define YYNSTATE             611
-#define YYNRULE              455
-#define YYNTOKEN             240
-#define YY_MAX_SHIFT         610
-#define YY_MIN_SHIFTREDUCE   901
-#define YY_MAX_SHIFTREDUCE   1355
-#define YY_ERROR_ACTION      1356
-#define YY_ACCEPT_ACTION     1357
-#define YY_NO_ACTION         1358
-#define YY_MIN_REDUCE        1359
-#define YY_MAX_REDUCE        1813
+#define YYNSTATE             612
+#define YYNRULE              451
+#define YYNTOKEN             237
+#define YY_MAX_SHIFT         611
+#define YY_MIN_SHIFTREDUCE   898
+#define YY_MAX_SHIFTREDUCE   1348
+#define YY_ERROR_ACTION      1349
+#define YY_ACCEPT_ACTION     1350
+#define YY_NO_ACTION         1351
+#define YY_MIN_REDUCE        1352
+#define YY_MAX_REDUCE        1802
 /************* End control #defines *******************************************/
 #define YY_NLOOKAHEAD ((int)(sizeof(yy_lookahead)/sizeof(yy_lookahead[0])))
@@ -211,604 +211,598 @@ typedef union {
 ** yy_default[]       Default action for each state.
 ** *********** Begin parsing tables **********************************************/
-#define YY_ACTTAB_COUNT (2153)
+#define YY_ACTTAB_COUNT (2125)
[The machine-generated parsing tables follow in the original patch: the old and new bodies of the yy_action[] and yy_lookahead[] arrays, several thousand renumbered entries each, regenerated by Lemon from sql.y, are elided here.]
0, 276, 0, 47, 347, 47, 88, 141, 0, + /* 1420 */ 0, 144, 286, 0, 22, 240, 290, 47, 47, 47, + /* 1430 */ 47, 47, 41, 90, 41, 0, 240, 22, 0, 162, + /* 1440 */ 22, 164, 306, 47, 47, 309, 310, 311, 312, 313, + /* 1450 */ 314, 48, 316, 268, 0, 22, 47, 0, 22, 0, + /* 1460 */ 0, 276, 22, 20, 268, 37, 22, 0, 47, 0, + /* 1470 */ 0, 286, 276, 0, 0, 290, 44, 145, 293, 206, + /* 1480 */ 81, 145, 286, 41, 212, 81, 290, 240, 41, 145, + /* 1490 */ 354, 306, 82, 41, 309, 310, 311, 312, 313, 314, + /* 1500 */ 44, 316, 306, 41, 82, 309, 310, 311, 312, 313, + /* 1510 */ 314, 81, 316, 142, 318, 268, 140, 2, 81, 41, + /* 1520 */ 41, 82, 81, 276, 33, 44, 240, 82, 82, 161, + /* 1530 */ 44, 82, 82, 286, 41, 47, 45, 290, 41, 240, + /* 1540 */ 293, 47, 51, 52, 53, 54, 55, 47, 47, 47, + /* 1550 */ 47, 178, 81, 306, 268, 44, 309, 310, 311, 312, + /* 1560 */ 313, 314, 276, 316, 82, 82, 81, 268, 81, 22, + /* 1570 */ 44, 80, 286, 81, 83, 276, 290, 82, 82, 293, + /* 1580 */ 81, 0, 212, 37, 22, 286, 212, 180, 240, 290, + /* 1590 */ 81, 143, 306, 81, 81, 309, 310, 311, 312, 313, + /* 1600 */ 314, 140, 316, 44, 44, 306, 82, 91, 309, 310, + /* 1610 */ 311, 312, 313, 314, 81, 316, 268, 240, 81, 81, + /* 1620 */ 92, 82, 47, 47, 276, 81, 47, 82, 105, 81, + /* 1630 */ 47, 140, 82, 142, 286, 144, 81, 146, 290, 47, + /* 1640 */ 82, 47, 81, 22, 81, 268, 82, 105, 105, 93, + /* 1650 */ 81, 81, 47, 276, 306, 164, 240, 309, 310, 311, + /* 1660 */ 312, 313, 314, 286, 316, 81, 113, 290, 22, 59, + /* 1670 */ 105, 58, 47, 64, 79, 41, 47, 47, 47, 47, + /* 1680 */ 47, 22, 47, 306, 268, 47, 309, 310, 311, 312, + /* 1690 */ 313, 314, 276, 316, 47, 47, 47, 47, 47, 47, + /* 1700 */ 47, 64, 286, 0, 47, 240, 290, 45, 37, 0, + /* 1710 */ 47, 45, 37, 0, 47, 45, 240, 37, 0, 47, + /* 1720 */ 37, 45, 306, 0, 47, 309, 310, 311, 312, 313, + /* 1730 */ 314, 0, 316, 268, 46, 0, 22, 21, 357, 357, + /* 1740 */ 21, 276, 22, 22, 268, 20, 357, 357, 357, 357, + /* 1750 */ 357, 286, 276, 357, 357, 290, 357, 357, 357, 357, + /* 1760 */ 357, 357, 286, 357, 357, 240, 290, 357, 357, 357, + /* 1770 */ 357, 306, 357, 357, 309, 310, 311, 312, 313, 314, + /* 1780 */ 240, 316, 306, 357, 357, 309, 310, 311, 312, 313, + /* 1790 */ 314, 357, 316, 268, 357, 357, 357, 240, 357, 357, + /* 1800 */ 357, 276, 357, 357, 357, 357, 357, 357, 268, 357, + /* 1810 */ 357, 286, 240, 357, 357, 290, 276, 357, 357, 357, + /* 1820 */ 357, 357, 357, 357, 357, 268, 286, 357, 357, 240, + /* 1830 */ 290, 306, 357, 276, 309, 310, 311, 312, 313, 314, + /* 1840 */ 268, 316, 357, 286, 357, 357, 306, 290, 276, 309, + /* 1850 */ 310, 311, 312, 313, 314, 357, 316, 268, 286, 357, + /* 1860 */ 357, 357, 290, 306, 357, 276, 309, 310, 311, 312, + /* 1870 */ 313, 314, 357, 316, 357, 286, 240, 357, 306, 290, + /* 1880 */ 357, 309, 310, 311, 312, 313, 314, 357, 316, 357, + /* 1890 */ 357, 357, 357, 357, 357, 306, 357, 357, 309, 310, + /* 1900 */ 311, 312, 313, 314, 268, 316, 357, 357, 240, 357, + /* 1910 */ 357, 357, 276, 357, 357, 357, 357, 357, 357, 357, + /* 1920 */ 357, 357, 286, 357, 357, 240, 290, 357, 357, 357, + /* 1930 */ 357, 357, 357, 357, 357, 357, 268, 357, 357, 357, + /* 1940 */ 357, 357, 306, 357, 276, 309, 310, 311, 312, 313, + /* 1950 */ 314, 357, 316, 268, 286, 357, 357, 240, 290, 357, + /* 1960 */ 357, 276, 357, 357, 357, 357, 357, 357, 357, 357, + /* 1970 */ 357, 286, 357, 357, 306, 290, 357, 309, 310, 311, + /* 1980 */ 312, 313, 314, 357, 316, 268, 357, 357, 357, 357, + /* 1990 */ 357, 306, 357, 276, 309, 310, 311, 312, 313, 314, + /* 2000 */ 357, 316, 357, 286, 357, 357, 240, 290, 357, 
357, + /* 2010 */ 357, 357, 357, 357, 357, 357, 357, 357, 357, 247, + /* 2020 */ 357, 357, 357, 306, 357, 357, 309, 310, 311, 312, + /* 2030 */ 313, 314, 357, 316, 268, 357, 357, 357, 357, 357, + /* 2040 */ 357, 357, 276, 357, 357, 357, 357, 357, 276, 357, + /* 2050 */ 357, 357, 286, 357, 357, 357, 290, 357, 357, 357, + /* 2060 */ 357, 357, 357, 357, 357, 357, 357, 295, 357, 357, + /* 2070 */ 357, 357, 306, 357, 357, 309, 310, 311, 312, 313, + /* 2080 */ 314, 357, 316, 357, 357, 313, 357, 357, 357, 357, + /* 2090 */ 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, + /* 2100 */ 328, 329, 330, 357, 332, 357, 357, 335, 357, 357, + /* 2110 */ 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, + /* 2120 */ 348, 357, 357, 357, 352, }; -#define YY_SHIFT_COUNT (610) +#define YY_SHIFT_COUNT (611) #define YY_SHIFT_MIN (0) -#define YY_SHIFT_MAX (1757) +#define YY_SHIFT_MAX (1735) static const unsigned short int yy_shift_ofst[] = { - /* 0 */ 768, 0, 0, 48, 234, 234, 234, 234, 255, 255, - /* 10 */ 234, 234, 441, 462, 648, 462, 462, 462, 462, 462, - /* 20 */ 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, - /* 30 */ 462, 462, 462, 462, 462, 462, 462, 462, 254, 254, - /* 40 */ 54, 54, 54, 1188, 1188, 1188, 1188, 331, 508, 74, - /* 50 */ 4, 4, 7, 7, 76, 74, 74, 4, 4, 4, - /* 60 */ 4, 4, 4, 216, 4, 265, 371, 527, 265, 4, - /* 70 */ 4, 265, 4, 265, 265, 527, 265, 4, 447, 644, - /* 80 */ 14, 554, 554, 139, 192, 1453, 1453, 1453, 1453, 1453, - /* 90 */ 1453, 1453, 1453, 1453, 1453, 1453, 1453, 1453, 1453, 1453, - /* 100 */ 1453, 1453, 1453, 1453, 604, 229, 482, 482, 198, 5, - /* 110 */ 438, 438, 438, 764, 5, 772, 527, 265, 265, 527, - /* 120 */ 487, 468, 636, 636, 636, 636, 636, 636, 636, 440, - /* 130 */ 299, 222, 27, 486, 37, 346, 287, 456, 681, 674, - /* 140 */ 484, 769, 470, 652, 470, 390, 390, 390, 615, 773, - /* 150 */ 948, 963, 966, 864, 948, 948, 979, 886, 886, 948, - /* 160 */ 1014, 1014, 1020, 216, 527, 216, 1040, 1051, 216, 1040, - /* 170 */ 216, 772, 1066, 216, 216, 948, 216, 1014, 265, 265, - /* 180 */ 265, 265, 265, 265, 265, 265, 265, 265, 265, 948, - /* 190 */ 1014, 1043, 1020, 447, 967, 527, 447, 1040, 447, 772, - /* 200 */ 1066, 447, 1118, 924, 937, 1043, 924, 937, 1043, 1043, - /* 210 */ 265, 931, 938, 968, 970, 974, 772, 1159, 1145, 957, - /* 220 */ 971, 960, 957, 971, 957, 971, 1112, 937, 1043, 1043, - /* 230 */ 937, 1043, 1064, 772, 1066, 447, 487, 447, 772, 1146, - /* 240 */ 468, 948, 447, 1014, 2153, 2153, 2153, 2153, 2153, 2153, - /* 250 */ 2153, 2153, 71, 1425, 93, 453, 248, 587, 187, 63, - /* 260 */ 385, 391, 211, 570, 95, 95, 95, 95, 95, 95, - /* 270 */ 95, 95, 357, 2, 52, 492, 712, 655, 661, 661, - /* 280 */ 661, 661, 854, 501, 786, 788, 790, 792, 857, 863, - /* 290 */ 867, 862, 557, 677, 880, 881, 888, 900, 715, 749, - /* 300 */ 805, 891, 824, 892, 868, 893, 898, 928, 930, 951, - /* 310 */ 869, 879, 955, 956, 961, 965, 975, 976, 807, 242, - /* 320 */ 1261, 1262, 1192, 1265, 1227, 1107, 1237, 1239, 1240, 1114, - /* 330 */ 1288, 1245, 1246, 1119, 1294, 1249, 1297, 1251, 1300, 1254, - /* 340 */ 1304, 1224, 1144, 1147, 1193, 1151, 1312, 1314, 1161, 1163, - /* 350 */ 1318, 1319, 1278, 1329, 1332, 1333, 1334, 1336, 1337, 1338, - /* 360 */ 1339, 1340, 1341, 1342, 1343, 1344, 1347, 1348, 1349, 1350, - /* 370 */ 1351, 1324, 1354, 1366, 1369, 1371, 1372, 1373, 1352, 1375, - /* 380 */ 1376, 1377, 1382, 1383, 1385, 1346, 1359, 1356, 1381, 1355, - /* 390 */ 1384, 1360, 1402, 1365, 1368, 1407, 1408, 1409, 1379, 1410, - /* 400 */ 1412, 1380, 1413, 1361, 1419, 1421, 1378, 1389, 1386, 1422, - 
/* 410 */ 1394, 1398, 1390, 1426, 1400, 1411, 1392, 1430, 1414, 1415, - /* 420 */ 1396, 1457, 1459, 1462, 1463, 1391, 1395, 1418, 1447, 1467, - /* 430 */ 1427, 1435, 1437, 1439, 1431, 1432, 1440, 1449, 1450, 1498, - /* 440 */ 1477, 1501, 1481, 1456, 1506, 1485, 1464, 1509, 1488, 1513, - /* 450 */ 1492, 1495, 1518, 1374, 1472, 1521, 1362, 1500, 1399, 1387, - /* 460 */ 1524, 1526, 1527, 1448, 1491, 1393, 1490, 1494, 1330, 1460, - /* 470 */ 1505, 1465, 1451, 1468, 1469, 1466, 1511, 1510, 1514, 1474, - /* 480 */ 1512, 1363, 1475, 1478, 1517, 1357, 1522, 1520, 1480, 1533, - /* 490 */ 1364, 1497, 1528, 1529, 1534, 1535, 1536, 1537, 1497, 1578, - /* 500 */ 1405, 1545, 1507, 1523, 1530, 1543, 1525, 1532, 1544, 1570, - /* 510 */ 1420, 1538, 1540, 1542, 1539, 1546, 1461, 1547, 1603, 1572, - /* 520 */ 1473, 1549, 1519, 1564, 1574, 1551, 1553, 1555, 1593, 1558, - /* 530 */ 1531, 1567, 1569, 1579, 1559, 1568, 1605, 1573, 1575, 1606, - /* 540 */ 1577, 1580, 1608, 1582, 1583, 1609, 1585, 1556, 1565, 1581, - /* 550 */ 1584, 1638, 1576, 1599, 1601, 1617, 1602, 1586, 1637, 1663, - /* 560 */ 1566, 1629, 1643, 1627, 1613, 1652, 1647, 1648, 1649, 1650, - /* 570 */ 1662, 1677, 1664, 1665, 1646, 1431, 1666, 1432, 1667, 1670, - /* 580 */ 1671, 1672, 1673, 1674, 1722, 1676, 1681, 1690, 1728, 1683, - /* 590 */ 1686, 1695, 1733, 1688, 1692, 1702, 1740, 1694, 1697, 1706, - /* 600 */ 1744, 1698, 1701, 1748, 1757, 1745, 1747, 1749, 1751, 1753, - /* 610 */ 1746, + /* 0 */ 827, 0, 0, 48, 96, 96, 96, 96, 280, 280, + /* 10 */ 96, 96, 328, 376, 560, 376, 376, 376, 376, 376, + /* 20 */ 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, + /* 30 */ 376, 376, 376, 376, 376, 376, 376, 376, 120, 120, + /* 40 */ 23, 23, 23, 834, 834, 834, 834, 325, 650, 124, + /* 50 */ 30, 30, 42, 42, 61, 124, 124, 30, 30, 30, + /* 60 */ 30, 30, 30, 194, 30, 30, 360, 433, 456, 360, + /* 70 */ 30, 30, 360, 30, 360, 360, 456, 360, 30, 611, + /* 80 */ 556, 59, 110, 110, 186, 378, 321, 321, 321, 321, + /* 90 */ 321, 321, 321, 321, 321, 321, 321, 321, 321, 321, + /* 100 */ 321, 321, 321, 321, 321, 161, 153, 575, 575, 299, + /* 110 */ 440, 246, 246, 246, 399, 440, 700, 456, 360, 360, + /* 120 */ 456, 689, 787, 441, 441, 441, 441, 441, 441, 441, + /* 130 */ 1277, 107, 97, 209, 117, 132, 317, 85, 183, 657, + /* 140 */ 745, 671, 297, 563, 497, 563, 704, 704, 704, 409, + /* 150 */ 640, 976, 963, 964, 861, 976, 976, 986, 889, 889, + /* 160 */ 976, 1025, 1025, 1037, 194, 456, 194, 1046, 1048, 194, + /* 170 */ 1046, 194, 700, 1062, 194, 194, 976, 194, 1025, 360, + /* 180 */ 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, + /* 190 */ 976, 1025, 1030, 1037, 611, 938, 456, 611, 976, 1046, + /* 200 */ 611, 700, 1062, 611, 1103, 913, 915, 1030, 913, 915, + /* 210 */ 1030, 1030, 360, 922, 998, 940, 952, 953, 700, 1145, + /* 220 */ 1128, 946, 949, 954, 946, 949, 946, 949, 1096, 915, + /* 230 */ 1030, 1030, 915, 1030, 1040, 700, 1062, 611, 689, 611, + /* 240 */ 700, 1126, 787, 976, 611, 1025, 2125, 2125, 2125, 2125, + /* 250 */ 2125, 2125, 2125, 577, 1491, 294, 869, 64, 14, 406, + /* 260 */ 478, 512, 485, 675, 21, 21, 21, 21, 21, 21, + /* 270 */ 21, 21, 237, 330, 533, 500, 447, 475, 643, 643, + /* 280 */ 643, 643, 762, 753, 731, 732, 740, 790, 907, 909, + /* 290 */ 910, 831, 606, 835, 839, 844, 941, 742, 763, 133, + /* 300 */ 884, 735, 892, 798, 900, 904, 905, 931, 932, 849, + /* 310 */ 881, 897, 948, 950, 951, 968, 977, 1, 719, 1243, + /* 320 */ 1244, 1173, 1248, 1202, 1076, 1204, 1205, 1206, 1080, 1257, + /* 330 */ 1211, 1212, 1087, 1262, 1216, 1264, 1219, 1267, 
1221, 1270, + /* 340 */ 1190, 1112, 1114, 1161, 1119, 1287, 1294, 1125, 1127, 1304, + /* 350 */ 1305, 1255, 1300, 1301, 1302, 1306, 1307, 1309, 1311, 1312, + /* 360 */ 1313, 1314, 1315, 1316, 1317, 1318, 1319, 1320, 1321, 1326, + /* 370 */ 1295, 1327, 1334, 1337, 1338, 1339, 1340, 1323, 1341, 1342, + /* 380 */ 1346, 1351, 1352, 1353, 1308, 1322, 1324, 1335, 1328, 1347, + /* 390 */ 1331, 1355, 1345, 1325, 1356, 1364, 1385, 1349, 1240, 1388, + /* 400 */ 1389, 1354, 1390, 1333, 1394, 1395, 1350, 1357, 1359, 1398, + /* 410 */ 1358, 1361, 1362, 1400, 1360, 1363, 1366, 1401, 1367, 1365, + /* 420 */ 1372, 1411, 1413, 1419, 1420, 1343, 1329, 1369, 1402, 1423, + /* 430 */ 1380, 1381, 1382, 1383, 1391, 1393, 1384, 1396, 1397, 1435, + /* 440 */ 1415, 1438, 1418, 1403, 1454, 1433, 1409, 1457, 1436, 1459, + /* 450 */ 1440, 1443, 1460, 1332, 1421, 1469, 1368, 1444, 1336, 1371, + /* 460 */ 1467, 1470, 1473, 1344, 1474, 1399, 1428, 1376, 1442, 1447, + /* 470 */ 1272, 1410, 1452, 1422, 1404, 1430, 1437, 1439, 1462, 1432, + /* 480 */ 1456, 1441, 1478, 1370, 1445, 1446, 1481, 1273, 1479, 1486, + /* 490 */ 1449, 1493, 1374, 1450, 1488, 1494, 1500, 1501, 1502, 1503, + /* 500 */ 1450, 1515, 1373, 1497, 1482, 1471, 1483, 1511, 1485, 1487, + /* 510 */ 1526, 1547, 1407, 1492, 1495, 1496, 1499, 1509, 1448, 1512, + /* 520 */ 1581, 1546, 1461, 1513, 1516, 1559, 1560, 1533, 1524, 1537, + /* 530 */ 1562, 1538, 1528, 1539, 1575, 1576, 1544, 1545, 1579, 1548, + /* 540 */ 1550, 1583, 1555, 1558, 1592, 1561, 1564, 1594, 1563, 1523, + /* 550 */ 1542, 1543, 1565, 1621, 1556, 1569, 1570, 1605, 1584, 1553, + /* 560 */ 1646, 1610, 1613, 1625, 1609, 1595, 1634, 1629, 1630, 1631, + /* 570 */ 1632, 1633, 1659, 1635, 1638, 1637, 1391, 1647, 1393, 1648, + /* 580 */ 1649, 1650, 1651, 1652, 1653, 1703, 1657, 1662, 1671, 1709, + /* 590 */ 1663, 1666, 1675, 1713, 1667, 1670, 1680, 1718, 1672, 1676, + /* 600 */ 1683, 1723, 1677, 1688, 1731, 1735, 1714, 1716, 1720, 1721, + /* 610 */ 1719, 1725, }; -#define YY_REDUCE_COUNT (251) -#define YY_REDUCE_MIN (-320) -#define YY_REDUCE_MAX (1796) +#define YY_REDUCE_COUNT (252) +#define YY_REDUCE_MIN (-334) +#define YY_REDUCE_MAX (1772) static const short yy_reduce_ofst[] = { - /* 0 */ 259, -226, 236, 61, 577, 638, 671, 752, 784, 835, - /* 10 */ 29, 850, 901, 953, 736, 962, 1010, 1042, 1074, 1122, - /* 20 */ 1135, 1175, 1223, 1280, 1328, 1358, 1388, 1436, 1445, 1493, - /* 30 */ 1541, 1552, 1571, 1630, 1639, 1699, 1708, 1756, 1760, 1796, - /* 40 */ -20, 261, 269, -272, 207, 204, 298, 182, 449, 512, - /* 50 */ -244, -164, -246, -241, -320, -178, -76, -196, 55, 59, - /* 60 */ 132, 239, 419, -222, 422, -227, -256, -139, -1, 534, - /* 70 */ 535, 84, 537, 310, 102, 179, 448, 568, 343, -75, - /* 80 */ -315, -315, -315, -228, -49, 60, 90, 123, 194, 213, - /* 90 */ 219, 235, 317, 380, 472, 507, 559, 565, 566, 567, - /* 100 */ 569, 572, 576, 579, -154, -152, -216, -68, 33, 138, - /* 110 */ 75, 406, 414, -3, 176, -127, 250, 88, 439, 325, - /* 120 */ 202, 111, 302, 526, 555, 578, 583, 612, 631, 605, - /* 130 */ 663, 653, 571, 574, 628, 592, 666, 666, 689, 690, - /* 140 */ 662, 647, 635, 635, 635, 629, 637, 650, 665, 666, - /* 150 */ 751, 703, 757, 717, 771, 774, 735, 743, 745, 782, - /* 160 */ 791, 794, 737, 789, 759, 806, 777, 776, 827, 787, - /* 170 */ 830, 808, 798, 834, 836, 842, 840, 851, 832, 837, - /* 180 */ 838, 839, 841, 844, 845, 846, 847, 848, 849, 855, - /* 190 */ 866, 820, 817, 872, 822, 852, 874, 833, 878, 856, - /* 200 */ 843, 884, 859, 797, 853, 865, 810, 875, 882, 883, - /* 210 */ 666, 811, 
816, 860, 870, 635, 905, 877, 876, 858, - /* 220 */ 871, 829, 873, 885, 889, 894, 903, 908, 902, 904, - /* 230 */ 910, 906, 917, 947, 919, 977, 964, 981, 958, 972, - /* 240 */ 982, 986, 985, 995, 943, 944, 959, 984, 989, 991, - /* 250 */ 1005, 1019, + /* 0 */ -185, 489, 520, -223, 91, 581, 644, 730, 761, 822, + /* 10 */ 885, 916, 316, 970, 979, 1057, 1068, 1136, 1185, 1196, + /* 20 */ 1247, 1286, 1299, 1348, 1377, 1416, 1465, 1476, 1525, 1540, + /* 30 */ 1557, 1572, 1589, 1636, 1668, 1685, 1717, 1766, 811, 1772, + /* 40 */ 98, 389, 448, -267, -265, 466, 548, -293, -176, 34, + /* 50 */ 332, 464, -236, -189, -334, -304, -237, 57, 111, 119, + /* 60 */ 334, 451, 538, 180, 564, 601, 104, 79, -21, -113, + /* 70 */ 623, 627, 228, 642, 86, 337, -133, 218, 664, -239, + /* 80 */ 58, -48, -48, -48, -220, -253, -146, -110, 6, 13, + /* 90 */ 122, 154, 193, 215, 306, 313, 371, 416, 422, 424, + /* 100 */ 432, 436, 468, 469, 473, 150, 249, -178, 129, -46, + /* 110 */ 412, -87, 278, 394, -123, 415, 88, 329, 460, 435, + /* 120 */ 411, 326, 476, -254, 540, 558, 590, 592, 600, 647, + /* 130 */ 649, 494, 665, 584, 605, 656, 621, 696, 696, 725, + /* 140 */ 717, 702, 676, 660, 660, 660, 634, 646, 658, 663, + /* 150 */ 696, 758, 706, 765, 720, 774, 775, 743, 749, 752, + /* 160 */ 791, 806, 810, 759, 809, 777, 813, 782, 773, 826, + /* 170 */ 785, 828, 805, 796, 832, 833, 841, 837, 850, 818, + /* 180 */ 823, 825, 829, 836, 842, 843, 846, 847, 848, 851, + /* 190 */ 852, 853, 812, 799, 855, 803, 821, 868, 873, 854, + /* 200 */ 871, 866, 840, 872, 856, 781, 858, 857, 792, 859, + /* 210 */ 862, 864, 696, 816, 814, 830, 838, 660, 890, 860, + /* 220 */ 863, 865, 867, 824, 870, 874, 880, 883, 878, 887, + /* 230 */ 891, 895, 888, 896, 899, 912, 903, 943, 939, 955, + /* 240 */ 935, 942, 957, 965, 973, 971, 920, 918, 972, 974, + /* 250 */ 975, 990, 1004, }; static const YYACTIONTYPE yy_default[] = { - /* 0 */ 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, - /* 10 */ 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, - /* 20 */ 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, - /* 30 */ 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, - /* 40 */ 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, - /* 50 */ 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, - /* 60 */ 1356, 1356, 1356, 1425, 1356, 1356, 1356, 1356, 1356, 1356, - /* 70 */ 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1423, 1564, - /* 80 */ 1356, 1731, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, - /* 90 */ 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, - /* 100 */ 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1425, 1356, - /* 110 */ 1742, 1742, 1742, 1423, 1356, 1356, 1356, 1356, 1356, 1356, - /* 120 */ 1519, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1603, - /* 130 */ 1356, 1356, 1808, 1356, 1609, 1766, 1356, 1356, 1356, 1356, - /* 140 */ 1472, 1758, 1734, 1748, 1735, 1793, 1793, 1793, 1751, 1356, - /* 150 */ 1356, 1356, 1356, 1595, 1356, 1356, 1569, 1566, 1566, 1356, - /* 160 */ 1356, 1356, 1356, 1425, 1356, 1425, 1356, 1356, 1425, 1356, - /* 170 */ 1425, 1356, 1356, 1425, 1425, 1356, 1425, 1356, 1356, 1356, - /* 180 */ 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, - /* 190 */ 1356, 1356, 1356, 1423, 1605, 1356, 1423, 1356, 1423, 1356, - /* 200 */ 1356, 1423, 1356, 1773, 1771, 1356, 1773, 1771, 1356, 1356, - /* 210 */ 1356, 1785, 1781, 1764, 1762, 1748, 1356, 1356, 1356, 1799, - /* 220 */ 1795, 1811, 1799, 1795, 1799, 1795, 1356, 1771, 1356, 1356, - /* 230 */ 
1771, 1356, 1577, 1356, 1356, 1423, 1356, 1423, 1356, 1488, - /* 240 */ 1356, 1356, 1423, 1356, 1597, 1611, 1587, 1522, 1522, 1522, - /* 250 */ 1426, 1361, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, - /* 260 */ 1356, 1356, 1356, 1484, 1675, 1784, 1783, 1707, 1706, 1705, - /* 270 */ 1703, 1674, 1356, 1356, 1356, 1356, 1356, 1356, 1668, 1669, - /* 280 */ 1667, 1666, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, - /* 290 */ 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1732, 1356, 1796, - /* 300 */ 1800, 1356, 1356, 1356, 1651, 1356, 1356, 1356, 1356, 1356, - /* 310 */ 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, - /* 320 */ 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, - /* 330 */ 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, - /* 340 */ 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, - /* 350 */ 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, - /* 360 */ 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, - /* 370 */ 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, - /* 380 */ 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1390, 1356, 1356, - /* 390 */ 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, - /* 400 */ 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, - /* 410 */ 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, - /* 420 */ 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, - /* 430 */ 1356, 1356, 1356, 1356, 1453, 1452, 1356, 1356, 1356, 1356, - /* 440 */ 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, - /* 450 */ 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, - /* 460 */ 1356, 1356, 1356, 1356, 1356, 1356, 1755, 1765, 1356, 1356, - /* 470 */ 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1651, 1356, - /* 480 */ 1782, 1356, 1741, 1737, 1356, 1356, 1733, 1356, 1356, 1794, - /* 490 */ 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1727, - /* 500 */ 1356, 1700, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, - /* 510 */ 1662, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, - /* 520 */ 1356, 1356, 1356, 1650, 1356, 1691, 1356, 1356, 1356, 1356, - /* 530 */ 1356, 1356, 1356, 1356, 1516, 1356, 1356, 1356, 1356, 1356, - /* 540 */ 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1501, 1499, 1498, - /* 550 */ 1497, 1356, 1494, 1356, 1356, 1356, 1356, 1356, 1356, 1356, - /* 560 */ 1356, 1356, 1356, 1356, 1356, 1445, 1356, 1356, 1356, 1356, - /* 570 */ 1356, 1356, 1356, 1356, 1356, 1436, 1356, 1435, 1356, 1356, - /* 580 */ 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, - /* 590 */ 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, - /* 600 */ 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, 1356, - /* 610 */ 1356, + /* 0 */ 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, + /* 10 */ 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, + /* 20 */ 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, + /* 30 */ 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, + /* 40 */ 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, + /* 50 */ 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, + /* 60 */ 1349, 1349, 1349, 1418, 1349, 1349, 1349, 1349, 1349, 1349, + /* 70 */ 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1416, + /* 80 */ 1556, 1349, 1720, 1349, 1349, 1349, 1349, 1349, 1349, 1349, + /* 90 */ 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, + /* 100 */ 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1418, + /* 110 */ 1349, 1731, 1731, 
1731, 1416, 1349, 1349, 1349, 1349, 1349, + /* 120 */ 1349, 1512, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, + /* 130 */ 1592, 1349, 1349, 1797, 1349, 1598, 1755, 1349, 1349, 1349, + /* 140 */ 1349, 1465, 1747, 1723, 1737, 1724, 1782, 1782, 1782, 1740, + /* 150 */ 1349, 1349, 1349, 1349, 1584, 1349, 1349, 1561, 1558, 1558, + /* 160 */ 1349, 1349, 1349, 1349, 1418, 1349, 1418, 1349, 1349, 1418, + /* 170 */ 1349, 1418, 1349, 1349, 1418, 1418, 1349, 1418, 1349, 1349, + /* 180 */ 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, + /* 190 */ 1349, 1349, 1349, 1349, 1416, 1594, 1349, 1416, 1349, 1349, + /* 200 */ 1416, 1349, 1349, 1416, 1349, 1762, 1760, 1349, 1762, 1760, + /* 210 */ 1349, 1349, 1349, 1774, 1770, 1753, 1751, 1737, 1349, 1349, + /* 220 */ 1349, 1788, 1784, 1800, 1788, 1784, 1788, 1784, 1349, 1760, + /* 230 */ 1349, 1349, 1760, 1349, 1569, 1349, 1349, 1416, 1349, 1416, + /* 240 */ 1349, 1481, 1349, 1349, 1416, 1349, 1586, 1600, 1515, 1515, + /* 250 */ 1515, 1419, 1354, 1349, 1349, 1349, 1349, 1349, 1349, 1349, + /* 260 */ 1349, 1349, 1349, 1349, 1664, 1773, 1772, 1696, 1695, 1694, + /* 270 */ 1692, 1663, 1477, 1349, 1349, 1349, 1349, 1349, 1657, 1658, + /* 280 */ 1656, 1655, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, + /* 290 */ 1349, 1349, 1349, 1349, 1349, 1349, 1721, 1349, 1785, 1789, + /* 300 */ 1349, 1349, 1349, 1640, 1349, 1349, 1349, 1349, 1349, 1349, + /* 310 */ 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, + /* 320 */ 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, + /* 330 */ 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, + /* 340 */ 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, + /* 350 */ 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, + /* 360 */ 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, + /* 370 */ 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, + /* 380 */ 1349, 1349, 1349, 1349, 1349, 1349, 1383, 1349, 1349, 1349, + /* 390 */ 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, + /* 400 */ 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, + /* 410 */ 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, + /* 420 */ 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, + /* 430 */ 1349, 1349, 1349, 1349, 1446, 1445, 1349, 1349, 1349, 1349, + /* 440 */ 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, + /* 450 */ 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, + /* 460 */ 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1744, 1754, + /* 470 */ 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, + /* 480 */ 1640, 1349, 1771, 1349, 1730, 1726, 1349, 1349, 1722, 1349, + /* 490 */ 1349, 1783, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, + /* 500 */ 1349, 1716, 1349, 1689, 1349, 1349, 1349, 1349, 1349, 1349, + /* 510 */ 1349, 1349, 1651, 1349, 1349, 1349, 1349, 1349, 1349, 1349, + /* 520 */ 1349, 1349, 1349, 1349, 1349, 1639, 1349, 1680, 1349, 1349, + /* 530 */ 1349, 1349, 1349, 1349, 1349, 1349, 1509, 1349, 1349, 1349, + /* 540 */ 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1494, + /* 550 */ 1492, 1491, 1490, 1349, 1487, 1349, 1349, 1349, 1349, 1349, + /* 560 */ 1349, 1349, 1349, 1349, 1349, 1349, 1438, 1349, 1349, 1349, + /* 570 */ 1349, 1349, 1349, 1349, 1349, 1349, 1429, 1349, 1428, 1349, + /* 580 */ 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, + /* 590 */ 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, + /* 600 */ 1349, 1349, 1349, 1349, 1349, 1349, 1349, 
1349, 1349, 1349, + /* 610 */ 1349, 1349, }; /********** End of lemon-generated parsing tables *****************************/ @@ -940,7 +934,6 @@ static const YYCODETYPE yyFallback[] = { 0, /* BLOB => nothing */ 0, /* VARBINARY => nothing */ 0, /* DECIMAL => nothing */ - 0, /* DELAY => nothing */ 0, /* FILE_FACTOR => nothing */ 0, /* NK_FLOAT => nothing */ 0, /* ROLLUP => nothing */ @@ -975,9 +968,8 @@ static const YYCODETYPE yyFallback[] = { 0, /* INTERVAL => nothing */ 0, /* TOPIC => nothing */ 0, /* AS => nothing */ - 0, /* CGROUP => nothing */ - 0, /* WITH => nothing */ - 0, /* SCHEMA => nothing */ + 0, /* CONSUMER => nothing */ + 0, /* GROUP => nothing */ 0, /* DESC => nothing */ 0, /* DESCRIBE => nothing */ 0, /* RESET => nothing */ @@ -1052,7 +1044,6 @@ static const YYCODETYPE yyFallback[] = { 0, /* PREV => nothing */ 0, /* LINEAR => nothing */ 0, /* NEXT => nothing */ - 0, /* GROUP => nothing */ 0, /* HAVING => nothing */ 0, /* ORDER => nothing */ 0, /* SLIMIT => nothing */ @@ -1062,12 +1053,12 @@ static const YYCODETYPE yyFallback[] = { 0, /* ASC => nothing */ 0, /* NULLS => nothing */ 0, /* ID => nothing */ - 233, /* NK_BITNOT => ID */ - 233, /* INSERT => ID */ - 233, /* VALUES => ID */ - 233, /* IMPORT => ID */ - 233, /* NK_SEMI => ID */ - 233, /* FILE => ID */ + 230, /* NK_BITNOT => ID */ + 230, /* INSERT => ID */ + 230, /* VALUES => ID */ + 230, /* IMPORT => ID */ + 230, /* NK_SEMI => ID */ + 230, /* FILE => ID */ }; #endif /* YYFALLBACK */ @@ -1267,255 +1258,251 @@ static const char *const yyTokenName[] = { /* 109 */ "BLOB", /* 110 */ "VARBINARY", /* 111 */ "DECIMAL", - /* 112 */ "DELAY", - /* 113 */ "FILE_FACTOR", - /* 114 */ "NK_FLOAT", - /* 115 */ "ROLLUP", - /* 116 */ "TTL", - /* 117 */ "SMA", - /* 118 */ "SHOW", - /* 119 */ "DATABASES", - /* 120 */ "TABLES", - /* 121 */ "STABLES", - /* 122 */ "MNODES", - /* 123 */ "MODULES", - /* 124 */ "QNODES", - /* 125 */ "FUNCTIONS", - /* 126 */ "INDEXES", - /* 127 */ "ACCOUNTS", - /* 128 */ "APPS", - /* 129 */ "CONNECTIONS", - /* 130 */ "LICENCE", - /* 131 */ "GRANTS", - /* 132 */ "QUERIES", - /* 133 */ "SCORES", - /* 134 */ "TOPICS", - /* 135 */ "VARIABLES", - /* 136 */ "BNODES", - /* 137 */ "SNODES", - /* 138 */ "CLUSTER", - /* 139 */ "TRANSACTIONS", - /* 140 */ "LIKE", - /* 141 */ "INDEX", - /* 142 */ "FULLTEXT", - /* 143 */ "FUNCTION", - /* 144 */ "INTERVAL", - /* 145 */ "TOPIC", - /* 146 */ "AS", - /* 147 */ "CGROUP", - /* 148 */ "WITH", - /* 149 */ "SCHEMA", - /* 150 */ "DESC", - /* 151 */ "DESCRIBE", - /* 152 */ "RESET", - /* 153 */ "QUERY", - /* 154 */ "CACHE", - /* 155 */ "EXPLAIN", - /* 156 */ "ANALYZE", - /* 157 */ "VERBOSE", - /* 158 */ "NK_BOOL", - /* 159 */ "RATIO", - /* 160 */ "COMPACT", - /* 161 */ "VNODES", - /* 162 */ "IN", - /* 163 */ "OUTPUTTYPE", - /* 164 */ "AGGREGATE", - /* 165 */ "BUFSIZE", - /* 166 */ "STREAM", - /* 167 */ "INTO", - /* 168 */ "TRIGGER", - /* 169 */ "AT_ONCE", - /* 170 */ "WINDOW_CLOSE", - /* 171 */ "WATERMARK", - /* 172 */ "KILL", - /* 173 */ "CONNECTION", - /* 174 */ "TRANSACTION", - /* 175 */ "MERGE", - /* 176 */ "VGROUP", - /* 177 */ "REDISTRIBUTE", - /* 178 */ "SPLIT", - /* 179 */ "SYNCDB", - /* 180 */ "NULL", - /* 181 */ "NK_QUESTION", - /* 182 */ "NK_ARROW", - /* 183 */ "ROWTS", - /* 184 */ "TBNAME", - /* 185 */ "QSTARTTS", - /* 186 */ "QENDTS", - /* 187 */ "WSTARTTS", - /* 188 */ "WENDTS", - /* 189 */ "WDURATION", - /* 190 */ "CAST", - /* 191 */ "NOW", - /* 192 */ "TODAY", - /* 193 */ "TIMEZONE", - /* 194 */ "COUNT", - /* 195 */ "FIRST", - /* 196 */ "LAST", - /* 197 */ "LAST_ROW", - 
/* 198 */ "BETWEEN", - /* 199 */ "IS", - /* 200 */ "NK_LT", - /* 201 */ "NK_GT", - /* 202 */ "NK_LE", - /* 203 */ "NK_GE", - /* 204 */ "NK_NE", - /* 205 */ "MATCH", - /* 206 */ "NMATCH", - /* 207 */ "CONTAINS", - /* 208 */ "JOIN", - /* 209 */ "INNER", - /* 210 */ "SELECT", - /* 211 */ "DISTINCT", - /* 212 */ "WHERE", - /* 213 */ "PARTITION", - /* 214 */ "BY", - /* 215 */ "SESSION", - /* 216 */ "STATE_WINDOW", - /* 217 */ "SLIDING", - /* 218 */ "FILL", - /* 219 */ "VALUE", - /* 220 */ "NONE", - /* 221 */ "PREV", - /* 222 */ "LINEAR", - /* 223 */ "NEXT", - /* 224 */ "GROUP", - /* 225 */ "HAVING", - /* 226 */ "ORDER", - /* 227 */ "SLIMIT", - /* 228 */ "SOFFSET", - /* 229 */ "LIMIT", - /* 230 */ "OFFSET", - /* 231 */ "ASC", - /* 232 */ "NULLS", - /* 233 */ "ID", - /* 234 */ "NK_BITNOT", - /* 235 */ "INSERT", - /* 236 */ "VALUES", - /* 237 */ "IMPORT", - /* 238 */ "NK_SEMI", - /* 239 */ "FILE", - /* 240 */ "cmd", - /* 241 */ "account_options", - /* 242 */ "alter_account_options", - /* 243 */ "literal", - /* 244 */ "alter_account_option", - /* 245 */ "user_name", - /* 246 */ "privileges", - /* 247 */ "priv_level", - /* 248 */ "priv_type_list", - /* 249 */ "priv_type", - /* 250 */ "db_name", - /* 251 */ "dnode_endpoint", - /* 252 */ "dnode_host_name", - /* 253 */ "not_exists_opt", - /* 254 */ "db_options", - /* 255 */ "exists_opt", - /* 256 */ "alter_db_options", - /* 257 */ "integer_list", - /* 258 */ "variable_list", - /* 259 */ "retention_list", - /* 260 */ "alter_db_option", - /* 261 */ "retention", - /* 262 */ "full_table_name", - /* 263 */ "column_def_list", - /* 264 */ "tags_def_opt", - /* 265 */ "table_options", - /* 266 */ "multi_create_clause", - /* 267 */ "tags_def", - /* 268 */ "multi_drop_clause", - /* 269 */ "alter_table_clause", - /* 270 */ "alter_table_options", - /* 271 */ "column_name", - /* 272 */ "type_name", - /* 273 */ "signed_literal", - /* 274 */ "create_subtable_clause", - /* 275 */ "specific_tags_opt", - /* 276 */ "literal_list", - /* 277 */ "drop_table_clause", - /* 278 */ "col_name_list", - /* 279 */ "table_name", - /* 280 */ "column_def", - /* 281 */ "func_name_list", - /* 282 */ "alter_table_option", - /* 283 */ "col_name", - /* 284 */ "db_name_cond_opt", - /* 285 */ "like_pattern_opt", - /* 286 */ "table_name_cond", - /* 287 */ "from_db_opt", - /* 288 */ "func_name", - /* 289 */ "function_name", - /* 290 */ "index_name", - /* 291 */ "index_options", - /* 292 */ "func_list", - /* 293 */ "duration_literal", - /* 294 */ "sliding_opt", - /* 295 */ "func", - /* 296 */ "expression_list", - /* 297 */ "topic_name", - /* 298 */ "topic_options", - /* 299 */ "query_expression", - /* 300 */ "cgroup_name", - /* 301 */ "analyze_opt", - /* 302 */ "explain_options", - /* 303 */ "agg_func_opt", - /* 304 */ "bufsize_opt", - /* 305 */ "stream_name", - /* 306 */ "stream_options", - /* 307 */ "into_opt", - /* 308 */ "dnode_list", - /* 309 */ "signed", - /* 310 */ "literal_func", - /* 311 */ "table_alias", - /* 312 */ "column_alias", - /* 313 */ "expression", - /* 314 */ "pseudo_column", - /* 315 */ "column_reference", - /* 316 */ "function_expression", - /* 317 */ "subquery", - /* 318 */ "star_func", - /* 319 */ "star_func_para_list", - /* 320 */ "noarg_func", - /* 321 */ "other_para_list", - /* 322 */ "star_func_para", - /* 323 */ "predicate", - /* 324 */ "compare_op", - /* 325 */ "in_op", - /* 326 */ "in_predicate_value", - /* 327 */ "boolean_value_expression", - /* 328 */ "boolean_primary", - /* 329 */ "common_expression", - /* 330 */ "from_clause", - /* 331 */ 
"table_reference_list", - /* 332 */ "table_reference", - /* 333 */ "table_primary", - /* 334 */ "joined_table", - /* 335 */ "alias_opt", - /* 336 */ "parenthesized_joined_table", - /* 337 */ "join_type", - /* 338 */ "search_condition", - /* 339 */ "query_specification", - /* 340 */ "set_quantifier_opt", - /* 341 */ "select_list", - /* 342 */ "where_clause_opt", - /* 343 */ "partition_by_clause_opt", - /* 344 */ "twindow_clause_opt", - /* 345 */ "group_by_clause_opt", - /* 346 */ "having_clause_opt", - /* 347 */ "select_sublist", - /* 348 */ "select_item", - /* 349 */ "fill_opt", - /* 350 */ "fill_mode", - /* 351 */ "group_by_list", - /* 352 */ "query_expression_body", - /* 353 */ "order_by_clause_opt", - /* 354 */ "slimit_clause_opt", - /* 355 */ "limit_clause_opt", - /* 356 */ "query_primary", - /* 357 */ "sort_specification_list", - /* 358 */ "sort_specification", - /* 359 */ "ordering_specification_opt", - /* 360 */ "null_ordering_opt", + /* 112 */ "FILE_FACTOR", + /* 113 */ "NK_FLOAT", + /* 114 */ "ROLLUP", + /* 115 */ "TTL", + /* 116 */ "SMA", + /* 117 */ "SHOW", + /* 118 */ "DATABASES", + /* 119 */ "TABLES", + /* 120 */ "STABLES", + /* 121 */ "MNODES", + /* 122 */ "MODULES", + /* 123 */ "QNODES", + /* 124 */ "FUNCTIONS", + /* 125 */ "INDEXES", + /* 126 */ "ACCOUNTS", + /* 127 */ "APPS", + /* 128 */ "CONNECTIONS", + /* 129 */ "LICENCE", + /* 130 */ "GRANTS", + /* 131 */ "QUERIES", + /* 132 */ "SCORES", + /* 133 */ "TOPICS", + /* 134 */ "VARIABLES", + /* 135 */ "BNODES", + /* 136 */ "SNODES", + /* 137 */ "CLUSTER", + /* 138 */ "TRANSACTIONS", + /* 139 */ "LIKE", + /* 140 */ "INDEX", + /* 141 */ "FULLTEXT", + /* 142 */ "FUNCTION", + /* 143 */ "INTERVAL", + /* 144 */ "TOPIC", + /* 145 */ "AS", + /* 146 */ "CONSUMER", + /* 147 */ "GROUP", + /* 148 */ "DESC", + /* 149 */ "DESCRIBE", + /* 150 */ "RESET", + /* 151 */ "QUERY", + /* 152 */ "CACHE", + /* 153 */ "EXPLAIN", + /* 154 */ "ANALYZE", + /* 155 */ "VERBOSE", + /* 156 */ "NK_BOOL", + /* 157 */ "RATIO", + /* 158 */ "COMPACT", + /* 159 */ "VNODES", + /* 160 */ "IN", + /* 161 */ "OUTPUTTYPE", + /* 162 */ "AGGREGATE", + /* 163 */ "BUFSIZE", + /* 164 */ "STREAM", + /* 165 */ "INTO", + /* 166 */ "TRIGGER", + /* 167 */ "AT_ONCE", + /* 168 */ "WINDOW_CLOSE", + /* 169 */ "WATERMARK", + /* 170 */ "KILL", + /* 171 */ "CONNECTION", + /* 172 */ "TRANSACTION", + /* 173 */ "MERGE", + /* 174 */ "VGROUP", + /* 175 */ "REDISTRIBUTE", + /* 176 */ "SPLIT", + /* 177 */ "SYNCDB", + /* 178 */ "NULL", + /* 179 */ "NK_QUESTION", + /* 180 */ "NK_ARROW", + /* 181 */ "ROWTS", + /* 182 */ "TBNAME", + /* 183 */ "QSTARTTS", + /* 184 */ "QENDTS", + /* 185 */ "WSTARTTS", + /* 186 */ "WENDTS", + /* 187 */ "WDURATION", + /* 188 */ "CAST", + /* 189 */ "NOW", + /* 190 */ "TODAY", + /* 191 */ "TIMEZONE", + /* 192 */ "COUNT", + /* 193 */ "FIRST", + /* 194 */ "LAST", + /* 195 */ "LAST_ROW", + /* 196 */ "BETWEEN", + /* 197 */ "IS", + /* 198 */ "NK_LT", + /* 199 */ "NK_GT", + /* 200 */ "NK_LE", + /* 201 */ "NK_GE", + /* 202 */ "NK_NE", + /* 203 */ "MATCH", + /* 204 */ "NMATCH", + /* 205 */ "CONTAINS", + /* 206 */ "JOIN", + /* 207 */ "INNER", + /* 208 */ "SELECT", + /* 209 */ "DISTINCT", + /* 210 */ "WHERE", + /* 211 */ "PARTITION", + /* 212 */ "BY", + /* 213 */ "SESSION", + /* 214 */ "STATE_WINDOW", + /* 215 */ "SLIDING", + /* 216 */ "FILL", + /* 217 */ "VALUE", + /* 218 */ "NONE", + /* 219 */ "PREV", + /* 220 */ "LINEAR", + /* 221 */ "NEXT", + /* 222 */ "HAVING", + /* 223 */ "ORDER", + /* 224 */ "SLIMIT", + /* 225 */ "SOFFSET", + /* 226 */ "LIMIT", + /* 227 */ "OFFSET", + /* 
228 */ "ASC", + /* 229 */ "NULLS", + /* 230 */ "ID", + /* 231 */ "NK_BITNOT", + /* 232 */ "INSERT", + /* 233 */ "VALUES", + /* 234 */ "IMPORT", + /* 235 */ "NK_SEMI", + /* 236 */ "FILE", + /* 237 */ "cmd", + /* 238 */ "account_options", + /* 239 */ "alter_account_options", + /* 240 */ "literal", + /* 241 */ "alter_account_option", + /* 242 */ "user_name", + /* 243 */ "privileges", + /* 244 */ "priv_level", + /* 245 */ "priv_type_list", + /* 246 */ "priv_type", + /* 247 */ "db_name", + /* 248 */ "dnode_endpoint", + /* 249 */ "dnode_host_name", + /* 250 */ "not_exists_opt", + /* 251 */ "db_options", + /* 252 */ "exists_opt", + /* 253 */ "alter_db_options", + /* 254 */ "integer_list", + /* 255 */ "variable_list", + /* 256 */ "retention_list", + /* 257 */ "alter_db_option", + /* 258 */ "retention", + /* 259 */ "full_table_name", + /* 260 */ "column_def_list", + /* 261 */ "tags_def_opt", + /* 262 */ "table_options", + /* 263 */ "multi_create_clause", + /* 264 */ "tags_def", + /* 265 */ "multi_drop_clause", + /* 266 */ "alter_table_clause", + /* 267 */ "alter_table_options", + /* 268 */ "column_name", + /* 269 */ "type_name", + /* 270 */ "signed_literal", + /* 271 */ "create_subtable_clause", + /* 272 */ "specific_tags_opt", + /* 273 */ "literal_list", + /* 274 */ "drop_table_clause", + /* 275 */ "col_name_list", + /* 276 */ "table_name", + /* 277 */ "column_def", + /* 278 */ "func_name_list", + /* 279 */ "alter_table_option", + /* 280 */ "col_name", + /* 281 */ "db_name_cond_opt", + /* 282 */ "like_pattern_opt", + /* 283 */ "table_name_cond", + /* 284 */ "from_db_opt", + /* 285 */ "func_name", + /* 286 */ "function_name", + /* 287 */ "index_name", + /* 288 */ "index_options", + /* 289 */ "func_list", + /* 290 */ "duration_literal", + /* 291 */ "sliding_opt", + /* 292 */ "func", + /* 293 */ "expression_list", + /* 294 */ "topic_name", + /* 295 */ "query_expression", + /* 296 */ "cgroup_name", + /* 297 */ "analyze_opt", + /* 298 */ "explain_options", + /* 299 */ "agg_func_opt", + /* 300 */ "bufsize_opt", + /* 301 */ "stream_name", + /* 302 */ "stream_options", + /* 303 */ "into_opt", + /* 304 */ "dnode_list", + /* 305 */ "signed", + /* 306 */ "literal_func", + /* 307 */ "table_alias", + /* 308 */ "column_alias", + /* 309 */ "expression", + /* 310 */ "pseudo_column", + /* 311 */ "column_reference", + /* 312 */ "function_expression", + /* 313 */ "subquery", + /* 314 */ "star_func", + /* 315 */ "star_func_para_list", + /* 316 */ "noarg_func", + /* 317 */ "other_para_list", + /* 318 */ "star_func_para", + /* 319 */ "predicate", + /* 320 */ "compare_op", + /* 321 */ "in_op", + /* 322 */ "in_predicate_value", + /* 323 */ "boolean_value_expression", + /* 324 */ "boolean_primary", + /* 325 */ "common_expression", + /* 326 */ "from_clause", + /* 327 */ "table_reference_list", + /* 328 */ "table_reference", + /* 329 */ "table_primary", + /* 330 */ "joined_table", + /* 331 */ "alias_opt", + /* 332 */ "parenthesized_joined_table", + /* 333 */ "join_type", + /* 334 */ "search_condition", + /* 335 */ "query_specification", + /* 336 */ "set_quantifier_opt", + /* 337 */ "select_list", + /* 338 */ "where_clause_opt", + /* 339 */ "partition_by_clause_opt", + /* 340 */ "twindow_clause_opt", + /* 341 */ "group_by_clause_opt", + /* 342 */ "having_clause_opt", + /* 343 */ "select_sublist", + /* 344 */ "select_item", + /* 345 */ "fill_opt", + /* 346 */ "fill_mode", + /* 347 */ "group_by_list", + /* 348 */ "query_expression_body", + /* 349 */ "order_by_clause_opt", + /* 350 */ "slimit_clause_opt", + /* 351 */ 
"limit_clause_opt", + /* 352 */ "query_primary", + /* 353 */ "sort_specification_list", + /* 354 */ "sort_specification", + /* 355 */ "ordering_specification_opt", + /* 356 */ "null_ordering_opt", }; #endif /* defined(YYCOVERAGE) || !defined(NDEBUG) */ @@ -1688,296 +1675,292 @@ static const char *const yyRuleName[] = { /* 162 */ "tags_def ::= TAGS NK_LP column_def_list NK_RP", /* 163 */ "table_options ::=", /* 164 */ "table_options ::= table_options COMMENT NK_STRING", - /* 165 */ "table_options ::= table_options DELAY NK_INTEGER", - /* 166 */ "table_options ::= table_options FILE_FACTOR NK_FLOAT", - /* 167 */ "table_options ::= table_options ROLLUP NK_LP func_name_list NK_RP", - /* 168 */ "table_options ::= table_options TTL NK_INTEGER", - /* 169 */ "table_options ::= table_options SMA NK_LP col_name_list NK_RP", - /* 170 */ "alter_table_options ::= alter_table_option", - /* 171 */ "alter_table_options ::= alter_table_options alter_table_option", - /* 172 */ "alter_table_option ::= COMMENT NK_STRING", - /* 173 */ "alter_table_option ::= TTL NK_INTEGER", - /* 174 */ "col_name_list ::= col_name", - /* 175 */ "col_name_list ::= col_name_list NK_COMMA col_name", - /* 176 */ "col_name ::= column_name", - /* 177 */ "cmd ::= SHOW DNODES", - /* 178 */ "cmd ::= SHOW USERS", - /* 179 */ "cmd ::= SHOW DATABASES", - /* 180 */ "cmd ::= SHOW db_name_cond_opt TABLES like_pattern_opt", - /* 181 */ "cmd ::= SHOW db_name_cond_opt STABLES like_pattern_opt", - /* 182 */ "cmd ::= SHOW db_name_cond_opt VGROUPS", - /* 183 */ "cmd ::= SHOW MNODES", - /* 184 */ "cmd ::= SHOW MODULES", - /* 185 */ "cmd ::= SHOW QNODES", - /* 186 */ "cmd ::= SHOW FUNCTIONS", - /* 187 */ "cmd ::= SHOW INDEXES FROM table_name_cond from_db_opt", - /* 188 */ "cmd ::= SHOW STREAMS", - /* 189 */ "cmd ::= SHOW ACCOUNTS", - /* 190 */ "cmd ::= SHOW APPS", - /* 191 */ "cmd ::= SHOW CONNECTIONS", - /* 192 */ "cmd ::= SHOW LICENCE", - /* 193 */ "cmd ::= SHOW GRANTS", - /* 194 */ "cmd ::= SHOW CREATE DATABASE db_name", - /* 195 */ "cmd ::= SHOW CREATE TABLE full_table_name", - /* 196 */ "cmd ::= SHOW CREATE STABLE full_table_name", - /* 197 */ "cmd ::= SHOW QUERIES", - /* 198 */ "cmd ::= SHOW SCORES", - /* 199 */ "cmd ::= SHOW TOPICS", - /* 200 */ "cmd ::= SHOW VARIABLES", - /* 201 */ "cmd ::= SHOW BNODES", - /* 202 */ "cmd ::= SHOW SNODES", - /* 203 */ "cmd ::= SHOW CLUSTER", - /* 204 */ "cmd ::= SHOW TRANSACTIONS", - /* 205 */ "db_name_cond_opt ::=", - /* 206 */ "db_name_cond_opt ::= db_name NK_DOT", - /* 207 */ "like_pattern_opt ::=", - /* 208 */ "like_pattern_opt ::= LIKE NK_STRING", - /* 209 */ "table_name_cond ::= table_name", - /* 210 */ "from_db_opt ::=", - /* 211 */ "from_db_opt ::= FROM db_name", - /* 212 */ "func_name_list ::= func_name", - /* 213 */ "func_name_list ::= func_name_list NK_COMMA func_name", - /* 214 */ "func_name ::= function_name", - /* 215 */ "cmd ::= CREATE SMA INDEX not_exists_opt index_name ON table_name index_options", - /* 216 */ "cmd ::= CREATE FULLTEXT INDEX not_exists_opt index_name ON table_name NK_LP col_name_list NK_RP", - /* 217 */ "cmd ::= DROP INDEX exists_opt index_name ON table_name", - /* 218 */ "index_options ::=", - /* 219 */ "index_options ::= FUNCTION NK_LP func_list NK_RP INTERVAL NK_LP duration_literal NK_RP sliding_opt", - /* 220 */ "index_options ::= FUNCTION NK_LP func_list NK_RP INTERVAL NK_LP duration_literal NK_COMMA duration_literal NK_RP sliding_opt", - /* 221 */ "func_list ::= func", - /* 222 */ "func_list ::= func_list NK_COMMA func", - /* 223 */ "func ::= function_name NK_LP 
expression_list NK_RP", - /* 224 */ "cmd ::= CREATE TOPIC not_exists_opt topic_name topic_options AS query_expression", - /* 225 */ "cmd ::= CREATE TOPIC not_exists_opt topic_name topic_options AS db_name", + /* 165 */ "table_options ::= table_options FILE_FACTOR NK_FLOAT", + /* 166 */ "table_options ::= table_options ROLLUP NK_LP func_name_list NK_RP", + /* 167 */ "table_options ::= table_options TTL NK_INTEGER", + /* 168 */ "table_options ::= table_options SMA NK_LP col_name_list NK_RP", + /* 169 */ "alter_table_options ::= alter_table_option", + /* 170 */ "alter_table_options ::= alter_table_options alter_table_option", + /* 171 */ "alter_table_option ::= COMMENT NK_STRING", + /* 172 */ "alter_table_option ::= TTL NK_INTEGER", + /* 173 */ "col_name_list ::= col_name", + /* 174 */ "col_name_list ::= col_name_list NK_COMMA col_name", + /* 175 */ "col_name ::= column_name", + /* 176 */ "cmd ::= SHOW DNODES", + /* 177 */ "cmd ::= SHOW USERS", + /* 178 */ "cmd ::= SHOW DATABASES", + /* 179 */ "cmd ::= SHOW db_name_cond_opt TABLES like_pattern_opt", + /* 180 */ "cmd ::= SHOW db_name_cond_opt STABLES like_pattern_opt", + /* 181 */ "cmd ::= SHOW db_name_cond_opt VGROUPS", + /* 182 */ "cmd ::= SHOW MNODES", + /* 183 */ "cmd ::= SHOW MODULES", + /* 184 */ "cmd ::= SHOW QNODES", + /* 185 */ "cmd ::= SHOW FUNCTIONS", + /* 186 */ "cmd ::= SHOW INDEXES FROM table_name_cond from_db_opt", + /* 187 */ "cmd ::= SHOW STREAMS", + /* 188 */ "cmd ::= SHOW ACCOUNTS", + /* 189 */ "cmd ::= SHOW APPS", + /* 190 */ "cmd ::= SHOW CONNECTIONS", + /* 191 */ "cmd ::= SHOW LICENCE", + /* 192 */ "cmd ::= SHOW GRANTS", + /* 193 */ "cmd ::= SHOW CREATE DATABASE db_name", + /* 194 */ "cmd ::= SHOW CREATE TABLE full_table_name", + /* 195 */ "cmd ::= SHOW CREATE STABLE full_table_name", + /* 196 */ "cmd ::= SHOW QUERIES", + /* 197 */ "cmd ::= SHOW SCORES", + /* 198 */ "cmd ::= SHOW TOPICS", + /* 199 */ "cmd ::= SHOW VARIABLES", + /* 200 */ "cmd ::= SHOW BNODES", + /* 201 */ "cmd ::= SHOW SNODES", + /* 202 */ "cmd ::= SHOW CLUSTER", + /* 203 */ "cmd ::= SHOW TRANSACTIONS", + /* 204 */ "db_name_cond_opt ::=", + /* 205 */ "db_name_cond_opt ::= db_name NK_DOT", + /* 206 */ "like_pattern_opt ::=", + /* 207 */ "like_pattern_opt ::= LIKE NK_STRING", + /* 208 */ "table_name_cond ::= table_name", + /* 209 */ "from_db_opt ::=", + /* 210 */ "from_db_opt ::= FROM db_name", + /* 211 */ "func_name_list ::= func_name", + /* 212 */ "func_name_list ::= func_name_list NK_COMMA func_name", + /* 213 */ "func_name ::= function_name", + /* 214 */ "cmd ::= CREATE SMA INDEX not_exists_opt index_name ON table_name index_options", + /* 215 */ "cmd ::= CREATE FULLTEXT INDEX not_exists_opt index_name ON table_name NK_LP col_name_list NK_RP", + /* 216 */ "cmd ::= DROP INDEX exists_opt index_name ON table_name", + /* 217 */ "index_options ::=", + /* 218 */ "index_options ::= FUNCTION NK_LP func_list NK_RP INTERVAL NK_LP duration_literal NK_RP sliding_opt", + /* 219 */ "index_options ::= FUNCTION NK_LP func_list NK_RP INTERVAL NK_LP duration_literal NK_COMMA duration_literal NK_RP sliding_opt", + /* 220 */ "func_list ::= func", + /* 221 */ "func_list ::= func_list NK_COMMA func", + /* 222 */ "func ::= function_name NK_LP expression_list NK_RP", + /* 223 */ "cmd ::= CREATE TOPIC not_exists_opt topic_name AS query_expression", + /* 224 */ "cmd ::= CREATE TOPIC not_exists_opt topic_name AS DATABASE db_name", + /* 225 */ "cmd ::= CREATE TOPIC not_exists_opt topic_name AS STABLE full_table_name", /* 226 */ "cmd ::= DROP TOPIC exists_opt topic_name", - /* 227 
*/ "cmd ::= DROP CGROUP exists_opt cgroup_name ON topic_name", - /* 228 */ "topic_options ::=", - /* 229 */ "topic_options ::= topic_options WITH TABLE", - /* 230 */ "topic_options ::= topic_options WITH SCHEMA", - /* 231 */ "topic_options ::= topic_options WITH TAG", - /* 232 */ "cmd ::= DESC full_table_name", - /* 233 */ "cmd ::= DESCRIBE full_table_name", - /* 234 */ "cmd ::= RESET QUERY CACHE", - /* 235 */ "cmd ::= EXPLAIN analyze_opt explain_options query_expression", - /* 236 */ "analyze_opt ::=", - /* 237 */ "analyze_opt ::= ANALYZE", - /* 238 */ "explain_options ::=", - /* 239 */ "explain_options ::= explain_options VERBOSE NK_BOOL", - /* 240 */ "explain_options ::= explain_options RATIO NK_FLOAT", - /* 241 */ "cmd ::= COMPACT VNODES IN NK_LP integer_list NK_RP", - /* 242 */ "cmd ::= CREATE agg_func_opt FUNCTION not_exists_opt function_name AS NK_STRING OUTPUTTYPE type_name bufsize_opt", - /* 243 */ "cmd ::= DROP FUNCTION exists_opt function_name", - /* 244 */ "agg_func_opt ::=", - /* 245 */ "agg_func_opt ::= AGGREGATE", - /* 246 */ "bufsize_opt ::=", - /* 247 */ "bufsize_opt ::= BUFSIZE NK_INTEGER", - /* 248 */ "cmd ::= CREATE STREAM not_exists_opt stream_name stream_options into_opt AS query_expression", - /* 249 */ "cmd ::= DROP STREAM exists_opt stream_name", - /* 250 */ "into_opt ::=", - /* 251 */ "into_opt ::= INTO full_table_name", - /* 252 */ "stream_options ::=", - /* 253 */ "stream_options ::= stream_options TRIGGER AT_ONCE", - /* 254 */ "stream_options ::= stream_options TRIGGER WINDOW_CLOSE", - /* 255 */ "stream_options ::= stream_options WATERMARK duration_literal", - /* 256 */ "cmd ::= KILL CONNECTION NK_INTEGER", - /* 257 */ "cmd ::= KILL QUERY NK_INTEGER", - /* 258 */ "cmd ::= KILL TRANSACTION NK_INTEGER", - /* 259 */ "cmd ::= MERGE VGROUP NK_INTEGER NK_INTEGER", - /* 260 */ "cmd ::= REDISTRIBUTE VGROUP NK_INTEGER dnode_list", - /* 261 */ "cmd ::= SPLIT VGROUP NK_INTEGER", - /* 262 */ "dnode_list ::= DNODE NK_INTEGER", - /* 263 */ "dnode_list ::= dnode_list DNODE NK_INTEGER", - /* 264 */ "cmd ::= SYNCDB db_name REPLICA", - /* 265 */ "cmd ::= query_expression", - /* 266 */ "literal ::= NK_INTEGER", - /* 267 */ "literal ::= NK_FLOAT", - /* 268 */ "literal ::= NK_STRING", - /* 269 */ "literal ::= NK_BOOL", - /* 270 */ "literal ::= TIMESTAMP NK_STRING", - /* 271 */ "literal ::= duration_literal", - /* 272 */ "literal ::= NULL", - /* 273 */ "literal ::= NK_QUESTION", - /* 274 */ "duration_literal ::= NK_VARIABLE", - /* 275 */ "signed ::= NK_INTEGER", - /* 276 */ "signed ::= NK_PLUS NK_INTEGER", - /* 277 */ "signed ::= NK_MINUS NK_INTEGER", - /* 278 */ "signed ::= NK_FLOAT", - /* 279 */ "signed ::= NK_PLUS NK_FLOAT", - /* 280 */ "signed ::= NK_MINUS NK_FLOAT", - /* 281 */ "signed_literal ::= signed", - /* 282 */ "signed_literal ::= NK_STRING", - /* 283 */ "signed_literal ::= NK_BOOL", - /* 284 */ "signed_literal ::= TIMESTAMP NK_STRING", - /* 285 */ "signed_literal ::= duration_literal", - /* 286 */ "signed_literal ::= NULL", - /* 287 */ "signed_literal ::= literal_func", - /* 288 */ "literal_list ::= signed_literal", - /* 289 */ "literal_list ::= literal_list NK_COMMA signed_literal", - /* 290 */ "db_name ::= NK_ID", - /* 291 */ "table_name ::= NK_ID", - /* 292 */ "column_name ::= NK_ID", - /* 293 */ "function_name ::= NK_ID", - /* 294 */ "table_alias ::= NK_ID", - /* 295 */ "column_alias ::= NK_ID", - /* 296 */ "user_name ::= NK_ID", - /* 297 */ "index_name ::= NK_ID", - /* 298 */ "topic_name ::= NK_ID", - /* 299 */ "stream_name ::= NK_ID", - /* 300 */ "cgroup_name ::= 
NK_ID", - /* 301 */ "expression ::= literal", - /* 302 */ "expression ::= pseudo_column", - /* 303 */ "expression ::= column_reference", - /* 304 */ "expression ::= function_expression", - /* 305 */ "expression ::= subquery", - /* 306 */ "expression ::= NK_LP expression NK_RP", - /* 307 */ "expression ::= NK_PLUS expression", - /* 308 */ "expression ::= NK_MINUS expression", - /* 309 */ "expression ::= expression NK_PLUS expression", - /* 310 */ "expression ::= expression NK_MINUS expression", - /* 311 */ "expression ::= expression NK_STAR expression", - /* 312 */ "expression ::= expression NK_SLASH expression", - /* 313 */ "expression ::= expression NK_REM expression", - /* 314 */ "expression ::= column_reference NK_ARROW NK_STRING", - /* 315 */ "expression_list ::= expression", - /* 316 */ "expression_list ::= expression_list NK_COMMA expression", - /* 317 */ "column_reference ::= column_name", - /* 318 */ "column_reference ::= table_name NK_DOT column_name", - /* 319 */ "pseudo_column ::= ROWTS", - /* 320 */ "pseudo_column ::= TBNAME", - /* 321 */ "pseudo_column ::= table_name NK_DOT TBNAME", - /* 322 */ "pseudo_column ::= QSTARTTS", - /* 323 */ "pseudo_column ::= QENDTS", - /* 324 */ "pseudo_column ::= WSTARTTS", - /* 325 */ "pseudo_column ::= WENDTS", - /* 326 */ "pseudo_column ::= WDURATION", - /* 327 */ "function_expression ::= function_name NK_LP expression_list NK_RP", - /* 328 */ "function_expression ::= star_func NK_LP star_func_para_list NK_RP", - /* 329 */ "function_expression ::= CAST NK_LP expression AS type_name NK_RP", - /* 330 */ "function_expression ::= literal_func", - /* 331 */ "literal_func ::= noarg_func NK_LP NK_RP", - /* 332 */ "literal_func ::= NOW", - /* 333 */ "noarg_func ::= NOW", - /* 334 */ "noarg_func ::= TODAY", - /* 335 */ "noarg_func ::= TIMEZONE", - /* 336 */ "star_func ::= COUNT", - /* 337 */ "star_func ::= FIRST", - /* 338 */ "star_func ::= LAST", - /* 339 */ "star_func ::= LAST_ROW", - /* 340 */ "star_func_para_list ::= NK_STAR", - /* 341 */ "star_func_para_list ::= other_para_list", - /* 342 */ "other_para_list ::= star_func_para", - /* 343 */ "other_para_list ::= other_para_list NK_COMMA star_func_para", - /* 344 */ "star_func_para ::= expression", - /* 345 */ "star_func_para ::= table_name NK_DOT NK_STAR", - /* 346 */ "predicate ::= expression compare_op expression", - /* 347 */ "predicate ::= expression BETWEEN expression AND expression", - /* 348 */ "predicate ::= expression NOT BETWEEN expression AND expression", - /* 349 */ "predicate ::= expression IS NULL", - /* 350 */ "predicate ::= expression IS NOT NULL", - /* 351 */ "predicate ::= expression in_op in_predicate_value", - /* 352 */ "compare_op ::= NK_LT", - /* 353 */ "compare_op ::= NK_GT", - /* 354 */ "compare_op ::= NK_LE", - /* 355 */ "compare_op ::= NK_GE", - /* 356 */ "compare_op ::= NK_NE", - /* 357 */ "compare_op ::= NK_EQ", - /* 358 */ "compare_op ::= LIKE", - /* 359 */ "compare_op ::= NOT LIKE", - /* 360 */ "compare_op ::= MATCH", - /* 361 */ "compare_op ::= NMATCH", - /* 362 */ "compare_op ::= CONTAINS", - /* 363 */ "in_op ::= IN", - /* 364 */ "in_op ::= NOT IN", - /* 365 */ "in_predicate_value ::= NK_LP expression_list NK_RP", - /* 366 */ "boolean_value_expression ::= boolean_primary", - /* 367 */ "boolean_value_expression ::= NOT boolean_primary", - /* 368 */ "boolean_value_expression ::= boolean_value_expression OR boolean_value_expression", - /* 369 */ "boolean_value_expression ::= boolean_value_expression AND boolean_value_expression", - /* 370 */ "boolean_primary ::= 
predicate", - /* 371 */ "boolean_primary ::= NK_LP boolean_value_expression NK_RP", - /* 372 */ "common_expression ::= expression", - /* 373 */ "common_expression ::= boolean_value_expression", - /* 374 */ "from_clause ::= FROM table_reference_list", - /* 375 */ "table_reference_list ::= table_reference", - /* 376 */ "table_reference_list ::= table_reference_list NK_COMMA table_reference", - /* 377 */ "table_reference ::= table_primary", - /* 378 */ "table_reference ::= joined_table", - /* 379 */ "table_primary ::= table_name alias_opt", - /* 380 */ "table_primary ::= db_name NK_DOT table_name alias_opt", - /* 381 */ "table_primary ::= subquery alias_opt", - /* 382 */ "table_primary ::= parenthesized_joined_table", - /* 383 */ "alias_opt ::=", - /* 384 */ "alias_opt ::= table_alias", - /* 385 */ "alias_opt ::= AS table_alias", - /* 386 */ "parenthesized_joined_table ::= NK_LP joined_table NK_RP", - /* 387 */ "parenthesized_joined_table ::= NK_LP parenthesized_joined_table NK_RP", - /* 388 */ "joined_table ::= table_reference join_type JOIN table_reference ON search_condition", - /* 389 */ "join_type ::=", - /* 390 */ "join_type ::= INNER", - /* 391 */ "query_specification ::= SELECT set_quantifier_opt select_list from_clause where_clause_opt partition_by_clause_opt twindow_clause_opt group_by_clause_opt having_clause_opt", - /* 392 */ "set_quantifier_opt ::=", - /* 393 */ "set_quantifier_opt ::= DISTINCT", - /* 394 */ "set_quantifier_opt ::= ALL", - /* 395 */ "select_list ::= NK_STAR", - /* 396 */ "select_list ::= select_sublist", - /* 397 */ "select_sublist ::= select_item", - /* 398 */ "select_sublist ::= select_sublist NK_COMMA select_item", - /* 399 */ "select_item ::= common_expression", - /* 400 */ "select_item ::= common_expression column_alias", - /* 401 */ "select_item ::= common_expression AS column_alias", - /* 402 */ "select_item ::= table_name NK_DOT NK_STAR", - /* 403 */ "where_clause_opt ::=", - /* 404 */ "where_clause_opt ::= WHERE search_condition", - /* 405 */ "partition_by_clause_opt ::=", - /* 406 */ "partition_by_clause_opt ::= PARTITION BY expression_list", - /* 407 */ "twindow_clause_opt ::=", - /* 408 */ "twindow_clause_opt ::= SESSION NK_LP column_reference NK_COMMA duration_literal NK_RP", - /* 409 */ "twindow_clause_opt ::= STATE_WINDOW NK_LP expression NK_RP", - /* 410 */ "twindow_clause_opt ::= INTERVAL NK_LP duration_literal NK_RP sliding_opt fill_opt", - /* 411 */ "twindow_clause_opt ::= INTERVAL NK_LP duration_literal NK_COMMA duration_literal NK_RP sliding_opt fill_opt", - /* 412 */ "sliding_opt ::=", - /* 413 */ "sliding_opt ::= SLIDING NK_LP duration_literal NK_RP", - /* 414 */ "fill_opt ::=", - /* 415 */ "fill_opt ::= FILL NK_LP fill_mode NK_RP", - /* 416 */ "fill_opt ::= FILL NK_LP VALUE NK_COMMA literal_list NK_RP", - /* 417 */ "fill_mode ::= NONE", - /* 418 */ "fill_mode ::= PREV", - /* 419 */ "fill_mode ::= NULL", - /* 420 */ "fill_mode ::= LINEAR", - /* 421 */ "fill_mode ::= NEXT", - /* 422 */ "group_by_clause_opt ::=", - /* 423 */ "group_by_clause_opt ::= GROUP BY group_by_list", - /* 424 */ "group_by_list ::= expression", - /* 425 */ "group_by_list ::= group_by_list NK_COMMA expression", - /* 426 */ "having_clause_opt ::=", - /* 427 */ "having_clause_opt ::= HAVING search_condition", - /* 428 */ "query_expression ::= query_expression_body order_by_clause_opt slimit_clause_opt limit_clause_opt", - /* 429 */ "query_expression_body ::= query_primary", - /* 430 */ "query_expression_body ::= query_expression_body UNION ALL query_expression_body", - /* 
431 */ "query_expression_body ::= query_expression_body UNION query_expression_body", - /* 432 */ "query_primary ::= query_specification", - /* 433 */ "query_primary ::= NK_LP query_expression_body order_by_clause_opt slimit_clause_opt limit_clause_opt NK_RP", - /* 434 */ "order_by_clause_opt ::=", - /* 435 */ "order_by_clause_opt ::= ORDER BY sort_specification_list", - /* 436 */ "slimit_clause_opt ::=", - /* 437 */ "slimit_clause_opt ::= SLIMIT NK_INTEGER", - /* 438 */ "slimit_clause_opt ::= SLIMIT NK_INTEGER SOFFSET NK_INTEGER", - /* 439 */ "slimit_clause_opt ::= SLIMIT NK_INTEGER NK_COMMA NK_INTEGER", - /* 440 */ "limit_clause_opt ::=", - /* 441 */ "limit_clause_opt ::= LIMIT NK_INTEGER", - /* 442 */ "limit_clause_opt ::= LIMIT NK_INTEGER OFFSET NK_INTEGER", - /* 443 */ "limit_clause_opt ::= LIMIT NK_INTEGER NK_COMMA NK_INTEGER", - /* 444 */ "subquery ::= NK_LP query_expression NK_RP", - /* 445 */ "search_condition ::= common_expression", - /* 446 */ "sort_specification_list ::= sort_specification", - /* 447 */ "sort_specification_list ::= sort_specification_list NK_COMMA sort_specification", - /* 448 */ "sort_specification ::= expression ordering_specification_opt null_ordering_opt", - /* 449 */ "ordering_specification_opt ::=", - /* 450 */ "ordering_specification_opt ::= ASC", - /* 451 */ "ordering_specification_opt ::= DESC", - /* 452 */ "null_ordering_opt ::=", - /* 453 */ "null_ordering_opt ::= NULLS FIRST", - /* 454 */ "null_ordering_opt ::= NULLS LAST", + /* 227 */ "cmd ::= DROP CONSUMER GROUP exists_opt cgroup_name ON topic_name", + /* 228 */ "cmd ::= DESC full_table_name", + /* 229 */ "cmd ::= DESCRIBE full_table_name", + /* 230 */ "cmd ::= RESET QUERY CACHE", + /* 231 */ "cmd ::= EXPLAIN analyze_opt explain_options query_expression", + /* 232 */ "analyze_opt ::=", + /* 233 */ "analyze_opt ::= ANALYZE", + /* 234 */ "explain_options ::=", + /* 235 */ "explain_options ::= explain_options VERBOSE NK_BOOL", + /* 236 */ "explain_options ::= explain_options RATIO NK_FLOAT", + /* 237 */ "cmd ::= COMPACT VNODES IN NK_LP integer_list NK_RP", + /* 238 */ "cmd ::= CREATE agg_func_opt FUNCTION not_exists_opt function_name AS NK_STRING OUTPUTTYPE type_name bufsize_opt", + /* 239 */ "cmd ::= DROP FUNCTION exists_opt function_name", + /* 240 */ "agg_func_opt ::=", + /* 241 */ "agg_func_opt ::= AGGREGATE", + /* 242 */ "bufsize_opt ::=", + /* 243 */ "bufsize_opt ::= BUFSIZE NK_INTEGER", + /* 244 */ "cmd ::= CREATE STREAM not_exists_opt stream_name stream_options into_opt AS query_expression", + /* 245 */ "cmd ::= DROP STREAM exists_opt stream_name", + /* 246 */ "into_opt ::=", + /* 247 */ "into_opt ::= INTO full_table_name", + /* 248 */ "stream_options ::=", + /* 249 */ "stream_options ::= stream_options TRIGGER AT_ONCE", + /* 250 */ "stream_options ::= stream_options TRIGGER WINDOW_CLOSE", + /* 251 */ "stream_options ::= stream_options WATERMARK duration_literal", + /* 252 */ "cmd ::= KILL CONNECTION NK_INTEGER", + /* 253 */ "cmd ::= KILL QUERY NK_INTEGER", + /* 254 */ "cmd ::= KILL TRANSACTION NK_INTEGER", + /* 255 */ "cmd ::= MERGE VGROUP NK_INTEGER NK_INTEGER", + /* 256 */ "cmd ::= REDISTRIBUTE VGROUP NK_INTEGER dnode_list", + /* 257 */ "cmd ::= SPLIT VGROUP NK_INTEGER", + /* 258 */ "dnode_list ::= DNODE NK_INTEGER", + /* 259 */ "dnode_list ::= dnode_list DNODE NK_INTEGER", + /* 260 */ "cmd ::= SYNCDB db_name REPLICA", + /* 261 */ "cmd ::= query_expression", + /* 262 */ "literal ::= NK_INTEGER", + /* 263 */ "literal ::= NK_FLOAT", + /* 264 */ "literal ::= NK_STRING", + /* 265 */ "literal 
::= NK_BOOL", + /* 266 */ "literal ::= TIMESTAMP NK_STRING", + /* 267 */ "literal ::= duration_literal", + /* 268 */ "literal ::= NULL", + /* 269 */ "literal ::= NK_QUESTION", + /* 270 */ "duration_literal ::= NK_VARIABLE", + /* 271 */ "signed ::= NK_INTEGER", + /* 272 */ "signed ::= NK_PLUS NK_INTEGER", + /* 273 */ "signed ::= NK_MINUS NK_INTEGER", + /* 274 */ "signed ::= NK_FLOAT", + /* 275 */ "signed ::= NK_PLUS NK_FLOAT", + /* 276 */ "signed ::= NK_MINUS NK_FLOAT", + /* 277 */ "signed_literal ::= signed", + /* 278 */ "signed_literal ::= NK_STRING", + /* 279 */ "signed_literal ::= NK_BOOL", + /* 280 */ "signed_literal ::= TIMESTAMP NK_STRING", + /* 281 */ "signed_literal ::= duration_literal", + /* 282 */ "signed_literal ::= NULL", + /* 283 */ "signed_literal ::= literal_func", + /* 284 */ "literal_list ::= signed_literal", + /* 285 */ "literal_list ::= literal_list NK_COMMA signed_literal", + /* 286 */ "db_name ::= NK_ID", + /* 287 */ "table_name ::= NK_ID", + /* 288 */ "column_name ::= NK_ID", + /* 289 */ "function_name ::= NK_ID", + /* 290 */ "table_alias ::= NK_ID", + /* 291 */ "column_alias ::= NK_ID", + /* 292 */ "user_name ::= NK_ID", + /* 293 */ "index_name ::= NK_ID", + /* 294 */ "topic_name ::= NK_ID", + /* 295 */ "stream_name ::= NK_ID", + /* 296 */ "cgroup_name ::= NK_ID", + /* 297 */ "expression ::= literal", + /* 298 */ "expression ::= pseudo_column", + /* 299 */ "expression ::= column_reference", + /* 300 */ "expression ::= function_expression", + /* 301 */ "expression ::= subquery", + /* 302 */ "expression ::= NK_LP expression NK_RP", + /* 303 */ "expression ::= NK_PLUS expression", + /* 304 */ "expression ::= NK_MINUS expression", + /* 305 */ "expression ::= expression NK_PLUS expression", + /* 306 */ "expression ::= expression NK_MINUS expression", + /* 307 */ "expression ::= expression NK_STAR expression", + /* 308 */ "expression ::= expression NK_SLASH expression", + /* 309 */ "expression ::= expression NK_REM expression", + /* 310 */ "expression ::= column_reference NK_ARROW NK_STRING", + /* 311 */ "expression_list ::= expression", + /* 312 */ "expression_list ::= expression_list NK_COMMA expression", + /* 313 */ "column_reference ::= column_name", + /* 314 */ "column_reference ::= table_name NK_DOT column_name", + /* 315 */ "pseudo_column ::= ROWTS", + /* 316 */ "pseudo_column ::= TBNAME", + /* 317 */ "pseudo_column ::= table_name NK_DOT TBNAME", + /* 318 */ "pseudo_column ::= QSTARTTS", + /* 319 */ "pseudo_column ::= QENDTS", + /* 320 */ "pseudo_column ::= WSTARTTS", + /* 321 */ "pseudo_column ::= WENDTS", + /* 322 */ "pseudo_column ::= WDURATION", + /* 323 */ "function_expression ::= function_name NK_LP expression_list NK_RP", + /* 324 */ "function_expression ::= star_func NK_LP star_func_para_list NK_RP", + /* 325 */ "function_expression ::= CAST NK_LP expression AS type_name NK_RP", + /* 326 */ "function_expression ::= literal_func", + /* 327 */ "literal_func ::= noarg_func NK_LP NK_RP", + /* 328 */ "literal_func ::= NOW", + /* 329 */ "noarg_func ::= NOW", + /* 330 */ "noarg_func ::= TODAY", + /* 331 */ "noarg_func ::= TIMEZONE", + /* 332 */ "star_func ::= COUNT", + /* 333 */ "star_func ::= FIRST", + /* 334 */ "star_func ::= LAST", + /* 335 */ "star_func ::= LAST_ROW", + /* 336 */ "star_func_para_list ::= NK_STAR", + /* 337 */ "star_func_para_list ::= other_para_list", + /* 338 */ "other_para_list ::= star_func_para", + /* 339 */ "other_para_list ::= other_para_list NK_COMMA star_func_para", + /* 340 */ "star_func_para ::= expression", + /* 341 */ 
"star_func_para ::= table_name NK_DOT NK_STAR", + /* 342 */ "predicate ::= expression compare_op expression", + /* 343 */ "predicate ::= expression BETWEEN expression AND expression", + /* 344 */ "predicate ::= expression NOT BETWEEN expression AND expression", + /* 345 */ "predicate ::= expression IS NULL", + /* 346 */ "predicate ::= expression IS NOT NULL", + /* 347 */ "predicate ::= expression in_op in_predicate_value", + /* 348 */ "compare_op ::= NK_LT", + /* 349 */ "compare_op ::= NK_GT", + /* 350 */ "compare_op ::= NK_LE", + /* 351 */ "compare_op ::= NK_GE", + /* 352 */ "compare_op ::= NK_NE", + /* 353 */ "compare_op ::= NK_EQ", + /* 354 */ "compare_op ::= LIKE", + /* 355 */ "compare_op ::= NOT LIKE", + /* 356 */ "compare_op ::= MATCH", + /* 357 */ "compare_op ::= NMATCH", + /* 358 */ "compare_op ::= CONTAINS", + /* 359 */ "in_op ::= IN", + /* 360 */ "in_op ::= NOT IN", + /* 361 */ "in_predicate_value ::= NK_LP expression_list NK_RP", + /* 362 */ "boolean_value_expression ::= boolean_primary", + /* 363 */ "boolean_value_expression ::= NOT boolean_primary", + /* 364 */ "boolean_value_expression ::= boolean_value_expression OR boolean_value_expression", + /* 365 */ "boolean_value_expression ::= boolean_value_expression AND boolean_value_expression", + /* 366 */ "boolean_primary ::= predicate", + /* 367 */ "boolean_primary ::= NK_LP boolean_value_expression NK_RP", + /* 368 */ "common_expression ::= expression", + /* 369 */ "common_expression ::= boolean_value_expression", + /* 370 */ "from_clause ::= FROM table_reference_list", + /* 371 */ "table_reference_list ::= table_reference", + /* 372 */ "table_reference_list ::= table_reference_list NK_COMMA table_reference", + /* 373 */ "table_reference ::= table_primary", + /* 374 */ "table_reference ::= joined_table", + /* 375 */ "table_primary ::= table_name alias_opt", + /* 376 */ "table_primary ::= db_name NK_DOT table_name alias_opt", + /* 377 */ "table_primary ::= subquery alias_opt", + /* 378 */ "table_primary ::= parenthesized_joined_table", + /* 379 */ "alias_opt ::=", + /* 380 */ "alias_opt ::= table_alias", + /* 381 */ "alias_opt ::= AS table_alias", + /* 382 */ "parenthesized_joined_table ::= NK_LP joined_table NK_RP", + /* 383 */ "parenthesized_joined_table ::= NK_LP parenthesized_joined_table NK_RP", + /* 384 */ "joined_table ::= table_reference join_type JOIN table_reference ON search_condition", + /* 385 */ "join_type ::=", + /* 386 */ "join_type ::= INNER", + /* 387 */ "query_specification ::= SELECT set_quantifier_opt select_list from_clause where_clause_opt partition_by_clause_opt twindow_clause_opt group_by_clause_opt having_clause_opt", + /* 388 */ "set_quantifier_opt ::=", + /* 389 */ "set_quantifier_opt ::= DISTINCT", + /* 390 */ "set_quantifier_opt ::= ALL", + /* 391 */ "select_list ::= NK_STAR", + /* 392 */ "select_list ::= select_sublist", + /* 393 */ "select_sublist ::= select_item", + /* 394 */ "select_sublist ::= select_sublist NK_COMMA select_item", + /* 395 */ "select_item ::= common_expression", + /* 396 */ "select_item ::= common_expression column_alias", + /* 397 */ "select_item ::= common_expression AS column_alias", + /* 398 */ "select_item ::= table_name NK_DOT NK_STAR", + /* 399 */ "where_clause_opt ::=", + /* 400 */ "where_clause_opt ::= WHERE search_condition", + /* 401 */ "partition_by_clause_opt ::=", + /* 402 */ "partition_by_clause_opt ::= PARTITION BY expression_list", + /* 403 */ "twindow_clause_opt ::=", + /* 404 */ "twindow_clause_opt ::= SESSION NK_LP column_reference NK_COMMA duration_literal 
NK_RP", + /* 405 */ "twindow_clause_opt ::= STATE_WINDOW NK_LP expression NK_RP", + /* 406 */ "twindow_clause_opt ::= INTERVAL NK_LP duration_literal NK_RP sliding_opt fill_opt", + /* 407 */ "twindow_clause_opt ::= INTERVAL NK_LP duration_literal NK_COMMA duration_literal NK_RP sliding_opt fill_opt", + /* 408 */ "sliding_opt ::=", + /* 409 */ "sliding_opt ::= SLIDING NK_LP duration_literal NK_RP", + /* 410 */ "fill_opt ::=", + /* 411 */ "fill_opt ::= FILL NK_LP fill_mode NK_RP", + /* 412 */ "fill_opt ::= FILL NK_LP VALUE NK_COMMA literal_list NK_RP", + /* 413 */ "fill_mode ::= NONE", + /* 414 */ "fill_mode ::= PREV", + /* 415 */ "fill_mode ::= NULL", + /* 416 */ "fill_mode ::= LINEAR", + /* 417 */ "fill_mode ::= NEXT", + /* 418 */ "group_by_clause_opt ::=", + /* 419 */ "group_by_clause_opt ::= GROUP BY group_by_list", + /* 420 */ "group_by_list ::= expression", + /* 421 */ "group_by_list ::= group_by_list NK_COMMA expression", + /* 422 */ "having_clause_opt ::=", + /* 423 */ "having_clause_opt ::= HAVING search_condition", + /* 424 */ "query_expression ::= query_expression_body order_by_clause_opt slimit_clause_opt limit_clause_opt", + /* 425 */ "query_expression_body ::= query_primary", + /* 426 */ "query_expression_body ::= query_expression_body UNION ALL query_expression_body", + /* 427 */ "query_expression_body ::= query_expression_body UNION query_expression_body", + /* 428 */ "query_primary ::= query_specification", + /* 429 */ "query_primary ::= NK_LP query_expression_body order_by_clause_opt slimit_clause_opt limit_clause_opt NK_RP", + /* 430 */ "order_by_clause_opt ::=", + /* 431 */ "order_by_clause_opt ::= ORDER BY sort_specification_list", + /* 432 */ "slimit_clause_opt ::=", + /* 433 */ "slimit_clause_opt ::= SLIMIT NK_INTEGER", + /* 434 */ "slimit_clause_opt ::= SLIMIT NK_INTEGER SOFFSET NK_INTEGER", + /* 435 */ "slimit_clause_opt ::= SLIMIT NK_INTEGER NK_COMMA NK_INTEGER", + /* 436 */ "limit_clause_opt ::=", + /* 437 */ "limit_clause_opt ::= LIMIT NK_INTEGER", + /* 438 */ "limit_clause_opt ::= LIMIT NK_INTEGER OFFSET NK_INTEGER", + /* 439 */ "limit_clause_opt ::= LIMIT NK_INTEGER NK_COMMA NK_INTEGER", + /* 440 */ "subquery ::= NK_LP query_expression NK_RP", + /* 441 */ "search_condition ::= common_expression", + /* 442 */ "sort_specification_list ::= sort_specification", + /* 443 */ "sort_specification_list ::= sort_specification_list NK_COMMA sort_specification", + /* 444 */ "sort_specification ::= expression ordering_specification_opt null_ordering_opt", + /* 445 */ "ordering_specification_opt ::=", + /* 446 */ "ordering_specification_opt ::= ASC", + /* 447 */ "ordering_specification_opt ::= DESC", + /* 448 */ "null_ordering_opt ::=", + /* 449 */ "null_ordering_opt ::= NULLS FIRST", + /* 450 */ "null_ordering_opt ::= NULLS LAST", }; #endif /* NDEBUG */ @@ -2104,175 +2087,174 @@ static void yy_destructor( */ /********* Begin destructor definitions ***************************************/ /* Default NON-TERMINAL Destructor */ - case 240: /* cmd */ - case 243: /* literal */ - case 254: /* db_options */ - case 256: /* alter_db_options */ - case 261: /* retention */ - case 262: /* full_table_name */ - case 265: /* table_options */ - case 269: /* alter_table_clause */ - case 270: /* alter_table_options */ - case 273: /* signed_literal */ - case 274: /* create_subtable_clause */ - case 277: /* drop_table_clause */ - case 280: /* column_def */ - case 283: /* col_name */ - case 284: /* db_name_cond_opt */ - case 285: /* like_pattern_opt */ - case 286: /* table_name_cond */ - case 
- case 287: /* from_db_opt */
- case 288: /* func_name */
- case 291: /* index_options */
- case 293: /* duration_literal */
- case 294: /* sliding_opt */
- case 295: /* func */
- case 298: /* topic_options */
- case 299: /* query_expression */
- case 302: /* explain_options */
- case 306: /* stream_options */
- case 307: /* into_opt */
- case 309: /* signed */
- case 310: /* literal_func */
- case 313: /* expression */
- case 314: /* pseudo_column */
- case 315: /* column_reference */
- case 316: /* function_expression */
- case 317: /* subquery */
- case 322: /* star_func_para */
- case 323: /* predicate */
- case 326: /* in_predicate_value */
- case 327: /* boolean_value_expression */
- case 328: /* boolean_primary */
- case 329: /* common_expression */
- case 330: /* from_clause */
- case 331: /* table_reference_list */
- case 332: /* table_reference */
- case 333: /* table_primary */
- case 334: /* joined_table */
- case 336: /* parenthesized_joined_table */
- case 338: /* search_condition */
- case 339: /* query_specification */
- case 342: /* where_clause_opt */
- case 344: /* twindow_clause_opt */
- case 346: /* having_clause_opt */
- case 348: /* select_item */
- case 349: /* fill_opt */
- case 352: /* query_expression_body */
- case 354: /* slimit_clause_opt */
- case 355: /* limit_clause_opt */
- case 356: /* query_primary */
- case 358: /* sort_specification */
+ case 237: /* cmd */
+ case 240: /* literal */
+ case 251: /* db_options */
+ case 253: /* alter_db_options */
+ case 258: /* retention */
+ case 259: /* full_table_name */
+ case 262: /* table_options */
+ case 266: /* alter_table_clause */
+ case 267: /* alter_table_options */
+ case 270: /* signed_literal */
+ case 271: /* create_subtable_clause */
+ case 274: /* drop_table_clause */
+ case 277: /* column_def */
+ case 280: /* col_name */
+ case 281: /* db_name_cond_opt */
+ case 282: /* like_pattern_opt */
+ case 283: /* table_name_cond */
+ case 284: /* from_db_opt */
+ case 285: /* func_name */
+ case 288: /* index_options */
+ case 290: /* duration_literal */
+ case 291: /* sliding_opt */
+ case 292: /* func */
+ case 295: /* query_expression */
+ case 298: /* explain_options */
+ case 302: /* stream_options */
+ case 303: /* into_opt */
+ case 305: /* signed */
+ case 306: /* literal_func */
+ case 309: /* expression */
+ case 310: /* pseudo_column */
+ case 311: /* column_reference */
+ case 312: /* function_expression */
+ case 313: /* subquery */
+ case 318: /* star_func_para */
+ case 319: /* predicate */
+ case 322: /* in_predicate_value */
+ case 323: /* boolean_value_expression */
+ case 324: /* boolean_primary */
+ case 325: /* common_expression */
+ case 326: /* from_clause */
+ case 327: /* table_reference_list */
+ case 328: /* table_reference */
+ case 329: /* table_primary */
+ case 330: /* joined_table */
+ case 332: /* parenthesized_joined_table */
+ case 334: /* search_condition */
+ case 335: /* query_specification */
+ case 338: /* where_clause_opt */
+ case 340: /* twindow_clause_opt */
+ case 342: /* having_clause_opt */
+ case 344: /* select_item */
+ case 345: /* fill_opt */
+ case 348: /* query_expression_body */
+ case 350: /* slimit_clause_opt */
+ case 351: /* limit_clause_opt */
+ case 352: /* query_primary */
+ case 354: /* sort_specification */
{
- nodesDestroyNode((yypminor->yy636));
+ nodesDestroyNode((yypminor->yy686));
}
break;
- case 241: /* account_options */
- case 242: /* alter_account_options */
- case 244: /* alter_account_option */
- case 304: /* bufsize_opt */
{
}
break;
- case 245: /* user_name */
- case 247: /* priv_level */
- case 250: /* db_name */
- case 251: /* dnode_endpoint */
- case 252: /* dnode_host_name */
- case 271: /* column_name */
- case 279: /* table_name */
- case 289: /* function_name */
- case 290: /* index_name */
- case 297: /* topic_name */
- case 300: /* cgroup_name */
- case 305: /* stream_name */
- case 311: /* table_alias */
- case 312: /* column_alias */
- case 318: /* star_func */
- case 320: /* noarg_func */
- case 335: /* alias_opt */
+ case 238: /* account_options */
+ case 239: /* alter_account_options */
+ case 241: /* alter_account_option */
+ case 300: /* bufsize_opt */
{
}
break;
+ case 242: /* user_name */
+ case 244: /* priv_level */
+ case 247: /* db_name */
+ case 248: /* dnode_endpoint */
+ case 249: /* dnode_host_name */
+ case 268: /* column_name */
+ case 276: /* table_name */
+ case 286: /* function_name */
+ case 287: /* index_name */
+ case 294: /* topic_name */
+ case 296: /* cgroup_name */
+ case 301: /* stream_name */
+ case 307: /* table_alias */
+ case 308: /* column_alias */
+ case 314: /* star_func */
+ case 316: /* noarg_func */
+ case 331: /* alias_opt */
{
}
break;
- case 246: /* privileges */
- case 248: /* priv_type_list */
- case 249: /* priv_type */
+ case 243: /* privileges */
+ case 245: /* priv_type_list */
+ case 246: /* priv_type */
{
}
break;
- case 253: /* not_exists_opt */
- case 255: /* exists_opt */
- case 301: /* analyze_opt */
- case 303: /* agg_func_opt */
- case 340: /* set_quantifier_opt */
+ case 250: /* not_exists_opt */
+ case 252: /* exists_opt */
+ case 297: /* analyze_opt */
+ case 299: /* agg_func_opt */
+ case 336: /* set_quantifier_opt */
{
}
break;
- case 257: /* integer_list */
- case 258: /* variable_list */
- case 259: /* retention_list */
- case 263: /* column_def_list */
- case 264: /* tags_def_opt */
- case 266: /* multi_create_clause */
- case 267: /* tags_def */
- case 268: /* multi_drop_clause */
- case 275: /* specific_tags_opt */
- case 276: /* literal_list */
- case 278: /* col_name_list */
- case 281: /* func_name_list */
- case 292: /* func_list */
- case 296: /* expression_list */
- case 308: /* dnode_list */
- case 319: /* star_func_para_list */
- case 321: /* other_para_list */
- case 341: /* select_list */
- case 343: /* partition_by_clause_opt */
- case 345: /* group_by_clause_opt */
- case 347: /* select_sublist */
- case 351: /* group_by_list */
- case 353: /* order_by_clause_opt */
- case 357: /* sort_specification_list */
+ case 254: /* integer_list */
+ case 255: /* variable_list */
+ case 256: /* retention_list */
+ case 260: /* column_def_list */
+ case 261: /* tags_def_opt */
+ case 263: /* multi_create_clause */
+ case 264: /* tags_def */
+ case 265: /* multi_drop_clause */
+ case 272: /* specific_tags_opt */
+ case 273: /* literal_list */
+ case 275: /* col_name_list */
+ case 278: /* func_name_list */
+ case 289: /* func_list */
+ case 293: /* expression_list */
+ case 304: /* dnode_list */
+ case 315: /* star_func_para_list */
+ case 317: /* other_para_list */
+ case 337: /* select_list */
+ case 339: /* partition_by_clause_opt */
+ case 341: /* group_by_clause_opt */
+ case 343: /* select_sublist */
+ case 347: /* group_by_list */
+ case 349: /* order_by_clause_opt */
+ case 353: /* sort_specification_list */
{
- nodesDestroyList((yypminor->yy236));
+ nodesDestroyList((yypminor->yy670));
}
break;
- case 260: /* alter_db_option */
- case 282: /* alter_table_option */
+ case 257: /* alter_db_option */
+ case 279: /* alter_table_option */
{
}
break;
- case 272: /* type_name */
+ case 269: /* type_name */
{
}
break;
- case 324: /* compare_op */
- case 325: /* in_op */
+ case 320: /* compare_op */
+ case 321: /* in_op */
{
}
break;
- case 337: /* join_type */
+ case 333: /* join_type */
{
}
break;
- case 350: /* fill_mode */
+ case 346: /* fill_mode */
{
}
break;
- case 359: /* ordering_specification_opt */
+ case 355: /* ordering_specification_opt */
{
}
break;
- case 360: /* null_ordering_opt */
+ case 356: /* null_ordering_opt */
{
}

@@ -2571,461 +2553,457 @@ static const struct {
YYCODETYPE lhs;     /* Symbol on the left-hand side of the rule */
signed char nrhs;   /* Negative of the number of RHS symbols in the rule */
} yyRuleInfo[] = {
- { 240, -6 }, /* (0) cmd ::= CREATE ACCOUNT NK_ID PASS NK_STRING account_options */
- { 240, -4 }, /* (1) cmd ::= ALTER ACCOUNT NK_ID alter_account_options */
- { 241, 0 }, /* (2) account_options ::= */
- { 241, -3 }, /* (3) account_options ::= account_options PPS literal */
- { 241, -3 }, /* (4) account_options ::= account_options TSERIES literal */
- { 241, -3 }, /* (5) account_options ::= account_options STORAGE literal */
- { 241, -3 }, /* (6) account_options ::= account_options STREAMS literal */
- { 241, -3 }, /* (7) account_options ::= account_options QTIME literal */
- { 241, -3 }, /* (8) account_options ::= account_options DBS literal */
- { 241, -3 }, /* (9) account_options ::= account_options USERS literal */
- { 241, -3 }, /* (10) account_options ::= account_options CONNS literal */
- { 241, -3 }, /* (11) account_options ::= account_options STATE literal */
- { 242, -1 }, /* (12) alter_account_options ::= alter_account_option */
- { 242, -2 }, /* (13) alter_account_options ::= alter_account_options alter_account_option */
- { 244, -2 }, /* (14) alter_account_option ::= PASS literal */
- { 244, -2 }, /* (15) alter_account_option ::= PPS literal */
- { 244, -2 }, /* (16) alter_account_option ::= TSERIES literal */
- { 244, -2 }, /* (17) alter_account_option ::= STORAGE literal */
- { 244, -2 }, /* (18) alter_account_option ::= STREAMS literal */
- { 244, -2 }, /* (19) alter_account_option ::= QTIME literal */
- { 244, -2 }, /* (20) alter_account_option ::= DBS literal */
- { 244, -2 }, /* (21) alter_account_option ::= USERS literal */
- { 244, -2 }, /* (22) alter_account_option ::= CONNS literal */
- { 244, -2 }, /* (23) alter_account_option ::= STATE literal */
- { 240, -5 }, /* (24) cmd ::= CREATE USER user_name PASS NK_STRING */
- { 240, -5 }, /* (25) cmd ::= ALTER USER user_name PASS NK_STRING */
- { 240, -5 }, /* (26) cmd ::= ALTER USER user_name PRIVILEGE NK_STRING */
- { 240, -3 }, /* (27) cmd ::= DROP USER user_name */
- { 240, -6 }, /* (28) cmd ::= GRANT privileges ON priv_level TO user_name */
- { 240, -6 }, /* (29) cmd ::= REVOKE privileges ON priv_level FROM user_name */
- { 246, -1 }, /* (30) privileges ::= ALL */
- { 246, -1 }, /* (31) privileges ::= priv_type_list */
- { 248, -1 }, /* (32) priv_type_list ::= priv_type */
- { 248, -3 }, /* (33) priv_type_list ::= priv_type_list NK_COMMA priv_type */
- { 249, -1 }, /* (34) priv_type ::= READ */
- { 249, -1 }, /* (35) priv_type ::= WRITE */
- { 247, -3 }, /* (36) priv_level ::= NK_STAR NK_DOT NK_STAR */
- { 247, -3 }, /* (37) priv_level ::= db_name NK_DOT NK_STAR */
- { 240, -3 }, /* (38) cmd ::= CREATE DNODE dnode_endpoint */
- { 240, -5 }, /* (39) cmd ::= CREATE DNODE dnode_host_name PORT NK_INTEGER */
- { 240, -3 }, /* (40) cmd ::= DROP DNODE NK_INTEGER */
- { 240, -3 }, /* (41) cmd ::= DROP DNODE dnode_endpoint */
- { 240, -4 }, /* (42) cmd ::= ALTER DNODE NK_INTEGER NK_STRING */
- { 240, -5 }, /* (43) cmd ::= ALTER DNODE NK_INTEGER NK_STRING NK_STRING */
- { 240, -4 }, /* (44) cmd ::= ALTER ALL DNODES NK_STRING */
- { 240, -5 }, /* (45) cmd ::= ALTER ALL DNODES NK_STRING NK_STRING */
- { 251, -1 }, /* (46) dnode_endpoint ::= NK_STRING */
- { 252, -1 }, /* (47) dnode_host_name ::= NK_ID */
- { 252, -1 }, /* (48) dnode_host_name ::= NK_IPTOKEN */
- { 240, -3 }, /* (49) cmd ::= ALTER LOCAL NK_STRING */
- { 240, -4 }, /* (50) cmd ::= ALTER LOCAL NK_STRING NK_STRING */
- { 240, -5 }, /* (51) cmd ::= CREATE QNODE ON DNODE NK_INTEGER */
- { 240, -5 }, /* (52) cmd ::= DROP QNODE ON DNODE NK_INTEGER */
- { 240, -5 }, /* (53) cmd ::= CREATE BNODE ON DNODE NK_INTEGER */
- { 240, -5 }, /* (54) cmd ::= DROP BNODE ON DNODE NK_INTEGER */
- { 240, -5 }, /* (55) cmd ::= CREATE SNODE ON DNODE NK_INTEGER */
- { 240, -5 }, /* (56) cmd ::= DROP SNODE ON DNODE NK_INTEGER */
- { 240, -5 }, /* (57) cmd ::= CREATE MNODE ON DNODE NK_INTEGER */
- { 240, -5 }, /* (58) cmd ::= DROP MNODE ON DNODE NK_INTEGER */
- { 240, -5 }, /* (59) cmd ::= CREATE DATABASE not_exists_opt db_name db_options */
- { 240, -4 }, /* (60) cmd ::= DROP DATABASE exists_opt db_name */
- { 240, -2 }, /* (61) cmd ::= USE db_name */
- { 240, -4 }, /* (62) cmd ::= ALTER DATABASE db_name alter_db_options */
- { 253, -3 }, /* (63) not_exists_opt ::= IF NOT EXISTS */
- { 253, 0 }, /* (64) not_exists_opt ::= */
- { 255, -2 }, /* (65) exists_opt ::= IF EXISTS */
- { 255, 0 }, /* (66) exists_opt ::= */
- { 254, 0 }, /* (67) db_options ::= */
- { 254, -3 }, /* (68) db_options ::= db_options BUFFER NK_INTEGER */
- { 254, -3 }, /* (69) db_options ::= db_options CACHELAST NK_INTEGER */
- { 254, -3 }, /* (70) db_options ::= db_options COMP NK_INTEGER */
- { 254, -3 }, /* (71) db_options ::= db_options DAYS NK_INTEGER */
- { 254, -3 }, /* (72) db_options ::= db_options DAYS NK_VARIABLE */
- { 254, -3 }, /* (73) db_options ::= db_options FSYNC NK_INTEGER */
- { 254, -3 }, /* (74) db_options ::= db_options MAXROWS NK_INTEGER */
- { 254, -3 }, /* (75) db_options ::= db_options MINROWS NK_INTEGER */
- { 254, -3 }, /* (76) db_options ::= db_options KEEP integer_list */
- { 254, -3 }, /* (77) db_options ::= db_options KEEP variable_list */
- { 254, -3 }, /* (78) db_options ::= db_options PAGES NK_INTEGER */
- { 254, -3 }, /* (79) db_options ::= db_options PAGESIZE NK_INTEGER */
- { 254, -3 }, /* (80) db_options ::= db_options PRECISION NK_STRING */
- { 254, -3 }, /* (81) db_options ::= db_options REPLICA NK_INTEGER */
- { 254, -3 }, /* (82) db_options ::= db_options STRICT NK_INTEGER */
- { 254, -3 }, /* (83) db_options ::= db_options WAL NK_INTEGER */
- { 254, -3 }, /* (84) db_options ::= db_options VGROUPS NK_INTEGER */
- { 254, -3 }, /* (85) db_options ::= db_options SINGLE_STABLE NK_INTEGER */
- { 254, -3 }, /* (86) db_options ::= db_options RETENTIONS retention_list */
- { 254, -3 }, /* (87) db_options ::= db_options SCHEMALESS NK_INTEGER */
- { 256, -1 }, /* (88) alter_db_options ::= alter_db_option */
- { 256, -2 }, /* (89) alter_db_options ::= alter_db_options alter_db_option */
- { 260, -2 }, /* (90) alter_db_option ::= BUFFER NK_INTEGER */
- { 260, -2 }, /* (91) alter_db_option ::= CACHELAST NK_INTEGER */
- { 260, -2 }, /* (92) alter_db_option ::= FSYNC NK_INTEGER */
- { 260, -2 }, /* (93) alter_db_option ::= KEEP integer_list */
- { 260, -2 }, /* (94) alter_db_option ::= KEEP variable_list */
- { 260, -2 }, /* (95) alter_db_option ::= PAGES NK_INTEGER */
- { 260, -2 }, /* (96) alter_db_option ::= REPLICA NK_INTEGER */
- { 260, -2 }, /* (97) alter_db_option ::= STRICT NK_INTEGER */
- { 260, -2 }, /* (98) alter_db_option ::= WAL NK_INTEGER */
- { 257, -1 }, /* (99) integer_list ::= NK_INTEGER */
- { 257, -3 }, /* (100) integer_list ::= integer_list NK_COMMA NK_INTEGER */
- { 258, -1 }, /* (101) variable_list ::= NK_VARIABLE */
- { 258, -3 }, /* (102) variable_list ::= variable_list NK_COMMA NK_VARIABLE */
- { 259, -1 }, /* (103) retention_list ::= retention */
- { 259, -3 }, /* (104) retention_list ::= retention_list NK_COMMA retention */
- { 261, -3 }, /* (105) retention ::= NK_VARIABLE NK_COLON NK_VARIABLE */
- { 240, -9 }, /* (106) cmd ::= CREATE TABLE not_exists_opt full_table_name NK_LP column_def_list NK_RP tags_def_opt table_options */
- { 240, -3 }, /* (107) cmd ::= CREATE TABLE multi_create_clause */
- { 240, -9 }, /* (108) cmd ::= CREATE STABLE not_exists_opt full_table_name NK_LP column_def_list NK_RP tags_def table_options */
- { 240, -3 }, /* (109) cmd ::= DROP TABLE multi_drop_clause */
- { 240, -4 }, /* (110) cmd ::= DROP STABLE exists_opt full_table_name */
- { 240, -3 }, /* (111) cmd ::= ALTER TABLE alter_table_clause */
- { 240, -3 }, /* (112) cmd ::= ALTER STABLE alter_table_clause */
- { 269, -2 }, /* (113) alter_table_clause ::= full_table_name alter_table_options */
- { 269, -5 }, /* (114) alter_table_clause ::= full_table_name ADD COLUMN column_name type_name */
- { 269, -4 }, /* (115) alter_table_clause ::= full_table_name DROP COLUMN column_name */
- { 269, -5 }, /* (116) alter_table_clause ::= full_table_name MODIFY COLUMN column_name type_name */
- { 269, -5 }, /* (117) alter_table_clause ::= full_table_name RENAME COLUMN column_name column_name */
- { 269, -5 }, /* (118) alter_table_clause ::= full_table_name ADD TAG column_name type_name */
- { 269, -4 }, /* (119) alter_table_clause ::= full_table_name DROP TAG column_name */
- { 269, -5 }, /* (120) alter_table_clause ::= full_table_name MODIFY TAG column_name type_name */
- { 269, -5 }, /* (121) alter_table_clause ::= full_table_name RENAME TAG column_name column_name */
- { 269, -6 }, /* (122) alter_table_clause ::= full_table_name SET TAG column_name NK_EQ signed_literal */
- { 266, -1 }, /* (123) multi_create_clause ::= create_subtable_clause */
- { 266, -2 }, /* (124) multi_create_clause ::= multi_create_clause create_subtable_clause */
- { 274, -10 }, /* (125) create_subtable_clause ::= not_exists_opt full_table_name USING full_table_name specific_tags_opt TAGS NK_LP literal_list NK_RP table_options */
- { 268, -1 }, /* (126) multi_drop_clause ::= drop_table_clause */
- { 268, -2 }, /* (127) multi_drop_clause ::= multi_drop_clause drop_table_clause */
- { 277, -2 }, /* (128) drop_table_clause ::= exists_opt full_table_name */
- { 275, 0 }, /* (129) specific_tags_opt ::= */
- { 275, -3 }, /* (130) specific_tags_opt ::= NK_LP col_name_list NK_RP */
- { 262, -1 }, /* (131) full_table_name ::= table_name */
- { 262, -3 }, /* (132) full_table_name ::= db_name NK_DOT table_name */
- { 263, -1 }, /* (133) column_def_list ::= column_def */
- { 263, -3 }, /* (134) column_def_list ::= column_def_list NK_COMMA column_def */
- { 280, -2 }, /* (135) column_def ::= column_name type_name */
- { 280, -4 }, /* (136) column_def ::= column_name type_name COMMENT NK_STRING */
- { 272, -1 }, /* (137) type_name ::= BOOL */
- { 272, -1 }, /* (138) type_name ::= TINYINT */
- { 272, -1 }, /* (139) type_name ::= SMALLINT */
- { 272, -1 }, /* (140) type_name ::= INT */
- { 272, -1 }, /* (141) type_name ::= INTEGER */
- { 272, -1 }, /* (142) type_name ::= BIGINT */
- { 272, -1 }, /* (143) type_name ::= FLOAT */
- { 272, -1 }, /* (144) type_name ::= DOUBLE */
- { 272, -4 }, /* (145) type_name ::= BINARY NK_LP NK_INTEGER NK_RP */
- { 272, -1 }, /* (146) type_name ::= TIMESTAMP */
- { 272, -4 }, /* (147) type_name ::= NCHAR NK_LP NK_INTEGER NK_RP */
- { 272, -2 }, /* (148) type_name ::= TINYINT UNSIGNED */
- { 272, -2 }, /* (149) type_name ::= SMALLINT UNSIGNED */
- { 272, -2 }, /* (150) type_name ::= INT UNSIGNED */
- { 272, -2 }, /* (151) type_name ::= BIGINT UNSIGNED */
- { 272, -1 }, /* (152) type_name ::= JSON */
- { 272, -4 }, /* (153) type_name ::= VARCHAR NK_LP NK_INTEGER NK_RP */
- { 272, -1 }, /* (154) type_name ::= MEDIUMBLOB */
- { 272, -1 }, /* (155) type_name ::= BLOB */
- { 272, -4 }, /* (156) type_name ::= VARBINARY NK_LP NK_INTEGER NK_RP */
- { 272, -1 }, /* (157) type_name ::= DECIMAL */
- { 272, -4 }, /* (158) type_name ::= DECIMAL NK_LP NK_INTEGER NK_RP */
- { 272, -6 }, /* (159) type_name ::= DECIMAL NK_LP NK_INTEGER NK_COMMA NK_INTEGER NK_RP */
- { 264, 0 }, /* (160) tags_def_opt ::= */
- { 264, -1 }, /* (161) tags_def_opt ::= tags_def */
- { 267, -4 }, /* (162) tags_def ::= TAGS NK_LP column_def_list NK_RP */
- { 265, 0 }, /* (163) table_options ::= */
- { 265, -3 }, /* (164) table_options ::= table_options COMMENT NK_STRING */
- { 265, -3 }, /* (165) table_options ::= table_options DELAY NK_INTEGER */
- { 265, -3 }, /* (166) table_options ::= table_options FILE_FACTOR NK_FLOAT */
- { 265, -5 }, /* (167) table_options ::= table_options ROLLUP NK_LP func_name_list NK_RP */
- { 265, -3 }, /* (168) table_options ::= table_options TTL NK_INTEGER */
- { 265, -5 }, /* (169) table_options ::= table_options SMA NK_LP col_name_list NK_RP */
- { 270, -1 }, /* (170) alter_table_options ::= alter_table_option */
- { 270, -2 }, /* (171) alter_table_options ::= alter_table_options alter_table_option */
- { 282, -2 }, /* (172) alter_table_option ::= COMMENT NK_STRING */
- { 282, -2 }, /* (173) alter_table_option ::= TTL NK_INTEGER */
- { 278, -1 }, /* (174) col_name_list ::= col_name */
- { 278, -3 }, /* (175) col_name_list ::= col_name_list NK_COMMA col_name */
- { 283, -1 }, /* (176) col_name ::= column_name */
- { 240, -2 }, /* (177) cmd ::= SHOW DNODES */
- { 240, -2 }, /* (178) cmd ::= SHOW USERS */
- { 240, -2 }, /* (179) cmd ::= SHOW DATABASES */
- { 240, -4 }, /* (180) cmd ::= SHOW db_name_cond_opt TABLES like_pattern_opt */
- { 240, -4 }, /* (181) cmd ::= SHOW db_name_cond_opt STABLES like_pattern_opt */
- { 240, -3 }, /* (182) cmd ::= SHOW db_name_cond_opt VGROUPS */
- { 240, -2 }, /* (183) cmd ::= SHOW MNODES */
- { 240, -2 }, /* (184) cmd ::= SHOW MODULES */
- { 240, -2 }, /* (185) cmd ::= SHOW QNODES */
- { 240, -2 }, /* (186) cmd ::= SHOW FUNCTIONS */
- { 240, -5 }, /* (187) cmd ::= SHOW INDEXES FROM table_name_cond from_db_opt */
- { 240, -2 }, /* (188) cmd ::= SHOW STREAMS */
- { 240, -2 }, /* (189) cmd ::= SHOW ACCOUNTS */
- { 240, -2 }, /* (190) cmd ::= SHOW APPS */
- { 240, -2 }, /* (191) cmd ::= SHOW CONNECTIONS */
- { 240, -2 }, /* (192) cmd ::= SHOW LICENCE */
- { 240, -2 }, /* (193) cmd ::= SHOW GRANTS */
- { 240, -4 }, /* (194) cmd ::= SHOW CREATE DATABASE db_name */
- { 240, -4 }, /* (195) cmd ::= SHOW CREATE TABLE full_table_name */
- { 240, -4 }, /* (196) cmd ::= SHOW CREATE STABLE full_table_name */
- { 240, -2 }, /* (197) cmd ::= SHOW QUERIES */
- { 240, -2 }, /* (198) cmd ::= SHOW SCORES */
- { 240, -2 }, /* (199) cmd ::= SHOW TOPICS */
- { 240, -2 }, /* (200) cmd ::= SHOW VARIABLES */
- { 240, -2 }, /* (201) cmd ::= SHOW BNODES */
- { 240, -2 }, /* (202) cmd ::= SHOW SNODES */
- { 240, -2 }, /* (203) cmd ::= SHOW CLUSTER */
- { 240, -2 }, /* (204) cmd ::= SHOW TRANSACTIONS */
- { 284, 0 }, /* (205) db_name_cond_opt ::= */
- { 284, -2 }, /* (206) db_name_cond_opt ::= db_name NK_DOT */
- { 285, 0 }, /* (207) like_pattern_opt ::= */
- { 285, -2 }, /* (208) like_pattern_opt ::= LIKE NK_STRING */
- { 286, -1 }, /* (209) table_name_cond ::= table_name */
- { 287, 0 }, /* (210) from_db_opt ::= */
- { 287, -2 }, /* (211) from_db_opt ::= FROM db_name */
- { 281, -1 }, /* (212) func_name_list ::= func_name */
- { 281, -3 }, /* (213) func_name_list ::= func_name_list NK_COMMA func_name */
- { 288, -1 }, /* (214) func_name ::= function_name */
- { 240, -8 }, /* (215) cmd ::= CREATE SMA INDEX not_exists_opt index_name ON table_name index_options */
- { 240, -10 }, /* (216) cmd ::= CREATE FULLTEXT INDEX not_exists_opt index_name ON table_name NK_LP col_name_list NK_RP */
- { 240, -6 }, /* (217) cmd ::= DROP INDEX exists_opt index_name ON table_name */
- { 291, 0 }, /* (218) index_options ::= */
- { 291, -9 }, /* (219) index_options ::= FUNCTION NK_LP func_list NK_RP INTERVAL NK_LP duration_literal NK_RP sliding_opt */
- { 291, -11 }, /* (220) index_options ::= FUNCTION NK_LP func_list NK_RP INTERVAL NK_LP duration_literal NK_COMMA duration_literal NK_RP sliding_opt */
- { 292, -1 }, /* (221) func_list ::= func */
- { 292, -3 }, /* (222) func_list ::= func_list NK_COMMA func */
- { 295, -4 }, /* (223) func ::= function_name NK_LP expression_list NK_RP */
- { 240, -7 }, /* (224) cmd ::= CREATE TOPIC not_exists_opt topic_name topic_options AS query_expression */
- { 240, -7 }, /* (225) cmd ::= CREATE TOPIC not_exists_opt topic_name topic_options AS db_name */
- { 240, -4 }, /* (226) cmd ::= DROP TOPIC exists_opt topic_name */
- { 240, -6 }, /* (227) cmd ::= DROP CGROUP exists_opt cgroup_name ON topic_name */
- { 298, 0 }, /* (228) topic_options ::= */
- { 298, -3 }, /* (229) topic_options ::= topic_options WITH TABLE */
- { 298, -3 }, /* (230) topic_options ::= topic_options WITH SCHEMA */
- { 298, -3 }, /* (231) topic_options ::= topic_options WITH TAG */
- { 240, -2 }, /* (232) cmd ::= DESC full_table_name */
- { 240, -2 }, /* (233) cmd ::= DESCRIBE full_table_name */
- { 240, -3 }, /* (234) cmd ::= RESET QUERY CACHE */
- { 240, -4 }, /* (235) cmd ::= EXPLAIN analyze_opt explain_options query_expression */
- { 301, 0 }, /* (236) analyze_opt ::= */
- { 301, -1 }, /* (237) analyze_opt ::= ANALYZE */
- { 302, 0 }, /* (238) explain_options ::= */
- { 302, -3 }, /* (239) explain_options ::= explain_options VERBOSE NK_BOOL */
- { 302, -3 }, /* (240) explain_options ::= explain_options RATIO NK_FLOAT */
- { 240, -6 }, /* (241) cmd ::= COMPACT VNODES IN NK_LP integer_list NK_RP */
- { 240, -10 }, /* (242) cmd ::= CREATE agg_func_opt FUNCTION not_exists_opt function_name AS NK_STRING OUTPUTTYPE type_name bufsize_opt */
- { 240, -4 }, /* (243) cmd ::= DROP FUNCTION exists_opt function_name */
- { 303, 0 }, /* (244) agg_func_opt ::= */
- { 303, -1 }, /* (245) agg_func_opt ::= AGGREGATE */
- { 304, 0 }, /* (246) bufsize_opt ::= */
- { 304, -2 }, /* (247) bufsize_opt ::= BUFSIZE NK_INTEGER */
- { 240, -8 }, /* (248) cmd ::= CREATE STREAM not_exists_opt stream_name stream_options into_opt AS query_expression */
- { 240, -4 }, /* (249) cmd ::= DROP STREAM exists_opt stream_name */
- { 307, 0 }, /* (250) into_opt ::= */
- { 307, -2 }, /* (251) into_opt ::= INTO full_table_name */
- { 306, 0 }, /* (252) stream_options ::= */
- { 306, -3 }, /* (253) stream_options ::= stream_options TRIGGER AT_ONCE */
- { 306, -3 }, /* (254) stream_options ::= stream_options TRIGGER WINDOW_CLOSE */
- { 306, -3 }, /* (255) stream_options ::= stream_options WATERMARK duration_literal */
- { 240, -3 }, /* (256) cmd ::= KILL CONNECTION NK_INTEGER */
- { 240, -3 }, /* (257) cmd ::= KILL QUERY NK_INTEGER */
- { 240, -3 }, /* (258) cmd ::= KILL TRANSACTION NK_INTEGER */
- { 240, -4 }, /* (259) cmd ::= MERGE VGROUP NK_INTEGER NK_INTEGER */
- { 240, -4 }, /* (260) cmd ::= REDISTRIBUTE VGROUP NK_INTEGER dnode_list */
- { 240, -3 }, /* (261) cmd ::= SPLIT VGROUP NK_INTEGER */
- { 308, -2 }, /* (262) dnode_list ::= DNODE NK_INTEGER */
- { 308, -3 }, /* (263) dnode_list ::= dnode_list DNODE NK_INTEGER */
- { 240, -3 }, /* (264) cmd ::= SYNCDB db_name REPLICA */
- { 240, -1 }, /* (265) cmd ::= query_expression */
- { 243, -1 }, /* (266) literal ::= NK_INTEGER */
- { 243, -1 }, /* (267) literal ::= NK_FLOAT */
- { 243, -1 }, /* (268) literal ::= NK_STRING */
- { 243, -1 }, /* (269) literal ::= NK_BOOL */
- { 243, -2 }, /* (270) literal ::= TIMESTAMP NK_STRING */
- { 243, -1 }, /* (271) literal ::= duration_literal */
- { 243, -1 }, /* (272) literal ::= NULL */
- { 243, -1 }, /* (273) literal ::= NK_QUESTION */
- { 293, -1 }, /* (274) duration_literal ::= NK_VARIABLE */
- { 309, -1 }, /* (275) signed ::= NK_INTEGER */
- { 309, -2 }, /* (276) signed ::= NK_PLUS NK_INTEGER */
- { 309, -2 }, /* (277) signed ::= NK_MINUS NK_INTEGER */
- { 309, -1 }, /* (278) signed ::= NK_FLOAT */
- { 309, -2 }, /* (279) signed ::= NK_PLUS NK_FLOAT */
- { 309, -2 }, /* (280) signed ::= NK_MINUS NK_FLOAT */
- { 273, -1 }, /* (281) signed_literal ::= signed */
- { 273, -1 }, /* (282) signed_literal ::= NK_STRING */
- { 273, -1 }, /* (283) signed_literal ::= NK_BOOL */
- { 273, -2 }, /* (284) signed_literal ::= TIMESTAMP NK_STRING */
- { 273, -1 }, /* (285) signed_literal ::= duration_literal */
- { 273, -1 }, /* (286) signed_literal ::= NULL */
- { 273, -1 }, /* (287) signed_literal ::= literal_func */
- { 276, -1 }, /* (288) literal_list ::= signed_literal */
- { 276, -3 }, /* (289) literal_list ::= literal_list NK_COMMA signed_literal */
- { 250, -1 }, /* (290) db_name ::= NK_ID */
- { 279, -1 }, /* (291) table_name ::= NK_ID */
- { 271, -1 }, /* (292) column_name ::= NK_ID */
- { 289, -1 }, /* (293) function_name ::= NK_ID */
- { 311, -1 }, /* (294) table_alias ::= NK_ID */
- { 312, -1 }, /* (295) column_alias ::= NK_ID */
- { 245, -1 }, /* (296) user_name ::= NK_ID */
- { 290, -1 }, /* (297) index_name ::= NK_ID */
- { 297, -1 }, /* (298) topic_name ::= NK_ID */
- { 305, -1 }, /* (299) stream_name ::= NK_ID */
- { 300, -1 }, /* (300) cgroup_name ::= NK_ID */
- { 313, -1 }, /* (301) expression ::= literal */
- { 313, -1 }, /* (302) expression ::= pseudo_column */
- { 313, -1 }, /* (303) expression ::= column_reference */
- { 313, -1 }, /* (304) expression ::= function_expression */
- { 313, -1 }, /* (305) expression ::= subquery */
- { 313, -3 }, /* (306) expression ::= NK_LP expression NK_RP */
- { 313, -2 }, /* (307) expression ::= NK_PLUS expression */
- { 313, -2 }, /* (308) expression ::= NK_MINUS expression */
- { 313, -3 }, /* (309) expression ::= expression NK_PLUS expression */
- { 313, -3 }, /* (310) expression ::= expression NK_MINUS expression */
- { 313, -3 }, /* (311) expression ::= expression NK_STAR expression */
- { 313, -3 }, /* (312) expression ::= expression NK_SLASH expression */
- { 313, -3 }, /* (313) expression ::= expression NK_REM expression */
- { 313, -3 }, /* (314) expression ::= column_reference NK_ARROW NK_STRING */
- { 296, -1 }, /* (315) expression_list ::= expression */
- { 296, -3 }, /* (316) expression_list ::= expression_list NK_COMMA expression */
- { 315, -1 }, /* (317) column_reference ::= column_name */
- { 315, -3 }, /* (318) column_reference ::= table_name NK_DOT column_name */
- { 314, -1 }, /* (319) pseudo_column ::= ROWTS */
- { 314, -1 }, /* (320) pseudo_column ::= TBNAME */
- { 314, -3 }, /* (321) pseudo_column ::= table_name NK_DOT TBNAME */
- { 314, -1 }, /* (322) pseudo_column ::= QSTARTTS */
- { 314, -1 }, /* (323) pseudo_column ::= QENDTS */
- { 314, -1 }, /* (324) pseudo_column ::= WSTARTTS */
- { 314, -1 }, /* (325) pseudo_column ::= WENDTS */
- { 314, -1 }, /* (326) pseudo_column ::= WDURATION */
- { 316, -4 }, /* (327) function_expression ::= function_name NK_LP expression_list NK_RP */
- { 316, -4 }, /* (328) function_expression ::= star_func NK_LP star_func_para_list NK_RP */
- { 316, -6 }, /* (329) function_expression ::= CAST NK_LP expression AS type_name NK_RP */
- { 316, -1 }, /* (330) function_expression ::= literal_func */
- { 310, -3 }, /* (331) literal_func ::= noarg_func NK_LP NK_RP */
- { 310, -1 }, /* (332) literal_func ::= NOW */
- { 320, -1 }, /* (333) noarg_func ::= NOW */
- { 320, -1 }, /* (334) noarg_func ::= TODAY */
- { 320, -1 }, /* (335) noarg_func ::= TIMEZONE */
- { 318, -1 }, /* (336) star_func ::= COUNT */
- { 318, -1 }, /* (337) star_func ::= FIRST */
- { 318, -1 }, /* (338) star_func ::= LAST */
- { 318, -1 }, /* (339) star_func ::= LAST_ROW */
- { 319, -1 }, /* (340) star_func_para_list ::= NK_STAR */
- { 319, -1 }, /* (341) star_func_para_list ::= other_para_list */
- { 321, -1 }, /* (342) other_para_list ::= star_func_para */
- { 321, -3 }, /* (343) other_para_list ::= other_para_list NK_COMMA star_func_para */
- { 322, -1 }, /* (344) star_func_para ::= expression */
- { 322, -3 }, /* (345) star_func_para ::= table_name NK_DOT NK_STAR */
- { 323, -3 }, /* (346) predicate ::= expression compare_op expression */
- { 323, -5 }, /* (347) predicate ::= expression BETWEEN expression AND expression */
- { 323, -6 }, /* (348) predicate ::= expression NOT BETWEEN expression AND expression */
- { 323, -3 }, /* (349) predicate ::= expression IS NULL */
- { 323, -4 }, /* (350) predicate ::= expression IS NOT NULL */
- { 323, -3 }, /* (351) predicate ::= expression in_op in_predicate_value */
- { 324, -1 }, /* (352) compare_op ::= NK_LT */
- { 324, -1 }, /* (353) compare_op ::= NK_GT */
- { 324, -1 }, /* (354) compare_op ::= NK_LE */
- { 324, -1 }, /* (355) compare_op ::= NK_GE */
- { 324, -1 }, /* (356) compare_op ::= NK_NE */
- { 324, -1 }, /* (357) compare_op ::= NK_EQ */
- { 324, -1 }, /* (358) compare_op ::= LIKE */
- { 324, -2 }, /* (359) compare_op ::= NOT LIKE */
- { 324, -1 }, /* (360) compare_op ::= MATCH */
- { 324, -1 }, /* (361) compare_op ::= NMATCH */
- { 324, -1 }, /* (362) compare_op ::= CONTAINS */
- { 325, -1 }, /* (363) in_op ::= IN */
- { 325, -2 }, /* (364) in_op ::= NOT IN */
- { 326, -3 }, /* (365) in_predicate_value ::= NK_LP expression_list NK_RP */
- { 327, -1 }, /* (366) boolean_value_expression ::= boolean_primary */
- { 327, -2 }, /* (367) boolean_value_expression ::= NOT boolean_primary */
- { 327, -3 }, /* (368) boolean_value_expression ::= boolean_value_expression OR boolean_value_expression */
- { 327, -3 }, /* (369) boolean_value_expression ::= boolean_value_expression AND boolean_value_expression */
- { 328, -1 }, /* (370) boolean_primary ::= predicate */
- { 328, -3 }, /* (371) boolean_primary ::= NK_LP boolean_value_expression NK_RP */
- { 329, -1 }, /* (372) common_expression ::= expression */
- { 329, -1 }, /* (373) common_expression ::= boolean_value_expression */
- { 330, -2 }, /* (374) from_clause ::= FROM table_reference_list */
- { 331, -1 }, /* (375) table_reference_list ::= table_reference */
- { 331, -3 }, /* (376) table_reference_list ::= table_reference_list NK_COMMA table_reference */
- { 332, -1 }, /* (377) table_reference ::= table_primary */
- { 332, -1 }, /* (378) table_reference ::= joined_table */
- { 333, -2 }, /* (379) table_primary ::= table_name alias_opt */
- { 333, -4 }, /* (380) table_primary ::= db_name NK_DOT table_name alias_opt */
- { 333, -2 }, /* (381) table_primary ::= subquery alias_opt */
- { 333, -1 }, /* (382) table_primary ::= parenthesized_joined_table */
- { 335, 0 }, /* (383) alias_opt ::= */
- { 335, -1 }, /* (384) alias_opt ::= table_alias */
- { 335, -2 }, /* (385) alias_opt ::= AS table_alias */
- { 336, -3 }, /* (386) parenthesized_joined_table ::= NK_LP joined_table NK_RP */
- { 336, -3 }, /* (387) parenthesized_joined_table ::= NK_LP parenthesized_joined_table NK_RP */
- { 334, -6 }, /* (388) joined_table ::= table_reference join_type JOIN table_reference ON search_condition */
- { 337, 0 }, /* (389) join_type ::= */
- { 337, -1 }, /* (390) join_type ::= INNER */
- { 339, -9 }, /* (391) query_specification ::= SELECT set_quantifier_opt select_list from_clause where_clause_opt partition_by_clause_opt twindow_clause_opt group_by_clause_opt having_clause_opt */
- { 340, 0 }, /* (392) set_quantifier_opt ::= */
- { 340, -1 }, /* (393) set_quantifier_opt ::= DISTINCT */
- { 340, -1 }, /* (394) set_quantifier_opt ::= ALL */
- { 341, -1 }, /* (395) select_list ::= NK_STAR */
- { 341, -1 }, /* (396) select_list ::= select_sublist */
- { 347, -1 }, /* (397) select_sublist ::= select_item */
- { 347, -3 }, /* (398) select_sublist ::= select_sublist NK_COMMA select_item */
- { 348, -1 }, /* (399) select_item ::= common_expression */
- { 348, -2 }, /* (400) select_item ::= common_expression column_alias */
- { 348, -3 }, /* (401) select_item ::= common_expression AS column_alias */
- { 348, -3 }, /* (402) select_item ::= table_name NK_DOT NK_STAR */
- { 342, 0 }, /* (403) where_clause_opt ::= */
- { 342, -2 }, /* (404) where_clause_opt ::= WHERE search_condition */
- { 343, 0 }, /* (405) partition_by_clause_opt ::= */
- { 343, -3 }, /* (406) partition_by_clause_opt ::= PARTITION BY expression_list */
- { 344, 0 }, /* (407) twindow_clause_opt ::= */
- { 344, -6 }, /* (408) twindow_clause_opt ::= SESSION NK_LP column_reference NK_COMMA duration_literal NK_RP */
- { 344, -4 }, /* (409) twindow_clause_opt ::= STATE_WINDOW NK_LP expression NK_RP */
- { 344, -6 }, /* (410) twindow_clause_opt ::= INTERVAL NK_LP duration_literal NK_RP sliding_opt fill_opt */
- { 344, -8 }, /* (411) twindow_clause_opt ::= INTERVAL NK_LP duration_literal NK_COMMA duration_literal NK_RP sliding_opt fill_opt */
- { 294, 0 }, /* (412) sliding_opt ::= */
- { 294, -4 }, /* (413) sliding_opt ::= SLIDING NK_LP duration_literal NK_RP */
- { 349, 0 }, /* (414) fill_opt ::= */
- { 349, -4 }, /* (415) fill_opt ::= FILL NK_LP fill_mode NK_RP */
- { 349, -6 }, /* (416) fill_opt ::= FILL NK_LP VALUE NK_COMMA literal_list NK_RP */
- { 350, -1 }, /* (417) fill_mode ::= NONE */
- { 350, -1 }, /* (418) fill_mode ::= PREV */
- { 350, -1 }, /* (419) fill_mode ::= NULL */
- { 350, -1 }, /* (420) fill_mode ::= LINEAR */
- { 350, -1 }, /* (421) fill_mode ::= NEXT */
- { 345, 0 }, /* (422) group_by_clause_opt ::= */
- { 345, -3 }, /* (423) group_by_clause_opt ::= GROUP BY group_by_list */
- { 351, -1 }, /* (424) group_by_list ::= expression */
- { 351, -3 }, /* (425) group_by_list ::= group_by_list NK_COMMA expression */
- { 346, 0 }, /* (426) having_clause_opt ::= */
- { 346, -2 }, /* (427) having_clause_opt ::= HAVING search_condition */
- { 299, -4 }, /* (428) query_expression ::= query_expression_body order_by_clause_opt slimit_clause_opt limit_clause_opt */
- { 352, -1 }, /* (429) query_expression_body ::= query_primary */
- { 352, -4 }, /* (430) query_expression_body ::= query_expression_body UNION ALL query_expression_body */
- { 352, -3 }, /* (431) query_expression_body ::= query_expression_body UNION query_expression_body */
- { 356, -1 }, /* (432) query_primary ::= query_specification */
- { 356, -6 }, /* (433) query_primary ::= NK_LP query_expression_body order_by_clause_opt slimit_clause_opt limit_clause_opt NK_RP */
- { 353, 0 }, /* (434) order_by_clause_opt ::= */
- { 353, -3 }, /* (435) order_by_clause_opt ::= ORDER BY sort_specification_list */
- { 354, 0 }, /* (436) slimit_clause_opt ::= */
- { 354, -2 }, /* (437) slimit_clause_opt ::= SLIMIT NK_INTEGER */
- { 354, -4 }, /* (438) slimit_clause_opt ::= SLIMIT NK_INTEGER SOFFSET NK_INTEGER */
- { 354, -4 }, /* (439) slimit_clause_opt ::= SLIMIT NK_INTEGER NK_COMMA NK_INTEGER */
- { 355, 0 }, /* (440) limit_clause_opt ::= */
- { 355, -2 }, /* (441) limit_clause_opt ::= LIMIT NK_INTEGER */
- { 355, -4 }, /* (442) limit_clause_opt ::= LIMIT NK_INTEGER OFFSET NK_INTEGER */
- { 355, -4 }, /* (443) limit_clause_opt ::= LIMIT NK_INTEGER NK_COMMA NK_INTEGER */
- { 317, -3 }, /* (444) subquery ::= NK_LP query_expression NK_RP */
- { 338, -1 }, /* (445) search_condition ::= common_expression */
- { 357, -1 }, /* (446) sort_specification_list ::= sort_specification */
- { 357, -3 }, /* (447) sort_specification_list ::= sort_specification_list NK_COMMA sort_specification */
- { 358, -3 }, /* (448) sort_specification ::= expression ordering_specification_opt null_ordering_opt */
- { 359, 0 }, /* (449) ordering_specification_opt ::= */
- { 359, -1 }, /* (450) ordering_specification_opt ::= ASC */
- { 359, -1 }, /* (451) ordering_specification_opt ::= DESC */
- { 360, 0 }, /* (452) null_ordering_opt ::= */
- { 360, -2 }, /* (453) null_ordering_opt ::= NULLS FIRST */
- { 360, -2 }, /* (454) null_ordering_opt ::= NULLS LAST */
+ { 237, -6 }, /* (0) cmd ::= CREATE ACCOUNT NK_ID PASS NK_STRING account_options */
+ { 237, -4 }, /* (1) cmd ::= ALTER ACCOUNT NK_ID alter_account_options */
+ { 238, 0 }, /* (2) account_options ::= */
+ { 238, -3 }, /* (3) account_options ::= account_options PPS literal */
+ { 238, -3 }, /* (4) account_options ::= account_options TSERIES literal */
+ { 238, -3 }, /* (5) account_options ::= account_options STORAGE literal */
+ { 238, -3 }, /* (6) account_options ::= account_options STREAMS literal */
+ { 238, -3 }, /* (7) account_options ::= account_options QTIME literal */
+ { 238, -3 }, /* (8) account_options ::= account_options DBS literal */
+ { 238, -3 }, /* (9) account_options ::= account_options USERS literal */
+ { 238, -3 }, /* (10) account_options ::= account_options CONNS literal */
+ { 238, -3 }, /* (11) account_options ::= account_options STATE literal */
+ { 239, -1 }, /* (12) alter_account_options ::= alter_account_option */
+ { 239, -2 }, /* (13) alter_account_options ::= alter_account_options alter_account_option */
+ { 241, -2 }, /* (14) alter_account_option ::= PASS literal */
+ { 241, -2 }, /* (15) alter_account_option ::= PPS literal */
+ { 241, -2 }, /* (16) alter_account_option ::= TSERIES literal */
+ { 241, -2 }, /* (17) alter_account_option ::= STORAGE literal */
+ { 241, -2 }, /* (18) alter_account_option ::= STREAMS literal */
+ { 241, -2 }, /* (19) alter_account_option ::= QTIME literal */
+ { 241, -2 }, /* (20) alter_account_option ::= DBS literal */
+ { 241, -2 }, /* (21) alter_account_option ::= USERS literal */
+ { 241, -2 }, /* (22) alter_account_option ::= CONNS literal */
+ { 241, -2 }, /* (23) alter_account_option ::= STATE literal */
+ { 237, -5 }, /* (24) cmd ::= CREATE USER user_name PASS NK_STRING */
+ { 237, -5 }, /* (25) cmd ::= ALTER USER user_name PASS NK_STRING */
+ { 237, -5 }, /* (26) cmd ::= ALTER USER user_name PRIVILEGE NK_STRING */
+ { 237, -3 }, /* (27) cmd ::= DROP USER user_name */
+ { 237, -6 }, /* (28) cmd ::= GRANT privileges ON priv_level TO user_name */
+ { 237, -6 }, /* (29) cmd ::= REVOKE privileges ON priv_level FROM user_name */
+ { 243, -1 }, /* (30) privileges ::= ALL */
+ { 243, -1 }, /* (31) privileges ::= priv_type_list */
+ { 245, -1 }, /* (32) priv_type_list ::= priv_type */
+ { 245, -3 }, /* (33) priv_type_list ::= priv_type_list NK_COMMA priv_type */
+ { 246, -1 }, /* (34) priv_type ::= READ */
+ { 246, -1 }, /* (35) priv_type ::= WRITE */
+ { 244, -3 }, /* (36) priv_level ::= NK_STAR NK_DOT NK_STAR */
+ { 244, -3 }, /* (37) priv_level ::= db_name NK_DOT NK_STAR */
+ { 237, -3 }, /* (38) cmd ::= CREATE DNODE dnode_endpoint */
+ { 237, -5 }, /* (39) cmd ::= CREATE DNODE dnode_host_name PORT NK_INTEGER */
+ { 237, -3 }, /* (40) cmd ::= DROP DNODE NK_INTEGER */
+ { 237, -3 }, /* (41) cmd ::= DROP DNODE dnode_endpoint */
+ { 237, -4 }, /* (42) cmd ::= ALTER DNODE NK_INTEGER NK_STRING */
+ { 237, -5 }, /* (43) cmd ::= ALTER DNODE NK_INTEGER NK_STRING NK_STRING */
+ { 237, -4 }, /* (44) cmd ::= ALTER ALL DNODES NK_STRING */
+ { 237, -5 }, /* (45) cmd ::= ALTER ALL DNODES NK_STRING NK_STRING */
+ { 248, -1 }, /* (46) dnode_endpoint ::= NK_STRING */
+ { 249, -1 }, /* (47) dnode_host_name ::= NK_ID */
+ { 249, -1 }, /* (48) dnode_host_name ::= NK_IPTOKEN */
+ { 237, -3 }, /* (49) cmd ::= ALTER LOCAL NK_STRING */
+ { 237, -4 }, /* (50) cmd ::= ALTER LOCAL NK_STRING NK_STRING */
+ { 237, -5 }, /* (51) cmd ::= CREATE QNODE ON DNODE NK_INTEGER */
+ { 237, -5 }, /* (52) cmd ::= DROP QNODE ON DNODE NK_INTEGER */
+ { 237, -5 }, /* (53) cmd ::= CREATE BNODE ON DNODE NK_INTEGER */
+ { 237, -5 }, /* (54) cmd ::= DROP BNODE ON DNODE NK_INTEGER */
+ { 237, -5 }, /* (55) cmd ::= CREATE SNODE ON DNODE NK_INTEGER */
+ { 237, -5 }, /* (56) cmd ::= DROP SNODE ON DNODE NK_INTEGER */
+ { 237, -5 }, /* (57) cmd ::= CREATE MNODE ON DNODE NK_INTEGER */
+ { 237, -5 }, /* (58) cmd ::= DROP MNODE ON DNODE NK_INTEGER */
+ { 237, -5 }, /* (59) cmd ::= CREATE DATABASE not_exists_opt db_name db_options */
+ { 237, -4 }, /* (60) cmd ::= DROP DATABASE exists_opt db_name */
+ { 237, -2 }, /* (61) cmd ::= USE db_name */
+ { 237, -4 }, /* (62) cmd ::= ALTER DATABASE db_name alter_db_options */
+ { 250, -3 }, /* (63) not_exists_opt ::= IF NOT EXISTS */
+ { 250, 0 }, /* (64) not_exists_opt ::= */
+ { 252, -2 }, /* (65) exists_opt ::= IF EXISTS */
+ { 252, 0 }, /* (66) exists_opt ::= */
+ { 251, 0 }, /* (67) db_options ::= */
+ { 251, -3 }, /* (68) db_options ::= db_options BUFFER NK_INTEGER */
+ { 251, -3 }, /* (69) db_options ::= db_options CACHELAST NK_INTEGER */
+ { 251, -3 }, /* (70) db_options ::= db_options COMP NK_INTEGER */
+ { 251, -3 }, /* (71) db_options ::= db_options DAYS NK_INTEGER */
+ { 251, -3 }, /* (72) db_options ::= db_options DAYS NK_VARIABLE */
+ { 251, -3 }, /* (73) db_options ::= db_options FSYNC NK_INTEGER */
+ { 251, -3 }, /* (74) db_options ::= db_options MAXROWS NK_INTEGER */
+ { 251, -3 }, /* (75) db_options ::= db_options MINROWS NK_INTEGER */
+ { 251, -3 }, /* (76) db_options ::= db_options KEEP integer_list */
+ { 251, -3 }, /* (77) db_options ::= db_options KEEP variable_list */
+ { 251, -3 }, /* (78) db_options ::= db_options PAGES NK_INTEGER */
+ { 251, -3 }, /* (79) db_options ::= db_options PAGESIZE NK_INTEGER */
+ { 251, -3 }, /* (80) db_options ::= db_options PRECISION NK_STRING */
+ { 251, -3 }, /* (81) db_options ::= db_options REPLICA NK_INTEGER */
+ { 251, -3 }, /* (82) db_options ::= db_options STRICT NK_INTEGER */
+ { 251, -3 }, /* (83) db_options ::= db_options WAL NK_INTEGER */
+ { 251, -3 }, /* (84) db_options ::= db_options VGROUPS NK_INTEGER */
+ { 251, -3 }, /* (85) db_options ::= db_options SINGLE_STABLE NK_INTEGER */
+ { 251, -3 }, /* (86) db_options ::= db_options RETENTIONS retention_list */
+ { 251, -3 }, /* (87) db_options ::= db_options SCHEMALESS NK_INTEGER */
+ { 253, -1 }, /* (88) alter_db_options ::= alter_db_option */
+ { 253, -2 }, /* (89) alter_db_options ::= alter_db_options alter_db_option */
+ { 257, -2 }, /* (90) alter_db_option ::= BUFFER NK_INTEGER */
+ { 257, -2 }, /* (91) alter_db_option ::= CACHELAST NK_INTEGER */
+ { 257, -2 }, /* (92) alter_db_option ::= FSYNC NK_INTEGER */
+ { 257, -2 }, /* (93) alter_db_option ::= KEEP integer_list */
+ { 257, -2 }, /* (94) alter_db_option ::= KEEP variable_list */
+ { 257, -2 }, /* (95) alter_db_option ::= PAGES NK_INTEGER */
+ { 257, -2 }, /* (96) alter_db_option ::= REPLICA NK_INTEGER */
+ { 257, -2 }, /* (97) alter_db_option ::= STRICT NK_INTEGER */
+ { 257, -2 }, /* (98) alter_db_option ::= WAL NK_INTEGER */
+ { 254, -1 }, /* (99) integer_list ::= NK_INTEGER */
+ { 254, -3 }, /* (100) integer_list ::= integer_list NK_COMMA NK_INTEGER */
+ { 255, -1 }, /* (101) variable_list ::= NK_VARIABLE */
+ { 255, -3 }, /* (102) variable_list ::= variable_list NK_COMMA NK_VARIABLE */
+ { 256, -1 }, /* (103) retention_list ::= retention */
+ { 256, -3 }, /* (104) retention_list ::= retention_list NK_COMMA retention */
+ { 258, -3 }, /* (105) retention ::= NK_VARIABLE NK_COLON NK_VARIABLE */
+ { 237, -9 }, /* (106) cmd ::= CREATE TABLE not_exists_opt full_table_name NK_LP column_def_list NK_RP tags_def_opt table_options */
+ { 237, -3 }, /* (107) cmd ::= CREATE TABLE multi_create_clause */
+ { 237, -9 }, /* (108) cmd ::= CREATE STABLE not_exists_opt full_table_name NK_LP column_def_list NK_RP tags_def table_options */
+ { 237, -3 }, /* (109) cmd ::= DROP TABLE multi_drop_clause */
+ { 237, -4 }, /* (110) cmd ::= DROP STABLE exists_opt full_table_name */
+ { 237, -3 }, /* (111) cmd ::= ALTER TABLE alter_table_clause */
+ { 237, -3 }, /* (112) cmd ::= ALTER STABLE alter_table_clause */
+ { 266, -2 }, /* (113) alter_table_clause ::= full_table_name alter_table_options */
+ { 266, -5 }, /* (114) alter_table_clause ::= full_table_name ADD COLUMN column_name type_name */
+ { 266, -4 }, /* (115) alter_table_clause ::= full_table_name DROP COLUMN column_name */
+ { 266, -5 }, /* (116) alter_table_clause ::= full_table_name MODIFY COLUMN column_name type_name */
+ { 266, -5 }, /* (117) alter_table_clause ::= full_table_name RENAME COLUMN column_name column_name */
+ { 266, -5 }, /* (118) alter_table_clause ::= full_table_name ADD TAG column_name type_name */
+ { 266, -4 }, /* (119) alter_table_clause ::= full_table_name DROP TAG column_name */
+ { 266, -5 }, /* (120) alter_table_clause ::= full_table_name MODIFY TAG column_name type_name */
+ { 266, -5 }, /* (121) alter_table_clause ::= full_table_name RENAME TAG column_name column_name */
+ { 266, -6 }, /* (122) alter_table_clause ::= full_table_name SET TAG column_name NK_EQ signed_literal */
+ { 263, -1 }, /* (123) multi_create_clause ::= create_subtable_clause */
+ { 263, -2 }, /* (124) multi_create_clause ::= multi_create_clause create_subtable_clause */
+ { 271, -10 }, /* (125) create_subtable_clause ::= not_exists_opt full_table_name USING full_table_name specific_tags_opt TAGS NK_LP literal_list NK_RP table_options */
+ { 265, -1 }, /* (126) multi_drop_clause ::= drop_table_clause */
+ { 265, -2 }, /* (127) multi_drop_clause ::= multi_drop_clause drop_table_clause */
+ { 274, -2 }, /* (128) drop_table_clause ::= exists_opt full_table_name */
+ { 272, 0 }, /* (129) specific_tags_opt ::= */
+ { 272, -3 }, /* (130) specific_tags_opt ::= NK_LP col_name_list NK_RP */
+ { 259, -1 }, /* (131) full_table_name ::= table_name */
+ { 259, -3 }, /* (132) full_table_name ::= db_name NK_DOT table_name */
+ { 260, -1 }, /* (133) column_def_list ::= column_def */
+ { 260, -3 }, /* (134) column_def_list ::= column_def_list NK_COMMA column_def */
+ { 277, -2 }, /* (135) column_def ::= column_name type_name */
+ { 277, -4 }, /* (136) column_def ::= column_name type_name COMMENT NK_STRING */
+ { 269, -1 }, /* (137) type_name ::= BOOL */
+ { 269, -1 }, /* (138) type_name ::= TINYINT */
+ { 269, -1 }, /* (139) type_name ::= SMALLINT */
+ { 269, -1 }, /* (140) type_name ::= INT */
+ { 269, -1 }, /* (141) type_name ::= INTEGER */
+ { 269, -1 }, /* (142) type_name ::= BIGINT */
+ { 269, -1 }, /* (143) type_name ::= FLOAT */
+ { 269, -1 }, /* (144) type_name ::= DOUBLE */
+ { 269, -4 }, /* (145) type_name ::= BINARY NK_LP NK_INTEGER NK_RP */
+ { 269, -1 }, /* (146) type_name ::= TIMESTAMP */
+ { 269, -4 }, /* (147) type_name ::= NCHAR NK_LP NK_INTEGER NK_RP */
+ { 269, -2 }, /* (148) type_name ::= TINYINT UNSIGNED */
+ { 269, -2 }, /* (149) type_name ::= SMALLINT UNSIGNED */
+ { 269, -2 }, /* (150) type_name ::= INT UNSIGNED */
+ { 269, -2 }, /* (151) type_name ::= BIGINT UNSIGNED */
+ { 269, -1 }, /* (152) type_name ::= JSON */
+ { 269, -4 }, /* (153) type_name ::= VARCHAR NK_LP NK_INTEGER NK_RP */
+ { 269, -1 }, /* (154) type_name ::= MEDIUMBLOB */
+ { 269, -1 }, /* (155) type_name ::= BLOB */
+ { 269, -4 }, /* (156) type_name ::= VARBINARY NK_LP NK_INTEGER NK_RP */
+ { 269, -1 }, /* (157) type_name ::= DECIMAL */
+ { 269, -4 }, /* (158) type_name ::= DECIMAL NK_LP NK_INTEGER NK_RP */
+ { 269, -6 }, /* (159) type_name ::= DECIMAL NK_LP NK_INTEGER NK_COMMA NK_INTEGER NK_RP */
+ { 261, 0 }, /* (160) tags_def_opt ::= */
+ { 261, -1 }, /* (161) tags_def_opt ::= tags_def */
+ { 264, -4 }, /* (162) tags_def ::= TAGS NK_LP column_def_list NK_RP */
+ { 262, 0 }, /* (163) table_options ::= */
+ { 262, -3 }, /* (164) table_options ::= table_options COMMENT NK_STRING */
+ { 262, -3 }, /* (165) table_options ::= table_options FILE_FACTOR NK_FLOAT */
+ { 262, -5 }, /* (166) table_options ::= table_options ROLLUP NK_LP func_name_list NK_RP */
+ { 262, -3 }, /* (167) table_options ::= table_options TTL NK_INTEGER */
+ { 262, -5 }, /* (168) table_options ::= table_options SMA NK_LP col_name_list NK_RP */
+ { 267, -1 }, /* (169) alter_table_options ::= alter_table_option */
+ { 267, -2 }, /* (170) alter_table_options ::= alter_table_options alter_table_option */
+ { 279, -2 }, /* (171) alter_table_option ::= COMMENT NK_STRING */
+ { 279, -2 }, /* (172) alter_table_option ::= TTL NK_INTEGER */
+ { 275, -1 }, /* (173) col_name_list ::= col_name */
+ { 275, -3 }, /* (174) col_name_list ::= col_name_list NK_COMMA col_name */
+ { 280, -1 }, /* (175) col_name ::= column_name */
+ { 237, -2 }, /* (176) cmd ::= SHOW DNODES */
+ { 237, -2 }, /* (177) cmd ::= SHOW USERS */
+ { 237, -2 }, /* (178) cmd ::= SHOW DATABASES */
+ { 237, -4 }, /* (179) cmd ::= SHOW db_name_cond_opt TABLES like_pattern_opt */
+ { 237, -4 }, /* (180) cmd ::= SHOW db_name_cond_opt STABLES like_pattern_opt */
+ { 237, -3 }, /* (181) cmd ::= SHOW db_name_cond_opt VGROUPS */
+ { 237, -2 }, /* (182) cmd ::= SHOW MNODES */
+ { 237, -2 }, /* (183) cmd ::= SHOW MODULES */
+ { 237, -2 }, /* (184) cmd ::= SHOW QNODES */
+ { 237, -2 }, /* (185) cmd ::= SHOW FUNCTIONS */
+ { 237, -5 }, /* (186) cmd ::= SHOW INDEXES FROM table_name_cond from_db_opt */
+ { 237, -2 }, /* (187) cmd ::= SHOW STREAMS */
+ { 237, -2 }, /* (188) cmd ::= SHOW ACCOUNTS */
+ { 237, -2 }, /* (189) cmd ::= SHOW APPS */
+ { 237, -2 }, /* (190) cmd ::= SHOW CONNECTIONS */
+ { 237, -2 }, /* (191) cmd ::= SHOW LICENCE */
+ { 237, -2 }, /* (192) cmd ::= SHOW GRANTS */
+ { 237, -4 }, /* (193) cmd ::= SHOW CREATE DATABASE db_name */
+ { 237, -4 }, /* (194) cmd ::= SHOW CREATE TABLE full_table_name */
+ { 237, -4 }, /* (195) cmd ::= SHOW CREATE STABLE full_table_name */
+ { 237, -2 }, /* (196) cmd ::= SHOW QUERIES */
+ { 237, -2 }, /* (197) cmd ::= SHOW SCORES */
+ { 237, -2 }, /* (198) cmd ::= SHOW TOPICS */
+ { 237, -2 }, /* (199) cmd ::= SHOW VARIABLES */
+ { 237, -2 }, /* (200) cmd ::= SHOW BNODES */
+ { 237, -2 }, /* (201) cmd ::= SHOW SNODES */
+ { 237, -2 }, /* (202) cmd ::= SHOW CLUSTER */
+ { 237, -2 }, /* (203) cmd ::= SHOW TRANSACTIONS */
+ { 281, 0 }, /* (204) db_name_cond_opt ::= */
+ { 281, -2 }, /* (205) db_name_cond_opt ::= db_name NK_DOT */
+ { 282, 0 }, /* (206) like_pattern_opt ::= */
+ { 282, -2 }, /* (207) like_pattern_opt ::= LIKE NK_STRING */
+ { 283, -1 }, /* (208) table_name_cond ::= table_name */
+ { 284, 0 }, /* (209) from_db_opt ::= */
+ { 284, -2 }, /* (210) from_db_opt ::= FROM db_name */
+ { 278, -1 }, /* (211) func_name_list ::= func_name */
+ { 278, -3 }, /* (212) func_name_list ::= func_name_list NK_COMMA func_name */
+ { 285, -1 }, /* (213) func_name ::= function_name */
+ { 237, -8 }, /* (214) cmd ::= CREATE SMA INDEX not_exists_opt index_name ON table_name index_options */
+ { 237, -10 }, /* (215) cmd ::= CREATE FULLTEXT INDEX not_exists_opt index_name ON table_name NK_LP col_name_list NK_RP */
+ { 237, -6 }, /* (216) cmd ::= DROP INDEX exists_opt index_name ON table_name */
+ { 288, 0 }, /* (217) index_options ::= */
+ { 288, -9 }, /* (218) index_options ::= FUNCTION NK_LP func_list NK_RP INTERVAL NK_LP duration_literal NK_RP sliding_opt */
+ { 288, -11 }, /* (219) index_options ::= FUNCTION NK_LP func_list NK_RP INTERVAL NK_LP duration_literal NK_COMMA duration_literal NK_RP sliding_opt */
+ { 289, -1 }, /* (220) func_list ::= func */
+ { 289, -3 }, /* (221) func_list ::= func_list NK_COMMA func */
+ { 292, -4 }, /* (222) func ::= function_name NK_LP expression_list NK_RP */
+ { 237, -6 }, /* (223) cmd ::= CREATE TOPIC not_exists_opt topic_name AS query_expression */
+ { 237, -7 }, /* (224) cmd ::=
CREATE TOPIC not_exists_opt topic_name AS DATABASE db_name */ + { 237, -7 }, /* (225) cmd ::= CREATE TOPIC not_exists_opt topic_name AS STABLE full_table_name */ + { 237, -4 }, /* (226) cmd ::= DROP TOPIC exists_opt topic_name */ + { 237, -7 }, /* (227) cmd ::= DROP CONSUMER GROUP exists_opt cgroup_name ON topic_name */ + { 237, -2 }, /* (228) cmd ::= DESC full_table_name */ + { 237, -2 }, /* (229) cmd ::= DESCRIBE full_table_name */ + { 237, -3 }, /* (230) cmd ::= RESET QUERY CACHE */ + { 237, -4 }, /* (231) cmd ::= EXPLAIN analyze_opt explain_options query_expression */ + { 297, 0 }, /* (232) analyze_opt ::= */ + { 297, -1 }, /* (233) analyze_opt ::= ANALYZE */ + { 298, 0 }, /* (234) explain_options ::= */ + { 298, -3 }, /* (235) explain_options ::= explain_options VERBOSE NK_BOOL */ + { 298, -3 }, /* (236) explain_options ::= explain_options RATIO NK_FLOAT */ + { 237, -6 }, /* (237) cmd ::= COMPACT VNODES IN NK_LP integer_list NK_RP */ + { 237, -10 }, /* (238) cmd ::= CREATE agg_func_opt FUNCTION not_exists_opt function_name AS NK_STRING OUTPUTTYPE type_name bufsize_opt */ + { 237, -4 }, /* (239) cmd ::= DROP FUNCTION exists_opt function_name */ + { 299, 0 }, /* (240) agg_func_opt ::= */ + { 299, -1 }, /* (241) agg_func_opt ::= AGGREGATE */ + { 300, 0 }, /* (242) bufsize_opt ::= */ + { 300, -2 }, /* (243) bufsize_opt ::= BUFSIZE NK_INTEGER */ + { 237, -8 }, /* (244) cmd ::= CREATE STREAM not_exists_opt stream_name stream_options into_opt AS query_expression */ + { 237, -4 }, /* (245) cmd ::= DROP STREAM exists_opt stream_name */ + { 303, 0 }, /* (246) into_opt ::= */ + { 303, -2 }, /* (247) into_opt ::= INTO full_table_name */ + { 302, 0 }, /* (248) stream_options ::= */ + { 302, -3 }, /* (249) stream_options ::= stream_options TRIGGER AT_ONCE */ + { 302, -3 }, /* (250) stream_options ::= stream_options TRIGGER WINDOW_CLOSE */ + { 302, -3 }, /* (251) stream_options ::= stream_options WATERMARK duration_literal */ + { 237, -3 }, /* (252) cmd ::= KILL CONNECTION NK_INTEGER */ + { 237, -3 }, /* (253) cmd ::= KILL QUERY NK_INTEGER */ + { 237, -3 }, /* (254) cmd ::= KILL TRANSACTION NK_INTEGER */ + { 237, -4 }, /* (255) cmd ::= MERGE VGROUP NK_INTEGER NK_INTEGER */ + { 237, -4 }, /* (256) cmd ::= REDISTRIBUTE VGROUP NK_INTEGER dnode_list */ + { 237, -3 }, /* (257) cmd ::= SPLIT VGROUP NK_INTEGER */ + { 304, -2 }, /* (258) dnode_list ::= DNODE NK_INTEGER */ + { 304, -3 }, /* (259) dnode_list ::= dnode_list DNODE NK_INTEGER */ + { 237, -3 }, /* (260) cmd ::= SYNCDB db_name REPLICA */ + { 237, -1 }, /* (261) cmd ::= query_expression */ + { 240, -1 }, /* (262) literal ::= NK_INTEGER */ + { 240, -1 }, /* (263) literal ::= NK_FLOAT */ + { 240, -1 }, /* (264) literal ::= NK_STRING */ + { 240, -1 }, /* (265) literal ::= NK_BOOL */ + { 240, -2 }, /* (266) literal ::= TIMESTAMP NK_STRING */ + { 240, -1 }, /* (267) literal ::= duration_literal */ + { 240, -1 }, /* (268) literal ::= NULL */ + { 240, -1 }, /* (269) literal ::= NK_QUESTION */ + { 290, -1 }, /* (270) duration_literal ::= NK_VARIABLE */ + { 305, -1 }, /* (271) signed ::= NK_INTEGER */ + { 305, -2 }, /* (272) signed ::= NK_PLUS NK_INTEGER */ + { 305, -2 }, /* (273) signed ::= NK_MINUS NK_INTEGER */ + { 305, -1 }, /* (274) signed ::= NK_FLOAT */ + { 305, -2 }, /* (275) signed ::= NK_PLUS NK_FLOAT */ + { 305, -2 }, /* (276) signed ::= NK_MINUS NK_FLOAT */ + { 270, -1 }, /* (277) signed_literal ::= signed */ + { 270, -1 }, /* (278) signed_literal ::= NK_STRING */ + { 270, -1 }, /* (279) signed_literal ::= NK_BOOL */ + { 270, -2 }, /* (280) 
signed_literal ::= TIMESTAMP NK_STRING */ + { 270, -1 }, /* (281) signed_literal ::= duration_literal */ + { 270, -1 }, /* (282) signed_literal ::= NULL */ + { 270, -1 }, /* (283) signed_literal ::= literal_func */ + { 273, -1 }, /* (284) literal_list ::= signed_literal */ + { 273, -3 }, /* (285) literal_list ::= literal_list NK_COMMA signed_literal */ + { 247, -1 }, /* (286) db_name ::= NK_ID */ + { 276, -1 }, /* (287) table_name ::= NK_ID */ + { 268, -1 }, /* (288) column_name ::= NK_ID */ + { 286, -1 }, /* (289) function_name ::= NK_ID */ + { 307, -1 }, /* (290) table_alias ::= NK_ID */ + { 308, -1 }, /* (291) column_alias ::= NK_ID */ + { 242, -1 }, /* (292) user_name ::= NK_ID */ + { 287, -1 }, /* (293) index_name ::= NK_ID */ + { 294, -1 }, /* (294) topic_name ::= NK_ID */ + { 301, -1 }, /* (295) stream_name ::= NK_ID */ + { 296, -1 }, /* (296) cgroup_name ::= NK_ID */ + { 309, -1 }, /* (297) expression ::= literal */ + { 309, -1 }, /* (298) expression ::= pseudo_column */ + { 309, -1 }, /* (299) expression ::= column_reference */ + { 309, -1 }, /* (300) expression ::= function_expression */ + { 309, -1 }, /* (301) expression ::= subquery */ + { 309, -3 }, /* (302) expression ::= NK_LP expression NK_RP */ + { 309, -2 }, /* (303) expression ::= NK_PLUS expression */ + { 309, -2 }, /* (304) expression ::= NK_MINUS expression */ + { 309, -3 }, /* (305) expression ::= expression NK_PLUS expression */ + { 309, -3 }, /* (306) expression ::= expression NK_MINUS expression */ + { 309, -3 }, /* (307) expression ::= expression NK_STAR expression */ + { 309, -3 }, /* (308) expression ::= expression NK_SLASH expression */ + { 309, -3 }, /* (309) expression ::= expression NK_REM expression */ + { 309, -3 }, /* (310) expression ::= column_reference NK_ARROW NK_STRING */ + { 293, -1 }, /* (311) expression_list ::= expression */ + { 293, -3 }, /* (312) expression_list ::= expression_list NK_COMMA expression */ + { 311, -1 }, /* (313) column_reference ::= column_name */ + { 311, -3 }, /* (314) column_reference ::= table_name NK_DOT column_name */ + { 310, -1 }, /* (315) pseudo_column ::= ROWTS */ + { 310, -1 }, /* (316) pseudo_column ::= TBNAME */ + { 310, -3 }, /* (317) pseudo_column ::= table_name NK_DOT TBNAME */ + { 310, -1 }, /* (318) pseudo_column ::= QSTARTTS */ + { 310, -1 }, /* (319) pseudo_column ::= QENDTS */ + { 310, -1 }, /* (320) pseudo_column ::= WSTARTTS */ + { 310, -1 }, /* (321) pseudo_column ::= WENDTS */ + { 310, -1 }, /* (322) pseudo_column ::= WDURATION */ + { 312, -4 }, /* (323) function_expression ::= function_name NK_LP expression_list NK_RP */ + { 312, -4 }, /* (324) function_expression ::= star_func NK_LP star_func_para_list NK_RP */ + { 312, -6 }, /* (325) function_expression ::= CAST NK_LP expression AS type_name NK_RP */ + { 312, -1 }, /* (326) function_expression ::= literal_func */ + { 306, -3 }, /* (327) literal_func ::= noarg_func NK_LP NK_RP */ + { 306, -1 }, /* (328) literal_func ::= NOW */ + { 316, -1 }, /* (329) noarg_func ::= NOW */ + { 316, -1 }, /* (330) noarg_func ::= TODAY */ + { 316, -1 }, /* (331) noarg_func ::= TIMEZONE */ + { 314, -1 }, /* (332) star_func ::= COUNT */ + { 314, -1 }, /* (333) star_func ::= FIRST */ + { 314, -1 }, /* (334) star_func ::= LAST */ + { 314, -1 }, /* (335) star_func ::= LAST_ROW */ + { 315, -1 }, /* (336) star_func_para_list ::= NK_STAR */ + { 315, -1 }, /* (337) star_func_para_list ::= other_para_list */ + { 317, -1 }, /* (338) other_para_list ::= star_func_para */ + { 317, -3 }, /* (339) other_para_list ::= other_para_list 
NK_COMMA star_func_para */ + { 318, -1 }, /* (340) star_func_para ::= expression */ + { 318, -3 }, /* (341) star_func_para ::= table_name NK_DOT NK_STAR */ + { 319, -3 }, /* (342) predicate ::= expression compare_op expression */ + { 319, -5 }, /* (343) predicate ::= expression BETWEEN expression AND expression */ + { 319, -6 }, /* (344) predicate ::= expression NOT BETWEEN expression AND expression */ + { 319, -3 }, /* (345) predicate ::= expression IS NULL */ + { 319, -4 }, /* (346) predicate ::= expression IS NOT NULL */ + { 319, -3 }, /* (347) predicate ::= expression in_op in_predicate_value */ + { 320, -1 }, /* (348) compare_op ::= NK_LT */ + { 320, -1 }, /* (349) compare_op ::= NK_GT */ + { 320, -1 }, /* (350) compare_op ::= NK_LE */ + { 320, -1 }, /* (351) compare_op ::= NK_GE */ + { 320, -1 }, /* (352) compare_op ::= NK_NE */ + { 320, -1 }, /* (353) compare_op ::= NK_EQ */ + { 320, -1 }, /* (354) compare_op ::= LIKE */ + { 320, -2 }, /* (355) compare_op ::= NOT LIKE */ + { 320, -1 }, /* (356) compare_op ::= MATCH */ + { 320, -1 }, /* (357) compare_op ::= NMATCH */ + { 320, -1 }, /* (358) compare_op ::= CONTAINS */ + { 321, -1 }, /* (359) in_op ::= IN */ + { 321, -2 }, /* (360) in_op ::= NOT IN */ + { 322, -3 }, /* (361) in_predicate_value ::= NK_LP expression_list NK_RP */ + { 323, -1 }, /* (362) boolean_value_expression ::= boolean_primary */ + { 323, -2 }, /* (363) boolean_value_expression ::= NOT boolean_primary */ + { 323, -3 }, /* (364) boolean_value_expression ::= boolean_value_expression OR boolean_value_expression */ + { 323, -3 }, /* (365) boolean_value_expression ::= boolean_value_expression AND boolean_value_expression */ + { 324, -1 }, /* (366) boolean_primary ::= predicate */ + { 324, -3 }, /* (367) boolean_primary ::= NK_LP boolean_value_expression NK_RP */ + { 325, -1 }, /* (368) common_expression ::= expression */ + { 325, -1 }, /* (369) common_expression ::= boolean_value_expression */ + { 326, -2 }, /* (370) from_clause ::= FROM table_reference_list */ + { 327, -1 }, /* (371) table_reference_list ::= table_reference */ + { 327, -3 }, /* (372) table_reference_list ::= table_reference_list NK_COMMA table_reference */ + { 328, -1 }, /* (373) table_reference ::= table_primary */ + { 328, -1 }, /* (374) table_reference ::= joined_table */ + { 329, -2 }, /* (375) table_primary ::= table_name alias_opt */ + { 329, -4 }, /* (376) table_primary ::= db_name NK_DOT table_name alias_opt */ + { 329, -2 }, /* (377) table_primary ::= subquery alias_opt */ + { 329, -1 }, /* (378) table_primary ::= parenthesized_joined_table */ + { 331, 0 }, /* (379) alias_opt ::= */ + { 331, -1 }, /* (380) alias_opt ::= table_alias */ + { 331, -2 }, /* (381) alias_opt ::= AS table_alias */ + { 332, -3 }, /* (382) parenthesized_joined_table ::= NK_LP joined_table NK_RP */ + { 332, -3 }, /* (383) parenthesized_joined_table ::= NK_LP parenthesized_joined_table NK_RP */ + { 330, -6 }, /* (384) joined_table ::= table_reference join_type JOIN table_reference ON search_condition */ + { 333, 0 }, /* (385) join_type ::= */ + { 333, -1 }, /* (386) join_type ::= INNER */ + { 335, -9 }, /* (387) query_specification ::= SELECT set_quantifier_opt select_list from_clause where_clause_opt partition_by_clause_opt twindow_clause_opt group_by_clause_opt having_clause_opt */ + { 336, 0 }, /* (388) set_quantifier_opt ::= */ + { 336, -1 }, /* (389) set_quantifier_opt ::= DISTINCT */ + { 336, -1 }, /* (390) set_quantifier_opt ::= ALL */ + { 337, -1 }, /* (391) select_list ::= NK_STAR */ + { 337, -1 }, /* (392) 
select_list ::= select_sublist */ + { 343, -1 }, /* (393) select_sublist ::= select_item */ + { 343, -3 }, /* (394) select_sublist ::= select_sublist NK_COMMA select_item */ + { 344, -1 }, /* (395) select_item ::= common_expression */ + { 344, -2 }, /* (396) select_item ::= common_expression column_alias */ + { 344, -3 }, /* (397) select_item ::= common_expression AS column_alias */ + { 344, -3 }, /* (398) select_item ::= table_name NK_DOT NK_STAR */ + { 338, 0 }, /* (399) where_clause_opt ::= */ + { 338, -2 }, /* (400) where_clause_opt ::= WHERE search_condition */ + { 339, 0 }, /* (401) partition_by_clause_opt ::= */ + { 339, -3 }, /* (402) partition_by_clause_opt ::= PARTITION BY expression_list */ + { 340, 0 }, /* (403) twindow_clause_opt ::= */ + { 340, -6 }, /* (404) twindow_clause_opt ::= SESSION NK_LP column_reference NK_COMMA duration_literal NK_RP */ + { 340, -4 }, /* (405) twindow_clause_opt ::= STATE_WINDOW NK_LP expression NK_RP */ + { 340, -6 }, /* (406) twindow_clause_opt ::= INTERVAL NK_LP duration_literal NK_RP sliding_opt fill_opt */ + { 340, -8 }, /* (407) twindow_clause_opt ::= INTERVAL NK_LP duration_literal NK_COMMA duration_literal NK_RP sliding_opt fill_opt */ + { 291, 0 }, /* (408) sliding_opt ::= */ + { 291, -4 }, /* (409) sliding_opt ::= SLIDING NK_LP duration_literal NK_RP */ + { 345, 0 }, /* (410) fill_opt ::= */ + { 345, -4 }, /* (411) fill_opt ::= FILL NK_LP fill_mode NK_RP */ + { 345, -6 }, /* (412) fill_opt ::= FILL NK_LP VALUE NK_COMMA literal_list NK_RP */ + { 346, -1 }, /* (413) fill_mode ::= NONE */ + { 346, -1 }, /* (414) fill_mode ::= PREV */ + { 346, -1 }, /* (415) fill_mode ::= NULL */ + { 346, -1 }, /* (416) fill_mode ::= LINEAR */ + { 346, -1 }, /* (417) fill_mode ::= NEXT */ + { 341, 0 }, /* (418) group_by_clause_opt ::= */ + { 341, -3 }, /* (419) group_by_clause_opt ::= GROUP BY group_by_list */ + { 347, -1 }, /* (420) group_by_list ::= expression */ + { 347, -3 }, /* (421) group_by_list ::= group_by_list NK_COMMA expression */ + { 342, 0 }, /* (422) having_clause_opt ::= */ + { 342, -2 }, /* (423) having_clause_opt ::= HAVING search_condition */ + { 295, -4 }, /* (424) query_expression ::= query_expression_body order_by_clause_opt slimit_clause_opt limit_clause_opt */ + { 348, -1 }, /* (425) query_expression_body ::= query_primary */ + { 348, -4 }, /* (426) query_expression_body ::= query_expression_body UNION ALL query_expression_body */ + { 348, -3 }, /* (427) query_expression_body ::= query_expression_body UNION query_expression_body */ + { 352, -1 }, /* (428) query_primary ::= query_specification */ + { 352, -6 }, /* (429) query_primary ::= NK_LP query_expression_body order_by_clause_opt slimit_clause_opt limit_clause_opt NK_RP */ + { 349, 0 }, /* (430) order_by_clause_opt ::= */ + { 349, -3 }, /* (431) order_by_clause_opt ::= ORDER BY sort_specification_list */ + { 350, 0 }, /* (432) slimit_clause_opt ::= */ + { 350, -2 }, /* (433) slimit_clause_opt ::= SLIMIT NK_INTEGER */ + { 350, -4 }, /* (434) slimit_clause_opt ::= SLIMIT NK_INTEGER SOFFSET NK_INTEGER */ + { 350, -4 }, /* (435) slimit_clause_opt ::= SLIMIT NK_INTEGER NK_COMMA NK_INTEGER */ + { 351, 0 }, /* (436) limit_clause_opt ::= */ + { 351, -2 }, /* (437) limit_clause_opt ::= LIMIT NK_INTEGER */ + { 351, -4 }, /* (438) limit_clause_opt ::= LIMIT NK_INTEGER OFFSET NK_INTEGER */ + { 351, -4 }, /* (439) limit_clause_opt ::= LIMIT NK_INTEGER NK_COMMA NK_INTEGER */ + { 313, -3 }, /* (440) subquery ::= NK_LP query_expression NK_RP */ + { 334, -1 }, /* (441) search_condition ::= 
common_expression */ + { 353, -1 }, /* (442) sort_specification_list ::= sort_specification */ + { 353, -3 }, /* (443) sort_specification_list ::= sort_specification_list NK_COMMA sort_specification */ + { 354, -3 }, /* (444) sort_specification ::= expression ordering_specification_opt null_ordering_opt */ + { 355, 0 }, /* (445) ordering_specification_opt ::= */ + { 355, -1 }, /* (446) ordering_specification_opt ::= ASC */ + { 355, -1 }, /* (447) ordering_specification_opt ::= DESC */ + { 356, 0 }, /* (448) null_ordering_opt ::= */ + { 356, -2 }, /* (449) null_ordering_opt ::= NULLS FIRST */ + { 356, -2 }, /* (450) null_ordering_opt ::= NULLS LAST */ }; static void yy_accept(yyParser*); /* Forward Declaration */ @@ -3114,11 +3092,11 @@ static YYACTIONTYPE yy_reduce( YYMINORTYPE yylhsminor; case 0: /* cmd ::= CREATE ACCOUNT NK_ID PASS NK_STRING account_options */ { pCxt->errCode = generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_EXPRIE_STATEMENT); } - yy_destructor(yypParser,241,&yymsp[0].minor); + yy_destructor(yypParser,238,&yymsp[0].minor); break; case 1: /* cmd ::= ALTER ACCOUNT NK_ID alter_account_options */ { pCxt->errCode = generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_EXPRIE_STATEMENT); } - yy_destructor(yypParser,242,&yymsp[0].minor); + yy_destructor(yypParser,239,&yymsp[0].minor); break; case 2: /* account_options ::= */ { } @@ -3132,20 +3110,20 @@ static YYACTIONTYPE yy_reduce( case 9: /* account_options ::= account_options USERS literal */ yytestcase(yyruleno==9); case 10: /* account_options ::= account_options CONNS literal */ yytestcase(yyruleno==10); case 11: /* account_options ::= account_options STATE literal */ yytestcase(yyruleno==11); -{ yy_destructor(yypParser,241,&yymsp[-2].minor); +{ yy_destructor(yypParser,238,&yymsp[-2].minor); { } - yy_destructor(yypParser,243,&yymsp[0].minor); + yy_destructor(yypParser,240,&yymsp[0].minor); } break; case 12: /* alter_account_options ::= alter_account_option */ -{ yy_destructor(yypParser,244,&yymsp[0].minor); +{ yy_destructor(yypParser,241,&yymsp[0].minor); { } } break; case 13: /* alter_account_options ::= alter_account_options alter_account_option */ -{ yy_destructor(yypParser,242,&yymsp[-1].minor); +{ yy_destructor(yypParser,239,&yymsp[-1].minor); { } - yy_destructor(yypParser,244,&yymsp[0].minor); + yy_destructor(yypParser,241,&yymsp[0].minor); } break; case 14: /* alter_account_option ::= PASS literal */ @@ -3159,63 +3137,63 @@ static YYACTIONTYPE yy_reduce( case 22: /* alter_account_option ::= CONNS literal */ yytestcase(yyruleno==22); case 23: /* alter_account_option ::= STATE literal */ yytestcase(yyruleno==23); { } - yy_destructor(yypParser,243,&yymsp[0].minor); + yy_destructor(yypParser,240,&yymsp[0].minor); break; case 24: /* cmd ::= CREATE USER user_name PASS NK_STRING */ -{ pCxt->pRootNode = createCreateUserStmt(pCxt, &yymsp[-2].minor.yy53, &yymsp[0].minor.yy0); } +{ pCxt->pRootNode = createCreateUserStmt(pCxt, &yymsp[-2].minor.yy113, &yymsp[0].minor.yy0); } break; case 25: /* cmd ::= ALTER USER user_name PASS NK_STRING */ -{ pCxt->pRootNode = createAlterUserStmt(pCxt, &yymsp[-2].minor.yy53, TSDB_ALTER_USER_PASSWD, &yymsp[0].minor.yy0); } +{ pCxt->pRootNode = createAlterUserStmt(pCxt, &yymsp[-2].minor.yy113, TSDB_ALTER_USER_PASSWD, &yymsp[0].minor.yy0); } break; case 26: /* cmd ::= ALTER USER user_name PRIVILEGE NK_STRING */ -{ pCxt->pRootNode = createAlterUserStmt(pCxt, &yymsp[-2].minor.yy53, TSDB_ALTER_USER_PRIVILEGES, &yymsp[0].minor.yy0); } +{ pCxt->pRootNode = createAlterUserStmt(pCxt, &yymsp[-2].minor.yy113, 
TSDB_ALTER_USER_PRIVILEGES, &yymsp[0].minor.yy0); } break; case 27: /* cmd ::= DROP USER user_name */ -{ pCxt->pRootNode = createDropUserStmt(pCxt, &yymsp[0].minor.yy53); } +{ pCxt->pRootNode = createDropUserStmt(pCxt, &yymsp[0].minor.yy113); } break; case 28: /* cmd ::= GRANT privileges ON priv_level TO user_name */ -{ pCxt->pRootNode = createGrantStmt(pCxt, yymsp[-4].minor.yy435, &yymsp[-2].minor.yy53, &yymsp[0].minor.yy53); } +{ pCxt->pRootNode = createGrantStmt(pCxt, yymsp[-4].minor.yy123, &yymsp[-2].minor.yy113, &yymsp[0].minor.yy113); } break; case 29: /* cmd ::= REVOKE privileges ON priv_level FROM user_name */ -{ pCxt->pRootNode = createRevokeStmt(pCxt, yymsp[-4].minor.yy435, &yymsp[-2].minor.yy53, &yymsp[0].minor.yy53); } +{ pCxt->pRootNode = createRevokeStmt(pCxt, yymsp[-4].minor.yy123, &yymsp[-2].minor.yy113, &yymsp[0].minor.yy113); } break; case 30: /* privileges ::= ALL */ -{ yymsp[0].minor.yy435 = PRIVILEGE_TYPE_ALL; } +{ yymsp[0].minor.yy123 = PRIVILEGE_TYPE_ALL; } break; case 31: /* privileges ::= priv_type_list */ case 32: /* priv_type_list ::= priv_type */ yytestcase(yyruleno==32); -{ yylhsminor.yy435 = yymsp[0].minor.yy435; } - yymsp[0].minor.yy435 = yylhsminor.yy435; +{ yylhsminor.yy123 = yymsp[0].minor.yy123; } + yymsp[0].minor.yy123 = yylhsminor.yy123; break; case 33: /* priv_type_list ::= priv_type_list NK_COMMA priv_type */ -{ yylhsminor.yy435 = yymsp[-2].minor.yy435 | yymsp[0].minor.yy435; } - yymsp[-2].minor.yy435 = yylhsminor.yy435; +{ yylhsminor.yy123 = yymsp[-2].minor.yy123 | yymsp[0].minor.yy123; } + yymsp[-2].minor.yy123 = yylhsminor.yy123; break; case 34: /* priv_type ::= READ */ -{ yymsp[0].minor.yy435 = PRIVILEGE_TYPE_READ; } +{ yymsp[0].minor.yy123 = PRIVILEGE_TYPE_READ; } break; case 35: /* priv_type ::= WRITE */ -{ yymsp[0].minor.yy435 = PRIVILEGE_TYPE_WRITE; } +{ yymsp[0].minor.yy123 = PRIVILEGE_TYPE_WRITE; } break; case 36: /* priv_level ::= NK_STAR NK_DOT NK_STAR */ -{ yylhsminor.yy53 = yymsp[-2].minor.yy0; } - yymsp[-2].minor.yy53 = yylhsminor.yy53; +{ yylhsminor.yy113 = yymsp[-2].minor.yy0; } + yymsp[-2].minor.yy113 = yylhsminor.yy113; break; case 37: /* priv_level ::= db_name NK_DOT NK_STAR */ -{ yylhsminor.yy53 = yymsp[-2].minor.yy53; } - yymsp[-2].minor.yy53 = yylhsminor.yy53; +{ yylhsminor.yy113 = yymsp[-2].minor.yy113; } + yymsp[-2].minor.yy113 = yylhsminor.yy113; break; case 38: /* cmd ::= CREATE DNODE dnode_endpoint */ -{ pCxt->pRootNode = createCreateDnodeStmt(pCxt, &yymsp[0].minor.yy53, NULL); } +{ pCxt->pRootNode = createCreateDnodeStmt(pCxt, &yymsp[0].minor.yy113, NULL); } break; case 39: /* cmd ::= CREATE DNODE dnode_host_name PORT NK_INTEGER */ -{ pCxt->pRootNode = createCreateDnodeStmt(pCxt, &yymsp[-2].minor.yy53, &yymsp[0].minor.yy0); } +{ pCxt->pRootNode = createCreateDnodeStmt(pCxt, &yymsp[-2].minor.yy113, &yymsp[0].minor.yy0); } break; case 40: /* cmd ::= DROP DNODE NK_INTEGER */ { pCxt->pRootNode = createDropDnodeStmt(pCxt, &yymsp[0].minor.yy0); } break; case 41: /* cmd ::= DROP DNODE dnode_endpoint */ -{ pCxt->pRootNode = createDropDnodeStmt(pCxt, &yymsp[0].minor.yy53); } +{ pCxt->pRootNode = createDropDnodeStmt(pCxt, &yymsp[0].minor.yy113); } break; case 42: /* cmd ::= ALTER DNODE NK_INTEGER NK_STRING */ { pCxt->pRootNode = createAlterDnodeStmt(pCxt, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy0, NULL); } @@ -3232,26 +3210,26 @@ static YYACTIONTYPE yy_reduce( case 46: /* dnode_endpoint ::= NK_STRING */ case 47: /* dnode_host_name ::= NK_ID */ yytestcase(yyruleno==47); case 48: /* dnode_host_name ::= NK_IPTOKEN */ 
yytestcase(yyruleno==48); - case 290: /* db_name ::= NK_ID */ yytestcase(yyruleno==290); - case 291: /* table_name ::= NK_ID */ yytestcase(yyruleno==291); - case 292: /* column_name ::= NK_ID */ yytestcase(yyruleno==292); - case 293: /* function_name ::= NK_ID */ yytestcase(yyruleno==293); - case 294: /* table_alias ::= NK_ID */ yytestcase(yyruleno==294); - case 295: /* column_alias ::= NK_ID */ yytestcase(yyruleno==295); - case 296: /* user_name ::= NK_ID */ yytestcase(yyruleno==296); - case 297: /* index_name ::= NK_ID */ yytestcase(yyruleno==297); - case 298: /* topic_name ::= NK_ID */ yytestcase(yyruleno==298); - case 299: /* stream_name ::= NK_ID */ yytestcase(yyruleno==299); - case 300: /* cgroup_name ::= NK_ID */ yytestcase(yyruleno==300); - case 333: /* noarg_func ::= NOW */ yytestcase(yyruleno==333); - case 334: /* noarg_func ::= TODAY */ yytestcase(yyruleno==334); - case 335: /* noarg_func ::= TIMEZONE */ yytestcase(yyruleno==335); - case 336: /* star_func ::= COUNT */ yytestcase(yyruleno==336); - case 337: /* star_func ::= FIRST */ yytestcase(yyruleno==337); - case 338: /* star_func ::= LAST */ yytestcase(yyruleno==338); - case 339: /* star_func ::= LAST_ROW */ yytestcase(yyruleno==339); -{ yylhsminor.yy53 = yymsp[0].minor.yy0; } - yymsp[0].minor.yy53 = yylhsminor.yy53; + case 286: /* db_name ::= NK_ID */ yytestcase(yyruleno==286); + case 287: /* table_name ::= NK_ID */ yytestcase(yyruleno==287); + case 288: /* column_name ::= NK_ID */ yytestcase(yyruleno==288); + case 289: /* function_name ::= NK_ID */ yytestcase(yyruleno==289); + case 290: /* table_alias ::= NK_ID */ yytestcase(yyruleno==290); + case 291: /* column_alias ::= NK_ID */ yytestcase(yyruleno==291); + case 292: /* user_name ::= NK_ID */ yytestcase(yyruleno==292); + case 293: /* index_name ::= NK_ID */ yytestcase(yyruleno==293); + case 294: /* topic_name ::= NK_ID */ yytestcase(yyruleno==294); + case 295: /* stream_name ::= NK_ID */ yytestcase(yyruleno==295); + case 296: /* cgroup_name ::= NK_ID */ yytestcase(yyruleno==296); + case 329: /* noarg_func ::= NOW */ yytestcase(yyruleno==329); + case 330: /* noarg_func ::= TODAY */ yytestcase(yyruleno==330); + case 331: /* noarg_func ::= TIMEZONE */ yytestcase(yyruleno==331); + case 332: /* star_func ::= COUNT */ yytestcase(yyruleno==332); + case 333: /* star_func ::= FIRST */ yytestcase(yyruleno==333); + case 334: /* star_func ::= LAST */ yytestcase(yyruleno==334); + case 335: /* star_func ::= LAST_ROW */ yytestcase(yyruleno==335); +{ yylhsminor.yy113 = yymsp[0].minor.yy0; } + yymsp[0].minor.yy113 = yylhsminor.yy113; break; case 49: /* cmd ::= ALTER LOCAL NK_STRING */ { pCxt->pRootNode = createAlterLocalStmt(pCxt, &yymsp[0].minor.yy0, NULL); } @@ -3284,1161 +3262,1145 @@ static YYACTIONTYPE yy_reduce( { pCxt->pRootNode = createDropComponentNodeStmt(pCxt, QUERY_NODE_DROP_MNODE_STMT, &yymsp[0].minor.yy0); } break; case 59: /* cmd ::= CREATE DATABASE not_exists_opt db_name db_options */ -{ pCxt->pRootNode = createCreateDatabaseStmt(pCxt, yymsp[-2].minor.yy603, &yymsp[-1].minor.yy53, yymsp[0].minor.yy636); } +{ pCxt->pRootNode = createCreateDatabaseStmt(pCxt, yymsp[-2].minor.yy131, &yymsp[-1].minor.yy113, yymsp[0].minor.yy686); } break; case 60: /* cmd ::= DROP DATABASE exists_opt db_name */ -{ pCxt->pRootNode = createDropDatabaseStmt(pCxt, yymsp[-1].minor.yy603, &yymsp[0].minor.yy53); } +{ pCxt->pRootNode = createDropDatabaseStmt(pCxt, yymsp[-1].minor.yy131, &yymsp[0].minor.yy113); } break; case 61: /* cmd ::= USE db_name */ -{ pCxt->pRootNode = createUseDatabaseStmt(pCxt, 
&yymsp[0].minor.yy53); } +{ pCxt->pRootNode = createUseDatabaseStmt(pCxt, &yymsp[0].minor.yy113); } break; case 62: /* cmd ::= ALTER DATABASE db_name alter_db_options */ -{ pCxt->pRootNode = createAlterDatabaseStmt(pCxt, &yymsp[-1].minor.yy53, yymsp[0].minor.yy636); } +{ pCxt->pRootNode = createAlterDatabaseStmt(pCxt, &yymsp[-1].minor.yy113, yymsp[0].minor.yy686); } break; case 63: /* not_exists_opt ::= IF NOT EXISTS */ -{ yymsp[-2].minor.yy603 = true; } +{ yymsp[-2].minor.yy131 = true; } break; case 64: /* not_exists_opt ::= */ case 66: /* exists_opt ::= */ yytestcase(yyruleno==66); - case 236: /* analyze_opt ::= */ yytestcase(yyruleno==236); - case 244: /* agg_func_opt ::= */ yytestcase(yyruleno==244); - case 392: /* set_quantifier_opt ::= */ yytestcase(yyruleno==392); -{ yymsp[1].minor.yy603 = false; } + case 232: /* analyze_opt ::= */ yytestcase(yyruleno==232); + case 240: /* agg_func_opt ::= */ yytestcase(yyruleno==240); + case 388: /* set_quantifier_opt ::= */ yytestcase(yyruleno==388); +{ yymsp[1].minor.yy131 = false; } break; case 65: /* exists_opt ::= IF EXISTS */ -{ yymsp[-1].minor.yy603 = true; } +{ yymsp[-1].minor.yy131 = true; } break; case 67: /* db_options ::= */ -{ yymsp[1].minor.yy636 = createDefaultDatabaseOptions(pCxt); } +{ yymsp[1].minor.yy686 = createDefaultDatabaseOptions(pCxt); } break; case 68: /* db_options ::= db_options BUFFER NK_INTEGER */ -{ yylhsminor.yy636 = setDatabaseOption(pCxt, yymsp[-2].minor.yy636, DB_OPTION_BUFFER, &yymsp[0].minor.yy0); } - yymsp[-2].minor.yy636 = yylhsminor.yy636; +{ yylhsminor.yy686 = setDatabaseOption(pCxt, yymsp[-2].minor.yy686, DB_OPTION_BUFFER, &yymsp[0].minor.yy0); } + yymsp[-2].minor.yy686 = yylhsminor.yy686; break; case 69: /* db_options ::= db_options CACHELAST NK_INTEGER */ -{ yylhsminor.yy636 = setDatabaseOption(pCxt, yymsp[-2].minor.yy636, DB_OPTION_CACHELAST, &yymsp[0].minor.yy0); } - yymsp[-2].minor.yy636 = yylhsminor.yy636; +{ yylhsminor.yy686 = setDatabaseOption(pCxt, yymsp[-2].minor.yy686, DB_OPTION_CACHELAST, &yymsp[0].minor.yy0); } + yymsp[-2].minor.yy686 = yylhsminor.yy686; break; case 70: /* db_options ::= db_options COMP NK_INTEGER */ -{ yylhsminor.yy636 = setDatabaseOption(pCxt, yymsp[-2].minor.yy636, DB_OPTION_COMP, &yymsp[0].minor.yy0); } - yymsp[-2].minor.yy636 = yylhsminor.yy636; +{ yylhsminor.yy686 = setDatabaseOption(pCxt, yymsp[-2].minor.yy686, DB_OPTION_COMP, &yymsp[0].minor.yy0); } + yymsp[-2].minor.yy686 = yylhsminor.yy686; break; case 71: /* db_options ::= db_options DAYS NK_INTEGER */ case 72: /* db_options ::= db_options DAYS NK_VARIABLE */ yytestcase(yyruleno==72); -{ yylhsminor.yy636 = setDatabaseOption(pCxt, yymsp[-2].minor.yy636, DB_OPTION_DAYS, &yymsp[0].minor.yy0); } - yymsp[-2].minor.yy636 = yylhsminor.yy636; +{ yylhsminor.yy686 = setDatabaseOption(pCxt, yymsp[-2].minor.yy686, DB_OPTION_DAYS, &yymsp[0].minor.yy0); } + yymsp[-2].minor.yy686 = yylhsminor.yy686; break; case 73: /* db_options ::= db_options FSYNC NK_INTEGER */ -{ yylhsminor.yy636 = setDatabaseOption(pCxt, yymsp[-2].minor.yy636, DB_OPTION_FSYNC, &yymsp[0].minor.yy0); } - yymsp[-2].minor.yy636 = yylhsminor.yy636; +{ yylhsminor.yy686 = setDatabaseOption(pCxt, yymsp[-2].minor.yy686, DB_OPTION_FSYNC, &yymsp[0].minor.yy0); } + yymsp[-2].minor.yy686 = yylhsminor.yy686; break; case 74: /* db_options ::= db_options MAXROWS NK_INTEGER */ -{ yylhsminor.yy636 = setDatabaseOption(pCxt, yymsp[-2].minor.yy636, DB_OPTION_MAXROWS, &yymsp[0].minor.yy0); } - yymsp[-2].minor.yy636 = yylhsminor.yy636; +{ yylhsminor.yy686 = setDatabaseOption(pCxt, 
yymsp[-2].minor.yy686, DB_OPTION_MAXROWS, &yymsp[0].minor.yy0); } + yymsp[-2].minor.yy686 = yylhsminor.yy686; break; case 75: /* db_options ::= db_options MINROWS NK_INTEGER */ -{ yylhsminor.yy636 = setDatabaseOption(pCxt, yymsp[-2].minor.yy636, DB_OPTION_MINROWS, &yymsp[0].minor.yy0); } - yymsp[-2].minor.yy636 = yylhsminor.yy636; +{ yylhsminor.yy686 = setDatabaseOption(pCxt, yymsp[-2].minor.yy686, DB_OPTION_MINROWS, &yymsp[0].minor.yy0); } + yymsp[-2].minor.yy686 = yylhsminor.yy686; break; case 76: /* db_options ::= db_options KEEP integer_list */ case 77: /* db_options ::= db_options KEEP variable_list */ yytestcase(yyruleno==77); -{ yylhsminor.yy636 = setDatabaseOption(pCxt, yymsp[-2].minor.yy636, DB_OPTION_KEEP, yymsp[0].minor.yy236); } - yymsp[-2].minor.yy636 = yylhsminor.yy636; +{ yylhsminor.yy686 = setDatabaseOption(pCxt, yymsp[-2].minor.yy686, DB_OPTION_KEEP, yymsp[0].minor.yy670); } + yymsp[-2].minor.yy686 = yylhsminor.yy686; break; case 78: /* db_options ::= db_options PAGES NK_INTEGER */ -{ yylhsminor.yy636 = setDatabaseOption(pCxt, yymsp[-2].minor.yy636, DB_OPTION_PAGES, &yymsp[0].minor.yy0); } - yymsp[-2].minor.yy636 = yylhsminor.yy636; +{ yylhsminor.yy686 = setDatabaseOption(pCxt, yymsp[-2].minor.yy686, DB_OPTION_PAGES, &yymsp[0].minor.yy0); } + yymsp[-2].minor.yy686 = yylhsminor.yy686; break; case 79: /* db_options ::= db_options PAGESIZE NK_INTEGER */ -{ yylhsminor.yy636 = setDatabaseOption(pCxt, yymsp[-2].minor.yy636, DB_OPTION_PAGESIZE, &yymsp[0].minor.yy0); } - yymsp[-2].minor.yy636 = yylhsminor.yy636; +{ yylhsminor.yy686 = setDatabaseOption(pCxt, yymsp[-2].minor.yy686, DB_OPTION_PAGESIZE, &yymsp[0].minor.yy0); } + yymsp[-2].minor.yy686 = yylhsminor.yy686; break; case 80: /* db_options ::= db_options PRECISION NK_STRING */ -{ yylhsminor.yy636 = setDatabaseOption(pCxt, yymsp[-2].minor.yy636, DB_OPTION_PRECISION, &yymsp[0].minor.yy0); } - yymsp[-2].minor.yy636 = yylhsminor.yy636; +{ yylhsminor.yy686 = setDatabaseOption(pCxt, yymsp[-2].minor.yy686, DB_OPTION_PRECISION, &yymsp[0].minor.yy0); } + yymsp[-2].minor.yy686 = yylhsminor.yy686; break; case 81: /* db_options ::= db_options REPLICA NK_INTEGER */ -{ yylhsminor.yy636 = setDatabaseOption(pCxt, yymsp[-2].minor.yy636, DB_OPTION_REPLICA, &yymsp[0].minor.yy0); } - yymsp[-2].minor.yy636 = yylhsminor.yy636; +{ yylhsminor.yy686 = setDatabaseOption(pCxt, yymsp[-2].minor.yy686, DB_OPTION_REPLICA, &yymsp[0].minor.yy0); } + yymsp[-2].minor.yy686 = yylhsminor.yy686; break; case 82: /* db_options ::= db_options STRICT NK_INTEGER */ -{ yylhsminor.yy636 = setDatabaseOption(pCxt, yymsp[-2].minor.yy636, DB_OPTION_STRICT, &yymsp[0].minor.yy0); } - yymsp[-2].minor.yy636 = yylhsminor.yy636; +{ yylhsminor.yy686 = setDatabaseOption(pCxt, yymsp[-2].minor.yy686, DB_OPTION_STRICT, &yymsp[0].minor.yy0); } + yymsp[-2].minor.yy686 = yylhsminor.yy686; break; case 83: /* db_options ::= db_options WAL NK_INTEGER */ -{ yylhsminor.yy636 = setDatabaseOption(pCxt, yymsp[-2].minor.yy636, DB_OPTION_WAL, &yymsp[0].minor.yy0); } - yymsp[-2].minor.yy636 = yylhsminor.yy636; +{ yylhsminor.yy686 = setDatabaseOption(pCxt, yymsp[-2].minor.yy686, DB_OPTION_WAL, &yymsp[0].minor.yy0); } + yymsp[-2].minor.yy686 = yylhsminor.yy686; break; case 84: /* db_options ::= db_options VGROUPS NK_INTEGER */ -{ yylhsminor.yy636 = setDatabaseOption(pCxt, yymsp[-2].minor.yy636, DB_OPTION_VGROUPS, &yymsp[0].minor.yy0); } - yymsp[-2].minor.yy636 = yylhsminor.yy636; +{ yylhsminor.yy686 = setDatabaseOption(pCxt, yymsp[-2].minor.yy686, DB_OPTION_VGROUPS, &yymsp[0].minor.yy0); } + 
yymsp[-2].minor.yy686 = yylhsminor.yy686; break; case 85: /* db_options ::= db_options SINGLE_STABLE NK_INTEGER */ -{ yylhsminor.yy636 = setDatabaseOption(pCxt, yymsp[-2].minor.yy636, DB_OPTION_SINGLE_STABLE, &yymsp[0].minor.yy0); } - yymsp[-2].minor.yy636 = yylhsminor.yy636; +{ yylhsminor.yy686 = setDatabaseOption(pCxt, yymsp[-2].minor.yy686, DB_OPTION_SINGLE_STABLE, &yymsp[0].minor.yy0); } + yymsp[-2].minor.yy686 = yylhsminor.yy686; break; case 86: /* db_options ::= db_options RETENTIONS retention_list */ -{ yylhsminor.yy636 = setDatabaseOption(pCxt, yymsp[-2].minor.yy636, DB_OPTION_RETENTIONS, yymsp[0].minor.yy236); } - yymsp[-2].minor.yy636 = yylhsminor.yy636; +{ yylhsminor.yy686 = setDatabaseOption(pCxt, yymsp[-2].minor.yy686, DB_OPTION_RETENTIONS, yymsp[0].minor.yy670); } + yymsp[-2].minor.yy686 = yylhsminor.yy686; break; case 87: /* db_options ::= db_options SCHEMALESS NK_INTEGER */ -{ yylhsminor.yy636 = setDatabaseOption(pCxt, yymsp[-2].minor.yy636, DB_OPTION_SCHEMALESS, &yymsp[0].minor.yy0); } - yymsp[-2].minor.yy636 = yylhsminor.yy636; +{ yylhsminor.yy686 = setDatabaseOption(pCxt, yymsp[-2].minor.yy686, DB_OPTION_SCHEMALESS, &yymsp[0].minor.yy0); } + yymsp[-2].minor.yy686 = yylhsminor.yy686; break; case 88: /* alter_db_options ::= alter_db_option */ -{ yylhsminor.yy636 = createAlterDatabaseOptions(pCxt); yylhsminor.yy636 = setAlterDatabaseOption(pCxt, yylhsminor.yy636, &yymsp[0].minor.yy25); } - yymsp[0].minor.yy636 = yylhsminor.yy636; +{ yylhsminor.yy686 = createAlterDatabaseOptions(pCxt); yylhsminor.yy686 = setAlterDatabaseOption(pCxt, yylhsminor.yy686, &yymsp[0].minor.yy53); } + yymsp[0].minor.yy686 = yylhsminor.yy686; break; case 89: /* alter_db_options ::= alter_db_options alter_db_option */ -{ yylhsminor.yy636 = setAlterDatabaseOption(pCxt, yymsp[-1].minor.yy636, &yymsp[0].minor.yy25); } - yymsp[-1].minor.yy636 = yylhsminor.yy636; +{ yylhsminor.yy686 = setAlterDatabaseOption(pCxt, yymsp[-1].minor.yy686, &yymsp[0].minor.yy53); } + yymsp[-1].minor.yy686 = yylhsminor.yy686; break; case 90: /* alter_db_option ::= BUFFER NK_INTEGER */ -{ yymsp[-1].minor.yy25.type = DB_OPTION_BUFFER; yymsp[-1].minor.yy25.val = yymsp[0].minor.yy0; } +{ yymsp[-1].minor.yy53.type = DB_OPTION_BUFFER; yymsp[-1].minor.yy53.val = yymsp[0].minor.yy0; } break; case 91: /* alter_db_option ::= CACHELAST NK_INTEGER */ -{ yymsp[-1].minor.yy25.type = DB_OPTION_CACHELAST; yymsp[-1].minor.yy25.val = yymsp[0].minor.yy0; } +{ yymsp[-1].minor.yy53.type = DB_OPTION_CACHELAST; yymsp[-1].minor.yy53.val = yymsp[0].minor.yy0; } break; case 92: /* alter_db_option ::= FSYNC NK_INTEGER */ -{ yymsp[-1].minor.yy25.type = DB_OPTION_FSYNC; yymsp[-1].minor.yy25.val = yymsp[0].minor.yy0; } +{ yymsp[-1].minor.yy53.type = DB_OPTION_FSYNC; yymsp[-1].minor.yy53.val = yymsp[0].minor.yy0; } break; case 93: /* alter_db_option ::= KEEP integer_list */ case 94: /* alter_db_option ::= KEEP variable_list */ yytestcase(yyruleno==94); -{ yymsp[-1].minor.yy25.type = DB_OPTION_KEEP; yymsp[-1].minor.yy25.pList = yymsp[0].minor.yy236; } +{ yymsp[-1].minor.yy53.type = DB_OPTION_KEEP; yymsp[-1].minor.yy53.pList = yymsp[0].minor.yy670; } break; case 95: /* alter_db_option ::= PAGES NK_INTEGER */ -{ yymsp[-1].minor.yy25.type = DB_OPTION_PAGES; yymsp[-1].minor.yy25.val = yymsp[0].minor.yy0; } +{ yymsp[-1].minor.yy53.type = DB_OPTION_PAGES; yymsp[-1].minor.yy53.val = yymsp[0].minor.yy0; } break; case 96: /* alter_db_option ::= REPLICA NK_INTEGER */ -{ yymsp[-1].minor.yy25.type = DB_OPTION_REPLICA; yymsp[-1].minor.yy25.val = yymsp[0].minor.yy0; } +{ 
yymsp[-1].minor.yy53.type = DB_OPTION_REPLICA; yymsp[-1].minor.yy53.val = yymsp[0].minor.yy0; } break; case 97: /* alter_db_option ::= STRICT NK_INTEGER */ -{ yymsp[-1].minor.yy25.type = DB_OPTION_STRICT; yymsp[-1].minor.yy25.val = yymsp[0].minor.yy0; } +{ yymsp[-1].minor.yy53.type = DB_OPTION_STRICT; yymsp[-1].minor.yy53.val = yymsp[0].minor.yy0; } break; case 98: /* alter_db_option ::= WAL NK_INTEGER */ -{ yymsp[-1].minor.yy25.type = DB_OPTION_WAL; yymsp[-1].minor.yy25.val = yymsp[0].minor.yy0; } +{ yymsp[-1].minor.yy53.type = DB_OPTION_WAL; yymsp[-1].minor.yy53.val = yymsp[0].minor.yy0; } break; case 99: /* integer_list ::= NK_INTEGER */ -{ yylhsminor.yy236 = createNodeList(pCxt, createValueNode(pCxt, TSDB_DATA_TYPE_BIGINT, &yymsp[0].minor.yy0)); } - yymsp[0].minor.yy236 = yylhsminor.yy236; +{ yylhsminor.yy670 = createNodeList(pCxt, createValueNode(pCxt, TSDB_DATA_TYPE_BIGINT, &yymsp[0].minor.yy0)); } + yymsp[0].minor.yy670 = yylhsminor.yy670; break; case 100: /* integer_list ::= integer_list NK_COMMA NK_INTEGER */ - case 263: /* dnode_list ::= dnode_list DNODE NK_INTEGER */ yytestcase(yyruleno==263); -{ yylhsminor.yy236 = addNodeToList(pCxt, yymsp[-2].minor.yy236, createValueNode(pCxt, TSDB_DATA_TYPE_BIGINT, &yymsp[0].minor.yy0)); } - yymsp[-2].minor.yy236 = yylhsminor.yy236; + case 259: /* dnode_list ::= dnode_list DNODE NK_INTEGER */ yytestcase(yyruleno==259); +{ yylhsminor.yy670 = addNodeToList(pCxt, yymsp[-2].minor.yy670, createValueNode(pCxt, TSDB_DATA_TYPE_BIGINT, &yymsp[0].minor.yy0)); } + yymsp[-2].minor.yy670 = yylhsminor.yy670; break; case 101: /* variable_list ::= NK_VARIABLE */ -{ yylhsminor.yy236 = createNodeList(pCxt, createDurationValueNode(pCxt, &yymsp[0].minor.yy0)); } - yymsp[0].minor.yy236 = yylhsminor.yy236; +{ yylhsminor.yy670 = createNodeList(pCxt, createDurationValueNode(pCxt, &yymsp[0].minor.yy0)); } + yymsp[0].minor.yy670 = yylhsminor.yy670; break; case 102: /* variable_list ::= variable_list NK_COMMA NK_VARIABLE */ -{ yylhsminor.yy236 = addNodeToList(pCxt, yymsp[-2].minor.yy236, createDurationValueNode(pCxt, &yymsp[0].minor.yy0)); } - yymsp[-2].minor.yy236 = yylhsminor.yy236; +{ yylhsminor.yy670 = addNodeToList(pCxt, yymsp[-2].minor.yy670, createDurationValueNode(pCxt, &yymsp[0].minor.yy0)); } + yymsp[-2].minor.yy670 = yylhsminor.yy670; break; case 103: /* retention_list ::= retention */ case 123: /* multi_create_clause ::= create_subtable_clause */ yytestcase(yyruleno==123); case 126: /* multi_drop_clause ::= drop_table_clause */ yytestcase(yyruleno==126); case 133: /* column_def_list ::= column_def */ yytestcase(yyruleno==133); - case 174: /* col_name_list ::= col_name */ yytestcase(yyruleno==174); - case 212: /* func_name_list ::= func_name */ yytestcase(yyruleno==212); - case 221: /* func_list ::= func */ yytestcase(yyruleno==221); - case 288: /* literal_list ::= signed_literal */ yytestcase(yyruleno==288); - case 342: /* other_para_list ::= star_func_para */ yytestcase(yyruleno==342); - case 397: /* select_sublist ::= select_item */ yytestcase(yyruleno==397); - case 446: /* sort_specification_list ::= sort_specification */ yytestcase(yyruleno==446); -{ yylhsminor.yy236 = createNodeList(pCxt, yymsp[0].minor.yy636); } - yymsp[0].minor.yy236 = yylhsminor.yy236; + case 173: /* col_name_list ::= col_name */ yytestcase(yyruleno==173); + case 211: /* func_name_list ::= func_name */ yytestcase(yyruleno==211); + case 220: /* func_list ::= func */ yytestcase(yyruleno==220); + case 284: /* literal_list ::= signed_literal */ yytestcase(yyruleno==284); + case 338: /* 
other_para_list ::= star_func_para */ yytestcase(yyruleno==338); + case 393: /* select_sublist ::= select_item */ yytestcase(yyruleno==393); + case 442: /* sort_specification_list ::= sort_specification */ yytestcase(yyruleno==442); +{ yylhsminor.yy670 = createNodeList(pCxt, yymsp[0].minor.yy686); } + yymsp[0].minor.yy670 = yylhsminor.yy670; break; case 104: /* retention_list ::= retention_list NK_COMMA retention */ case 134: /* column_def_list ::= column_def_list NK_COMMA column_def */ yytestcase(yyruleno==134); - case 175: /* col_name_list ::= col_name_list NK_COMMA col_name */ yytestcase(yyruleno==175); - case 213: /* func_name_list ::= func_name_list NK_COMMA func_name */ yytestcase(yyruleno==213); - case 222: /* func_list ::= func_list NK_COMMA func */ yytestcase(yyruleno==222); - case 289: /* literal_list ::= literal_list NK_COMMA signed_literal */ yytestcase(yyruleno==289); - case 343: /* other_para_list ::= other_para_list NK_COMMA star_func_para */ yytestcase(yyruleno==343); - case 398: /* select_sublist ::= select_sublist NK_COMMA select_item */ yytestcase(yyruleno==398); - case 447: /* sort_specification_list ::= sort_specification_list NK_COMMA sort_specification */ yytestcase(yyruleno==447); -{ yylhsminor.yy236 = addNodeToList(pCxt, yymsp[-2].minor.yy236, yymsp[0].minor.yy636); } - yymsp[-2].minor.yy236 = yylhsminor.yy236; + case 174: /* col_name_list ::= col_name_list NK_COMMA col_name */ yytestcase(yyruleno==174); + case 212: /* func_name_list ::= func_name_list NK_COMMA func_name */ yytestcase(yyruleno==212); + case 221: /* func_list ::= func_list NK_COMMA func */ yytestcase(yyruleno==221); + case 285: /* literal_list ::= literal_list NK_COMMA signed_literal */ yytestcase(yyruleno==285); + case 339: /* other_para_list ::= other_para_list NK_COMMA star_func_para */ yytestcase(yyruleno==339); + case 394: /* select_sublist ::= select_sublist NK_COMMA select_item */ yytestcase(yyruleno==394); + case 443: /* sort_specification_list ::= sort_specification_list NK_COMMA sort_specification */ yytestcase(yyruleno==443); +{ yylhsminor.yy670 = addNodeToList(pCxt, yymsp[-2].minor.yy670, yymsp[0].minor.yy686); } + yymsp[-2].minor.yy670 = yylhsminor.yy670; break; case 105: /* retention ::= NK_VARIABLE NK_COLON NK_VARIABLE */ -{ yylhsminor.yy636 = createNodeListNodeEx(pCxt, createDurationValueNode(pCxt, &yymsp[-2].minor.yy0), createDurationValueNode(pCxt, &yymsp[0].minor.yy0)); } - yymsp[-2].minor.yy636 = yylhsminor.yy636; +{ yylhsminor.yy686 = createNodeListNodeEx(pCxt, createDurationValueNode(pCxt, &yymsp[-2].minor.yy0), createDurationValueNode(pCxt, &yymsp[0].minor.yy0)); } + yymsp[-2].minor.yy686 = yylhsminor.yy686; break; case 106: /* cmd ::= CREATE TABLE not_exists_opt full_table_name NK_LP column_def_list NK_RP tags_def_opt table_options */ case 108: /* cmd ::= CREATE STABLE not_exists_opt full_table_name NK_LP column_def_list NK_RP tags_def table_options */ yytestcase(yyruleno==108); -{ pCxt->pRootNode = createCreateTableStmt(pCxt, yymsp[-6].minor.yy603, yymsp[-5].minor.yy636, yymsp[-3].minor.yy236, yymsp[-1].minor.yy236, yymsp[0].minor.yy636); } +{ pCxt->pRootNode = createCreateTableStmt(pCxt, yymsp[-6].minor.yy131, yymsp[-5].minor.yy686, yymsp[-3].minor.yy670, yymsp[-1].minor.yy670, yymsp[0].minor.yy686); } break; case 107: /* cmd ::= CREATE TABLE multi_create_clause */ -{ pCxt->pRootNode = createCreateMultiTableStmt(pCxt, yymsp[0].minor.yy236); } +{ pCxt->pRootNode = createCreateMultiTableStmt(pCxt, yymsp[0].minor.yy670); } break; case 109: /* cmd ::= DROP TABLE 
multi_drop_clause */ -{ pCxt->pRootNode = createDropTableStmt(pCxt, yymsp[0].minor.yy236); } +{ pCxt->pRootNode = createDropTableStmt(pCxt, yymsp[0].minor.yy670); } break; case 110: /* cmd ::= DROP STABLE exists_opt full_table_name */ -{ pCxt->pRootNode = createDropSuperTableStmt(pCxt, yymsp[-1].minor.yy603, yymsp[0].minor.yy636); } +{ pCxt->pRootNode = createDropSuperTableStmt(pCxt, yymsp[-1].minor.yy131, yymsp[0].minor.yy686); } break; case 111: /* cmd ::= ALTER TABLE alter_table_clause */ case 112: /* cmd ::= ALTER STABLE alter_table_clause */ yytestcase(yyruleno==112); - case 265: /* cmd ::= query_expression */ yytestcase(yyruleno==265); -{ pCxt->pRootNode = yymsp[0].minor.yy636; } + case 261: /* cmd ::= query_expression */ yytestcase(yyruleno==261); +{ pCxt->pRootNode = yymsp[0].minor.yy686; } break; case 113: /* alter_table_clause ::= full_table_name alter_table_options */ -{ yylhsminor.yy636 = createAlterTableModifyOptions(pCxt, yymsp[-1].minor.yy636, yymsp[0].minor.yy636); } - yymsp[-1].minor.yy636 = yylhsminor.yy636; +{ yylhsminor.yy686 = createAlterTableModifyOptions(pCxt, yymsp[-1].minor.yy686, yymsp[0].minor.yy686); } + yymsp[-1].minor.yy686 = yylhsminor.yy686; break; case 114: /* alter_table_clause ::= full_table_name ADD COLUMN column_name type_name */ -{ yylhsminor.yy636 = createAlterTableAddModifyCol(pCxt, yymsp[-4].minor.yy636, TSDB_ALTER_TABLE_ADD_COLUMN, &yymsp[-1].minor.yy53, yymsp[0].minor.yy450); } - yymsp[-4].minor.yy636 = yylhsminor.yy636; +{ yylhsminor.yy686 = createAlterTableAddModifyCol(pCxt, yymsp[-4].minor.yy686, TSDB_ALTER_TABLE_ADD_COLUMN, &yymsp[-1].minor.yy113, yymsp[0].minor.yy490); } + yymsp[-4].minor.yy686 = yylhsminor.yy686; break; case 115: /* alter_table_clause ::= full_table_name DROP COLUMN column_name */ -{ yylhsminor.yy636 = createAlterTableDropCol(pCxt, yymsp[-3].minor.yy636, TSDB_ALTER_TABLE_DROP_COLUMN, &yymsp[0].minor.yy53); } - yymsp[-3].minor.yy636 = yylhsminor.yy636; +{ yylhsminor.yy686 = createAlterTableDropCol(pCxt, yymsp[-3].minor.yy686, TSDB_ALTER_TABLE_DROP_COLUMN, &yymsp[0].minor.yy113); } + yymsp[-3].minor.yy686 = yylhsminor.yy686; break; case 116: /* alter_table_clause ::= full_table_name MODIFY COLUMN column_name type_name */ -{ yylhsminor.yy636 = createAlterTableAddModifyCol(pCxt, yymsp[-4].minor.yy636, TSDB_ALTER_TABLE_UPDATE_COLUMN_BYTES, &yymsp[-1].minor.yy53, yymsp[0].minor.yy450); } - yymsp[-4].minor.yy636 = yylhsminor.yy636; +{ yylhsminor.yy686 = createAlterTableAddModifyCol(pCxt, yymsp[-4].minor.yy686, TSDB_ALTER_TABLE_UPDATE_COLUMN_BYTES, &yymsp[-1].minor.yy113, yymsp[0].minor.yy490); } + yymsp[-4].minor.yy686 = yylhsminor.yy686; break; case 117: /* alter_table_clause ::= full_table_name RENAME COLUMN column_name column_name */ -{ yylhsminor.yy636 = createAlterTableRenameCol(pCxt, yymsp[-4].minor.yy636, TSDB_ALTER_TABLE_UPDATE_COLUMN_NAME, &yymsp[-1].minor.yy53, &yymsp[0].minor.yy53); } - yymsp[-4].minor.yy636 = yylhsminor.yy636; +{ yylhsminor.yy686 = createAlterTableRenameCol(pCxt, yymsp[-4].minor.yy686, TSDB_ALTER_TABLE_UPDATE_COLUMN_NAME, &yymsp[-1].minor.yy113, &yymsp[0].minor.yy113); } + yymsp[-4].minor.yy686 = yylhsminor.yy686; break; case 118: /* alter_table_clause ::= full_table_name ADD TAG column_name type_name */ -{ yylhsminor.yy636 = createAlterTableAddModifyCol(pCxt, yymsp[-4].minor.yy636, TSDB_ALTER_TABLE_ADD_TAG, &yymsp[-1].minor.yy53, yymsp[0].minor.yy450); } - yymsp[-4].minor.yy636 = yylhsminor.yy636; +{ yylhsminor.yy686 = createAlterTableAddModifyCol(pCxt, yymsp[-4].minor.yy686, TSDB_ALTER_TABLE_ADD_TAG, 
&yymsp[-1].minor.yy113, yymsp[0].minor.yy490); } + yymsp[-4].minor.yy686 = yylhsminor.yy686; break; case 119: /* alter_table_clause ::= full_table_name DROP TAG column_name */ -{ yylhsminor.yy636 = createAlterTableDropCol(pCxt, yymsp[-3].minor.yy636, TSDB_ALTER_TABLE_DROP_TAG, &yymsp[0].minor.yy53); } - yymsp[-3].minor.yy636 = yylhsminor.yy636; +{ yylhsminor.yy686 = createAlterTableDropCol(pCxt, yymsp[-3].minor.yy686, TSDB_ALTER_TABLE_DROP_TAG, &yymsp[0].minor.yy113); } + yymsp[-3].minor.yy686 = yylhsminor.yy686; break; case 120: /* alter_table_clause ::= full_table_name MODIFY TAG column_name type_name */ -{ yylhsminor.yy636 = createAlterTableAddModifyCol(pCxt, yymsp[-4].minor.yy636, TSDB_ALTER_TABLE_UPDATE_TAG_BYTES, &yymsp[-1].minor.yy53, yymsp[0].minor.yy450); } - yymsp[-4].minor.yy636 = yylhsminor.yy636; +{ yylhsminor.yy686 = createAlterTableAddModifyCol(pCxt, yymsp[-4].minor.yy686, TSDB_ALTER_TABLE_UPDATE_TAG_BYTES, &yymsp[-1].minor.yy113, yymsp[0].minor.yy490); } + yymsp[-4].minor.yy686 = yylhsminor.yy686; break; case 121: /* alter_table_clause ::= full_table_name RENAME TAG column_name column_name */ -{ yylhsminor.yy636 = createAlterTableRenameCol(pCxt, yymsp[-4].minor.yy636, TSDB_ALTER_TABLE_UPDATE_TAG_NAME, &yymsp[-1].minor.yy53, &yymsp[0].minor.yy53); } - yymsp[-4].minor.yy636 = yylhsminor.yy636; +{ yylhsminor.yy686 = createAlterTableRenameCol(pCxt, yymsp[-4].minor.yy686, TSDB_ALTER_TABLE_UPDATE_TAG_NAME, &yymsp[-1].minor.yy113, &yymsp[0].minor.yy113); } + yymsp[-4].minor.yy686 = yylhsminor.yy686; break; case 122: /* alter_table_clause ::= full_table_name SET TAG column_name NK_EQ signed_literal */ -{ yylhsminor.yy636 = createAlterTableSetTag(pCxt, yymsp[-5].minor.yy636, &yymsp[-2].minor.yy53, yymsp[0].minor.yy636); } - yymsp[-5].minor.yy636 = yylhsminor.yy636; +{ yylhsminor.yy686 = createAlterTableSetTag(pCxt, yymsp[-5].minor.yy686, &yymsp[-2].minor.yy113, yymsp[0].minor.yy686); } + yymsp[-5].minor.yy686 = yylhsminor.yy686; break; case 124: /* multi_create_clause ::= multi_create_clause create_subtable_clause */ case 127: /* multi_drop_clause ::= multi_drop_clause drop_table_clause */ yytestcase(yyruleno==127); -{ yylhsminor.yy236 = addNodeToList(pCxt, yymsp[-1].minor.yy236, yymsp[0].minor.yy636); } - yymsp[-1].minor.yy236 = yylhsminor.yy236; +{ yylhsminor.yy670 = addNodeToList(pCxt, yymsp[-1].minor.yy670, yymsp[0].minor.yy686); } + yymsp[-1].minor.yy670 = yylhsminor.yy670; break; case 125: /* create_subtable_clause ::= not_exists_opt full_table_name USING full_table_name specific_tags_opt TAGS NK_LP literal_list NK_RP table_options */ -{ yylhsminor.yy636 = createCreateSubTableClause(pCxt, yymsp[-9].minor.yy603, yymsp[-8].minor.yy636, yymsp[-6].minor.yy636, yymsp[-5].minor.yy236, yymsp[-2].minor.yy236, yymsp[0].minor.yy636); } - yymsp[-9].minor.yy636 = yylhsminor.yy636; +{ yylhsminor.yy686 = createCreateSubTableClause(pCxt, yymsp[-9].minor.yy131, yymsp[-8].minor.yy686, yymsp[-6].minor.yy686, yymsp[-5].minor.yy670, yymsp[-2].minor.yy670, yymsp[0].minor.yy686); } + yymsp[-9].minor.yy686 = yylhsminor.yy686; break; case 128: /* drop_table_clause ::= exists_opt full_table_name */ -{ yylhsminor.yy636 = createDropTableClause(pCxt, yymsp[-1].minor.yy603, yymsp[0].minor.yy636); } - yymsp[-1].minor.yy636 = yylhsminor.yy636; +{ yylhsminor.yy686 = createDropTableClause(pCxt, yymsp[-1].minor.yy131, yymsp[0].minor.yy686); } + yymsp[-1].minor.yy686 = yylhsminor.yy686; break; case 129: /* specific_tags_opt ::= */ case 160: /* tags_def_opt ::= */ yytestcase(yyruleno==160); - case 405: /* 
partition_by_clause_opt ::= */ yytestcase(yyruleno==405); - case 422: /* group_by_clause_opt ::= */ yytestcase(yyruleno==422); - case 434: /* order_by_clause_opt ::= */ yytestcase(yyruleno==434); -{ yymsp[1].minor.yy236 = NULL; } + case 401: /* partition_by_clause_opt ::= */ yytestcase(yyruleno==401); + case 418: /* group_by_clause_opt ::= */ yytestcase(yyruleno==418); + case 430: /* order_by_clause_opt ::= */ yytestcase(yyruleno==430); +{ yymsp[1].minor.yy670 = NULL; } break; case 130: /* specific_tags_opt ::= NK_LP col_name_list NK_RP */ -{ yymsp[-2].minor.yy236 = yymsp[-1].minor.yy236; } +{ yymsp[-2].minor.yy670 = yymsp[-1].minor.yy670; } break; case 131: /* full_table_name ::= table_name */ -{ yylhsminor.yy636 = createRealTableNode(pCxt, NULL, &yymsp[0].minor.yy53, NULL); } - yymsp[0].minor.yy636 = yylhsminor.yy636; +{ yylhsminor.yy686 = createRealTableNode(pCxt, NULL, &yymsp[0].minor.yy113, NULL); } + yymsp[0].minor.yy686 = yylhsminor.yy686; break; case 132: /* full_table_name ::= db_name NK_DOT table_name */ -{ yylhsminor.yy636 = createRealTableNode(pCxt, &yymsp[-2].minor.yy53, &yymsp[0].minor.yy53, NULL); } - yymsp[-2].minor.yy636 = yylhsminor.yy636; +{ yylhsminor.yy686 = createRealTableNode(pCxt, &yymsp[-2].minor.yy113, &yymsp[0].minor.yy113, NULL); } + yymsp[-2].minor.yy686 = yylhsminor.yy686; break; case 135: /* column_def ::= column_name type_name */ -{ yylhsminor.yy636 = createColumnDefNode(pCxt, &yymsp[-1].minor.yy53, yymsp[0].minor.yy450, NULL); } - yymsp[-1].minor.yy636 = yylhsminor.yy636; +{ yylhsminor.yy686 = createColumnDefNode(pCxt, &yymsp[-1].minor.yy113, yymsp[0].minor.yy490, NULL); } + yymsp[-1].minor.yy686 = yylhsminor.yy686; break; case 136: /* column_def ::= column_name type_name COMMENT NK_STRING */ -{ yylhsminor.yy636 = createColumnDefNode(pCxt, &yymsp[-3].minor.yy53, yymsp[-2].minor.yy450, &yymsp[0].minor.yy0); } - yymsp[-3].minor.yy636 = yylhsminor.yy636; +{ yylhsminor.yy686 = createColumnDefNode(pCxt, &yymsp[-3].minor.yy113, yymsp[-2].minor.yy490, &yymsp[0].minor.yy0); } + yymsp[-3].minor.yy686 = yylhsminor.yy686; break; case 137: /* type_name ::= BOOL */ -{ yymsp[0].minor.yy450 = createDataType(TSDB_DATA_TYPE_BOOL); } +{ yymsp[0].minor.yy490 = createDataType(TSDB_DATA_TYPE_BOOL); } break; case 138: /* type_name ::= TINYINT */ -{ yymsp[0].minor.yy450 = createDataType(TSDB_DATA_TYPE_TINYINT); } +{ yymsp[0].minor.yy490 = createDataType(TSDB_DATA_TYPE_TINYINT); } break; case 139: /* type_name ::= SMALLINT */ -{ yymsp[0].minor.yy450 = createDataType(TSDB_DATA_TYPE_SMALLINT); } +{ yymsp[0].minor.yy490 = createDataType(TSDB_DATA_TYPE_SMALLINT); } break; case 140: /* type_name ::= INT */ case 141: /* type_name ::= INTEGER */ yytestcase(yyruleno==141); -{ yymsp[0].minor.yy450 = createDataType(TSDB_DATA_TYPE_INT); } +{ yymsp[0].minor.yy490 = createDataType(TSDB_DATA_TYPE_INT); } break; case 142: /* type_name ::= BIGINT */ -{ yymsp[0].minor.yy450 = createDataType(TSDB_DATA_TYPE_BIGINT); } +{ yymsp[0].minor.yy490 = createDataType(TSDB_DATA_TYPE_BIGINT); } break; case 143: /* type_name ::= FLOAT */ -{ yymsp[0].minor.yy450 = createDataType(TSDB_DATA_TYPE_FLOAT); } +{ yymsp[0].minor.yy490 = createDataType(TSDB_DATA_TYPE_FLOAT); } break; case 144: /* type_name ::= DOUBLE */ -{ yymsp[0].minor.yy450 = createDataType(TSDB_DATA_TYPE_DOUBLE); } +{ yymsp[0].minor.yy490 = createDataType(TSDB_DATA_TYPE_DOUBLE); } break; case 145: /* type_name ::= BINARY NK_LP NK_INTEGER NK_RP */ -{ yymsp[-3].minor.yy450 = createVarLenDataType(TSDB_DATA_TYPE_BINARY, &yymsp[-1].minor.yy0); } +{ 
yymsp[-3].minor.yy490 = createVarLenDataType(TSDB_DATA_TYPE_BINARY, &yymsp[-1].minor.yy0); } break; case 146: /* type_name ::= TIMESTAMP */ -{ yymsp[0].minor.yy450 = createDataType(TSDB_DATA_TYPE_TIMESTAMP); } +{ yymsp[0].minor.yy490 = createDataType(TSDB_DATA_TYPE_TIMESTAMP); } break; case 147: /* type_name ::= NCHAR NK_LP NK_INTEGER NK_RP */ -{ yymsp[-3].minor.yy450 = createVarLenDataType(TSDB_DATA_TYPE_NCHAR, &yymsp[-1].minor.yy0); } +{ yymsp[-3].minor.yy490 = createVarLenDataType(TSDB_DATA_TYPE_NCHAR, &yymsp[-1].minor.yy0); } break; case 148: /* type_name ::= TINYINT UNSIGNED */ -{ yymsp[-1].minor.yy450 = createDataType(TSDB_DATA_TYPE_UTINYINT); } +{ yymsp[-1].minor.yy490 = createDataType(TSDB_DATA_TYPE_UTINYINT); } break; case 149: /* type_name ::= SMALLINT UNSIGNED */ -{ yymsp[-1].minor.yy450 = createDataType(TSDB_DATA_TYPE_USMALLINT); } +{ yymsp[-1].minor.yy490 = createDataType(TSDB_DATA_TYPE_USMALLINT); } break; case 150: /* type_name ::= INT UNSIGNED */ -{ yymsp[-1].minor.yy450 = createDataType(TSDB_DATA_TYPE_UINT); } +{ yymsp[-1].minor.yy490 = createDataType(TSDB_DATA_TYPE_UINT); } break; case 151: /* type_name ::= BIGINT UNSIGNED */ -{ yymsp[-1].minor.yy450 = createDataType(TSDB_DATA_TYPE_UBIGINT); } +{ yymsp[-1].minor.yy490 = createDataType(TSDB_DATA_TYPE_UBIGINT); } break; case 152: /* type_name ::= JSON */ -{ yymsp[0].minor.yy450 = createDataType(TSDB_DATA_TYPE_JSON); } +{ yymsp[0].minor.yy490 = createDataType(TSDB_DATA_TYPE_JSON); } break; case 153: /* type_name ::= VARCHAR NK_LP NK_INTEGER NK_RP */ -{ yymsp[-3].minor.yy450 = createVarLenDataType(TSDB_DATA_TYPE_VARCHAR, &yymsp[-1].minor.yy0); } +{ yymsp[-3].minor.yy490 = createVarLenDataType(TSDB_DATA_TYPE_VARCHAR, &yymsp[-1].minor.yy0); } break; case 154: /* type_name ::= MEDIUMBLOB */ -{ yymsp[0].minor.yy450 = createDataType(TSDB_DATA_TYPE_MEDIUMBLOB); } +{ yymsp[0].minor.yy490 = createDataType(TSDB_DATA_TYPE_MEDIUMBLOB); } break; case 155: /* type_name ::= BLOB */ -{ yymsp[0].minor.yy450 = createDataType(TSDB_DATA_TYPE_BLOB); } +{ yymsp[0].minor.yy490 = createDataType(TSDB_DATA_TYPE_BLOB); } break; case 156: /* type_name ::= VARBINARY NK_LP NK_INTEGER NK_RP */ -{ yymsp[-3].minor.yy450 = createVarLenDataType(TSDB_DATA_TYPE_VARBINARY, &yymsp[-1].minor.yy0); } +{ yymsp[-3].minor.yy490 = createVarLenDataType(TSDB_DATA_TYPE_VARBINARY, &yymsp[-1].minor.yy0); } break; case 157: /* type_name ::= DECIMAL */ -{ yymsp[0].minor.yy450 = createDataType(TSDB_DATA_TYPE_DECIMAL); } +{ yymsp[0].minor.yy490 = createDataType(TSDB_DATA_TYPE_DECIMAL); } break; case 158: /* type_name ::= DECIMAL NK_LP NK_INTEGER NK_RP */ -{ yymsp[-3].minor.yy450 = createDataType(TSDB_DATA_TYPE_DECIMAL); } +{ yymsp[-3].minor.yy490 = createDataType(TSDB_DATA_TYPE_DECIMAL); } break; case 159: /* type_name ::= DECIMAL NK_LP NK_INTEGER NK_COMMA NK_INTEGER NK_RP */ -{ yymsp[-5].minor.yy450 = createDataType(TSDB_DATA_TYPE_DECIMAL); } +{ yymsp[-5].minor.yy490 = createDataType(TSDB_DATA_TYPE_DECIMAL); } break; case 161: /* tags_def_opt ::= tags_def */ - case 341: /* star_func_para_list ::= other_para_list */ yytestcase(yyruleno==341); - case 396: /* select_list ::= select_sublist */ yytestcase(yyruleno==396); -{ yylhsminor.yy236 = yymsp[0].minor.yy236; } - yymsp[0].minor.yy236 = yylhsminor.yy236; + case 337: /* star_func_para_list ::= other_para_list */ yytestcase(yyruleno==337); + case 392: /* select_list ::= select_sublist */ yytestcase(yyruleno==392); +{ yylhsminor.yy670 = yymsp[0].minor.yy670; } + yymsp[0].minor.yy670 = yylhsminor.yy670; break; case 162: /* tags_def 
::= TAGS NK_LP column_def_list NK_RP */ -{ yymsp[-3].minor.yy236 = yymsp[-1].minor.yy236; } +{ yymsp[-3].minor.yy670 = yymsp[-1].minor.yy670; } break; case 163: /* table_options ::= */ -{ yymsp[1].minor.yy636 = createDefaultTableOptions(pCxt); } +{ yymsp[1].minor.yy686 = createDefaultTableOptions(pCxt); } break; case 164: /* table_options ::= table_options COMMENT NK_STRING */ -{ yylhsminor.yy636 = setTableOption(pCxt, yymsp[-2].minor.yy636, TABLE_OPTION_COMMENT, &yymsp[0].minor.yy0); } - yymsp[-2].minor.yy636 = yylhsminor.yy636; - break; - case 165: /* table_options ::= table_options DELAY NK_INTEGER */ -{ yylhsminor.yy636 = setTableOption(pCxt, yymsp[-2].minor.yy636, TABLE_OPTION_DELAY, &yymsp[0].minor.yy0); } - yymsp[-2].minor.yy636 = yylhsminor.yy636; +{ yylhsminor.yy686 = setTableOption(pCxt, yymsp[-2].minor.yy686, TABLE_OPTION_COMMENT, &yymsp[0].minor.yy0); } + yymsp[-2].minor.yy686 = yylhsminor.yy686; break; - case 166: /* table_options ::= table_options FILE_FACTOR NK_FLOAT */ -{ yylhsminor.yy636 = setTableOption(pCxt, yymsp[-2].minor.yy636, TABLE_OPTION_FILE_FACTOR, &yymsp[0].minor.yy0); } - yymsp[-2].minor.yy636 = yylhsminor.yy636; + case 165: /* table_options ::= table_options FILE_FACTOR NK_FLOAT */ +{ yylhsminor.yy686 = setTableOption(pCxt, yymsp[-2].minor.yy686, TABLE_OPTION_FILE_FACTOR, &yymsp[0].minor.yy0); } + yymsp[-2].minor.yy686 = yylhsminor.yy686; break; - case 167: /* table_options ::= table_options ROLLUP NK_LP func_name_list NK_RP */ -{ yylhsminor.yy636 = setTableOption(pCxt, yymsp[-4].minor.yy636, TABLE_OPTION_ROLLUP, yymsp[-1].minor.yy236); } - yymsp[-4].minor.yy636 = yylhsminor.yy636; + case 166: /* table_options ::= table_options ROLLUP NK_LP func_name_list NK_RP */ +{ yylhsminor.yy686 = setTableOption(pCxt, yymsp[-4].minor.yy686, TABLE_OPTION_ROLLUP, yymsp[-1].minor.yy670); } + yymsp[-4].minor.yy686 = yylhsminor.yy686; break; - case 168: /* table_options ::= table_options TTL NK_INTEGER */ -{ yylhsminor.yy636 = setTableOption(pCxt, yymsp[-2].minor.yy636, TABLE_OPTION_TTL, &yymsp[0].minor.yy0); } - yymsp[-2].minor.yy636 = yylhsminor.yy636; + case 167: /* table_options ::= table_options TTL NK_INTEGER */ +{ yylhsminor.yy686 = setTableOption(pCxt, yymsp[-2].minor.yy686, TABLE_OPTION_TTL, &yymsp[0].minor.yy0); } + yymsp[-2].minor.yy686 = yylhsminor.yy686; break; - case 169: /* table_options ::= table_options SMA NK_LP col_name_list NK_RP */ -{ yylhsminor.yy636 = setTableOption(pCxt, yymsp[-4].minor.yy636, TABLE_OPTION_SMA, yymsp[-1].minor.yy236); } - yymsp[-4].minor.yy636 = yylhsminor.yy636; + case 168: /* table_options ::= table_options SMA NK_LP col_name_list NK_RP */ +{ yylhsminor.yy686 = setTableOption(pCxt, yymsp[-4].minor.yy686, TABLE_OPTION_SMA, yymsp[-1].minor.yy670); } + yymsp[-4].minor.yy686 = yylhsminor.yy686; break; - case 170: /* alter_table_options ::= alter_table_option */ -{ yylhsminor.yy636 = createAlterTableOptions(pCxt); yylhsminor.yy636 = setTableOption(pCxt, yylhsminor.yy636, yymsp[0].minor.yy25.type, &yymsp[0].minor.yy25.val); } - yymsp[0].minor.yy636 = yylhsminor.yy636; + case 169: /* alter_table_options ::= alter_table_option */ +{ yylhsminor.yy686 = createAlterTableOptions(pCxt); yylhsminor.yy686 = setTableOption(pCxt, yylhsminor.yy686, yymsp[0].minor.yy53.type, &yymsp[0].minor.yy53.val); } + yymsp[0].minor.yy686 = yylhsminor.yy686; break; - case 171: /* alter_table_options ::= alter_table_options alter_table_option */ -{ yylhsminor.yy636 = setTableOption(pCxt, yymsp[-1].minor.yy636, yymsp[0].minor.yy25.type, &yymsp[0].minor.yy25.val); } - 
yymsp[-1].minor.yy636 = yylhsminor.yy636; + case 170: /* alter_table_options ::= alter_table_options alter_table_option */ +{ yylhsminor.yy686 = setTableOption(pCxt, yymsp[-1].minor.yy686, yymsp[0].minor.yy53.type, &yymsp[0].minor.yy53.val); } + yymsp[-1].minor.yy686 = yylhsminor.yy686; break; - case 172: /* alter_table_option ::= COMMENT NK_STRING */ -{ yymsp[-1].minor.yy25.type = TABLE_OPTION_COMMENT; yymsp[-1].minor.yy25.val = yymsp[0].minor.yy0; } + case 171: /* alter_table_option ::= COMMENT NK_STRING */ +{ yymsp[-1].minor.yy53.type = TABLE_OPTION_COMMENT; yymsp[-1].minor.yy53.val = yymsp[0].minor.yy0; } break; - case 173: /* alter_table_option ::= TTL NK_INTEGER */ -{ yymsp[-1].minor.yy25.type = TABLE_OPTION_TTL; yymsp[-1].minor.yy25.val = yymsp[0].minor.yy0; } + case 172: /* alter_table_option ::= TTL NK_INTEGER */ +{ yymsp[-1].minor.yy53.type = TABLE_OPTION_TTL; yymsp[-1].minor.yy53.val = yymsp[0].minor.yy0; } break; - case 176: /* col_name ::= column_name */ -{ yylhsminor.yy636 = createColumnNode(pCxt, NULL, &yymsp[0].minor.yy53); } - yymsp[0].minor.yy636 = yylhsminor.yy636; + case 175: /* col_name ::= column_name */ +{ yylhsminor.yy686 = createColumnNode(pCxt, NULL, &yymsp[0].minor.yy113); } + yymsp[0].minor.yy686 = yylhsminor.yy686; break; - case 177: /* cmd ::= SHOW DNODES */ + case 176: /* cmd ::= SHOW DNODES */ { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_DNODES_STMT, NULL, NULL); } break; - case 178: /* cmd ::= SHOW USERS */ + case 177: /* cmd ::= SHOW USERS */ { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_USERS_STMT, NULL, NULL); } break; - case 179: /* cmd ::= SHOW DATABASES */ + case 178: /* cmd ::= SHOW DATABASES */ { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_DATABASES_STMT, NULL, NULL); } break; - case 180: /* cmd ::= SHOW db_name_cond_opt TABLES like_pattern_opt */ -{ pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_TABLES_STMT, yymsp[-2].minor.yy636, yymsp[0].minor.yy636); } + case 179: /* cmd ::= SHOW db_name_cond_opt TABLES like_pattern_opt */ +{ pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_TABLES_STMT, yymsp[-2].minor.yy686, yymsp[0].minor.yy686); } break; - case 181: /* cmd ::= SHOW db_name_cond_opt STABLES like_pattern_opt */ -{ pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_STABLES_STMT, yymsp[-2].minor.yy636, yymsp[0].minor.yy636); } + case 180: /* cmd ::= SHOW db_name_cond_opt STABLES like_pattern_opt */ +{ pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_STABLES_STMT, yymsp[-2].minor.yy686, yymsp[0].minor.yy686); } break; - case 182: /* cmd ::= SHOW db_name_cond_opt VGROUPS */ -{ pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_VGROUPS_STMT, yymsp[-1].minor.yy636, NULL); } + case 181: /* cmd ::= SHOW db_name_cond_opt VGROUPS */ +{ pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_VGROUPS_STMT, yymsp[-1].minor.yy686, NULL); } break; - case 183: /* cmd ::= SHOW MNODES */ + case 182: /* cmd ::= SHOW MNODES */ { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_MNODES_STMT, NULL, NULL); } break; - case 184: /* cmd ::= SHOW MODULES */ + case 183: /* cmd ::= SHOW MODULES */ { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_MODULES_STMT, NULL, NULL); } break; - case 185: /* cmd ::= SHOW QNODES */ + case 184: /* cmd ::= SHOW QNODES */ { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_QNODES_STMT, NULL, NULL); } break; - case 186: /* cmd ::= SHOW FUNCTIONS */ + case 185: /* cmd ::= SHOW FUNCTIONS */ { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_FUNCTIONS_STMT, 
NULL, NULL); } break; - case 187: /* cmd ::= SHOW INDEXES FROM table_name_cond from_db_opt */ -{ pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_INDEXES_STMT, yymsp[-1].minor.yy636, yymsp[0].minor.yy636); } + case 186: /* cmd ::= SHOW INDEXES FROM table_name_cond from_db_opt */ +{ pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_INDEXES_STMT, yymsp[-1].minor.yy686, yymsp[0].minor.yy686); } break; - case 188: /* cmd ::= SHOW STREAMS */ + case 187: /* cmd ::= SHOW STREAMS */ { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_STREAMS_STMT, NULL, NULL); } break; - case 189: /* cmd ::= SHOW ACCOUNTS */ + case 188: /* cmd ::= SHOW ACCOUNTS */ { pCxt->errCode = generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_EXPRIE_STATEMENT); } break; - case 190: /* cmd ::= SHOW APPS */ + case 189: /* cmd ::= SHOW APPS */ { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_APPS_STMT, NULL, NULL); } break; - case 191: /* cmd ::= SHOW CONNECTIONS */ + case 190: /* cmd ::= SHOW CONNECTIONS */ { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_CONNECTIONS_STMT, NULL, NULL); } break; - case 192: /* cmd ::= SHOW LICENCE */ - case 193: /* cmd ::= SHOW GRANTS */ yytestcase(yyruleno==193); + case 191: /* cmd ::= SHOW LICENCE */ + case 192: /* cmd ::= SHOW GRANTS */ yytestcase(yyruleno==192); { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_LICENCE_STMT, NULL, NULL); } break; - case 194: /* cmd ::= SHOW CREATE DATABASE db_name */ -{ pCxt->pRootNode = createShowCreateDatabaseStmt(pCxt, &yymsp[0].minor.yy53); } + case 193: /* cmd ::= SHOW CREATE DATABASE db_name */ +{ pCxt->pRootNode = createShowCreateDatabaseStmt(pCxt, &yymsp[0].minor.yy113); } break; - case 195: /* cmd ::= SHOW CREATE TABLE full_table_name */ -{ pCxt->pRootNode = createShowCreateTableStmt(pCxt, QUERY_NODE_SHOW_CREATE_TABLE_STMT, yymsp[0].minor.yy636); } + case 194: /* cmd ::= SHOW CREATE TABLE full_table_name */ +{ pCxt->pRootNode = createShowCreateTableStmt(pCxt, QUERY_NODE_SHOW_CREATE_TABLE_STMT, yymsp[0].minor.yy686); } break; - case 196: /* cmd ::= SHOW CREATE STABLE full_table_name */ -{ pCxt->pRootNode = createShowCreateTableStmt(pCxt, QUERY_NODE_SHOW_CREATE_STABLE_STMT, yymsp[0].minor.yy636); } + case 195: /* cmd ::= SHOW CREATE STABLE full_table_name */ +{ pCxt->pRootNode = createShowCreateTableStmt(pCxt, QUERY_NODE_SHOW_CREATE_STABLE_STMT, yymsp[0].minor.yy686); } break; - case 197: /* cmd ::= SHOW QUERIES */ + case 196: /* cmd ::= SHOW QUERIES */ { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_QUERIES_STMT, NULL, NULL); } break; - case 198: /* cmd ::= SHOW SCORES */ + case 197: /* cmd ::= SHOW SCORES */ { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_SCORES_STMT, NULL, NULL); } break; - case 199: /* cmd ::= SHOW TOPICS */ + case 198: /* cmd ::= SHOW TOPICS */ { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_TOPICS_STMT, NULL, NULL); } break; - case 200: /* cmd ::= SHOW VARIABLES */ + case 199: /* cmd ::= SHOW VARIABLES */ { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_VARIABLE_STMT, NULL, NULL); } break; - case 201: /* cmd ::= SHOW BNODES */ + case 200: /* cmd ::= SHOW BNODES */ { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_BNODES_STMT, NULL, NULL); } break; - case 202: /* cmd ::= SHOW SNODES */ + case 201: /* cmd ::= SHOW SNODES */ { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_SNODES_STMT, NULL, NULL); } break; - case 203: /* cmd ::= SHOW CLUSTER */ + case 202: /* cmd ::= SHOW CLUSTER */ { pCxt->pRootNode = createShowStmt(pCxt, 
QUERY_NODE_SHOW_CLUSTER_STMT, NULL, NULL); } break; - case 204: /* cmd ::= SHOW TRANSACTIONS */ + case 203: /* cmd ::= SHOW TRANSACTIONS */ { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_TRANSACTIONS_STMT, NULL, NULL); } break; - case 205: /* db_name_cond_opt ::= */ - case 210: /* from_db_opt ::= */ yytestcase(yyruleno==210); -{ yymsp[1].minor.yy636 = createDefaultDatabaseCondValue(pCxt); } + case 204: /* db_name_cond_opt ::= */ + case 209: /* from_db_opt ::= */ yytestcase(yyruleno==209); +{ yymsp[1].minor.yy686 = createDefaultDatabaseCondValue(pCxt); } break; - case 206: /* db_name_cond_opt ::= db_name NK_DOT */ -{ yylhsminor.yy636 = createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[-1].minor.yy53); } - yymsp[-1].minor.yy636 = yylhsminor.yy636; + case 205: /* db_name_cond_opt ::= db_name NK_DOT */ +{ yylhsminor.yy686 = createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[-1].minor.yy113); } + yymsp[-1].minor.yy686 = yylhsminor.yy686; break; - case 207: /* like_pattern_opt ::= */ - case 218: /* index_options ::= */ yytestcase(yyruleno==218); - case 250: /* into_opt ::= */ yytestcase(yyruleno==250); - case 403: /* where_clause_opt ::= */ yytestcase(yyruleno==403); - case 407: /* twindow_clause_opt ::= */ yytestcase(yyruleno==407); - case 412: /* sliding_opt ::= */ yytestcase(yyruleno==412); - case 414: /* fill_opt ::= */ yytestcase(yyruleno==414); - case 426: /* having_clause_opt ::= */ yytestcase(yyruleno==426); - case 436: /* slimit_clause_opt ::= */ yytestcase(yyruleno==436); - case 440: /* limit_clause_opt ::= */ yytestcase(yyruleno==440); -{ yymsp[1].minor.yy636 = NULL; } + case 206: /* like_pattern_opt ::= */ + case 217: /* index_options ::= */ yytestcase(yyruleno==217); + case 246: /* into_opt ::= */ yytestcase(yyruleno==246); + case 399: /* where_clause_opt ::= */ yytestcase(yyruleno==399); + case 403: /* twindow_clause_opt ::= */ yytestcase(yyruleno==403); + case 408: /* sliding_opt ::= */ yytestcase(yyruleno==408); + case 410: /* fill_opt ::= */ yytestcase(yyruleno==410); + case 422: /* having_clause_opt ::= */ yytestcase(yyruleno==422); + case 432: /* slimit_clause_opt ::= */ yytestcase(yyruleno==432); + case 436: /* limit_clause_opt ::= */ yytestcase(yyruleno==436); +{ yymsp[1].minor.yy686 = NULL; } break; - case 208: /* like_pattern_opt ::= LIKE NK_STRING */ -{ yymsp[-1].minor.yy636 = createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[0].minor.yy0); } + case 207: /* like_pattern_opt ::= LIKE NK_STRING */ +{ yymsp[-1].minor.yy686 = createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[0].minor.yy0); } break; - case 209: /* table_name_cond ::= table_name */ -{ yylhsminor.yy636 = createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[0].minor.yy53); } - yymsp[0].minor.yy636 = yylhsminor.yy636; + case 208: /* table_name_cond ::= table_name */ +{ yylhsminor.yy686 = createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[0].minor.yy113); } + yymsp[0].minor.yy686 = yylhsminor.yy686; break; - case 211: /* from_db_opt ::= FROM db_name */ -{ yymsp[-1].minor.yy636 = createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[0].minor.yy53); } + case 210: /* from_db_opt ::= FROM db_name */ +{ yymsp[-1].minor.yy686 = createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[0].minor.yy113); } break; - case 214: /* func_name ::= function_name */ -{ yylhsminor.yy636 = createFunctionNode(pCxt, &yymsp[0].minor.yy53, NULL); } - yymsp[0].minor.yy636 = yylhsminor.yy636; + case 213: /* func_name ::= function_name */ +{ yylhsminor.yy686 = createFunctionNode(pCxt, &yymsp[0].minor.yy113, NULL); } + 
yymsp[0].minor.yy686 = yylhsminor.yy686; break; - case 215: /* cmd ::= CREATE SMA INDEX not_exists_opt index_name ON table_name index_options */ -{ pCxt->pRootNode = createCreateIndexStmt(pCxt, INDEX_TYPE_SMA, yymsp[-4].minor.yy603, &yymsp[-3].minor.yy53, &yymsp[-1].minor.yy53, NULL, yymsp[0].minor.yy636); } + case 214: /* cmd ::= CREATE SMA INDEX not_exists_opt index_name ON table_name index_options */ +{ pCxt->pRootNode = createCreateIndexStmt(pCxt, INDEX_TYPE_SMA, yymsp[-4].minor.yy131, &yymsp[-3].minor.yy113, &yymsp[-1].minor.yy113, NULL, yymsp[0].minor.yy686); } break; - case 216: /* cmd ::= CREATE FULLTEXT INDEX not_exists_opt index_name ON table_name NK_LP col_name_list NK_RP */ -{ pCxt->pRootNode = createCreateIndexStmt(pCxt, INDEX_TYPE_FULLTEXT, yymsp[-6].minor.yy603, &yymsp[-5].minor.yy53, &yymsp[-3].minor.yy53, yymsp[-1].minor.yy236, NULL); } + case 215: /* cmd ::= CREATE FULLTEXT INDEX not_exists_opt index_name ON table_name NK_LP col_name_list NK_RP */ +{ pCxt->pRootNode = createCreateIndexStmt(pCxt, INDEX_TYPE_FULLTEXT, yymsp[-6].minor.yy131, &yymsp[-5].minor.yy113, &yymsp[-3].minor.yy113, yymsp[-1].minor.yy670, NULL); } break; - case 217: /* cmd ::= DROP INDEX exists_opt index_name ON table_name */ -{ pCxt->pRootNode = createDropIndexStmt(pCxt, yymsp[-3].minor.yy603, &yymsp[-2].minor.yy53, &yymsp[0].minor.yy53); } + case 216: /* cmd ::= DROP INDEX exists_opt index_name ON table_name */ +{ pCxt->pRootNode = createDropIndexStmt(pCxt, yymsp[-3].minor.yy131, &yymsp[-2].minor.yy113, &yymsp[0].minor.yy113); } break; - case 219: /* index_options ::= FUNCTION NK_LP func_list NK_RP INTERVAL NK_LP duration_literal NK_RP sliding_opt */ -{ yymsp[-8].minor.yy636 = createIndexOption(pCxt, yymsp[-6].minor.yy236, releaseRawExprNode(pCxt, yymsp[-2].minor.yy636), NULL, yymsp[0].minor.yy636); } + case 218: /* index_options ::= FUNCTION NK_LP func_list NK_RP INTERVAL NK_LP duration_literal NK_RP sliding_opt */ +{ yymsp[-8].minor.yy686 = createIndexOption(pCxt, yymsp[-6].minor.yy670, releaseRawExprNode(pCxt, yymsp[-2].minor.yy686), NULL, yymsp[0].minor.yy686); } break; - case 220: /* index_options ::= FUNCTION NK_LP func_list NK_RP INTERVAL NK_LP duration_literal NK_COMMA duration_literal NK_RP sliding_opt */ -{ yymsp[-10].minor.yy636 = createIndexOption(pCxt, yymsp[-8].minor.yy236, releaseRawExprNode(pCxt, yymsp[-4].minor.yy636), releaseRawExprNode(pCxt, yymsp[-2].minor.yy636), yymsp[0].minor.yy636); } + case 219: /* index_options ::= FUNCTION NK_LP func_list NK_RP INTERVAL NK_LP duration_literal NK_COMMA duration_literal NK_RP sliding_opt */ +{ yymsp[-10].minor.yy686 = createIndexOption(pCxt, yymsp[-8].minor.yy670, releaseRawExprNode(pCxt, yymsp[-4].minor.yy686), releaseRawExprNode(pCxt, yymsp[-2].minor.yy686), yymsp[0].minor.yy686); } break; - case 223: /* func ::= function_name NK_LP expression_list NK_RP */ -{ yylhsminor.yy636 = createFunctionNode(pCxt, &yymsp[-3].minor.yy53, yymsp[-1].minor.yy236); } - yymsp[-3].minor.yy636 = yylhsminor.yy636; + case 222: /* func ::= function_name NK_LP expression_list NK_RP */ +{ yylhsminor.yy686 = createFunctionNode(pCxt, &yymsp[-3].minor.yy113, yymsp[-1].minor.yy670); } + yymsp[-3].minor.yy686 = yylhsminor.yy686; break; - case 224: /* cmd ::= CREATE TOPIC not_exists_opt topic_name topic_options AS query_expression */ -{ pCxt->pRootNode = createCreateTopicStmt(pCxt, yymsp[-4].minor.yy603, &yymsp[-3].minor.yy53, yymsp[0].minor.yy636, NULL, yymsp[-2].minor.yy636); } + case 223: /* cmd ::= CREATE TOPIC not_exists_opt topic_name AS query_expression */ +{ 
pCxt->pRootNode = createCreateTopicStmt(pCxt, yymsp[-3].minor.yy131, &yymsp[-2].minor.yy113, yymsp[0].minor.yy686, NULL, NULL); } break; - case 225: /* cmd ::= CREATE TOPIC not_exists_opt topic_name topic_options AS db_name */ -{ pCxt->pRootNode = createCreateTopicStmt(pCxt, yymsp[-4].minor.yy603, &yymsp[-3].minor.yy53, NULL, &yymsp[0].minor.yy53, yymsp[-2].minor.yy636); } + case 224: /* cmd ::= CREATE TOPIC not_exists_opt topic_name AS DATABASE db_name */ +{ pCxt->pRootNode = createCreateTopicStmt(pCxt, yymsp[-4].minor.yy131, &yymsp[-3].minor.yy113, NULL, &yymsp[0].minor.yy113, NULL); } break; - case 226: /* cmd ::= DROP TOPIC exists_opt topic_name */ -{ pCxt->pRootNode = createDropTopicStmt(pCxt, yymsp[-1].minor.yy603, &yymsp[0].minor.yy53); } - break; - case 227: /* cmd ::= DROP CGROUP exists_opt cgroup_name ON topic_name */ -{ pCxt->pRootNode = createDropCGroupStmt(pCxt, yymsp[-3].minor.yy603, &yymsp[-2].minor.yy53, &yymsp[0].minor.yy53); } - break; - case 228: /* topic_options ::= */ -{ yymsp[1].minor.yy636 = createTopicOptions(pCxt); } + case 225: /* cmd ::= CREATE TOPIC not_exists_opt topic_name AS STABLE full_table_name */ +{ pCxt->pRootNode = createCreateTopicStmt(pCxt, yymsp[-4].minor.yy131, &yymsp[-3].minor.yy113, NULL, NULL, yymsp[0].minor.yy686); } break; - case 229: /* topic_options ::= topic_options WITH TABLE */ -{ ((STopicOptions*)yymsp[-2].minor.yy636)->withTable = true; yylhsminor.yy636 = yymsp[-2].minor.yy636; } - yymsp[-2].minor.yy636 = yylhsminor.yy636; - break; - case 230: /* topic_options ::= topic_options WITH SCHEMA */ -{ ((STopicOptions*)yymsp[-2].minor.yy636)->withSchema = true; yylhsminor.yy636 = yymsp[-2].minor.yy636; } - yymsp[-2].minor.yy636 = yylhsminor.yy636; + case 226: /* cmd ::= DROP TOPIC exists_opt topic_name */ +{ pCxt->pRootNode = createDropTopicStmt(pCxt, yymsp[-1].minor.yy131, &yymsp[0].minor.yy113); } break; - case 231: /* topic_options ::= topic_options WITH TAG */ -{ ((STopicOptions*)yymsp[-2].minor.yy636)->withTag = true; yylhsminor.yy636 = yymsp[-2].minor.yy636; } - yymsp[-2].minor.yy636 = yylhsminor.yy636; + case 227: /* cmd ::= DROP CONSUMER GROUP exists_opt cgroup_name ON topic_name */ +{ pCxt->pRootNode = createDropCGroupStmt(pCxt, yymsp[-3].minor.yy131, &yymsp[-2].minor.yy113, &yymsp[0].minor.yy113); } break; - case 232: /* cmd ::= DESC full_table_name */ - case 233: /* cmd ::= DESCRIBE full_table_name */ yytestcase(yyruleno==233); -{ pCxt->pRootNode = createDescribeStmt(pCxt, yymsp[0].minor.yy636); } + case 228: /* cmd ::= DESC full_table_name */ + case 229: /* cmd ::= DESCRIBE full_table_name */ yytestcase(yyruleno==229); +{ pCxt->pRootNode = createDescribeStmt(pCxt, yymsp[0].minor.yy686); } break; - case 234: /* cmd ::= RESET QUERY CACHE */ + case 230: /* cmd ::= RESET QUERY CACHE */ { pCxt->pRootNode = createResetQueryCacheStmt(pCxt); } break; - case 235: /* cmd ::= EXPLAIN analyze_opt explain_options query_expression */ -{ pCxt->pRootNode = createExplainStmt(pCxt, yymsp[-2].minor.yy603, yymsp[-1].minor.yy636, yymsp[0].minor.yy636); } + case 231: /* cmd ::= EXPLAIN analyze_opt explain_options query_expression */ +{ pCxt->pRootNode = createExplainStmt(pCxt, yymsp[-2].minor.yy131, yymsp[-1].minor.yy686, yymsp[0].minor.yy686); } break; - case 237: /* analyze_opt ::= ANALYZE */ - case 245: /* agg_func_opt ::= AGGREGATE */ yytestcase(yyruleno==245); - case 393: /* set_quantifier_opt ::= DISTINCT */ yytestcase(yyruleno==393); -{ yymsp[0].minor.yy603 = true; } + case 233: /* analyze_opt ::= ANALYZE */ + case 241: /* agg_func_opt ::= 
AGGREGATE */ yytestcase(yyruleno==241); + case 389: /* set_quantifier_opt ::= DISTINCT */ yytestcase(yyruleno==389); +{ yymsp[0].minor.yy131 = true; } break; - case 238: /* explain_options ::= */ -{ yymsp[1].minor.yy636 = createDefaultExplainOptions(pCxt); } + case 234: /* explain_options ::= */ +{ yymsp[1].minor.yy686 = createDefaultExplainOptions(pCxt); } break; - case 239: /* explain_options ::= explain_options VERBOSE NK_BOOL */ -{ yylhsminor.yy636 = setExplainVerbose(pCxt, yymsp[-2].minor.yy636, &yymsp[0].minor.yy0); } - yymsp[-2].minor.yy636 = yylhsminor.yy636; + case 235: /* explain_options ::= explain_options VERBOSE NK_BOOL */ +{ yylhsminor.yy686 = setExplainVerbose(pCxt, yymsp[-2].minor.yy686, &yymsp[0].minor.yy0); } + yymsp[-2].minor.yy686 = yylhsminor.yy686; break; - case 240: /* explain_options ::= explain_options RATIO NK_FLOAT */ -{ yylhsminor.yy636 = setExplainRatio(pCxt, yymsp[-2].minor.yy636, &yymsp[0].minor.yy0); } - yymsp[-2].minor.yy636 = yylhsminor.yy636; + case 236: /* explain_options ::= explain_options RATIO NK_FLOAT */ +{ yylhsminor.yy686 = setExplainRatio(pCxt, yymsp[-2].minor.yy686, &yymsp[0].minor.yy0); } + yymsp[-2].minor.yy686 = yylhsminor.yy686; break; - case 241: /* cmd ::= COMPACT VNODES IN NK_LP integer_list NK_RP */ -{ pCxt->pRootNode = createCompactStmt(pCxt, yymsp[-1].minor.yy236); } + case 237: /* cmd ::= COMPACT VNODES IN NK_LP integer_list NK_RP */ +{ pCxt->pRootNode = createCompactStmt(pCxt, yymsp[-1].minor.yy670); } break; - case 242: /* cmd ::= CREATE agg_func_opt FUNCTION not_exists_opt function_name AS NK_STRING OUTPUTTYPE type_name bufsize_opt */ -{ pCxt->pRootNode = createCreateFunctionStmt(pCxt, yymsp[-6].minor.yy603, yymsp[-8].minor.yy603, &yymsp[-5].minor.yy53, &yymsp[-3].minor.yy0, yymsp[-1].minor.yy450, yymsp[0].minor.yy158); } + case 238: /* cmd ::= CREATE agg_func_opt FUNCTION not_exists_opt function_name AS NK_STRING OUTPUTTYPE type_name bufsize_opt */ +{ pCxt->pRootNode = createCreateFunctionStmt(pCxt, yymsp[-6].minor.yy131, yymsp[-8].minor.yy131, &yymsp[-5].minor.yy113, &yymsp[-3].minor.yy0, yymsp[-1].minor.yy490, yymsp[0].minor.yy550); } break; - case 243: /* cmd ::= DROP FUNCTION exists_opt function_name */ -{ pCxt->pRootNode = createDropFunctionStmt(pCxt, yymsp[-1].minor.yy603, &yymsp[0].minor.yy53); } + case 239: /* cmd ::= DROP FUNCTION exists_opt function_name */ +{ pCxt->pRootNode = createDropFunctionStmt(pCxt, yymsp[-1].minor.yy131, &yymsp[0].minor.yy113); } break; - case 246: /* bufsize_opt ::= */ -{ yymsp[1].minor.yy158 = 0; } + case 242: /* bufsize_opt ::= */ +{ yymsp[1].minor.yy550 = 0; } break; - case 247: /* bufsize_opt ::= BUFSIZE NK_INTEGER */ -{ yymsp[-1].minor.yy158 = taosStr2Int32(yymsp[0].minor.yy0.z, NULL, 10); } + case 243: /* bufsize_opt ::= BUFSIZE NK_INTEGER */ +{ yymsp[-1].minor.yy550 = taosStr2Int32(yymsp[0].minor.yy0.z, NULL, 10); } break; - case 248: /* cmd ::= CREATE STREAM not_exists_opt stream_name stream_options into_opt AS query_expression */ -{ pCxt->pRootNode = createCreateStreamStmt(pCxt, yymsp[-5].minor.yy603, &yymsp[-4].minor.yy53, yymsp[-2].minor.yy636, yymsp[-3].minor.yy636, yymsp[0].minor.yy636); } + case 244: /* cmd ::= CREATE STREAM not_exists_opt stream_name stream_options into_opt AS query_expression */ +{ pCxt->pRootNode = createCreateStreamStmt(pCxt, yymsp[-5].minor.yy131, &yymsp[-4].minor.yy113, yymsp[-2].minor.yy686, yymsp[-3].minor.yy686, yymsp[0].minor.yy686); } break; - case 249: /* cmd ::= DROP STREAM exists_opt stream_name */ -{ pCxt->pRootNode = createDropStreamStmt(pCxt, 
yymsp[-1].minor.yy603, &yymsp[0].minor.yy53); } + case 245: /* cmd ::= DROP STREAM exists_opt stream_name */ +{ pCxt->pRootNode = createDropStreamStmt(pCxt, yymsp[-1].minor.yy131, &yymsp[0].minor.yy113); } break; - case 251: /* into_opt ::= INTO full_table_name */ - case 374: /* from_clause ::= FROM table_reference_list */ yytestcase(yyruleno==374); - case 404: /* where_clause_opt ::= WHERE search_condition */ yytestcase(yyruleno==404); - case 427: /* having_clause_opt ::= HAVING search_condition */ yytestcase(yyruleno==427); -{ yymsp[-1].minor.yy636 = yymsp[0].minor.yy636; } + case 247: /* into_opt ::= INTO full_table_name */ + case 370: /* from_clause ::= FROM table_reference_list */ yytestcase(yyruleno==370); + case 400: /* where_clause_opt ::= WHERE search_condition */ yytestcase(yyruleno==400); + case 423: /* having_clause_opt ::= HAVING search_condition */ yytestcase(yyruleno==423); +{ yymsp[-1].minor.yy686 = yymsp[0].minor.yy686; } break; - case 252: /* stream_options ::= */ -{ yymsp[1].minor.yy636 = createStreamOptions(pCxt); } + case 248: /* stream_options ::= */ +{ yymsp[1].minor.yy686 = createStreamOptions(pCxt); } break; - case 253: /* stream_options ::= stream_options TRIGGER AT_ONCE */ -{ ((SStreamOptions*)yymsp[-2].minor.yy636)->triggerType = STREAM_TRIGGER_AT_ONCE; yylhsminor.yy636 = yymsp[-2].minor.yy636; } - yymsp[-2].minor.yy636 = yylhsminor.yy636; + case 249: /* stream_options ::= stream_options TRIGGER AT_ONCE */ +{ ((SStreamOptions*)yymsp[-2].minor.yy686)->triggerType = STREAM_TRIGGER_AT_ONCE; yylhsminor.yy686 = yymsp[-2].minor.yy686; } + yymsp[-2].minor.yy686 = yylhsminor.yy686; break; - case 254: /* stream_options ::= stream_options TRIGGER WINDOW_CLOSE */ -{ ((SStreamOptions*)yymsp[-2].minor.yy636)->triggerType = STREAM_TRIGGER_WINDOW_CLOSE; yylhsminor.yy636 = yymsp[-2].minor.yy636; } - yymsp[-2].minor.yy636 = yylhsminor.yy636; + case 250: /* stream_options ::= stream_options TRIGGER WINDOW_CLOSE */ +{ ((SStreamOptions*)yymsp[-2].minor.yy686)->triggerType = STREAM_TRIGGER_WINDOW_CLOSE; yylhsminor.yy686 = yymsp[-2].minor.yy686; } + yymsp[-2].minor.yy686 = yylhsminor.yy686; break; - case 255: /* stream_options ::= stream_options WATERMARK duration_literal */ -{ ((SStreamOptions*)yymsp[-2].minor.yy636)->pWatermark = releaseRawExprNode(pCxt, yymsp[0].minor.yy636); yylhsminor.yy636 = yymsp[-2].minor.yy636; } - yymsp[-2].minor.yy636 = yylhsminor.yy636; + case 251: /* stream_options ::= stream_options WATERMARK duration_literal */ +{ ((SStreamOptions*)yymsp[-2].minor.yy686)->pWatermark = releaseRawExprNode(pCxt, yymsp[0].minor.yy686); yylhsminor.yy686 = yymsp[-2].minor.yy686; } + yymsp[-2].minor.yy686 = yylhsminor.yy686; break; - case 256: /* cmd ::= KILL CONNECTION NK_INTEGER */ + case 252: /* cmd ::= KILL CONNECTION NK_INTEGER */ { pCxt->pRootNode = createKillStmt(pCxt, QUERY_NODE_KILL_CONNECTION_STMT, &yymsp[0].minor.yy0); } break; - case 257: /* cmd ::= KILL QUERY NK_INTEGER */ + case 253: /* cmd ::= KILL QUERY NK_INTEGER */ { pCxt->pRootNode = createKillStmt(pCxt, QUERY_NODE_KILL_QUERY_STMT, &yymsp[0].minor.yy0); } break; - case 258: /* cmd ::= KILL TRANSACTION NK_INTEGER */ + case 254: /* cmd ::= KILL TRANSACTION NK_INTEGER */ { pCxt->pRootNode = createKillStmt(pCxt, QUERY_NODE_KILL_TRANSACTION_STMT, &yymsp[0].minor.yy0); } break; - case 259: /* cmd ::= MERGE VGROUP NK_INTEGER NK_INTEGER */ + case 255: /* cmd ::= MERGE VGROUP NK_INTEGER NK_INTEGER */ { pCxt->pRootNode = createMergeVgroupStmt(pCxt, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy0); } break; - case 260: /* 
cmd ::= REDISTRIBUTE VGROUP NK_INTEGER dnode_list */ -{ pCxt->pRootNode = createRedistributeVgroupStmt(pCxt, &yymsp[-1].minor.yy0, yymsp[0].minor.yy236); } + case 256: /* cmd ::= REDISTRIBUTE VGROUP NK_INTEGER dnode_list */ +{ pCxt->pRootNode = createRedistributeVgroupStmt(pCxt, &yymsp[-1].minor.yy0, yymsp[0].minor.yy670); } break; - case 261: /* cmd ::= SPLIT VGROUP NK_INTEGER */ + case 257: /* cmd ::= SPLIT VGROUP NK_INTEGER */ { pCxt->pRootNode = createSplitVgroupStmt(pCxt, &yymsp[0].minor.yy0); } break; - case 262: /* dnode_list ::= DNODE NK_INTEGER */ -{ yymsp[-1].minor.yy236 = createNodeList(pCxt, createValueNode(pCxt, TSDB_DATA_TYPE_BIGINT, &yymsp[0].minor.yy0)); } - break; - case 264: /* cmd ::= SYNCDB db_name REPLICA */ -{ pCxt->pRootNode = createSyncdbStmt(pCxt, &yymsp[-1].minor.yy53); } - break; - case 266: /* literal ::= NK_INTEGER */ -{ yylhsminor.yy636 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createValueNode(pCxt, TSDB_DATA_TYPE_BIGINT, &yymsp[0].minor.yy0)); } - yymsp[0].minor.yy636 = yylhsminor.yy636; - break; - case 267: /* literal ::= NK_FLOAT */ -{ yylhsminor.yy636 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createValueNode(pCxt, TSDB_DATA_TYPE_DOUBLE, &yymsp[0].minor.yy0)); } - yymsp[0].minor.yy636 = yylhsminor.yy636; - break; - case 268: /* literal ::= NK_STRING */ -{ yylhsminor.yy636 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[0].minor.yy0)); } - yymsp[0].minor.yy636 = yylhsminor.yy636; - break; - case 269: /* literal ::= NK_BOOL */ -{ yylhsminor.yy636 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createValueNode(pCxt, TSDB_DATA_TYPE_BOOL, &yymsp[0].minor.yy0)); } - yymsp[0].minor.yy636 = yylhsminor.yy636; - break; - case 270: /* literal ::= TIMESTAMP NK_STRING */ -{ yylhsminor.yy636 = createRawExprNodeExt(pCxt, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy0, createValueNode(pCxt, TSDB_DATA_TYPE_TIMESTAMP, &yymsp[0].minor.yy0)); } - yymsp[-1].minor.yy636 = yylhsminor.yy636; - break; - case 271: /* literal ::= duration_literal */ - case 281: /* signed_literal ::= signed */ yytestcase(yyruleno==281); - case 301: /* expression ::= literal */ yytestcase(yyruleno==301); - case 302: /* expression ::= pseudo_column */ yytestcase(yyruleno==302); - case 303: /* expression ::= column_reference */ yytestcase(yyruleno==303); - case 304: /* expression ::= function_expression */ yytestcase(yyruleno==304); - case 305: /* expression ::= subquery */ yytestcase(yyruleno==305); - case 330: /* function_expression ::= literal_func */ yytestcase(yyruleno==330); - case 366: /* boolean_value_expression ::= boolean_primary */ yytestcase(yyruleno==366); - case 370: /* boolean_primary ::= predicate */ yytestcase(yyruleno==370); - case 372: /* common_expression ::= expression */ yytestcase(yyruleno==372); - case 373: /* common_expression ::= boolean_value_expression */ yytestcase(yyruleno==373); - case 375: /* table_reference_list ::= table_reference */ yytestcase(yyruleno==375); - case 377: /* table_reference ::= table_primary */ yytestcase(yyruleno==377); - case 378: /* table_reference ::= joined_table */ yytestcase(yyruleno==378); - case 382: /* table_primary ::= parenthesized_joined_table */ yytestcase(yyruleno==382); - case 429: /* query_expression_body ::= query_primary */ yytestcase(yyruleno==429); - case 432: /* query_primary ::= query_specification */ yytestcase(yyruleno==432); -{ yylhsminor.yy636 = yymsp[0].minor.yy636; } - yymsp[0].minor.yy636 = yylhsminor.yy636; - break; - case 272: /* literal ::= NULL */ -{ yylhsminor.yy636 
= createRawExprNode(pCxt, &yymsp[0].minor.yy0, createValueNode(pCxt, TSDB_DATA_TYPE_NULL, &yymsp[0].minor.yy0)); } - yymsp[0].minor.yy636 = yylhsminor.yy636; - break; - case 273: /* literal ::= NK_QUESTION */ -{ yylhsminor.yy636 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createPlaceholderValueNode(pCxt, &yymsp[0].minor.yy0)); } - yymsp[0].minor.yy636 = yylhsminor.yy636; - break; - case 274: /* duration_literal ::= NK_VARIABLE */ -{ yylhsminor.yy636 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createDurationValueNode(pCxt, &yymsp[0].minor.yy0)); } - yymsp[0].minor.yy636 = yylhsminor.yy636; - break; - case 275: /* signed ::= NK_INTEGER */ -{ yylhsminor.yy636 = createValueNode(pCxt, TSDB_DATA_TYPE_BIGINT, &yymsp[0].minor.yy0); } - yymsp[0].minor.yy636 = yylhsminor.yy636; - break; - case 276: /* signed ::= NK_PLUS NK_INTEGER */ -{ yymsp[-1].minor.yy636 = createValueNode(pCxt, TSDB_DATA_TYPE_BIGINT, &yymsp[0].minor.yy0); } - break; - case 277: /* signed ::= NK_MINUS NK_INTEGER */ + case 258: /* dnode_list ::= DNODE NK_INTEGER */ +{ yymsp[-1].minor.yy670 = createNodeList(pCxt, createValueNode(pCxt, TSDB_DATA_TYPE_BIGINT, &yymsp[0].minor.yy0)); } + break; + case 260: /* cmd ::= SYNCDB db_name REPLICA */ +{ pCxt->pRootNode = createSyncdbStmt(pCxt, &yymsp[-1].minor.yy113); } + break; + case 262: /* literal ::= NK_INTEGER */ +{ yylhsminor.yy686 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createValueNode(pCxt, TSDB_DATA_TYPE_BIGINT, &yymsp[0].minor.yy0)); } + yymsp[0].minor.yy686 = yylhsminor.yy686; + break; + case 263: /* literal ::= NK_FLOAT */ +{ yylhsminor.yy686 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createValueNode(pCxt, TSDB_DATA_TYPE_DOUBLE, &yymsp[0].minor.yy0)); } + yymsp[0].minor.yy686 = yylhsminor.yy686; + break; + case 264: /* literal ::= NK_STRING */ +{ yylhsminor.yy686 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[0].minor.yy0)); } + yymsp[0].minor.yy686 = yylhsminor.yy686; + break; + case 265: /* literal ::= NK_BOOL */ +{ yylhsminor.yy686 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createValueNode(pCxt, TSDB_DATA_TYPE_BOOL, &yymsp[0].minor.yy0)); } + yymsp[0].minor.yy686 = yylhsminor.yy686; + break; + case 266: /* literal ::= TIMESTAMP NK_STRING */ +{ yylhsminor.yy686 = createRawExprNodeExt(pCxt, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy0, createValueNode(pCxt, TSDB_DATA_TYPE_TIMESTAMP, &yymsp[0].minor.yy0)); } + yymsp[-1].minor.yy686 = yylhsminor.yy686; + break; + case 267: /* literal ::= duration_literal */ + case 277: /* signed_literal ::= signed */ yytestcase(yyruleno==277); + case 297: /* expression ::= literal */ yytestcase(yyruleno==297); + case 298: /* expression ::= pseudo_column */ yytestcase(yyruleno==298); + case 299: /* expression ::= column_reference */ yytestcase(yyruleno==299); + case 300: /* expression ::= function_expression */ yytestcase(yyruleno==300); + case 301: /* expression ::= subquery */ yytestcase(yyruleno==301); + case 326: /* function_expression ::= literal_func */ yytestcase(yyruleno==326); + case 362: /* boolean_value_expression ::= boolean_primary */ yytestcase(yyruleno==362); + case 366: /* boolean_primary ::= predicate */ yytestcase(yyruleno==366); + case 368: /* common_expression ::= expression */ yytestcase(yyruleno==368); + case 369: /* common_expression ::= boolean_value_expression */ yytestcase(yyruleno==369); + case 371: /* table_reference_list ::= table_reference */ yytestcase(yyruleno==371); + case 373: /* table_reference ::= table_primary */ yytestcase(yyruleno==373); + 
case 374: /* table_reference ::= joined_table */ yytestcase(yyruleno==374); + case 378: /* table_primary ::= parenthesized_joined_table */ yytestcase(yyruleno==378); + case 425: /* query_expression_body ::= query_primary */ yytestcase(yyruleno==425); + case 428: /* query_primary ::= query_specification */ yytestcase(yyruleno==428); +{ yylhsminor.yy686 = yymsp[0].minor.yy686; } + yymsp[0].minor.yy686 = yylhsminor.yy686; + break; + case 268: /* literal ::= NULL */ +{ yylhsminor.yy686 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createValueNode(pCxt, TSDB_DATA_TYPE_NULL, &yymsp[0].minor.yy0)); } + yymsp[0].minor.yy686 = yylhsminor.yy686; + break; + case 269: /* literal ::= NK_QUESTION */ +{ yylhsminor.yy686 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createPlaceholderValueNode(pCxt, &yymsp[0].minor.yy0)); } + yymsp[0].minor.yy686 = yylhsminor.yy686; + break; + case 270: /* duration_literal ::= NK_VARIABLE */ +{ yylhsminor.yy686 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createDurationValueNode(pCxt, &yymsp[0].minor.yy0)); } + yymsp[0].minor.yy686 = yylhsminor.yy686; + break; + case 271: /* signed ::= NK_INTEGER */ +{ yylhsminor.yy686 = createValueNode(pCxt, TSDB_DATA_TYPE_BIGINT, &yymsp[0].minor.yy0); } + yymsp[0].minor.yy686 = yylhsminor.yy686; + break; + case 272: /* signed ::= NK_PLUS NK_INTEGER */ +{ yymsp[-1].minor.yy686 = createValueNode(pCxt, TSDB_DATA_TYPE_BIGINT, &yymsp[0].minor.yy0); } + break; + case 273: /* signed ::= NK_MINUS NK_INTEGER */ { SToken t = yymsp[-1].minor.yy0; t.n = (yymsp[0].minor.yy0.z + yymsp[0].minor.yy0.n) - yymsp[-1].minor.yy0.z; - yylhsminor.yy636 = createValueNode(pCxt, TSDB_DATA_TYPE_BIGINT, &t); + yylhsminor.yy686 = createValueNode(pCxt, TSDB_DATA_TYPE_BIGINT, &t); } - yymsp[-1].minor.yy636 = yylhsminor.yy636; + yymsp[-1].minor.yy686 = yylhsminor.yy686; break; - case 278: /* signed ::= NK_FLOAT */ -{ yylhsminor.yy636 = createValueNode(pCxt, TSDB_DATA_TYPE_DOUBLE, &yymsp[0].minor.yy0); } - yymsp[0].minor.yy636 = yylhsminor.yy636; + case 274: /* signed ::= NK_FLOAT */ +{ yylhsminor.yy686 = createValueNode(pCxt, TSDB_DATA_TYPE_DOUBLE, &yymsp[0].minor.yy0); } + yymsp[0].minor.yy686 = yylhsminor.yy686; break; - case 279: /* signed ::= NK_PLUS NK_FLOAT */ -{ yymsp[-1].minor.yy636 = createValueNode(pCxt, TSDB_DATA_TYPE_DOUBLE, &yymsp[0].minor.yy0); } + case 275: /* signed ::= NK_PLUS NK_FLOAT */ +{ yymsp[-1].minor.yy686 = createValueNode(pCxt, TSDB_DATA_TYPE_DOUBLE, &yymsp[0].minor.yy0); } break; - case 280: /* signed ::= NK_MINUS NK_FLOAT */ + case 276: /* signed ::= NK_MINUS NK_FLOAT */ { SToken t = yymsp[-1].minor.yy0; t.n = (yymsp[0].minor.yy0.z + yymsp[0].minor.yy0.n) - yymsp[-1].minor.yy0.z; - yylhsminor.yy636 = createValueNode(pCxt, TSDB_DATA_TYPE_DOUBLE, &t); + yylhsminor.yy686 = createValueNode(pCxt, TSDB_DATA_TYPE_DOUBLE, &t); } - yymsp[-1].minor.yy636 = yylhsminor.yy636; + yymsp[-1].minor.yy686 = yylhsminor.yy686; break; - case 282: /* signed_literal ::= NK_STRING */ -{ yylhsminor.yy636 = createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[0].minor.yy0); } - yymsp[0].minor.yy636 = yylhsminor.yy636; + case 278: /* signed_literal ::= NK_STRING */ +{ yylhsminor.yy686 = createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[0].minor.yy0); } + yymsp[0].minor.yy686 = yylhsminor.yy686; break; - case 283: /* signed_literal ::= NK_BOOL */ -{ yylhsminor.yy636 = createValueNode(pCxt, TSDB_DATA_TYPE_BOOL, &yymsp[0].minor.yy0); } - yymsp[0].minor.yy636 = yylhsminor.yy636; + case 279: /* signed_literal ::= NK_BOOL */ +{ yylhsminor.yy686 = createValueNode(pCxt, 
TSDB_DATA_TYPE_BOOL, &yymsp[0].minor.yy0); } + yymsp[0].minor.yy686 = yylhsminor.yy686; break; - case 284: /* signed_literal ::= TIMESTAMP NK_STRING */ -{ yymsp[-1].minor.yy636 = createValueNode(pCxt, TSDB_DATA_TYPE_TIMESTAMP, &yymsp[0].minor.yy0); } + case 280: /* signed_literal ::= TIMESTAMP NK_STRING */ +{ yymsp[-1].minor.yy686 = createValueNode(pCxt, TSDB_DATA_TYPE_TIMESTAMP, &yymsp[0].minor.yy0); } break; - case 285: /* signed_literal ::= duration_literal */ - case 287: /* signed_literal ::= literal_func */ yytestcase(yyruleno==287); - case 344: /* star_func_para ::= expression */ yytestcase(yyruleno==344); - case 399: /* select_item ::= common_expression */ yytestcase(yyruleno==399); - case 445: /* search_condition ::= common_expression */ yytestcase(yyruleno==445); -{ yylhsminor.yy636 = releaseRawExprNode(pCxt, yymsp[0].minor.yy636); } - yymsp[0].minor.yy636 = yylhsminor.yy636; + case 281: /* signed_literal ::= duration_literal */ + case 283: /* signed_literal ::= literal_func */ yytestcase(yyruleno==283); + case 340: /* star_func_para ::= expression */ yytestcase(yyruleno==340); + case 395: /* select_item ::= common_expression */ yytestcase(yyruleno==395); + case 441: /* search_condition ::= common_expression */ yytestcase(yyruleno==441); +{ yylhsminor.yy686 = releaseRawExprNode(pCxt, yymsp[0].minor.yy686); } + yymsp[0].minor.yy686 = yylhsminor.yy686; break; - case 286: /* signed_literal ::= NULL */ -{ yylhsminor.yy636 = createValueNode(pCxt, TSDB_DATA_TYPE_NULL, &yymsp[0].minor.yy0); } - yymsp[0].minor.yy636 = yylhsminor.yy636; + case 282: /* signed_literal ::= NULL */ +{ yylhsminor.yy686 = createValueNode(pCxt, TSDB_DATA_TYPE_NULL, &yymsp[0].minor.yy0); } + yymsp[0].minor.yy686 = yylhsminor.yy686; break; - case 306: /* expression ::= NK_LP expression NK_RP */ - case 371: /* boolean_primary ::= NK_LP boolean_value_expression NK_RP */ yytestcase(yyruleno==371); -{ yylhsminor.yy636 = createRawExprNodeExt(pCxt, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0, releaseRawExprNode(pCxt, yymsp[-1].minor.yy636)); } - yymsp[-2].minor.yy636 = yylhsminor.yy636; + case 302: /* expression ::= NK_LP expression NK_RP */ + case 367: /* boolean_primary ::= NK_LP boolean_value_expression NK_RP */ yytestcase(yyruleno==367); +{ yylhsminor.yy686 = createRawExprNodeExt(pCxt, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0, releaseRawExprNode(pCxt, yymsp[-1].minor.yy686)); } + yymsp[-2].minor.yy686 = yylhsminor.yy686; break; - case 307: /* expression ::= NK_PLUS expression */ + case 303: /* expression ::= NK_PLUS expression */ { - SToken t = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy636); - yylhsminor.yy636 = createRawExprNodeExt(pCxt, &yymsp[-1].minor.yy0, &t, releaseRawExprNode(pCxt, yymsp[0].minor.yy636)); + SToken t = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy686); + yylhsminor.yy686 = createRawExprNodeExt(pCxt, &yymsp[-1].minor.yy0, &t, releaseRawExprNode(pCxt, yymsp[0].minor.yy686)); } - yymsp[-1].minor.yy636 = yylhsminor.yy636; + yymsp[-1].minor.yy686 = yylhsminor.yy686; break; - case 308: /* expression ::= NK_MINUS expression */ + case 304: /* expression ::= NK_MINUS expression */ { - SToken t = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy636); - yylhsminor.yy636 = createRawExprNodeExt(pCxt, &yymsp[-1].minor.yy0, &t, createOperatorNode(pCxt, OP_TYPE_MINUS, releaseRawExprNode(pCxt, yymsp[0].minor.yy636), NULL)); + SToken t = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy686); + yylhsminor.yy686 = createRawExprNodeExt(pCxt, &yymsp[-1].minor.yy0, &t, createOperatorNode(pCxt, OP_TYPE_MINUS, 
releaseRawExprNode(pCxt, yymsp[0].minor.yy686), NULL)); } - yymsp[-1].minor.yy636 = yylhsminor.yy636; + yymsp[-1].minor.yy686 = yylhsminor.yy686; break; - case 309: /* expression ::= expression NK_PLUS expression */ + case 305: /* expression ::= expression NK_PLUS expression */ { - SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy636); - SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy636); - yylhsminor.yy636 = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, OP_TYPE_ADD, releaseRawExprNode(pCxt, yymsp[-2].minor.yy636), releaseRawExprNode(pCxt, yymsp[0].minor.yy636))); + SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy686); + SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy686); + yylhsminor.yy686 = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, OP_TYPE_ADD, releaseRawExprNode(pCxt, yymsp[-2].minor.yy686), releaseRawExprNode(pCxt, yymsp[0].minor.yy686))); } - yymsp[-2].minor.yy636 = yylhsminor.yy636; + yymsp[-2].minor.yy686 = yylhsminor.yy686; break; - case 310: /* expression ::= expression NK_MINUS expression */ + case 306: /* expression ::= expression NK_MINUS expression */ { - SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy636); - SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy636); - yylhsminor.yy636 = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, OP_TYPE_SUB, releaseRawExprNode(pCxt, yymsp[-2].minor.yy636), releaseRawExprNode(pCxt, yymsp[0].minor.yy636))); + SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy686); + SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy686); + yylhsminor.yy686 = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, OP_TYPE_SUB, releaseRawExprNode(pCxt, yymsp[-2].minor.yy686), releaseRawExprNode(pCxt, yymsp[0].minor.yy686))); } - yymsp[-2].minor.yy636 = yylhsminor.yy636; + yymsp[-2].minor.yy686 = yylhsminor.yy686; break; - case 311: /* expression ::= expression NK_STAR expression */ + case 307: /* expression ::= expression NK_STAR expression */ { - SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy636); - SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy636); - yylhsminor.yy636 = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, OP_TYPE_MULTI, releaseRawExprNode(pCxt, yymsp[-2].minor.yy636), releaseRawExprNode(pCxt, yymsp[0].minor.yy636))); + SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy686); + SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy686); + yylhsminor.yy686 = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, OP_TYPE_MULTI, releaseRawExprNode(pCxt, yymsp[-2].minor.yy686), releaseRawExprNode(pCxt, yymsp[0].minor.yy686))); } - yymsp[-2].minor.yy636 = yylhsminor.yy636; + yymsp[-2].minor.yy686 = yylhsminor.yy686; break; - case 312: /* expression ::= expression NK_SLASH expression */ + case 308: /* expression ::= expression NK_SLASH expression */ { - SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy636); - SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy636); - yylhsminor.yy636 = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, OP_TYPE_DIV, releaseRawExprNode(pCxt, yymsp[-2].minor.yy636), releaseRawExprNode(pCxt, yymsp[0].minor.yy636))); + SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy686); + SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy686); + yylhsminor.yy686 = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, OP_TYPE_DIV, releaseRawExprNode(pCxt, yymsp[-2].minor.yy686), releaseRawExprNode(pCxt, 
yymsp[0].minor.yy686))); } - yymsp[-2].minor.yy636 = yylhsminor.yy636; + yymsp[-2].minor.yy686 = yylhsminor.yy686; break; - case 313: /* expression ::= expression NK_REM expression */ + case 309: /* expression ::= expression NK_REM expression */ { - SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy636); - SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy636); - yylhsminor.yy636 = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, OP_TYPE_MOD, releaseRawExprNode(pCxt, yymsp[-2].minor.yy636), releaseRawExprNode(pCxt, yymsp[0].minor.yy636))); + SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy686); + SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy686); + yylhsminor.yy686 = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, OP_TYPE_MOD, releaseRawExprNode(pCxt, yymsp[-2].minor.yy686), releaseRawExprNode(pCxt, yymsp[0].minor.yy686))); } - yymsp[-2].minor.yy636 = yylhsminor.yy636; + yymsp[-2].minor.yy686 = yylhsminor.yy686; break; - case 314: /* expression ::= column_reference NK_ARROW NK_STRING */ + case 310: /* expression ::= column_reference NK_ARROW NK_STRING */ { - SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy636); - yylhsminor.yy636 = createRawExprNodeExt(pCxt, &s, &yymsp[0].minor.yy0, createOperatorNode(pCxt, OP_TYPE_JSON_GET_VALUE, releaseRawExprNode(pCxt, yymsp[-2].minor.yy636), createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[0].minor.yy0))); + SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy686); + yylhsminor.yy686 = createRawExprNodeExt(pCxt, &s, &yymsp[0].minor.yy0, createOperatorNode(pCxt, OP_TYPE_JSON_GET_VALUE, releaseRawExprNode(pCxt, yymsp[-2].minor.yy686), createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[0].minor.yy0))); } - yymsp[-2].minor.yy636 = yylhsminor.yy636; - break; - case 315: /* expression_list ::= expression */ -{ yylhsminor.yy236 = createNodeList(pCxt, releaseRawExprNode(pCxt, yymsp[0].minor.yy636)); } - yymsp[0].minor.yy236 = yylhsminor.yy236; - break; - case 316: /* expression_list ::= expression_list NK_COMMA expression */ -{ yylhsminor.yy236 = addNodeToList(pCxt, yymsp[-2].minor.yy236, releaseRawExprNode(pCxt, yymsp[0].minor.yy636)); } - yymsp[-2].minor.yy236 = yylhsminor.yy236; - break; - case 317: /* column_reference ::= column_name */ -{ yylhsminor.yy636 = createRawExprNode(pCxt, &yymsp[0].minor.yy53, createColumnNode(pCxt, NULL, &yymsp[0].minor.yy53)); } - yymsp[0].minor.yy636 = yylhsminor.yy636; - break; - case 318: /* column_reference ::= table_name NK_DOT column_name */ -{ yylhsminor.yy636 = createRawExprNodeExt(pCxt, &yymsp[-2].minor.yy53, &yymsp[0].minor.yy53, createColumnNode(pCxt, &yymsp[-2].minor.yy53, &yymsp[0].minor.yy53)); } - yymsp[-2].minor.yy636 = yylhsminor.yy636; - break; - case 319: /* pseudo_column ::= ROWTS */ - case 320: /* pseudo_column ::= TBNAME */ yytestcase(yyruleno==320); - case 322: /* pseudo_column ::= QSTARTTS */ yytestcase(yyruleno==322); - case 323: /* pseudo_column ::= QENDTS */ yytestcase(yyruleno==323); - case 324: /* pseudo_column ::= WSTARTTS */ yytestcase(yyruleno==324); - case 325: /* pseudo_column ::= WENDTS */ yytestcase(yyruleno==325); - case 326: /* pseudo_column ::= WDURATION */ yytestcase(yyruleno==326); - case 332: /* literal_func ::= NOW */ yytestcase(yyruleno==332); -{ yylhsminor.yy636 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createFunctionNode(pCxt, &yymsp[0].minor.yy0, NULL)); } - yymsp[0].minor.yy636 = yylhsminor.yy636; - break; - case 321: /* pseudo_column ::= table_name NK_DOT TBNAME */ -{ yylhsminor.yy636 = 
createRawExprNodeExt(pCxt, &yymsp[-2].minor.yy53, &yymsp[0].minor.yy0, createFunctionNode(pCxt, &yymsp[0].minor.yy0, createNodeList(pCxt, createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[-2].minor.yy53)))); } - yymsp[-2].minor.yy636 = yylhsminor.yy636; - break; - case 327: /* function_expression ::= function_name NK_LP expression_list NK_RP */ - case 328: /* function_expression ::= star_func NK_LP star_func_para_list NK_RP */ yytestcase(yyruleno==328); -{ yylhsminor.yy636 = createRawExprNodeExt(pCxt, &yymsp[-3].minor.yy53, &yymsp[0].minor.yy0, createFunctionNode(pCxt, &yymsp[-3].minor.yy53, yymsp[-1].minor.yy236)); } - yymsp[-3].minor.yy636 = yylhsminor.yy636; - break; - case 329: /* function_expression ::= CAST NK_LP expression AS type_name NK_RP */ -{ yylhsminor.yy636 = createRawExprNodeExt(pCxt, &yymsp[-5].minor.yy0, &yymsp[0].minor.yy0, createCastFunctionNode(pCxt, releaseRawExprNode(pCxt, yymsp[-3].minor.yy636), yymsp[-1].minor.yy450)); } - yymsp[-5].minor.yy636 = yylhsminor.yy636; - break; - case 331: /* literal_func ::= noarg_func NK_LP NK_RP */ -{ yylhsminor.yy636 = createRawExprNodeExt(pCxt, &yymsp[-2].minor.yy53, &yymsp[0].minor.yy0, createFunctionNode(pCxt, &yymsp[-2].minor.yy53, NULL)); } - yymsp[-2].minor.yy636 = yylhsminor.yy636; - break; - case 340: /* star_func_para_list ::= NK_STAR */ -{ yylhsminor.yy236 = createNodeList(pCxt, createColumnNode(pCxt, NULL, &yymsp[0].minor.yy0)); } - yymsp[0].minor.yy236 = yylhsminor.yy236; - break; - case 345: /* star_func_para ::= table_name NK_DOT NK_STAR */ - case 402: /* select_item ::= table_name NK_DOT NK_STAR */ yytestcase(yyruleno==402); -{ yylhsminor.yy636 = createColumnNode(pCxt, &yymsp[-2].minor.yy53, &yymsp[0].minor.yy0); } - yymsp[-2].minor.yy636 = yylhsminor.yy636; - break; - case 346: /* predicate ::= expression compare_op expression */ - case 351: /* predicate ::= expression in_op in_predicate_value */ yytestcase(yyruleno==351); + yymsp[-2].minor.yy686 = yylhsminor.yy686; + break; + case 311: /* expression_list ::= expression */ +{ yylhsminor.yy670 = createNodeList(pCxt, releaseRawExprNode(pCxt, yymsp[0].minor.yy686)); } + yymsp[0].minor.yy670 = yylhsminor.yy670; + break; + case 312: /* expression_list ::= expression_list NK_COMMA expression */ +{ yylhsminor.yy670 = addNodeToList(pCxt, yymsp[-2].minor.yy670, releaseRawExprNode(pCxt, yymsp[0].minor.yy686)); } + yymsp[-2].minor.yy670 = yylhsminor.yy670; + break; + case 313: /* column_reference ::= column_name */ +{ yylhsminor.yy686 = createRawExprNode(pCxt, &yymsp[0].minor.yy113, createColumnNode(pCxt, NULL, &yymsp[0].minor.yy113)); } + yymsp[0].minor.yy686 = yylhsminor.yy686; + break; + case 314: /* column_reference ::= table_name NK_DOT column_name */ +{ yylhsminor.yy686 = createRawExprNodeExt(pCxt, &yymsp[-2].minor.yy113, &yymsp[0].minor.yy113, createColumnNode(pCxt, &yymsp[-2].minor.yy113, &yymsp[0].minor.yy113)); } + yymsp[-2].minor.yy686 = yylhsminor.yy686; + break; + case 315: /* pseudo_column ::= ROWTS */ + case 316: /* pseudo_column ::= TBNAME */ yytestcase(yyruleno==316); + case 318: /* pseudo_column ::= QSTARTTS */ yytestcase(yyruleno==318); + case 319: /* pseudo_column ::= QENDTS */ yytestcase(yyruleno==319); + case 320: /* pseudo_column ::= WSTARTTS */ yytestcase(yyruleno==320); + case 321: /* pseudo_column ::= WENDTS */ yytestcase(yyruleno==321); + case 322: /* pseudo_column ::= WDURATION */ yytestcase(yyruleno==322); + case 328: /* literal_func ::= NOW */ yytestcase(yyruleno==328); +{ yylhsminor.yy686 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, 
createFunctionNode(pCxt, &yymsp[0].minor.yy0, NULL)); } + yymsp[0].minor.yy686 = yylhsminor.yy686; + break; + case 317: /* pseudo_column ::= table_name NK_DOT TBNAME */ +{ yylhsminor.yy686 = createRawExprNodeExt(pCxt, &yymsp[-2].minor.yy113, &yymsp[0].minor.yy0, createFunctionNode(pCxt, &yymsp[0].minor.yy0, createNodeList(pCxt, createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[-2].minor.yy113)))); } + yymsp[-2].minor.yy686 = yylhsminor.yy686; + break; + case 323: /* function_expression ::= function_name NK_LP expression_list NK_RP */ + case 324: /* function_expression ::= star_func NK_LP star_func_para_list NK_RP */ yytestcase(yyruleno==324); +{ yylhsminor.yy686 = createRawExprNodeExt(pCxt, &yymsp[-3].minor.yy113, &yymsp[0].minor.yy0, createFunctionNode(pCxt, &yymsp[-3].minor.yy113, yymsp[-1].minor.yy670)); } + yymsp[-3].minor.yy686 = yylhsminor.yy686; + break; + case 325: /* function_expression ::= CAST NK_LP expression AS type_name NK_RP */ +{ yylhsminor.yy686 = createRawExprNodeExt(pCxt, &yymsp[-5].minor.yy0, &yymsp[0].minor.yy0, createCastFunctionNode(pCxt, releaseRawExprNode(pCxt, yymsp[-3].minor.yy686), yymsp[-1].minor.yy490)); } + yymsp[-5].minor.yy686 = yylhsminor.yy686; + break; + case 327: /* literal_func ::= noarg_func NK_LP NK_RP */ +{ yylhsminor.yy686 = createRawExprNodeExt(pCxt, &yymsp[-2].minor.yy113, &yymsp[0].minor.yy0, createFunctionNode(pCxt, &yymsp[-2].minor.yy113, NULL)); } + yymsp[-2].minor.yy686 = yylhsminor.yy686; + break; + case 336: /* star_func_para_list ::= NK_STAR */ +{ yylhsminor.yy670 = createNodeList(pCxt, createColumnNode(pCxt, NULL, &yymsp[0].minor.yy0)); } + yymsp[0].minor.yy670 = yylhsminor.yy670; + break; + case 341: /* star_func_para ::= table_name NK_DOT NK_STAR */ + case 398: /* select_item ::= table_name NK_DOT NK_STAR */ yytestcase(yyruleno==398); +{ yylhsminor.yy686 = createColumnNode(pCxt, &yymsp[-2].minor.yy113, &yymsp[0].minor.yy0); } + yymsp[-2].minor.yy686 = yylhsminor.yy686; + break; + case 342: /* predicate ::= expression compare_op expression */ + case 347: /* predicate ::= expression in_op in_predicate_value */ yytestcase(yyruleno==347); { - SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy636); - SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy636); - yylhsminor.yy636 = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, yymsp[-1].minor.yy136, releaseRawExprNode(pCxt, yymsp[-2].minor.yy636), releaseRawExprNode(pCxt, yymsp[0].minor.yy636))); + SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy686); + SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy686); + yylhsminor.yy686 = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, yymsp[-1].minor.yy632, releaseRawExprNode(pCxt, yymsp[-2].minor.yy686), releaseRawExprNode(pCxt, yymsp[0].minor.yy686))); } - yymsp[-2].minor.yy636 = yylhsminor.yy636; + yymsp[-2].minor.yy686 = yylhsminor.yy686; break; - case 347: /* predicate ::= expression BETWEEN expression AND expression */ + case 343: /* predicate ::= expression BETWEEN expression AND expression */ { - SToken s = getTokenFromRawExprNode(pCxt, yymsp[-4].minor.yy636); - SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy636); - yylhsminor.yy636 = createRawExprNodeExt(pCxt, &s, &e, createBetweenAnd(pCxt, releaseRawExprNode(pCxt, yymsp[-4].minor.yy636), releaseRawExprNode(pCxt, yymsp[-2].minor.yy636), releaseRawExprNode(pCxt, yymsp[0].minor.yy636))); + SToken s = getTokenFromRawExprNode(pCxt, yymsp[-4].minor.yy686); + SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy686); + 
yylhsminor.yy686 = createRawExprNodeExt(pCxt, &s, &e, createBetweenAnd(pCxt, releaseRawExprNode(pCxt, yymsp[-4].minor.yy686), releaseRawExprNode(pCxt, yymsp[-2].minor.yy686), releaseRawExprNode(pCxt, yymsp[0].minor.yy686))); } - yymsp[-4].minor.yy636 = yylhsminor.yy636; + yymsp[-4].minor.yy686 = yylhsminor.yy686; break; - case 348: /* predicate ::= expression NOT BETWEEN expression AND expression */ + case 344: /* predicate ::= expression NOT BETWEEN expression AND expression */ { - SToken s = getTokenFromRawExprNode(pCxt, yymsp[-5].minor.yy636); - SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy636); - yylhsminor.yy636 = createRawExprNodeExt(pCxt, &s, &e, createNotBetweenAnd(pCxt, releaseRawExprNode(pCxt, yymsp[-5].minor.yy636), releaseRawExprNode(pCxt, yymsp[-2].minor.yy636), releaseRawExprNode(pCxt, yymsp[0].minor.yy636))); + SToken s = getTokenFromRawExprNode(pCxt, yymsp[-5].minor.yy686); + SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy686); + yylhsminor.yy686 = createRawExprNodeExt(pCxt, &s, &e, createNotBetweenAnd(pCxt, releaseRawExprNode(pCxt, yymsp[-5].minor.yy686), releaseRawExprNode(pCxt, yymsp[-2].minor.yy686), releaseRawExprNode(pCxt, yymsp[0].minor.yy686))); } - yymsp[-5].minor.yy636 = yylhsminor.yy636; + yymsp[-5].minor.yy686 = yylhsminor.yy686; break; - case 349: /* predicate ::= expression IS NULL */ + case 345: /* predicate ::= expression IS NULL */ { - SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy636); - yylhsminor.yy636 = createRawExprNodeExt(pCxt, &s, &yymsp[0].minor.yy0, createOperatorNode(pCxt, OP_TYPE_IS_NULL, releaseRawExprNode(pCxt, yymsp[-2].minor.yy636), NULL)); + SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy686); + yylhsminor.yy686 = createRawExprNodeExt(pCxt, &s, &yymsp[0].minor.yy0, createOperatorNode(pCxt, OP_TYPE_IS_NULL, releaseRawExprNode(pCxt, yymsp[-2].minor.yy686), NULL)); } - yymsp[-2].minor.yy636 = yylhsminor.yy636; + yymsp[-2].minor.yy686 = yylhsminor.yy686; break; - case 350: /* predicate ::= expression IS NOT NULL */ + case 346: /* predicate ::= expression IS NOT NULL */ { - SToken s = getTokenFromRawExprNode(pCxt, yymsp[-3].minor.yy636); - yylhsminor.yy636 = createRawExprNodeExt(pCxt, &s, &yymsp[0].minor.yy0, createOperatorNode(pCxt, OP_TYPE_IS_NOT_NULL, releaseRawExprNode(pCxt, yymsp[-3].minor.yy636), NULL)); + SToken s = getTokenFromRawExprNode(pCxt, yymsp[-3].minor.yy686); + yylhsminor.yy686 = createRawExprNodeExt(pCxt, &s, &yymsp[0].minor.yy0, createOperatorNode(pCxt, OP_TYPE_IS_NOT_NULL, releaseRawExprNode(pCxt, yymsp[-3].minor.yy686), NULL)); } - yymsp[-3].minor.yy636 = yylhsminor.yy636; + yymsp[-3].minor.yy686 = yylhsminor.yy686; break; - case 352: /* compare_op ::= NK_LT */ -{ yymsp[0].minor.yy136 = OP_TYPE_LOWER_THAN; } + case 348: /* compare_op ::= NK_LT */ +{ yymsp[0].minor.yy632 = OP_TYPE_LOWER_THAN; } break; - case 353: /* compare_op ::= NK_GT */ -{ yymsp[0].minor.yy136 = OP_TYPE_GREATER_THAN; } + case 349: /* compare_op ::= NK_GT */ +{ yymsp[0].minor.yy632 = OP_TYPE_GREATER_THAN; } break; - case 354: /* compare_op ::= NK_LE */ -{ yymsp[0].minor.yy136 = OP_TYPE_LOWER_EQUAL; } + case 350: /* compare_op ::= NK_LE */ +{ yymsp[0].minor.yy632 = OP_TYPE_LOWER_EQUAL; } break; - case 355: /* compare_op ::= NK_GE */ -{ yymsp[0].minor.yy136 = OP_TYPE_GREATER_EQUAL; } + case 351: /* compare_op ::= NK_GE */ +{ yymsp[0].minor.yy632 = OP_TYPE_GREATER_EQUAL; } break; - case 356: /* compare_op ::= NK_NE */ -{ yymsp[0].minor.yy136 = OP_TYPE_NOT_EQUAL; } + case 352: /* compare_op ::= NK_NE */ +{ 
yymsp[0].minor.yy632 = OP_TYPE_NOT_EQUAL; } break; - case 357: /* compare_op ::= NK_EQ */ -{ yymsp[0].minor.yy136 = OP_TYPE_EQUAL; } + case 353: /* compare_op ::= NK_EQ */ +{ yymsp[0].minor.yy632 = OP_TYPE_EQUAL; } break; - case 358: /* compare_op ::= LIKE */ -{ yymsp[0].minor.yy136 = OP_TYPE_LIKE; } + case 354: /* compare_op ::= LIKE */ +{ yymsp[0].minor.yy632 = OP_TYPE_LIKE; } break; - case 359: /* compare_op ::= NOT LIKE */ -{ yymsp[-1].minor.yy136 = OP_TYPE_NOT_LIKE; } + case 355: /* compare_op ::= NOT LIKE */ +{ yymsp[-1].minor.yy632 = OP_TYPE_NOT_LIKE; } break; - case 360: /* compare_op ::= MATCH */ -{ yymsp[0].minor.yy136 = OP_TYPE_MATCH; } + case 356: /* compare_op ::= MATCH */ +{ yymsp[0].minor.yy632 = OP_TYPE_MATCH; } break; - case 361: /* compare_op ::= NMATCH */ -{ yymsp[0].minor.yy136 = OP_TYPE_NMATCH; } + case 357: /* compare_op ::= NMATCH */ +{ yymsp[0].minor.yy632 = OP_TYPE_NMATCH; } break; - case 362: /* compare_op ::= CONTAINS */ -{ yymsp[0].minor.yy136 = OP_TYPE_JSON_CONTAINS; } + case 358: /* compare_op ::= CONTAINS */ +{ yymsp[0].minor.yy632 = OP_TYPE_JSON_CONTAINS; } break; - case 363: /* in_op ::= IN */ -{ yymsp[0].minor.yy136 = OP_TYPE_IN; } + case 359: /* in_op ::= IN */ +{ yymsp[0].minor.yy632 = OP_TYPE_IN; } break; - case 364: /* in_op ::= NOT IN */ -{ yymsp[-1].minor.yy136 = OP_TYPE_NOT_IN; } + case 360: /* in_op ::= NOT IN */ +{ yymsp[-1].minor.yy632 = OP_TYPE_NOT_IN; } break; - case 365: /* in_predicate_value ::= NK_LP expression_list NK_RP */ -{ yylhsminor.yy636 = createRawExprNodeExt(pCxt, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0, createNodeListNode(pCxt, yymsp[-1].minor.yy236)); } - yymsp[-2].minor.yy636 = yylhsminor.yy636; + case 361: /* in_predicate_value ::= NK_LP expression_list NK_RP */ +{ yylhsminor.yy686 = createRawExprNodeExt(pCxt, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0, createNodeListNode(pCxt, yymsp[-1].minor.yy670)); } + yymsp[-2].minor.yy686 = yylhsminor.yy686; break; - case 367: /* boolean_value_expression ::= NOT boolean_primary */ + case 363: /* boolean_value_expression ::= NOT boolean_primary */ { - SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy636); - yylhsminor.yy636 = createRawExprNodeExt(pCxt, &yymsp[-1].minor.yy0, &e, createLogicConditionNode(pCxt, LOGIC_COND_TYPE_NOT, releaseRawExprNode(pCxt, yymsp[0].minor.yy636), NULL)); + SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy686); + yylhsminor.yy686 = createRawExprNodeExt(pCxt, &yymsp[-1].minor.yy0, &e, createLogicConditionNode(pCxt, LOGIC_COND_TYPE_NOT, releaseRawExprNode(pCxt, yymsp[0].minor.yy686), NULL)); } - yymsp[-1].minor.yy636 = yylhsminor.yy636; + yymsp[-1].minor.yy686 = yylhsminor.yy686; break; - case 368: /* boolean_value_expression ::= boolean_value_expression OR boolean_value_expression */ + case 364: /* boolean_value_expression ::= boolean_value_expression OR boolean_value_expression */ { - SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy636); - SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy636); - yylhsminor.yy636 = createRawExprNodeExt(pCxt, &s, &e, createLogicConditionNode(pCxt, LOGIC_COND_TYPE_OR, releaseRawExprNode(pCxt, yymsp[-2].minor.yy636), releaseRawExprNode(pCxt, yymsp[0].minor.yy636))); + SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy686); + SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy686); + yylhsminor.yy686 = createRawExprNodeExt(pCxt, &s, &e, createLogicConditionNode(pCxt, LOGIC_COND_TYPE_OR, releaseRawExprNode(pCxt, yymsp[-2].minor.yy686), releaseRawExprNode(pCxt, 
yymsp[0].minor.yy686))); } - yymsp[-2].minor.yy636 = yylhsminor.yy636; + yymsp[-2].minor.yy686 = yylhsminor.yy686; break; - case 369: /* boolean_value_expression ::= boolean_value_expression AND boolean_value_expression */ + case 365: /* boolean_value_expression ::= boolean_value_expression AND boolean_value_expression */ { - SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy636); - SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy636); - yylhsminor.yy636 = createRawExprNodeExt(pCxt, &s, &e, createLogicConditionNode(pCxt, LOGIC_COND_TYPE_AND, releaseRawExprNode(pCxt, yymsp[-2].minor.yy636), releaseRawExprNode(pCxt, yymsp[0].minor.yy636))); + SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy686); + SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy686); + yylhsminor.yy686 = createRawExprNodeExt(pCxt, &s, &e, createLogicConditionNode(pCxt, LOGIC_COND_TYPE_AND, releaseRawExprNode(pCxt, yymsp[-2].minor.yy686), releaseRawExprNode(pCxt, yymsp[0].minor.yy686))); } - yymsp[-2].minor.yy636 = yylhsminor.yy636; + yymsp[-2].minor.yy686 = yylhsminor.yy686; break; - case 376: /* table_reference_list ::= table_reference_list NK_COMMA table_reference */ -{ yylhsminor.yy636 = createJoinTableNode(pCxt, JOIN_TYPE_INNER, yymsp[-2].minor.yy636, yymsp[0].minor.yy636, NULL); } - yymsp[-2].minor.yy636 = yylhsminor.yy636; + case 372: /* table_reference_list ::= table_reference_list NK_COMMA table_reference */ +{ yylhsminor.yy686 = createJoinTableNode(pCxt, JOIN_TYPE_INNER, yymsp[-2].minor.yy686, yymsp[0].minor.yy686, NULL); } + yymsp[-2].minor.yy686 = yylhsminor.yy686; break; - case 379: /* table_primary ::= table_name alias_opt */ -{ yylhsminor.yy636 = createRealTableNode(pCxt, NULL, &yymsp[-1].minor.yy53, &yymsp[0].minor.yy53); } - yymsp[-1].minor.yy636 = yylhsminor.yy636; + case 375: /* table_primary ::= table_name alias_opt */ +{ yylhsminor.yy686 = createRealTableNode(pCxt, NULL, &yymsp[-1].minor.yy113, &yymsp[0].minor.yy113); } + yymsp[-1].minor.yy686 = yylhsminor.yy686; break; - case 380: /* table_primary ::= db_name NK_DOT table_name alias_opt */ -{ yylhsminor.yy636 = createRealTableNode(pCxt, &yymsp[-3].minor.yy53, &yymsp[-1].minor.yy53, &yymsp[0].minor.yy53); } - yymsp[-3].minor.yy636 = yylhsminor.yy636; + case 376: /* table_primary ::= db_name NK_DOT table_name alias_opt */ +{ yylhsminor.yy686 = createRealTableNode(pCxt, &yymsp[-3].minor.yy113, &yymsp[-1].minor.yy113, &yymsp[0].minor.yy113); } + yymsp[-3].minor.yy686 = yylhsminor.yy686; break; - case 381: /* table_primary ::= subquery alias_opt */ -{ yylhsminor.yy636 = createTempTableNode(pCxt, releaseRawExprNode(pCxt, yymsp[-1].minor.yy636), &yymsp[0].minor.yy53); } - yymsp[-1].minor.yy636 = yylhsminor.yy636; + case 377: /* table_primary ::= subquery alias_opt */ +{ yylhsminor.yy686 = createTempTableNode(pCxt, releaseRawExprNode(pCxt, yymsp[-1].minor.yy686), &yymsp[0].minor.yy113); } + yymsp[-1].minor.yy686 = yylhsminor.yy686; break; - case 383: /* alias_opt ::= */ -{ yymsp[1].minor.yy53 = nil_token; } + case 379: /* alias_opt ::= */ +{ yymsp[1].minor.yy113 = nil_token; } break; - case 384: /* alias_opt ::= table_alias */ -{ yylhsminor.yy53 = yymsp[0].minor.yy53; } - yymsp[0].minor.yy53 = yylhsminor.yy53; + case 380: /* alias_opt ::= table_alias */ +{ yylhsminor.yy113 = yymsp[0].minor.yy113; } + yymsp[0].minor.yy113 = yylhsminor.yy113; break; - case 385: /* alias_opt ::= AS table_alias */ -{ yymsp[-1].minor.yy53 = yymsp[0].minor.yy53; } + case 381: /* alias_opt ::= AS table_alias */ +{ yymsp[-1].minor.yy113 = 
yymsp[0].minor.yy113; } break; - case 386: /* parenthesized_joined_table ::= NK_LP joined_table NK_RP */ - case 387: /* parenthesized_joined_table ::= NK_LP parenthesized_joined_table NK_RP */ yytestcase(yyruleno==387); -{ yymsp[-2].minor.yy636 = yymsp[-1].minor.yy636; } + case 382: /* parenthesized_joined_table ::= NK_LP joined_table NK_RP */ + case 383: /* parenthesized_joined_table ::= NK_LP parenthesized_joined_table NK_RP */ yytestcase(yyruleno==383); +{ yymsp[-2].minor.yy686 = yymsp[-1].minor.yy686; } break; - case 388: /* joined_table ::= table_reference join_type JOIN table_reference ON search_condition */ -{ yylhsminor.yy636 = createJoinTableNode(pCxt, yymsp[-4].minor.yy342, yymsp[-5].minor.yy636, yymsp[-2].minor.yy636, yymsp[0].minor.yy636); } - yymsp[-5].minor.yy636 = yylhsminor.yy636; + case 384: /* joined_table ::= table_reference join_type JOIN table_reference ON search_condition */ +{ yylhsminor.yy686 = createJoinTableNode(pCxt, yymsp[-4].minor.yy120, yymsp[-5].minor.yy686, yymsp[-2].minor.yy686, yymsp[0].minor.yy686); } + yymsp[-5].minor.yy686 = yylhsminor.yy686; break; - case 389: /* join_type ::= */ -{ yymsp[1].minor.yy342 = JOIN_TYPE_INNER; } + case 385: /* join_type ::= */ +{ yymsp[1].minor.yy120 = JOIN_TYPE_INNER; } break; - case 390: /* join_type ::= INNER */ -{ yymsp[0].minor.yy342 = JOIN_TYPE_INNER; } + case 386: /* join_type ::= INNER */ +{ yymsp[0].minor.yy120 = JOIN_TYPE_INNER; } break; - case 391: /* query_specification ::= SELECT set_quantifier_opt select_list from_clause where_clause_opt partition_by_clause_opt twindow_clause_opt group_by_clause_opt having_clause_opt */ + case 387: /* query_specification ::= SELECT set_quantifier_opt select_list from_clause where_clause_opt partition_by_clause_opt twindow_clause_opt group_by_clause_opt having_clause_opt */ { - yymsp[-8].minor.yy636 = createSelectStmt(pCxt, yymsp[-7].minor.yy603, yymsp[-6].minor.yy236, yymsp[-5].minor.yy636); - yymsp[-8].minor.yy636 = addWhereClause(pCxt, yymsp[-8].minor.yy636, yymsp[-4].minor.yy636); - yymsp[-8].minor.yy636 = addPartitionByClause(pCxt, yymsp[-8].minor.yy636, yymsp[-3].minor.yy236); - yymsp[-8].minor.yy636 = addWindowClauseClause(pCxt, yymsp[-8].minor.yy636, yymsp[-2].minor.yy636); - yymsp[-8].minor.yy636 = addGroupByClause(pCxt, yymsp[-8].minor.yy636, yymsp[-1].minor.yy236); - yymsp[-8].minor.yy636 = addHavingClause(pCxt, yymsp[-8].minor.yy636, yymsp[0].minor.yy636); + yymsp[-8].minor.yy686 = createSelectStmt(pCxt, yymsp[-7].minor.yy131, yymsp[-6].minor.yy670, yymsp[-5].minor.yy686); + yymsp[-8].minor.yy686 = addWhereClause(pCxt, yymsp[-8].minor.yy686, yymsp[-4].minor.yy686); + yymsp[-8].minor.yy686 = addPartitionByClause(pCxt, yymsp[-8].minor.yy686, yymsp[-3].minor.yy670); + yymsp[-8].minor.yy686 = addWindowClauseClause(pCxt, yymsp[-8].minor.yy686, yymsp[-2].minor.yy686); + yymsp[-8].minor.yy686 = addGroupByClause(pCxt, yymsp[-8].minor.yy686, yymsp[-1].minor.yy670); + yymsp[-8].minor.yy686 = addHavingClause(pCxt, yymsp[-8].minor.yy686, yymsp[0].minor.yy686); } break; - case 394: /* set_quantifier_opt ::= ALL */ -{ yymsp[0].minor.yy603 = false; } + case 390: /* set_quantifier_opt ::= ALL */ +{ yymsp[0].minor.yy131 = false; } break; - case 395: /* select_list ::= NK_STAR */ -{ yymsp[0].minor.yy236 = NULL; } + case 391: /* select_list ::= NK_STAR */ +{ yymsp[0].minor.yy670 = NULL; } break; - case 400: /* select_item ::= common_expression column_alias */ -{ yylhsminor.yy636 = setProjectionAlias(pCxt, releaseRawExprNode(pCxt, yymsp[-1].minor.yy636), &yymsp[0].minor.yy53); } - 
yymsp[-1].minor.yy636 = yylhsminor.yy636; + case 396: /* select_item ::= common_expression column_alias */ +{ yylhsminor.yy686 = setProjectionAlias(pCxt, releaseRawExprNode(pCxt, yymsp[-1].minor.yy686), &yymsp[0].minor.yy113); } + yymsp[-1].minor.yy686 = yylhsminor.yy686; break; - case 401: /* select_item ::= common_expression AS column_alias */ -{ yylhsminor.yy636 = setProjectionAlias(pCxt, releaseRawExprNode(pCxt, yymsp[-2].minor.yy636), &yymsp[0].minor.yy53); } - yymsp[-2].minor.yy636 = yylhsminor.yy636; + case 397: /* select_item ::= common_expression AS column_alias */ +{ yylhsminor.yy686 = setProjectionAlias(pCxt, releaseRawExprNode(pCxt, yymsp[-2].minor.yy686), &yymsp[0].minor.yy113); } + yymsp[-2].minor.yy686 = yylhsminor.yy686; break; - case 406: /* partition_by_clause_opt ::= PARTITION BY expression_list */ - case 423: /* group_by_clause_opt ::= GROUP BY group_by_list */ yytestcase(yyruleno==423); - case 435: /* order_by_clause_opt ::= ORDER BY sort_specification_list */ yytestcase(yyruleno==435); -{ yymsp[-2].minor.yy236 = yymsp[0].minor.yy236; } + case 402: /* partition_by_clause_opt ::= PARTITION BY expression_list */ + case 419: /* group_by_clause_opt ::= GROUP BY group_by_list */ yytestcase(yyruleno==419); + case 431: /* order_by_clause_opt ::= ORDER BY sort_specification_list */ yytestcase(yyruleno==431); +{ yymsp[-2].minor.yy670 = yymsp[0].minor.yy670; } break; - case 408: /* twindow_clause_opt ::= SESSION NK_LP column_reference NK_COMMA duration_literal NK_RP */ -{ yymsp[-5].minor.yy636 = createSessionWindowNode(pCxt, releaseRawExprNode(pCxt, yymsp[-3].minor.yy636), releaseRawExprNode(pCxt, yymsp[-1].minor.yy636)); } + case 404: /* twindow_clause_opt ::= SESSION NK_LP column_reference NK_COMMA duration_literal NK_RP */ +{ yymsp[-5].minor.yy686 = createSessionWindowNode(pCxt, releaseRawExprNode(pCxt, yymsp[-3].minor.yy686), releaseRawExprNode(pCxt, yymsp[-1].minor.yy686)); } break; - case 409: /* twindow_clause_opt ::= STATE_WINDOW NK_LP expression NK_RP */ -{ yymsp[-3].minor.yy636 = createStateWindowNode(pCxt, releaseRawExprNode(pCxt, yymsp[-1].minor.yy636)); } + case 405: /* twindow_clause_opt ::= STATE_WINDOW NK_LP expression NK_RP */ +{ yymsp[-3].minor.yy686 = createStateWindowNode(pCxt, releaseRawExprNode(pCxt, yymsp[-1].minor.yy686)); } break; - case 410: /* twindow_clause_opt ::= INTERVAL NK_LP duration_literal NK_RP sliding_opt fill_opt */ -{ yymsp[-5].minor.yy636 = createIntervalWindowNode(pCxt, releaseRawExprNode(pCxt, yymsp[-3].minor.yy636), NULL, yymsp[-1].minor.yy636, yymsp[0].minor.yy636); } + case 406: /* twindow_clause_opt ::= INTERVAL NK_LP duration_literal NK_RP sliding_opt fill_opt */ +{ yymsp[-5].minor.yy686 = createIntervalWindowNode(pCxt, releaseRawExprNode(pCxt, yymsp[-3].minor.yy686), NULL, yymsp[-1].minor.yy686, yymsp[0].minor.yy686); } break; - case 411: /* twindow_clause_opt ::= INTERVAL NK_LP duration_literal NK_COMMA duration_literal NK_RP sliding_opt fill_opt */ -{ yymsp[-7].minor.yy636 = createIntervalWindowNode(pCxt, releaseRawExprNode(pCxt, yymsp[-5].minor.yy636), releaseRawExprNode(pCxt, yymsp[-3].minor.yy636), yymsp[-1].minor.yy636, yymsp[0].minor.yy636); } + case 407: /* twindow_clause_opt ::= INTERVAL NK_LP duration_literal NK_COMMA duration_literal NK_RP sliding_opt fill_opt */ +{ yymsp[-7].minor.yy686 = createIntervalWindowNode(pCxt, releaseRawExprNode(pCxt, yymsp[-5].minor.yy686), releaseRawExprNode(pCxt, yymsp[-3].minor.yy686), yymsp[-1].minor.yy686, yymsp[0].minor.yy686); } break; - case 413: /* sliding_opt ::= SLIDING NK_LP 
duration_literal NK_RP */ -{ yymsp[-3].minor.yy636 = releaseRawExprNode(pCxt, yymsp[-1].minor.yy636); } + case 409: /* sliding_opt ::= SLIDING NK_LP duration_literal NK_RP */ +{ yymsp[-3].minor.yy686 = releaseRawExprNode(pCxt, yymsp[-1].minor.yy686); } break; - case 415: /* fill_opt ::= FILL NK_LP fill_mode NK_RP */ -{ yymsp[-3].minor.yy636 = createFillNode(pCxt, yymsp[-1].minor.yy18, NULL); } + case 411: /* fill_opt ::= FILL NK_LP fill_mode NK_RP */ +{ yymsp[-3].minor.yy686 = createFillNode(pCxt, yymsp[-1].minor.yy522, NULL); } break; - case 416: /* fill_opt ::= FILL NK_LP VALUE NK_COMMA literal_list NK_RP */ -{ yymsp[-5].minor.yy636 = createFillNode(pCxt, FILL_MODE_VALUE, createNodeListNode(pCxt, yymsp[-1].minor.yy236)); } + case 412: /* fill_opt ::= FILL NK_LP VALUE NK_COMMA literal_list NK_RP */ +{ yymsp[-5].minor.yy686 = createFillNode(pCxt, FILL_MODE_VALUE, createNodeListNode(pCxt, yymsp[-1].minor.yy670)); } break; - case 417: /* fill_mode ::= NONE */ -{ yymsp[0].minor.yy18 = FILL_MODE_NONE; } + case 413: /* fill_mode ::= NONE */ +{ yymsp[0].minor.yy522 = FILL_MODE_NONE; } break; - case 418: /* fill_mode ::= PREV */ -{ yymsp[0].minor.yy18 = FILL_MODE_PREV; } + case 414: /* fill_mode ::= PREV */ +{ yymsp[0].minor.yy522 = FILL_MODE_PREV; } break; - case 419: /* fill_mode ::= NULL */ -{ yymsp[0].minor.yy18 = FILL_MODE_NULL; } + case 415: /* fill_mode ::= NULL */ +{ yymsp[0].minor.yy522 = FILL_MODE_NULL; } break; - case 420: /* fill_mode ::= LINEAR */ -{ yymsp[0].minor.yy18 = FILL_MODE_LINEAR; } + case 416: /* fill_mode ::= LINEAR */ +{ yymsp[0].minor.yy522 = FILL_MODE_LINEAR; } break; - case 421: /* fill_mode ::= NEXT */ -{ yymsp[0].minor.yy18 = FILL_MODE_NEXT; } + case 417: /* fill_mode ::= NEXT */ +{ yymsp[0].minor.yy522 = FILL_MODE_NEXT; } break; - case 424: /* group_by_list ::= expression */ -{ yylhsminor.yy236 = createNodeList(pCxt, createGroupingSetNode(pCxt, releaseRawExprNode(pCxt, yymsp[0].minor.yy636))); } - yymsp[0].minor.yy236 = yylhsminor.yy236; + case 420: /* group_by_list ::= expression */ +{ yylhsminor.yy670 = createNodeList(pCxt, createGroupingSetNode(pCxt, releaseRawExprNode(pCxt, yymsp[0].minor.yy686))); } + yymsp[0].minor.yy670 = yylhsminor.yy670; break; - case 425: /* group_by_list ::= group_by_list NK_COMMA expression */ -{ yylhsminor.yy236 = addNodeToList(pCxt, yymsp[-2].minor.yy236, createGroupingSetNode(pCxt, releaseRawExprNode(pCxt, yymsp[0].minor.yy636))); } - yymsp[-2].minor.yy236 = yylhsminor.yy236; + case 421: /* group_by_list ::= group_by_list NK_COMMA expression */ +{ yylhsminor.yy670 = addNodeToList(pCxt, yymsp[-2].minor.yy670, createGroupingSetNode(pCxt, releaseRawExprNode(pCxt, yymsp[0].minor.yy686))); } + yymsp[-2].minor.yy670 = yylhsminor.yy670; break; - case 428: /* query_expression ::= query_expression_body order_by_clause_opt slimit_clause_opt limit_clause_opt */ + case 424: /* query_expression ::= query_expression_body order_by_clause_opt slimit_clause_opt limit_clause_opt */ { - yylhsminor.yy636 = addOrderByClause(pCxt, yymsp[-3].minor.yy636, yymsp[-2].minor.yy236); - yylhsminor.yy636 = addSlimitClause(pCxt, yylhsminor.yy636, yymsp[-1].minor.yy636); - yylhsminor.yy636 = addLimitClause(pCxt, yylhsminor.yy636, yymsp[0].minor.yy636); + yylhsminor.yy686 = addOrderByClause(pCxt, yymsp[-3].minor.yy686, yymsp[-2].minor.yy670); + yylhsminor.yy686 = addSlimitClause(pCxt, yylhsminor.yy686, yymsp[-1].minor.yy686); + yylhsminor.yy686 = addLimitClause(pCxt, yylhsminor.yy686, yymsp[0].minor.yy686); } - yymsp[-3].minor.yy636 = yylhsminor.yy636; + 
yymsp[-3].minor.yy686 = yylhsminor.yy686; break; - case 430: /* query_expression_body ::= query_expression_body UNION ALL query_expression_body */ -{ yylhsminor.yy636 = createSetOperator(pCxt, SET_OP_TYPE_UNION_ALL, yymsp[-3].minor.yy636, yymsp[0].minor.yy636); } - yymsp[-3].minor.yy636 = yylhsminor.yy636; + case 426: /* query_expression_body ::= query_expression_body UNION ALL query_expression_body */ +{ yylhsminor.yy686 = createSetOperator(pCxt, SET_OP_TYPE_UNION_ALL, yymsp[-3].minor.yy686, yymsp[0].minor.yy686); } + yymsp[-3].minor.yy686 = yylhsminor.yy686; break; - case 431: /* query_expression_body ::= query_expression_body UNION query_expression_body */ -{ yylhsminor.yy636 = createSetOperator(pCxt, SET_OP_TYPE_UNION, yymsp[-2].minor.yy636, yymsp[0].minor.yy636); } - yymsp[-2].minor.yy636 = yylhsminor.yy636; + case 427: /* query_expression_body ::= query_expression_body UNION query_expression_body */ +{ yylhsminor.yy686 = createSetOperator(pCxt, SET_OP_TYPE_UNION, yymsp[-2].minor.yy686, yymsp[0].minor.yy686); } + yymsp[-2].minor.yy686 = yylhsminor.yy686; break; - case 433: /* query_primary ::= NK_LP query_expression_body order_by_clause_opt slimit_clause_opt limit_clause_opt NK_RP */ -{ yymsp[-5].minor.yy636 = yymsp[-4].minor.yy636; } - yy_destructor(yypParser,353,&yymsp[-3].minor); - yy_destructor(yypParser,354,&yymsp[-2].minor); - yy_destructor(yypParser,355,&yymsp[-1].minor); + case 429: /* query_primary ::= NK_LP query_expression_body order_by_clause_opt slimit_clause_opt limit_clause_opt NK_RP */ +{ yymsp[-5].minor.yy686 = yymsp[-4].minor.yy686; } + yy_destructor(yypParser,349,&yymsp[-3].minor); + yy_destructor(yypParser,350,&yymsp[-2].minor); + yy_destructor(yypParser,351,&yymsp[-1].minor); break; - case 437: /* slimit_clause_opt ::= SLIMIT NK_INTEGER */ - case 441: /* limit_clause_opt ::= LIMIT NK_INTEGER */ yytestcase(yyruleno==441); -{ yymsp[-1].minor.yy636 = createLimitNode(pCxt, &yymsp[0].minor.yy0, NULL); } + case 433: /* slimit_clause_opt ::= SLIMIT NK_INTEGER */ + case 437: /* limit_clause_opt ::= LIMIT NK_INTEGER */ yytestcase(yyruleno==437); +{ yymsp[-1].minor.yy686 = createLimitNode(pCxt, &yymsp[0].minor.yy0, NULL); } break; - case 438: /* slimit_clause_opt ::= SLIMIT NK_INTEGER SOFFSET NK_INTEGER */ - case 442: /* limit_clause_opt ::= LIMIT NK_INTEGER OFFSET NK_INTEGER */ yytestcase(yyruleno==442); -{ yymsp[-3].minor.yy636 = createLimitNode(pCxt, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0); } + case 434: /* slimit_clause_opt ::= SLIMIT NK_INTEGER SOFFSET NK_INTEGER */ + case 438: /* limit_clause_opt ::= LIMIT NK_INTEGER OFFSET NK_INTEGER */ yytestcase(yyruleno==438); +{ yymsp[-3].minor.yy686 = createLimitNode(pCxt, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0); } break; - case 439: /* slimit_clause_opt ::= SLIMIT NK_INTEGER NK_COMMA NK_INTEGER */ - case 443: /* limit_clause_opt ::= LIMIT NK_INTEGER NK_COMMA NK_INTEGER */ yytestcase(yyruleno==443); -{ yymsp[-3].minor.yy636 = createLimitNode(pCxt, &yymsp[0].minor.yy0, &yymsp[-2].minor.yy0); } + case 435: /* slimit_clause_opt ::= SLIMIT NK_INTEGER NK_COMMA NK_INTEGER */ + case 439: /* limit_clause_opt ::= LIMIT NK_INTEGER NK_COMMA NK_INTEGER */ yytestcase(yyruleno==439); +{ yymsp[-3].minor.yy686 = createLimitNode(pCxt, &yymsp[0].minor.yy0, &yymsp[-2].minor.yy0); } break; - case 444: /* subquery ::= NK_LP query_expression NK_RP */ -{ yylhsminor.yy636 = createRawExprNodeExt(pCxt, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0, yymsp[-1].minor.yy636); } - yymsp[-2].minor.yy636 = yylhsminor.yy636; + case 440: /* subquery ::= NK_LP 
query_expression NK_RP */ +{ yylhsminor.yy686 = createRawExprNodeExt(pCxt, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0, yymsp[-1].minor.yy686); } + yymsp[-2].minor.yy686 = yylhsminor.yy686; break; - case 448: /* sort_specification ::= expression ordering_specification_opt null_ordering_opt */ -{ yylhsminor.yy636 = createOrderByExprNode(pCxt, releaseRawExprNode(pCxt, yymsp[-2].minor.yy636), yymsp[-1].minor.yy430, yymsp[0].minor.yy185); } - yymsp[-2].minor.yy636 = yylhsminor.yy636; + case 444: /* sort_specification ::= expression ordering_specification_opt null_ordering_opt */ +{ yylhsminor.yy686 = createOrderByExprNode(pCxt, releaseRawExprNode(pCxt, yymsp[-2].minor.yy686), yymsp[-1].minor.yy428, yymsp[0].minor.yy109); } + yymsp[-2].minor.yy686 = yylhsminor.yy686; break; - case 449: /* ordering_specification_opt ::= */ -{ yymsp[1].minor.yy430 = ORDER_ASC; } + case 445: /* ordering_specification_opt ::= */ +{ yymsp[1].minor.yy428 = ORDER_ASC; } break; - case 450: /* ordering_specification_opt ::= ASC */ -{ yymsp[0].minor.yy430 = ORDER_ASC; } + case 446: /* ordering_specification_opt ::= ASC */ +{ yymsp[0].minor.yy428 = ORDER_ASC; } break; - case 451: /* ordering_specification_opt ::= DESC */ -{ yymsp[0].minor.yy430 = ORDER_DESC; } + case 447: /* ordering_specification_opt ::= DESC */ +{ yymsp[0].minor.yy428 = ORDER_DESC; } break; - case 452: /* null_ordering_opt ::= */ -{ yymsp[1].minor.yy185 = NULL_ORDER_DEFAULT; } + case 448: /* null_ordering_opt ::= */ +{ yymsp[1].minor.yy109 = NULL_ORDER_DEFAULT; } break; - case 453: /* null_ordering_opt ::= NULLS FIRST */ -{ yymsp[-1].minor.yy185 = NULL_ORDER_FIRST; } + case 449: /* null_ordering_opt ::= NULLS FIRST */ +{ yymsp[-1].minor.yy109 = NULL_ORDER_FIRST; } break; - case 454: /* null_ordering_opt ::= NULLS LAST */ -{ yymsp[-1].minor.yy185 = NULL_ORDER_LAST; } + case 450: /* null_ordering_opt ::= NULLS LAST */ +{ yymsp[-1].minor.yy109 = NULL_ORDER_LAST; } break; default: break; diff --git a/source/libs/parser/test/CMakeLists.txt b/source/libs/parser/test/CMakeLists.txt index c252653e9ee743ca8c9e899f6851e1893fb766be..0e8adb978dd0e9fca5a67e9999ce7c5faa877cc0 100644 --- a/source/libs/parser/test/CMakeLists.txt +++ b/source/libs/parser/test/CMakeLists.txt @@ -26,9 +26,7 @@ if(${BUILD_WINGETOPT}) target_link_libraries(parserTest PUBLIC wingetopt) endif() -if(NOT TD_WINDOWS) - add_test( - NAME parserTest - COMMAND parserTest - ) -endif(NOT TD_WINDOWS) +add_test( + NAME parserTest + COMMAND parserTest +) diff --git a/source/libs/parser/test/mockCatalogService.cpp b/source/libs/parser/test/mockCatalogService.cpp index 1b03b9683045597a5c57d37d8572b603eae47be2..566c4d8b04c0127e04ef9ce0fb0b5eabae7d25da 100644 --- a/source/libs/parser/test/mockCatalogService.cpp +++ b/source/libs/parser/test/mockCatalogService.cpp @@ -187,9 +187,8 @@ class MockCatalogServiceImpl { // number of backward fills #define NOB(n) ((n) % 2 ? 
(n) / 2 + 1 : (n) / 2) // center aligned -#define CA(n, s) \ - std::setw(NOF((n) - (s).length())) << "" << (s) << std::setw(NOB((n) - (s).length())) << "" \ - << "|" +#define CA(n, s) std::setw(NOF((n) - int((s).length()))) << "" << (s) \ + << std::setw(NOB((n) - int((s).length()))) << "" << "|" // string field length #define SFL 20 // string field header @@ -243,6 +242,8 @@ class MockCatalogServiceImpl { info->outputType = outputType; info->outputLen = outputLen; info->bufSize = bufSize; + info->pCode = nullptr; + info->pComment = nullptr; udf_.insert(std::make_pair(func, info)); } diff --git a/source/libs/parser/test/parInitialCTest.cpp b/source/libs/parser/test/parInitialCTest.cpp index e55f36376cbce26f1954211fe7308070a0a192bd..65d5194936811a856ef7e36de2f249e0e8bda63b 100644 --- a/source/libs/parser/test/parInitialCTest.cpp +++ b/source/libs/parser/test/parInitialCTest.cpp @@ -298,14 +298,12 @@ TEST_F(ParserInitialCTest, createStable) { auto setCreateStbReqFunc = [&](const char* pTbname, int8_t igExists = 0, float xFilesFactor = TSDB_DEFAULT_ROLLUP_FILE_FACTOR, - int32_t delay = TSDB_DEFAULT_ROLLUP_DELAY, int32_t ttl = TSDB_DEFAULT_TABLE_TTL, - const char* pComment = nullptr) { + int32_t ttl = TSDB_DEFAULT_TABLE_TTL, const char* pComment = nullptr) { memset(&expect, 0, sizeof(SMCreateStbReq)); int32_t len = snprintf(expect.name, sizeof(expect.name), "0.test.%s", pTbname); expect.name[len] = '\0'; expect.igExists = igExists; expect.xFilesFactor = xFilesFactor; - expect.delay = delay; expect.ttl = ttl; if (nullptr != pComment) { expect.comment = strdup(pComment); @@ -393,7 +391,7 @@ TEST_F(ParserInitialCTest, createStable) { addFieldToCreateStbReqFunc(false, "id", TSDB_DATA_TYPE_INT); run("CREATE STABLE t1(ts TIMESTAMP, c1 INT) TAGS(id INT)"); - setCreateStbReqFunc("t1", 1, 0.1, 2, 100, "test create table"); + setCreateStbReqFunc("t1", 1, 0.1, 100, "test create table"); addFieldToCreateStbReqFunc(true, "ts", TSDB_DATA_TYPE_TIMESTAMP, 0, 0); addFieldToCreateStbReqFunc(true, "c1", TSDB_DATA_TYPE_INT); addFieldToCreateStbReqFunc(true, "c2", TSDB_DATA_TYPE_UINT); @@ -431,7 +429,7 @@ TEST_F(ParserInitialCTest, createStable) { "TAGS (a1 TIMESTAMP, a2 INT, a3 INT UNSIGNED, a4 BIGINT, a5 BIGINT UNSIGNED, a6 FLOAT, a7 DOUBLE, " "a8 BINARY(20), a9 SMALLINT, a10 SMALLINT UNSIGNED COMMENT 'test column comment', a11 TINYINT, " "a12 TINYINT UNSIGNED, a13 BOOL, a14 NCHAR(30), a15 VARCHAR(50)) " - "TTL 100 COMMENT 'test create table' SMA(c1, c2, c3) ROLLUP (MIN) FILE_FACTOR 0.1 DELAY 2"); + "TTL 100 COMMENT 'test create table' SMA(c1, c2, c3) ROLLUP (MIN) FILE_FACTOR 0.1"); } TEST_F(ParserInitialCTest, createStream) { @@ -464,7 +462,7 @@ TEST_F(ParserInitialCTest, createTable) { "TAGS (a1 TIMESTAMP, a2 INT, a3 INT UNSIGNED, a4 BIGINT, a5 BIGINT UNSIGNED, a6 FLOAT, a7 DOUBLE, a8 BINARY(20), " "a9 SMALLINT, a10 SMALLINT UNSIGNED COMMENT 'test column comment', a11 TINYINT, a12 TINYINT UNSIGNED, a13 BOOL, " "a14 NCHAR(30), a15 VARCHAR(50)) " - "TTL 100 COMMENT 'test create table' SMA(c1, c2, c3) ROLLUP (MIN) FILE_FACTOR 0.1 DELAY 2"); + "TTL 100 COMMENT 'test create table' SMA(c1, c2, c3) ROLLUP (MIN) FILE_FACTOR 0.1"); run("CREATE TABLE IF NOT EXISTS t1 USING st1 TAGS(1, 'wxy')"); @@ -477,13 +475,62 @@ TEST_F(ParserInitialCTest, createTable) { TEST_F(ParserInitialCTest, createTopic) { useDb("root", "test"); + SCMCreateTopicReq expect = {0}; + + auto setCreateTopicReqFunc = [&](const char* pTopicName, int8_t igExists, const char* pSql, const char* pAst, + const char* pDbName = nullptr, const char* pTbname = 
nullptr) {
+    memset(&expect, 0, sizeof(SCMCreateTopicReq));
+    snprintf(expect.name, sizeof(expect.name), "0.%s", pTopicName);
+    expect.igExists = igExists;
+    expect.sql = (char*)pSql;
+    if (nullptr != pTbname) {
+      expect.subType = TOPIC_SUB_TYPE__TABLE;
+      snprintf(expect.subStbName, sizeof(expect.subStbName), "0.%s.%s", pDbName, pTbname);
+    } else if (nullptr != pAst) {
+      expect.subType = TOPIC_SUB_TYPE__COLUMN;
+      expect.ast = (char*)pAst;
+    } else {
+      expect.subType = TOPIC_SUB_TYPE__DB;
+      snprintf(expect.subDbName, sizeof(expect.subDbName), "0.%s", pDbName);
+    }
+  };
+
+  setCheckDdlFunc([&](const SQuery* pQuery, ParserStage stage) {
+    ASSERT_EQ(nodeType(pQuery->pRoot), QUERY_NODE_CREATE_TOPIC_STMT);
+    SCMCreateTopicReq req = {0};
+    ASSERT_TRUE(TSDB_CODE_SUCCESS ==
+                tDeserializeSCMCreateTopicReq(pQuery->pCmdMsg->pMsg, pQuery->pCmdMsg->msgLen, &req));
+
+    ASSERT_EQ(std::string(req.name), std::string(expect.name));
+    ASSERT_EQ(req.igExists, expect.igExists);
+    ASSERT_EQ(req.subType, expect.subType);
+    ASSERT_EQ(std::string(req.sql), std::string(expect.sql));
+    switch (expect.subType) {
+      case TOPIC_SUB_TYPE__DB:
+        ASSERT_EQ(std::string(req.subDbName), std::string(expect.subDbName));
+        break;
+      case TOPIC_SUB_TYPE__TABLE:
+        ASSERT_EQ(std::string(req.subStbName), std::string(expect.subStbName));
+        break;
+      case TOPIC_SUB_TYPE__COLUMN:
+        ASSERT_NE(req.ast, nullptr);
+        break;
+      default:
+        ASSERT_TRUE(false);
+    }
+  });
+
+  setCreateTopicReqFunc("tp1", 0, "create topic tp1 as select * from t1", "ast");
   run("CREATE TOPIC tp1 AS SELECT * FROM t1");
 
-  run("CREATE TOPIC IF NOT EXISTS tp1 AS SELECT * FROM t1");
+  setCreateTopicReqFunc("tp1", 1, "create topic if not exists tp1 as select ts, ceil(c1) from t1", "ast");
+  run("CREATE TOPIC IF NOT EXISTS tp1 AS SELECT ts, CEIL(c1) FROM t1");
 
-  run("CREATE TOPIC tp1 AS test");
+  setCreateTopicReqFunc("tp1", 0, "create topic tp1 as database test", nullptr, "test");
+  run("CREATE TOPIC tp1 AS DATABASE test");
 
-  run("CREATE TOPIC IF NOT EXISTS tp1 AS test");
+  setCreateTopicReqFunc("tp1", 1, "create topic if not exists tp1 as stable st1", nullptr, "test", "st1");
+  run("CREATE TOPIC IF NOT EXISTS tp1 AS STABLE st1");
 }
 
 TEST_F(ParserInitialCTest, createUser) {
diff --git a/source/libs/parser/test/parInitialDTest.cpp b/source/libs/parser/test/parInitialDTest.cpp
index 57d349e7eeecd33fd9855f5a0d8df22548c5ceee..5ad427d964ad1dc47a4fed64b51f89257ae53da6 100644
--- a/source/libs/parser/test/parInitialDTest.cpp
+++ b/source/libs/parser/test/parInitialDTest.cpp
@@ -32,7 +32,7 @@ TEST_F(ParserInitialDTest, dropBnode) {
   run("DROP BNODE ON DNODE 1");
 }
 
-// DROP CGROUP [ IF EXISTS ] cgroup_name ON topic_name
+// DROP CONSUMER GROUP [ IF EXISTS ] cgroup_name ON topic_name
 TEST_F(ParserInitialDTest, dropCGroup) {
   useDb("root", "test");
 
@@ -56,10 +56,10 @@ TEST_F(ParserInitialDTest, dropCGroup) {
   });
 
   setDropCgroupReqFunc("tp1", "cg1");
-  run("DROP CGROUP cg1 ON tp1");
+  run("DROP CONSUMER GROUP cg1 ON tp1");
 
   setDropCgroupReqFunc("tp1", "cg1", 1);
-  run("DROP CGROUP IF EXISTS cg1 ON tp1");
+  run("DROP CONSUMER GROUP IF EXISTS cg1 ON tp1");
 }
 
 // todo drop database
diff --git a/source/libs/parser/test/parInsertTest.cpp b/source/libs/parser/test/parInsertTest.cpp
index 7fafec88824111ef8b170ba25f3b092fd7ba1f1a..4d313fca766e8ab8f8d6ba404f7faf2fe833e9e6 100644
--- a/source/libs/parser/test/parInsertTest.cpp
+++ b/source/libs/parser/test/parInsertTest.cpp
@@ -15,6 +15,7 @@
 #include <gtest/gtest.h>
+#include "mockCatalogService.h"
 
 #include "os.h"
 #include "parInt.h"
@@ -57,6 +58,38 @@ class InsertTest
: public Test { return code_; } + int32_t runAsync() { + code_ = parseInsertSyntax(&cxt_, &res_); + if (code_ != TSDB_CODE_SUCCESS) { + cout << "parseInsertSyntax code:" << toString(code_) << ", msg:" << errMagBuf_ << endl; + return code_; + } + + SCatalogReq catalogReq = {0}; + code_ = buildCatalogReq(res_->pMetaCache, &catalogReq); + if (code_ != TSDB_CODE_SUCCESS) { + cout << "buildCatalogReq code:" << toString(code_) << ", msg:" << errMagBuf_ << endl; + return code_; + } + + SMetaData metaData = {0}; + g_mockCatalogService->catalogGetAllMeta(&catalogReq, &metaData); + + code_ = putMetaDataToCache(&catalogReq, &metaData, res_->pMetaCache); + if (code_ != TSDB_CODE_SUCCESS) { + cout << "putMetaDataToCache code:" << toString(code_) << ", msg:" << errMagBuf_ << endl; + return code_; + } + + code_ = parseInsertSql(&cxt_, &res_); + if (code_ != TSDB_CODE_SUCCESS) { + cout << "parseInsertSql code:" << toString(code_) << ", msg:" << errMagBuf_ << endl; + return code_; + } + + return code_; + } + void dumpReslut() { SVnodeModifOpStmt* pStmt = getVnodeModifStmt(res_); size_t num = taosArrayGetSize(pStmt->pDataBlocks); @@ -125,7 +158,7 @@ class InsertTest : public Test { SQuery* res_; }; -// INSERT INTO tb_name VALUES (field1_value, ...) +// INSERT INTO tb_name [(field1_name, ...)] VALUES (field1_value, ...) TEST_F(InsertTest, singleTableSingleRowTest) { setDatabase("root", "test"); @@ -133,6 +166,17 @@ TEST_F(InsertTest, singleTableSingleRowTest) { ASSERT_EQ(run(), TSDB_CODE_SUCCESS); dumpReslut(); checkReslut(1, 1); + + bind("insert into t1 (ts, c1, c2, c3, c4, c5) values (now, 1, 'beijing', 3, 4, 5)"); + ASSERT_EQ(run(), TSDB_CODE_SUCCESS); + + bind("insert into t1 values (now, 1, 'beijing', 3, 4, 5)"); + ASSERT_EQ(runAsync(), TSDB_CODE_SUCCESS); + dumpReslut(); + checkReslut(1, 1); + + bind("insert into t1 (ts, c1, c2, c3, c4, c5) values (now, 1, 'beijing', 3, 4, 5)"); + ASSERT_EQ(runAsync(), TSDB_CODE_SUCCESS); } // INSERT INTO tb_name VALUES (field1_value, ...)(field1_value, ...) @@ -140,11 +184,16 @@ TEST_F(InsertTest, singleTableMultiRowTest) { setDatabase("root", "test"); bind( - "insert into t1 values (now, 1, 'beijing', 3, 4, 5)(now+1s, 2, 'shanghai', 6, 7, 8)(now+2s, 3, 'guangzhou', 9, " - "10, 11)"); + "insert into t1 values (now, 1, 'beijing', 3, 4, 5)(now+1s, 2, 'shanghai', 6, 7, 8)" + "(now+2s, 3, 'guangzhou', 9, 10, 11)"); ASSERT_EQ(run(), TSDB_CODE_SUCCESS); dumpReslut(); checkReslut(1, 3); + + bind( + "insert into t1 values (now, 1, 'beijing', 3, 4, 5)(now+1s, 2, 'shanghai', 6, 7, 8)" + "(now+2s, 3, 'guangzhou', 9, 10, 11)"); + ASSERT_EQ(runAsync(), TSDB_CODE_SUCCESS); } // INSERT INTO tb1_name VALUES (field1_value, ...) tb2_name VALUES (field1_value, ...) @@ -155,6 +204,9 @@ TEST_F(InsertTest, multiTableSingleRowTest) { ASSERT_EQ(run(), TSDB_CODE_SUCCESS); dumpReslut(); checkReslut(2, 1); + + bind("insert into st1s1 values (now, 1, \"beijing\") st1s2 values (now, 10, \"131028\")"); + ASSERT_EQ(runAsync(), TSDB_CODE_SUCCESS); } // INSERT INTO tb1_name VALUES (field1_value, ...) tb2_name VALUES (field1_value, ...) 
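An aside on the two-pass flow that the new runAsync() helper above drives. Below is a minimal caller sketch, condensed from the test and assuming only the entry points it already uses (parseInsertSyntax, buildCatalogReq, putMetaDataToCache, parseInsertSql) keep those signatures; in production the catalog fetch is an asynchronous RPC, while the test substitutes g_mockCatalogService->catalogGetAllMeta():

    // sketch only: condensed from InsertTest::runAsync(), not part of the patch
    static int32_t parseInsertTwoPass(SParseContext* pCxt, SQuery** pRes) {
      // pass 1: syntax-only parse; records which table/vgroup metadata is needed
      int32_t code = parseInsertSyntax(pCxt, pRes);
      SCatalogReq catalogReq = {0};
      if (TSDB_CODE_SUCCESS == code) {
        code = buildCatalogReq((*pRes)->pMetaCache, &catalogReq);
      }
      SMetaData metaData = {0};
      if (TSDB_CODE_SUCCESS == code) {
        // fetch metaData for catalogReq here (async RPC in production,
        // g_mockCatalogService->catalogGetAllMeta() in the test), then cache it
        code = putMetaDataToCache(&catalogReq, &metaData, (*pRes)->pMetaCache);
      }
      if (TSDB_CODE_SUCCESS == code) {
        // pass 2: full insert parse, now served entirely from the meta cache
        code = parseInsertSql(pCxt, pRes);
      }
      return code;
    }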
@@ -167,6 +219,11 @@ TEST_F(InsertTest, multiTableMultiRowTest) { ASSERT_EQ(run(), TSDB_CODE_SUCCESS); dumpReslut(); checkReslut(2, 3, 2); + + bind( + "insert into st1s1 values (now, 1, \"beijing\")(now+1s, 2, \"shanghai\")(now+2s, 3, \"guangzhou\")" + " st1s2 values (now, 10, \"131028\")(now+1s, 20, \"132028\")"); + ASSERT_EQ(runAsync(), TSDB_CODE_SUCCESS); } // INSERT INTO @@ -181,6 +238,21 @@ TEST_F(InsertTest, autoCreateTableTest) { ASSERT_EQ(run(), TSDB_CODE_SUCCESS); dumpReslut(); checkReslut(1, 3); + + bind( + "insert into st1s1 using st1 (tag1, tag2) tags(1, 'wxy') values (now, 1, \"beijing\")" + "(now+1s, 2, \"shanghai\")(now+2s, 3, \"guangzhou\")"); + ASSERT_EQ(run(), TSDB_CODE_SUCCESS); + + bind( + "insert into st1s1 using st1 tags(1, 'wxy') values (now, 1, \"beijing\")(now+1s, 2, \"shanghai\")(now+2s, 3, " + "\"guangzhou\")"); + ASSERT_EQ(runAsync(), TSDB_CODE_SUCCESS); + + bind( + "insert into st1s1 using st1 (tag1, tag2) tags(1, 'wxy') values (now, 1, \"beijing\")" + "(now+1s, 2, \"shanghai\")(now+2s, 3, \"guangzhou\")"); + ASSERT_EQ(runAsync(), TSDB_CODE_SUCCESS); } TEST_F(InsertTest, toleranceTest) { @@ -190,4 +262,9 @@ TEST_F(InsertTest, toleranceTest) { ASSERT_NE(run(), TSDB_CODE_SUCCESS); bind("insert into t"); ASSERT_NE(run(), TSDB_CODE_SUCCESS); + + bind("insert into"); + ASSERT_NE(runAsync(), TSDB_CODE_SUCCESS); + bind("insert into t"); + ASSERT_NE(runAsync(), TSDB_CODE_SUCCESS); } diff --git a/source/libs/parser/test/parSelectTest.cpp b/source/libs/parser/test/parSelectTest.cpp index 2d4fe41d4fed38bb6f97fcb37c6972aa8c7d65fc..a5192595f0be83afa459429748dab3d8e9b65c4e 100644 --- a/source/libs/parser/test/parSelectTest.cpp +++ b/source/libs/parser/test/parSelectTest.cpp @@ -44,6 +44,8 @@ TEST_F(ParserSelectTest, constant) { "timestamp '2022-02-09 17:30:20', true, false, 15s FROM t1"); run("SELECT 123 + 45 FROM t1 WHERE 2 - 1"); + + run("SELECT * FROM t1 WHERE -2"); } TEST_F(ParserSelectTest, expression) { @@ -76,6 +78,12 @@ TEST_F(ParserSelectTest, pseudoColumnSemanticCheck) { run("SELECT TBNAME FROM (SELECT * FROM st1s1)", TSDB_CODE_PAR_INVALID_TBNAME, PARSER_STAGE_TRANSLATE); } +TEST_F(ParserSelectTest, aggFunc) { + useDb("root", "test"); + + run("SELECT LEASTSQUARES(c1, -1, 1) FROM t1"); +} + TEST_F(ParserSelectTest, multiResFunc) { useDb("root", "test"); @@ -244,6 +252,8 @@ TEST_F(ParserSelectTest, semanticError) { // TSDB_CODE_PAR_AMBIGUOUS_COLUMN run("SELECT c2 FROM t1 tt1, t1 tt2 WHERE tt1.c1 = tt2.c1", TSDB_CODE_PAR_AMBIGUOUS_COLUMN, PARSER_STAGE_TRANSLATE); + run("SELECT c2 FROM (SELECT c1 c2, c2 FROM t1)", TSDB_CODE_PAR_AMBIGUOUS_COLUMN, PARSER_STAGE_TRANSLATE); + // TSDB_CODE_PAR_WRONG_VALUE_TYPE run("SELECT timestamp '2010a' FROM t1", TSDB_CODE_PAR_WRONG_VALUE_TYPE, PARSER_STAGE_TRANSLATE); diff --git a/source/libs/planner/src/planLogicCreater.c b/source/libs/planner/src/planLogicCreater.c index 467b26b7c4af61a8f0cca3d706f34c0133995fe3..eae8799b04bdc03fcb63190a33bfa83838f65ca3 100644 --- a/source/libs/planner/src/planLogicCreater.c +++ b/source/libs/planner/src/planLogicCreater.c @@ -124,6 +124,7 @@ static int32_t createChildLogicNode(SLogicPlanContext* pCxt, SSelectStmt* pSelec SLogicNode* pNode = NULL; int32_t code = func(pCxt, pSelect, &pNode); if (TSDB_CODE_SUCCESS == code && NULL != pNode) { + pNode->precision = pSelect->precision; code = pushLogicNode(pCxt, pRoot, pNode); } if (TSDB_CODE_SUCCESS != code) { @@ -400,6 +401,7 @@ static int32_t createLogicNodeByTable(SLogicPlanContext* pCxt, SSelectStmt* pSel nodesDestroyNode(pNode); return 
TSDB_CODE_OUT_OF_MEMORY; } + pNode->precision = pSelect->precision; *pLogicNode = pNode; } return code; @@ -485,6 +487,10 @@ static int32_t createWindowLogicNodeFinalize(SLogicPlanContext* pCxt, SSelectStm pWindow->watermark = pCxt->pPlanCxt->watermark; } + if (pCxt->pPlanCxt->rSmaQuery) { + pWindow->filesFactor = pCxt->pPlanCxt->filesFactor; + } + if (TSDB_CODE_SUCCESS == code) { code = rewriteExprForSelect(pWindow->pFuncs, pSelect, SQL_CLAUSE_WINDOW); } diff --git a/source/libs/planner/src/planOptimizer.c b/source/libs/planner/src/planOptimizer.c index adc07fcd0d6b6a0c6f98fdf5032151dab3ae71f3..5f88fc40e54c5e000a6e4506b30a2063acfbc8f1 100644 --- a/source/libs/planner/src/planOptimizer.c +++ b/source/libs/planner/src/planOptimizer.c @@ -99,7 +99,8 @@ static bool osdMayBeOptimized(SLogicNode* pNode) { return false; } // todo: release after function splitting - if (TSDB_SUPER_TABLE == ((SScanLogicNode*)pNode)->pMeta->tableType) { + if (TSDB_SUPER_TABLE == ((SScanLogicNode*)pNode)->pMeta->tableType && + SCAN_TYPE_STREAM != ((SScanLogicNode*)pNode)->scanType) { return false; } if (NULL == pNode->pParent || (QUERY_NODE_LOGIC_PLAN_WINDOW != nodeType(pNode->pParent) && @@ -226,6 +227,7 @@ static void setScanWindowInfo(SScanLogicNode* pScan) { pScan->triggerType = ((SWindowLogicNode*)pScan->node.pParent)->triggerType; pScan->watermark = ((SWindowLogicNode*)pScan->node.pParent)->watermark; pScan->tsColId = ((SColumnNode*)((SWindowLogicNode*)pScan->node.pParent)->pTspk)->colId; + pScan->filesFactor = ((SWindowLogicNode*)pScan->node.pParent)->filesFactor; } } diff --git a/source/libs/planner/src/planPhysiCreater.c b/source/libs/planner/src/planPhysiCreater.c index a45eabefb9f1f1f7fe9c97a3f8c7cf16385d2fc3..62fde889da7e71bdd65b9799369cacf0de88d132 100644 --- a/source/libs/planner/src/planPhysiCreater.c +++ b/source/libs/planner/src/planPhysiCreater.c @@ -506,6 +506,7 @@ static int32_t createTableScanPhysiNode(SPhysiPlanContext* pCxt, SSubplan* pSubp pTableScan->triggerType = pScanLogicNode->triggerType; pTableScan->watermark = pScanLogicNode->watermark; pTableScan->tsColId = pScanLogicNode->tsColId; + pTableScan->filesFactor = pScanLogicNode->filesFactor; return createScanPhysiNodeFinalize(pCxt, pSubplan, pScanLogicNode, (SScanPhysiNode*)pTableScan, pPhyNode); } @@ -917,6 +918,7 @@ static int32_t createWindowPhysiNodeFinalize(SPhysiPlanContext* pCxt, SNodeList* pWindow->triggerType = pWindowLogicNode->triggerType; pWindow->watermark = pWindowLogicNode->watermark; + pWindow->filesFactor = pWindowLogicNode->filesFactor; if (TSDB_CODE_SUCCESS == code) { *pPhyNode = (SPhysiNode*)pWindow; diff --git a/source/libs/planner/test/planSTableTest.cpp b/source/libs/planner/test/planSTableTest.cpp index ed75b75e514aede02f41bf29ea044ccf833aef83..d1608cbad1155baf1bda19cf7c06a5121b0d581a 100644 --- a/source/libs/planner/test/planSTableTest.cpp +++ b/source/libs/planner/test/planSTableTest.cpp @@ -27,6 +27,14 @@ TEST_F(PlanSuperTableTest, pseudoCol) { run("SELECT TBNAME, tag1, tag2 FROM st1"); } +TEST_F(PlanSuperTableTest, pseudoColOnChildTable) { + useDb("root", "test"); + + run("SELECT TBNAME FROM st1s1"); + + run("SELECT TBNAME, tag1, tag2 FROM st1s1"); +} + TEST_F(PlanSuperTableTest, orderBy) { useDb("root", "test"); diff --git a/source/libs/qworker/inc/qwInt.h b/source/libs/qworker/inc/qwInt.h index f6d35ac4c1d57a2448b5e558b138f5b2e73597e3..b0a102069dc7d00e3002d14c76ec9c65f0854d92 100644 --- a/source/libs/qworker/inc/qwInt.h +++ b/source/libs/qworker/inc/qwInt.h @@ -145,6 +145,15 @@ typedef struct SQWSchStatus { 
SHashObj *tasksHash; // key:queryId+taskId, value: SQWTaskStatus } SQWSchStatus; +typedef struct SQWWaitTimeStat { + uint64_t num; + uint64_t total; +} SQWWaitTimeStat; + +typedef struct SQWStat { + SQWWaitTimeStat msgWait[2]; +} SQWStat; + // Qnode/Vnode level task management typedef struct SQWorker { int64_t refId; @@ -155,9 +164,10 @@ typedef struct SQWorker { tmr_h hbTimer; SRWLatch schLock; // SRWLatch ctxLock; - SHashObj *schHash; // key: schedulerId, value: SQWSchStatus - SHashObj *ctxHash; // key: queryId+taskId, value: SQWTaskCtx - SMsgCb msgCb; + SHashObj *schHash; // key: schedulerId, value: SQWSchStatus + SHashObj *ctxHash; // key: queryId+taskId, value: SQWTaskCtx + SMsgCb msgCb; + SQWStat stat; } SQWorker; typedef struct SQWorkerMgmt { @@ -322,6 +332,8 @@ int32_t qwDropTask(QW_FPARAMS_DEF); void qwSaveTbVersionInfo(qTaskInfo_t pTaskInfo, SQWTaskCtx *ctx); int32_t qwOpenRef(void); void qwSetHbParam(int64_t refId, SQWHbParam **pParam); +int32_t qwUpdateWaitTimeInQueue(SQWorker *mgmt, int64_t ts, EQueueType type); +int64_t qwGetWaitTimeInQueue(SQWorker *mgmt, EQueueType type); void qwDbgDumpMgmtInfo(SQWorker *mgmt); int32_t qwDbgValidateStatus(QW_FPARAMS_DEF, int8_t oriStatus, int8_t newStatus, bool *ignore); diff --git a/source/libs/qworker/src/qwMsg.c b/source/libs/qworker/src/qwMsg.c index 46c2084494b2c64f6ffc81ca11205fba6adf890c..b9dc18cd2fd22ff196a300451d1d39b5bcd2353d 100644 --- a/source/libs/qworker/src/qwMsg.c +++ b/source/libs/qworker/src/qwMsg.c @@ -248,7 +248,7 @@ int32_t qwRegisterHbBrokenLinkArg(SQWorker *mgmt, uint64_t sId, SRpcHandleInfo * return TSDB_CODE_SUCCESS; } -int32_t qWorkerProcessQueryMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) { +int32_t qWorkerProcessQueryMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, int64_t ts) { if (NULL == node || NULL == qWorkerMgmt || NULL == pMsg) { QW_ERR_RET(TSDB_CODE_QRY_INVALID_INPUT); } @@ -257,6 +257,8 @@ int32_t qWorkerProcessQueryMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) { SSubQueryMsg *msg = pMsg->pCont; SQWorker * mgmt = (SQWorker *)qWorkerMgmt; + qwUpdateWaitTimeInQueue(mgmt, ts, QUERY_QUEUE); + if (NULL == msg || pMsg->contLen <= sizeof(*msg)) { QW_ELOG("invalid query msg, msg:%p, msgLen:%d", msg, pMsg->contLen); QW_ERR_RET(TSDB_CODE_QRY_INVALID_INPUT); @@ -286,7 +288,7 @@ int32_t qWorkerProcessQueryMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) { return TSDB_CODE_SUCCESS; } -int32_t qWorkerProcessCQueryMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) { +int32_t qWorkerProcessCQueryMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, int64_t ts) { int32_t code = 0; int8_t status = 0; bool queryDone = false; @@ -295,6 +297,8 @@ int32_t qWorkerProcessCQueryMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) { SQWTaskCtx * handles = NULL; SQWorker * mgmt = (SQWorker *)qWorkerMgmt; + qwUpdateWaitTimeInQueue(mgmt, ts, QUERY_QUEUE); + if (NULL == msg || pMsg->contLen < sizeof(*msg)) { QW_ELOG("invalid cquery msg, msg:%p, msgLen:%d", msg, pMsg->contLen); QW_ERR_RET(TSDB_CODE_QRY_INVALID_INPUT); @@ -316,7 +320,7 @@ int32_t qWorkerProcessCQueryMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) { return TSDB_CODE_SUCCESS; } -int32_t qWorkerProcessFetchMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) { +int32_t qWorkerProcessFetchMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, int64_t ts) { if (NULL == node || NULL == qWorkerMgmt || NULL == pMsg) { return TSDB_CODE_QRY_INVALID_INPUT; } @@ -324,6 +328,8 @@ int32_t qWorkerProcessFetchMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) { SResFetchReq *msg = 
pMsg->pCont; SQWorker * mgmt = (SQWorker *)qWorkerMgmt; + qwUpdateWaitTimeInQueue(mgmt, ts, FETCH_QUEUE); + if (NULL == msg || pMsg->contLen < sizeof(*msg)) { QW_ELOG("invalid fetch msg, msg:%p, msgLen:%d", msg, pMsg->contLen); QW_ERR_RET(TSDB_CODE_QRY_INVALID_INPUT); @@ -349,13 +355,16 @@ int32_t qWorkerProcessFetchMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) { return TSDB_CODE_SUCCESS; } -int32_t qWorkerProcessFetchRsp(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) { +int32_t qWorkerProcessFetchRsp(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, int64_t ts) { + SQWorker * mgmt = (SQWorker *)qWorkerMgmt; + qwUpdateWaitTimeInQueue(mgmt, ts, FETCH_QUEUE); + qProcessFetchRsp(NULL, pMsg, NULL); pMsg->pCont = NULL; return TSDB_CODE_SUCCESS; } -int32_t qWorkerProcessCancelMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) { +int32_t qWorkerProcessCancelMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, int64_t ts) { if (NULL == node || NULL == qWorkerMgmt || NULL == pMsg) { return TSDB_CODE_QRY_INVALID_INPUT; } @@ -363,6 +372,9 @@ int32_t qWorkerProcessCancelMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) { SQWorker * mgmt = (SQWorker *)qWorkerMgmt; int32_t code = 0; STaskCancelReq *msg = pMsg->pCont; + + qwUpdateWaitTimeInQueue(mgmt, ts, FETCH_QUEUE); + if (NULL == msg || pMsg->contLen < sizeof(*msg)) { qError("invalid task cancel msg"); QW_ERR_RET(TSDB_CODE_QRY_INVALID_INPUT); @@ -390,7 +402,7 @@ _return: return TSDB_CODE_SUCCESS; } -int32_t qWorkerProcessDropMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) { +int32_t qWorkerProcessDropMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, int64_t ts) { if (NULL == node || NULL == qWorkerMgmt || NULL == pMsg) { return TSDB_CODE_QRY_INVALID_INPUT; } @@ -399,6 +411,8 @@ int32_t qWorkerProcessDropMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) { STaskDropReq *msg = pMsg->pCont; SQWorker * mgmt = (SQWorker *)qWorkerMgmt; + qwUpdateWaitTimeInQueue(mgmt, ts, FETCH_QUEUE); + if (NULL == msg || pMsg->contLen < sizeof(*msg)) { QW_ELOG("invalid task drop msg, msg:%p, msgLen:%d", msg, pMsg->contLen); QW_ERR_RET(TSDB_CODE_QRY_INVALID_INPUT); @@ -429,7 +443,7 @@ int32_t qWorkerProcessDropMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) { return TSDB_CODE_SUCCESS; } -int32_t qWorkerProcessHbMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) { +int32_t qWorkerProcessHbMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, int64_t ts) { if (NULL == node || NULL == qWorkerMgmt || NULL == pMsg) { return TSDB_CODE_QRY_INVALID_INPUT; } @@ -438,6 +452,8 @@ int32_t qWorkerProcessHbMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) { SSchedulerHbReq req = {0}; SQWorker * mgmt = (SQWorker *)qWorkerMgmt; + qwUpdateWaitTimeInQueue(mgmt, ts, FETCH_QUEUE); + if (NULL == pMsg->pCont) { QW_ELOG("invalid hb msg, msg:%p, msgLen:%d", pMsg->pCont, pMsg->contLen); QW_ERR_RET(TSDB_CODE_QRY_INVALID_INPUT); diff --git a/source/libs/qworker/src/qwUtil.c b/source/libs/qworker/src/qwUtil.c index a96a3343e701d222e1cc6e0b27fa7ede7e581f02..a4bc22fc88121de7d51e3e67655468046e95c3bf 100644 --- a/source/libs/qworker/src/qwUtil.c +++ b/source/libs/qworker/src/qwUtil.c @@ -499,4 +499,43 @@ int32_t qwOpenRef(void) { return TSDB_CODE_SUCCESS; } +int32_t qwUpdateWaitTimeInQueue(SQWorker *mgmt, int64_t ts, EQueueType type) { + if (ts <= 0) { + return TSDB_CODE_SUCCESS; + } + + int64_t duration = taosGetTimestampUs() - ts; + switch (type) { + case QUERY_QUEUE: + ++mgmt->stat.msgWait[0].num; + mgmt->stat.msgWait[0].total += duration; + break; + case FETCH_QUEUE: + ++mgmt->stat.msgWait[1].num; + 
mgmt->stat.msgWait[1].total += duration; + break; + default: + qError("unsupported queue type %d", type); + return TSDB_CODE_APP_ERROR; + } + + return TSDB_CODE_SUCCESS; +} + +int64_t qwGetWaitTimeInQueue(SQWorker *mgmt, EQueueType type) { + SQWWaitTimeStat *pStat = NULL; + switch (type) { + case QUERY_QUEUE: + pStat = &mgmt->stat.msgWait[0]; + return pStat->num ? (pStat->total/pStat->num) : 0; + case FETCH_QUEUE: + pStat = &mgmt->stat.msgWait[1]; + return pStat->num ? (pStat->total/pStat->num) : 0; + default: + qError("unsupported queue type %d", type); + return -1; + } +} + + diff --git a/source/libs/qworker/src/qworker.c b/source/libs/qworker/src/qworker.c index 3ee152606e78bd309e47c9dee1e1dd91643c0eb1..655dcbc853adba2dcfa2ebedac89bb0b35bb343b 100644 --- a/source/libs/qworker/src/qworker.c +++ b/source/libs/qworker/src/qworker.c @@ -79,7 +79,11 @@ int32_t qwExecTask(QW_FPARAMS_DEF, SQWTaskCtx *ctx, bool *queryEnd) { if (taskHandle) { code = qExecTask(taskHandle, &pRes, &useconds); if (code) { - QW_TASK_ELOG("qExecTask failed, code:%x - %s", code, tstrerror(code)); + if (code != TSDB_CODE_OPS_NOT_SUPPORT) { + QW_TASK_ELOG("qExecTask failed, code:%x - %s", code, tstrerror(code)); + } else { + QW_TASK_DLOG("qExecTask failed, code:%x - %s", code, tstrerror(code)); + } QW_ERR_RET(code); } } @@ -950,4 +954,9 @@ void qWorkerDestroy(void **qWorkerMgmt) { } } +int64_t qWorkerGetWaitTimeInQueue(void *qWorkerMgmt, EQueueType type) { + return qwGetWaitTimeInQueue((SQWorker *)qWorkerMgmt, type); +} + + diff --git a/source/libs/qworker/test/qworkerTests.cpp b/source/libs/qworker/test/qworkerTests.cpp index 42596b1cd22f73dd822a1e9c85f04b6a60ecfb3f..1b959fbe633e0c50ddc7b80af321ee0420a9616d 100644 --- a/source/libs/qworker/test/qworkerTests.cpp +++ b/source/libs/qworker/test/qworkerTests.cpp @@ -635,7 +635,7 @@ void *queryThread(void *param) { while (!qwtTestStop) { qwtBuildQueryReqMsg(&queryRpc); - qWorkerProcessQueryMsg(mockPointer, mgmt, &queryRpc); + qWorkerProcessQueryMsg(mockPointer, mgmt, &queryRpc, 0); if (qwtTestEnableSleep) { taosUsleep(taosRand()%5); } @@ -657,7 +657,7 @@ void *fetchThread(void *param) { while (!qwtTestStop) { qwtBuildFetchReqMsg(&fetchMsg, &fetchRpc); - code = qWorkerProcessFetchMsg(mockPointer, mgmt, &fetchRpc); + code = qWorkerProcessFetchMsg(mockPointer, mgmt, &fetchRpc, 0); if (qwtTestEnableSleep) { taosUsleep(taosRand()%5); } @@ -679,7 +679,7 @@ void *dropThread(void *param) { while (!qwtTestStop) { qwtBuildDropReqMsg(&dropMsg, &dropRpc); - code = qWorkerProcessDropMsg(mockPointer, mgmt, &dropRpc); + code = qWorkerProcessDropMsg(mockPointer, mgmt, &dropRpc, 0); if (qwtTestEnableSleep) { taosUsleep(taosRand()%5); } @@ -758,9 +758,9 @@ void *queryQueueThread(void *param) { } if (TDMT_VND_QUERY == queryRpc->msgType) { - qWorkerProcessQueryMsg(mockPointer, mgmt, queryRpc); + qWorkerProcessQueryMsg(mockPointer, mgmt, queryRpc, 0); } else if (TDMT_VND_QUERY_CONTINUE == queryRpc->msgType) { - qWorkerProcessCQueryMsg(mockPointer, mgmt, queryRpc); + qWorkerProcessCQueryMsg(mockPointer, mgmt, queryRpc, 0); } else { printf("unknown msg in query queue, type:%d\n", queryRpc->msgType); assert(0); @@ -815,13 +815,13 @@ void *fetchQueueThread(void *param) { switch (fetchRpc->msgType) { case TDMT_VND_FETCH: - qWorkerProcessFetchMsg(mockPointer, mgmt, fetchRpc); + qWorkerProcessFetchMsg(mockPointer, mgmt, fetchRpc, 0); break; case TDMT_VND_CANCEL_TASK: - qWorkerProcessCancelMsg(mockPointer, mgmt, fetchRpc); + qWorkerProcessCancelMsg(mockPointer, mgmt, fetchRpc, 0); break; case 
TDMT_VND_DROP_TASK: - qWorkerProcessDropMsg(mockPointer, mgmt, fetchRpc); + qWorkerProcessDropMsg(mockPointer, mgmt, fetchRpc, 0); break; default: printf("unknown msg type:%d in fetch queue", fetchRpc->msgType); @@ -878,16 +878,16 @@ TEST(seqTest, normalCase) { code = qWorkerInit(NODE_TYPE_VNODE, 1, NULL, &mgmt, &msgCb); ASSERT_EQ(code, 0); - code = qWorkerProcessQueryMsg(mockPointer, mgmt, &queryRpc); + code = qWorkerProcessQueryMsg(mockPointer, mgmt, &queryRpc, 0); ASSERT_EQ(code, 0); //code = qWorkerProcessReadyMsg(mockPointer, mgmt, &readyRpc); //ASSERT_EQ(code, 0); - code = qWorkerProcessFetchMsg(mockPointer, mgmt, &fetchRpc); + code = qWorkerProcessFetchMsg(mockPointer, mgmt, &fetchRpc, 0); ASSERT_EQ(code, 0); - code = qWorkerProcessDropMsg(mockPointer, mgmt, &dropRpc); + code = qWorkerProcessDropMsg(mockPointer, mgmt, &dropRpc, 0); ASSERT_EQ(code, 0); qWorkerDestroy(&mgmt); @@ -914,10 +914,10 @@ TEST(seqTest, cancelFirst) { code = qWorkerInit(NODE_TYPE_VNODE, 1, NULL, &mgmt, &msgCb); ASSERT_EQ(code, 0); - code = qWorkerProcessDropMsg(mockPointer, mgmt, &dropRpc); + code = qWorkerProcessDropMsg(mockPointer, mgmt, &dropRpc, 0); ASSERT_EQ(code, 0); - code = qWorkerProcessQueryMsg(mockPointer, mgmt, &queryRpc); + code = qWorkerProcessQueryMsg(mockPointer, mgmt, &queryRpc, 0); ASSERT_TRUE(0 != code); qWorkerDestroy(&mgmt); @@ -959,7 +959,7 @@ TEST(seqTest, randCase) { if (r >= 0 && r < maxr/5) { printf("Query,%d\n", t++); qwtBuildQueryReqMsg(&queryRpc); - code = qWorkerProcessQueryMsg(mockPointer, mgmt, &queryRpc); + code = qWorkerProcessQueryMsg(mockPointer, mgmt, &queryRpc, 0); } else if (r >= maxr/5 && r < maxr * 2/5) { //printf("Ready,%d\n", t++); //qwtBuildReadyReqMsg(&readyMsg, &readyRpc); @@ -970,14 +970,14 @@ TEST(seqTest, randCase) { } else if (r >= maxr * 2/5 && r < maxr* 3/5) { printf("Fetch,%d\n", t++); qwtBuildFetchReqMsg(&fetchMsg, &fetchRpc); - code = qWorkerProcessFetchMsg(mockPointer, mgmt, &fetchRpc); + code = qWorkerProcessFetchMsg(mockPointer, mgmt, &fetchRpc, 0); if (qwtTestEnableSleep) { taosUsleep(1); } } else if (r >= maxr * 3/5 && r < maxr * 4/5) { printf("Drop,%d\n", t++); qwtBuildDropReqMsg(&dropMsg, &dropRpc); - code = qWorkerProcessDropMsg(mockPointer, mgmt, &dropRpc); + code = qWorkerProcessDropMsg(mockPointer, mgmt, &dropRpc, 0); if (qwtTestEnableSleep) { taosUsleep(1); } diff --git a/source/libs/scalar/inc/sclInt.h b/source/libs/scalar/inc/sclInt.h index 9dbfeceb5940d4237ead01ff445529c2d7d447ac..1c2e4a358a2c256cf3ed577be568c2e93fe13cbe 100644 --- a/source/libs/scalar/inc/sclInt.h +++ b/source/libs/scalar/inc/sclInt.h @@ -51,7 +51,7 @@ typedef struct SScalarCtx { int32_t doConvertDataType(SValueNode* pValueNode, SScalarParam* out); SColumnInfoData* createColumnInfoData(SDataType* pType, int32_t numOfRows); -void sclConvertToTsValueNode(int8_t precision, SValueNode* valueNode); +int32_t sclConvertToTsValueNode(int8_t precision, SValueNode* valueNode); #define GET_PARAM_TYPE(_c) ((_c)->columnData->info.type) #define GET_PARAM_BYTES(_c) ((_c)->columnData->info.bytes) diff --git a/source/libs/scalar/src/filter.c b/source/libs/scalar/src/filter.c index 4317ad325e7e0d7b468dd7929c1f4a7c9ff7c169..195ec8a57791062cbca0e4c1a39ccce1866a5095 100644 --- a/source/libs/scalar/src/filter.c +++ b/source/libs/scalar/src/filter.c @@ -3553,7 +3553,11 @@ EDealRes fltReviseRewriter(SNode** pNode, void* pContext) { return DEAL_RES_CONTINUE; } - sclConvertToTsValueNode(stat->precision, valueNode); + int32_t code = sclConvertToTsValueNode(stat->precision, valueNode); + if (code) { + 
stat->code = code; + return DEAL_RES_ERROR; + } return DEAL_RES_CONTINUE; } @@ -3687,7 +3691,7 @@ int32_t fltReviseNodes(SFilterInfo *pInfo, SNode** pNode, SFltTreeStat *pStat) { for (int32_t i = 0; i < nodeNum; ++i) { SValueNode *valueNode = *(SValueNode **)taosArrayGet(pStat->nodeList, i); - sclConvertToTsValueNode(pStat->precision, valueNode); + FLT_ERR_JRET(sclConvertToTsValueNode(pStat->precision, valueNode)); } _return: diff --git a/source/libs/scalar/src/scalar.c b/source/libs/scalar/src/scalar.c index fb03eaefa4fe79034d731b74de6bd166fa0db83e..d2436b9948f2cf7bfa15d061cdc9bbfdfefd6f08 100644 --- a/source/libs/scalar/src/scalar.c +++ b/source/libs/scalar/src/scalar.c @@ -20,17 +20,19 @@ int32_t scalarGetOperatorParamNum(EOperatorType type) { return 2; } -void sclConvertToTsValueNode(int8_t precision, SValueNode* valueNode) { +int32_t sclConvertToTsValueNode(int8_t precision, SValueNode* valueNode) { char *timeStr = valueNode->datum.p; - if (convertStringToTimestamp(valueNode->node.resType.type, valueNode->datum.p, precision, &valueNode->datum.i) != - TSDB_CODE_SUCCESS) { - valueNode->datum.i = 0; + int32_t code = convertStringToTimestamp(valueNode->node.resType.type, valueNode->datum.p, precision, &valueNode->datum.i); + if (code != TSDB_CODE_SUCCESS) { + return code; } taosMemoryFree(timeStr); valueNode->typeData = valueNode->datum.i; valueNode->node.resType.type = TSDB_DATA_TYPE_TIMESTAMP; valueNode->node.resType.bytes = tDataTypes[TSDB_DATA_TYPE_TIMESTAMP].bytes; + + return TSDB_CODE_SUCCESS; } @@ -546,6 +548,7 @@ EDealRes sclRewriteBasedOnOptr(SNode** pNode, SScalarCtx *ctx, EOperatorType opT EDealRes sclRewriteNonConstOperator(SNode** pNode, SScalarCtx *ctx) { SOperatorNode *node = (SOperatorNode *)*pNode; + int32_t code = 0; if (node->pLeft && (QUERY_NODE_VALUE == nodeType(node->pLeft))) { SValueNode *valueNode = (SValueNode *)node->pLeft; @@ -555,7 +558,11 @@ EDealRes sclRewriteNonConstOperator(SNode** pNode, SScalarCtx *ctx) { if (IS_STR_DATA_TYPE(valueNode->node.resType.type) && node->pRight && nodesIsExprNode(node->pRight) && ((SExprNode*)node->pRight)->resType.type == TSDB_DATA_TYPE_TIMESTAMP) { - sclConvertToTsValueNode(((SExprNode*)node->pRight)->resType.precision, valueNode); + code = sclConvertToTsValueNode(((SExprNode*)node->pRight)->resType.precision, valueNode); + if (code) { + ctx->code = code; + return DEAL_RES_ERROR; + } } } @@ -567,7 +574,11 @@ EDealRes sclRewriteNonConstOperator(SNode** pNode, SScalarCtx *ctx) { if (IS_STR_DATA_TYPE(valueNode->node.resType.type) && node->pLeft && nodesIsExprNode(node->pLeft) && ((SExprNode*)node->pLeft)->resType.type == TSDB_DATA_TYPE_TIMESTAMP) { - sclConvertToTsValueNode(((SExprNode*)node->pLeft)->resType.precision, valueNode); + code = sclConvertToTsValueNode(((SExprNode*)node->pLeft)->resType.precision, valueNode); + if (code) { + ctx->code = code; + return DEAL_RES_ERROR; + } } } diff --git a/source/libs/scalar/src/sclfunc.c b/source/libs/scalar/src/sclfunc.c index 18c1f08c6c7288e63619d272a1c71531a423af8f..6ee5f038d661d06090d74487531adabec4c9abf9 100644 --- a/source/libs/scalar/src/sclfunc.c +++ b/source/libs/scalar/src/sclfunc.c @@ -633,7 +633,7 @@ static int32_t doTrimFunction(SScalarParam *pInput, int32_t inputNum, SScalarPar continue; } - char *input = colDataGetData(pInput[0].columnData, i); + char *input = colDataGetData(pInputData, i); int32_t len = varDataLen(input); int32_t charLen = (type == TSDB_DATA_TYPE_VARCHAR) ? 
len : len / TSDB_NCHAR_SIZE; trimFn(input, output, type, charLen); @@ -893,7 +893,7 @@ int32_t toISO8601Function(SScalarParam *pInput, int32_t inputNum, SScalarParam * memmove(tzInfo + fracLen, tzInfo, strlen(tzInfo)); } - char tmp[32]; + char tmp[32] = {0}; sprintf(tmp, ".%s", fraction); memcpy(tzInfo, tmp, fracLen); len += fracLen; diff --git a/source/libs/scalar/test/scalar/CMakeLists.txt b/source/libs/scalar/test/scalar/CMakeLists.txt index 672cb5a3de39bfed51c9d399ac3d0431614f50ab..86b936d93ae950e27069835cffcb0e8a99768ac9 100644 --- a/source/libs/scalar/test/scalar/CMakeLists.txt +++ b/source/libs/scalar/test/scalar/CMakeLists.txt @@ -17,9 +17,7 @@ TARGET_INCLUDE_DIRECTORIES( PUBLIC "${TD_SOURCE_DIR}/source/libs/parser/inc" PRIVATE "${TD_SOURCE_DIR}/source/libs/scalar/inc" ) -if(NOT TD_WINDOWS) - add_test( - NAME scalarTest - COMMAND scalarTest - ) -endif(NOT TD_WINDOWS) +add_test( + NAME scalarTest + COMMAND scalarTest +) diff --git a/source/libs/scalar/test/scalar/scalarTests.cpp b/source/libs/scalar/test/scalar/scalarTests.cpp index 3fafc83b18365d490003a792748606c8d4fce804..6a32c6577532c7c7bbb3f07e0f012706dd7163df 100644 --- a/source/libs/scalar/test/scalar/scalarTests.cpp +++ b/source/libs/scalar/test/scalar/scalarTests.cpp @@ -2498,7 +2498,7 @@ TEST(ScalarFunctionTest, tanFunction_column) { code = tanFunction(pInput, 1, pOutput); ASSERT_EQ(code, TSDB_CODE_SUCCESS); for (int32_t i = 0; i < rowNum; ++i) { - ASSERT_EQ(*((double *)colDataGetData(pOutput->columnData, i)), result[i]); + ASSERT_NEAR(*((double *)colDataGetData(pOutput->columnData, i)), result[i], 1e-15); PRINTF("tiny_int after TAN:%f\n", *((double *)colDataGetData(pOutput->columnData, i))); } scltDestroyDataBlock(pInput); @@ -2517,7 +2517,7 @@ TEST(ScalarFunctionTest, tanFunction_column) { code = tanFunction(pInput, 1, pOutput); ASSERT_EQ(code, TSDB_CODE_SUCCESS); for (int32_t i = 0; i < rowNum; ++i) { - ASSERT_EQ(*((double *)colDataGetData(pOutput->columnData, i)), result[i]); + ASSERT_NEAR(*((double *)colDataGetData(pOutput->columnData, i)), result[i], 1e-15); PRINTF("float after TAN:%f\n", *((double *)colDataGetData(pOutput->columnData, i))); } diff --git a/source/libs/scheduler/src/schRemote.c b/source/libs/scheduler/src/schRemote.c index dad4f7196ffaca8ad09da225dd37944210899435..312d587b6f0ee29a9f2da22afc23a2834747b063 100644 --- a/source/libs/scheduler/src/schRemote.c +++ b/source/libs/scheduler/src/schRemote.c @@ -94,6 +94,7 @@ int32_t schHandleResponseMsg(SSchJob *pJob, SSchTask *pTask, int32_t msgType, ch if (schJobNeedToStop(pJob, &status)) { SCH_TASK_ELOG("rsp not processed cause of job status, job status:%s, rspCode:0x%x", jobTaskStatusStr(status), rspCode); + taosMemoryFreeClear(msg); SCH_RET(atomic_load_32(&pJob->errCode)); } @@ -121,6 +122,8 @@ int32_t schHandleResponseMsg(SSchJob *pJob, SSchTask *pTask, int32_t msgType, ch } SCH_ERR_JRET(rspCode); + taosMemoryFreeClear(msg); + SCH_ERR_RET(schProcessOnTaskSuccess(pJob, pTask)); break; } @@ -145,6 +148,8 @@ int32_t schHandleResponseMsg(SSchJob *pJob, SSchTask *pTask, int32_t msgType, ch } SCH_ERR_JRET(rspCode); + taosMemoryFreeClear(msg); + SCH_ERR_RET(schProcessOnTaskSuccess(pJob, pTask)); break; } @@ -164,6 +169,9 @@ int32_t schHandleResponseMsg(SSchJob *pJob, SSchTask *pTask, int32_t msgType, ch if (NULL == msg) { SCH_ERR_JRET(TSDB_CODE_QRY_INVALID_INPUT); } + + taosMemoryFreeClear(msg); + SCH_ERR_RET(schProcessOnTaskSuccess(pJob, pTask)); break; } @@ -210,6 +218,8 @@ int32_t schHandleResponseMsg(SSchJob *pJob, SSchTask *pTask, int32_t msgType, ch 
SCH_UNLOCK(SCH_WRITE, &pJob->resLock); } + taosMemoryFreeClear(msg); + SCH_ERR_RET(schProcessOnTaskSuccess(pJob, pTask)); break; @@ -224,6 +234,8 @@ int32_t schHandleResponseMsg(SSchJob *pJob, SSchTask *pTask, int32_t msgType, ch SCH_ERR_JRET(rsp->code); SCH_ERR_JRET(schSaveJobQueryRes(pJob, rsp)); + + taosMemoryFreeClear(msg); SCH_ERR_RET(schProcessOnTaskSuccess(pJob, pTask)); @@ -275,6 +287,8 @@ int32_t schHandleResponseMsg(SSchJob *pJob, SSchTask *pTask, int32_t msgType, ch SCH_ERR_JRET(schProcessOnExplainDone(pJob, pTask, pRsp)); } + taosMemoryFreeClear(msg); + return TSDB_CODE_SUCCESS; } @@ -282,6 +296,8 @@ int32_t schHandleResponseMsg(SSchJob *pJob, SSchTask *pTask, int32_t msgType, ch SCH_ERR_JRET(schFetchFromRemote(pJob)); + taosMemoryFreeClear(msg); + return TSDB_CODE_SUCCESS; } @@ -300,6 +316,8 @@ int32_t schHandleResponseMsg(SSchJob *pJob, SSchTask *pTask, int32_t msgType, ch SCH_TASK_DLOG("got fetch rsp, rows:%d, complete:%d", htonl(rsp->numOfRows), rsp->completed); + msg = NULL; + schProcessOnDataFetched(pJob); break; } @@ -322,6 +340,8 @@ int32_t schHandleResponseMsg(SSchJob *pJob, SSchTask *pTask, int32_t msgType, ch _return: + taosMemoryFreeClear(msg); + SCH_RET(schProcessOnTaskFailure(pJob, pTask, code)); } diff --git a/source/libs/scheduler/src/schUtil.c b/source/libs/scheduler/src/schUtil.c index cec754bdcdc76a83bf637448c4303037b8b74447..81c95ea976e0c685fa1585df6dbb42bed75fd0c8 100644 --- a/source/libs/scheduler/src/schUtil.c +++ b/source/libs/scheduler/src/schUtil.c @@ -41,7 +41,7 @@ uint64_t schGenUUID(void) { static int32_t requestSerialId = 0; if (hashId == 0) { - char uid[64]; + char uid[64] = {0}; int32_t code = taosGetSystemUUID(uid, tListLen(uid)); if (code != TSDB_CODE_SUCCESS) { qError("Failed to get the system uid, reason:%s", tstrerror(TAOS_SYSTEM_ERROR(errno))); diff --git a/source/libs/stream/src/tstream.c b/source/libs/stream/src/tstream.c index cc9fc8bd803ae68e909c69634e0077e1e5507c90..e033645667b2f1a3953feaaaca8daf4ed4331bf8 100644 --- a/source/libs/stream/src/tstream.c +++ b/source/libs/stream/src/tstream.c @@ -67,7 +67,7 @@ static int32_t streamBuildExecMsg(SStreamTask* pTask, SArray* data, SRpcMsg* pMs } else if (pTask->dispatchType == TASK_DISPATCH__SHUFFLE) { // TODO use general name rule of schemaless - char ctbName[TSDB_TABLE_FNAME_LEN + 22]; + char ctbName[TSDB_TABLE_FNAME_LEN + 22] = {0}; // all groupId must be the same in an array SSDataBlock* pBlock = taosArrayGet(data, 0); sprintf(ctbName, "%s:%ld", pTask->shuffleDispatcher.stbFullName, pBlock->info.groupId); diff --git a/source/libs/stream/src/tstreamUpdate.c b/source/libs/stream/src/tstreamUpdate.c index 7587fcecc99962b2cd0eda135a121acb281a1a48..ada391b40a76af148e07789375a756a6590648b3 100644 --- a/source/libs/stream/src/tstreamUpdate.c +++ b/source/libs/stream/src/tstreamUpdate.c @@ -42,7 +42,7 @@ static void windowSBfAdd(SUpdateInfo *pInfo, uint64_t count) { } static void windowSBfDelete(SUpdateInfo *pInfo, uint64_t count) { - if (count < pInfo->numSBFs - 1) { + if (count < pInfo->numSBFs) { for (uint64_t i = 0; i < count; ++i) { SScalableBf *pTsSBFs = taosArrayGetP(pInfo->pTsSBFs, 0); tScalableBfDestroy(pTsSBFs); @@ -73,8 +73,8 @@ static int64_t adjustInterval(int64_t interval, int32_t precision) { } static int64_t adjustWatermark(int64_t adjInterval, int64_t originInt, int64_t watermark) { - if (watermark <= 0) { - watermark = TMIN(originInt/adjInterval, 1) * adjInterval; + if (watermark <= adjInterval) { + watermark = TMAX(originInt/adjInterval, 1) * adjInterval; } else if (watermark > 
MAX_NUM_SCALABLE_BF * adjInterval) {
     watermark = MAX_NUM_SCALABLE_BF * adjInterval;
   }/* else if (watermark < MIN_NUM_SCALABLE_BF * adjInterval) {
diff --git a/source/libs/sync/inc/syncSnapshot.h b/source/libs/sync/inc/syncSnapshot.h
index b3174a4b36855b417ababbb0c4614867433a2e83..43d1c0c0c38bc9836fdb9e3210f141af44376700 100644
--- a/source/libs/sync/inc/syncSnapshot.h
+++ b/source/libs/sync/inc/syncSnapshot.h
@@ -28,10 +28,12 @@ extern "C" {
 #include "taosdef.h"
 typedef struct SSyncSnapshotSender {
-  bool isStart;
-  int32_t progressIndex;
+  int32_t sending;
+  int32_t received;
+  bool finish;
   void * pCurrentBlock;
-  int32_t len;
+  int32_t blockLen;
+  int64_t sendingMS;
   SSnapshot *pSnapshot;
   SSyncNode *pSyncNode;
 } SSyncSnapshotSender;
@@ -43,7 +45,8 @@ cJSON * snapshotSender2Json(SSyncSnapshotSender *pSender);
 char * snapshotSender2Str(SSyncSnapshotSender *pSender);
 typedef struct SSyncSnapshotReceiver {
-  bool isStart;
+  bool start;
+  int32_t received;
   int32_t progressIndex;
   void * pCurrentBlock;
   int32_t len;
diff --git a/source/libs/sync/src/syncAppendEntries.c b/source/libs/sync/src/syncAppendEntries.c
index 46be7597c7f82afefbd83f6f6258c4ba811ae046..3afe7b15e213c0da3760c7a8ef1f313d145cd31f 100644
--- a/source/libs/sync/src/syncAppendEntries.c
+++ b/source/libs/sync/src/syncAppendEntries.c
@@ -89,7 +89,7 @@ int32_t syncNodeOnAppendEntriesCb(SSyncNode* ths, SyncAppendEntries* pMsg) {
   int32_t ret = 0;
-  char logBuf[128];
+  char logBuf[128] = {0};
   snprintf(logBuf, sizeof(logBuf), "==syncNodeOnAppendEntriesCb== term:%lu", ths->pRaftStore->currentTerm);
   syncAppendEntriesLog2(logBuf, pMsg);
diff --git a/source/libs/sync/src/syncAppendEntriesReply.c b/source/libs/sync/src/syncAppendEntriesReply.c
index 77d85e29151205edd31deed1c40f5dbffca90849..4e6d870e194a223bd35d5671dc17532bd5e8626e 100644
--- a/source/libs/sync/src/syncAppendEntriesReply.c
+++ b/source/libs/sync/src/syncAppendEntriesReply.c
@@ -38,7 +38,7 @@ int32_t syncNodeOnAppendEntriesReplyCb(SSyncNode* ths, SyncAppendEntriesReply* pMsg) {
   int32_t ret = 0;
-  char logBuf[128];
+  char logBuf[128] = {0};
   snprintf(logBuf, sizeof(logBuf), "==syncNodeOnAppendEntriesReplyCb== term:%lu", ths->pRaftStore->currentTerm);
   syncAppendEntriesReplyLog2(logBuf, pMsg);
@@ -57,7 +57,7 @@ int32_t syncNodeOnAppendEntriesReplyCb(SSyncNode* ths, SyncAppendEntriesReply* p
   // }
   if (pMsg->term > ths->pRaftStore->currentTerm) {
-    char logBuf[128];
+    char logBuf[128] = {0};
     snprintf(logBuf, sizeof(logBuf), "syncNodeOnAppendEntriesReplyCb error term, receive:%lu current:%lu", pMsg->term, ths->pRaftStore->currentTerm);
     syncNodeLog2(logBuf, ths);
diff --git a/source/libs/sync/src/syncIO.c b/source/libs/sync/src/syncIO.c
index 39760c32e83eddc060aeb9669fb252eaca816e54..e30a39e6342c4b7df77ee9cfdbe4f29333e36c16 100644
--- a/source/libs/sync/src/syncIO.c
+++ b/source/libs/sync/src/syncIO.c
@@ -74,7 +74,7 @@ int32_t syncIOSendMsg(const SEpSet *pEpSet, SRpcMsg *pMsg) {
   {
     syncUtilMsgNtoH(pMsg->pCont);
-    char logBuf[256];
+    char logBuf[256] = {0};
     snprintf(logBuf, sizeof(logBuf), "==syncIOSendMsg== %s:%d", pEpSet->eps[0].fqdn, pEpSet->eps[0].port);
     syncRpcMsgLog2(logBuf, pMsg);
@@ -89,7 +89,7 @@ int32_t syncIOSendMsg(const SEpSet *pEpSet, SRpcMsg *pMsg) {
 int32_t syncIOEqMsg(const SMsgCb *msgcb, SRpcMsg *pMsg) {
   int32_t ret = 0;
-  char logBuf[128];
+  char logBuf[128] = {0};
   syncRpcMsgLog2((char *)"==syncIOEqMsg==", pMsg);
   SRpcMsg *pTemp;
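Nearly every sync-module hunk in this stretch applies the same one-line hardening: stack buffers that feed snprintf() or cJSON serialization are now zero-initialized at declaration. A standalone illustration (not TDengine code) of what `= {0}` buys:

```c
#include <stdio.h>

/* An uninitialized automatic buffer holds indeterminate bytes; any path
 * that reads it before snprintf() runs, or a later refactor that drops
 * the snprintf, turns that read into undefined behavior. The `= {0}`
 * initializer guarantees the buffer starts life as an empty,
 * NUL-terminated string. */
int main(void) {
  char logBuf[128] = {0};                    /* empty string from the start */
  printf("before format: '%s'\n", logBuf);   /* safe: prints '' */
  snprintf(logBuf, sizeof(logBuf), "==example== term:%lu", 42UL);
  printf("after format:  '%s'\n", logBuf);
  return 0;
}
```

The cost is a single zero-fill of the array; in exchange, every downstream string read is well-defined even on error paths that skip the formatting call.

diff --git a/source/libs/sync/src/syncIndexMgr.c b/source/libs/sync/src/syncIndexMgr.c index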
b44b0da750a3648e0a9dd834e13d752848cd572d..4d556d21dde7e56c2048cc314f86ad0a8949bc37 100644 --- a/source/libs/sync/src/syncIndexMgr.c +++ b/source/libs/sync/src/syncIndexMgr.c @@ -76,7 +76,7 @@ SyncIndex syncIndexMgrGetIndex(SSyncIndexMgr *pSyncIndexMgr, const SRaftId *pRaf } cJSON *syncIndexMgr2Json(SSyncIndexMgr *pSyncIndexMgr) { - char u64buf[128]; + char u64buf[128] = {0}; cJSON *pRoot = cJSON_CreateObject(); if (pSyncIndexMgr != NULL) { diff --git a/source/libs/sync/src/syncMain.c b/source/libs/sync/src/syncMain.c index 5ad8df11a922e14b470b5e09a916c8cb4c70a239..620fc514c6960754762187445b167098ce4382d3 100644 --- a/source/libs/sync/src/syncMain.c +++ b/source/libs/sync/src/syncMain.c @@ -815,7 +815,7 @@ int32_t syncNodeSendMsgByInfo(const SNodeInfo* nodeInfo, SSyncNode* pSyncNode, S } cJSON* syncNode2Json(const SSyncNode* pSyncNode) { - char u64buf[128]; + char u64buf[128] = {0}; cJSON* pRoot = cJSON_CreateObject(); if (pSyncNode != NULL) { @@ -1338,7 +1338,7 @@ static int32_t syncNodeAppendNoop(SSyncNode* ths) { // on message ---- int32_t syncNodeOnPingCb(SSyncNode* ths, SyncPing* pMsg) { // log state - char logBuf[1024]; + char logBuf[1024] = {0}; snprintf(logBuf, sizeof(logBuf), "==syncNodeOnPingCb== vgId:%d, state: %d, %s, term:%lu electTimerLogicClock:%lu, " "electTimerLogicClockUser:%lu, electTimerMS:%d", diff --git a/source/libs/sync/src/syncMessage.c b/source/libs/sync/src/syncMessage.c index 57cbdaaf795b025af5f2aa36108b28845c91e1b7..fae069f2e6b13c0073c6309f889dc7f8f92c8c6e 100644 --- a/source/libs/sync/src/syncMessage.c +++ b/source/libs/sync/src/syncMessage.c @@ -215,7 +215,7 @@ SyncTimeout* syncTimeoutFromRpcMsg2(const SRpcMsg* pRpcMsg) { } cJSON* syncTimeout2Json(const SyncTimeout* pMsg) { - char u64buf[128]; + char u64buf[128] = {0}; cJSON* pRoot = cJSON_CreateObject(); if (pMsg != NULL) { @@ -442,7 +442,7 @@ SyncPing* syncPingFromRpcMsg2(const SRpcMsg* pRpcMsg) { } cJSON* syncPing2Json(const SyncPing* pMsg) { - char u64buf[128]; + char u64buf[128] = {0}; cJSON* pRoot = cJSON_CreateObject(); if (pMsg != NULL) { @@ -456,7 +456,7 @@ cJSON* syncPing2Json(const SyncPing* pMsg) { { uint64_t u64 = pMsg->srcId.addr; cJSON* pTmp = pSrcId; - char host[128]; + char host[128] = {0}; uint16_t port; syncUtilU642Addr(u64, host, sizeof(host), &port); cJSON_AddStringToObject(pTmp, "addr_host", host); @@ -471,7 +471,7 @@ cJSON* syncPing2Json(const SyncPing* pMsg) { { uint64_t u64 = pMsg->destId.addr; cJSON* pTmp = pDestId; - char host[128]; + char host[128] = {0}; uint16_t port; syncUtilU642Addr(u64, host, sizeof(host), &port); cJSON_AddStringToObject(pTmp, "addr_host", host); @@ -702,7 +702,7 @@ SyncPingReply* syncPingReplyFromRpcMsg2(const SRpcMsg* pRpcMsg) { } cJSON* syncPingReply2Json(const SyncPingReply* pMsg) { - char u64buf[128]; + char u64buf[128] = {0}; cJSON* pRoot = cJSON_CreateObject(); if (pMsg != NULL) { @@ -716,7 +716,7 @@ cJSON* syncPingReply2Json(const SyncPingReply* pMsg) { { uint64_t u64 = pMsg->srcId.addr; cJSON* pTmp = pSrcId; - char host[128]; + char host[128] = {0}; uint16_t port; syncUtilU642Addr(u64, host, sizeof(host), &port); cJSON_AddStringToObject(pTmp, "addr_host", host); @@ -731,7 +731,7 @@ cJSON* syncPingReply2Json(const SyncPingReply* pMsg) { { uint64_t u64 = pMsg->destId.addr; cJSON* pTmp = pDestId; - char host[128]; + char host[128] = {0}; uint16_t port; syncUtilU642Addr(u64, host, sizeof(host), &port); cJSON_AddStringToObject(pTmp, "addr_host", host); @@ -869,7 +869,7 @@ SyncClientRequest* syncClientRequestFromRpcMsg2(const SRpcMsg* pRpcMsg) { } cJSON* 
syncClientRequest2Json(const SyncClientRequest* pMsg) { - char u64buf[128]; + char u64buf[128] = {0}; cJSON* pRoot = cJSON_CreateObject(); if (pMsg != NULL) { @@ -995,7 +995,7 @@ SyncRequestVote* syncRequestVoteFromRpcMsg2(const SRpcMsg* pRpcMsg) { } cJSON* syncRequestVote2Json(const SyncRequestVote* pMsg) { - char u64buf[128]; + char u64buf[128] = {0}; cJSON* pRoot = cJSON_CreateObject(); if (pMsg != NULL) { @@ -1009,7 +1009,7 @@ cJSON* syncRequestVote2Json(const SyncRequestVote* pMsg) { { uint64_t u64 = pMsg->srcId.addr; cJSON* pTmp = pSrcId; - char host[128]; + char host[128] = {0}; uint16_t port; syncUtilU642Addr(u64, host, sizeof(host), &port); cJSON_AddStringToObject(pTmp, "addr_host", host); @@ -1023,7 +1023,7 @@ cJSON* syncRequestVote2Json(const SyncRequestVote* pMsg) { { uint64_t u64 = pMsg->destId.addr; cJSON* pTmp = pDestId; - char host[128]; + char host[128] = {0}; uint16_t port; syncUtilU642Addr(u64, host, sizeof(host), &port); cJSON_AddStringToObject(pTmp, "addr_host", host); @@ -1144,7 +1144,7 @@ SyncRequestVoteReply* syncRequestVoteReplyFromRpcMsg2(const SRpcMsg* pRpcMsg) { } cJSON* syncRequestVoteReply2Json(const SyncRequestVoteReply* pMsg) { - char u64buf[128]; + char u64buf[128] = {0}; cJSON* pRoot = cJSON_CreateObject(); if (pMsg != NULL) { @@ -1158,7 +1158,7 @@ cJSON* syncRequestVoteReply2Json(const SyncRequestVoteReply* pMsg) { { uint64_t u64 = pMsg->srcId.addr; cJSON* pTmp = pSrcId; - char host[128]; + char host[128] = {0}; uint16_t port; syncUtilU642Addr(u64, host, sizeof(host), &port); cJSON_AddStringToObject(pTmp, "addr_host", host); @@ -1172,7 +1172,7 @@ cJSON* syncRequestVoteReply2Json(const SyncRequestVoteReply* pMsg) { { uint64_t u64 = pMsg->destId.addr; cJSON* pTmp = pDestId; - char host[128]; + char host[128] = {0}; uint16_t port; syncUtilU642Addr(u64, host, sizeof(host), &port); cJSON_AddStringToObject(pTmp, "addr_host", host); @@ -1292,7 +1292,7 @@ SyncAppendEntries* syncAppendEntriesFromRpcMsg2(const SRpcMsg* pRpcMsg) { } cJSON* syncAppendEntries2Json(const SyncAppendEntries* pMsg) { - char u64buf[128]; + char u64buf[128] = {0}; cJSON* pRoot = cJSON_CreateObject(); if (pMsg != NULL) { @@ -1306,7 +1306,7 @@ cJSON* syncAppendEntries2Json(const SyncAppendEntries* pMsg) { { uint64_t u64 = pMsg->srcId.addr; cJSON* pTmp = pSrcId; - char host[128]; + char host[128] = {0}; uint16_t port; syncUtilU642Addr(u64, host, sizeof(host), &port); cJSON_AddStringToObject(pTmp, "addr_host", host); @@ -1321,7 +1321,7 @@ cJSON* syncAppendEntries2Json(const SyncAppendEntries* pMsg) { { uint64_t u64 = pMsg->destId.addr; cJSON* pTmp = pDestId; - char host[128]; + char host[128] = {0}; uint16_t port; syncUtilU642Addr(u64, host, sizeof(host), &port); cJSON_AddStringToObject(pTmp, "addr_host", host); @@ -1456,7 +1456,7 @@ SyncAppendEntriesReply* syncAppendEntriesReplyFromRpcMsg2(const SRpcMsg* pRpcMsg } cJSON* syncAppendEntriesReply2Json(const SyncAppendEntriesReply* pMsg) { - char u64buf[128]; + char u64buf[128] = {0}; cJSON* pRoot = cJSON_CreateObject(); if (pMsg != NULL) { @@ -1470,7 +1470,7 @@ cJSON* syncAppendEntriesReply2Json(const SyncAppendEntriesReply* pMsg) { { uint64_t u64 = pMsg->srcId.addr; cJSON* pTmp = pSrcId; - char host[128]; + char host[128] = {0}; uint16_t port; syncUtilU642Addr(u64, host, sizeof(host), &port); cJSON_AddStringToObject(pTmp, "addr_host", host); @@ -1485,7 +1485,7 @@ cJSON* syncAppendEntriesReply2Json(const SyncAppendEntriesReply* pMsg) { { uint64_t u64 = pMsg->destId.addr; cJSON* pTmp = pDestId; - char host[128]; + char host[128] = {0}; uint16_t 
port; syncUtilU642Addr(u64, host, sizeof(host), &port); cJSON_AddStringToObject(pTmp, "addr_host", host); @@ -1624,7 +1624,7 @@ void syncApplyMsg2OriginalRpcMsg(const SyncApplyMsg* pMsg, SRpcMsg* pOriginalRpc } cJSON* syncApplyMsg2Json(const SyncApplyMsg* pMsg) { - char u64buf[128]; + char u64buf[128] = {0}; cJSON* pRoot = cJSON_CreateObject(); if (pMsg != NULL) { diff --git a/source/libs/sync/src/syncRaftCfg.c b/source/libs/sync/src/syncRaftCfg.c index 70481b853ece4ba5cab45f303184042494b44609..3e1931e2c37e626b7ab049299a9b83b8a78a2cf1 100644 --- a/source/libs/sync/src/syncRaftCfg.c +++ b/source/libs/sync/src/syncRaftCfg.c @@ -28,7 +28,7 @@ SRaftCfg *raftCfgOpen(const char *path) { taosLSeekFile(pCfg->pFile, 0, SEEK_SET); - char buf[1024]; + char buf[1024] = {0}; int len = taosReadFile(pCfg->pFile, buf, sizeof(buf)); assert(len > 0); @@ -51,15 +51,15 @@ int32_t raftCfgPersist(SRaftCfg *pRaftCfg) { char *s = raftCfg2Str(pRaftCfg); taosLSeekFile(pRaftCfg->pFile, 0, SEEK_SET); - char buf[CONFIG_FILE_LEN]; + char buf[CONFIG_FILE_LEN] = {0}; memset(buf, 0, sizeof(buf)); ASSERT(strlen(s) + 1 <= CONFIG_FILE_LEN); snprintf(buf, sizeof(buf), "%s", s); int64_t ret = taosWriteFile(pRaftCfg->pFile, buf, sizeof(buf)); assert(ret == sizeof(buf)); - //int64_t ret = taosWriteFile(pRaftCfg->pFile, s, strlen(s) + 1); - //assert(ret == strlen(s) + 1); + // int64_t ret = taosWriteFile(pRaftCfg->pFile, s, strlen(s) + 1); + // assert(ret == strlen(s) + 1); taosMemoryFree(s); taosFsyncFile(pRaftCfg->pFile); @@ -67,7 +67,7 @@ int32_t raftCfgPersist(SRaftCfg *pRaftCfg) { } cJSON *syncCfg2Json(SSyncCfg *pSyncCfg) { - char u64buf[128]; + char u64buf[128] = {0}; cJSON *pRoot = cJSON_CreateObject(); if (pSyncCfg != NULL) { @@ -170,17 +170,17 @@ int32_t raftCfgCreateFile(SSyncCfg *pCfg, int8_t isStandBy, const char *path) { SRaftCfg raftCfg; raftCfg.cfg = *pCfg; raftCfg.isStandBy = isStandBy; - char * s = raftCfg2Str(&raftCfg); + char *s = raftCfg2Str(&raftCfg); - char buf[CONFIG_FILE_LEN]; + char buf[CONFIG_FILE_LEN] = {0}; memset(buf, 0, sizeof(buf)); ASSERT(strlen(s) + 1 <= CONFIG_FILE_LEN); snprintf(buf, sizeof(buf), "%s", s); int64_t ret = taosWriteFile(pFile, buf, sizeof(buf)); assert(ret == sizeof(buf)); - //int64_t ret = taosWriteFile(pFile, s, strlen(s) + 1); - //assert(ret == strlen(s) + 1); + // int64_t ret = taosWriteFile(pFile, s, strlen(s) + 1); + // assert(ret == strlen(s) + 1); taosMemoryFree(s); taosCloseFile(&pFile); diff --git a/source/libs/sync/src/syncRaftEntry.c b/source/libs/sync/src/syncRaftEntry.c index 21ee35eaf9c276636d754048095d6b2d44f18796..8755f71654382f3913a3c81b6ee1e9b6e91dbb69 100644 --- a/source/libs/sync/src/syncRaftEntry.c +++ b/source/libs/sync/src/syncRaftEntry.c @@ -107,7 +107,7 @@ SSyncRaftEntry* syncEntryDeserialize(const char* buf, uint32_t len) { } cJSON* syncEntry2Json(const SSyncRaftEntry* pEntry) { - char u64buf[128]; + char u64buf[128] = {0}; cJSON* pRoot = cJSON_CreateObject(); if (pEntry != NULL) { diff --git a/source/libs/sync/src/syncRaftLog.c b/source/libs/sync/src/syncRaftLog.c index 58fa8c2e8f1e0b4d89f003653efef4f0a3dd4b4b..a6397f8cba24694d6f36847af5e877c72bd1a920 100644 --- a/source/libs/sync/src/syncRaftLog.c +++ b/source/libs/sync/src/syncRaftLog.c @@ -190,7 +190,7 @@ SSyncRaftEntry* logStoreGetLastEntry(SSyncLogStore* pLogStore) { } cJSON* logStore2Json(SSyncLogStore* pLogStore) { - char u64buf[128]; + char u64buf[128] = {0}; SSyncLogStoreData* pData = (SSyncLogStoreData*)pLogStore->data; cJSON* pRoot = cJSON_CreateObject(); @@ -227,7 +227,7 @@ char* 
logStore2Str(SSyncLogStore* pLogStore) { } cJSON* logStoreSimple2Json(SSyncLogStore* pLogStore) { - char u64buf[128]; + char u64buf[128] = {0}; SSyncLogStoreData* pData = (SSyncLogStoreData*)pLogStore->data; cJSON* pRoot = cJSON_CreateObject(); diff --git a/source/libs/sync/src/syncRaftStore.c b/source/libs/sync/src/syncRaftStore.c index d6f2e91de7739efd535a23427168180fe2aabc86..52e815292607d69e7d364f6a11c31c184f07914a 100644 --- a/source/libs/sync/src/syncRaftStore.c +++ b/source/libs/sync/src/syncRaftStore.c @@ -34,7 +34,7 @@ SRaftStore *raftStoreOpen(const char *path) { memset(pRaftStore, 0, sizeof(*pRaftStore)); snprintf(pRaftStore->path, sizeof(pRaftStore->path), "%s", path); - char storeBuf[RAFT_STORE_BLOCK_SIZE]; + char storeBuf[RAFT_STORE_BLOCK_SIZE] = {0}; memset(storeBuf, 0, sizeof(storeBuf)); if (!raftStoreFileExist(pRaftStore->path)) { @@ -84,7 +84,7 @@ int32_t raftStorePersist(SRaftStore *pRaftStore) { assert(pRaftStore != NULL); int32_t ret; - char storeBuf[RAFT_STORE_BLOCK_SIZE]; + char storeBuf[RAFT_STORE_BLOCK_SIZE] = {0}; ret = raftStoreSerialize(pRaftStore, storeBuf, sizeof(storeBuf)); assert(ret == 0); @@ -107,7 +107,7 @@ int32_t raftStoreSerialize(SRaftStore *pRaftStore, char *buf, size_t len) { cJSON *pRoot = cJSON_CreateObject(); - char u64Buf[128]; + char u64Buf[128] = {0}; snprintf(u64Buf, sizeof(u64Buf), "%lu", pRaftStore->currentTerm); cJSON_AddStringToObject(pRoot, "current_term", u64Buf); @@ -117,7 +117,7 @@ int32_t raftStoreSerialize(SRaftStore *pRaftStore, char *buf, size_t len) { cJSON_AddNumberToObject(pRoot, "vote_for_vgid", pRaftStore->voteFor.vgId); uint64_t u64 = pRaftStore->voteFor.addr; - char host[128]; + char host[128] = {0}; uint16_t port; syncUtilU642Addr(u64, host, sizeof(host), &port); cJSON_AddStringToObject(pRoot, "addr_host", host); @@ -184,7 +184,7 @@ void raftStoreSetTerm(SRaftStore *pRaftStore, SyncTerm term) { int32_t raftStoreFromJson(SRaftStore *pRaftStore, cJSON *pJson) { return 0; } cJSON *raftStore2Json(SRaftStore *pRaftStore) { - char u64buf[128]; + char u64buf[128] = {0}; cJSON *pRoot = cJSON_CreateObject(); if (pRaftStore != NULL) { @@ -196,7 +196,7 @@ cJSON *raftStore2Json(SRaftStore *pRaftStore) { cJSON_AddStringToObject(pVoteFor, "addr", u64buf); { uint64_t u64 = pRaftStore->voteFor.addr; - char host[128]; + char host[128] = {0}; uint16_t port; syncUtilU642Addr(u64, host, sizeof(host), &port); cJSON_AddStringToObject(pVoteFor, "addr_host", host); diff --git a/source/libs/sync/src/syncRequestVote.c b/source/libs/sync/src/syncRequestVote.c index 619a1546a96ad9642272b7227466d99be833be9f..265677129213c6887012ee72da9066aad25adc09 100644 --- a/source/libs/sync/src/syncRequestVote.c +++ b/source/libs/sync/src/syncRequestVote.c @@ -44,7 +44,7 @@ int32_t syncNodeOnRequestVoteCb(SSyncNode* ths, SyncRequestVote* pMsg) { int32_t ret = 0; - char logBuf[128]; + char logBuf[128] = {0}; snprintf(logBuf, sizeof(logBuf), "==syncNodeOnRequestVoteCb== term:%lu", ths->pRaftStore->currentTerm); syncRequestVoteLog2(logBuf, pMsg); diff --git a/source/libs/sync/src/syncRequestVoteReply.c b/source/libs/sync/src/syncRequestVoteReply.c index a6348dff50132f860ada45e9cc3bddfabd6d62d0..75236aee2bcec1ca9c7ae07165c427edbc1e0a04 100644 --- a/source/libs/sync/src/syncRequestVoteReply.c +++ b/source/libs/sync/src/syncRequestVoteReply.c @@ -39,7 +39,7 @@ int32_t syncNodeOnRequestVoteReplyCb(SSyncNode* ths, SyncRequestVoteReply* pMsg) { int32_t ret = 0; - char logBuf[128]; + char logBuf[128] = {0}; snprintf(logBuf, sizeof(logBuf), "==syncNodeOnRequestVoteReplyCb== 
term:%lu", ths->pRaftStore->currentTerm); syncRequestVoteReplyLog2(logBuf, pMsg); @@ -56,7 +56,7 @@ int32_t syncNodeOnRequestVoteReplyCb(SSyncNode* ths, SyncRequestVoteReply* pMsg) // } if (pMsg->term > ths->pRaftStore->currentTerm) { - char logBuf[128]; + char logBuf[128] = {0}; snprintf(logBuf, sizeof(logBuf), "syncNodeOnRequestVoteReplyCb error term, receive:%lu current:%lu", pMsg->term, ths->pRaftStore->currentTerm); syncNodePrint2(logBuf, ths); diff --git a/source/libs/sync/src/syncUtil.c b/source/libs/sync/src/syncUtil.c index cf045a692611a64e75c2f4c595180f1e324e75f9..d754acd9f831ac18ce7e28b5ef2fda4b2d8650db 100644 --- a/source/libs/sync/src/syncUtil.c +++ b/source/libs/sync/src/syncUtil.c @@ -43,7 +43,7 @@ void syncUtilnodeInfo2EpSet(const SNodeInfo* pNodeInfo, SEpSet* pEpSet) { } void syncUtilraftId2EpSet(const SRaftId* raftId, SEpSet* pEpSet) { - char host[TSDB_FQDN_LEN]; + char host[TSDB_FQDN_LEN] = {0}; uint16_t port; syncUtilU642Addr(raftId->addr, host, sizeof(host), &port); @@ -62,7 +62,7 @@ void syncUtilraftId2EpSet(const SRaftId* raftId, SEpSet* pEpSet) { void syncUtilnodeInfo2raftId(const SNodeInfo* pNodeInfo, SyncGroupId vgId, SRaftId* raftId) { uint32_t ipv4 = taosGetIpv4FromFqdn(pNodeInfo->nodeFqdn); assert(ipv4 != 0xFFFFFFFF); - char ipbuf[128]; + char ipbuf[128] = {0}; tinet_ntoa(ipbuf, ipv4); raftId->addr = syncUtilAddr2U64(ipbuf, pNodeInfo->nodePort); raftId->vgId = vgId; @@ -106,7 +106,7 @@ int32_t syncUtilElectRandomMS(int32_t min, int32_t max) { int32_t syncUtilQuorum(int32_t replicaNum) { return replicaNum / 2 + 1; } cJSON* syncUtilNodeInfo2Json(const SNodeInfo* p) { - char u64buf[128]; + char u64buf[128] = {0}; cJSON* pRoot = cJSON_CreateObject(); cJSON_AddStringToObject(pRoot, "nodeFqdn", p->nodeFqdn); @@ -118,12 +118,12 @@ cJSON* syncUtilNodeInfo2Json(const SNodeInfo* p) { } cJSON* syncUtilRaftId2Json(const SRaftId* p) { - char u64buf[128]; + char u64buf[128] = {0}; cJSON* pRoot = cJSON_CreateObject(); snprintf(u64buf, sizeof(u64buf), "%" PRIu64 "", p->addr); cJSON_AddStringToObject(pRoot, "addr", u64buf); - char host[128]; + char host[128] = {0}; uint16_t port; syncUtilU642Addr(p->addr, host, sizeof(host), &port); cJSON_AddStringToObject(pRoot, "host", host); diff --git a/source/libs/sync/src/syncVoteMgr.c b/source/libs/sync/src/syncVoteMgr.c index 1c1f0809bd796f562e74cfd1d6b5e14015abd485..528c2f26c85c17f33f0a783def69ef9f26798b1b 100644 --- a/source/libs/sync/src/syncVoteMgr.c +++ b/source/libs/sync/src/syncVoteMgr.c @@ -90,7 +90,7 @@ void voteGrantedReset(SVotesGranted *pVotesGranted, SyncTerm term) { } cJSON *voteGranted2Json(SVotesGranted *pVotesGranted) { - char u64buf[128]; + char u64buf[128] = {0}; cJSON *pRoot = cJSON_CreateObject(); if (pVotesGranted != NULL) { @@ -220,7 +220,7 @@ void votesRespondReset(SVotesRespond *pVotesRespond, SyncTerm term) { } cJSON *votesRespond2Json(SVotesRespond *pVotesRespond) { - char u64buf[128]; + char u64buf[128] = {0}; cJSON *pRoot = cJSON_CreateObject(); if (pVotesRespond != NULL) { diff --git a/source/libs/sync/test/syncConfigChangeTest.cpp b/source/libs/sync/test/syncConfigChangeTest.cpp index 1755b7a8fd967fa5db22b4fdc523cc3f771d3c4b..1ab3ce203ad4a3968bc45ab2382108fa7d97f40c 100644 --- a/source/libs/sync/test/syncConfigChangeTest.cpp +++ b/source/libs/sync/test/syncConfigChangeTest.cpp @@ -42,7 +42,7 @@ void CommitCb(struct SSyncFSM* pFsm, const SRpcMsg* pMsg, SFsmCbMeta cbMeta) { } if (cbMeta.index > beginIndex) { - char logBuf[256]; + char logBuf[256] = {0}; snprintf(logBuf, sizeof(logBuf), "==callback== 
==CommitCb== pFsm:%p, index:%ld, isWeak:%d, code:%d, state:%d %s flag:%lu\n", pFsm, cbMeta.index, cbMeta.isWeak, cbMeta.code, cbMeta.state, syncUtilState2String(cbMeta.state), cbMeta.flag);
@@ -53,7 +53,7 @@ void CommitCb(struct SSyncFSM* pFsm, const SRpcMsg* pMsg, SFsmCbMeta cbMeta) {
 }
 void PreCommitCb(struct SSyncFSM* pFsm, const SRpcMsg* pMsg, SFsmCbMeta cbMeta) {
-  char logBuf[256];
+  char logBuf[256] = {0};
   snprintf(logBuf, sizeof(logBuf), "==callback== ==PreCommitCb== pFsm:%p, index:%ld, isWeak:%d, code:%d, state:%d %s flag:%lu\n", pFsm, cbMeta.index, cbMeta.isWeak, cbMeta.code, cbMeta.state, syncUtilState2String(cbMeta.state), cbMeta.flag);
@@ -84,14 +84,15 @@ void ReConfigCb(struct SSyncFSM* pFsm, SSyncCfg newCfg, SReConfigCbMeta cbMeta)
 SSyncFSM* createFsm() {
   SSyncFSM* pFsm = (SSyncFSM*)taosMemoryMalloc(sizeof(SSyncFSM));
+  memset(pFsm, 0, sizeof(*pFsm));
+
   pFsm->FpCommitCb = CommitCb;
   pFsm->FpPreCommitCb = PreCommitCb;
   pFsm->FpRollBackCb = RollBackCb;
   pFsm->FpGetSnapshot = GetSnapshotCb;
   pFsm->FpRestoreFinishCb = RestoreFinishCb;
-  pFsm->FpSnapshotApply = NULL;
-  pFsm->FpSnapshotRead = NULL;
+
   pFsm->FpReConfigCb = ReConfigCb;
diff --git a/source/libs/sync/test/syncSnapshotTest.cpp b/source/libs/sync/test/syncSnapshotTest.cpp
index 8ccd69890708781dbfb5b4a3ae835acc5c17d15c..820500e2d8f8b57427fec1f20741755a2ddc2d5c 100644
--- a/source/libs/sync/test/syncSnapshotTest.cpp
+++ b/source/libs/sync/test/syncSnapshotTest.cpp
@@ -75,6 +75,7 @@ int32_t GetSnapshotCb(struct SSyncFSM *pFsm, SSnapshot *pSnapshot) {
 void initFsm() {
   pFsm = (SSyncFSM *)taosMemoryMalloc(sizeof(SSyncFSM));
+  memset(pFsm, 0, sizeof(*pFsm));
   pFsm->FpCommitCb = CommitCb;
   pFsm->FpPreCommitCb = PreCommitCb;
   pFsm->FpRollBackCb = RollBackCb;
diff --git a/source/libs/transport/test/CMakeLists.txt b/source/libs/transport/test/CMakeLists.txt
index 468b70fb711a15a83c97a5a45adb68dee3d1c368..98a252e008d85b27206fa58055f757dd02d64a78 100644
--- a/source/libs/transport/test/CMakeLists.txt
+++ b/source/libs/transport/test/CMakeLists.txt
@@ -111,12 +111,10 @@ target_link_libraries (pushServer
 )
-if(NOT TD_WINDOWS)
-  add_test(
-    NAME transUT
-    COMMAND transUT
-  )
-endif(NOT TD_WINDOWS)
+add_test(
+  NAME transUT
+  COMMAND transUT
+)
 add_test(
   NAME transUtilUt
   COMMAND transportTest
diff --git a/source/util/src/terror.c b/source/util/src/terror.c
index 66d6ea3ef39c69cca349caf75c4983617e89630c..178d6e8d2b48a5adc62b6c5d83dd414050ffa9f1 100644
--- a/source/util/src/terror.c
+++ b/source/util/src/terror.c
@@ -74,6 +74,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_REPEAT_INIT, "Repeat initialization
 TAOS_DEFINE_ERROR(TSDB_CODE_DUP_KEY, "Cannot add duplicate keys to hash")
 TAOS_DEFINE_ERROR(TSDB_CODE_NEED_RETRY, "Retry needed")
 TAOS_DEFINE_ERROR(TSDB_CODE_OUT_OF_RPC_MEMORY_QUEUE, "Out of memory in rpc queue")
+TAOS_DEFINE_ERROR(TSDB_CODE_INVALID_TIMESTAMP, "Invalid timestamp format")
 TAOS_DEFINE_ERROR(TSDB_CODE_REF_NO_MEMORY, "Ref out of memory")
 TAOS_DEFINE_ERROR(TSDB_CODE_REF_FULL, "too many Ref Objs")
diff --git a/source/util/src/tlog.c b/source/util/src/tlog.c
index 94b6d0a06cc381a8a09407445e7f54d8f2ce478a..353e94a49096822fe581d7faa0df8a29a6494c12 100644
--- a/source/util/src/tlog.c
+++ b/source/util/src/tlog.c
@@ -491,7 +491,7 @@ void taosDumpData(unsigned char *msg, int32_t len) {
   if (!osLogSpaceAvailable()) return;
   taosUpdateLogNums(DEBUG_DUMP);
-  char temp[256];
+  char temp[256] = {0};
   int32_t i, pos = 0, c = 0;
   for (i = 0; i < len; ++i) {
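The tqueue.c hunks just below thread a per-item enqueue timestamp through taosReadQitemFromQset() so that the worker loop (tworker.c) can attach queue-wait time to SQueueInfo. A hedged, standalone sketch of the idea; the names here are invented, and the allocation result is checked before any field is stamped:

```c
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

/* Sketch only (invented names): stamp each node when it is allocated for
 * enqueue, hand the stamp back at dequeue so the consumer can observe how
 * long the item sat in the queue. */
typedef struct QNode {
  int64_t enqueueUs;  /* microsecond timestamp taken at enqueue time */
  void   *item;
} QNode;

static int64_t nowUs(void) {
  struct timespec ts;
  clock_gettime(CLOCK_REALTIME, &ts);
  return (int64_t)ts.tv_sec * 1000000 + ts.tv_nsec / 1000;
}

static QNode *allocItem(void *item) {
  QNode *n = calloc(1, sizeof(QNode));
  if (n == NULL) return NULL;  /* check before writing any field */
  n->item = item;
  n->enqueueUs = nowUs();
  return n;
}

int main(void) {
  int payload = 7;
  QNode *n = allocItem(&payload);
  if (n == NULL) return 1;
  /* ... later, on the worker thread ... */
  printf("item waited %lld us in queue\n", (long long)(nowUs() - n->enqueueUs));
  free(n);
  return 0;
}
```

diff --git a/source/util/src/tqueue.c b/source/util/src/tqueue.c index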
6a10794ea154306f3c26b9666482a7c3a5b61958..37935087fad693eed254549977182ccaca1085f2 100644 --- a/source/util/src/tqueue.c +++ b/source/util/src/tqueue.c @@ -26,6 +26,7 @@ typedef struct STaosQnode STaosQnode; typedef struct STaosQnode { STaosQnode *next; STaosQueue *queue; + int64_t timestamp; int32_t size; int8_t itype; int8_t reserved[3]; @@ -144,6 +145,7 @@ void *taosAllocateQitem(int32_t size, EQItype itype) { STaosQnode *pNode = taosMemoryCalloc(1, sizeof(STaosQnode) + size); pNode->size = size; pNode->itype = itype; + pNode->timestamp = taosGetTimestampUs(); if (pNode == NULL) { terrno = TSDB_CODE_OUT_OF_MEMORY; @@ -393,7 +395,7 @@ void taosRemoveFromQset(STaosQset *qset, STaosQueue *queue) { int32_t taosGetQueueNumber(STaosQset *qset) { return qset->numOfQueues; } -int32_t taosReadQitemFromQset(STaosQset *qset, void **ppItem, void **ahandle, FItem *itemFp) { +int32_t taosReadQitemFromQset(STaosQset *qset, void **ppItem, int64_t *ts, void **ahandle, FItem *itemFp) { STaosQnode *pNode = NULL; int32_t code = 0; @@ -415,6 +417,7 @@ int32_t taosReadQitemFromQset(STaosQset *qset, void **ppItem, void **ahandle, FI *ppItem = pNode->item; if (ahandle) *ahandle = queue->ahandle; if (itemFp) *itemFp = queue->itemFp; + if (ts) *ts = pNode->timestamp; queue->head = pNode->next; if (queue->head == NULL) queue->tail = NULL; diff --git a/source/util/src/tstrbuild.c b/source/util/src/tstrbuild.c index 2aae588046402e37569f5a2bde5ed5f72fa24346..c87b889e82ece82c251ddabad1964bc1f0b3ab2f 100644 --- a/source/util/src/tstrbuild.c +++ b/source/util/src/tstrbuild.c @@ -69,13 +69,13 @@ void taosStringBuilderAppendString(SStringBuilder* sb, const char* str) { void taosStringBuilderAppendNull(SStringBuilder* sb) { taosStringBuilderAppendStringLen(sb, "null", 4); } void taosStringBuilderAppendInteger(SStringBuilder* sb, int64_t v) { - char buf[64]; + char buf[64] = {0}; size_t len = snprintf(buf, sizeof(buf), "%" PRId64, v); taosStringBuilderAppendStringLen(sb, buf, TMIN(len, sizeof(buf))); } void taosStringBuilderAppendDouble(SStringBuilder* sb, double v) { - char buf[512]; + char buf[512] = {0}; size_t len = snprintf(buf, sizeof(buf), "%.9lf", v); taosStringBuilderAppendStringLen(sb, buf, TMIN(len, sizeof(buf))); } diff --git a/source/util/src/tworker.c b/source/util/src/tworker.c index dc48fc3f8d2b2e803e8f1593d5471184fa99e059..686e0696ec689b48ecff8f27c7db2eb86daa5eb2 100644 --- a/source/util/src/tworker.c +++ b/source/util/src/tworker.c @@ -75,19 +75,20 @@ static void *tQWorkerThreadFp(SQWorker *worker) { void *msg = NULL; void *ahandle = NULL; int32_t code = 0; + int64_t ts = 0; taosBlockSIGPIPE(); setThreadName(pool->name); uDebug("worker:%s:%d is running", pool->name, worker->id); while (1) { - if (taosReadQitemFromQset(pool->qset, (void **)&msg, &ahandle, &fp) == 0) { + if (taosReadQitemFromQset(pool->qset, (void **)&msg, &ts, &ahandle, &fp) == 0) { uDebug("worker:%s:%d qset:%p, got no message and exiting", pool->name, worker->id, pool->qset); break; } if (fp != NULL) { - SQueueInfo info = {.ahandle = ahandle, .workerId = worker->id, .threadNum = pool->num}; + SQueueInfo info = {.ahandle = ahandle, .workerId = worker->id, .threadNum = pool->num, .timestamp = ts}; (*fp)(&info, msg); } } diff --git a/tests/pytest/util/common.py b/tests/pytest/util/common.py index 35abc4802f9de2080a6b6a166daf833c9cf04578..8c791efbc644924cfe4c1d85d6422bb671fd1216 100644 --- a/tests/pytest/util/common.py +++ b/tests/pytest/util/common.py @@ -14,23 +14,93 @@ import random import string from util.sql import tdSql - +from 
util.dnodes import tdDnodes +import requests +import time +import socket class TDCom: def init(self, conn, logSql): tdSql.init(conn.cursor(), logSql) - def cleanTb(self): + def preDefine(self): + header = {'Authorization': 'Basic cm9vdDp0YW9zZGF0YQ=='} + sql_url = "http://127.0.0.1:6041/rest/sql" + sqlt_url = "http://127.0.0.1:6041/rest/sqlt" + sqlutc_url = "http://127.0.0.1:6041/rest/sqlutc" + influx_url = "http://127.0.0.1:6041/influxdb/v1/write" + telnet_url = "http://127.0.0.1:6041/opentsdb/v1/put/telnet" + return header, sql_url, sqlt_url, sqlutc_url, influx_url, telnet_url + + def genTcpParam(self): + MaxBytes = 1024*1024 + host ='127.0.0.1' + port = 6046 + return MaxBytes, host, port + + def tcpClient(self, input): + MaxBytes = tdCom.genTcpParam()[0] + host = tdCom.genTcpParam()[1] + port = tdCom.genTcpParam()[2] + sock = socket.socket(socket.AF_INET,socket.SOCK_STREAM) + sock.connect((host, port)) + sock.send(input.encode()) + sock.close() + + def restApiPost(self, sql): + requests.post(self.preDefine()[1], sql.encode("utf-8"), headers = self.preDefine()[0]) + + def createDb(self, dbname="test", db_update_tag=0, api_type="taosc"): + if api_type == "taosc": + if db_update_tag == 0: + tdSql.execute(f"drop database if exists {dbname}") + tdSql.execute(f"create database if not exists {dbname} precision 'us'") + else: + tdSql.execute(f"drop database if exists {dbname}") + tdSql.execute(f"create database if not exists {dbname} precision 'us' update 1") + elif api_type == "restful": + if db_update_tag == 0: + self.restApiPost(f"drop database if exists {dbname}") + self.restApiPost(f"create database if not exists {dbname} precision 'us'") + else: + self.restApiPost(f"drop database if exists {dbname}") + self.restApiPost(f"create database if not exists {dbname} precision 'us' update 1") + tdSql.execute(f'use {dbname}') + + def genUrl(self, url_type, dbname, precision): + if url_type == "influxdb": + if precision is None: + url = self.preDefine()[4] + "?" + "db=" + dbname + else: + url = self.preDefine()[4] + "?" 
+ "db=" + dbname + "&precision=" + precision + elif url_type == "telnet": + url = self.preDefine()[5] + "/" + dbname + else: + url = self.preDefine()[1] + return url + + def schemalessApiPost(self, sql, url_type="influxdb", dbname="test", precision=None): + if url_type == "influxdb": + url = self.genUrl(url_type, dbname, precision) + elif url_type == "telnet": + url = self.genUrl(url_type, dbname, precision) + res = requests.post(url, sql.encode("utf-8"), headers = self.preDefine()[0]) + return res + + def cleanTb(self, type="taosc"): + ''' + type is taosc or restful + ''' query_sql = "show stables" res_row_list = tdSql.query(query_sql, True) stb_list = map(lambda x: x[0], res_row_list) for stb in stb_list: - tdSql.execute(f'drop table if exists {stb}') + if type == "taosc": + tdSql.execute(f'drop table if exists {stb}') + elif type == "restful": + self.restApiPost(f"drop table if exists {stb}") - query_sql = "show tables" - res_row_list = tdSql.query(query_sql, True) - tb_list = map(lambda x: x[0], res_row_list) - for tb in tb_list: - tdSql.execute(f'drop table if exists {tb}') + def dateToTs(self, datetime_input): + return int(time.mktime(time.strptime(datetime_input, "%Y-%m-%d %H:%M:%S.%f"))) def getLongName(self, len, mode = "mixed"): """ @@ -47,6 +117,52 @@ class TDCom: chars = ''.join(random.choice(string.ascii_letters.lower() + string.digits) for i in range(len)) return chars + def restartTaosd(self, index=1, db_name="db"): + tdDnodes.stop(index) + tdDnodes.startWithoutSleep(index) + tdSql.execute(f"use {db_name}") + + def typeof(self, variate): + v_type=None + if type(variate) is int: + v_type = "int" + elif type(variate) is str: + v_type = "str" + elif type(variate) is float: + v_type = "float" + elif type(variate) is bool: + v_type = "bool" + elif type(variate) is list: + v_type = "list" + elif type(variate) is tuple: + v_type = "tuple" + elif type(variate) is dict: + v_type = "dict" + elif type(variate) is set: + v_type = "set" + return v_type + + def splitNumLetter(self, input_mix_str): + nums, letters = "", "" + for i in input_mix_str: + if i.isdigit(): + nums += i + elif i.isspace(): + pass + else: + letters += i + return nums, letters + + def smlPass(self, func): + smlChildTableName = "no" + def wrapper(*args): + # if tdSql.getVariable("smlChildTableName")[0].upper() == "ID": + if smlChildTableName.upper() == "ID": + return func(*args) + else: + pass + return wrapper + def close(self): self.cursor.close() diff --git a/tests/pytest/util/dnodes.py b/tests/pytest/util/dnodes.py index e8d01de3e5a6a5943472778453d3be28f758f18c..2e11b93e5f4f5fc2ec1edd1fdf73f0a4128d6143 100644 --- a/tests/pytest/util/dnodes.py +++ b/tests/pytest/util/dnodes.py @@ -169,8 +169,13 @@ class TDDnode: self.cfgDict.update({option: value}) def remoteExec(self, updateCfgDict, execCmd): - remote_conn = Connection(self.remoteIP, port=22, user='root', connect_kwargs={'password':'123456'}) - remote_top_dir = '~/test' + try: + config = eval(self.remoteIP) + remote_conn = Connection(host=config["host"], port=config["port"], user=config["user"], connect_kwargs={'password':config["password"]}) + remote_top_dir = config["path"] + except Exception as r: + remote_conn = Connection(host=self.remoteIP, port=22, user='root', connect_kwargs={'password':'123456'}) + remote_top_dir = '~/test' valgrindStr = '' if (self.valgrind==1): valgrindStr = '-g' @@ -489,6 +494,7 @@ class TDDnodes: self.simDeployed = False self.testCluster = False self.valgrind = 0 + self.killValgrind = 1 def init(self, path, remoteIP = ""): psCmd = "ps 
-ef|grep -w taosd| grep -v grep| grep -v defunct | awk '{print $2}'" @@ -500,14 +506,15 @@ class TDDnodes: processID = subprocess.check_output( psCmd, shell=True).decode("utf-8") - psCmd = "ps -ef|grep -w valgrind.bin| grep -v grep | awk '{print $2}'" - processID = subprocess.check_output(psCmd, shell=True).decode("utf-8") - while(processID): - killCmd = "kill -9 %s > /dev/null 2>&1" % processID - os.system(killCmd) - time.sleep(1) - processID = subprocess.check_output( - psCmd, shell=True).decode("utf-8") + if self.killValgrind == 1: + psCmd = "ps -ef|grep -w valgrind.bin| grep -v grep | awk '{print $2}'" + processID = subprocess.check_output(psCmd, shell=True).decode("utf-8") + while(processID): + killCmd = "kill -9 %s > /dev/null 2>&1" % processID + os.system(killCmd) + time.sleep(1) + processID = subprocess.check_output( + psCmd, shell=True).decode("utf-8") binPath = self.dnodes[0].getPath() + "/../../../" # tdLog.debug("binPath %s" % (binPath)) @@ -544,6 +551,9 @@ class TDDnodes: def setValgrind(self, value): self.valgrind = value + def setKillValgrind(self, value): + self.killValgrind = value + def deploy(self, index, *updatecfgDict): self.sim.setTestCluster(self.testCluster) @@ -617,14 +627,15 @@ class TDDnodes: processID = subprocess.check_output( psCmd, shell=True).decode("utf-8") - psCmd = "ps -ef|grep -w valgrind.bin| grep -v grep | awk '{print $2}'" - processID = subprocess.check_output(psCmd, shell=True).decode("utf-8") - while(processID): - killCmd = "kill -TERM %s > /dev/null 2>&1" % processID - os.system(killCmd) - time.sleep(1) - processID = subprocess.check_output( - psCmd, shell=True).decode("utf-8") + if self.killValgrind == 1: + psCmd = "ps -ef|grep -w valgrind.bin| grep -v grep | awk '{print $2}'" + processID = subprocess.check_output(psCmd, shell=True).decode("utf-8") + while(processID): + killCmd = "kill -TERM %s > /dev/null 2>&1" % processID + os.system(killCmd) + time.sleep(1) + processID = subprocess.check_output( + psCmd, shell=True).decode("utf-8") # if os.system(cmd) != 0 : # tdLog.exit(cmd) diff --git a/tests/pytest/util/types.py b/tests/pytest/util/types.py new file mode 100644 index 0000000000000000000000000000000000000000..218a4770269328a5ef7161cc56c0e0dc0c420f73 --- /dev/null +++ b/tests/pytest/util/types.py @@ -0,0 +1,38 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+from enum import Enum
+
+class TDSmlProtocolType(Enum):
+    '''
+    Schemaless Protocol types
+    0 - unknown
+    1 - InfluxDB Line Protocol
+    2 - OpenTSDB Telnet Protocol
+    3 - OpenTSDB JSON Protocol
+    '''
+    UNKNOWN = 0
+    LINE = 1
+    TELNET = 2
+    JSON = 3
+
+class TDSmlTimestampType(Enum):
+    NOT_CONFIGURED = 0
+    HOUR = 1
+    MINUTE = 2
+    SECOND = 3
+    MILLI_SECOND = 4
+    MICRO_SECOND = 5
+    NANO_SECOND = 6
+
+
diff --git a/tests/script/general/alter/table.sim b/tests/script/general/alter/table.sim
index cd0397760276c775d170e90831f6674880cb8f81..9ca2f60bdc37f827e0832dc59399bf73732d7748 100644
--- a/tests/script/general/alter/table.sim
+++ b/tests/script/general/alter/table.sim
@@ -252,6 +252,7 @@ endi
 print ======== step8
 sql alter table tb add column h binary(10)
+sql select * from tb
 sql describe tb
 if $data00 != ts then
   return -1
@@ -304,7 +305,7 @@ endi
 if $data80 != h then
   return -1
 endi
-if $data81 != BINARY then
+if $data81 != VARCHAR then
   return -1
 endi
 if $data82 != 10 then
@@ -371,7 +372,7 @@ endi
 if $data80 != h then
   return -1
 endi
-if $data81 != BINARY then
+if $data81 != VARCHAR then
   return -1
 endi
 if $data82 != 10 then
@@ -447,7 +448,7 @@ endi
 if $data70 != h then
   return -1
 endi
-if $data71 != BINARY then
+if $data71 != VARCHAR then
   return -1
 endi
 if $data72 != 10 then
@@ -496,7 +497,7 @@ endi
 if $data60 != h then
   return -1
 endi
-if $data61 != BINARY then
+if $data61 != VARCHAR then
   return -1
 endi
 if $data62 != 10 then
@@ -539,7 +540,7 @@ endi
 if $data50 != h then
   return -1
 endi
-if $data51 != BINARY then
+if $data51 != VARCHAR then
   return -1
 endi
 if $data52 != 10 then
@@ -576,7 +577,7 @@ endi
 if $data40 != h then
   return -1
 endi
-if $data41 != BINARY then
+if $data41 != VARCHAR then
   return -1
 endi
 if $data42 != 10 then
@@ -607,7 +608,7 @@ endi
 if $data30 != h then
   return -1
 endi
-if $data31 != BINARY then
+if $data31 != VARCHAR then
   return -1
 endi
 if $data32 != 10 then
@@ -632,7 +633,7 @@ endi
 if $data20 != h then
   return -1
 endi
-if $data21 != BINARY then
+if $data21 != VARCHAR then
   return -1
 endi
 if $data22 != 10 then
diff --git a/tests/script/jenkins/basic.txt b/tests/script/jenkins/basic.txt
index 217c23158dd08739caea79d5b74679d4da291968..b2ffe83b0b091940c0bcf4c947893e40734c1e90 100644
--- a/tests/script/jenkins/basic.txt
+++ b/tests/script/jenkins/basic.txt
@@ -104,6 +104,10 @@
 ./test.sh -f tsim/stable/tag_modify.sim
 ./test.sh -f tsim/stable/tag_rename.sim
 ./test.sh -f tsim/stable/alter_comment.sim
+./test.sh -f tsim/stable/alter_count.sim
+./test.sh -f tsim/stable/alter_insert1.sim
+./test.sh -f tsim/stable/alter_insert2.sim
+./test.sh -f tsim/stable/alter_import.sim
 # --- for multi process mode
 ./test.sh -f tsim/user/basic1.sim -m
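The two enums added in the new types.py above mirror the protocol and timestamp-precision arguments of the TDengine C client's schemaless API, which the new schemaless test helpers ultimately drive. A hedged example of such a call, assuming the constants and signature published in the client's taos.h; the connection parameters are placeholders:

```c
#include <stdio.h>
#include "taos.h"  /* TDengine client header; assumed available on the include path */

/* Hedged sketch: TSDB_SML_TELNET_PROTOCOL and TSDB_SML_TIMESTAMP_SECONDS
 * correspond to TDSmlProtocolType.TELNET and TDSmlTimestampType.SECOND in
 * the Python enums above. Host/user/password/db are placeholders. */
int main(void) {
  TAOS *conn = taos_connect("localhost", "root", "taosdata", "test", 0);
  if (conn == NULL) return 1;

  /* one OpenTSDB telnet line: metric, timestamp (s), value, tag=value */
  char *lines[] = {"sys.cpu.usage 1626006833 99.5 host=web01"};
  TAOS_RES *res = taos_schemaless_insert(conn, lines, 1,
                                         TSDB_SML_TELNET_PROTOCOL,
                                         TSDB_SML_TIMESTAMP_SECONDS);
  if (taos_errno(res) != 0) {
    fprintf(stderr, "schemaless insert failed: %s\n", taos_errstr(res));
  }
  taos_free_result(res);
  taos_close(conn);
  return 0;
}
```

diff --git a/tests/script/tsim/insert/update0.sim b/tests/script/tsim/insert/update0.sim
index 3cb5e4008e3a57e3178721b7e3f5458ef07be52b..0ba3e98c913e1c37c50c351bed7d7385a1cad0d3 100644
--- a/tests/script/tsim/insert/update0.sim
+++ b/tests/script/tsim/insert/update0.sim
@@ -9,7 +9,7 @@ sql create database d0 keep 365000d,365000d,365000d
 sql use d0
 print =============== create super table and register rsma
-sql create table if not exists stb (ts timestamp, c1 int) tags (city binary(20),district binary(20)) rollup(min) file_factor 0.1 delay 2;
+sql create table if not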
exists stb (ts timestamp, c1 int) tags (city binary(20),district binary(20)) rollup(min) file_factor 0.1; sql show stables if $rows != 1 then diff --git a/tests/script/tsim/sma/rsmaCreateInsertQuery.sim b/tests/script/tsim/sma/rsmaCreateInsertQuery.sim index 5d9425e5064d3fc65038c174dae109cc6283991e..f929dda18cb1b287c3ffe05487464624ff0eebc5 100644 --- a/tests/script/tsim/sma/rsmaCreateInsertQuery.sim +++ b/tests/script/tsim/sma/rsmaCreateInsertQuery.sim @@ -9,7 +9,7 @@ sql create database d0 retentions 15s:7d,1m:21d,15m:365d; sql use d0 print =============== create super table and register rsma -sql create table if not exists stb (ts timestamp, c1 int) tags (city binary(20),district binary(20)) rollup(min) file_factor 0.1 delay 2; +sql create table if not exists stb (ts timestamp, c1 int) tags (city binary(20),district binary(20)) rollup(min) file_factor 0.1; sql show stables if $rows != 1 then diff --git a/tests/script/tsim/sma/tsmaCreateInsertData.sim b/tests/script/tsim/sma/tsmaCreateInsertData.sim index 07c5adef5d8114e65bb82b66e334b30c3b59ad5b..0202c53800260b4974cabe10ff4cbd9f180fd590 100644 --- a/tests/script/tsim/sma/tsmaCreateInsertData.sim +++ b/tests/script/tsim/sma/tsmaCreateInsertData.sim @@ -5,7 +5,7 @@ sleep 50 sql connect print =============== create database -sql create database d1 +sql create database d1 vgroups 1 sql use d1 print =============== create super table, include column type for count/sum/min/max/first diff --git a/tests/script/general/alter/import.sim b/tests/script/tsim/stable/alter_import.sim similarity index 84% rename from tests/script/general/alter/import.sim rename to tests/script/tsim/stable/alter_import.sim index 175e084b7f1aa73a1c8b599752fd0b7de59efda7..cdd7b60e14fc5e8f46f3413e9037a95f534718e1 100644 --- a/tests/script/general/alter/import.sim +++ b/tests/script/tsim/stable/alter_import.sim @@ -29,14 +29,14 @@ if $data00 != 3 then endi print ========= step3 -sql import into tb values(now-23d, -23, 0) -sql import into tb values(now-21d, -21, 0) +sql insert into tb values(now-23d, -23, 0) +sql insert into tb values(now-21d, -21, 0) sql select count(b) from tb if $data00 != 5 then return -1 endi -sql import into tb values(now-29d, -29, 0) +sql insert into tb values(now-29d, -29, 0) sql select count(b) from tb if $data00 != 6 then return -1 diff --git a/tests/script/general/alter/insert1.sim b/tests/script/tsim/stable/alter_insert1.sim similarity index 100% rename from tests/script/general/alter/insert1.sim rename to tests/script/tsim/stable/alter_insert1.sim diff --git a/tests/script/general/alter/insert2.sim b/tests/script/tsim/stable/alter_insert2.sim similarity index 100% rename from tests/script/general/alter/insert2.sim rename to tests/script/tsim/stable/alter_insert2.sim diff --git a/tests/script/general/alter/metrics.sim b/tests/script/tsim/stable/alter_metrics.sim similarity index 97% rename from tests/script/general/alter/metrics.sim rename to tests/script/tsim/stable/alter_metrics.sim index ec8c980c16adcf512975e54fa492d3c22b12c195..f33246dfe2d14c092cb9483ce31c0788da9e5397 100644 --- a/tests/script/general/alter/metrics.sim +++ b/tests/script/tsim/stable/alter_metrics.sim @@ -347,7 +347,7 @@ endi if $data80 != h then return -1 endi -if $data81 != BINARY then +if $data81 != VARCHAR then return -1 endi if $data82 != 10 then @@ -363,9 +363,8 @@ endi print ======== step9 print ======== step10 system sh/exec.sh -n dnode1 -s stop -x SIGINT -sleep 3000 system sh/exec.sh -n dnode1 -s start -sleep 3000 +sql connect sql use d2 sql describe tb @@ -420,7 
+419,7 @@ endi if $data80 != h then return -1 endi -if $data81 != BINARY then +if $data81 != VARCHAR then return -1 endi if $data82 != 10 then @@ -502,7 +501,7 @@ endi if $data70 != h then return -1 endi -if $data71 != BINARY then +if $data71 != VARCHAR then return -1 endi if $data72 != 10 then @@ -557,7 +556,7 @@ endi if $data60 != h then return -1 endi -if $data61 != BINARY then +if $data61 != VARCHAR then return -1 endi if $data62 != 10 then @@ -606,7 +605,7 @@ endi if $data50 != h then return -1 endi -if $data51 != BINARY then +if $data51 != VARCHAR then return -1 endi if $data52 != 10 then @@ -649,7 +648,7 @@ endi if $data40 != h then return -1 endi -if $data41 != BINARY then +if $data41 != VARCHAR then return -1 endi if $data42 != 10 then @@ -686,7 +685,7 @@ endi if $data30 != h then return -1 endi -if $data31 != BINARY then +if $data31 != VARCHAR then return -1 endi if $data32 != 10 then @@ -717,7 +716,7 @@ endi if $data20 != h then return -1 endi -if $data21 != BINARY then +if $data21 != VARCHAR then return -1 endi if $data22 != 10 then @@ -758,7 +757,7 @@ endi print ======= over sql drop database d2 sql show databases -if $rows != 0 then +if $rows != 2 then return -1 endi diff --git a/tests/script/tsim/stable/column_modify.sim b/tests/script/tsim/stable/column_modify.sim index 16e7ff8f67f1d9818947c54f6929728b086f44ab..e2752ccf951cef30587aa1f604f92cbbaa265b85 100644 --- a/tests/script/tsim/stable/column_modify.sim +++ b/tests/script/tsim/stable/column_modify.sim @@ -79,28 +79,31 @@ system sh/exec.sh -n dnode1 -s stop -x SIGINT system sh/exec.sh -n dnode1 -s start sql connect -sql select * from db.ctb + +sql select * from db.stb +print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6] +print $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6] if $rows != 2 then return -1 endi -#if $data[0][1] != 1 then -# return -1 -#endi -#if $data[0][2] != 1234 then -# return -1 -#endi -#if $data[0][3] != 101 then -# return -1 -#endi -#if $data[1][1] != 1 then -# return -1 -#endi -#if $data[1][2] != 12345 then -# return -1 -#endi -#if $data[1][3] != 101 then -# return -1 -#endi +if $data[0][1] != 1 then + return -1 +endi +if $data[0][2] != 1234 then + return -1 +endi +if $data[0][3] != 101 then + return -1 +endi +if $data[1][1] != 1 then + return -1 +endi +if $data[1][2] != 12345 then + return -1 +endi +if $data[1][3] != 101 then + return -1 +endi system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/script/tsim/stable/disk.sim b/tests/script/tsim/stable/disk.sim index eeaa8293a505a7af3b774eb2e0d3b7fab5b6fe49..ff734b4234263ca71253dee97eaa0158fe5221c4 100644 --- a/tests/script/tsim/stable/disk.sim +++ b/tests/script/tsim/stable/disk.sim @@ -49,10 +49,9 @@ if $data00 != $totalNum then return -1 endi -sleep 1000 system sh/exec.sh -n dnode1 -s stop -x SIGINT -sleep 1000 system sh/exec.sh -n dnode1 -s start +sql connect sql use $db sql show vgroups diff --git a/tests/script/tsim/stable/metrics.sim b/tests/script/tsim/stable/metrics.sim index 26323b4a92539ed62fdd060cc7e73dfafec70101..c652670d7f4e904461adf33af8f1d10fc9e9e319 100644 --- a/tests/script/tsim/stable/metrics.sim +++ b/tests/script/tsim/stable/metrics.sim @@ -93,9 +93,6 @@ $i = 2 $tb = $tbPrefix . 
$i sql insert into $tb values (now + 1m , 1 ) -print sleep 2000 -sleep 2000 - print =============== step6 # sql select * from $mt diff --git a/tests/script/tsim/stream/session0.sim b/tests/script/tsim/stream/session0.sim index 46b343632abd0347502b86e0978f2afd22c139a8..41a8b3371002848dd6909ab1c681bde0628e6324 100644 --- a/tests/script/tsim/stream/session0.sim +++ b/tests/script/tsim/stream/session0.sim @@ -23,7 +23,7 @@ sql insert into t1 values(1648791223001,10,2,3,1.1,2); sql insert into t1 values(1648791233002,3,2,3,2.1,3); sql insert into t1 values(1648791243003,NULL,NULL,NULL,NULL,4); sql insert into t1 values(1648791213002,NULL,NULL,NULL,NULL,5) (1648791233012,NULL,NULL,NULL,NULL,6); - +sleep 300 sql select * from streamt order by s desc; # row 0 @@ -115,7 +115,7 @@ sql insert into t1 values(1648791233002,3,2,3,2.1,9); sql insert into t1 values(1648791243003,4,2,3,3.1,10); sql insert into t1 values(1648791213002,4,2,3,4.1,11) ; sql insert into t1 values(1648791213002,4,2,3,4.1,12) (1648791223009,4,2,3,4.1,13); - +sleep 300 sql select * from streamt order by s desc ; # row 0 diff --git a/tests/script/tsim/stream/session1.sim b/tests/script/tsim/stream/session1.sim index a44639ba7a5e17e51e6ac8190d991bfd2edf1a9e..fb31818f98138948ca91758e14de85146b9940d5 100644 --- a/tests/script/tsim/stream/session1.sim +++ b/tests/script/tsim/stream/session1.sim @@ -22,7 +22,7 @@ sql insert into t1 values(1648791210000,1,1,1,1.1,1); sql insert into t1 values(1648791220000,2,2,2,2.1,2); sql insert into t1 values(1648791230000,3,3,3,3.1,3); sql insert into t1 values(1648791240000,4,4,4,4.1,4); - +sleep 300 sql select * from streamt order by s desc; # row 0 @@ -50,7 +50,7 @@ sql insert into t1 values(1648791250005,5,5,5,5.1,5); sql insert into t1 values(1648791260006,6,6,6,6.1,6); sql insert into t1 values(1648791270007,7,7,7,7.1,7); sql insert into t1 values(1648791240005,5,5,5,5.1,8) (1648791250006,6,6,6,6.1,9); - +sleep 300 sql select * from streamt order by s desc; # row 0 @@ -100,7 +100,7 @@ sql insert into t1 values(1648791260007,7,7,7,7.1,12) (1648791290008,7,7,7,7.1,1 sql insert into t1 values(1648791500000,7,7,7,7.1,15) (1648791520000,8,8,8,8.1,16) (1648791540000,8,8,8,8.1,17); sql insert into t1 values(1648791530000,8,8,8,8.1,18); sql insert into t1 values(1648791220000,10,10,10,10.1,19) (1648791290008,2,2,2,2.1,20) (1648791540000,17,17,17,17.1,21) (1648791500001,22,22,22,22.1,22); - +sleep 300 sql select * from streamt order by s desc; # row 0 diff --git a/tests/script/tsim/stream/triggerInterval0.sim b/tests/script/tsim/stream/triggerInterval0.sim index 6f1d8f4b7bf88913239ccf1cc3a89fb1dbdf6bc9..756f591f3ff8a58586cc77ba5a95acc1f31d46b0 100644 --- a/tests/script/tsim/stream/triggerInterval0.sim +++ b/tests/script/tsim/stream/triggerInterval0.sim @@ -94,92 +94,4 @@ if $data11 != 5 then return -1 endi -sql create table t2(ts timestamp, a int, b int , c int, d double); -sql create stream streams2 trigger window_close watermark 20s into streamt2 as select _wstartts, count(*) c1, count(d) c2 , sum(a) c3 , max(b) c4, min(c) c5 from t2 interval(10s); -sql insert into t2 values(1648791213000,1,2,3,1.0); -sql insert into t2 values(1648791239999,1,2,3,1.0); -sleep 300 -sql select * from streamt2; -if $rows != 0 then - print ======$rows - return -1 -endi - -sql insert into t2 values(1648791240000,1,2,3,1.0); -sleep 300 -sql select * from streamt2; -if $rows != 1 then - print ======$rows - return -1 -endi -if $data01 != 1 then - print ======$data01 - return -1 -endi - -sql insert into t2 
values(1648791250001,1,2,3,1.0) (1648791250002,1,2,3,1.0) (1648791250003,1,2,3,1.0) (1648791240000,1,2,3,1.0); -sleep 300 -sql select * from streamt2; -if $rows != 1 then - print ======$rows - return -1 -endi -if $data01 != 1 then - print ======$data01 - return -1 -endi - -sql insert into t2 values(1648791280000,1,2,3,1.0); -sleep 300 -sql select * from streamt2; -if $rows != 4 then - print ======$rows - return -1 -endi -if $data01 != 1 then - print ======$data01 - return -1 -endi -if $data11 != 1 then - print ======$data11 - return -1 -endi -if $data21 != 1 then - print ======$data21 - return -1 -endi -if $data31 != 3 then - print ======$data31 - return -1 -endi - -sql insert into t2 values(1648791250001,1,2,3,1.0) (1648791250002,1,2,3,1.0) (1648791250003,1,2,3,1.0) (1648791280000,1,2,3,1.0) (1648791280001,1,2,3,1.0) (1648791280002,1,2,3,1.0) (1648791310000,1,2,3,1.0) (1648791280001,1,2,3,1.0); -sleep 300 -sql select * from streamt2; - -if $rows != 5 then - print ======$rows - return -1 -endi -if $data01 != 1 then - print ======$data01 - return -1 -endi -if $data11 != 1 then - print ======$data11 - return -1 -endi -if $data21 != 1 then - print ======$data21 - return -1 -endi -if $data31 != 3 then - print ======$data31 - return -1 -endi -if $data41 != 3 then - print ======$data31 - return -1 -endi - system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/system-test/1-insert/opentsdb_telnet_line_taosc_insert.py b/tests/system-test/1-insert/opentsdb_telnet_line_taosc_insert.py new file mode 100644 index 0000000000000000000000000000000000000000..4c56511d2717167d243e162776d4ffe75fb056f5 --- /dev/null +++ b/tests/system-test/1-insert/opentsdb_telnet_line_taosc_insert.py @@ -0,0 +1,1489 @@ +################################################################### +# Copyright (c) 2021 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import traceback +import random +import sys +import datetime +from taos.error import SchemalessError +import time +import numpy as np +from util.log import * +from util.cases import * +from util.sql import * +from util.common import tdCom +from util.types import TDSmlProtocolType, TDSmlTimestampType +import threading + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + self._conn = conn + self.smlChildTableName_value = "id" + + def createDb(self, name="test", db_update_tag=0, protocol=None): + if protocol == "telnet-tcp": + name = "opentsdb_telnet" + + if db_update_tag == 0: + tdSql.execute(f"drop database if exists {name}") + tdSql.execute(f"create database if not exists {name} precision 'ms'") + else: + tdSql.execute(f"drop database if exists {name}") + tdSql.execute(f"create database if not exists {name} precision 'ms' update 1") + tdSql.execute(f'use {name}') + + def timeTrans(self, time_value, ts_type): + if int(time_value) == 0: + ts = time.time() + else: + if ts_type == TDSmlTimestampType.MILLI_SECOND.value or ts_type is None: + ts = int(''.join(list(filter(str.isdigit, time_value))))/1000 + elif ts_type == TDSmlTimestampType.SECOND.value: + ts = int(''.join(list(filter(str.isdigit, time_value))))/1 + ulsec = repr(ts).split('.')[1][:6] + if len(ulsec) < 6 and int(ulsec) != 0: + ulsec = int(ulsec) * (10 ** (6 - len(ulsec))) + elif int(ulsec) == 0: + # ulsec is a digit string of zeros here; string repetition pads it out + ulsec *= 6 + # * the following two lines were added for tsCheckCase + td_ts = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(ts)) + return td_ts + #td_ts = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(ts)) + td_ts = time.strftime("%Y-%m-%d %H:%M:%S.{}".format(ulsec), time.localtime(ts)) + return td_ts + #return repr(datetime.datetime.strptime(td_ts, "%Y-%m-%d %H:%M:%S.%f")) + + def dateToTs(self, datetime_input): + return int(time.mktime(time.strptime(datetime_input, "%Y-%m-%d %H:%M:%S.%f"))) + + def getTdTypeValue(self, value, vtype="col"): + if vtype == "col": + if value.lower().endswith("i8"): + td_type = "TINYINT" + td_tag_value = ''.join(list(value)[:-2]) + elif value.lower().endswith("i16"): + td_type = "SMALLINT" + td_tag_value = ''.join(list(value)[:-3]) + elif value.lower().endswith("i32"): + td_type = "INT" + td_tag_value = ''.join(list(value)[:-3]) + elif value.lower().endswith("i64"): + td_type = "BIGINT" + td_tag_value = ''.join(list(value)[:-3]) + elif value.lower().endswith("u64"): + td_type = "BIGINT UNSIGNED" + td_tag_value = ''.join(list(value)[:-3]) + elif value.lower().endswith("f32"): + td_type = "FLOAT" + td_tag_value = ''.join(list(value)[:-3]) + td_tag_value = '{}'.format(np.float32(td_tag_value)) + elif value.lower().endswith("f64"): + td_type = "DOUBLE" + td_tag_value = ''.join(list(value)[:-3]) + if "e" in value.lower(): + td_tag_value = str(float(td_tag_value)) + elif value.lower().startswith('l"'): + td_type = "NCHAR" + td_tag_value = ''.join(list(value)[2:-1]) + elif value.startswith('"') and value.endswith('"'): + td_type = "BINARY" + td_tag_value = ''.join(list(value)[1:-1]) + elif value.lower() == "t" or value.lower() == "true": + td_type = "BOOL" + td_tag_value = "True" + elif value.lower() == "f" or value.lower() == "false": + td_type = "BOOL" + td_tag_value = 
"False" + elif value.isdigit(): + td_type = "DOUBLE" + td_tag_value = str(float(value)) + else: + td_type = "DOUBLE" + if "e" in value.lower(): + td_tag_value = str(float(value)) + else: + td_tag_value = value + elif vtype == "tag": + td_type = "NCHAR" + td_tag_value = str(value) + return td_type, td_tag_value + + def typeTrans(self, type_list): + type_num_list = [] + for tp in type_list: + if tp.upper() == "TIMESTAMP": + type_num_list.append(9) + elif tp.upper() == "BOOL": + type_num_list.append(1) + elif tp.upper() == "TINYINT": + type_num_list.append(2) + elif tp.upper() == "SMALLINT": + type_num_list.append(3) + elif tp.upper() == "INT": + type_num_list.append(4) + elif tp.upper() == "BIGINT": + type_num_list.append(5) + elif tp.upper() == "FLOAT": + type_num_list.append(6) + elif tp.upper() == "DOUBLE": + type_num_list.append(7) + elif tp.upper() == "BINARY": + type_num_list.append(8) + elif tp.upper() == "NCHAR": + type_num_list.append(10) + elif tp.upper() == "BIGINT UNSIGNED": + type_num_list.append(14) + return type_num_list + + def inputHandle(self, input_sql, ts_type, protocol=None): + input_sql_split_list = input_sql.split(" ") + if protocol == "telnet-tcp": + input_sql_split_list.pop(0) + stb_name = input_sql_split_list[0] + stb_tag_list = input_sql_split_list[3:] + stb_tag_list[-1] = stb_tag_list[-1].strip() + stb_col_value = input_sql_split_list[2] + ts_value = self.timeTrans(input_sql_split_list[1], ts_type) + + tag_name_list = [] + tag_value_list = [] + td_tag_value_list = [] + td_tag_type_list = [] + + col_name_list = [] + col_value_list = [] + td_col_value_list = [] + td_col_type_list = [] + + for elm in stb_tag_list: + if self.smlChildTableName_value == "ID": + if "id=" in elm.lower(): + tb_name = elm.split('=')[1] + else: + tag_name_list.append(elm.split("=")[0].lower()) + tag_value_list.append(elm.split("=")[1]) + tb_name = "" + td_tag_value_list.append(self.getTdTypeValue(elm.split("=")[1], "tag")[1]) + td_tag_type_list.append(self.getTdTypeValue(elm.split("=")[1], "tag")[0]) + else: + if "id" == elm.split("=")[0].lower(): + tag_name_list.insert(0, elm.split("=")[0]) + tag_value_list.insert(0, elm.split("=")[1]) + td_tag_value_list.insert(0, self.getTdTypeValue(elm.split("=")[1], "tag")[1]) + td_tag_type_list.insert(0, self.getTdTypeValue(elm.split("=")[1], "tag")[0]) + else: + tag_name_list.append(elm.split("=")[0]) + tag_value_list.append(elm.split("=")[1]) + tb_name = "" + td_tag_value_list.append(self.getTdTypeValue(elm.split("=")[1], "tag")[1]) + td_tag_type_list.append(self.getTdTypeValue(elm.split("=")[1], "tag")[0]) + + col_name_list.append('_value') + col_value_list.append(stb_col_value) + + td_col_value_list.append(self.getTdTypeValue(stb_col_value)[1]) + td_col_type_list.append(self.getTdTypeValue(stb_col_value)[0]) + + final_field_list = [] + final_field_list.extend(col_name_list) + final_field_list.extend(tag_name_list) + + final_type_list = [] + final_type_list.append("TIMESTAMP") + final_type_list.extend(td_col_type_list) + final_type_list.extend(td_tag_type_list) + final_type_list = self.typeTrans(final_type_list) + + final_value_list = [] + final_value_list.append(ts_value) + final_value_list.extend(td_col_value_list) + final_value_list.extend(td_tag_value_list) + return final_value_list, final_field_list, final_type_list, stb_name, tb_name + + def genFullTypeSql(self, stb_name="", tb_name="", value="", t0="", t1="127i8", t2="32767i16", t3="2147483647i32", + t4="9223372036854775807i64", t5="11.12345f32", t6="22.123456789f64", t7="\"binaryTagValue\"", 
+ t8="L\"ncharTagValue\"", ts="1626006833641", + id_noexist_tag=None, id_change_tag=None, id_upper_tag=None, id_mixul_tag=None, id_double_tag=None, + t_add_tag=None, t_mul_tag=None, c_multi_tag=None, c_blank_tag=None, t_blank_tag=None, + chinese_tag=None, multi_field_tag=None, point_trans_tag=None, protocol=None, tcp_keyword_tag=None): + if stb_name == "": + stb_name = tdCom.getLongName(len=6, mode="letters") + if tb_name == "": + tb_name = f'{stb_name}_{random.randint(0, 65535)}_{random.randint(0, 65535)}' + if t0 == "": + t0 = "t" + if value == "": + value = random.choice(["f", "F", "false", "False", "t", "T", "true", "True", "TRUE", "FALSE"]) + # choose the casing of the id key; the branches are mutually exclusive + if id_upper_tag is not None: + id = "ID" + elif id_mixul_tag is not None: + id = random.choice(["iD", "Id"]) + else: + id = "id" + sql_seq = f'{stb_name} {ts} {value} {id}={tb_name} t0={t0} t1={t1} t2={t2} t3={t3} t4={t4} t5={t5} t6={t6} t7={t7} t8={t8}' + if id_noexist_tag is not None: + sql_seq = f'{stb_name} {ts} {value} t0={t0} t1={t1} t2={t2} t3={t3} t4={t4} t5={t5} t6={t6} t7={t7} t8={t8}' + if t_add_tag is not None: + sql_seq = f'{stb_name} {ts} {value} t0={t0} t1={t1} t2={t2} t3={t3} t4={t4} t5={t5} t6={t6} t7={t7} t8={t8} t9={t8}' + if id_change_tag is not None: + sql_seq = f'{stb_name} {ts} {value} t0={t0} {id}={tb_name} t1={t1} t2={t2} t3={t3} t4={t4} t5={t5} t6={t6} t7={t7} t8={t8}' + if id_double_tag is not None: + sql_seq = f'{stb_name} {ts} {value} {id}=\"{tb_name}_1\" t0={t0} t1={t1} {id}=\"{tb_name}_2\" t2={t2} t3={t3} t4={t4} t5={t5} t6={t6} t7={t7} t8={t8}' + # t_add_tag is checked again here; when set, this later sql_seq overrides the earlier one + if t_add_tag is not None: + sql_seq = f'{stb_name} {ts} {value} {id}={tb_name} t0={t0} t1={t1} t2={t2} t3={t3} t4={t4} t5={t5} t6={t6} t7={t7} t8={t8} t11={t1} t10={t8}' + if t_mul_tag is not None: + sql_seq = f'{stb_name} {ts} {value} {id}={tb_name} t0={t0} t1={t1} t2={t2} t3={t3} t4={t4} t5={t5} t6={t6}' + if id_noexist_tag is not None: + sql_seq = f'{stb_name} {ts} {value} t0={t0} t1={t1} t2={t2} t3={t3} t4={t4} t5={t5} t6={t6}' + if c_multi_tag is not None: + sql_seq = f'{stb_name} {ts} {value} {value} {id}={tb_name} t0={t0} t1={t1} t2={t2} t3={t3} t4={t4} t5={t5} t6={t6}' + if c_blank_tag is not None: + sql_seq = f'{stb_name} {ts} {id}={tb_name} t0={t0} t1={t1} t2={t2} t3={t3} t4={t4} t5={t5} t6={t6} t7={t7} t8={t8}' + if t_blank_tag is not None: + sql_seq = f'{stb_name} {ts} {value}' + if chinese_tag is not None: + sql_seq = f'{stb_name} {ts} L"涛思数据" t0={t0} t1=L"涛思数据"' + if multi_field_tag is not None: + sql_seq = f'{stb_name} {ts} {value} {id}={tb_name} t0={t0} {value}' + if point_trans_tag is not None: + sql_seq = f'.point.trans.test {ts} {value} t0={t0}' + if tcp_keyword_tag is not None: + sql_seq = f'put {ts} {value} t0={t0}' + if protocol == "telnet-tcp": + sql_seq = 'put ' + sql_seq + '\n' + return sql_seq, stb_name + + def genMulTagColStr(self, genType, count=1): + """ + genType must be tag/col + """ + tag_str = "" + col_str = "" + if genType == "tag": + for i in range(0, count): + if i < (count-1): + tag_str += f't{i}=f ' + else: + tag_str += f't{i}=f' + return tag_str + if genType == "col": + col_str = "t" + return col_str + + def genLongSql(self, tag_count): + stb_name = tdCom.getLongName(7, mode="letters") + tag_str = self.genMulTagColStr("tag", tag_count) + col_str = self.genMulTagColStr("col") + ts = "1626006833641" + long_sql = stb_name + ' ' + ts + ' ' + col_str + ' ' + ' ' + tag_str + return long_sql, stb_name + + def getNoIdTbName(self, stb_name, protocol=None): + query_sql = f"select tbname from {stb_name}" + tb_name = 
self.resHandle(query_sql, True, protocol)[0][0] + return tb_name + + def resHandle(self, query_sql, query_tag, protocol=None): + tdSql.execute('reset query cache') + if protocol == "telnet-tcp": + time.sleep(0.5) + row_info = tdSql.query(query_sql, query_tag) + col_info = tdSql.getColNameList(query_sql, query_tag) + res_row_list = [] + for row_mem in row_info: + # collect each result row into its own list + sub_list = [] + for i in row_mem: + sub_list.append(str(i)) + res_row_list.append(sub_list) + res_field_list_without_ts = col_info[0][1:] + res_type_list = col_info[1] + return res_row_list, res_field_list_without_ts, res_type_list + + def resCmp(self, input_sql, stb_name, query_sql="select * from", condition="", ts=None, ts_type=None, id=True, none_check_tag=None, precision=None, protocol=None): + expect_list = self.inputHandle(input_sql, ts_type, protocol) + if protocol == "telnet-tcp": + tdCom.tcpClient(input_sql) + else: + if precision is None: + self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, ts_type) + else: + self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, precision) + query_sql = f"{query_sql} {stb_name} {condition}" + res_row_list, res_field_list_without_ts, res_type_list = self.resHandle(query_sql, True, protocol) + if ts == 0: + res_ts = self.dateToTs(res_row_list[0][0]) + current_time = time.time() + if current_time - res_ts < 60: + tdSql.checkEqual(res_row_list[0][1:], expect_list[0][1:]) + else: + print("timeout") + tdSql.checkEqual(res_row_list[0], expect_list[0]) + else: + if none_check_tag is not None: + none_index_list = [i for i,x in enumerate(res_row_list[0]) if x=="None"] + none_index_list.reverse() + for j in none_index_list: + res_row_list[0].pop(j) + expect_list[0].pop(j) + tdSql.checkEqual(res_row_list[0], expect_list[0]) + tdSql.checkEqual(res_field_list_without_ts, expect_list[1]) + for i in range(len(res_type_list)): + tdSql.checkEqual(res_type_list[i], expect_list[2][i]) + + def initCheckCase(self, protocol=None): + """ + normal tags and cols, one for every elm + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + input_sql, stb_name = self.genFullTypeSql(protocol=protocol) + self.resCmp(input_sql, stb_name, protocol=protocol) + + def boolTypeCheckCase(self, protocol=None): + """ + check all accepted bool literal spellings + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + full_type_list = ["f", "F", "false", "False", "t", "T", "true", "True"] + for t_type in full_type_list: + input_sql, stb_name = self.genFullTypeSql(t0=t_type, protocol=protocol) + self.resCmp(input_sql, stb_name, protocol=protocol) + + def symbolsCheckCase(self, protocol=None): + """ + check symbols = `~!@#$%^&*()_-+={[}]\|:;'\",<.>/? 
+ """ + ''' + please test : + binary_symbols = '\"abcd`~!@#$%^&*()_-{[}]|:;<.>?lfjal"\'\'"\"' + ''' + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + binary_symbols = '"abcd`~!@#$%^&*()_-{[}]|:;<.>?lfjal"' + nchar_symbols = f'L{binary_symbols}' + input_sql1, stb_name1 = self.genFullTypeSql(value=binary_symbols, t7=binary_symbols, t8=nchar_symbols, protocol=protocol) + input_sql2, stb_name2 = self.genFullTypeSql(value=nchar_symbols, t7=binary_symbols, t8=nchar_symbols, protocol=protocol) + self.resCmp(input_sql1, stb_name1, protocol=protocol) + self.resCmp(input_sql2, stb_name2, protocol=protocol) + + def tsCheckCase(self): + """ + test ts list --> ["1626006833640ms", "1626006834s", "1626006822639022"] + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + input_sql, stb_name = self.genFullTypeSql(ts=1626006833640) + self.resCmp(input_sql, stb_name, ts_type=TDSmlTimestampType.MILLI_SECOND.value) + input_sql, stb_name = self.genFullTypeSql(ts=1626006833640) + self.resCmp(input_sql, stb_name, ts_type=None) + input_sql, stb_name = self.genFullTypeSql(ts=1626006834) + self.resCmp(input_sql, stb_name, ts_type=TDSmlTimestampType.SECOND.value) + + tdSql.execute(f"drop database if exists test_ts") + tdSql.execute(f"create database if not exists test_ts precision 'ms'") + tdSql.execute("use test_ts") + input_sql = ['test_ms 1626006833640 t t0=t', 'test_ms 1626006833641 f t0=t'] + self._conn.schemaless_insert(input_sql, TDSmlProtocolType.TELNET.value, None) + res = tdSql.query('select * from test_ms', True) + tdSql.checkEqual(str(res[0][0]), "2021-07-11 20:33:53.640000") + tdSql.checkEqual(str(res[1][0]), "2021-07-11 20:33:53.641000") + + def openTstbTelnetTsCheckCase(self): + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + input_sql = f'{tdCom.getLongName(len=10, mode="letters")} 0 127 t0=127 t1=32767I16 t2=2147483647I32 t3=9223372036854775807 t4=11.12345027923584F32 t5=22.123456789F64' + stb_name = input_sql.split(" ")[0] + self.resCmp(input_sql, stb_name, ts=0) + input_sql = f'{tdCom.getLongName(len=10, mode="letters")} 1626006833640 127 t0=127 t1=32767I16 t2=2147483647I32 t3=9223372036854775807 t4=11.12345027923584F32 t5=22.123456789F64' + stb_name = input_sql.split(" ")[0] + self.resCmp(input_sql, stb_name, ts_type=TDSmlTimestampType.MILLI_SECOND.value) + input_sql = f'{tdCom.getLongName(len=10, mode="letters")} 1626006834 127 t0=127 t1=32767I16 t2=2147483647I32 t3=9223372036854775807 t4=11.12345027923584F32 t5=22.123456789F64' + stb_name = input_sql.split(" ")[0] + self.resCmp(input_sql, stb_name, ts_type=TDSmlTimestampType.SECOND.value) + for ts in [1, 12, 123, 1234, 12345, 123456, 1234567, 12345678, 162600683, 16260068341, 162600683412, 16260068336401]: + try: + input_sql = f'{tdCom.getLongName(len=10, mode="letters")} {ts} 127 t0=127 t1=32767I16 t2=2147483647I32 t3=9223372036854775807 t4=11.12345027923584F32 t5=22.123456789F64' + self._conn.schemaless_insert(input_sql, TDSmlProtocolType.TELNET.value, None) + raise Exception("should not reach here") + except SchemalessError as err: + tdSql.checkNotEqual(err.errno, 0) + + def idSeqCheckCase(self, protocol=None): + """ + check id.index in tags + eg: t0=**,id=**,t1=** + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + input_sql, stb_name = self.genFullTypeSql(id_change_tag=True, protocol=protocol) + self.resCmp(input_sql, stb_name, protocol=protocol) + + def 
idLetterCheckCase(self, protocol=None): + """ + check id param + eg: id and ID + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + input_sql, stb_name = self.genFullTypeSql(id_upper_tag=True, protocol=protocol) + self.resCmp(input_sql, stb_name, protocol=protocol) + input_sql, stb_name = self.genFullTypeSql(id_mixul_tag=True, protocol=protocol) + self.resCmp(input_sql, stb_name, protocol=protocol) + input_sql, stb_name = self.genFullTypeSql(id_change_tag=True, id_upper_tag=True, protocol=protocol) + self.resCmp(input_sql, stb_name, protocol=protocol) + + def noIdCheckCase(self, protocol=None): + """ + id not exist + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + input_sql, stb_name = self.genFullTypeSql(id_noexist_tag=True, protocol=protocol) + self.resCmp(input_sql, stb_name, protocol=protocol) + query_sql = f"select tbname from {stb_name}" + res_row_list = self.resHandle(query_sql, True)[0] + if len(res_row_list[0][0]) > 0: + tdSql.checkColNameList(res_row_list, res_row_list) + else: + tdSql.checkColNameList(res_row_list, "please check noIdCheckCase") + + def maxColTagCheckCase(self): + """ + max tag count is 128 + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + for input_sql in [self.genLongSql(128)[0]]: + tdCom.cleanTb() + self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None) + for input_sql in [self.genLongSql(129)[0]]: + tdCom.cleanTb() + try: + self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None) + raise Exception("should not reach here") + except SchemalessError as err: + tdSql.checkNotEqual(err.errno, 0) + + def stbTbNameCheckCase(self, protocol=None): + """ + test illegal id name + mix "`~!@#$¥%^&*()-+{}|[]、「」【】:;《》<>?" 
+ """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + rstr = list("~!@#$¥%^&*()-+{}|[]、「」【】:;《》<>?") + for i in rstr: + input_sql, stb_name = self.genFullTypeSql(tb_name=f"\"aaa{i}bbb\"", protocol=protocol) + self.resCmp(input_sql, f'`{stb_name}`', protocol=protocol) + tdSql.execute(f'drop table if exists `{stb_name}`') + + def idStartWithNumCheckCase(self, protocol=None): + """ + id starts with a number + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + input_sql, stb_name = self.genFullTypeSql(tb_name="1aaabbb", protocol=protocol) + self.resCmp(input_sql, stb_name, protocol=protocol) + + def nowTsCheckCase(self): + """ + check that "now" is unsupported as a timestamp + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + input_sql = self.genFullTypeSql(ts="now")[0] + try: + self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None) + raise Exception("should not reach here") + except SchemalessError as err: + tdSql.checkNotEqual(err.errno, 0) + + def dateFormatTsCheckCase(self): + """ + check that a date-format ts is unsupported + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + input_sql = self.genFullTypeSql(ts="2021-07-21\ 19:01:46.920")[0] + try: + self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None) + raise Exception("should not reach here") + except SchemalessError as err: + tdSql.checkNotEqual(err.errno, 0) + + def illegalTsCheckCase(self): + """ + check ts format like 16260068336390us19 + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + input_sql = self.genFullTypeSql(ts="16260068336390us19")[0] + try: + self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None) + raise Exception("should not reach here") + except SchemalessError as err: + tdSql.checkNotEqual(err.errno, 0) + + def tbnameCheckCase(self): + """ + check length 192 + check upper-case tbname + check upper-case tag + length of stb_name/tb_name must be <= 192 + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + stb_name_192 = tdCom.getLongName(len=192, mode="letters") + tb_name_192 = tdCom.getLongName(len=192, mode="letters") + tdCom.cleanTb() + input_sql, stb_name = self.genFullTypeSql(stb_name=stb_name_192, tb_name=tb_name_192) + self.resCmp(input_sql, stb_name) + tdSql.query(f'select * from {stb_name}') + tdSql.checkRows(1) + if self.smlChildTableName_value == "ID": + for input_sql in [self.genFullTypeSql(stb_name=tdCom.getLongName(len=193, mode="letters"), tb_name=tdCom.getLongName(len=5, mode="letters"))[0], self.genFullTypeSql(tb_name=tdCom.getLongName(len=193, mode="letters"))[0]]: + try: + self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None) + raise Exception("should not reach here") + except SchemalessError as err: + tdSql.checkNotEqual(err.errno, 0) + input_sql = 'Abcdffgg 1626006833640 False T1=127i8 id=Abcddd' + else: + input_sql = self.genFullTypeSql(stb_name=tdCom.getLongName(len=193, mode="letters"), tb_name=tdCom.getLongName(len=5, mode="letters"))[0] + try: + self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None) + raise Exception("should not reach here") + except SchemalessError as err: + tdSql.checkNotEqual(err.errno, 0) + input_sql = 'Abcdffgg 1626006833640 False T1=127i8' + stb_name = f'`{input_sql.split(" ")[0]}`' + self.resCmp(input_sql, stb_name) + tdSql.execute('drop table 
`Abcdffgg`') + + def tagNameLengthCheckCase(self): + """ + check tag name limit <= 62 + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + tag_name = tdCom.getLongName(61, "letters") + tag_name = f'T{tag_name}' + stb_name = tdCom.getLongName(7, "letters") + input_sql = f'{stb_name} 1626006833640 L"bcdaaa" {tag_name}=f' + self.resCmp(input_sql, stb_name) + input_sql = f'{stb_name} 1626006833640 L"gggcdaaa" {tdCom.getLongName(65, "letters")}=f' + try: + self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None) + raise Exception("should not reach here") + except SchemalessError as err: + tdSql.checkNotEqual(err.errno, 0) + + def tagValueLengthCheckCase(self): + """ + check full type tag value limit + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + # nchar + # * legal nchar could not be larger than 16374/4 + stb_name = tdCom.getLongName(7, "letters") + input_sql = f'{stb_name} 1626006833640 t t0=t t1={tdCom.getLongName(4093, "letters")}' + self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None) + + input_sql = f'{stb_name} 1626006833640 t t0=t t1={tdCom.getLongName(4094, "letters")}' + try: + self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None) + raise Exception("should not reach here") + except SchemalessError as err: + tdSql.checkNotEqual(err.errno, 0) + + def colValueLengthCheckCase(self): + """ + check full type col value limit + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + # i8 + for value in ["-128i8", "127i8"]: + input_sql, stb_name = self.genFullTypeSql(value=value) + self.resCmp(input_sql, stb_name) + tdCom.cleanTb() + for value in ["-129i8", "128i8"]: + input_sql = self.genFullTypeSql(value=value)[0] + try: + self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None) + raise Exception("should not reach here") + except SchemalessError as err: + tdSql.checkNotEqual(err.errno, 0) + # i16 + tdCom.cleanTb() + for value in ["-32768i16"]: + input_sql, stb_name = self.genFullTypeSql(value=value) + self.resCmp(input_sql, stb_name) + tdCom.cleanTb() + for value in ["-32769i16", "32768i16"]: + input_sql = self.genFullTypeSql(value=value)[0] + try: + self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None) + raise Exception("should not reach here") + except SchemalessError as err: + tdSql.checkNotEqual(err.errno, 0) + + # i32 + tdCom.cleanTb() + for value in ["-2147483648i32"]: + input_sql, stb_name = self.genFullTypeSql(value=value) + self.resCmp(input_sql, stb_name) + tdCom.cleanTb() + for value in ["-2147483649i32", "2147483648i32"]: + input_sql = self.genFullTypeSql(value=value)[0] + try: + self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None) + raise Exception("should not reach here") + except SchemalessError as err: + tdSql.checkNotEqual(err.errno, 0) + + # i64 + tdCom.cleanTb() + for value in ["-9223372036854775808i64"]: + input_sql, stb_name = self.genFullTypeSql(value=value) + self.resCmp(input_sql, stb_name) + tdCom.cleanTb() + for value in ["-9223372036854775809i64", "9223372036854775808i64"]: + input_sql = self.genFullTypeSql(value=value)[0] + try: + self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None) + raise Exception("should not reach here") + except SchemalessError as err: + tdSql.checkNotEqual(err.errno, 0) + + # f32 + tdCom.cleanTb() + for value in 
[f"{-3.4028234663852885981170418348451692544*(10**38)}f32", f"{3.4028234663852885981170418348451692544*(10**38)}f32"]: + input_sql, stb_name = self.genFullTypeSql(value=value) + self.resCmp(input_sql, stb_name) + # * limit set to 4028234664*(10**38) + tdCom.cleanTb() + for value in [f"{-3.4028234664*(10**38)}f32", f"{3.4028234664*(10**38)}f32"]: + input_sql = self.genFullTypeSql(value=value)[0] + try: + self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None) + raise Exception("should not reach here") + except SchemalessError as err: + tdSql.checkNotEqual(err.errno, 0) + + # f64 + tdCom.cleanTb() + for value in [f'{-1.79769313486231570814527423731704356798070567525844996598917476803157260780*(10**308)}f64', f'{-1.79769313486231570814527423731704356798070567525844996598917476803157260780*(10**308)}f64']: + input_sql, stb_name = self.genFullTypeSql(value=value) + self.resCmp(input_sql, stb_name) + # # * limit set to 1.797693134862316*(10**308) + # tdCom.cleanTb() + # for value in [f'{-1.797693134862316*(10**308)}f64', f'{-1.797693134862316*(10**308)}f64']: + # input_sql = self.genFullTypeSql(value=value)[0] + # try: + # self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None) + # raise Exception("should not reach here") + # except SchemalessError as err: + # tdSql.checkNotEqual(err.errno, 0) + + # # # binary + # tdCom.cleanTb() + # stb_name = tdCom.getLongName(7, "letters") + # input_sql = f'{stb_name} 1626006833640 "{tdCom.getLongName(16374, "letters")}" t0=t' + # self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None) + + # tdCom.cleanTb() + # input_sql = f'{stb_name} 1626006833640 "{tdCom.getLongName(16375, "letters")}" t0=t' + # try: + # self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None) + # raise Exception("should not reach here") + # except SchemalessError as err: + # tdSql.checkNotEqual(err.errno, 0) + + # # nchar + # # * legal nchar could not be larger than 16374/4 + # tdCom.cleanTb() + # stb_name = tdCom.getLongName(7, "letters") + # input_sql = f'{stb_name} 1626006833640 L"{tdCom.getLongName(4093, "letters")}" t0=t' + # self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None) + + # tdCom.cleanTb() + # input_sql = f'{stb_name} 1626006833640 L"{tdCom.getLongName(4094, "letters")}" t0=t' + # try: + # self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None) + # raise Exception("should not reach here") + # except SchemalessError as err: + # tdSql.checkNotEqual(err.errno, 0) + + def tagColIllegalValueCheckCase(self): + + """ + test illegal tag col value + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + # bool + for i in ["TrUe", "tRue", "trUe", "truE", "FalsE", "fAlse", "faLse", "falSe", "falsE"]: + input_sql1, stb_name = self.genFullTypeSql(t0=i) + self.resCmp(input_sql1, stb_name) + input_sql2, stb_name = self.genFullTypeSql(value=i) + self.resCmp(input_sql2, stb_name) + + # i8 i16 i32 i64 f32 f64 + for input_sql in [ + self.genFullTypeSql(value="1s2i8")[0], + self.genFullTypeSql(value="1s2i16")[0], + self.genFullTypeSql(value="1s2i32")[0], + self.genFullTypeSql(value="1s2i64")[0], + self.genFullTypeSql(value="11.1s45f32")[0], + self.genFullTypeSql(value="11.1s45f64")[0], + ]: + try: + self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None) + raise Exception("should not reach here") + except SchemalessError as err: + tdSql.checkNotEqual(err.errno, 0) + + # check accepted 
binary and nchar symbols + # # * ~!@#$¥%^&*()-+={}|[]、「」:; + for symbol in list('~!@#$¥%^&*()-+={}|[]、「」:;'): + input_sql1 = f'{tdCom.getLongName(7, "letters")} 1626006833640 "abc{symbol}aaa" t0=t' + input_sql2 = f'{tdCom.getLongName(7, "letters")} 1626006833640 t t0=t t1="abc{symbol}aaa"' + self._conn.schemaless_insert([input_sql1], TDSmlProtocolType.TELNET.value, None) + # self._conn.schemaless_insert([input_sql2], TDSmlProtocolType.TELNET.value, None) + + def blankCheckCase(self): + ''' + check blank case + ''' + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + # input_sql_list = [f'{tdCom.getLongName(7, "letters")} 1626006833640 "abc aaa" t0=t', + # f'{tdCom.getLongName(7, "letters")} 1626006833640 t t0="abaaa"', + # f'{tdCom.getLongName(7, "letters")} 1626006833640 t t0=L"abaaa"', + # f'{tdCom.getLongName(7, "letters")} 1626006833640 L"aba aa" t0=L"abcaaa3" '] + input_sql_list = [f'{tdCom.getLongName(7, "letters")} 1626006833640 t t0="abaaa"', + f'{tdCom.getLongName(7, "letters")} 1626006833640 t t0=L"abaaa"'] + for input_sql in input_sql_list: + stb_name = input_sql.split(" ")[0] + self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None) + tdSql.query(f'select * from {stb_name}') + tdSql.checkRows(1) + + def duplicateIdTagColInsertCheckCase(self): + """ + check duplicate Id Tag Col + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + input_sql_id = self.genFullTypeSql(id_double_tag=True)[0] + try: + self._conn.schemaless_insert([input_sql_id], TDSmlProtocolType.TELNET.value, None) + raise Exception("should not reach here") + except SchemalessError as err: + tdSql.checkNotEqual(err.errno, 0) + + input_sql = self.genFullTypeSql()[0] + input_sql_tag = input_sql.replace("t5", "t6") + try: + self._conn.schemaless_insert([input_sql_tag], TDSmlProtocolType.TELNET.value, None) + raise Exception("should not reach here") + except SchemalessError as err: + tdSql.checkNotEqual(err.errno, 0) + + ##### stb exist ##### + @tdCom.smlPass + def noIdStbExistCheckCase(self): + """ + case no id when stb exist + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + input_sql, stb_name = self.genFullTypeSql(tb_name="sub_table_0123456", t0="f", value="f") + self.resCmp(input_sql, stb_name) + input_sql, stb_name = self.genFullTypeSql(stb_name=stb_name, id_noexist_tag=True, t0="f", value="f") + self.resCmp(input_sql, stb_name, condition='where tbname like "t_%"') + tdSql.query(f"select * from {stb_name}") + tdSql.checkRows(2) + + def duplicateInsertExistCheckCase(self): + """ + check duplicate insert when stb exist + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + input_sql, stb_name = self.genFullTypeSql() + self.resCmp(input_sql, stb_name) + self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None) + self.resCmp(input_sql, stb_name) + + @tdCom.smlPass + def tagColBinaryNcharLengthCheckCase(self): + """ + check length increase + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + input_sql, stb_name = self.genFullTypeSql() + self.resCmp(input_sql, stb_name) + tb_name = tdCom.getLongName(5, "letters") + input_sql, stb_name = self.genFullTypeSql(stb_name=stb_name, tb_name=tb_name,t7="\"binaryTagValuebinaryTagValue\"", t8="L\"ncharTagValuencharTagValue\"") + self.resCmp(input_sql, stb_name, condition=f'where tbname like "{tb_name}"') + + 
@tdCom.smlPass + def tagColAddDupIDCheckCase(self): + """ + check tag count add, with duplicate stb and tb + * tag: alter table ... + * col: when update==0 and ts is the same, unchanged + * so in this case the tag&&value will be added, + * col is added without value when update==0 + * col is added with value when update==1 + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + tb_name = tdCom.getLongName(7, "letters") + for db_update_tag in [0, 1]: + if db_update_tag == 1: + self.createDb("test_update", db_update_tag=db_update_tag) + input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name, t0="t", value="t") + self.resCmp(input_sql, stb_name) + input_sql, stb_name = self.genFullTypeSql(stb_name=stb_name, tb_name=tb_name, t0="t", value="f", t_add_tag=True) + if db_update_tag == 1: + self.resCmp(input_sql, stb_name, condition=f'where tbname like "{tb_name}"', none_check_tag=True) + tdSql.query(f'select * from {stb_name} where tbname like "{tb_name}"') + tdSql.checkData(0, 11, None) + tdSql.checkData(0, 12, None) + else: + self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None) + tdSql.query(f'select * from {stb_name} where tbname like "{tb_name}"') + tdSql.checkData(0, 1, True) + tdSql.checkData(0, 11, None) + tdSql.checkData(0, 12, None) + self.createDb() + + @tdCom.smlPass + def tagColAddCheckCase(self): + """ + check tag count add + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + tb_name = tdCom.getLongName(7, "letters") + input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name, t0="f", value="f") + self.resCmp(input_sql, stb_name) + tb_name_1 = tdCom.getLongName(7, "letters") + input_sql, stb_name = self.genFullTypeSql(stb_name=stb_name, tb_name=tb_name_1, t0="f", value="f", t_add_tag=True) + self.resCmp(input_sql, stb_name, condition=f'where tbname like "{tb_name_1}"') + res_row_list = self.resHandle(f"select t10,t11 from {tb_name}", True)[0] + tdSql.checkEqual(res_row_list[0], ['None', 'None']) + self.resCmp(input_sql, stb_name, condition=f'where tbname like "{tb_name}"', none_check_tag=True) + + def tagMd5Check(self): + """ + condition: stb does not change + insert two tables, keep tags unchanged, change cols + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + input_sql, stb_name = self.genFullTypeSql(t0="f", value="f", id_noexist_tag=True) + self.resCmp(input_sql, stb_name) + tb_name1 = self.getNoIdTbName(stb_name) + input_sql, stb_name = self.genFullTypeSql(stb_name=stb_name, t0="f", value="f", id_noexist_tag=True) + self.resCmp(input_sql, stb_name) + tb_name2 = self.getNoIdTbName(stb_name) + tdSql.query(f"select * from {stb_name}") + tdSql.checkRows(1) + tdSql.checkEqual(tb_name1, tb_name2) + input_sql, stb_name = self.genFullTypeSql(stb_name=stb_name, t0="f", value="f", id_noexist_tag=True, t_add_tag=True) + self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None) + tb_name3 = self.getNoIdTbName(stb_name) + tdSql.query(f"select * from {stb_name}") + tdSql.checkRows(2) + tdSql.checkNotEqual(tb_name1, tb_name3) + + # * tag nchar max is 16374/4, col+ts nchar max 49151 + def tagColNcharMaxLengthCheckCase(self): + """ + check nchar length limit + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + stb_name = tdCom.getLongName(7, "letters") + input_sql = f'{stb_name} 1626006833640 f t2={tdCom.getLongName(1, "letters")}' + self._conn.schemaless_insert([input_sql], 
TDSmlProtocolType.TELNET.value, None) + + # * legal nchar could not be larger than 16374/4 + input_sql = f'{stb_name} 1626006833640 f t1={tdCom.getLongName(4093, "letters")} t2={tdCom.getLongName(1, "letters")}' + self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None) + tdSql.query(f"select * from {stb_name}") + tdSql.checkRows(2) + input_sql = f'{stb_name} 1626006833640 f t1={tdCom.getLongName(4093, "letters")} t2={tdCom.getLongName(2, "letters")}' + try: + self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None) + raise Exception("should not reach here") + except SchemalessError as err: + tdSql.checkNotEqual(err.errno, 0) + tdSql.query(f"select * from {stb_name}") + tdSql.checkRows(2) + + def batchInsertCheckCase(self): + """ + test batch insert + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + stb_name = tdCom.getLongName(8, "letters") + tdSql.execute(f'create stable {stb_name}(ts timestamp, f int) tags(t1 bigint)') + + lines = ["st123456 1626006833640 1i64 t1=3i64 t2=4f64 t3=\"t3\"", + "st123456 1626006833641 2i64 t1=4i64 t3=\"t4\" t2=5f64 t4=5f64", + f'{stb_name} 1626006833642 3i64 t2=5f64 t3=L\"ste\"', + "stf567890 1626006833643 4i64 t1=4i64 t3=\"t4\" t2=5f64 t4=5f64", + "st123456 1626006833644 5i64 t1=4i64 t2=5f64 t3=\"t4\"", + f'{stb_name} 1626006833645 6i64 t2=5f64 t3=L\"ste2\"', + f'{stb_name} 1626006833646 7i64 t2=5f64 t3=L\"ste2\"', + "st123456 1626006833647 8i64 t1=4i64 t3=\"t4\" t2=5f64 t4=5f64", + "st123456 1626006833648 9i64 t1=4i64 t3=\"t4\" t2=5f64 t4=5f64" + ] + self._conn.schemaless_insert(lines, TDSmlProtocolType.TELNET.value, TDSmlTimestampType.MILLI_SECOND.value) + tdSql.query('show stables') + tdSql.checkRows(3) + tdSql.query('show tables') + tdSql.checkRows(6) + tdSql.query('select * from st123456') + tdSql.checkRows(5) + + def multiInsertCheckCase(self, count): + """ + test multi insert + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + sql_list = [] + stb_name = tdCom.getLongName(8, "letters") + tdSql.execute(f'create stable {stb_name}(ts timestamp, f int) tags(t1 nchar(10))') + for i in range(count): + input_sql = self.genFullTypeSql(stb_name=stb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', value=f'"{tdCom.getLongName(8, "letters")}"', id_noexist_tag=True)[0] + sql_list.append(input_sql) + self._conn.schemaless_insert(sql_list, TDSmlProtocolType.TELNET.value, TDSmlTimestampType.MILLI_SECOND.value) + tdSql.query('show tables') + tdSql.checkRows(count) + + def batchErrorInsertCheckCase(self): + """ + test batch error insert + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + stb_name = tdCom.getLongName(8, "letters") + lines = ["st123456 1626006833640 3i 64 t1=3i64 t2=4f64 t3=\"t3\"", + f"{stb_name} 1626056811823316532ns tRue t2=5f64 t3=L\"ste\""] + try: + self._conn.schemaless_insert(lines, TDSmlProtocolType.TELNET.value, None) + raise Exception("should not reach here") + except SchemalessError as err: + tdSql.checkNotEqual(err.errno, 0) + + def multiColsInsertCheckCase(self): + """ + test multi cols insert + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + input_sql = self.genFullTypeSql(c_multi_tag=True)[0] + try: + self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None) + raise Exception("should not reach here") + except SchemalessError as err: + tdSql.checkNotEqual(err.errno, 0) + + def 
blankColInsertCheckCase(self): + """ + test blank col insert + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + input_sql = self.genFullTypeSql(c_blank_tag=True)[0] + try: + self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None) + raise Exception("should not reach here") + except SchemalessError as err: + tdSql.checkNotEqual(err.errno, 0) + + def blankTagInsertCheckCase(self): + """ + test blank tag insert + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + input_sql = self.genFullTypeSql(t_blank_tag=True)[0] + try: + self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None) + raise Exception("should not reach here") + except SchemalessError as err: + tdSql.checkNotEqual(err.errno, 0) + + def chineseCheckCase(self): + """ + check nchar ---> chinese + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + input_sql, stb_name = self.genFullTypeSql(chinese_tag=True) + self.resCmp(input_sql, stb_name) + + def multiFieldCheckCase(self): + ''' + multi_field + ''' + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + input_sql = self.genFullTypeSql(multi_field_tag=True)[0] + try: + self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None) + raise Exception("should not reach here") + except SchemalessError as err: + tdSql.checkNotEqual(err.errno, 0) + + def spellCheckCase(self): + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + stb_name = tdCom.getLongName(8, "letters") + input_sql_list = [f'{stb_name}_1 1626006833640 127I8 t0=127I8 t1=32767I16 t2=2147483647I32 t3=9223372036854775807I64 t4=11.12345027923584F32 t5=22.123456789F64', + f'{stb_name}_2 1626006833640 32767I16 t0=127I8 t1=32767I16 t2=2147483647I32 t3=9223372036854775807I64 t4=11.12345027923584F32 t5=22.123456789F64', + f'{stb_name}_3 1626006833640 2147483647I32 t0=127I8 t1=32767I16 t2=2147483647I32 t3=9223372036854775807I64 t4=11.12345027923584F32 t5=22.123456789F64', + f'{stb_name}_4 1626006833640 9223372036854775807I64 t0=127I8 t1=32767I16 t2=2147483647I32 t3=9223372036854775807I64 t4=11.12345027923584F32 t5=22.123456789F64', + f'{stb_name}_5 1626006833640 11.12345027923584F32 t0=127I8 t1=32767I16 t2=2147483647I32 t3=9223372036854775807I64 t4=11.12345027923584F32 t5=22.123456789F64', + f'{stb_name}_6 1626006833640 22.123456789F64 t0=127I8 t1=32767I16 t2=2147483647I32 t3=9223372036854775807I64 t4=11.12345027923584F32 t5=22.123456789F64', + f'{stb_name}_7 1626006833640 22.123456789F64 t0=127I8 t1=32767I16 t2=2147483647I32 t3=9223372036854775807I64 t4=11.12345027923584F32 t5=22.123456789F64', + f'{stb_name}_8 1626006833640 22.123456789F64 t0=127I8 t1=32767I16 t2=2147483647I32 t3=9223372036854775807I64 t4=11.12345027923584F32 t5=22.123456789F64', + f'{stb_name}_9 1626006833640 22.123456789F64 t0=127I8 t1=32767I16 t2=2147483647I32 t3=9223372036854775807I64 t4=11.12345027923584F32 t5=22.123456789F64', + f'{stb_name}_10 1626006833640 22.123456789F64 t0=127I8 t1=32767I16 t2=2147483647I32 t3=9223372036854775807I64 t4=11.12345027923584F32 t5=22.123456789F64'] + for input_sql in input_sql_list: + stb_name = input_sql.split(' ')[0] + self.resCmp(input_sql, stb_name) + + def pointTransCheckCase(self, protocol=None): + """ + metric value "." 
trans to "_" + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + input_sql = self.genFullTypeSql(point_trans_tag=True, protocol=protocol)[0] + if protocol == 'telnet-tcp': + stb_name = f'`{input_sql.split(" ")[1]}`' + else: + stb_name = f'`{input_sql.split(" ")[0]}`' + self.resCmp(input_sql, stb_name, protocol=protocol) + tdSql.execute("drop table `.point.trans.test`") + + def defaultTypeCheckCase(self): + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + stb_name = tdCom.getLongName(8, "letters") + input_sql_list = [f'{stb_name}_1 1626006833640 9223372036854775807 t0=f t1=127 t2=32767i16 t3=2147483647i32 t4=9223372036854775807 t5=11.12345f32 t6=22.123456789f64 t7="vozamcts" t8=L"ncharTagValue"', \ + f'{stb_name}_2 1626006833641 22.123456789 t0=f t1=127i8 t2=32767I16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789 t7="vozamcts" t8=L"ncharTagValue"', \ + f'{stb_name}_3 1626006833642 10e5F32 t0=f t1=127i8 t2=32767I16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=10e5F64 t7="vozamcts" t8=L"ncharTagValue"', \ + f'{stb_name}_4 1626006833643 10.0e5F64 t0=f t1=127i8 t2=32767I16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=10.0e5F32 t7="vozamcts" t8=L"ncharTagValue"', \ + f'{stb_name}_5 1626006833644 -10.0e5 t0=f t1=127i8 t2=32767I16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=-10.0e5 t7="vozamcts" t8=L"ncharTagValue"'] + for input_sql in input_sql_list: + stb_name = input_sql.split(" ")[0] + self.resCmp(input_sql, stb_name) + + def tbnameTagsColsNameCheckCase(self): + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + if self.smlChildTableName_value == "ID": + input_sql = 'rFa$sta 1626006834 9223372036854775807 id=rFas$ta_1 Tt!0=true tT@1=127Ii8 t#2=32767i16 "t$3"=2147483647i32 t%4=9223372036854775807i64 t^5=11.12345f32 t&6=22.123456789f64 t*7=\"ddzhiksj\" t!@#$%^&*()_+[];:<>?,9=L\"ncharTagValue\"' + self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None) + query_sql = 'select * from `rFa$sta`' + query_res = tdSql.query(query_sql, True) + tdSql.checkEqual(query_res, [(datetime.datetime(2021, 7, 11, 20, 33, 54), 9.223372036854776e+18, 'true', '127Ii8', '32767i16', '2147483647i32', '9223372036854775807i64', '11.12345f32', '22.123456789f64', '"ddzhiksj"', 'L"ncharTagValue"')]) + col_tag_res = tdSql.getColNameList(query_sql) + tdSql.checkEqual(col_tag_res, ['ts', '_value', 'tt!0', 'tt@1', 't#2', '"t$3"', 't%4', 't^5', 't&6', 't*7', 't!@#$%^&*()_+[];:<>?,9']) + tdSql.execute('drop table `rFa$sta`') + else: + input_sql = 'rFa$sta 1626006834 9223372036854775807 Tt!0=true tT@1=127Ii8 t#2=32767i16 "t$3"=2147483647i32 t%4=9223372036854775807i64 t^5=11.12345f32 t&6=22.123456789f64 t*7=\"ddzhiksj\" t!@#$%^&*()_+[];:<>?,9=L\"ncharTagValue\"' + self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None) + query_sql = 'select * from `rFa$sta`' + query_res = tdSql.query(query_sql, True) + tdSql.checkEqual(query_res, [(datetime.datetime(2021, 7, 11, 20, 33, 54), 9.223372036854776e+18, '2147483647i32', 'L"ncharTagValue"', '32767i16', '9223372036854775807i64', '22.123456789f64', '"ddzhiksj"', '11.12345f32', 'true', '127Ii8')]) + col_tag_res = tdSql.getColNameList(query_sql) + tdSql.checkEqual(col_tag_res, ['_ts', '_value', '"t$3"', 't!@#$%^&*()_+[];:<>?,9', 't#2', 't%4', 't&6', 't*7', 't^5', 'Tt!0', 'tT@1']) + tdSql.execute('drop table `rFa$sta`') + + def 
tcpKeywordsCheckCase(self, protocol="telnet-tcp"): + """ + stb = "put" + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + input_sql = self.genFullTypeSql(tcp_keyword_tag=True, protocol=protocol)[0] + stb_name = f'`{input_sql.split(" ")[1]}`' + self.resCmp(input_sql, stb_name, protocol=protocol) + + def genSqlList(self, count=5, stb_name="", tb_name=""): + """ + stb --> supertable + tb --> table + ts --> timestamp, same default + col --> column, same default + tag --> tag, same default + d --> different + s --> same + a --> add + m --> minus + """ + d_stb_d_tb_list = list() + s_stb_s_tb_list = list() + s_stb_s_tb_a_tag_list = list() + s_stb_s_tb_m_tag_list = list() + s_stb_d_tb_list = list() + s_stb_d_tb_m_tag_list = list() + s_stb_d_tb_a_tag_list = list() + s_stb_s_tb_d_ts_list = list() + s_stb_s_tb_d_ts_m_tag_list = list() + s_stb_s_tb_d_ts_a_tag_list = list() + s_stb_d_tb_d_ts_list = list() + s_stb_d_tb_d_ts_m_tag_list = list() + s_stb_d_tb_d_ts_a_tag_list = list() + for i in range(count): + d_stb_d_tb_list.append(self.genFullTypeSql(t0="f", value="f")) + s_stb_s_tb_list.append(self.genFullTypeSql(stb_name=stb_name, tb_name=tb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', value=f'"{tdCom.getLongName(8, "letters")}"')) + s_stb_s_tb_a_tag_list.append(self.genFullTypeSql(stb_name=stb_name, tb_name=tb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', value=f'"{tdCom.getLongName(8, "letters")}"', t_add_tag=True)) + s_stb_s_tb_m_tag_list.append(self.genFullTypeSql(stb_name=stb_name, tb_name=tb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', value=f'"{tdCom.getLongName(8, "letters")}"', t_mul_tag=True)) + s_stb_d_tb_list.append(self.genFullTypeSql(stb_name=stb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', value=f'"{tdCom.getLongName(8, "letters")}"', id_noexist_tag=True)) + s_stb_d_tb_m_tag_list.append(self.genFullTypeSql(stb_name=stb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', value=f'"{tdCom.getLongName(8, "letters")}"', id_noexist_tag=True, t_mul_tag=True)) + s_stb_d_tb_a_tag_list.append(self.genFullTypeSql(stb_name=stb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', value=f'"{tdCom.getLongName(8, "letters")}"', id_noexist_tag=True, t_add_tag=True)) + s_stb_s_tb_d_ts_list.append(self.genFullTypeSql(stb_name=stb_name, tb_name=tb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', value=f'"{tdCom.getLongName(8, "letters")}"', ts=0)) + s_stb_s_tb_d_ts_m_tag_list.append(self.genFullTypeSql(stb_name=stb_name, tb_name=tb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', value=f'"{tdCom.getLongName(8, "letters")}"', ts=0, t_mul_tag=True)) + s_stb_s_tb_d_ts_a_tag_list.append(self.genFullTypeSql(stb_name=stb_name, tb_name=tb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', value=f'"{tdCom.getLongName(8, "letters")}"', ts=0, t_add_tag=True)) + s_stb_d_tb_d_ts_list.append(self.genFullTypeSql(stb_name=stb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', value=f'"{tdCom.getLongName(8, "letters")}"', id_noexist_tag=True, ts=0)) + s_stb_d_tb_d_ts_m_tag_list.append(self.genFullTypeSql(stb_name=stb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', value=f'"{tdCom.getLongName(8, "letters")}"', id_noexist_tag=True, ts=0, t_mul_tag=True)) + s_stb_d_tb_d_ts_a_tag_list.append(self.genFullTypeSql(stb_name=stb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', value=f'"{tdCom.getLongName(8, "letters")}"', id_noexist_tag=True, ts=0, t_add_tag=True)) + + return d_stb_d_tb_list, s_stb_s_tb_list, s_stb_s_tb_a_tag_list, s_stb_s_tb_m_tag_list, \ + s_stb_d_tb_list, 
s_stb_d_tb_m_tag_list, s_stb_d_tb_a_tag_list, s_stb_s_tb_d_ts_list, \ + s_stb_s_tb_d_ts_m_tag_list, s_stb_s_tb_d_ts_a_tag_list, s_stb_d_tb_d_ts_list, \ + s_stb_d_tb_d_ts_m_tag_list, s_stb_d_tb_d_ts_a_tag_list + + + def genMultiThreadSeq(self, sql_list): + tlist = list() + for insert_sql in sql_list: + t = threading.Thread(target=self._conn.schemaless_insert,args=([insert_sql[0]], TDSmlProtocolType.TELNET.value, None)) + tlist.append(t) + return tlist + + def multiThreadRun(self, tlist): + for t in tlist: + t.start() + for t in tlist: + t.join() + + def stbInsertMultiThreadCheckCase(self): + """ + threads insert different stbs + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + input_sql = self.genSqlList()[0] + print(input_sql) + self.multiThreadRun(self.genMultiThreadSeq(input_sql)) + tdSql.query(f"show tables;") + tdSql.checkRows(5) + + def sStbStbDdataInsertMultiThreadCheckCase(self): + """ + threads insert same stb tb, different data; result keeps first data + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + tb_name = tdCom.getLongName(7, "letters") + input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name, value="\"binaryTagValue\"") + self.resCmp(input_sql, stb_name) + s_stb_s_tb_list = self.genSqlList(stb_name=stb_name, tb_name=tb_name)[1] + self.multiThreadRun(self.genMultiThreadSeq(s_stb_s_tb_list)) + tdSql.query(f"show tables;") + tdSql.checkRows(1) if self.smlChildTableName_value == "ID" else tdSql.checkRows(6) + if self.smlChildTableName_value == "ID": + expected_tb_name = self.getNoIdTbName(stb_name)[0] + tdSql.checkEqual(tb_name, expected_tb_name) + tdSql.query(f"select * from {stb_name};") + tdSql.checkRows(1) if self.smlChildTableName_value == "ID" else tdSql.checkRows(6) + + def sStbStbDdataAtInsertMultiThreadCheckCase(self): + """ + threads insert same stb tb, different data, add columns and tags; result keeps first data + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + tb_name = tdCom.getLongName(7, "letters") + input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name, value="\"binaryTagValue\"") + self.resCmp(input_sql, stb_name) + s_stb_s_tb_a_tag_list = self.genSqlList(stb_name=stb_name, tb_name=tb_name)[2] + self.multiThreadRun(self.genMultiThreadSeq(s_stb_s_tb_a_tag_list)) + tdSql.query(f"show tables;") + tdSql.checkRows(1) if self.smlChildTableName_value == "ID" else tdSql.checkRows(6) + if self.smlChildTableName_value == "ID": + expected_tb_name = self.getNoIdTbName(stb_name)[0] + tdSql.checkEqual(tb_name, expected_tb_name) + tdSql.query(f"select * from {stb_name};") + tdSql.checkRows(1) if self.smlChildTableName_value == "ID" else tdSql.checkRows(6) + + def sStbStbDdataMtInsertMultiThreadCheckCase(self): + """ + threads insert same stb tb, different data, minus columns and tags; result keeps first data + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + tb_name = tdCom.getLongName(7, "letters") + input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name, value="\"binaryTagValue\"") + self.resCmp(input_sql, stb_name) + s_stb_s_tb_m_tag_list = self.genSqlList(stb_name=stb_name, tb_name=tb_name)[3] + self.multiThreadRun(self.genMultiThreadSeq(s_stb_s_tb_m_tag_list)) + tdSql.query(f"show tables;") + tdSql.checkRows(1) if self.smlChildTableName_value == "ID" else tdSql.checkRows(2) + if self.smlChildTableName_value == "ID": + expected_tb_name = self.getNoIdTbName(stb_name)[0] + 
tdSql.checkEqual(tb_name, expected_tb_name) + tdSql.query(f"select * from {stb_name};") + tdSql.checkRows(1) if self.smlChildTableName_value == "ID" else tdSql.checkRows(2) + + def sStbDtbDdataInsertMultiThreadCheckCase(self): + """ + thread input same stb, different tb, different data + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + input_sql, stb_name = self.genFullTypeSql(value="\"binaryTagValue\"") + self.resCmp(input_sql, stb_name) + s_stb_d_tb_list = self.genSqlList(stb_name=stb_name)[4] + self.multiThreadRun(self.genMultiThreadSeq(s_stb_d_tb_list)) + tdSql.query(f"show tables;") + tdSql.checkRows(6) + + def sStbDtbDdataMtInsertMultiThreadCheckCase(self): + """ + thread input same stb, different tb, different data, add col, mul tag + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + input_sql, stb_name = self.genFullTypeSql(value="\"binaryTagValue\"") + self.resCmp(input_sql, stb_name) + s_stb_d_tb_m_tag_list = [(f'{stb_name} 1626006833640 "omfdhyom" t0=F t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64', 'yzwswz'), \ + (f'{stb_name} 1626006833640 "vqowydbc" t0=F t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64', 'yzwswz'), \ + (f'{stb_name} 1626006833640 "plgkckpv" t0=F t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64', 'yzwswz'), \ + (f'{stb_name} 1626006833640 "cujyqvlj" t0=F t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64', 'yzwswz'), \ + (f'{stb_name} 1626006833640 "twjxisat" t0=T t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64', 'yzwswz')] + self.multiThreadRun(self.genMultiThreadSeq(s_stb_d_tb_m_tag_list)) + tdSql.query(f"show tables;") + tdSql.checkRows(3) + + def sStbDtbDdataAtInsertMultiThreadCheckCase(self): + """ + thread input same stb, different tb, different data, add tag, mul col + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + input_sql, stb_name = self.genFullTypeSql(value="\"binaryTagValue\"") + self.resCmp(input_sql, stb_name) + s_stb_d_tb_a_tag_list = self.genSqlList(stb_name=stb_name)[6] + self.multiThreadRun(self.genMultiThreadSeq(s_stb_d_tb_a_tag_list)) + tdSql.query(f"show tables;") + tdSql.checkRows(6) + + def sStbStbDdataDtsInsertMultiThreadCheckCase(self): + """ + thread input same stb tb, different ts + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + tb_name = tdCom.getLongName(7, "letters") + input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name, value="\"binaryTagValue\"") + self.resCmp(input_sql, stb_name) + s_stb_s_tb_d_ts_list = [(f'{stb_name} 0 "hkgjiwdj" id={tb_name} t0=f t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64 t7="vozamcts" t8=L"ncharTagValue"', 'dwpthv'), \ + (f'{stb_name} 0 "rljjrrul" id={tb_name} t0=False t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64 t7="bmcanhbs" t8=L"ncharTagValue"', 'dwpthv'), \ + (f'{stb_name} 0 "basanglx" id={tb_name} t0=False t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64 t7="enqkyvmb" t8=L"ncharTagValue"', 'dwpthv'), \ + (f'{stb_name} 0 "clsajzpp" id={tb_name} t0=F t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 
t5=11.12345f32 t6=22.123456789f64 t7="eivaegjk" t8=L"ncharTagValue"', 'dwpthv'), \ + (f'{stb_name} 0 "jitwseso" id={tb_name} t0=T t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64 t7="yhlwkddq" t8=L"ncharTagValue"', 'dwpthv')] + self.multiThreadRun(self.genMultiThreadSeq(s_stb_s_tb_d_ts_list)) + tdSql.query(f"show tables;") + tdSql.checkRows(1) if self.smlChildTableName_value == "ID" else tdSql.checkRows(6) + tdSql.query(f"select * from {stb_name}") + tdSql.checkRows(6) + + def sStbStbDdataDtsMtInsertMultiThreadCheckCase(self): + """ + thread input same stb tb, different ts, add col, mul tag + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + tb_name = tdCom.getLongName(7, "letters") + input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name, value="\"binaryTagValue\"") + self.resCmp(input_sql, stb_name) + s_stb_s_tb_d_ts_m_tag_list = self.genSqlList(stb_name=stb_name, tb_name=tb_name)[8] + self.multiThreadRun(self.genMultiThreadSeq(s_stb_s_tb_d_ts_m_tag_list)) + tdSql.query(f"show tables;") + tdSql.checkRows(1) if self.smlChildTableName_value == "ID" else tdSql.checkRows(2) + tdSql.query(f"select * from {stb_name}") + tdSql.checkRows(6) + tdSql.query(f"select * from {stb_name} where t8 is not NULL") + tdSql.checkRows(6) if self.smlChildTableName_value == "ID" else tdSql.checkRows(1) + + def sStbStbDdataDtsAtInsertMultiThreadCheckCase(self): + """ + thread input same stb tb, different ts, add tag, mul col + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + tb_name = tdCom.getLongName(7, "letters") + input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name, value="\"binaryTagValue\"") + self.resCmp(input_sql, stb_name) + s_stb_s_tb_d_ts_a_tag_list = [(f'{stb_name} 0 "clummqfy" id={tb_name} t0=False t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64 t7="hpxzrdiw" t8=L"ncharTagValue" t11=127i8 t10=L"ncharTagValue"', 'bokaxl'), \ + (f'{stb_name} 0 "yqeztggb" id={tb_name} t0=F t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64 t7="gdtblmrc" t8=L"ncharTagValue" t11=127i8 t10=L"ncharTagValue"', 'bokaxl'), \ + (f'{stb_name} 0 "gbkinqdk" id={tb_name} t0=f t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64 t7="iqniuvco" t8=L"ncharTagValue" t11=127i8 t10=L"ncharTagValue"', 'bokaxl'), \ + (f'{stb_name} 0 "ldxxejbd" id={tb_name} t0=f t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64 t7="vxkipags" t8=L"ncharTagValue" t11=127i8 t10=L"ncharTagValue"', 'bokaxl'), \ + (f'{stb_name} 0 "tlvzwjes" id={tb_name} t0=true t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64 t7="enwrlrtj" t8=L"ncharTagValue" t11=127i8 t10=L"ncharTagValue"', 'bokaxl')] + self.multiThreadRun(self.genMultiThreadSeq(s_stb_s_tb_d_ts_a_tag_list)) + tdSql.query(f"show tables;") + tdSql.checkRows(1) if self.smlChildTableName_value == "ID" else tdSql.checkRows(6) + tdSql.query(f"select * from {stb_name}") + tdSql.checkRows(6) + for t in ["t10", "t11"]: + tdSql.query(f"select * from {stb_name} where {t} is not NULL;") + tdSql.checkRows(0) if self.smlChildTableName_value == "ID" else tdSql.checkRows(5) + + def sStbDtbDdataDtsInsertMultiThreadCheckCase(self): + """ + thread input same stb, different tb, data, ts + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() 
function is running') + tdCom.cleanTb() + input_sql, stb_name = self.genFullTypeSql(value="\"binaryTagValue\"") + self.resCmp(input_sql, stb_name) + s_stb_d_tb_d_ts_list = self.genSqlList(stb_name=stb_name)[10] + self.multiThreadRun(self.genMultiThreadSeq(s_stb_d_tb_d_ts_list)) + tdSql.query(f"show tables;") + tdSql.checkRows(6) + + def sStbDtbDdataDtsMtInsertMultiThreadCheckCase(self): + """ + thread input same stb, different tb, data, ts, add col, mul tag + """ + tdLog.info(f'{sys._getframe().f_code.co_name}() function is running') + tdCom.cleanTb() + input_sql, stb_name = self.genFullTypeSql(value="\"binaryTagValue\"") + self.resCmp(input_sql, stb_name) + s_stb_d_tb_d_ts_m_tag_list = [(f'{stb_name} 0 "mnpmtzul" t0=False t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64', 'pcppkg'), \ + (f'{stb_name} 0 "zbvwckcd" t0=True t1=126i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64', 'pcppkg'), \ + (f'{stb_name} 0 "vymcjfwc" t0=False t1=125i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64', 'pcppkg'), \ + (f'{stb_name} 0 "laumkwfn" t0=False t1=124i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64', 'pcppkg'), \ + (f'{stb_name} 0 "nyultzxr" t0=false t1=123i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64', 'pcppkg')] + self.multiThreadRun(self.genMultiThreadSeq(s_stb_d_tb_d_ts_m_tag_list)) + tdSql.query(f"show tables;") + tdSql.checkRows(6) + + def test(self): + try: + input_sql = f'test_nchar 0 L"涛思数据" t0=f t1=L"涛思数据" t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64' + self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None) + except SchemalessError as err: + print(err.errno) + + def runAll(self): + self.initCheckCase() + self.boolTypeCheckCase() + self.symbolsCheckCase() + self.tsCheckCase() + self.openTstbTelnetTsCheckCase() + self.idSeqCheckCase() + self.idLetterCheckCase() + self.noIdCheckCase() + self.maxColTagCheckCase() + self.stbTbNameCheckCase() + self.idStartWithNumCheckCase() + self.nowTsCheckCase() + self.dateFormatTsCheckCase() + self.illegalTsCheckCase() + self.tbnameCheckCase() + self.tagNameLengthCheckCase() + # self.tagValueLengthCheckCase() + self.colValueLengthCheckCase() + self.tagColIllegalValueCheckCase() + self.blankCheckCase() + self.duplicateIdTagColInsertCheckCase() + self.noIdStbExistCheckCase() + self.duplicateInsertExistCheckCase() + self.tagColBinaryNcharLengthCheckCase() + self.tagColAddDupIDCheckCase() + self.tagColAddCheckCase() + self.tagMd5Check() + # self.tagColNcharMaxLengthCheckCase() + # self.batchInsertCheckCase() + # self.multiInsertCheckCase(10) + self.batchErrorInsertCheckCase() + self.multiColsInsertCheckCase() + self.blankColInsertCheckCase() + self.blankTagInsertCheckCase() + self.chineseCheckCase() + self.multiFieldCheckCase() + self.spellCheckCase() + self.pointTransCheckCase() + self.defaultTypeCheckCase() + self.tbnameTagsColsNameCheckCase() + # # # MultiThreads + # self.stbInsertMultiThreadCheckCase() + # self.sStbStbDdataInsertMultiThreadCheckCase() + # self.sStbStbDdataAtInsertMultiThreadCheckCase() + # self.sStbStbDdataMtInsertMultiThreadCheckCase() + # self.sStbDtbDdataInsertMultiThreadCheckCase() + # self.sStbDtbDdataMtInsertMultiThreadCheckCase() + # self.sStbDtbDdataAtInsertMultiThreadCheckCase() + # self.sStbStbDdataDtsInsertMultiThreadCheckCase() + # # 
self.sStbStbDdataDtsMtInsertMultiThreadCheckCase() + # self.sStbStbDdataDtsAtInsertMultiThreadCheckCase() + # self.sStbDtbDdataDtsInsertMultiThreadCheckCase() + # self.sStbDtbDdataDtsMtInsertMultiThreadCheckCase() + + def run(self): + print("running {}".format(__file__)) + + try: + self.createDb() + self.runAll() + # self.createDb(protocol="telnet-tcp") + # self.initCheckCase('telnet-tcp') + # self.boolTypeCheckCase('telnet-tcp') + # self.symbolsCheckCase('telnet-tcp') + # self.idSeqCheckCase('telnet-tcp') + # self.idLetterCheckCase('telnet-tcp') + # self.noIdCheckCase('telnet-tcp') + # self.stbTbNameCheckCase('telnet-tcp') + # self.idStartWithNumCheckCase('telnet-tcp') + # self.pointTransCheckCase('telnet-tcp') + # self.tcpKeywordsCheckCase() + except Exception as err: + print(''.join(traceback.format_exception(None, err, err.__traceback__))) + raise err + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/system-test/1-insert/performanceInsert.json b/tests/system-test/1-insert/performanceInsert.json new file mode 100644 index 0000000000000000000000000000000000000000..de410c30f2fa1846d0318def447d1d09aff2cfea --- /dev/null +++ b/tests/system-test/1-insert/performanceInsert.json @@ -0,0 +1,79 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos/", + "host": "test216", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 8, + "thread_count_create_tbl": 8, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 1000, + "num_of_records_per_req": 100000, + "databases": [ + { + "dbinfo": { + "name": "db", + "drop": "yes", + "vgroups": 24 + }, + "super_tables": [ + { + "name": "stb", + "child_table_exists": "no", + "childtable_count": 100000, + "childtable_prefix": "stb_", + "auto_create_table": "no", + "batch_create_tbl_num": 50000, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 5, + "interlace_rows": 100000, + "insert_interval": 0, + "max_sql_len": 10000000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 10, + "start_timestamp": "2022-05-01 00:00:00.000", + "sample_format": "csv", + "use_sample_ts": "no", + "tags_file": "", + "columns": [ + { + "type": "INT" + }, + { + "type": "TINYINT", + "count": 1 + }, + {"type": "DOUBLE"}, + + { + "type": "BINARY", + "len": 40, + "count": 1 + }, + { + "type": "nchar", + "len": 20, + "count": 1 + } + ], + "tags": [ + { + "type": "TINYINT", + "count": 1 + }, + { + "type": "BINARY", + "len": 16, + "count": 1 + } + ] + } + ] + } + ] +} \ No newline at end of file diff --git a/tests/system-test/1-insert/performanceQuery.json b/tests/system-test/1-insert/performanceQuery.json new file mode 100644 index 0000000000000000000000000000000000000000..fe2991bd0f5f74401b437e24b6a6f8e4cd5ed721 --- /dev/null +++ b/tests/system-test/1-insert/performanceQuery.json @@ -0,0 +1,42 @@ +{ + "filetype": "query", + "cfgdir": "/etc/taos", + "host": "test216", + "port": 6030, + "user": "root", + "password": "taosdata", + "confirm_parameter_prompt": "no", + "databases": "db", + "query_times": 100, + "query_mode": "taosc", + "specified_table_query": { + "query_interval": 0, + "threads": 8, + "sqls": [ + { + "sql": "select count(*) from stb_0 ", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from stb_1 ", + "result": "./query_res1.txt" + }, + { + "sql": "select last(*) from stb_2 ", + "result": 
"./query_res2.txt" + }, + { + "sql": "select first(*) from stb_3 ", + "result": "./query_res3.txt" + }, + { + "sql": "select avg(c0),min(c2),max(c1) from stb_4", + "result": "./query_res4.txt" + }, + { + "sql": "select avg(c0),min(c2),max(c1) from stb_5 where ts <= '2022-05-01 20:00:00.500' and ts >= '2022-05-01 00:00:00.000' ", + "result": "./query_res5.txt" + } + ] + } +} \ No newline at end of file diff --git a/tests/system-test/1-insert/test_stmt_insert_query.py b/tests/system-test/1-insert/test_stmt_insert_query_ex.py similarity index 88% rename from tests/system-test/1-insert/test_stmt_insert_query.py rename to tests/system-test/1-insert/test_stmt_insert_query_ex.py index c6faedd35ee9f08e50310e5570a9be284d16ecc4..376b60d615941323bedcf40d591817e30c8da05a 100644 --- a/tests/system-test/1-insert/test_stmt_insert_query.py +++ b/tests/system-test/1-insert/test_stmt_insert_query_ex.py @@ -132,11 +132,11 @@ class TDTestCase: querystmt.bind_param(queryparam) querystmt.execute() result=querystmt.use_result() - rows=result.fetch_all() - print( querystmt.use_result()) + # rows=result.fetch_all() + # print( querystmt.use_result()) # result = conn.query("select * from log") - # rows=result.fetch_all() + rows=result.fetch_all() # rows=result.fetch_all() print(rows) assert rows[1][0] == "ts" @@ -213,7 +213,7 @@ class TDTestCase: params[11].float([3, None, 1]) params[12].double([3, None, 1.2]) params[13].binary(["abc", "dddafadfadfadfadfa", None]) - params[14].nchar(["涛思数据", None, "a long string with 中文字符"]) + params[14].nchar(["涛思数据", None, "a? long string with 中文字符"]) params[15].timestamp([None, None, 1626861392591]) stmt.bind_param_batch(params) @@ -230,9 +230,31 @@ class TDTestCase: querystmt1.execute() result1=querystmt1.use_result() rows1=result1.fetch_all() - assert str(rows1[0][0]) == "2021-07-21 17:56:32.589111" - assert rows1[0][10] == 3 - assert rows1[1][10] == 4 + print("1",rows1) + + querystmt2=conn.statement("select abs(?) from log where bu < ?") + queryparam2=new_bind_params(2) + print(type(queryparam2)) + queryparam2[0].int(5) + queryparam2[1].int(5) + querystmt2.bind_param(queryparam2) + querystmt2.execute() + result2=querystmt2.use_result() + rows2=result2.fetch_all() + print("2",rows2) + + querystmt3=conn.statement("select abs(?) from log where nn= 'a? long string with 中文字符' ") + queryparam3=new_bind_params(1) + print(type(queryparam3)) + queryparam3[0].int(5) + querystmt3.bind_param(queryparam3) + querystmt3.execute() + result3=querystmt3.use_result() + rows3=result3.fetch_all() + print("3",rows3) + # assert str(rows1[0][0]) == "2021-07-21 17:56:32.589111" + # assert rows1[0][10] == 3 + # assert rows1[1][10] == 4 # conn.execute("drop database if exists %s" % dbname) conn.close() @@ -247,7 +269,6 @@ class TDTestCase: config = buildPath+ "../sim/dnode1/cfg/" host="localhost" connectstmt=self.newcon(host,config) - print(connectstmt) self.test_stmt_insert_multi(connectstmt) connectstmt=self.newcon(host,config) self.test_stmt_set_tbname_tag(connectstmt) diff --git a/tests/system-test/1-insert/test_stmt_muti_insert_query.py b/tests/system-test/1-insert/test_stmt_muti_insert_query.py new file mode 100644 index 0000000000000000000000000000000000000000..486bcd806219c73fa344e5422727c46fe03cde5e --- /dev/null +++ b/tests/system-test/1-insert/test_stmt_muti_insert_query.py @@ -0,0 +1,181 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. 
+# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import os +import threading as thd +import multiprocessing as mp +from numpy.lib.function_base import insert +import taos +from taos import * +from util.log import * +from util.cases import * +from util.sql import * +import numpy as np +import datetime as dt +from datetime import datetime +from ctypes import * +import time +# constant define +WAITS = 5 # wait seconds + +class TDTestCase: + # + # --------------- main frame ------------------- + def caseDescription(self): + ''' + stmt multi-bind insert and query test cases; + case1: insert multiple rows in one bind_param_batch call + case2: query the inserted rows with bound parameters + ''' + return + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosd" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root)-len("/build/bin")] + break + return buildPath + + # init + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor()) + # tdSql.prepare() + # self.create_tables(); + self.ts = 1500000000000 + + # stop + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + + # --------------- case ------------------- + + + def newcon(self,host,cfg): + user = "root" + password = "taosdata" + port = 6030 + con = taos.connect(host=host, user=user, password=password, config=cfg, port=port) + print(con) + return con + + def test_stmt_insert_multi(self,conn): + # type: (TaosConnection) -> None + + dbname = "pytest_taos_stmt_multi" + try: + conn.execute("drop database if exists %s" % dbname) + conn.execute("create database if not exists %s" % dbname) + conn.select_db(dbname) + + conn.execute( + "create table if not exists log(ts timestamp, bo bool, nil tinyint, ti tinyint, si smallint, ii int,\ + bi bigint, tu tinyint unsigned, su smallint unsigned, iu int unsigned, bu bigint unsigned, \ + ff float, dd double, bb binary(100), nn nchar(100), tt timestamp)", + ) + # conn.load_table_info("log") + + start = datetime.now() + stmt = conn.statement("insert into log values(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)") + + params = new_multi_binds(16) + params[0].timestamp((1626861392589, 1626861392590, 1626861392591)) + params[1].bool((True, None, False)) + params[2].tinyint([-128, -128, None]) # -128 is tinyint null + params[3].tinyint([0, 127, None]) + params[4].smallint([3, None, 2]) + params[5].int([3, 4, None]) + params[6].bigint([3, 4, None]) + params[7].tinyint_unsigned([3, 4, None]) + params[8].smallint_unsigned([3, 4, None]) + params[9].int_unsigned([3, 4, None]) + params[10].bigint_unsigned([3, 4, None]) + params[11].float([3, None, 1]) + params[12].double([3, None, 1.2]) + params[13].binary(["abc", "dddafadfadfadfadfa", None]) + params[14].nchar(["涛思数据", None, "a long string with 中文字符"]) + params[15].timestamp([None, None, 1626861392591]) + # print(type(stmt)) + stmt.bind_param_batch(params) + stmt.execute() + end = datetime.now() +
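# note (editor's comment): each multi-bind column above carries three values, so this single bind_param_batch/execute round trip writes three rows, e.g. params[5].int([3, 4, None]) becomes the ii column of rows 1..3 with None stored as NULL; affected_rows is checked below +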
print("elapsed time: ", end - start) + assert stmt.affected_rows == 3 + + #query + querystmt=conn.statement("select ?,bu from log") + queryparam=new_bind_params(1) + print(type(queryparam)) + queryparam[0].binary("ts") + querystmt.bind_param(queryparam) + querystmt.execute() + result=querystmt.use_result() + # rows=result.fetch_all() + # print( querystmt.use_result()) + + # result = conn.query("select * from log") + rows=result.fetch_all() + # rows=result.fetch_all() + print(rows) + assert rows[1][0] == "ts" + assert rows[0][1] == 3 + + #query + querystmt1=conn.statement("select * from log where bu < ?") + queryparam1=new_bind_params(1) + print(type(queryparam1)) + queryparam1[0].int(4) + querystmt1.bind_param(queryparam1) + querystmt1.execute() + result1=querystmt1.use_result() + rows1=result1.fetch_all() + print(rows1) + assert str(rows1[0][0]) == "2021-07-21 17:56:32.589000" + assert rows1[0][10] == 3 + + + stmt.close() + + # conn.execute("drop database if exists %s" % dbname) + conn.close() + + except Exception as err: + # conn.execute("drop database if exists %s" % dbname) + conn.close() + raise err + + def run(self): + buildPath = self.getBuildPath() + config = buildPath+ "../sim/dnode1/cfg/" + host="localhost" + connectstmt=self.newcon(host,config) + self.test_stmt_insert_multi(connectstmt) + return + + +# add case with filename +# +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) \ No newline at end of file diff --git a/tests/system-test/1-insert/test_stmt_set_tbname_tag.py b/tests/system-test/1-insert/test_stmt_set_tbname_tag.py new file mode 100644 index 0000000000000000000000000000000000000000..54d5cfbafb0b3f98d55f310accccb19ef693c08b --- /dev/null +++ b/tests/system-test/1-insert/test_stmt_set_tbname_tag.py @@ -0,0 +1,176 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import os +import threading as thd +import multiprocessing as mp +from numpy.lib.function_base import insert +import taos +from taos import * +from util.log import * +from util.cases import * +from util.sql import * +import numpy as np +import datetime as dt +from datetime import datetime +from ctypes import * +import time +# constant define +WAITS = 5 # wait seconds + +class TDTestCase: + # + # --------------- main frame ------------------- + def caseDescription(self): + ''' + stmt set_tbname_tags insert and query test cases; + case1: create a child table and insert rows via set_tbname_tags + case2: query the inserted rows with bound parameters + ''' + return + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosd" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root)-len("/build/bin")] + break + return buildPath + + # init + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor()) + # tdSql.prepare() + # self.create_tables(); + self.ts = 1500000000000 + + # stop + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + + # --------------- case ------------------- + + + def newcon(self,host,cfg): + user = "root" + password = "taosdata" + port = 6030 + con = taos.connect(host=host, user=user, password=password, config=cfg, port=port) + print(con) + return con + + def test_stmt_set_tbname_tag(self,conn): + dbname = "pytest_taos_stmt_set_tbname_tag" + + try: + conn.execute("drop database if exists %s" % dbname) + conn.execute("create database if not exists %s PRECISION 'us' " % dbname) + conn.select_db(dbname) + conn.execute("create table if not exists log(ts timestamp, bo bool, nil tinyint, ti tinyint, si smallint, ii int,\ + bi bigint, tu tinyint unsigned, su smallint unsigned, iu int unsigned, bu bigint unsigned, \ + ff float, dd double, bb binary(100), nn nchar(100), tt timestamp , vc varchar(100)) tags (t1 timestamp, t2 bool,\ + t3 tinyint, t4 tinyint, t5 smallint, t6 int, t7 bigint, t8 tinyint unsigned, t9 smallint unsigned, \ + t10 int unsigned, t11 bigint unsigned, t12 float, t13 double, t14 binary(100), t15 nchar(100), t16 timestamp)") + + stmt = conn.statement("insert into ? using log tags (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?) 
\ + values (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)") + tags = new_bind_params(16) + tags[0].timestamp(1626861392589123, PrecisionEnum.Microseconds) + tags[1].bool(True) + tags[2].null() + tags[3].tinyint(2) + tags[4].smallint(3) + tags[5].int(4) + tags[6].bigint(5) + tags[7].tinyint_unsigned(6) + tags[8].smallint_unsigned(7) + tags[9].int_unsigned(8) + tags[10].bigint_unsigned(9) + tags[11].float(10.1) + tags[12].double(10.11) + tags[13].binary("hello") + tags[14].nchar("stmt") + tags[15].timestamp(1626861392589, PrecisionEnum.Milliseconds) + stmt.set_tbname_tags("tb1", tags) + params = new_multi_binds(16) + params[0].timestamp((1626861392589111, 1626861392590111, 1626861392591111)) + params[1].bool((True, None, False)) + params[2].tinyint([-128, -128, None]) # -128 is tinyint null + params[3].tinyint([0, 127, None]) + params[4].smallint([3, None, 2]) + params[5].int([3, 4, None]) + params[6].bigint([3, 4, None]) + params[7].tinyint_unsigned([3, 4, None]) + params[8].smallint_unsigned([3, 4, None]) + params[9].int_unsigned([3, 4, None]) + params[10].bigint_unsigned([3, 4, 5]) + params[11].float([3, None, 1]) + params[12].double([3, None, 1.2]) + params[13].binary(["abc", "dddafadfadfadfadfa", None]) + params[14].nchar(["涛思数据", None, "a long string with 中文字符"]) + params[15].timestamp([None, None, 1626861392591]) + params[16].binary(["涛思数据16", None, "a long string with 中文-字符"]) + + stmt.bind_param_batch(params) + stmt.execute() + + assert stmt.affected_rows == 3 + + #query + querystmt1=conn.statement("select * from log where bu < ?") + queryparam1=new_bind_params(1) + print(type(queryparam1)) + queryparam1[0].int(5) + querystmt1.bind_param(queryparam1) + querystmt1.execute() + result1=querystmt1.use_result() + rows1=result1.fetch_all() + print(rows1) + # assert str(rows1[0][0]) == "2021-07-21 17:56:32.589111" + # assert rows1[0][10] == 3 + # assert rows1[1][10] == 4 + + # conn.execute("drop database if exists %s" % dbname) + conn.close() + + except Exception as err: + # conn.execute("drop database if exists %s" % dbname) + conn.close() + raise err + + def run(self): + buildPath = self.getBuildPath() + config = buildPath+ "../sim/dnode1/cfg/" + host="localhost" + connectstmt=self.newcon(host,config) + self.test_stmt_set_tbname_tag(connectstmt) + + return + + +# add case with filename +# +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) \ No newline at end of file diff --git a/tests/system-test/2-query/apercentile.py b/tests/system-test/2-query/apercentile.py new file mode 100644 index 0000000000000000000000000000000000000000..150c4d3f17e30ab5f4d25fb19af2bb80ee202776 --- /dev/null +++ b/tests/system-test/2-query/apercentile.py @@ -0,0 +1,107 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys + +from util.log import * +from util.cases import * +from util.sql import * +import numpy as np + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor()) + self.rowNum = 10 + self.ts = 1537146000000 + + def check_apercentile(self,data,expect_data,param,percent,column): + if param in ("default", "t-digest"): + if abs(expect_data - data) <= expect_data * 0.2: + tdLog.info(f"apercentile function values check success with col{column}, param = {param},percent = {percent}") + else: + tdLog.notice(f"apercentile function value is not as expected with col{column}, param = {param},percent = {percent}") + sys.exit(1) + + def run(self): + tdSql.prepare() + + intData = [] + floatData = [] + percent_list = [0,50,100] + param_list = ['default','t-digest'] + tdSql.execute('''create table test(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double, + col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned)''') + for i in range(self.rowNum): + tdSql.execute("insert into test values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)" + % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1)) + intData.append(i + 1) + floatData.append(i + 0.1) + + # percentile verification + + tdSql.error("select apercentile(ts ,20) from test") + tdSql.error("select apercentile(col7 ,20) from test") + tdSql.error("select apercentile(col8 ,20) from test") + tdSql.error("select apercentile(col9 ,20) from test") + + column_list = [1,2,3,4,5,6,11,12,13,14] + + for i in column_list: + for j in percent_list: + for k in param_list: + tdSql.query(f"select apercentile(col{i},{j},'{k}') from test") + data = tdSql.getData(0, 0) + tdSql.query(f"select percentile(col{i},{j}) from test") + expect_data = tdSql.getData(0, 0) + self.check_apercentile(data,expect_data,k,j,i) + + error_param_list = [-1,101,'"a"'] + for i in error_param_list: + tdSql.error(f'select apercentile(col1,{i}) from test') + + tdSql.execute("create table meters (ts timestamp, voltage int) tags(loc nchar(20))") + tdSql.execute("create table t0 using meters tags('beijing')") + tdSql.execute("create table t1 using meters tags('shanghai')") + for i in range(self.rowNum): + tdSql.execute("insert into t0 values(%d, %d)" % (self.ts + i, i + 1)) + tdSql.execute("insert into t1 values(%d, %d)" % (self.ts + i, i + 1)) + + column_list = ['voltage'] + for i in column_list: + for j in percent_list: + for k in param_list: + tdSql.query(f"select apercentile({i}, {j},'{k}') from t0") + data = tdSql.getData(0, 0) + tdSql.query(f"select percentile({i},{j}) from t0") + expect_data = tdSql.getData(0,0) + self.check_apercentile(data,expect_data,k,j,i) + tdSql.query(f"select apercentile({i}, {j},'{k}') from meters") +
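# note (editor's comment): apercentile over the super table aggregates across both child tables, so exactly one result row is expected +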
tdSql.checkRows(1) + table_list = ["meters","t0"] + for i in error_param_list: + for j in table_list: + for k in column_list: + tdSql.error(f'select apercentile({k},{i}) from {j}') + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/system-test/2-query/avg.py b/tests/system-test/2-query/avg.py new file mode 100644 index 0000000000000000000000000000000000000000..20ee6df7fcf94e3b02641b735c6ad7fd1ce862ff --- /dev/null +++ b/tests/system-test/2-query/avg.py @@ -0,0 +1,424 @@ +import taos +import sys +import time +import datetime +import inspect + +from util.log import * +from util.sql import * +from util.cases import * + +class TDTestCase: + updatecfgDict = {'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 , + "jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143, + "wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"fnDebugFlag":143} + def init(self, conn, logSql): + tdLog.debug(f"start to execute {__file__}") + tdSql.init(conn.cursor(), True) + + def prepare_datas(self): + tdSql.execute( + '''create table stb1 + (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) + tags (t1 int) + ''' + ) + + tdSql.execute( + ''' + create table t1 + (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) + ''' + ) + for i in range(4): + tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )') + + for i in range(9): + tdSql.execute( + f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )" + ) + tdSql.execute( + f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )" + ) + tdSql.execute("insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )") + tdSql.execute("insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )") + tdSql.execute("insert into ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )") + tdSql.execute("insert into ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )") + + tdSql.execute("insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + tdSql.execute("insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + tdSql.execute("insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + + tdSql.execute( + f'''insert into t1 values + ( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a ) + ( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a ) + ( '2021-01-01 01:01:06.000', 3, 33333, 333, 33, 3.33, 33.33, 0, "binary3", "nchar3", now()+3a ) + ( '2021-05-07 01:01:10.000', 4, 44444, 444, 44, 4.44, 44.44, 1, "binary4", "nchar4", now()+4a ) + ( '2021-07-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 
NULL, NULL ) + ( '2021-09-30 01:01:16.000', 5, 55555, 555, 55, 5.55, 55.55, 0, "binary5", "nchar5", now()+5a ) + ( '2022-02-01 01:01:20.000', 6, 66666, 666, 66, 6.66, 66.66, 1, "binary6", "nchar6", now()+6a ) + ( '2022-10-28 01:01:26.000', 7, 00000, 000, 00, 0.00, 00.00, 1, "binary7", "nchar7", "1970-01-01 08:00:00.000" ) + ( '2022-12-01 01:01:30.000', 8, -88888, -888, -88, -8.88, -88.88, 0, "binary8", "nchar8", "1969-01-01 01:00:00.000" ) + ( '2022-12-31 01:01:36.000', 9, -99999999999999999, -999, -99, -9.99, -999999999999999999999.99, 1, "binary9", "nchar9", "1900-01-01 00:00:00.000" ) + ( '2023-02-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ''' + ) + + def check_avg(self ,origin_query , check_query): + avg_result = tdSql.getResult(origin_query) + origin_result = tdSql.getResult(check_query) + + check_status = True + for row_index , row in enumerate(avg_result): + for col_index , elem in enumerate(row): + if avg_result[row_index][col_index] != origin_result[row_index][col_index]: + check_status = False + if not check_status: + tdLog.notice("avg function value is not as expected , sql is \"%s\" "%origin_query ) + sys.exit(1) + else: + tdLog.info("avg value check pass , it works as expected ,sql is \"%s\" "%check_query ) + + def test_errors(self): + error_sql_lists = [ + "select avg from t1", + # "select avg(-+--+c1) from t1", + # "select +-avg(c1) from t1", + # "select ++-avg(c1) from t1", + # "select ++--avg(c1) from t1", + # "select - -avg(c1)*0 from t1", + # "select avg(tbname+1) from t1 ", + "select avg(123--123)==1 from t1", + "select avg(c1) as 'd1' from t1", + "select avg(c1 ,c2 ) from t1", + "select avg(c1 ,NULL) from t1", + "select avg(,) from t1;", + "select avg(avg(c1) ab from t1)", + "select avg(c1) as int from t1", + "select avg from stb1", + # "select avg(-+--+c1) from stb1", + # "select +-avg(c1) from stb1", + # "select ++-avg(c1) from stb1", + # "select ++--avg(c1) from stb1", + # "select - -avg(c1)*0 from stb1", + # "select avg(tbname+1) from stb1 ", + "select avg(123--123)==1 from stb1", + "select avg(c1) as 'd1' from stb1", + "select avg(c1 ,c2 ) from stb1", + "select avg(c1 ,NULL) from stb1", + "select avg(,) from stb1;", + "select avg(avg(c1) ab from stb1)", + "select avg(c1) as int from stb1" + ] + for error_sql in error_sql_lists: + tdSql.error(error_sql) + + def support_types(self): + type_error_sql_lists = [ + "select avg(ts) from t1" , + "select avg(c7) from t1", + "select avg(c8) from t1", + "select avg(c9) from t1", + "select avg(ts) from ct1" , + "select avg(c7) from ct1", + "select avg(c8) from ct1", + "select avg(c9) from ct1", + "select avg(ts) from ct3" , + "select avg(c7) from ct3", + "select avg(c8) from ct3", + "select avg(c9) from ct3", + "select avg(ts) from ct4" , + "select avg(c7) from ct4", + "select avg(c8) from ct4", + "select avg(c9) from ct4", + "select avg(ts) from stb1" , + "select avg(c7) from stb1", + "select avg(c8) from stb1", + "select avg(c9) from stb1" , + + "select avg(ts) from stbbb1" , + "select avg(c7) from stbbb1", + + "select avg(ts) from tbname", + "select avg(c9) from tbname" + + ] + + for type_sql in type_error_sql_lists: + tdSql.error(type_sql) + + + type_sql_lists = [ + "select avg(c1) from t1", + "select avg(c2) from t1", + "select avg(c3) from t1", + "select avg(c4) from t1", + "select avg(c5) from t1", + "select avg(c6) from t1", + + "select avg(c1) from ct1", + "select avg(c2) from ct1", + "select avg(c3) from ct1", + "select avg(c4) from ct1", + "select avg(c5) from ct1", + "select 
avg(c6) from ct1", + + "select avg(c1) from ct3", + "select avg(c2) from ct3", + "select avg(c3) from ct3", + "select avg(c4) from ct3", + "select avg(c5) from ct3", + "select avg(c6) from ct3", + + "select avg(c1) from stb1", + "select avg(c2) from stb1", + "select avg(c3) from stb1", + "select avg(c4) from stb1", + "select avg(c5) from stb1", + "select avg(c6) from stb1", + + "select avg(c6) as alisb from stb1", + "select avg(c6) alisb from stb1", + ] + + for type_sql in type_sql_lists: + tdSql.query(type_sql) + + def basic_avg_function(self): + + # basic query + tdSql.query("select c1 from ct3") + tdSql.checkRows(0) + tdSql.query("select c1 from t1") + tdSql.checkRows(12) + tdSql.query("select c1 from stb1") + tdSql.checkRows(25) + + # used for empty table , ct3 is empty + tdSql.query("select avg(c1) from ct3") + tdSql.checkRows(0) + tdSql.query("select avg(c2) from ct3") + tdSql.checkRows(0) + tdSql.query("select avg(c3) from ct3") + tdSql.checkRows(0) + tdSql.query("select avg(c4) from ct3") + tdSql.checkRows(0) + tdSql.query("select avg(c5) from ct3") + tdSql.checkRows(0) + tdSql.query("select avg(c6) from ct3") + + # used for regular table + tdSql.query("select avg(c1) from t1") + tdSql.checkData(0, 0, 5.000000000) + + + tdSql.query("select ts,c1, c2, c3 , c4, c5 from t1") + tdSql.checkData(1, 5, 1.11000) + tdSql.checkData(3, 4, 33) + tdSql.checkData(5, 5, None) + self.check_avg(" select avg(c1) , avg(c2) , avg(c3) from t1 " , " select sum(c1)/count(c1) , sum(c2)/count(c2) , sum(c3)/count(c3) from t1 ") + + # used for sub table + tdSql.query("select avg(c1) from ct1") + tdSql.checkData(0, 0, 4.846153846) + + tdSql.query("select avg(c1) from ct3") + tdSql.checkRows(0) + + self.check_avg(" select avg(abs(c1)) , avg(abs(c2)) , avg(abs(c3)) from t1 " , " select sum(abs(c1))/count(c1) , sum(abs(c2))/count(c2) , sum(abs(c3))/count(c3) from t1 ") + self.check_avg(" select avg(abs(c1)) , avg(abs(c2)) , avg(abs(c3)) from stb1 " , " select sum(abs(c1))/count(c1) , sum(abs(c2))/count(c2) , sum(abs(c3))/count(c3) from stb1 ") + + # used for stable table + + tdSql.query("select avg(c1) from stb1") + tdSql.checkRows(1) + + self.check_avg(" select avg(abs(ceil(c1))) , avg(abs(ceil(c2))) , avg(abs(ceil(c3))) from stb1 " , " select sum(abs(ceil(c1)))/count(c1) , sum(abs(ceil(c2)))/count(c2) , sum(abs(ceil(c3)))/count(c3) from stb1 ") + + # used for not exists table + tdSql.error("select avg(c1) from stbbb1") + tdSql.error("select avg(c1) from tbname") + tdSql.error("select avg(c1) from ct5") + + # mix with common col + tdSql.error("select c1, avg(c1) from ct1") + tdSql.error("select c1, avg(c1) from ct4") + + + # mix with common functions + tdSql.error("select c1, avg(c1),c5, floor(c5) from ct4 ") + tdSql.error("select c1, avg(c1),c5, floor(c5) from stb1 ") + + # mix with agg functions , not support + tdSql.error("select c1, avg(c1),c5, count(c5) from stb1 ") + tdSql.error("select c1, avg(c1),c5, count(c5) from ct1 ") + tdSql.error("select c1, count(c5) from ct1 ") + tdSql.error("select c1, count(c5) from stb1 ") + + # agg functions mix with agg functions + + tdSql.query(" select max(c5), count(c5) , avg(c5) from stb1 ") + tdSql.checkData(0, 0, 8.88000 ) + tdSql.checkData(0, 1, 22 ) + tdSql.checkData(0, 2, 2.270454591 ) + + tdSql.query(" select max(c5), count(c5) , avg(c5) ,elapsed(ts) , spread(c1) from ct1; ") + tdSql.checkData(0, 0, 8.88000 ) + tdSql.checkData(0, 1, 13 ) + tdSql.checkData(0, 2, 0.768461603 ) + + # bug fix for count + tdSql.query("select count(c1) from ct4 ") + 
tdSql.checkData(0,0,9) + tdSql.query("select count(*) from ct4 ") + tdSql.checkData(0,0,12) + tdSql.query("select count(c1) from stb1 ") + tdSql.checkData(0,0,22) + tdSql.query("select count(*) from stb1 ") + tdSql.checkData(0,0,25) + + # bug fix for compute + tdSql.error("select c1, avg(c1) -0 ,ceil(c1)-0 from ct4 ") + tdSql.error(" select c1, avg(c1) -0 ,avg(ceil(c1-0.1))-0.1 from ct4") + + # mix with nest query + self.check_avg("select avg(col) from (select abs(c1) col from stb1)" , "select avg(abs(c1)) from stb1") + self.check_avg("select avg(col) from (select ceil(abs(c1)) col from stb1)" , "select avg(abs(c1)) from stb1") + + tdSql.query(" select abs(avg(abs(abs(c1)))) from stb1 ") + tdSql.checkData(0, 0, 4.500000000) + tdSql.query(" select abs(avg(abs(abs(c1)))) from t1 ") + tdSql.checkData(0, 0, 5.000000000) + + tdSql.query(" select abs(avg(abs(abs(c1)))) from stb1 ") + tdSql.checkData(0, 0, 4.500000000) + + tdSql.query(" select avg(c1) from stb1 where c1 is null ") + tdSql.checkRows(0) + + + def avg_func_filter(self): + tdSql.execute("use db") + tdSql.query(" select avg(c1), avg(c1) -0 ,avg(ceil(c1-0.1))-0 ,avg(floor(c1+0.1))-0.1 ,avg(ceil(log(c1,2)-0.5)) from ct4 where c1>5 ") + tdSql.checkRows(1) + tdSql.checkData(0,0,7.000000000) + tdSql.checkData(0,1,7.000000000) + tdSql.checkData(0,2,7.000000000) + tdSql.checkData(0,3,6.900000000) + tdSql.checkData(0,4,3.000000000) + + tdSql.query("select avg(c1), avg(c1) -0 ,avg(ceil(c1-0.1))-0 ,avg(floor(c1+0.1))-0.1 ,avg(ceil(log(c1,2)-0.5)) from ct4 where c1=5 ") + tdSql.checkRows(1) + tdSql.checkData(0,0,5.000000000) + tdSql.checkData(0,1,5.000000000) + tdSql.checkData(0,2,5.000000000) + tdSql.checkData(0,3,4.900000000) + tdSql.checkData(0,4,2.000000000) + + tdSql.query("select avg(c1) ,avg(c2) , avg(c1) -0 , avg(ceil(c1-0.1))-0 ,avg(floor(c1+0.1))-0.1 ,avg(ceil(log(c1,2))-0.5) from ct4 where c1>log(c1,2) limit 1 ") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 4.500000000) + tdSql.checkData(0, 1, 49999.500000000) + tdSql.checkData(0, 5, 1.625000000) + + def avg_Arithmetic(self): + pass + + def check_boundary_values(self): + + tdSql.execute("drop database if exists bound_test") + tdSql.execute("create database if not exists bound_test") + time.sleep(3) + tdSql.execute("use bound_test") + tdSql.execute( + "create table stb_bound (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp) tags (t1 int);" + ) + tdSql.execute(f'create table sub1_bound using stb_bound tags ( 1 )') + tdSql.execute( + f"insert into sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" + ) + tdSql.execute( + f"insert into sub1_bound values ( now()-1s, -2147483647, -9223372036854775807, -32767, -127, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" + ) + tdSql.execute( + f"insert into sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" + ) + + tdSql.execute( + f"insert into sub1_bound values ( now(), 2147483645, 9223372036854775805, 32765, 125, 3.40E+37, 1.7e+307, True, 'binary_tb1', 'nchar_tb1', now() )" + ) + + tdSql.execute( + f"insert into sub1_bound values ( now(), 2147483644, 9223372036854775804, 32764, 124, 3.40E+37, 1.7e+307, True, 'binary_tb1', 'nchar_tb1', now() )" + ) + + tdSql.execute( + f"insert into sub1_bound values ( now(), -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 
'binary_tb1', 'nchar_tb1', now() )" + ) + tdSql.execute( + f"insert into sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" + ) + + + tdSql.error( + f"insert into sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" + ) + self.check_avg("select avg(c1), avg(c2), avg(c3) , avg(c4), avg(c5) ,avg(c6) from sub1_bound " , " select sum(c1)/count(c1), sum(c2)/count(c2) ,sum(c3)/count(c3), sum(c4)/count(c4), sum(c5)/count(c5) ,sum(c6)/count(c6) from sub1_bound ") + + + # check basic elem for table per row + tdSql.query("select avg(c1) ,avg(c2) , avg(c3) , avg(c4), avg(c5), avg(c6) from sub1_bound ") + tdSql.checkRows(1) + tdSql.checkData(0,0,920350133.571428537) + tdSql.checkData(0,1,1.3176245766935393e+18) + tdSql.checkData(0,2,14042.142857143) + tdSql.checkData(0,3,53.571428571) + tdSql.checkData(0,4,5.828571332045761e+37) + # tdSql.checkData(0,5,None) + + + # check + - * / in functions + tdSql.query(" select avg(c1+1) ,avg(c2) , avg(c3*1) , avg(c4/2), avg(c5)/2, avg(c6) from sub1_bound ") + tdSql.checkData(0,0,920350134.5714285) + tdSql.checkData(0,1,1.3176245766935393e+18) + tdSql.checkData(0,2,14042.142857143) + tdSql.checkData(0,3,26.785714286) + tdSql.checkData(0,4,2.9142856660228804e+37) + # tdSql.checkData(0,5,None) + + + + def run(self): # sourcery skip: extract-duplicate-method, remove-redundant-fstring + tdSql.prepare() + + tdLog.printNoPrefix("==========step1:create table ==============") + + self.prepare_datas() + + tdLog.printNoPrefix("==========step2:test errors ==============") + + self.test_errors() + + tdLog.printNoPrefix("==========step3:support types ============") + + self.support_types() + + tdLog.printNoPrefix("==========step4: avg basic query ============") + + self.basic_avg_function() + + tdLog.printNoPrefix("==========step5: avg boundary query ============") + + self.check_boundary_values() + + tdLog.printNoPrefix("==========step6: avg filter query ============") + + self.avg_func_filter() + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/2-query/bottom.py b/tests/system-test/2-query/bottom.py index 96ae73c6c46fc447692f8e3eec93bfa668f24887..a4390372dfa13ae4d6db6e545fc472b0395aed53 100644 --- a/tests/system-test/2-query/bottom.py +++ b/tests/system-test/2-query/bottom.py @@ -80,6 +80,9 @@ class TDTestCase: tdSql.checkRows(2) tdSql.checkEqual(tdSql.queryResult,[(2,),(1,)]) + tdSql.query("select bottom(col13,50) from test") + tdSql.checkRows(10) + tdSql.query("select bottom(col14, 2) from test") tdSql.checkRows(2) tdSql.checkEqual(tdSql.queryResult,[(2,),(1,)]) @@ -91,6 +94,7 @@ class TDTestCase: tdSql.query('select bottom(col2,1) from test interval(1y) order by col2') tdSql.checkData(0,0,1) + tdSql.error('select * from test where bottom(col2,1)=1') diff --git a/tests/system-test/2-query/csum.py b/tests/system-test/2-query/csum.py new file mode 100644 index 0000000000000000000000000000000000000000..a331311fd2e841da5fd4f6da86ccb27834fcbc69 --- /dev/null +++ b/tests/system-test/2-query/csum.py @@ -0,0 +1,428 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import subprocess +import random +import math +import numpy as np +import inspect +import re + +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor()) + + def csum_query_form(self, col="c1", alias="", table_expr="t1", condition=""): + + ''' + csum function: + :param col: string, column name, required parameter; + :param alias: string, an alias for the result column, or extra select items, e.g. ', last(col)' or 'c2'; + :param table_expr: string or expression, data source (e.g. table/stable name, result set), required parameter; + :param condition: expression; + :return: csum query statement, default: select csum(c1) from t1 + ''' + + return f"select csum({col}) {alias} from {table_expr} {condition}" + + def checkcsum(self,col="c1", alias="", table_expr="t1", condition="" ): + line = sys._getframe().f_back.f_lineno + pre_sql = self.csum_query_form( + col=col, table_expr=table_expr, condition=condition + ).replace("csum", "count") + tdSql.query(pre_sql) + + if tdSql.queryRows == 0: + tdSql.query(self.csum_query_form( + col=col, alias=alias, table_expr=table_expr, condition=condition + )) + print(f"case in {line}: ", end='') + tdSql.checkRows(0) + return + + if "order by tbname" in condition: + tdSql.error(self.csum_query_form( + col=col, alias=alias, table_expr=table_expr, condition=condition + )) + return + + if "group" in condition: + + tb_condition = condition.split("group by")[1].split(" ")[1] + tdSql.query(f"select distinct {tb_condition} from {table_expr}") + query_result = tdSql.queryResult + query_rows = tdSql.queryRows + clear_condition = re.sub('order by [0-9a-z]*|slimit [0-9]*|soffset [0-9]*', "", condition) + + pre_row = 0 + for i in range(query_rows): + group_name = query_result[i][0] + if "where" in clear_condition: + pre_condition = re.sub('group by [0-9a-z]*', f"{tb_condition}='{group_name}'", clear_condition) + else: + pre_condition = "where " + re.sub('group by [0-9a-z]*',f"{tb_condition}='{group_name}'", clear_condition) + + tdSql.query(f"select {col} {alias} from {table_expr} {pre_condition}") + pre_data = np.array(tdSql.queryResult)[np.array(tdSql.queryResult) != None] + print("data is ", pre_data) + pre_csum = np.cumsum(pre_data) + tdSql.query(self.csum_query_form( + col=col, alias=alias, table_expr=table_expr, condition=condition + )) + for j in range(len(pre_csum)): + print(f"case in {line}:", end='') + tdSql.checkData(pre_row+j, 1, pre_csum[j]) + pre_row += len(pre_csum) + return + elif "union" in condition: + union_sql_0 = self.csum_query_form( + col=col, alias=alias, table_expr=table_expr, condition=condition + ).split("union all")[0] + + union_sql_1 = self.csum_query_form( + col=col, alias=alias, table_expr=table_expr, condition=condition + ).split("union all")[1] + + tdSql.query(union_sql_0) + union_csum_0 = tdSql.queryResult + row_union_0 = tdSql.queryRows + + tdSql.query(union_sql_1) + union_csum_1 = tdSql.queryResult + + tdSql.query(self.csum_query_form( + col=col, alias=alias, table_expr=table_expr, condition=condition + )) +
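# note (editor's comment): the union-all result should be the two per-branch csum sequences concatenated in order, which the loop below verifies row by row +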
for i in range(tdSql.queryRows): + print(f"case in {line}: ", end='') + if i < row_union_0: + tdSql.checkData(i, 0, union_csum_0[i][0]) + else: + tdSql.checkData(i, 0, union_csum_1[i-row_union_0][0]) + return + + else: + tdSql.query(f"select {col} from {table_expr} {re.sub('limit [0-9]*|offset [0-9]*','',condition)}") + offset_val = int(condition.split("offset")[1].split(" ")[1]) if "offset" in condition else 0 + pre_result = np.array(tdSql.queryResult)[np.array(tdSql.queryResult) != None] + pre_csum = np.cumsum(pre_result)[offset_val:] + tdSql.query(self.csum_query_form( + col=col, alias=alias, table_expr=table_expr, condition=condition + )) + + for i in range(tdSql.queryRows): + print(f"case in {line}: ", end='') + if pre_csum[i] >1.7e+308 or pre_csum[i] < -1.7e+308: + continue + else: + tdSql.checkData(i, 0, pre_csum[i]) + + pass + + def csum_current_query(self) : + + # table schema :ts timestamp, c1 int, c2 float, c3 timestamp, c4 binary(16), c5 double, c6 bool + # c7 bigint, c8 smallint, c9 tinyint, c10 nchar(16) + + # case1~6: numeric col:int/bigint/tinyint/smallint/float/double + self.checkcsum() + case2 = {"col": "c2"} + self.checkcsum(**case2) + case3 = {"col": "c5"} + self.checkcsum(**case3) + case4 = {"col": "c7"} + self.checkcsum(**case4) + case5 = {"col": "c8"} + self.checkcsum(**case5) + case6 = {"col": "c9"} + self.checkcsum(**case6) + + # case7~8: nested query + # case7 = {"table_expr": "(select c1 from stb1)"} + # self.checkcsum(**case7) + # case8 = {"table_expr": "(select csum(c1) c1 from stb1 group by tbname)"} + # self.checkcsum(**case8) + + # case9~10: mix with tbname/ts/tag/col + # case9 = {"alias": ", tbname"} + # self.checkcsum(**case9) + # case10 = {"alias": ", _c0"} + # self.checkcsum(**case10) + # case11 = {"alias": ", st1"} + # self.checkcsum(**case11) + # case12 = {"alias": ", c1"} + # self.checkcsum(**case12) + + # case13~15: with single condition + case13 = {"condition": "where c1 <= 10"} + self.checkcsum(**case13) + case14 = {"condition": "where c6 in (0, 1)"} + self.checkcsum(**case14) + case15 = {"condition": "where c1 between 1 and 10"} + self.checkcsum(**case15) + + # case16: with multi-condition + case16 = {"condition": "where c6=1 or c6 =0"} + self.checkcsum(**case16) + + # case17: only normal table join is supported + case17 = { + "col": "t1.c1", + "table_expr": "t1, t2", + "condition": "where t1.ts=t2.ts" + } + self.checkcsum(**case17) + # case18~19: with group by + # case18 = { + # "table_expr": "t1", + # "condition": "group by c6" + # } + # self.checkcsum(**case18) + # case19 = { + # "table_expr": "stb1", + # "condition": "partition by tbname" # partition by tbname + # } + # self.checkcsum(**case19) + + # # case20~21: with order by + # case20 = {"condition": "order by ts"} + # self.checkcsum(**case20) + + # # case22: with union + # case22 = { + # "condition": "union all select csum(c1) from t2" + # } + # self.checkcsum(**case22) + + # case23: with limit/slimit + case23 = { + "condition": "limit 1" + } + self.checkcsum(**case23) + # case24 = { + # "table_expr": "stb1", + # "condition": "group by tbname slimit 1 soffset 1" + # } + # self.checkcsum(**case24) + + pass + + def csum_error_query(self) -> None : + # unusual test + # + # table schema :ts timestamp, c1 int, c2 float, c3 timestamp, c4 binary(16), c5 double, c6 bool + # c7 bigint, c8 smallint, c9 tinyint, c10 nchar(16) + # + # form test + tdSql.error(self.csum_query_form(col="")) # no col + tdSql.error("csum(c1) from stb1") # no select + tdSql.error("select csum from t1") # no csum condition +
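# note (editor's comment): the remaining malformed csum calls below should likewise be rejected by the parser +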
tdSql.error("select csum c1 from t1") # no brackets + tdSql.error("select csum(c1) t1") # no from + tdSql.error("select csum( c1 ) from ") # no table_expr + # tdSql.error(self.csum_query_form(col="st1")) # tag col + tdSql.error(self.csum_query_form(col=1)) # col is a value + tdSql.error(self.csum_query_form(col="'c1'")) # col is a string + tdSql.error(self.csum_query_form(col=None)) # col is NULL 1 + tdSql.error(self.csum_query_form(col="NULL")) # col is NULL 2 + tdSql.error(self.csum_query_form(col='""')) # col is "" + tdSql.error(self.csum_query_form(col='c%')) # col is spercial char 1 + tdSql.error(self.csum_query_form(col='c_')) # col is spercial char 2 + tdSql.error(self.csum_query_form(col='c.')) # col is spercial char 3 + tdSql.error(self.csum_query_form(col='c3')) # timestamp col + tdSql.error(self.csum_query_form(col='ts')) # Primary key + tdSql.error(self.csum_query_form(col='avg(c1)')) # expr col + tdSql.error(self.csum_query_form(col='c6')) # bool col + tdSql.error(self.csum_query_form(col='c4')) # binary col + tdSql.error(self.csum_query_form(col='c10')) # nachr col + tdSql.error(self.csum_query_form(col='c10')) # not table_expr col + tdSql.error(self.csum_query_form(col='t1')) # tbname + tdSql.error(self.csum_query_form(col='stb1')) # stbname + tdSql.error(self.csum_query_form(col='db')) # datbasename + tdSql.error(self.csum_query_form(col=True)) # col is BOOL 1 + tdSql.error(self.csum_query_form(col='True')) # col is BOOL 2 + tdSql.error(self.csum_query_form(col='*')) # col is all col + tdSql.error("select csum[c1] from t1") # sql form error 1 + tdSql.error("select csum{c1} from t1") # sql form error 2 + tdSql.error(self.csum_query_form(col="[c1]")) # sql form error 3 + # tdSql.error(self.csum_query_form(col="c1, c2")) # sql form error 3 + # tdSql.error(self.csum_query_form(col="c1, 2")) # sql form error 3 + tdSql.error(self.csum_query_form(alias=", count(c1)")) # mix with aggregate function 1 + tdSql.error(self.csum_query_form(alias=", avg(c1)")) # mix with aggregate function 2 + tdSql.error(self.csum_query_form(alias=", min(c1)")) # mix with select function 1 + tdSql.error(self.csum_query_form(alias=", top(c1, 5)")) # mix with select function 2 + tdSql.error(self.csum_query_form(alias=", spread(c1)")) # mix with calculation function 1 + tdSql.error(self.csum_query_form(alias=", diff(c1)")) # mix with calculation function 2 + # tdSql.error(self.csum_query_form(alias=" + 2")) # mix with arithmetic 1 + tdSql.error(self.csum_query_form(alias=" + avg(c1)")) # mix with arithmetic 2 + tdSql.error(self.csum_query_form(alias=", c2")) # mix with other 1 + # tdSql.error(self.csum_query_form(table_expr="stb1")) # select stb directly + stb_join = { + "col": "stb1.c1", + "table_expr": "stb1, stb2", + "condition": "where stb1.ts=stb2.ts and stb1.st1=stb2.st2 order by stb1.ts" + } + tdSql.error(self.csum_query_form(**stb_join)) # stb join + interval_sql = { + "condition": "where ts>0 and ts < now interval(1h) fill(next)" + } + tdSql.error(self.csum_query_form(**interval_sql)) # interval + group_normal_col = { + "table_expr": "t1", + "condition": "group by c6" + } + tdSql.error(self.csum_query_form(**group_normal_col)) # group by normal col + slimit_soffset_sql = { + "table_expr": "stb1", + "condition": "group by tbname slimit 1 soffset 1" + } + # tdSql.error(self.csum_query_form(**slimit_soffset_sql)) + order_by_tbname_sql = { + "table_expr": "stb1", + "condition": "group by tbname order by tbname" + } + tdSql.error(self.csum_query_form(**order_by_tbname_sql)) + + pass + + def 
csum_test_data(self, tbnum:int, data_row:int, basetime:int) -> None : + for i in range(tbnum): + for j in range(data_row): + tdSql.execute( + f"insert into t{i} values (" + f"{basetime + (j+1)*10}, {random.randint(-200, -1)}, {random.uniform(200, -1)}, {basetime + random.randint(-200, -1)}, " + f"'binary_{j}', {random.uniform(-200, -1)}, {random.choice([0,1])}, {random.randint(-200,-1)}, " + f"{random.randint(-200, -1)}, {random.randint(-127, -1)}, 'nchar_{j}' )" + ) + + tdSql.execute( + f"insert into t{i} values (" + f"{basetime - (j+1) * 10}, {random.randint(1, 200)}, {random.uniform(1, 200)}, {basetime - random.randint(1, 200)}, " + f"'binary_{j}_1', {random.uniform(1, 200)}, {random.choice([0, 1])}, {random.randint(1,200)}, " + f"{random.randint(1,200)}, {random.randint(1,127)}, 'nchar_{j}_1' )" + ) + tdSql.execute( + f"insert into tt{i} values ( {basetime-(j+1) * 10}, {random.randint(1, 200)} )" + ) + + pass + + def csum_test_table(self,tbnum: int) -> None : + tdSql.execute("drop database if exists db") + tdSql.execute("create database if not exists db keep 3650") + tdSql.execute("use db") + + tdSql.execute( + "create stable db.stb1 (\ + ts timestamp, c1 int, c2 float, c3 timestamp, c4 binary(16), c5 double, c6 bool, \ + c7 bigint, c8 smallint, c9 tinyint, c10 nchar(16)\ + ) \ + tags(st1 int)" + ) + tdSql.execute( + "create stable db.stb2 (ts timestamp, c1 int) tags(st2 int)" + ) + for i in range(tbnum): + tdSql.execute(f"create table t{i} using stb1 tags({i})") + tdSql.execute(f"create table tt{i} using stb2 tags({i})") + + pass + + def csum_test_run(self) : + tdLog.printNoPrefix("==========TD-10594==========") + tbnum = 10 + nowtime = int(round(time.time() * 1000)) + per_table_rows = 2 + self.csum_test_table(tbnum) + + tdLog.printNoPrefix("######## no data test:") + self.csum_current_query() + self.csum_error_query() + + tdLog.printNoPrefix("######## insert only NULL test:") + for i in range(tbnum): + tdSql.execute(f"insert into t{i}(ts) values ({nowtime - 5})") + tdSql.execute(f"insert into t{i}(ts) values ({nowtime + 5})") + self.csum_current_query() + self.csum_error_query() + + tdLog.printNoPrefix("######## insert data in the range near the max(bigint/double):") + self.csum_test_table(tbnum) + tdSql.execute(f"insert into t1(ts, c1,c2,c5,c7) values " + f"({nowtime - (per_table_rows + 1) * 10}, {2**31-1}, {3.4*10**38}, {1.7*10**308}, {2**63-1})") + tdSql.execute(f"insert into t1(ts, c1,c2,c5,c7) values " + f"({nowtime - (per_table_rows + 2) * 10}, {2**31-1}, {3.4*10**38}, {1.7*10**308}, {2**63-1})") + self.csum_current_query() + self.csum_error_query() + + tdLog.printNoPrefix("######## insert data in the range near the min(bigint/double):") + self.csum_test_table(tbnum) + tdSql.execute(f"insert into t1(ts, c1,c2,c5,c7) values " + f"({nowtime - (per_table_rows + 1) * 10}, {1-2**31}, {-3.4*10**38}, {-1.7*10**308}, {1-2**63})") + tdSql.execute(f"insert into t1(ts, c1,c2,c5,c7) values " + f"({nowtime - (per_table_rows + 2) * 10}, {1-2**31}, {-3.4*10**38}, {-1.7*10**308}, {512-2**63})") + self.csum_current_query() + self.csum_error_query() + + tdLog.printNoPrefix("######## insert data without NULL data test:") + self.csum_test_table(tbnum) + self.csum_test_data(tbnum, per_table_rows, nowtime) + self.csum_current_query() + self.csum_error_query() + + + tdLog.printNoPrefix("######## insert data mix with NULL test:") + for i in range(tbnum): + tdSql.execute(f"insert into t{i}(ts) values ({nowtime})") + tdSql.execute(f"insert into t{i}(ts) values ({nowtime-(per_table_rows+3)*10})") + 
tdSql.execute(f"insert into t{i}(ts) values ({nowtime+(per_table_rows+3)*10})") + self.csum_current_query() + self.csum_error_query() + + + + tdLog.printNoPrefix("######## check after WAL test:") + tdSql.query("show dnodes") + index = tdSql.getData(0, 0) + tdDnodes.stop(index) + tdDnodes.start(index) + self.csum_current_query() + self.csum_error_query() + + def run(self): + import traceback + try: + # run in develop branch + self.csum_test_run() + pass + except Exception as e: + traceback.print_exc() + raise e + + + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) \ No newline at end of file diff --git a/tests/system-test/2-query/elapsed.py b/tests/system-test/2-query/elapsed.py new file mode 100644 index 0000000000000000000000000000000000000000..017090128d40f66eb7f395c75c41cafff2934a47 --- /dev/null +++ b/tests/system-test/2-query/elapsed.py @@ -0,0 +1,1604 @@ +################################################################### +# Copyright (c) 2020 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import os + +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + self.ts = 1420041600000 # 2015-01-01 00:00:00 this is begin time for first record + self.num = 10 + + def caseDescription(self): + + ''' + case1 : [TD-11804] test case for elapsed function : + + this test case is for aggregate function elapsed , elapsed function can only used for the timestamp primary key column (ts) , + it has two input parameters, the first parameter is necessary, basic SQL as follow: + + =================================================================================================================================== + SELECT ELAPSED(field_name[, time_unit]) FROM { tb_name | stb_name } [WHERE clause] [INTERVAL(interval [, offset]) [SLIDING sliding]]; + =================================================================================================================================== + + elapsed function can acting on ordinary tables and super tables , notice that this function is related to the timeline. + If it acts on a super table , it must be group by tbname . by the way ,this function support nested query. 
+
+        The scenarios covered by the test cases are as follows:
+
+        ====================================================================================================================================
+
+        case: select * from table|stable [group by tbname] | regular_table
+
+        case: select elapsed(ts) from table|stable where clause interval (units) [fill(LINEAR,NEXT,PREV,VALUE,NULL)] [group by tbname] [order by ts asc|desc];
+
+        case: select elapsed(ts), elapsed(ts,unit_time1)*regular_num1, elapsed(ts,unit_time1)+regular_num2 from table|stable where clause interval (units) [fill(LINEAR,NEXT,PREV,VALUE,NULL)] [group by tbname] [order by ts asc|desc];
+
+        // mixed with all functions in a single query (different from a nested query)
+        case: select elapsed(ts), count(*), avg(col), twa(col), irate(col), sum(col), stddev(col), leastsquares(col, 1, 1), min(col), max(col), first(col), last(col), percentile(col, 20), apercentile(col, 30), last_row(col), spread(col) from table|stable where clause interval (units) [fill(LINEAR,NEXT,PREV,VALUE,NULL)] [group by tbname] [order by ts asc|desc];
+
+        // mixed with ordinary columns
+        case: select ts, elapsed(ts)*10, col+5 from table|stable where clause interval (units) [fill(LINEAR,NEXT,PREV,VALUE,NULL)] [group by tbname] [order by ts asc|desc];
+
+        // nested query
+        case: select elapsed(ts) from (select elapsed(ts), count(*), avg(col), twa(col), irate(col), sum(col), stddev(col), leastsquares(col, 1, 1), min(col), max(col), first(col), last(col), percentile(col, 20), apercentile(col, 30), last_row(col), spread(col) from table|stable where clause interval (units) [fill(LINEAR,NEXT,PREV,VALUE,NULL)] [group by tbname] [order by ts asc|desc]) where clause interval (units) [fill(LINEAR,NEXT,PREV,VALUE,NULL)] [group by tbname] [order by ts asc|desc];
+
+        // filter conditions
+        case: select elapsed(ts) from table|stable [group by tbname] where [ts|col|tag >|<|=|>=|<=|<>|!= value] | [between ... and ...] | [in] | [is null|not null] interval (unit_time);
+        case: select elapsed(ts) from table|stable [group by tbname] where clause1 and clause2 and clause3 interval (unit_time);
+
+        // JOIN query
+        case: select elapsed(ts) from TABLE1 as tb1, TABLE2 as tb2 where join_condition [TABLE1 and TABLE2 can be stable|table|sub_table|empty_table]
+
+        // UNION ALL query
+        case: select elapsed(ts) from TABLE1 union all select elapsed(ts) from TABLE2 [TABLE1 and TABLE2 can be stable|table|sub_table|empty_table]
+
+        // window aggregation
+        case: select elapsed(ts) from t1 where clause session(ts, time_units);
+        case: select elapsed(ts) from t1 where clause state_window(regular_nums);
+
+        // continuous query
+        case: create table tb_name as select elapsed(ts), avg(col) from (select elapsed(ts) ts_inter, avg(col) col from stable|table interval (unit_time) [fill(LINEAR,NEXT,PREV,VALUE,NULL)] [group by tbname]) interval (unit_time) [fill(LINEAR,NEXT,PREV,VALUE,NULL)] sliding(unit_time_window);
+
+        ========================================================================================================================================
+
+        This test case checks both successful execution and the correctness of results.
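+
+        A worked example of the expected arithmetic (an illustration only): each fixture table
+        holds 10 rows whose ts values are 10 seconds apart, so the covered span is 90 seconds
+        and elapsed(ts,10s) should return 9; with the database's default millisecond precision,
+        elapsed(ts) would be expected to return 90000 for the same rows.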
+
+        '''
+        return
+
+    def prepare_data(self):
+
+        tdLog.info(" ====================================== prepare data ==================================================")
+
+        tdSql.execute('drop database if exists testdb ;')
+        tdSql.execute('create database testdb keep 36500;')
+        tdSql.execute('use testdb;')
+
+        tdSql.execute('create stable stable_1(ts timestamp ,tscol timestamp, q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint, q_float float ,\
+            q_double double , bin_chars binary(20)) tags(loc nchar(20) ,ind int,tstag timestamp);')
+        tdSql.execute('create stable stable_2(ts timestamp ,tscol timestamp, q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint, q_float float ,\
+            q_double double, bin_chars binary(20) ) tags(loc nchar(20),ind int,tstag timestamp);')
+        # create empty stables
+        tdSql.execute('create stable stable_empty(ts timestamp ,tscol timestamp, q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint, q_float float ,\
+            q_double double, bin_chars binary(20) ) tags(loc nchar(20),ind int,tstag timestamp);')
+        tdSql.execute('create stable stable_sub_empty(ts timestamp ,tscol timestamp, q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint, q_float float ,\
+            q_double double, bin_chars binary(20) ) tags(loc nchar(20),ind int,tstag timestamp);')
+
+        # create empty sub_tables and regular tables
+        tdSql.execute('create table sub_empty_1 using stable_sub_empty tags("sub_empty_1",3,"2015-01-01 00:02:00")')
+        tdSql.execute('create table sub_empty_2 using stable_sub_empty tags("sub_empty_2",3,"2015-01-01 00:02:00")')
+        tdSql.execute('create table regular_empty (ts timestamp , tscol timestamp ,q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint , q_float float , q_double double , bin_chars binary(20)) ;')
+
+        tdSql.execute('create table sub_table1_1 using stable_1 tags("sub1_1",1,"2015-01-01 00:00:00")')
+        tdSql.execute('create table sub_table1_2 using stable_1 tags("sub1_2",2,"2015-01-01 00:01:00")')
+        tdSql.execute('create table sub_table1_3 using stable_1 tags("sub1_3",3,"2015-01-01 00:02:00")')
+
+        tdSql.execute('create table sub_table2_1 using stable_2 tags("sub2_1",1,"2015-01-01 00:00:00")')
+        tdSql.execute('create table sub_table2_2 using stable_2 tags("sub2_2",2,"2015-01-01 00:01:00")')
+        tdSql.execute('create table sub_table2_3 using stable_2 tags("sub2_3",3,"2015-01-01 00:02:00")')
+
+        tdSql.execute('create table regular_table_1 (ts timestamp , tscol timestamp ,q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint , q_float float , q_double double, bin_chars binary(20)) ;')
+        tdSql.execute('create table regular_table_2 (ts timestamp , tscol timestamp ,q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint , q_float float , q_double double , bin_chars binary(20)) ;')
+        tdSql.execute('create table regular_table_3 (ts timestamp , tscol timestamp ,q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint , q_float float , q_double double , bin_chars binary(20)) ;')
+
+        tablenames = ["sub_table1_1","sub_table1_2","sub_table1_3","sub_table2_1","sub_table2_2","sub_table2_3","regular_table_1","regular_table_2","regular_table_3"]
+
+        tdLog.info("insert records")
+
+        for tablename in tablenames:
+
+            for i in range(self.num):
+                sql = 'insert into %s values(%d, %d,%d, %d, %d, %d, %f, %f, "%s")' % (tablename,self.ts + i*10000, self.ts + i*10,2147483647-i, 9223372036854775807-i, 32767-i, 127-i, i, i,("bintest"+str(i)))
+                print(sql)
+                tdSql.execute(sql)
+
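+        # Fixture shape (explanatory note): for row i, ts advances in 10 s steps
+        # (i*10000 ms) while tscol advances in 10 ms steps, and the integer columns
+        # count down from their type maxima. The elapsed() expectations below
+        # (e.g. 9 full 10s units per fully populated table) follow from this spacing.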
tdLog.info("=============================================data prepared done!=========================") + + def abnormal_common_test(self): + + tdLog.info (" ====================================== elapsed illeagal params ==================================================") + + tablenames = ["sub_table1_1","sub_table1_2","sub_table1_3","sub_table2_1","sub_table2_2","sub_table2_3","regular_table_1","regular_table_2","regular_table_3"] + + abnormal_list = ["()","(NULL)","(*)","(abc)","( , )","(NULL,*)","( ,NULL)","(%)","(+)","(*,)","(*, /)","(ts,*)" "(ts,tbname*10)","(ts,tagname)", + "(ts,2d+3m-2s,NULL)","(ts+1d,10s)","(ts+10d,NULL)" ,"(ts,now -1m%1d)","(ts+10d)","(ts+10d,_c0)","(ts+10d,)","(ts,%)","(ts, , m)","(ts,abc)","(ts,/)","(ts,*)","(ts,1s,100)", + "(ts,1s,abc)","(ts,1s,_c0)","(ts,1s,*)","(ts,1s,NULL)","(ts,,_c0)","(ts,tbname,ts)","(ts,0,tbname)","('2021-11-18 00:00:10')","('2021-11-18 00:00:10', 1s)", + "('2021-11-18T00:00:10+0800', '1s')","('2021-11-18T00:00:10Z', '1s')","('2021-11-18T00:00:10+0800', 10000000d,)","('ts', ,2021-11-18T00:00:10+0800, )"] + + for tablename in tablenames: + for abnormal_param in abnormal_list: + + if tablename.startswith("stable"): + basic_sql= "select elapsed" + abnormal_param + " from " + tablename + " group by tbname ,ind order by tbname;" #stables + else: + basic_sql= "select elapsed" + abnormal_param + " from " + tablename + ";" # regular table + tdSql.error(basic_sql) + + def abnormal_use_test(self): + + tdLog.info (" ====================================== elapsed use abnormal ==================================================") + + sqls_list = ["select elapsed(ts) from regular_empty group by tbname,ind order by desc; ", + "select elapsed(ts) from regular_empty group by tbname,ind order by desc; ", + "select elapsed(ts) from regular_table_1 group by tbname,ind order by desc; ", + "select elapsed(ts) from sub_table1_1 group by tbname,ind order by desc; ", + "select elapsed(ts) from sub_table1_1 group by tbname,ind order by desc; ", + # "select elapsed(ts,10s) from stable_empty group by ts order by ts;", + "select elapsed(ts,10s) from stable_1 group by ind order by ts;", + "select elapsed(ts,10s) from stable_2 group by tstag order by ts;", + "select elapsed(ts,10s) from stable_1 group by tbname,tstag,tscol order by ts;", + "select elapsed(ts,10s),ts from stable_1 group by tbname ,ind order by ts;", + "select ts,elapsed(ts,10s),tscol*100 from stable_1 group by tbname ,ind order by ts;", + "select elapsed(ts) from stable_1 group by tstag order by ts;", + "select elapsed(ts) from sub_empty_1 group by tbname,ind ,tscol order by ts desc;", + "select tbname, tscol,elapsed(ts) from sub_table1_1 group by tbname ,ind order by ts desc;", + "select elapsed(tscol) from sub_table1_1 order by ts desc;", + "select elapsed(tstag) from sub_table1_1 order by ts desc;", + "select elapsed(ind) from sub_table1_1 order by ts desc;", + "select elapsed(tscol) from sub_empty_1 order by ts desc;", + "select elapsed(tstag) from sub_empty_1 order by ts desc;", + "select elapsed(ind) from sub_table1_1 order by ts desc;", + "select elapsed(ind,10s) from sub_table1_1 order by ts desc;", + "select elapsed(tscol,10s) from sub_table1_1 order by ts desc;", + "select elapsed(tstag,10s) from sub_table1_1 order by ts desc;", + "select elapsed(q_int,10s) from sub_table1_1 order by ts desc;", + "select elapsed(loc,10s) from sub_table1_1 order by ts desc;", + "select elapsed(q_bigint,10s) from sub_table1_1 order by ts desc;", + "select elapsed(bin_chars,10s) from sub_table1_1 order by ts 
desc;"] + for sql in sqls_list : + tdSql.error(sql) + + def query_filter(self): + + tdLog.info (" ====================================== elapsed query filter ==================================================") + + for i in range(self.num): + ts_start_time = self.ts + i*10000 + ts_col_start_time = self.ts + i*10 + ts_tag_time = "2015-01-01 00:01:00" + ts_end_time = self.ts + (self.num-1-i)*10000 + ts_col_end_time = self.ts + (self.num-1-i)*10 + + filter_sql = "select elapsed(ts,10s) from stable_1 where ts >= %d group by tbname " %(ts_start_time) + tdSql.query(filter_sql) + tdSql.checkRows(3) + tdSql.checkData(0,0,float(self.num -i-1)) + tdSql.checkData(1,0,float(self.num -i-1)) + tdSql.checkData(2,0,float(self.num -i-1)) + + filter_sql = "select elapsed(ts,10s) from sub_table1_1 where ts >= %d " %(ts_start_time) + tdSql.query(filter_sql) + tdSql.checkRows(1) + tdSql.checkData(0,0,float(self.num -i-1)) + + + filter_sql = "select elapsed(ts,10s) from stable_1 where ts >= %d and tscol >= %d and tstag='2015-01-01 00:01:00'group by tbname " %(ts_start_time,ts_col_start_time) + tdSql.query(filter_sql) + tdSql.checkRows(1) + tdSql.checkData(0,0,float(self.num -i-1)) + + filter_sql = "select elapsed(ts,10s) from sub_table1_1 where ts >= %d and tscol >= %d " %(ts_start_time,ts_col_start_time) + tdSql.query(filter_sql) + tdSql.checkRows(1) + tdSql.checkData(0,0,float(self.num -i-1)) + + filter_sql = "select elapsed(ts,10s) from stable_1 where ts >= %d and tscol > %d and tstag='2015-01-01 00:01:00' group by tbname" %(ts_start_time,ts_col_start_time) + tdSql.query(filter_sql) + + if i == self.num-1: + tdSql.checkRows(0) + else: + tdSql.checkRows(1) + tdSql.checkData(0,0,float(self.num -i-2)) + + filter_sql = "select elapsed(ts,10s) from sub_table1_1 where ts >= %d and tscol > %d " %(ts_start_time,ts_col_start_time) + tdSql.query(filter_sql) + + if i == self.num-1: + tdSql.checkRows(0) + else: + tdSql.checkRows(1) + tdSql.checkData(0,0,float(self.num -i-2)) + + filter_sql = "select elapsed(ts,10s) from stable_1 where ts > %d and tscol > %d and tstag < '2015-01-01 00:01:00' group by tbname " %(ts_start_time,ts_col_start_time) + tdSql.query(filter_sql) + + if i == self.num-1: + tdSql.checkRows(0) + else: + tdSql.checkRows(1) + tdSql.checkData(0,0,float(self.num -i-2)) + + filter_sql = "select elapsed(ts,10s) from sub_table1_1 where ts > %d and tscol > %d " %(ts_start_time,ts_col_start_time) + tdSql.query(filter_sql) + + if i == self.num-1: + tdSql.checkRows(0) + else: + tdSql.checkRows(1) + tdSql.checkData(0,0,float(self.num -i-2)) + + filter_sql = "select elapsed(ts,10s) from stable_1 where ts > %d and tscol <= %d and tstag < '2015-01-01 00:01:00' group by tbname" %(ts_start_time,ts_col_start_time) + tdSql.query(filter_sql) + tdSql.checkRows(0) + + filter_sql = "select elapsed(ts,10s) from sub_table1_1 where ts > %d and tscol <= %d " %(ts_start_time,ts_col_start_time) + tdSql.query(filter_sql) + tdSql.checkRows(0) + + filter_sql = "select elapsed(ts,10s) from stable_1 where ts < %d and tscol <= %d and tstag < '2015-01-01 00:01:00' group by tbname" %(ts_end_time,ts_col_end_time) + tdSql.query(filter_sql) + + if i == self.num-1: + tdSql.checkRows(0) + else: + tdSql.checkRows(1) + tdSql.checkData(0,0,float(self.num -i-2)) + + filter_sql = "select elapsed(ts,10s) from sub_table1_1 where ts < %d and tscol <= %d " %(ts_end_time,ts_col_end_time) + tdSql.query(filter_sql) + + if i == self.num-1: + tdSql.checkRows(0) + else: + tdSql.checkRows(1) + tdSql.checkData(0,0,float(self.num -i-2)) + + filter_sql = 
"select elapsed(ts,10s) from stable_1 where ts < %d and tscol <= %d group by tbname " %(ts_end_time,ts_col_end_time) + tdSql.query(filter_sql) + + if i == self.num-1: + tdSql.checkRows(0) + else: + tdSql.checkRows(3) + tdSql.checkData(0,0,float(self.num - i - 2)) + tdSql.checkData(1,0,float(self.num - i - 2)) + tdSql.checkData(2,0,float(self.num - i - 2)) + + filter_sql = "select elapsed(ts,10s) from sub_table1_1 where ts < %d and tscol <= %d " %(ts_end_time,ts_col_end_time) + tdSql.query(filter_sql) + + if i == self.num-1: + tdSql.checkRows(0) + else: + tdSql.checkRows(1) + tdSql.checkData(0,0,float(self.num - i - 2)) + + filter_sql = "select elapsed(ts,10s) from stable_1 where ts = %d and tscol < %d group by tbname " %(ts_end_time,ts_col_end_time) + tdSql.query(filter_sql) + tdSql.checkRows(0) + + filter_sql = "select elapsed(ts,10s) from sub_table1_1 where ts = %d and tscol < %d " %(ts_end_time,ts_col_end_time) + tdSql.query(filter_sql) + tdSql.checkRows(0) + + filter_sql = "select elapsed(ts,10s) from stable_1 where q_tinyint != %d and tscol < %d group by tbname " %(i,ts_col_end_time) + tdSql.query(filter_sql) + + if i == self.num-1: + tdSql.checkRows(0) + else: + tdSql.checkRows(3) + tdSql.checkData(0,0,float(self.num -i-2)) + tdSql.checkData(1,0,float(self.num -i-2)) + tdSql.checkData(2,0,float(self.num -i-2)) + + filter_sql = "select elapsed(ts,10s) from sub_table1_1 where q_tinyint != %d and tscol < %d " %(i,ts_col_end_time) + tdSql.query(filter_sql) + + if i == self.num-1: + tdSql.checkRows(0) + else: + tdSql.checkRows(1) + tdSql.checkData(0,0,float(self.num -i-2)) + + filter_sql = "select elapsed(ts,10s) from stable_1 where q_tinyint != %d and tscol <= %d group by tbname " %(i,ts_col_end_time) + tdSql.query(filter_sql) + + if i == self.num: + tdSql.checkRows(0) + else: + tdSql.checkRows(3) + tdSql.checkData(0,0,float(self.num - i - 1)) + tdSql.checkData(1,0,float(self.num - i - 1)) + tdSql.checkData(2,0,float(self.num - i - 1)) + + filter_sql = "select elapsed(ts,10s) from sub_table1_1 where q_tinyint != %d and tscol <= %d " %(i,ts_col_end_time) + tdSql.query(filter_sql) + + if i == self.num: + tdSql.checkRows(0) + else: + tdSql.checkRows(1) + tdSql.checkData(0,0,float(self.num - i - 1)) + + filter_sql = "select elapsed(ts,10s) from stable_1 where q_tinyint <> %d and tscol < %d group by tbname " %(i,ts_col_end_time) + tdSql.query(filter_sql) + + if i == self.num-1: + tdSql.checkRows(0) + else: + tdSql.checkRows(3) + tdSql.checkData(0,0,float(self.num -i-2)) + tdSql.checkData(1,0,float(self.num -i-2)) + tdSql.checkData(2,0,float(self.num -i-2)) + + filter_sql = "select elapsed(ts,10s) from sub_table1_1 where q_tinyint <> %d and tscol < %d " %(i,ts_col_end_time) + tdSql.query(filter_sql) + + if i == self.num-1: + tdSql.checkRows(0) + else: + tdSql.checkRows(1) + tdSql.checkData(0,0,float(self.num -i-2)) + + filter_sql = "select elapsed(ts,10s) from stable_1 where q_tinyint <> %d and tscol <= %d group by tbname " %(i,ts_col_end_time) + tdSql.query(filter_sql) + + if i == self.num: + tdSql.checkRows(0) + else: + tdSql.checkRows(3) + tdSql.checkData(0,0,float(self.num - i - 1)) + tdSql.checkData(1,0,float(self.num - i - 1)) + tdSql.checkData(2,0,float(self.num - i - 1)) + + filter_sql = "select elapsed(ts,10s) from sub_table1_1 where q_tinyint <> %d and tscol <= %d " %(i,ts_col_end_time) + tdSql.query(filter_sql) + + if i == self.num: + tdSql.checkRows(0) + else: + tdSql.checkRows(1) + tdSql.checkData(0,0,float(self.num - i - 1)) + + # filter between and + tdSql.query("select 
elapsed(ts,10s) from sub_table1_1 where ts between '2015-01-01 00:00:00.000' and '2015-01-01 00:01:00.000' and q_tinyint between 125 and 127 and tscol <= '2015-01-01 00:01:00.000' ") + tdSql.checkData(0,0,2) + tdSql.query("select elapsed(ts,10s) from stable_1 where ts between '2015-01-01 00:00:00.000' and '2015-01-01 00:01:00.000' and \ + q_tinyint between 125 and 127 and tscol <= '2015-01-01 00:01:00.000' group by tbname ") + tdSql.checkData(0,0,2) + tdSql.checkData(1,0,2) + tdSql.checkData(2,0,2) + + # filter in and or + tdSql.query("select elapsed(ts,10s) from sub_table1_1 where ts between '2015-01-01 00:00:00.000' and '2015-01-01 00:01:00.000' and q_tinyint between 125 and 127 and tscol <= '2015-01-01 00:01:00.000' ") + tdSql.checkData(0,0,2) + + tdSql.query("select elapsed(ts,10s) from stable_1 where ts between '2015-01-01 00:00:00.000' and '2015-01-01 00:01:00.000' and q_tinyint between 125 and 127 and tscol <= '2015-01-01 00:01:00.000' group by tbname ") + tdSql.checkData(0,0,2) + tdSql.checkData(1,0,2) + tdSql.checkData(2,0,2) + + tdSql.query("select elapsed(ts,10s) from stable_1 where ts between '2015-01-01 00:00:00.000' and '2015-01-01 00:01:00.000' and q_tinyint in (125,126,127) and tscol <= '2015-01-01 00:01:00.000' group by tbname ") + tdSql.checkData(0,0,2) + tdSql.checkData(1,0,2) + tdSql.checkData(2,0,2) + + tdSql.query("select elapsed(ts,10s) from stable_1 where ts between '2015-01-01 00:00:00.000' and '2015-01-01 00:01:00.000' and bin_chars in ('bintest0','bintest1') and tscol <= '2015-01-01 00:01:00.000' group by tbname ") + tdSql.checkData(0,0,1) + tdSql.checkData(1,0,1) + tdSql.checkData(2,0,1) + + tdSql.query("select elapsed(ts,10s) from stable_1 where ts between '2015-01-01 00:00:00.000' and '2015-01-01 00:01:00.000' and bin_chars in ('bintest0','bintest1') and tscol <= '2015-01-01 00:01:00.000' group by tbname ") + tdSql.checkData(0,0,1) + tdSql.checkData(1,0,1) + tdSql.checkData(2,0,1) + + tdSql.query("select elapsed(ts,10s) from stable_1 where ts between '2015-01-01 00:00:00.000' and '2015-01-01 00:01:00.000' and bin_chars like 'bintest_' and tscol <= '2015-01-01 00:01:00.000' group by tbname ") + tdSql.checkData(0,0,6) + tdSql.checkData(1,0,6) + tdSql.checkData(2,0,6) + + tdSql.query("select elapsed(ts,10s) from stable_1 where ts between '2015-01-01 00:00:00.000' and '2015-01-01 00:01:00.000' and bin_chars like 'bintest_' and tscol <= '2015-01-01 00:01:00.000' group by tbname ") + tdSql.checkData(0,0,6) + tdSql.checkData(1,0,6) + tdSql.checkData(2,0,6) + + tdSql.query("select elapsed(ts,10s) from stable_1 where ts between '2015-01-01 00:00:00.000' and '2015-01-01 00:01:00.000' and bin_chars is not null and tscol <= '2015-01-01 00:01:00.000' group by tbname; ") + tdSql.checkData(0,0,6) + tdSql.checkData(1,0,6) + tdSql.checkData(2,0,6) + + tdSql.query("select elapsed(ts,10s) from stable_1 where ts between '2015-01-01 00:00:00.000' and '2015-01-01 00:01:00.000' and bin_chars is null and tscol <= '2015-01-01 00:01:00.000' group by tbname; ") + tdSql.checkRows(0) + + tdSql.query("select elapsed(ts,10s) from stable_1 where ts between '2015-01-01 00:00:00.000' and '2015-01-01 00:01:00.000' and bin_chars match '^b' and tscol <= '2015-01-01 00:01:00.000' group by tbname; ") + tdSql.checkRows(3) + tdSql.checkData(0,0,6) + tdSql.checkData(1,0,6) + tdSql.checkData(2,0,6) + + tdSql.query("select elapsed(ts,10s) from stable_1 where ts between '2015-01-01 00:00:00.000' and '2015-01-01 00:01:00.000' and bin_chars nmatch '^a' and tscol <= '2015-01-01 00:01:00.000' group by 
tbname; ") + tdSql.checkRows(3) + tdSql.checkData(0,0,6) + tdSql.checkData(1,0,6) + tdSql.checkData(2,0,6) + + tdSql.query("select elapsed(ts,10s) from stable_1 where ts between '2015-01-01 00:00:00.000' and '2015-01-01 00:01:00.000' and bin_chars ='bintest1' or bin_chars ='bintest2' and tscol <= '2015-01-01 00:01:00.000' group by tbname; ") + tdSql.checkRows(3) + tdSql.query("select elapsed(ts,10s) from stable_1 where (ts between '2015-01-01 00:00:00.000' and '2015-01-01 00:01:00.000') or (ts between '2015-01-01 00:01:00.000' and '2015-01-01 00:02:00.000') group by tbname; ") + tdSql.checkRows(3) + tdSql.checkData(0,0,9) + tdSql.checkData(1,0,9) + tdSql.checkData(2,0,9) + + def query_interval(self): + + tdLog.info (" ====================================== elapsed interval sliding fill ==================================================") + + # empty interval + tdSql.query("select max(q_int)*10 from stable_empty where ts >= '2015-01-01 00:00:00.000' and ts <'2015-01-01 00:10:00.000' interval(10s) fill(prev);") + tdSql.checkRows(0) + tdSql.query("select max(q_int)*10 from sub_empty_2 where ts >= '2015-01-01 00:00:00.000' and ts <'2015-01-01 00:10:00.000' interval(10s) fill(prev);") + tdSql.checkRows(0) + + tdSql.query("select elapsed(ts,10s)*10 from stable_empty where ts >= '2015-01-01 00:00:00.000' and ts <'2015-01-01 00:10:00.000' interval(10s) fill(prev) group by tbname;") + tdSql.checkRows(0) + tdSql.query("select elapsed(ts,10s)*10 from sub_empty_2 where ts >= '2015-01-01 00:00:00.000' and ts <'2015-01-01 00:10:00.000' interval(10s) fill(prev);") + tdSql.checkRows(0) + + for i in range(self.num): + ts_start_time = self.ts + i*10000 + ts_col_start_time = self.ts + i*10 + ts_tag_time = "2015-01-01 00:01:00" + ts_end_time = self.ts + (self.num-1-i)*10000 + ts_col_end_time = self.ts + (self.num-1-i)*10 + + + # only interval + interval_sql = "select elapsed(ts,10s) from stable_1 where ts <=%d interval(10s) group by tbname " %(ts_start_time) + tdSql.query(interval_sql) + tdSql.checkRows(3*(i+1)) + + interval_sql = "select elapsed(ts,10s) from sub_table1_1 where ts <=%d interval(10s) " %(ts_start_time) + tdSql.query(interval_sql) + tdSql.checkRows(i+1) + for x in range(i+1): + if x == i: + tdSql.checkData(x,1,0) + else : + tdSql.checkData(x,1,1) + + # interval and fill , fill_type = ["NULL","value,100","prev","next","linear"] + + # interval (10s) and time range is outer records + + tdSql.query("select elapsed(ts,10s)*10 from stable_empty where ts >= '2015-01-01 00:00:00.000' and ts <'2015-01-01 00:10:00.000' interval(10s) fill(prev) group by tbname;") + tdSql.checkRows(0) + + tdSql.query("select elapsed(ts,10s)*10 from sub_empty_2 where ts >= '2015-01-01 00:00:00.000' and ts <'2015-01-01 00:10:00.000' interval(10s) fill(prev);") + tdSql.checkRows(0) + + tdSql.query("select elapsed(ts,10s)*10 from stable_1 where ts >= '2015-01-01 00:00:00.000' and ts <'2015-01-01 00:10:00.000' interval(10s) fill(prev) group by tbname;") + tdSql.checkRows(180) + tdSql.checkData(0,1,10) + tdSql.checkData(9,1,0) + tdSql.checkData(59,1,0) + tdSql.checkData(60,1,10) + + tdSql.query("select elapsed(ts,10s)*10 from stable_1 where ts >= '2015-01-01 00:00:00.000' and ts <'2015-01-01 00:10:00.000' interval(10s) fill(next) group by tbname;") + tdSql.checkRows(180) + tdSql.checkData(0,1,10) + tdSql.checkData(9,1,0) + tdSql.checkData(10,1,None) + tdSql.checkData(59,1,None) + tdSql.checkData(60,1,10) + tdSql.checkData(61,1,10) + + tdSql.query("select elapsed(ts,10s)*10 from stable_1 where ts >= '2015-01-01 00:00:00.000' and 
ts <'2015-01-01 00:10:00.000' interval(10s) fill(linear) group by tbname;") + tdSql.checkRows(180) + tdSql.checkData(0,1,10) + tdSql.checkData(9,1,0) + tdSql.checkData(10,1,None) + tdSql.checkData(59,1,None) + tdSql.checkData(60,1,10) + tdSql.checkData(61,1,10) + + tdSql.query("select elapsed(ts,10s)*10 from stable_1 where ts >= '2015-01-01 00:00:00.000' and ts <'2015-01-01 00:10:00.000' interval(10s) fill(NULL) group by tbname;") + tdSql.checkRows(180) + tdSql.checkData(0,1,10) + tdSql.checkData(9,1,0) + tdSql.checkData(10,1,None) + tdSql.checkData(59,1,None) + tdSql.checkData(60,1,10) + tdSql.checkData(61,1,10) + + tdSql.query("select elapsed(ts,10s)*10 from stable_1 where ts >= '2015-01-01 00:00:00.000' and ts <'2015-01-01 00:10:00.000' interval(10s) fill(value ,2) group by tbname;") + tdSql.checkRows(180) + tdSql.checkData(0,1,10) + tdSql.checkData(9,1,0) + tdSql.checkData(10,1,2) + tdSql.checkData(59,1,2) + tdSql.checkData(60,1,10) + tdSql.checkData(61,1,10) + + # interval (20s) and time range is outer records + tdSql.query("select elapsed(ts,10s)*10 from stable_1 where ts >= '2015-01-01 00:00:00.000' and ts <'2015-01-01 00:10:00.000' interval(20s) fill(prev) group by tbname,ind ;") + tdSql.checkRows(90) + tdSql.checkData(0,1,20) + tdSql.checkData(4,1,10) + tdSql.checkData(5,1,10) + tdSql.checkData(29,1,10) + tdSql.checkData(30,1,20) + tdSql.checkData(31,1,20) + + tdSql.query("select elapsed(ts,10s)*10 from stable_1 where ts >= '2015-01-01 00:00:00.000' and ts <'2015-01-01 00:10:00.000' interval(20s) fill(next) group by tbname,ind ;") + tdSql.checkRows(90) + tdSql.checkData(0,1,20) + tdSql.checkData(4,1,10) + tdSql.checkData(5,1,None) + tdSql.checkData(29,1,None) + tdSql.checkData(30,1,20) + tdSql.checkData(31,1,20) + + tdSql.query("select elapsed(ts,10s)*10 from stable_1 where ts >= '2015-01-01 00:00:00.000' and ts <'2015-01-01 00:10:00.000' interval(20s) fill(linear) group by tbname,ind ;") + tdSql.checkRows(90) + tdSql.checkData(0,1,20) + tdSql.checkData(4,1,10) + tdSql.checkData(5,1,None) + tdSql.checkData(29,1,None) + tdSql.checkData(30,1,20) + tdSql.checkData(31,1,20) + + tdSql.query("select elapsed(ts,10s)*10 from stable_1 where ts >= '2015-01-01 00:00:00.000' and ts <'2015-01-01 00:10:00.000' interval(20s) fill(NULL) group by tbname,ind ;") + tdSql.checkRows(90) + tdSql.checkData(0,1,20) + tdSql.checkData(4,1,10) + tdSql.checkData(5,1,None) + tdSql.checkData(29,1,None) + tdSql.checkData(30,1,20) + tdSql.checkData(31,1,20) + + tdSql.query("select elapsed(ts,10s)*10 from stable_1 where ts >= '2015-01-01 00:00:00.000' and ts <'2015-01-01 00:10:00.000' interval(20s) fill(value ,2) group by tbname,ind ;") + tdSql.checkRows(90) + tdSql.checkData(0,1,20) + tdSql.checkData(4,1,10) + tdSql.checkData(5,1,2) + tdSql.checkData(29,1,2) + tdSql.checkData(30,1,20) + tdSql.checkData(31,1,20) + + # interval (20s) and time range is in records + + tdSql.query("select elapsed(ts,10s)*10 from stable_1 where ts >= '2015-01-01 00:00:00.000' and ts <'2015-01-01 00:01:00.000' interval(20s) fill(prev) group by tbname,ind ;") + tdSql.checkRows(9) + tdSql.checkData(0,1,20) + tdSql.checkData(2,1,10) + tdSql.checkData(3,1,20) + tdSql.checkData(5,1,10) + tdSql.checkData(7,1,20) + tdSql.checkData(8,1,10) + + tdSql.query("select elapsed(ts,10s)*10 from stable_1 where ts >= '2015-01-01 00:00:00.000' and ts <'2015-01-01 00:01:00.000' interval(20s) fill(next) group by tbname,ind ;") + tdSql.checkRows(9) + tdSql.checkData(0,1,20) + tdSql.checkData(2,1,10) + tdSql.checkData(3,1,20) + tdSql.checkData(5,1,10) + 
tdSql.checkData(7,1,20) + tdSql.checkData(8,1,10) + + tdSql.query("select elapsed(ts,10s)*10 from stable_1 where ts >= '2015-01-01 00:00:00.000' and ts <'2015-01-01 00:01:00.000' interval(20s) fill(linear) group by tbname,ind ;") + tdSql.checkRows(9) + tdSql.checkData(0,1,20) + tdSql.checkData(2,1,10) + tdSql.checkData(3,1,20) + tdSql.checkData(5,1,10) + tdSql.checkData(7,1,20) + tdSql.checkData(8,1,10) + + tdSql.query("select elapsed(ts,10s)*10 from stable_1 where ts >= '2015-01-01 00:00:00.000' and ts <'2015-01-01 00:01:00.000' interval(20s) fill(NULL) group by tbname,ind ;") + tdSql.checkRows(9) + tdSql.checkData(0,1,20) + tdSql.checkData(2,1,10) + tdSql.checkData(3,1,20) + tdSql.checkData(5,1,10) + tdSql.checkData(7,1,20) + tdSql.checkData(8,1,10) + + tdSql.query("select elapsed(ts,10s)*10 from stable_1 where ts >= '2015-01-01 00:00:00.000' and ts <'2015-01-01 00:01:00.000' interval(20s) fill(value ,2 ) group by tbname,ind ;") + tdSql.checkRows(9) + tdSql.checkData(0,1,20) + tdSql.checkData(2,1,10) + tdSql.checkData(3,1,20) + tdSql.checkData(5,1,10) + tdSql.checkData(7,1,20) + tdSql.checkData(8,1,10) + + tdSql.query("select elapsed(ts,10s)*10 from stable_1 where ts >= '2015-01-01 00:00:00.000' and ts <'2015-01-01 00:01:00.000' interval(20s) group by tbname,ind ;") + tdSql.checkRows(9) + tdSql.checkData(0,1,20) + tdSql.checkData(2,1,10) + tdSql.checkData(3,1,20) + tdSql.checkData(5,1,10) + tdSql.checkData(7,1,20) + tdSql.checkData(8,1,10) + + tdSql.query("select elapsed(ts,10s)*10 from stable_1 where ts >= '2014-12-31 23:59:00.000' and ts <'2015-01-01 00:01:00.000' interval(20s) fill(NULL) group by tbname,ind ;") + tdSql.checkRows(18) + tdSql.checkData(0,1,None) + tdSql.checkData(2,1,None) + tdSql.checkData(3,1,20) + tdSql.checkData(5,1,10) + tdSql.checkData(7,1,None) + tdSql.checkData(8,1,None) + tdSql.checkData(9,1,20) + + # interval sliding + + tdSql.query("select elapsed(ts,10s)*10 from stable_1 where ts >= '2014-12-31 23:59:00.000' and ts <'2015-01-01 00:01:00.000' interval(20s) sliding(20s) fill(NULL) group by tbname,ind ;") + tdSql.checkRows(18) + tdSql.checkData(0,1,None) + tdSql.checkData(2,1,None) + tdSql.checkData(3,1,20) + tdSql.checkData(5,1,10) + tdSql.checkData(7,1,None) + tdSql.checkData(8,1,None) + tdSql.checkData(9,1,20) + + tdSql.query("select elapsed(ts,10s)*10 from stable_1 where ts >= '2014-12-31 23:59:00.000' and ts <'2015-01-01 00:01:00.000' interval(20s) sliding(10s) fill(NULL) group by tbname,ind ;") + tdSql.checkRows(39) + tdSql.checkData(0,1,None) + tdSql.checkData(2,1,None) + tdSql.checkData(6,1,10) + tdSql.checkData(7,1,20) + tdSql.checkData(12,1,0) + tdSql.checkData(13,1,None) + tdSql.checkData(15,1,None) + tdSql.checkData(19,1,10) + tdSql.checkData(20,1,20) + tdSql.checkData(25,1,0) + + def query_mix_common(self): + + tdLog.info (" ======================================elapsed mixup with common col, it will not support =======================================") + + tdSql.query("select elapsed(ts,10s) from stable_1 where ts between '2015-01-01 00:00:00.000' and '2015-01-01 00:01:00.000' and ind =1 group by tbname; ") + tdSql.checkRows(1) + tdSql.checkData(0,0,6) + + tdSql.query("select elapsed(ts,10s) from sub_table1_1 where ts between '2015-01-01 00:00:00.000' and '2015-01-01 00:01:00.000' ; ") + tdSql.checkRows(1) + tdSql.checkData(0,0,6) + + tdSql.error("select ts,elapsed(ts,10s) from sub_empty_1 where ts between '2015-01-01 00:00:00.000' and '2015-01-01 00:01:00.000' ; ") + tdSql.error("select ts,elapsed(ts,10s) from stable_empty where ts between 
'2015-01-01 00:00:00.000' and '2015-01-01 00:01:00.000' group by tbname; ") + + tdSql.error("select ts,elapsed(ts,10s) from sub_table1_1 where ts between '2015-01-01 00:00:00.000' and '2015-01-01 00:01:00.000' ; ") + tdSql.error("select ts,elapsed(ts,10s) from stable_1 where ts between '2015-01-01 00:00:00.000' and '2015-01-01 00:01:00.000' group by tbname; ") + + tdSql.error("select q_int,elapsed(ts,10s) from sub_table1_1 where ts between '2015-01-01 00:00:00.000' and '2015-01-01 00:01:00.000' ; ") + tdSql.error("select q_int,elapsed(ts,10s) from stable_1 where ts between '2015-01-01 00:00:00.000' and '2015-01-01 00:01:00.000' group by tbname; ") + + tdSql.error("select ts,q_int,elapsed(ts,10s) from sub_table1_1 where ts between '2015-01-01 00:00:00.000' and '2015-01-01 00:01:00.000' ; ") + tdSql.error("select ts,q_int,elapsed(ts,10s) from stable_1 where ts between '2015-01-01 00:00:00.000' and '2015-01-01 00:01:00.000' group by tbname; ") + + def query_mix_Aggregate(self): + + tdLog.info (" ====================================== elapsed mixup with aggregate ==================================================") + + tdSql.query("select count(*),avg(q_int) , sum(q_double),stddev(q_float),LEASTSQUARES(q_int,0,1), elapsed(ts,10s) from sub_table1_1 ; ") + + data = tdSql.getResult("select count(*),avg(q_int) , sum(q_double),stddev(q_float),LEASTSQUARES(q_int,0,1), elapsed(ts,10s) from sub_table1_1 ; ") + + querys = ["count(*)","avg(q_int)", "sum(q_double)","stddev(q_float)","LEASTSQUARES(q_int,0,1)", "elapsed(ts,10s)"] + + for index , query in enumerate(querys): + sql = "select %s from sub_table1_1 " %(query) + tdSql.query(sql) + tdSql.checkData(0,0,data[0][index]) + + tdSql.query("select count(*),avg(q_int) , sum(q_double),stddev(q_float),LEASTSQUARES(q_int,0,1), elapsed(ts,10s) from stable_1 group by tbname; ") + + # Arithmetic with elapsed for common table + + operators = ["+" ,"-" , "*" ,"/" ,"%"] + querys_oper = ["count(*)","avg(q_int)", "sum(q_double)","stddev(q_float)", "elapsed(ts,10s)"] + + for operator in operators: + + query_datas=[] + + sql_common= "select " + + for index , query in enumerate(querys_oper): + + query_data = tdSql.getResult("select %s from sub_table1_1;"%query) + + query_datas.append(query_data[0][0]) + sql_common += " %s %s " %(query,operator) + sql_common=sql_common[:-2] + " from sub_table1_1;" + + tdSql.query(sql_common) + results= query_datas[0] + if operator == "+": + for data in query_datas[1:]: + results += data + tdSql.checkData(0,0,results) + + results= query_datas[0] + if operator == "-": + for data in query_datas[1:]: + results -= data + tdSql.checkData(0,0,results) + + results= query_datas[0] + if operator == "*": + for data in query_datas[1:]: + results *= data + tdSql.checkData(0,0,results) + + results= query_datas[0] + if operator == "/": + for data in query_datas[1:]: + results /= data + tdSql.checkData(0,0,results) + + results= query_datas[0] + if operator == "%": + for data in query_datas[1:]: + results %= data + tdSql.checkData(0,0,results) + + + # Arithmetic with elapsed for super table + + operators = ["+" ,"-" , "*" ,"/" ,"%"] + querys_oper = ["count(*)","avg(q_int)", "sum(q_double)","stddev(q_float)", "elapsed(ts,10s)"] + + for operator in operators: + + query_datas=[] + + sql_common= "select " + + for index , query in enumerate(querys_oper): + + query_data = tdSql.getResult("select %s from stable_1 group by tbname;"%query) + + query_datas.append(query_data[0][0]) + sql_common += " %s %s " %(query,operator) + sql_common=sql_common[:-2] + " from 
stable_1 group by tbname;" + + tdSql.query(sql_common) + results= query_datas[0] + if operator == "+": + for data in query_datas[1:]: + results += data + tdSql.checkData(0,0,results) + tdSql.checkData(1,0,results) + tdSql.checkData(2,0,results) + + + results= query_datas[0] + if operator == "-": + for data in query_datas[1:]: + results -= data + tdSql.checkData(0,0,results) + tdSql.checkData(1,0,results) + tdSql.checkData(2,0,results) + + results= query_datas[0] + if operator == "*": + for data in query_datas[1:]: + results *= data + tdSql.checkData(0,0,results) + tdSql.checkData(1,0,results) + tdSql.checkData(2,0,results) + + results= query_datas[0] + if operator == "/": + for data in query_datas[1:]: + results /= data + tdSql.checkData(0,0,results) + tdSql.checkData(1,0,results) + tdSql.checkData(2,0,results) + + results= query_datas[0] + if operator == "%": + for data in query_datas[1:]: + results %= data + tdSql.checkData(0,0,results) + tdSql.checkData(1,0,results) + tdSql.checkData(2,0,results) + + def query_mix_select(self): + + tdLog.info (" ====================================== elapsed mixup with select function =================================================") + + querys = ["max(q_int)","min(q_int)" , "first(q_tinyint)", "first(*)","last(q_int)","last(*)","PERCENTILE(q_int,10)","APERCENTILE(q_int,10)","elapsed(ts,10s)"] + + + querys_mix = ["max(q_int)","min(q_int)" , "first(q_tinyint)", "first(q_int)","last(q_int)","PERCENTILE(q_int,10)","APERCENTILE(q_int,10)","elapsed(ts,10s)"] + + tdSql.query("select max(q_int),min(q_int) , first(q_tinyint), first(q_int),last(q_int),PERCENTILE(q_int,10),APERCENTILE(q_int,10) ,elapsed(ts,10s) from sub_table1_1 ; ") + + data = tdSql.getResult("select max(q_int),min(q_int) , first(q_tinyint), first(q_int),last(q_int),PERCENTILE(q_int,10),APERCENTILE(q_int,10) ,elapsed(ts,10s) from sub_table1_1 ; ") + + for index , query in enumerate(querys_mix): + sql = "select %s from sub_table1_1 " %(query) + tdSql.query(sql) + tdSql.checkData(0,0,data[0][index]) + + tdSql.query("select max(q_int),min(q_int) , first(q_tinyint), first(q_int),last(q_int),APERCENTILE(q_int,10) ,elapsed(ts,10s) from stable_1 group by tbname ; ") + + data = tdSql.getResult("select max(q_int),min(q_int) , first(q_tinyint), first(q_int),last(q_int),APERCENTILE(q_int,10) ,elapsed(ts,10s) from stable_1 group by tbname ; ") + + querys_mix = ["max(q_int)","min(q_int)" , "first(q_tinyint)", "first(q_int)","last(q_int)","APERCENTILE(q_int,10)","elapsed(ts,10s)"] + + for index , query in enumerate(querys_mix): + sql = "select %s from stable_1 group by tbname " %(query) + tdSql.query(sql) + tdSql.checkData(0,0,data[0][index]) + tdSql.checkData(1,0,data[0][index]) + tdSql.checkData(2,0,data[0][index]) + + operators = ["+" ,"-" , "*" ,"/" ,"%"] + querys_oper = querys_mix + + for operator in operators: + + query_datas=[] + + sql_common= "select " + + for index , query in enumerate(querys_oper): + + query_data = tdSql.getResult("select %s from sub_table1_1;"%query) + + query_datas.append(query_data[0][0]) + sql_common += " %s %s " %(query,operator) + sql_common=sql_common[:-2] + " from sub_table1_1;" + + tdSql.query(sql_common) + results= query_datas[0] + if operator == "+": + for data in query_datas[1:]: + results += data + tdSql.checkData(0,0,results) + + results= query_datas[0] + if operator == "-": + for data in query_datas[1:]: + results -= data + tdSql.checkData(0,0,results) + + results= query_datas[0] + if operator == "*": + for data in query_datas[1:]: + results *= data + 
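+                # the composed "func1 <op> func2 <op> ..." query returns a single row for
+                # the sub table; verify it against a client-side fold of the per-function
+                # results gathered above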
tdSql.checkData(0,0,results) + + results= query_datas[0] + if operator == "/": + for data in query_datas[1:]: + results /= data + tdSql.checkData(0,0,results) + + results= query_datas[0] + if operator == "%": + for data in query_datas[1:]: + results %= data + tdSql.checkData(0,0,results) + + + # Arithmetic with elapsed for super table + + operators = ["+" ,"-" , "*" ,"/" ,"%"] + querys_oper = querys_mix + + for operator in operators: + + query_datas=[] + + sql_common= "select " + + for index , query in enumerate(querys_oper): + + query_data = tdSql.getResult("select %s from stable_1 group by tbname;"%query) + + query_datas.append(query_data[0][0]) + sql_common += " %s %s " %(query,operator) + sql_common=sql_common[:-2] + " from stable_1 group by tbname;" + + tdSql.query(sql_common) + results= query_datas[0] + if operator == "+": + for data in query_datas[1:]: + results += data + tdSql.checkData(0,0,results) + tdSql.checkData(1,0,results) + tdSql.checkData(2,0,results) + + + results= query_datas[0] + if operator == "-": + for data in query_datas[1:]: + results -= data + tdSql.checkData(0,0,results) + tdSql.checkData(1,0,results) + tdSql.checkData(2,0,results) + + results= query_datas[0] + if operator == "*": + for data in query_datas[1:]: + results *= data + tdSql.checkData(0,0,results) + tdSql.checkData(1,0,results) + tdSql.checkData(2,0,results) + + results= query_datas[0] + if operator == "/": + for data in query_datas[1:]: + results /= data + tdSql.checkData(0,0,results) + tdSql.checkData(1,0,results) + tdSql.checkData(2,0,results) + + results= query_datas[0] + if operator == "%": + for data in query_datas[1:]: + results %= data + tdSql.checkData(0,0,results) + tdSql.checkData(1,0,results) + tdSql.checkData(2,0,results) + + def query_mix_compute(self): + + tdLog.info (" ====================================== elapsed mixup with compute function =================================================") + + querys = ["diff(q_int)","DERIVATIVE(q_int,1s,1)","spread(ts)","spread(q_tinyint)","ceil(q_float)","floor(q_float)","round(q_float)"] + + for index , query in enumerate(querys): + + sql1 = "select elapsed(ts,10s),%s from sub_table1_1 " %(query) + sql2 = "select elapsed(ts,10s),%s from stable_1 group by tbname" %(query) + if query in ["diff(q_int)","DERIVATIVE(q_int,1s,1)","ceil(q_float)","floor(q_float)","round(q_float)"]: + tdSql.error(sql1) + tdSql.error(sql2) + continue + tdSql.query(sql1) + tdSql.query(sql2) + + # only support mixup with spread + + sql = "select spread(ts)*10,spread(q_tinyint)-10,elapsed(ts,10s) from sub_table1_1 where ts>=\"2015-01-01 00:00:00.000\" and ts < \"2015-01-01 00:10:00.000\" ;" + tdSql.execute(sql) + + data = tdSql.getResult(sql) + + sql = "select spread(ts)*10,spread(q_tinyint)-10,elapsed(ts,10s) from stable_1 where ts>=\"2015-01-01 00:00:00.000\" and ts < \"2015-01-01 00:10:00.000\" ;" + tdSql.execute(sql) + + querys_mix = ["spread(ts)","spread(q_tinyint)-10","elapsed(ts,10s)"] + + for index , query in enumerate(querys_mix): + sql = "select %s from sub_table1_1 where ts>=\"2015-01-01 00:00:00.000\" and ts < \"2015-01-01 00:10:00.000\" ; " %(query) + tdSql.query(sql) + + operators = ["+" ,"-" , "*" ,"/" ,"%"] + querys_oper = querys_mix + + for operator in operators: + + sql_common= "select " + + for index , query in enumerate(querys_oper): + + sql_common += " %s %s " %(query,operator) + sql_common=sql_common[:-2] + " from stable_1 where ts>=\"2015-01-01 00:00:00.000\" and ts < \"2015-01-01 00:10:00.000\" ;" + + tdSql.query(sql_common) + + for index , query in 
enumerate(querys_mix): + sql = "select %s from stable_1 where ts>=\"2015-01-01 00:00:00.000\" and ts < \"2015-01-01 00:10:00.000\" ; " %(query) + tdSql.query(sql) + + operators = ["+" ,"-" , "*" ,"/" ,"%"] + querys_oper = querys_mix + + for operator in operators: + + sql_common= "select " + + for index , query in enumerate(querys_oper): + + sql_common += " %s %s " %(query,operator) + sql_common=sql_common[:-2] + " from stable_1 where ts>=\"2015-01-01 00:00:00.000\" and ts < \"2015-01-01 00:10:00.000\" ;" + + tdSql.query(sql_common) + + def query_mix_arithmetic(self): + + tdLog.info (" ====================================== elapsed mixup with arithmetic =================================================") + + tdSql.execute("select elapsed(ts,10s)+1 ,elapsed(ts,10s)-2,elapsed(ts,10s)*3,elapsed(ts,10s)/4,elapsed(ts,10s)%5 from sub_table1_1 where ts>=\"2015-01-01 00:00:00.000\" and ts < \"2015-01-01 00:10:00.000\" ; ") + tdSql.execute("select elapsed(ts,10s)+1 ,elapsed(ts,10s)-2,elapsed(ts,10s)*3,elapsed(ts,10s)/4,elapsed(ts,10s)%5 from stable_1 where ts>=\"2015-01-01 00:00:00.000\" and ts < \"2015-01-01 00:10:00.000\" ; ") + + # queries = ["elapsed(ts,10s)+1" ,"elapsed(ts,10s)-2","elapsed(ts,10s)*3","elapsed(ts,10s)/4","elapsed(ts,10s)%5" ] + + # for index ,query in enumerate(queries): + # sql = "select %s from sub_table1_1 where ts>=\"2015-01-01 00:00:00.000\" and ts < \"2015-01-01 00:10:00.000\" interval(1s) fill(prev) ;" % (query) + # data = tdSql.getResult(sql) + # tdSql.query("select elapsed(ts,10s)+1 ,elapsed(ts,10s)-2,elapsed(ts,10s)*3,elapsed(ts,10s)/4,elapsed(ts,10s)%5 from sub_table1_1 where ts>=\"2015-01-01 00:00:00.000\" and ts < \"2015-01-01 00:10:00.000\" interval(1s) fill(prev) ; ") + # tdSql.checkData(0,index+1,data[0][1]) + + def query_with_join(self): + + tdLog.info (" ====================================== elapsed mixup with join =================================================") + + tdSql.error("select elapsed(ts,10s) from stable_empty TABLE1 , stable_empty TABLE2 where TABLE1.ts =TABLE2.ts; ") + tdSql.error("select elapsed(ts,10s) from stable_empty TABLE1 , stable_empty TABLE2 where TABLE1.ts =TABLE2.ts group by tbname; ") + + tdSql.execute("select elapsed(ts,10s) from sub_empty_1 TABLE1 , sub_empty_2 TABLE2 where TABLE1.ts =TABLE2.ts; ") + tdSql.error("select elapsed(ts,10s) from stable_1 TABLE1 , stable_2 TABLE2 where TABLE1.ts =TABLE2.ts and TABLE1.ind =TABLE2.ind; ") + tdSql.error("select elapsed(ts,10s) from stable_1 TABLE1 , stable_2 TABLE2 where TABLE1.ts =TABLE2.ts and TABLE1.ind =TABLE2.ind group by tbname,ind; ") # join not support group by + + tdSql.error("select elapsed(ts,10s) from sub_empty_1 TABLE1 , stable_2 TABLE2 where TABLE1.ts =TABLE2.ts and TABLE1.ind =TABLE2.ind ; ") + tdSql.execute("select elapsed(ts,10s) from sub_empty_1 TABLE1 , sub_empty_2 TABLE2 where TABLE1.ts =TABLE2.ts ; ") + + tdSql.query("select elapsed(ts,10s) from sub_table1_1 TABLE1 , sub_table1_2 TABLE2 where TABLE1.ts =TABLE2.ts ; ") + tdSql.checkData(0,0,9) + + tdSql.query("select elapsed(ts,10s) from sub_empty_1 TABLE1 , sub_table1_2 TABLE2 where TABLE1.ts =TABLE2.ts ; ") + tdSql.checkRows(0) + + tdSql.query("select elapsed(ts,10s) from sub_empty_1 TABLE1 , regular_empty TABLE2 where TABLE1.ts =TABLE2.ts ; ") + tdSql.checkRows(0) + + tdSql.query("select elapsed(ts,10s) from sub_empty_1 TABLE1 , regular_table_1 TABLE2 where TABLE1.ts =TABLE2.ts ; ") + tdSql.checkRows(0) + + tdSql.query("select elapsed(ts,10s) from sub_table1_3 TABLE1 , regular_table_1 TABLE2 where TABLE1.ts =TABLE2.ts 
; ") + tdSql.checkRows(1) + tdSql.checkData(0,0,9) + + tdSql.query("select elapsed(ts,10s) from regular_table_1 ; ") + tdSql.checkRows(1) + tdSql.checkData(0,0,9) + + def query_with_union(self): + + tdLog.info (" ====================================== elapsed mixup with union all =================================================") + + # union all with empty + + tdSql.query("select elapsed(ts,10s) from regular_table_1 union all select elapsed(ts,10s) from regular_table_2;") + + tdSql.query("select elapsed(ts,10s) from regular_table_1 where ts>=\"2015-01-01 00:00:00.000\" and ts < \"2015-01-01 00:10:00.000\" interval(1s) fill(prev) union all \ + select elapsed(ts,10s) from regular_table_2 where ts>=\"2015-01-01 00:00:00.000\" and ts < \"2015-01-01 00:10:00.000\" interval(1s) fill(prev);") + tdSql.checkRows(1200) + tdSql.checkData(0,1,0.1) + tdSql.checkData(500,1,0) + + tdSql.query("select elapsed(ts,10s) from sub_empty_1 where ts>=\"2015-01-01 00:00:00.000\" and ts < \"2015-01-01 00:10:00.000\" interval(1s) fill(prev) union all \ + select elapsed(ts,10s) from regular_table_2 where ts>=\"2015-01-01 00:00:00.000\" and ts < \"2015-01-01 00:10:00.000\" interval(1s) fill(prev);") + tdSql.checkRows(600) + tdSql.checkData(0,1,0.1) + tdSql.checkData(500,0,0) + + tdSql.query('select elapsed(ts,10s) from sub_empty_1 union all select elapsed(ts,10s) from sub_empty_2;') + tdSql.checkRows(0) + + tdSql.query('select elapsed(ts,10s) from regular_table_1 union all select elapsed(ts,10s) from sub_empty_1;') + tdSql.checkRows(1) + tdSql.checkData(0,0,9) + + tdSql.query('select elapsed(ts,10s) from sub_empty_1 union all select elapsed(ts,10s) from regular_table_1;') + tdSql.checkRows(1) + tdSql.checkData(0,0,9) + + tdSql.query('select elapsed(ts,10s) from sub_empty_1 union all select elapsed(ts,10s) from sub_table1_1;') + tdSql.checkRows(1) + tdSql.checkData(0,0,9) + + tdSql.query('select elapsed(ts,10s) from sub_table1_1 union all select elapsed(ts,10s) from sub_empty_1;') + tdSql.checkRows(1) + tdSql.checkData(0,0,9) + + tdSql.query('select elapsed(ts,10s) from sub_empty_1 union all select elapsed(ts,10s) from regular_table_1;') + tdSql.checkRows(1) + tdSql.checkData(0,0,9) + + tdSql.error('select elapsed(ts,10s) from sub_empty_1 union all select elapsed(ts,10s) from stable_sub_empty group by tbname;') + + tdSql.error('select elapsed(ts,10s) from regular_table_1 union all select elapsed(ts,10s) from stable_sub_empty group by tbname;') + + tdSql.query('select elapsed(ts,10s) from sub_empty_1 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" interval(1s) fill(prev) union all select elapsed(ts,10s) from sub_empty_2 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" interval(1s) fill(prev);') + tdSql.checkRows(0) + + tdSql.error('select elapsed(ts,10s) from sub_empty_1 union all select elapsed(ts,10s) from stable_empty group by tbname;') + + tdSql.error('select elapsed(ts,10s) from sub_empty_1 interval(1s) union all select elapsed(ts,10s) from stable_empty interval(1s) group by tbname;') + + # tdSql.error('select elapsed(ts,10s) from sub_empty_1 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" interval(1s) fill(prev) union all select elapsed(ts,10s) from stable_empty where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" interval(1s) fill(prev) group by tbname;') + + tdSql.query("select elapsed(ts,10s) from stable_empty group by tbname union all select elapsed(ts,10s) from stable_empty group by tbname ;") + tdSql.checkRows(0) + + # 
case : TD-12229 + tdSql.query("select elapsed(ts,10s) from stable_empty group by tbname union all select elapsed(ts,10s) from stable_1 group by tbname ;") + tdSql.checkRows(3) + + tdSql.query("select elapsed(ts,10s) from stable_1 group by tbname union all select elapsed(ts,10s) from stable_1 group by tbname ;") + tdSql.checkRows(6) + tdSql.checkData(0,0,9) + tdSql.checkData(5,0,9) + + tdSql.query("select elapsed(ts,10s) from stable_1 group by tbname union all select elapsed(ts,10s) from stable_2 group by tbname ;") + tdSql.checkRows(6) + tdSql.checkData(0,0,9) + tdSql.checkData(5,0,9) + + tdSql.query('select elapsed(ts,10s) from stable_1 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" interval(10s) fill(prev) group by tbname union all\ + select elapsed(ts,10s) from stable_2 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" interval(10s) fill(prev) group by tbname ;') + tdSql.checkRows(360) + tdSql.checkData(0,1,1) + tdSql.checkData(50,1,0) + + #case : TD-12229 + tdSql.query('select elapsed(ts,10s) from stable_empty group by tbname union all select elapsed(ts,10s) from stable_2 group by tbname ;') + tdSql.checkRows(3) + + tdSql.query('select elapsed(ts,10s) from stable_1 group by tbname union all select elapsed(ts,10s) from stable_empty group by tbname ;') + tdSql.checkRows(3) + + + tdSql.query('select elapsed(ts,10s) from stable_empty where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" interval(10s) fill(prev) group by tbname union all\ + select elapsed(ts,10s) from stable_2 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" interval(10s) fill(prev) group by tbname ;') + tdSql.checkRows(180) + + tdSql.query('select elapsed(ts,10s) from stable_1 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" interval(10s) fill(prev) group by tbname union all\ + select elapsed(ts,10s) from stable_empty where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" interval(10s) fill(prev) group by tbname ;') + tdSql.checkRows(180) + + # union all with sub table and regular table + + # sub_table with sub_table + + tdSql.query('select elapsed(ts,10s) from sub_table1_1 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" interval(10s) fill(prev) union all\ + select elapsed(ts,10s) from sub_table2_2 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" interval(10s) fill(prev) ;') + tdSql.checkRows(120) + tdSql.checkData(0,1,1) + tdSql.checkData(12,1,0) + + tdSql.query('select elapsed(ts,10s) from sub_table1_1 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" interval(10s) fill(prev) union all\ + select elapsed(ts,10s) from sub_table1_1 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" interval(10s) fill(prev) ;') + tdSql.checkRows(120) + tdSql.checkData(0,1,1) + tdSql.checkData(12,1,0) + + tdSql.query('select elapsed(ts,10s) from regular_table_1 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" interval(10s) fill(prev) union all\ + select elapsed(ts,10s) from sub_table1_1 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" interval(10s) fill(prev) ;') + tdSql.checkRows(120) + tdSql.checkData(0,1,1) + tdSql.checkData(12,1,0) + + tdSql.query('select elapsed(ts,10s) from regular_table_1 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" interval(10s) fill(prev) union all\ + select elapsed(ts,10s) from regular_table_1 where ts>="2015-01-01 00:00:00.000" and ts < 
"2015-01-01 00:10:00.000" interval(10s) fill(prev) ;') + tdSql.checkRows(120) + tdSql.checkData(0,1,1) + tdSql.checkData(12,1,0) + + tdSql.query('select elapsed(ts,10s) from regular_table_1 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" interval(10s) fill(prev) union all\ + select elapsed(ts,10s) from regular_table_2 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" interval(10s) fill(prev) ;') + tdSql.checkRows(120) + tdSql.checkData(0,1,1) + tdSql.checkData(12,1,0) + + tdSql.query('select elapsed(ts,10s) from regular_table_1 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" interval(10s) fill(prev) union all\ + select elapsed(ts,10s) from regular_table_2 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" interval(10s) fill(prev) ;') + tdSql.checkRows(120) + tdSql.checkData(0,1,1) + tdSql.checkData(12,1,0) + + tdSql.query('select elapsed(ts,10s) from sub_empty_1 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" interval(10s) fill(prev) union all\ + select elapsed(ts,10s) from regular_table_2 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" interval(10s) fill(prev) ;') + tdSql.checkRows(60) + tdSql.checkData(0,1,1) + tdSql.checkData(12,1,0) + + tdSql.query('select elapsed(ts,10s) from regular_table_2 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" interval(10s) fill(prev) union all\ + select elapsed(ts,10s) from sub_empty_1 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" interval(10s) fill(prev) ;') + tdSql.checkRows(60) + tdSql.checkData(0,1,1) + tdSql.checkData(12,1,0) + + # stable with stable + + tdSql.query('select elapsed(ts,10s) from stable_1 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" interval(10s) fill(prev) group by tbname union all\ + select elapsed(ts,10s) from stable_1 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" interval(10s) fill(prev) group by tbname;') + tdSql.checkRows(360) + tdSql.checkData(0,1,1) + tdSql.checkData(12,1,0) + + tdSql.query('select elapsed(ts,10s) from regular_table_2 interval(10s) union all select elapsed(ts,10s) from sub_empty_1 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" interval(10s) fill(prev);') + tdSql.checkRows(10) + tdSql.checkData(0,1,1) + tdSql.checkData(9,1,0) + + tdSql.query('select elapsed(ts,10s) from regular_table_2 interval(10s) union all select elapsed(ts,10s) from regular_table_1 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" interval(10s) fill(prev) ;') + tdSql.checkRows(70) + tdSql.checkData(0,1,1) + tdSql.checkData(9,1,0) + + tdSql.query('select elapsed(ts,10s) from regular_table_2 interval(10s) order by ts desc union all select elapsed(ts,10s) from regular_table_1 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" interval(10s) fill(prev) order by ts asc;') + tdSql.checkRows(70) + tdSql.checkData(0,1,0) + tdSql.checkData(1,1,1) + tdSql.checkData(9,1,1) + + tdSql.query('select elapsed(ts,10s) from stable_1 group by tbname, ind order by ts desc union all select elapsed(ts,10s) from stable_2 group by tbname, ind order by ts asc ;') + tdSql.checkRows(6) + tdSql.checkData(0,0,9) + + tdSql.query('select elapsed(ts,10s) from stable_1 group by tbname, ind order by ts desc union all select elapsed(ts,10s) from stable_1 group by tbname, ind order by ts asc ;') + tdSql.checkRows(6) + tdSql.checkData(0,0,9) + + tdSql.query('select elapsed(ts,10s) from 
stable_1 interval(10s) group by tbname,ind order by ts desc union all select elapsed(ts,10s) from stable_2 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" interval(10s) fill(prev) group by tbname,ind order by ts asc ;') + tdSql.checkRows(210) + tdSql.checkData(0,1,0) + tdSql.checkData(1,1,1) + tdSql.checkData(9,1,1) + + tdSql.query('select elapsed(ts,10s) from stable_2 interval(10s) group by tbname,ind order by ts desc union all select elapsed(ts,10s) from stable_1 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" interval(10s) fill(prev) group by tbname,ind order by ts asc ;') + tdSql.checkRows(210) + tdSql.checkData(0,1,0) + tdSql.checkData(1,1,1) + tdSql.checkData(9,1,1) + + tdSql.query('select elapsed(ts,10s) from stable_1 interval(10s) group by tbname,ind order by ts desc union all select elapsed(ts,10s) from stable_1 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" interval(10s) fill(prev) group by tbname,ind order by ts asc ;') + tdSql.checkRows(210) + tdSql.checkData(0,1,0) + tdSql.checkData(1,1,1) + tdSql.checkData(9,1,1) + + def query_nest(self): + + tdLog.info (" ====================================== elapsed query for nest =================================================") + + # ===============================================outer nest============================================ + + # regular table + + # ts can't be used at outer query + + tdSql.query("select elapsed(ts,10s) from (select ts from regular_table_1 );") + + # case : TD-12164 + + tdSql.error("select elapsed(ts,10s) from (select qint ts from regular_table_1 );") + tdSql.error("select elapsed(tbname ,10s) from (select qint tbname from regular_table_1 );") + tdSql.error("select elapsed(tsc ,1s) from (select q_int tsc from regular_table_1) ;") + tdSql.error("select elapsed(tsv ,1s) from (select elapsed(ts,1s) tsv from regular_table_1);") + tdSql.error("select elapsed(ts ,1s) from (select elapsed(ts,1s) ts from regular_table_1);") + # # bug fix + # tdSql.error("select elapsed(tsc ,1s) from (select tscol tsc from regular_table_1) ;") + + # case TD-12276 + # tdSql.error("select elapsed(ts,10s) from (select ts,tbname from regular_table_1 order by ts asc );") + + # tdSql.error("select elapsed(ts,10s) from (select ts,tbname from regular_table_1 order by ts desc );") + + # tdSql.error("select elapsed(ts,10s) from (select ts ,max(q_int),tbname from regular_table_1 order by ts ) interval(1s);") + + # tdSql.error("select elapsed(ts,10s) from (select ts ,q_int,tbname from regular_table_1 order by ts ) interval(1s);") + + # sub table + + tdSql.query("select elapsed(ts,10s) from (select ts from sub_table1_1 );") + + # tdSql.error("select elapsed(ts,10s) from (select ts ,max(q_int),tbname from sub_table1_1 order by ts ) interval(1s);") + + # tdSql.error("select elapsed(ts,10s) from (select ts ,q_int,tbname from sub_table1_1 order by ts ) interval(1s);") + + tdSql.query("select elapsed(ts,10s) from (select ts ,tbname,top(q_int,3) from sub_table1_1 ) interval(10s);") + + tdSql.query("select elapsed(ts,10s) from (select ts ,tbname,bottom(q_int,3) from sub_table1_1 ) interval(10s);") + + tdSql.query("select elapsed(ts,10s) from (select ts ,tbname from sub_table1_1 ) interval(10s);") + + tdSql.query("select elapsed(ts,10s) from (select ts ,tbname from sub_table1_1 ) interval(10s);") + + # tdSql.error("select elapsed(ts,10s) from (select ts ,count(*),tbname from sub_table1_1 order by ts ) interval(1s);") + + querys = ["count(*)","avg(q_int)", 
"sum(q_double)","stddev(q_float)","LEASTSQUARES(q_int,0,1)","elapsed(ts,10s)"] + + for query in querys: + sql1 = "select elapsed(ts,10s) from (select %s from regular_table_1 order by ts ) interval(1s); " % query + sql2 = "select elapsed(ts,10s) from (select ts , tbname ,%s from regular_table_1 order by ts ) interval(1s); " % query + sql3 = "select elapsed(ts,10s) from (select ts , tbname ,%s from stable_1 group by tbname, ind order by ts ) interval(1s); " % query + sql4 = "select elapsed(ts,10s) from (select %s from sub_table2_1 order by ts ) interval(1s); " % query + sql5 = "select elapsed(ts,10s) from (select ts , tbname ,%s from sub_table2_1 order by ts ) interval(1s); " % query + + tdSql.error(sql1) + tdSql.error(sql2) + tdSql.error(sql3) + tdSql.error(sql4) + tdSql.error(sql5) + + + # case TD-12164 + tdSql.error( "select elapsed(ts00 ,1s) from (select elapsed(ts,1s) ts00 from regular_table_1) ; " ) + tdSql.error( "select elapsed(ts ,1s) from (select elapsed(ts,1s) ts from regular_table_1) ; " ) + + tdSql.error( "select elapsed(ts00 ,1s) from (select elapsed(ts,1s) ts00 from stable_1 group by tbname ) ; " ) + tdSql.error( "select elapsed(ts ,1s) from (select elapsed(ts,1s) ts from stable_1 group by tbname) ; " ) + + + # stable + + tdSql.error("select elapsed(ts,10s) from (select ts from stable_1 ) group by tbname ;") + + tdSql.error("select elapsed(ts,10s) from (select ts ,max(q_int),tbname from stable_1 group by tbname order by ts ) interval(1s) group by tbname;") + + tdSql.error("select elapsed(ts,10s) from (select ts ,q_int,tbname from stable_1 order by ts ) interval(1s) group by tbname;") + + # mixup with aggregate + + querys = ["max(q_int)","min(q_int)" , "first(q_tinyint)", "first(*)","last(q_int)","last(*)","top(q_double,1)", + "bottom(q_float,1)","PERCENTILE(q_int,10)","APERCENTILE(q_int,10)" ,"elapsed(ts,10s)"] + + for index , query in enumerate(querys): + + sql1 = "select elapsed(ts,10s) from (select %s from sub_table1_1) where ts>=\"2015-01-01 00:00:00.000\" and ts < \"2015-01-01 00:10:00.000\" interval(10s) fill(prev) ; " %(query) + sql2 = "select elapsed(ts,10s) from (select %s from stable_1 ) where ts>=\"2015-01-01 00:00:00.000\" and ts < \"2015-01-01 00:10:00.000\" interval(10s) fill(prev) group by tbname; " %(query) + sql3 = "select elapsed(ts,10s) from (select %s from stable_1 group by tbname) where ts>=\"2015-01-01 00:00:00.000\" and ts < \"2015-01-01 00:10:00.000\" interval(10s) fill(prev) group by tbname; " %(query) + + if query in ["interp(q_int)" ]: + # print(sql1 ) + # print(sql2) + tdSql.query(sql1) + tdSql.error(sql2) + else: + tdSql.error(sql1) + tdSql.error(sql2) + tdSql.error(sql3) + + tdSql.error("select elapsed(ts,10s) from (select ts,tbname from regular_table_1 order by ts ) where ts>=\"2015-01-01 00:00:00.000\" and ts < \"2015-01-01 00:10:00.000\" interval(1s) fill(prev);") + + tdSql.error("select elapsed(ts,10s) from (select ts ,max(q_int),tbname from regular_table_1 order by ts ) where ts>=\"2015-01-01 00:00:00.000\" and ts < \"2015-01-01 00:10:00.000\" interval(1s) fill(prev);") + + # ===============================================inner nest============================================ + + # sub table + + tdSql.query("select data from (select count(*),avg(q_int) , sum(q_double),stddev(q_float),LEASTSQUARES(q_int,0,1), elapsed(ts,10s) data from sub_table1_1 ); ") + tdSql.checkData(0,0,9) + + # tdSql.query("select data from (select count(*),avg(q_int) , sum(q_double),stddev(q_float),LEASTSQUARES(q_int,0,1), elapsed(ts,10s) data from sub_table1_1 \ + # 
where ts>=\"2015-01-01 00:00:00.000\" and ts < \"2015-01-01 00:10:00.000\" interval(1s) fill(prev)); ") + # tdSql.checkRows(600) + # tdSql.checkData(0,0,0.1) + + tdSql.query("select * from (select count(*),avg(q_int) , sum(q_double),stddev(q_float),LEASTSQUARES(q_int,0,1), elapsed(ts,10s) data from regular_table_3 ); ") + tdSql.checkData(0,5,9) + + # tdSql.query("select * from (select count(*),avg(q_int) , sum(q_double),stddev(q_float),LEASTSQUARES(q_int,0,1), elapsed(ts,10s) data from regular_table_3 \ + # where ts>=\"2015-01-01 00:00:00.000\" and ts < \"2015-01-01 00:10:00.000\" interval(1s) fill(prev)); ") + # tdSql.checkRows(600) + # tdSql.checkData(0,0,0.1) + + tdSql.query("select max(data) from (select count(*),avg(q_int) , sum(q_double),stddev(q_float),LEASTSQUARES(q_int,0,1), elapsed(ts,10s) data from regular_table_3 ); ") + tdSql.checkData(0,0,9) + + # tdSql.query("select max(data) from (select count(*),avg(q_int) , sum(q_double),stddev(q_float),LEASTSQUARES(q_int,0,1), elapsed(ts,10s) data from regular_table_3 \ + # where ts>=\"2015-01-01 00:00:00.000\" and ts < \"2015-01-01 00:10:00.000\" interval(1s) fill(prev)); ") + # tdSql.checkRows(1) + # tdSql.checkData(0,0,0.1) + + # tdSql.query("select max(data) from (select count(*),avg(q_int) , sum(q_double),stddev(q_float),LEASTSQUARES(q_int,0,1), elapsed(ts,10s) data from sub_empty_2 \ + # where ts>=\"2015-01-01 00:00:00.000\" and ts < \"2015-01-01 00:10:00.000\" interval(1s) fill(prev)); ") + # tdSql.checkRows(0) + + # tdSql.query("select max(data),min(data),avg(data) from (select count(*),avg(q_int) , sum(q_double),stddev(q_float),LEASTSQUARES(q_int,0,1), elapsed(ts,10s) data from regular_table_3 \ + # where ts>=\"2015-01-01 00:00:00.000\" and ts < \"2015-01-01 00:10:00.000\" interval(1s) fill(prev)); ") + # tdSql.checkRows(1) + + # tdSql.query("select ceil(data),floor(data),round(data) from (select count(*),avg(q_int) , sum(q_double),stddev(q_float),LEASTSQUARES(q_int,0,1), elapsed(ts,10s) data from regular_table_3 \ + # where ts>=\"2015-01-01 00:00:00.000\" and ts < \"2015-01-01 00:10:00.000\" interval(1s) fill(prev)); ") + # tdSql.checkRows(600) + + # tdSql.query("select spread(data) from (select count(*),avg(q_int) , sum(q_double),stddev(q_float),LEASTSQUARES(q_int,0,1), elapsed(ts,10s) data from regular_table_3 \ + # where ts>=\"2015-01-01 00:00:00.000\" and ts < \"2015-01-01 00:10:00.000\" interval(1s) fill(prev)); ") + # tdSql.checkRows(1) + + # tdSql.query("select diff(data) from (select count(*),avg(q_int) , sum(q_double),stddev(q_float),LEASTSQUARES(q_int,0,1), elapsed(ts,10s) data from regular_table_3 \ + # where ts>=\"2015-01-01 00:00:00.000\" and ts < \"2015-01-01 00:10:00.000\" interval(1s) fill(prev)); ") + # tdSql.checkRows(599) + + # tdSql.query("select DERIVATIVE(data ,1s ,1) from (select count(*),avg(q_int) , sum(q_double),stddev(q_float),LEASTSQUARES(q_int,0,1), elapsed(ts,10s) data from regular_table_3 \ + # where ts>=\"2015-01-01 00:00:00.000\" and ts < \"2015-01-01 00:10:00.000\" interval(1s) fill(prev)); ") + # tdSql.checkRows(598) + + # tdSql.query("select ceil(data)from (select count(*),avg(q_int) , sum(q_double),stddev(q_float),LEASTSQUARES(q_int,0,1), elapsed(ts,10s) data from regular_table_3 \ + # where ts>=\"2015-01-01 00:00:00.000\" and ts < \"2015-01-01 00:10:00.000\" interval(1s) fill(prev)); ") + # tdSql.checkRows(600) + + # tdSql.query("select floor(data)from (select count(*),avg(q_int) , sum(q_double),stddev(q_float),LEASTSQUARES(q_int,0,1), elapsed(ts,10s) data from regular_table_3 \ + # where 
ts>=\"2015-01-01 00:00:00.000\" and ts < \"2015-01-01 00:10:00.000\" interval(1s) fill(prev)); ") + # tdSql.checkRows(600) + + # tdSql.query("select round(data)from (select count(*),avg(q_int) , sum(q_double),stddev(q_float),LEASTSQUARES(q_int,0,1), elapsed(ts,10s) data from regular_table_3 \ + # where ts>=\"2015-01-01 00:00:00.000\" and ts < \"2015-01-01 00:10:00.000\" interval(1s) fill(prev)); ") + # tdSql.checkRows(600) + + # tdSql.query("select data*10+2 from (select count(*),avg(q_int) , sum(q_double),stddev(q_float),LEASTSQUARES(q_int,0,1), elapsed(ts,10s) data from regular_table_3 \ + # where ts>=\"2015-01-01 00:00:00.000\" and ts < \"2015-01-01 00:10:00.000\" interval(1s) fill(prev)); ") + # tdSql.checkRows(600) + + # tdSql.query("select data*10+2 from (select count(*),avg(q_int) , sum(q_double),stddev(q_float),LEASTSQUARES(q_int,0,1), elapsed(ts,10s) data from regular_table_3 \ + # where ts>=\"2015-01-01 00:00:00.000\" and ts < \"2015-01-01 00:10:00.000\" interval(1s) fill(prev)); ") + # tdSql.checkRows(600) + + def query_session_windows(self): + + # case TD-12344 + # session not support stable + tdSql.error('select elapsed(ts,10s) from stable_1 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" session(ts ,10s) group by tbname,ind order by ts asc ') + + tdSql.query('select elapsed(ts,10s) from sub_table1_1 session(ts,1w) ; ') + tdSql.checkRows(1) + tdSql.checkData(0,0,9) + tdSql.query('select elapsed(ts,10s) from sub_table1_1 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" session(ts,1w) ; ') + tdSql.checkRows(1) + tdSql.checkData(0,0,9) + + tdSql.error('select elapsed(ts,10s) from ( select * from sub_table1_1 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000") session(ts,1w) ; ') + + tdSql.error('select elapsed(ts,10s) from ( select ts ,q_int from sub_table1_1 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000") session(ts,1w) ; ') + + tdSql.error('select elapsed(ts,10s) from sub_table1_1 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" interval(20s) fill (next) session(ts,1w) ; ') + + tdSql.query('select elapsed(ts,10s) from sub_empty_1 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" session(ts,1w) ; ') + tdSql.checkRows(0) + + # windows state + # not support stable + + tdSql.error('select elapsed(ts,10s) from stable_1 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" state_window(q_int) group by tbname,ind order by ts asc ') + + tdSql.query('select elapsed(ts,10s) from sub_table1_1 state_window(q_int) ; ') + tdSql.checkRows(10) + tdSql.checkData(0,0,0) + tdSql.query('select elapsed(ts,10s) from sub_table1_1 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" state_window(q_int) ; ') + tdSql.checkRows(10) + tdSql.checkData(0,0,0) + + # tdSql.error('select elapsed(ts,10s) from ( select * from sub_table1_1 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000") state_window(q_int) ; ') + + # tdSql.error('select elapsed(ts,10s) from ( select ts ,q_int from sub_table1_1 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000") state_window(q_int) ; ') + + # tdSql.error('select elapsed(ts,10s) from sub_table1_1 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" interval(20s) fill (next) state_window(q_int) ; ') + + # tdSql.query('select elapsed(ts,10s) from sub_empty_1 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" state_window(q_int); ') + # 
tdSql.checkRows(0)
+
+
+    def continuous_query(self):
+        # continuous query (create table ... as select ...) does not accept elapsed
+        tdSql.error('create table elapsed_t as select elapsed(ts) from sub_table1_1 interval(1m) sliding(30s);')
+        tdSql.error('create table elapsed_tb as select elapsed(ts) from stable_1 interval(1m) sliding(30s) group by tbname;')
+        tdSql.error('create table elapsed_tc as select elapsed(ts) from stable_1 interval(10s) sliding(5s) interval(1m) sliding(30s) group by tbname;')
+
+    def query_precision(self):
+        def generate_data(precision="ms"):
+
+            tdSql.execute("create database if not exists db_%s precision '%s';" %(precision, precision))
+            tdSql.execute("use db_%s;" %precision)
+            tdSql.execute("create stable db_%s.st (ts timestamp , id int) tags(ind int);"%precision)
+            tdSql.execute("create table db_%s.tb1 using st tags(1);"%precision)
+            tdSql.execute("create table db_%s.tb2 using st tags(2);"%precision)
+
+            # self.ts is in milliseconds; scale the start timestamp and the
+            # 10-second step to the precision of the target database
+            if precision == "ms":
+                start_ts = self.ts
+                step = 10000
+            elif precision == "us":
+                start_ts = self.ts*1000
+                step = 10000000
+            elif precision == "ns":
+                start_ts = self.ts*1000000
+                step = 10000000000
+            else:
+                pass
+
+            for i in range(10):
+
+                sql1 = "insert into db_%s.tb1 values (%d,%d)"%(precision ,start_ts+i*step,i)
+                # the second batch goes to tb2 so that both child tables hold data
+                sql2 = "insert into db_%s.tb2 values (%d,%d)"%(precision, start_ts+i*step,i)
+                tdSql.execute(sql1)
+                tdSql.execute(sql2)
+
+        time_units = ["10s","10a","10u","10b"]
+
+        precision_list = ["ms","us","ns"]
+        for pres in precision_list:
+            generate_data(pres)
+
+            for index,unit in enumerate(time_units):
+
+                # a time unit finer than the database precision is rejected
+                if pres == "ms":
+                    if unit in ["10u","10b"]:
+                        tdSql.error("select elapsed(ts,%s) from db_%s.st group by tbname "%(unit,pres))
+                    else:
+                        tdSql.query("select elapsed(ts,%s) from db_%s.st group by tbname "%(unit,pres))
+                elif pres == "us" and unit in ["10b"]:
+                    tdSql.error("select elapsed(ts,%s) from db_%s.st group by tbname "%(unit,pres))
+                else:
+
+                    tdSql.query("select elapsed(ts,%s) from db_%s.st group by tbname "%(unit,pres))
+                    basic_result = 9
+                    tdSql.checkData(0,0,basic_result*pow(1000,index))
+
+    def run(self):
+        tdSql.prepare()
+        self.prepare_data()
+        self.abnormal_common_test()
+        self.abnormal_use_test()
+        self.query_filter()
+        # self.query_interval()
+        self.query_mix_common()
+        self.query_mix_Aggregate()
+        self.query_mix_select()
+        self.query_mix_compute()
+        self.query_mix_arithmetic()
+        # self.query_with_join()
+        # self.query_with_union()
+        self.query_nest()
+        self.query_session_windows()
+        self.continuous_query()
+        self.query_precision()
+
+    def stop(self):
+        tdSql.close()
+        tdLog.success("%s successfully executed" % __file__)
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
+
diff --git a/tests/system-test/2-query/function_diff.py b/tests/system-test/2-query/function_diff.py
new file mode 100644
index 0000000000000000000000000000000000000000..325bd2bc8ebd79f3e58daf6690492dc8ca329dda
--- /dev/null
+++ b/tests/system-test/2-query/function_diff.py
@@ -0,0 +1,432 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import subprocess +import random +import math +import numpy as np +import inspect +import re + +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor()) + + def diff_query_form(self, col="c1", alias="", table_expr="t1", condition=""): + + ''' + diff function: + :param col: string, column name, required parameters; + :param alias: string, result column another name,or add other funtion; + :param table_expr: string or expression, data source(eg,table/stable name, result set), required parameters; + :param condition: expression; + :param args: other funtions,like: ', last(col)',or give result column another name, like 'c2' + :return: diff query statement,default: select diff(c1) from t1 + ''' + + return f"select diff({col}) {alias} from {table_expr} {condition}" + + def checkdiff(self,col="c1", alias="", table_expr="t1", condition="" ): + line = sys._getframe().f_back.f_lineno + pre_sql = self.diff_query_form( + col=col, table_expr=table_expr, condition=condition + ).replace("diff", "count") + tdSql.query(pre_sql) + + if tdSql.queryRows == 0: + tdSql.query(self.diff_query_form( + col=col, alias=alias, table_expr=table_expr, condition=condition + )) + print(f"case in {line}: ", end='') + tdSql.checkRows(0) + return + + if "order by tbname" in condition: + tdSql.error(self.diff_query_form( + col=col, alias=alias, table_expr=table_expr, condition=condition + )) + return + + if "group" in condition: + + tb_condition = condition.split("group by")[1].split(" ")[1] + tdSql.query(f"select distinct {tb_condition} from {table_expr}") + query_result = tdSql.queryResult + query_rows = tdSql.queryRows + clear_condition = re.sub('order by [0-9a-z]*|slimit [0-9]*|soffset [0-9]*', "", condition) + + pre_row = 0 + for i in range(query_rows): + group_name = query_result[i][0] + if "where" in clear_condition: + pre_condition = re.sub('group by [0-9a-z]*', f"{tb_condition}='{group_name}'", clear_condition) + else: + pre_condition = "where " + re.sub('group by [0-9a-z]*',f"{tb_condition}='{group_name}'", clear_condition) + + tdSql.query(f"select {col} {alias} from {table_expr} {pre_condition}") + pre_data = np.array(tdSql.queryResult)[np.array(tdSql.queryResult) != None] + pre_diff = np.diff(pre_data) + # trans precision for data + tdSql.query(self.diff_query_form( + col=col, alias=alias, table_expr=table_expr, condition=condition + )) + for j in range(len(pre_diff)): + print(f"case in {line}:", end='') + if isinstance(pre_diff[j] , float) : + pass + else: + tdSql.checkData(pre_row+j, 1, pre_diff[j] ) + pre_row += len(pre_diff) + return + elif "union" in condition: + union_sql_0 = self.diff_query_form( + col=col, alias=alias, table_expr=table_expr, condition=condition + ).split("union all")[0] + + union_sql_1 = self.diff_query_form( + col=col, alias=alias, table_expr=table_expr, condition=condition + ).split("union all")[1] + + tdSql.query(union_sql_0) + union_diff_0 = tdSql.queryResult + row_union_0 = tdSql.queryRows + + tdSql.query(union_sql_1) + union_diff_1 = tdSql.queryResult + + tdSql.query(self.diff_query_form( + col=col, alias=alias, 
table_expr=table_expr, condition=condition + )) + for i in range(tdSql.queryRows): + print(f"case in {line}: ", end='') + if i < row_union_0: + tdSql.checkData(i, 0, union_diff_0[i][0]) + else: + tdSql.checkData(i, 0, union_diff_1[i-row_union_0][0]) + return + + else: + tdSql.query(f"select {col} from {table_expr} {re.sub('limit [0-9]*|offset [0-9]*','',condition)}") + offset_val = condition.split("offset")[1].split(" ")[1] if "offset" in condition else 0 + pre_result = np.array(tdSql.queryResult)[np.array(tdSql.queryResult) != None] + pre_diff = np.diff(pre_result)[offset_val:] + tdSql.query(self.diff_query_form( + col=col, alias=alias, table_expr=table_expr, condition=condition + )) + + for i in range(tdSql.queryRows): + print(f"case in {line}: ", end='') + if isinstance(pre_diff[i] , float ): + pass + else: + tdSql.checkData(i, 0, pre_diff[i]) + + pass + + def diff_current_query(self) : + + # table schema :ts timestamp, c1 int, c2 float, c3 timestamp, c4 binary(16), c5 double, c6 bool + # c7 bigint, c8 smallint, c9 tinyint, c10 nchar(16) + + # case1~6: numeric col:int/bigint/tinyint/smallint/float/double + self.checkdiff() + case2 = {"col": "c2"} + self.checkdiff(**case2) + case3 = {"col": "c5"} + self.checkdiff(**case3) + case4 = {"col": "c7"} + self.checkdiff(**case4) + case5 = {"col": "c8"} + self.checkdiff(**case5) + case6 = {"col": "c9"} + self.checkdiff(**case6) + + # case7~8: nested query + # case7 = {"table_expr": "(select c1 from stb1)"} + # self.checkdiff(**case7) + # case8 = {"table_expr": "(select diff(c1) c1 from stb1 group by tbname)"} + # self.checkdiff(**case8) + + # case9~10: mix with tbname/ts/tag/col + # case9 = {"alias": ", tbname"} + # self.checkdiff(**case9) + # case10 = {"alias": ", _c0"} + # self.checkdiff(**case10) + # case11 = {"alias": ", st1"} + # self.checkdiff(**case11) + # case12 = {"alias": ", c1"} + # self.checkdiff(**case12) + + # case13~15: with single condition + case13 = {"condition": "where c1 <= 10"} + self.checkdiff(**case13) + case14 = {"condition": "where c6 in (0, 1)"} + self.checkdiff(**case14) + case15 = {"condition": "where c1 between 1 and 10"} + self.checkdiff(**case15) + + # case16: with multi-condition + case16 = {"condition": "where c6=1 or c6 =0"} + self.checkdiff(**case16) + + # case17: only support normal table join + case17 = { + "col": "t1.c1", + "table_expr": "t1, t2", + "condition": "where t1.ts=t2.ts" + } + self.checkdiff(**case17) + # case18~19: with group by + # case18 = { + # "table_expr": "t1", + # "condition": "group by c6" + # } + # self.checkdiff(**case18) + # case19 = { + # "table_expr": "stb1", + # "condition": "partition by tbname" # partition by tbname + # } + # self.checkdiff(**case19) + + # # case20~21: with order by + # case20 = {"condition": "order by ts"} + # self.checkdiff(**case20) + + # # case22: with union + # case22 = { + # "condition": "union all select diff(c1) from t2" + # } + # self.checkdiff(**case22) + + # case23: with limit/slimit + case23 = { + "condition": "limit 1" + } + self.checkdiff(**case23) + # case24 = { + # "table_expr": "stb1", + # "condition": "group by tbname slimit 1 soffset 1" + # } + # self.checkdiff(**case24) + + pass + + def diff_error_query(self) -> None : + # unusual test + # + # table schema :ts timestamp, c1 int, c2 float, c3 timestamp, c4 binary(16), c5 double, c6 bool + # c7 bigint, c8 smallint, c9 tinyint, c10 nchar(16) + # + # form test + tdSql.error(self.diff_query_form(col="")) # no col + tdSql.error("diff(c1) from stb1") # no select + tdSql.error("select diff from t1") # 
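+        # How checkdiff builds its expected values, in miniature (a sketch;
+        # a plain list stands in for tdSql.queryResult):
+        #     np.diff([1, 3, 6, 10])  ->  array([2, 3, 4])
+        # numpy's diff is one element shorter than its input, matching diff()
+        # in SQL returning one row fewer than the source rows of each group.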
no diff condition
+        tdSql.error("select diff c1 from t1")                   # no brackets
+        tdSql.error("select diff(c1) t1")                       # no from
+        tdSql.error("select diff( c1 ) from ")                  # no table_expr
+        # tdSql.error(self.diff_query_form(col="st1"))          # tag col
+        tdSql.query("select diff(st1) from t1 ")
+        # tdSql.error(self.diff_query_form(col=1))              # col is a value
+        tdSql.error(self.diff_query_form(col="'c1'"))           # col is a string
+        tdSql.error(self.diff_query_form(col=None))             # col is NULL 1
+        tdSql.error(self.diff_query_form(col="NULL"))           # col is NULL 2
+        tdSql.error(self.diff_query_form(col='""'))             # col is ""
+        tdSql.error(self.diff_query_form(col='c%'))             # col is special char 1
+        tdSql.error(self.diff_query_form(col='c_'))             # col is special char 2
+        tdSql.error(self.diff_query_form(col='c.'))             # col is special char 3
+        tdSql.error(self.diff_query_form(col='c3'))             # timestamp col
+        tdSql.error(self.diff_query_form(col='ts'))             # primary key
+        tdSql.error(self.diff_query_form(col='avg(c1)'))        # expr col
+        # tdSql.error(self.diff_query_form(col='c6'))           # bool col
+        tdSql.query("select diff(c6) from t1")
+        tdSql.error(self.diff_query_form(col='c4'))             # binary col
+        tdSql.error(self.diff_query_form(col='c10'))            # nchar col
+        tdSql.error(self.diff_query_form(col='tt1.c1'))         # col not in table_expr
+        tdSql.error(self.diff_query_form(col='t1'))             # tbname
+        tdSql.error(self.diff_query_form(col='stb1'))           # stbname
+        tdSql.error(self.diff_query_form(col='db'))             # database name
+        # tdSql.error(self.diff_query_form(col=True))           # col is BOOL 1
+        # tdSql.error(self.diff_query_form(col='True'))         # col is BOOL 2
+        tdSql.error(self.diff_query_form(col='*'))              # col is all col
+        tdSql.error("select diff[c1] from t1")                  # sql form error 1
+        tdSql.error("select diff{c1} from t1")                  # sql form error 2
+        tdSql.error(self.diff_query_form(col="[c1]"))           # sql form error 3
+        # tdSql.error(self.diff_query_form(col="c1, c2"))       # sql form error 4
+        # tdSql.error(self.diff_query_form(col="c1, 2"))        # sql form error 5
+        tdSql.error(self.diff_query_form(alias=", count(c1)"))  # mix with aggregate function 1
+        tdSql.error(self.diff_query_form(alias=", avg(c1)"))    # mix with aggregate function 2
+        tdSql.error(self.diff_query_form(alias=", min(c1)"))    # mix with select function 1
+        tdSql.error(self.diff_query_form(alias=", top(c1, 5)")) # mix with select function 2
+        tdSql.error(self.diff_query_form(alias=", spread(c1)")) # mix with calculation function 1
+        tdSql.error(self.diff_query_form(alias=", diff(c1)"))   # mix with calculation function 2
+        # tdSql.error(self.diff_query_form(alias=" + 2"))       # mix with arithmetic 1
+        tdSql.error(self.diff_query_form(alias=" + avg(c1)"))   # mix with arithmetic 2
+        tdSql.error(self.diff_query_form(alias=", c2"))         # mix with other col
+        # tdSql.error(self.diff_query_form(table_expr="stb1"))  # select stb directly
+        stb_join = {
+            "col": "stb1.c1",
+            "table_expr": "stb1, stb2",
+            "condition": "where stb1.ts=stb2.ts and stb1.st1=stb2.st2 order by stb1.ts"
+        }
+        tdSql.error(self.diff_query_form(**stb_join))           # stb join
+        interval_sql = {
+            "condition": "where ts>0 and ts < now interval(1h) fill(next)"
+        }
+        tdSql.error(self.diff_query_form(**interval_sql))       # interval
+        group_normal_col = {
+            "table_expr": "t1",
+            "condition": "group by c6"
+        }
+        tdSql.error(self.diff_query_form(**group_normal_col))   # group by normal col
+        slimit_soffset_sql = {
+            "table_expr": "stb1",
+            "condition": "group by tbname slimit 1 soffset 1"
+        }
+        # tdSql.error(self.diff_query_form(**slimit_soffset_sql))
+        order_by_tbname_sql = {
+            "table_expr": "stb1",
+            "condition": "group by 
tbname order by tbname" + } + tdSql.error(self.diff_query_form(**order_by_tbname_sql)) + + pass + + def diff_test_data(self, tbnum:int, data_row:int, basetime:int) -> None : + for i in range(tbnum): + for j in range(data_row): + tdSql.execute( + f"insert into t{i} values (" + f"{basetime + (j+1)*10}, {random.randint(-200, -1)}, {random.uniform(200, -1)}, {basetime + random.randint(-200, -1)}, " + f"'binary_{j}', {random.uniform(-200, -1)}, {random.choice([0,1])}, {random.randint(-200,-1)}, " + f"{random.randint(-200, -1)}, {random.randint(-127, -1)}, 'nchar_{j}' )" + ) + + tdSql.execute( + f"insert into t{i} values (" + f"{basetime - (j+1) * 10}, {random.randint(1, 200)}, {random.uniform(1, 200)}, {basetime - random.randint(1, 200)}, " + f"'binary_{j}_1', {random.uniform(1, 200)}, {random.choice([0, 1])}, {random.randint(1,200)}, " + f"{random.randint(1,200)}, {random.randint(1,127)}, 'nchar_{j}_1' )" + ) + tdSql.execute( + f"insert into tt{i} values ( {basetime-(j+1) * 10}, {random.randint(1, 200)} )" + ) + + pass + + def diff_test_table(self,tbnum: int) -> None : + tdSql.execute("drop database if exists db") + tdSql.execute("create database if not exists db keep 3650") + tdSql.execute("use db") + + tdSql.execute( + "create stable db.stb1 (\ + ts timestamp, c1 int, c2 float, c3 timestamp, c4 binary(16), c5 double, c6 bool, \ + c7 bigint, c8 smallint, c9 tinyint, c10 nchar(16)\ + ) \ + tags(st1 int)" + ) + tdSql.execute( + "create stable db.stb2 (ts timestamp, c1 int) tags(st2 int)" + ) + for i in range(tbnum): + tdSql.execute(f"create table t{i} using stb1 tags({i})") + tdSql.execute(f"create table tt{i} using stb2 tags({i})") + + pass + + def diff_test_run(self) : + tdLog.printNoPrefix("==========TD-10594==========") + tbnum = 10 + nowtime = int(round(time.time() * 1000)) + per_table_rows = 10 + self.diff_test_table(tbnum) + + tdLog.printNoPrefix("######## no data test:") + self.diff_current_query() + self.diff_error_query() + + tdLog.printNoPrefix("######## insert only NULL test:") + for i in range(tbnum): + tdSql.execute(f"insert into t{i}(ts) values ({nowtime - 5})") + tdSql.execute(f"insert into t{i}(ts) values ({nowtime + 5})") + self.diff_current_query() + self.diff_error_query() + + tdLog.printNoPrefix("######## insert data in the range near the max(bigint/double):") + self.diff_test_table(tbnum) + tdSql.execute(f"insert into t1(ts, c1,c2,c5,c7) values " + f"({nowtime - (per_table_rows + 1) * 10}, {2**31-1}, {3.4*10**38}, {1.7*10**308}, {2**63-1})") + tdSql.execute(f"insert into t1(ts, c1,c2,c5,c7) values " + f"({nowtime - (per_table_rows + 2) * 10}, {2**31-1}, {3.4*10**38}, {1.7*10**308}, {2**63-1})") + self.diff_current_query() + self.diff_error_query() + + tdLog.printNoPrefix("######## insert data in the range near the min(bigint/double):") + self.diff_test_table(tbnum) + tdSql.execute(f"insert into t1(ts, c1,c2,c5,c7) values " + f"({nowtime - (per_table_rows + 1) * 10}, {1-2**31}, {-3.4*10**38}, {-1.7*10**308}, {1-2**63})") + tdSql.execute(f"insert into t1(ts, c1,c2,c5,c7) values " + f"({nowtime - (per_table_rows + 2) * 10}, {1-2**31}, {-3.4*10**38}, {-1.7*10**308}, {512-2**63})") + self.diff_current_query() + self.diff_error_query() + + tdLog.printNoPrefix("######## insert data without NULL data test:") + self.diff_test_table(tbnum) + self.diff_test_data(tbnum, per_table_rows, nowtime) + self.diff_current_query() + self.diff_error_query() + + + tdLog.printNoPrefix("######## insert data mix with NULL test:") + for i in range(tbnum): + tdSql.execute(f"insert into t{i}(ts) 
values ({nowtime})") + tdSql.execute(f"insert into t{i}(ts) values ({nowtime-(per_table_rows+3)*10})") + tdSql.execute(f"insert into t{i}(ts) values ({nowtime+(per_table_rows+3)*10})") + self.diff_current_query() + self.diff_error_query() + + + + tdLog.printNoPrefix("######## check after WAL test:") + tdSql.query("show dnodes") + index = tdSql.getData(0, 0) + tdDnodes.stop(index) + tdDnodes.start(index) + self.diff_current_query() + self.diff_error_query() + + def run(self): + import traceback + try: + # run in develop branch + self.diff_test_run() + pass + except Exception as e: + traceback.print_exc() + raise e + + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) \ No newline at end of file diff --git a/tests/system-test/2-query/mavg.py b/tests/system-test/2-query/mavg.py new file mode 100644 index 0000000000000000000000000000000000000000..c8cbd269f9ce2c3ae0e5bd6b0361f0eb4252b1a9 --- /dev/null +++ b/tests/system-test/2-query/mavg.py @@ -0,0 +1,677 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import subprocess +import random +import math +import numpy as np +import inspect +import re +import taos + +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor()) + + def mavg_query_form(self, sel="select", func="mavg(", col="c1", m_comm =",", k=1,r_comm=")", alias="", fr="from",table_expr="t1", condition=""): + ''' + mavg function: + + :param sel: string, must be "select", required parameters; + :param func: string, in this case must be "mavg(", otherwise return other function, required parameters; + :param col: string, column name, required parameters; + :param m_comm: string, comma between col and k , required parameters; + :param k: int/float,the width of the sliding window, [1,100], required parameters; + :param r_comm: string, must be ")", use with "(" in func, required parameters; + :param alias: string, result column another name,or add other funtion; + :param fr: string, must be "from", required parameters; + :param table_expr: string or expression, data source(eg,table/stable name, result set), required parameters; + :param condition: expression; + :return: mavg query statement,default: select mavg(c1, 1) from t1 + ''' + + return f"{sel} {func} {col} {m_comm} {k} {r_comm} {alias} {fr} {table_expr} {condition}" + + def checkmavg(self,sel="select", func="mavg(", col="c1", m_comm =",", k=1,r_comm=")", alias="", fr="from",table_expr="t1", condition=""): + # print(self.mavg_query_form(sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + # table_expr=table_expr, condition=condition)) + line = sys._getframe().f_back.f_lineno + + if not all([sel , func , col , m_comm , k , r_comm , fr , table_expr]): + print(f"case in {line}: ", end='') + return tdSql.error(self.mavg_query_form( + sel=sel, func=func, 
col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + + sql = "select * from t1" + collist = tdSql.getColNameList(sql) + + if not isinstance(col, str): + print(f"case in {line}: ", end='') + return tdSql.error(self.mavg_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + + if len([x for x in col.split(",") if x.strip()]) != 1: + print(f"case in {line}: ", end='') + return tdSql.error(self.mavg_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + + col = col.replace(",", "").replace(" ", "") + + if any([re.compile('^[a-zA-Z]{1}.*$').match(col) is None , not col.replace(".","").isalnum()]): + print(f"case in {line}: ", end='') + return tdSql.error(self.mavg_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + + # if all(["," in col , len(col.split(",")) != 2]): + # print(f"case in {line}: ", end='') + # return tdSql.error(self.mavg_query_form( + # sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + # table_expr=table_expr, condition=condition + # )) + # + # if ("," in col): + # if (not col.split(",")[0].strip()) ^ (not col.split(",")[1].strip()): + # col = col.strip().split(",")[0] if not col.split(",")[1].strip() else col.strip().split(",")[1] + # else: + # print(f"case in {line}: ", end='') + # return tdSql.error(self.mavg_query_form( + # sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + # table_expr=table_expr, condition=condition + # )) + # pass + + if '.' in col: + if any([col.split(".")[0] not in table_expr, col.split(".")[1] not in collist]): + print(f"case in {line}: ", end='') + return tdSql.error(self.mavg_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + pass + + if "." not in col: + if col not in collist: + print(f"case in {line}: ", end='') + return tdSql.error(self.mavg_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + pass + + colname = col if "." 
not in col else col.split(".")[1] + col_index = collist.index(colname) + if any([tdSql.cursor.istype(col_index, "TIMESTAMP"), tdSql.cursor.istype(col_index, "BOOL")]): + print(f"case in {line}: ", end='') + return tdSql.error(self.mavg_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + + if any([tdSql.cursor.istype(col_index, "BINARY") , tdSql.cursor.istype(col_index,"NCHAR")]): + print(f"case in {line}: ", end='') + return tdSql.error(self.mavg_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + + if any( [func != "mavg(" , r_comm != ")" , fr != "from", sel != "select"]): + print(f"case in {line}: ", end='') + return tdSql.error(self.mavg_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + + if all(["(" not in table_expr, "stb" in table_expr, "group" not in condition.lower()]): + print(f"case in {line}: ", end='') + return tdSql.error(self.mavg_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + + if "order by tbname" in condition.lower(): + print(f"case in {line}: ", end='') + return tdSql.error(self.mavg_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + + if all(["group" in condition.lower(), "tbname" not in condition.lower()]): + print(f"case in {line}: ", end='') + return tdSql.error(self.mavg_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + + alias_list = ["tbname", "_c0", "st", "ts"] + if all([alias, "," not in alias, not alias.isalnum()]): + # actually, column alias also support "_", but in this case,forbidden that。 + print(f"case in {line}: ", end='') + return tdSql.error(self.mavg_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + + if all([alias, "," in alias]): + if all(parm != alias.lower().split(",")[1].strip() for parm in alias_list): + print(f"case in {line}: ", end='') + return tdSql.error(self.mavg_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + pass + + condition_exception = [ "~", "^", "insert", "distinct", + "count", "avg", "twa", "irate", "sum", "stddev", "leastquares", + "min", "max", "first", "last", "top", "bottom", "percentile", + "apercentile", "last_row", "interp", "diff", "derivative", + "spread", "ceil", "floor", "round", "interval", "fill", "slimit", "soffset"] + if "union" not in condition.lower(): + if any(parm in condition.lower().strip() for parm in condition_exception): + + print(f"case in {line}: ", end='') + return tdSql.error(self.mavg_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + pass + + if not any([isinstance(k, int) , isinstance(k, float)]) : + print(f"case in {line}: ", end='') + return tdSql.error(self.mavg_query_form( + col=col, k=k, alias=alias, table_expr=table_expr, condition=condition + )) + + if not(1 <= k < 1001): + print(f"case in {line}: ", end='') + 
return tdSql.error(self.mavg_query_form(
+                col=col, k=k, alias=alias, table_expr=table_expr, condition=condition
+            ))
+
+        # k is floored to an integer before computing the reference result
+        k = int(k // 1)
+        pre_sql = re.sub(r"mavg\([a-z0-9 .,]*\)", f"count({col})", self.mavg_query_form(
+            col=col, table_expr=table_expr, condition=condition
+        ))
+        tdSql.query(pre_sql)
+
+        if tdSql.queryRows == 0:
+            tdSql.query(self.mavg_query_form(
+                sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr,
+                table_expr=table_expr, condition=condition
+            ))
+            print(f"case in {line}: ", end='')
+            tdSql.checkRows(0)
+            return
+
+        if "group" in condition:
+            tb_condition = condition.split("group by")[1].split(" ")[1]
+            tdSql.query(f"select distinct {tb_condition} from {table_expr}")
+            query_result = tdSql.queryResult
+            query_rows = tdSql.queryRows
+            clear_condition = re.sub('order by [0-9a-z]*|slimit [0-9]*|soffset [0-9]*', "", condition)
+
+            pre_row = 0
+            for i in range(query_rows):
+                group_name = query_result[i][0]
+                if "where" in clear_condition:
+                    pre_condition = re.sub('group by [0-9a-z]*', f"{tb_condition}='{group_name}'", clear_condition)
+                else:
+                    pre_condition = "where " + re.sub('group by [0-9a-z]*',f"{tb_condition}='{group_name}'", clear_condition)
+
+                tdSql.query(f"select {col} {alias} from {table_expr} {pre_condition}")
+                pre_data = np.array(tdSql.queryResult)[np.array(tdSql.queryResult) != None]
+                pre_mavg = np.convolve(pre_data, np.ones(k), "valid")/k
+                tdSql.query(self.mavg_query_form(
+                    sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr,
+                    table_expr=table_expr, condition=condition
+                ))
+                for j in range(len(pre_mavg)):
+                    print(f"case in {line}:", end='')
+                    tdSql.checkData(pre_row+j, 0, pre_mavg[j])
+                pre_row += len(pre_mavg)
+            return
+        elif "union" in condition:
+            union_sql_0 = self.mavg_query_form(
+                sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr,
+                table_expr=table_expr, condition=condition
+            ).split("union all")[0]
+
+            union_sql_1 = self.mavg_query_form(
+                sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr,
+                table_expr=table_expr, condition=condition
+            ).split("union all")[1]
+
+            tdSql.query(union_sql_0)
+            union_mavg_0 = tdSql.queryResult
+            row_union_0 = tdSql.queryRows
+
+            tdSql.query(union_sql_1)
+            union_mavg_1 = tdSql.queryResult
+
+            tdSql.query(self.mavg_query_form(
+                sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr,
+                table_expr=table_expr, condition=condition
+            ))
+            for i in range(tdSql.queryRows):
+                print(f"case in {line}: ", end='')
+                if i < row_union_0:
+                    tdSql.checkData(i, 0, union_mavg_0[i][0])
+                else:
+                    tdSql.checkData(i, 0, union_mavg_1[i-row_union_0][0])
+            return
+
+        else:
+            tdSql.query(f"select {col} from {table_expr} {re.sub('limit [0-9]*|offset [0-9]*','',condition)}")
+            offset_val = int(condition.split("offset")[1].split(" ")[1]) if "offset" in condition else 0
+            pre_result = np.array(tdSql.queryResult)[np.array(tdSql.queryResult) != None]
+            pre_mavg = np.convolve(pre_result, np.ones(k), "valid")[offset_val:]/k
+            tdSql.query(self.mavg_query_form(
+                sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr,
+                table_expr=table_expr, condition=condition
+            ))
+            for i in range(tdSql.queryRows):
+                print(f"case in {line}: ", end='')
+                tdSql.checkData(i, 0, pre_mavg[i])
+
+        pass
+
+    def mavg_current_query(self):
+
+        # table schema :ts timestamp, c1 int, c2 float, c3 timestamp, c4 binary(16), c5 double, c6 bool
+        # c7 bigint, c8 smallint, c9 tinyint, c10 
nchar(16) + + # case1~6: numeric col:int/bigint/tinyint/smallint/float/double + self.checkmavg() + case2 = {"col": "c2"} + self.checkmavg(**case2) + case3 = {"col": "c5"} + self.checkmavg(**case3) + case4 = {"col": "c7"} + self.checkmavg(**case4) + case5 = {"col": "c8"} + self.checkmavg(**case5) + case6 = {"col": "c9"} + self.checkmavg(**case6) + + # # case7~8: nested query + # case7 = {"table_expr": "(select c1 from stb1)"} + # self.checkmavg(**case7) + # case8 = {"table_expr": "(select mavg(c1, 1) c1 from stb1 group by tbname)"} + # self.checkmavg(**case8) + + # case9~10: mix with tbname/ts/tag/col + # case9 = {"alias": ", tbname"} + # self.checkmavg(**case9) + # case10 = {"alias": ", _c0"} + # self.checkmavg(**case10) + # case11 = {"alias": ", st1"} + # self.checkmavg(**case11) + # case12 = {"alias": ", c1"} + # self.checkmavg(**case12) + + # case13~15: with single condition + case13 = {"condition": "where c1 <= 10"} + self.checkmavg(**case13) + case14 = {"condition": "where c6 in (0, 1)"} + self.checkmavg(**case14) + case15 = {"condition": "where c1 between 1 and 10"} + self.checkmavg(**case15) + + # case16: with multi-condition + case16 = {"condition": "where c6=1 or c6 =0"} + self.checkmavg(**case16) + + # case17: only support normal table join + case17 = { + "col": "t1.c1", + "table_expr": "t1, t2", + "condition": "where t1.ts=t2.ts" + } + self.checkmavg(**case17) + # # case18~19: with group by + # case19 = { + # "table_expr": "stb1", + # "condition": "partition by tbname" + # } + # self.checkmavg(**case19) + + # case20~21: with order by + # case20 = {"condition": "order by ts"} + # self.checkmavg(**case20) + #case21 = { + # "table_expr": "stb1", + # "condition": "group by tbname order by tbname" + #} + #self.checkmavg(**case21) + + # # case22: with union + # case22 = { + # "condition": "union all select mavg( c1 , 1 ) from t2" + # } + # self.checkmavg(**case22) + + # case23: with limit/slimit + case23 = { + "condition": "limit 1" + } + self.checkmavg(**case23) + + # case24: value k range[1, 100], can be int or float, k = floor(k) + case24 = {"k": 3} + self.checkmavg(**case24) + case25 = {"k": 2.999} + self.checkmavg(**case25) + case26 = {"k": 1000} + self.checkmavg(**case26) + + pass + + def mavg_error_query(self) -> None : + # unusual test + + # form test + err1 = {"col": ""} + self.checkmavg(**err1) # no col + err2 = {"sel": ""} + self.checkmavg(**err2) # no select + err3 = {"func": "mavg", "col": "", "m_comm": "", "k": "", "r_comm": ""} + self.checkmavg(**err3) # no mavg condition: select mavg from + err4 = {"col": "", "m_comm": "", "k": ""} + self.checkmavg(**err4) # no mavg condition: select mavg() from + err5 = {"func": "mavg", "r_comm": ""} + self.checkmavg(**err5) # no brackets: select mavg col, k from + err6 = {"fr": ""} + self.checkmavg(**err6) # no from + err7 = {"k": ""} + self.checkmavg(**err7) # no k + err8 = {"table_expr": ""} + self.checkmavg(**err8) # no table_expr + + # err9 = {"col": "st1"} + # self.checkmavg(**err9) # col: tag + err10 = {"col": 1} + self.checkmavg(**err10) # col: value + err11 = {"col": "NULL"} + self.checkmavg(**err11) # col: NULL + err12 = {"col": "%_"} + self.checkmavg(**err12) # col: %_ + err13 = {"col": "c3"} + self.checkmavg(**err13) # col: timestamp col + err14 = {"col": "_c0"} + self.checkmavg(**err14) # col: Primary key + err15 = {"col": "avg(c1)"} + self.checkmavg(**err15) # expr col + err16 = {"col": "c4"} + self.checkmavg(**err16) # binary col + err17 = {"col": "c10"} + self.checkmavg(**err17) # nchar col + err18 = {"col": "c6"} + 
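+        # Reference semantics checkmavg applies to k and to expected values (a
+        # sketch): k must satisfy 1 <= k < 1001 and is floored to an integer,
+        # and the expected series is a length-k sliding mean, e.g. with numpy:
+        #     np.convolve([1, 2, 3, 4], np.ones(2), "valid") / 2  ->  [1.5, 2.5, 3.5]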
self.checkmavg(**err18)                     # bool col
+        err19 = {"col": "'c1'"}
+        self.checkmavg(**err19)                     # col: string
+        err20 = {"col": None}
+        self.checkmavg(**err20)                     # col: None
+        err21 = {"col": "''"}
+        self.checkmavg(**err21)                     # col: ''
+        err22 = {"col": "tt1.c1"}
+        self.checkmavg(**err22)                     # col not in table_expr
+        err23 = {"col": "t1"}
+        self.checkmavg(**err23)                     # tbname
+        err24 = {"col": "stb1"}
+        self.checkmavg(**err24)                     # stbname
+        err25 = {"col": "db"}
+        self.checkmavg(**err25)                     # database name
+        err26 = {"col": "True"}
+        self.checkmavg(**err26)                     # col: BOOL 1
+        err27 = {"col": True}
+        self.checkmavg(**err27)                     # col: BOOL 2
+        err28 = {"col": "*"}
+        self.checkmavg(**err28)                     # col: all col
+        err29 = {"func": "mavg[", "r_comm": "]"}
+        self.checkmavg(**err29)                     # form: mavg[col, k]
+        err30 = {"func": "mavg{", "r_comm": "}"}
+        self.checkmavg(**err30)                     # form: mavg{col, k}
+        err31 = {"col": "[c1]"}
+        self.checkmavg(**err31)                     # form: mavg([col], k)
+        err32 = {"col": "c1, c2"}
+        self.checkmavg(**err32)                     # form: mavg(col, col2, k)
+        err33 = {"col": "c1, 2"}
+        self.checkmavg(**err33)                     # form: mavg(col, k1, k2)
+        err34 = {"alias": ", count(c1)"}
+        self.checkmavg(**err34)                     # mix with aggregate function 1
+        err35 = {"alias": ", avg(c1)"}
+        self.checkmavg(**err35)                     # mix with aggregate function 2
+        err36 = {"alias": ", min(c1)"}
+        self.checkmavg(**err36)                     # mix with select function 1
+        err37 = {"alias": ", top(c1, 5)"}
+        self.checkmavg(**err37)                     # mix with select function 2
+        err38 = {"alias": ", spread(c1)"}
+        self.checkmavg(**err38)                     # mix with calculation function 1
+        err39 = {"alias": ", diff(c1)"}
+        self.checkmavg(**err39)                     # mix with calculation function 2
+        # err40 = {"alias": "+ 2"}
+        # self.checkmavg(**err40)                   # mix with arithmetic 1
+        # tdSql.query(" select mavg( c1 , 1 ) + 2 from t1 ")
+        err41 = {"alias": "+ avg(c1)"}
+        self.checkmavg(**err41)                     # mix with arithmetic 2
+        err42 = {"alias": ", c1"}
+        self.checkmavg(**err42)                     # mix with other col
+        # err43 = {"table_expr": "stb1"}
+        # self.checkmavg(**err43)                   # select stb directly
+        err44 = {
+            "col": "stb1.c1",
+            "table_expr": "stb1, stb2",
+            "condition": "where stb1.ts=stb2.ts and stb1.st1=stb2.st2 order by stb1.ts"
+        }
+        self.checkmavg(**err44)                     # stb join
+        err45 = {
+            "condition": "where ts>0 and ts < now interval(1h) fill(next)"
+        }
+        self.checkmavg(**err45)                     # interval
+        err46 = {
+            "table_expr": "t1",
+            "condition": "group by c6"
+        }
+        self.checkmavg(**err46)                     # group by normal col
+        err47 = {
+            "table_expr": "stb1",
+            "condition": "group by tbname slimit 1 "
+        }
+        # self.checkmavg(**err47)                   # with slimit
+        err48 = {
+            "table_expr": "stb1",
+            "condition": "group by tbname slimit 1 soffset 1"
+        }
+        # self.checkmavg(**err48)                   # with soffset
+        err49 = {"k": "2021-01-01 00:00:00.000"}
+        self.checkmavg(**err49)                     # k: timestamp
+        err50 = {"k": False}
+        self.checkmavg(**err50)                     # k: False
+        err51 = {"k": "%"}
+        self.checkmavg(**err51)                     # k: special char
+        err52 = {"k": ""}
+        self.checkmavg(**err52)                     # k: ""
+        err53 = {"k": None}
+        self.checkmavg(**err53)                     # k: None
+        err54 = {"k": "NULL"}
+        self.checkmavg(**err54)                     # k: null
+        err55 = {"k": "binary(4)"}
+        self.checkmavg(**err55)                     # k: string
+        err56 = {"k": "c1"}
+        self.checkmavg(**err56)                     # k: string, col name
+        err57 = {"col": "c1, 1, c2"}
+        self.checkmavg(**err57)                     # form: mavg(col1, k1, col2, k2)
+        err58 = {"col": "c1 cc1"}
+        self.checkmavg(**err58)                     # form: mavg(col newname, k)
+        err59 = {"k": "'1'"}
+        # self.checkmavg(**err59)                   # form: mavg(col, "1")
+        err60 = {"k": "-1-(-2)"}
+        # self.checkmavg(**err60)                   # form: mavg(col, -1-(-2))
+        err61 = 
{"k": 1001} + self.checkmavg(**err61) # k: right out of [1, 1000] + err62 = {"k": -1} + self.checkmavg(**err62) # k: negative number + err63 = {"k": 0} + self.checkmavg(**err63) # k: 0 + err64 = {"k": 2**63-1} + self.checkmavg(**err64) # k: max(bigint) + err65 = {"k": 1-2**63} + # self.checkmavg(**err65) # k: min(bigint) + err66 = {"k": -2**63} + self.checkmavg(**err66) # k: NULL + err67 = {"k": 0.999999} + self.checkmavg(**err67) # k: left out of [1, 1000] + err68 = { + "table_expr": "stb1", + "condition": "group by tbname order by tbname" # order by tbname not supported + } + self.checkmavg(**err68) + + pass + + def mavg_test_data(self, tbnum:int, data_row:int, basetime:int) -> None : + for i in range(tbnum): + for j in range(data_row): + tdSql.execute( + f"insert into t{i} values (" + f"{basetime + (j+1)*10}, {random.randint(-200, -1)}, {random.uniform(200, -1)}, {basetime + random.randint(-200, -1)}, " + f"'binary_{j}', {random.uniform(-200, -1)}, {random.choice([0,1])}, {random.randint(-200,-1)}, " + f"{random.randint(-200, -1)}, {random.randint(-127, -1)}, 'nchar_{j}' )" + ) + + tdSql.execute( + f"insert into t{i} values (" + f"{basetime - (j+1) * 10}, {random.randint(1, 200)}, {random.uniform(1, 200)}, {basetime - random.randint(1, 200)}, " + f"'binary_{j}_1', {random.uniform(1, 200)}, {random.choice([0, 1])}, {random.randint(1,200)}, " + f"{random.randint(1,200)}, {random.randint(1,127)}, 'nchar_{j}_1' )" + ) + tdSql.execute( + f"insert into tt{i} values ( {basetime-(j+1) * 10}, {random.randint(1, 200)} )" + ) + + pass + + def mavg_test_table(self,tbnum: int) -> None : + tdSql.execute("drop database if exists db") + tdSql.execute("create database if not exists db keep 3650") + tdSql.execute("use db") + + tdSql.execute( + "create stable db.stb1 (\ + ts timestamp, c1 int, c2 float, c3 timestamp, c4 binary(16), c5 double, c6 bool, \ + c7 bigint, c8 smallint, c9 tinyint, c10 nchar(16)\ + ) \ + tags(st1 int)" + ) + tdSql.execute( + "create stable db.stb2 (ts timestamp, c1 int) tags(st2 int)" + ) + for i in range(tbnum): + tdSql.execute(f"create table t{i} using stb1 tags({i})") + tdSql.execute(f"create table tt{i} using stb2 tags({i})") + + pass + + def mavg_test_run(self) : + tdLog.printNoPrefix("==========TD-10594==========") + tbnum = 10 + nowtime = int(round(time.time() * 1000)) + per_table_rows = 2 + self.mavg_test_table(tbnum) + + tdLog.printNoPrefix("######## no data test:") + self.mavg_current_query() + self.mavg_error_query() + + tdLog.printNoPrefix("######## insert only NULL test:") + for i in range(tbnum): + tdSql.execute(f"insert into t{i}(ts) values ({nowtime - 5})") + tdSql.execute(f"insert into t{i}(ts) values ({nowtime + 5})") + self.mavg_current_query() + self.mavg_error_query() + + tdLog.printNoPrefix("######## insert data in the range near the max(bigint/double):") + # self.mavg_test_table(tbnum) + # tdSql.execute(f"insert into t1(ts, c1,c2,c5,c7) values " + # f"({nowtime - (per_table_rows + 1) * 10}, {2**31-1}, {3.4*10**38}, {1.7*10**308}, {2**63-1})") + # tdSql.execute(f"insert into t1(ts, c1,c2,c5,c7) values " + # f"({nowtime - (per_table_rows + 2) * 10}, {2**31-1}, {3.4*10**38}, {1.7*10**308}, {2**63-1})") + # self.mavg_current_query() + # self.mavg_error_query() + + tdLog.printNoPrefix("######## insert data in the range near the min(bigint/double):") + # self.mavg_test_table(tbnum) + # tdSql.execute(f"insert into t1(ts, c1,c2,c5,c7) values " + # f"({nowtime - (per_table_rows + 1) * 10}, {1-2**31}, {-3.4*10**38}, {-1.7*10**308}, {1-2**63})") + # 
tdSql.execute(f"insert into t1(ts, c1,c2,c5,c7) values " + # f"({nowtime - (per_table_rows + 2) * 10}, {1-2**31}, {-3.4*10**38}, {-1.7*10**308}, {512-2**63})") + # self.mavg_current_query() + # self.mavg_error_query() + + tdLog.printNoPrefix("######## insert data without NULL data test:") + self.mavg_test_table(tbnum) + self.mavg_test_data(tbnum, per_table_rows, nowtime) + self.mavg_current_query() + self.mavg_error_query() + + + tdLog.printNoPrefix("######## insert data mix with NULL test:") + for i in range(tbnum): + tdSql.execute(f"insert into t{i}(ts) values ({nowtime})") + tdSql.execute(f"insert into t{i}(ts) values ({nowtime-(per_table_rows+3)*10})") + tdSql.execute(f"insert into t{i}(ts) values ({nowtime+(per_table_rows+3)*10})") + self.mavg_current_query() + self.mavg_error_query() + + + + tdLog.printNoPrefix("######## check after WAL test:") + tdSql.query("show dnodes") + index = tdSql.getData(0, 0) + tdDnodes.stop(index) + tdDnodes.start(index) + self.mavg_current_query() + self.mavg_error_query() + + def run(self): + import traceback + try: + # run in develop branch + self.mavg_test_run() + pass + except Exception as e: + traceback.print_exc() + raise e + + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) \ No newline at end of file diff --git a/tests/system-test/2-query/nestedQuery_str.py b/tests/system-test/2-query/nestedQuery_str.py new file mode 100755 index 0000000000000000000000000000000000000000..8214c98c5cc8526874db5f40df22f8e587ea36f4 --- /dev/null +++ b/tests/system-test/2-query/nestedQuery_str.py @@ -0,0 +1,5753 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import random +import os +import time +import taos +import subprocess +from faker import Faker +from util.log import tdLog +from util.cases import tdCases +from util.sql import tdSql +from util.dnodes import tdDnodes +from util.dnodes import * + +class TDTestCase: + updatecfgDict = {'maxSQLLength':1048576,'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 , + "jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143, + "wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"fnDebugFlag":143} + + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + self.testcasePath = os.path.split(__file__)[0] + self.testcaseFilename = os.path.split(__file__)[-1] + os.system("rm -rf %s/%s.sql" % (self.testcasePath,self.testcaseFilename)) + + self.num = 10 + self.fornum = 5 + + self.db_nest = "nest" + self.dropandcreateDB_random("%s" %self.db_nest, 1) + + # regular column select + #q_select= ['ts' , '*' , 'q_int', 'q_bigint' , 'q_bigint' , 'q_smallint' , 'q_tinyint' , 'q_bool' , 'q_binary' , 'q_nchar' ,'q_float' , 'q_double' ,'q_ts '] + self.q_select= ['ts' , 'q_int', 'q_bigint' , 'q_bigint' , 'q_smallint' , 'q_tinyint' , 'q_bool' , 'q_binary' , 'q_nchar' ,'q_float' , 'q_double' ,'q_ts ', 'q_int_null ', 'q_bigint_null ' , 'q_bigint_null ' , 'q_smallint_null ' , 'q_tinyint_null ' , 'q_bool_null ' , 'q_binary_null ' , 'q_nchar_null ' ,'q_float_null ' , 'q_double_null ' ,'q_ts_null '] + + # tag column select + #t_select= ['*' , 'loc' ,'t_int', 't_bigint' , 't_bigint' , 't_smallint' , 't_tinyint' , 't_bool' , 't_binary' , 't_nchar' ,'t_float' , 't_double' ,'t_ts '] + self.t_select= ['loc','t_int', 't_bigint' , 't_bigint' , 't_smallint' , 't_tinyint' , 't_bool' , 't_binary' , 't_nchar' ,'t_float' , 't_double' ,'t_ts '] + + # regular and tag column select + self.qt_select= self.q_select + self.t_select + + # distinct regular column select + self.dq_select= ['distinct q_int', 'distinct q_bigint' , 'distinct q_smallint' , 'distinct q_tinyint' , + 'distinct q_bool' , 'distinct q_binary' , 'distinct q_nchar' ,'distinct q_float' , 'distinct q_double' ,'distinct q_ts '] + + # distinct tag column select + self.dt_select= ['distinct loc', 'distinct t_int', 'distinct t_bigint' , 'distinct t_smallint' , 'distinct t_tinyint' , + 'distinct t_bool' , 'distinct t_binary' , 'distinct t_nchar' ,'distinct t_float' , 'distinct t_double' ,'distinct t_ts '] + + # distinct regular and tag column select + self.dqt_select= self.dq_select + self.dt_select + + # special column select + self.s_r_select= ['_c0', '_rowts' , '_C0' ] + self.s_s_select= ['tbname' , '_rowts' , '_c0', '_C0' ] + self.unionall_or_union= [ ' union ' , ' union all ' ] + + # regular column where + self.q_where = ['ts < now +1s','q_bigint >= -9223372036854775807 and q_bigint <= 9223372036854775807', 'q_int <= 2147483647 and q_int >= -2147483647', + 'q_smallint >= -32767 and q_smallint <= 32767','q_tinyint >= -127 and q_tinyint <= 127','q_float >= -1.7E308 and q_float <= 1.7E308', + 'q_double >= -1.7E308 and q_double <= 1.7E308', 'q_binary like \'binary%\' or q_binary = 
\'0\' ' , 'q_nchar like \'nchar%\' or q_nchar = \'0\' ' , + 'q_bool = true or q_bool = false' , 'q_bool in (0 , 1)' , 'q_bool in ( true , false)' , 'q_bool = 0 or q_bool = 1', + 'q_bigint between -9223372036854775807 and 9223372036854775807',' q_int between -2147483647 and 2147483647','q_smallint between -32767 and 32767', + 'q_bigint not between 9223372036854775807 and -9223372036854775807','q_int not between 2147483647 and -2147483647','q_smallint not between 32767 and -32767', + 'q_tinyint between -127 and 127 ','q_float >= -3.4E38 ','q_float <= 3.4E38 ','q_double >= -1.7E308 ', + 'q_double <= 1.7E308 ','q_float between -3.4E38 and 3.4E38 ','q_double between -1.7E308 and 1.7E308 ' ,'q_float not between 3.4E38 and -3.4E38 ','q_double not between 1.7E308 and -1.7E308 ', + 'q_float is not null ' ,'q_double is not null ' ,'q_binary match \'binary\' ','q_binary nmatch \'binarynchar\' ','q_nchar match \'nchar\' ','q_nchar nmatch \'binarynchar\' ', + 'q_binary like \'binary%\' ','(q_binary like \'binary%\' or q_nchar = \'0\' or q_binary = \'binary_\' ) ','q_nchar like \'nchar%\' ','(q_nchar like \'nchar%\' or q_binary = \'0\' or q_nchar = \'nchar_\' ) ',] + #TD-6201 ,'q_bool between 0 and 1' + + # regular column where for test union,join + self.q_u_where = ['t1.ts < now +1s' , 't2.ts < now +1s','t1.q_bigint >= -9223372036854775807 and t1.q_bigint <= 9223372036854775807 and t2.q_bigint >= -9223372036854775807 and t2.q_bigint <= 9223372036854775807', + 't1.q_int <= 2147483647 and t1.q_int >= -2147483647 and t2.q_int <= 2147483647 and t2.q_int >= -2147483647', + 't1.q_smallint >= -32767 and t1.q_smallint <= 32767 and t2.q_smallint >= -32767 and t2.q_smallint <= 32767', + 't1.q_tinyint >= -127 and t1.q_tinyint <= 127 and t2.q_tinyint >= -127 and t2.q_tinyint <= 127', + 't1.q_float >= - 1.7E308 and t1.q_float <= 1.7E308 and t2.q_float >= - 1.7E308 and t2.q_float <= 1.7E308', + 't1.q_double >= - 1.7E308 and t1.q_double <= 1.7E308 and t2.q_double >= - 1.7E308 and t2.q_double <= 1.7E308', + 't1.q_binary like \'binary%\' and t2.q_binary like \'binary%\' ' , + 't1.q_nchar like \'nchar%\' and t2.q_nchar like \'nchar%\' ' , + 't1.q_bool in (0 , 1) and t2.q_bool in (0 , 1)' , 't1.q_bool in ( true , false) and t2.q_bool in ( true , false)' , + 't1.q_bigint between -9223372036854775807 and 9223372036854775807 and t2.q_bigint between -9223372036854775807 and 9223372036854775807', + 't1.q_int between -2147483647 and 2147483647 and t2.q_int between -2147483647 and 2147483647', + 't1.q_smallint between -32767 and 32767 and t2.q_smallint between -32767 and 32767', + 't1.q_tinyint between -127 and 127 and t2.q_tinyint between -127 and 127 ','t1.q_float between -1.7E308 and 1.7E308 and t2.q_float between -1.7E308 and 1.7E308', + 't1.q_double between -1.7E308 and 1.7E308 and t2.q_double between -1.7E308 and 1.7E308', + 't1.q_bigint not between 9223372036854775807 and -9223372036854775807 and t2.q_bigint not between 9223372036854775807 and -9223372036854775807', + 't1.q_int not between 2147483647 and -2147483647 and t2.q_int not between 2147483647 and -2147483647', + 't1.q_smallint not between 32767 and -32767 and t2.q_smallint not between 32767 and -32767', + 't1.q_tinyint not between 127 and -127 and t2.q_tinyint not between 127 and -127 ','t1.q_float not between -1.7E308 and -1.7E308 and t2.q_float not between 1.7E308 and -1.7E308', + 't1.q_double not between 1.7E308 and -1.7E308 and t2.q_double not between 1.7E308 and -1.7E308'] + #TD-6201 ,'t1.q_bool between 0 and 1 or t2.q_bool between 0 and 1'] + #'t1.q_bool = 
true and t1.q_bool = false and t2.q_bool = true and t2.q_bool = false' , 't1.q_bool = 0 and t1.q_bool = 1 and t2.q_bool = 0 and t2.q_bool = 1' , + + self.q_u_or_where = ['(t1.q_binary like \'binary%\' or t1.q_binary = \'0\' or t2.q_binary like \'binary%\' or t2.q_binary = \'0\' )' , + '(t1.q_nchar like \'nchar%\' or t1.q_nchar = \'0\' or t2.q_nchar like \'nchar%\' or t2.q_nchar = \'0\' )' , '(t1.q_bool = true or t1.q_bool = false or t2.q_bool = true or t2.q_bool = false)' , + '(t1.q_bool in (0 , 1) or t2.q_bool in (0 , 1))' , '(t1.q_bool in ( true , false) or t2.q_bool in ( true , false))' , '(t1.q_bool = 0 or t1.q_bool = 1 or t2.q_bool = 0 or t2.q_bool = 1)' , + '(t1.q_bigint between -9223372036854775807 and 9223372036854775807 or t2.q_bigint between -9223372036854775807 and 9223372036854775807)', + '(t1.q_int between -2147483647 and 2147483647 or t2.q_int between -2147483647 and 2147483647)', + '(t1.q_smallint between -32767 and 32767 or t2.q_smallint between -32767 and 32767)', + '(t1.q_tinyint between -127 and 127 or t2.q_tinyint between -127 and 127 )','(t1.q_float between -1.7E308 and 1.7E308 or t2.q_float between -1.7E308 and 1.7E308)', + '(t1.q_double between -1.7E308 and 1.7E308 or t2.q_double between -1.7E308 and 1.7E308)'] + + # tag column where + self.t_where = ['ts < now +1s','t_bigint >= -9223372036854775807 and t_bigint <= 9223372036854775807','t_int <= 2147483647 and t_int >= -2147483647', + 't_smallint >= -32767 and t_smallint <= 32767','t_tinyint >= -127 and t_tinyint <= 127','t_float >= -1.7E308 and t_float <= 1.7E308', + 't_double >= -1.7E308 and t_double <= 1.7E308', 't_binary like \'binary%\' or t_binary = \'0\' ' , 't_nchar like \'nchar%\' or t_nchar = \'0\'' , + 't_bool = true or t_bool = false' , 't_bool in (0 , 1)' , 't_bool in ( true , false)' , 't_bool = 0 or t_bool = 1', + 't_bigint between -9223372036854775807 and 9223372036854775807',' t_int between -2147483647 and 2147483647','t_smallint between -32767 and 32767', + 't_tinyint between -127 and 127 ','t_float between -1.7E308 and 1.7E308','t_double between -1.7E308 and 1.7E308', + 't_binary match \'binary\' ','t_binary nmatch \'binarynchar\' ','t_nchar match \'nchar\' ','t_nchar nmatch \'binarynchar\' ', + 't_binary like \'binary%\' ','t_nchar like \'nchar%\' ','(t_binary like \'binary%\' or t_nchar = \'0\' ) ','(t_nchar like \'nchar%\' or t_binary = \'0\' ) ',] + #TD-6201,'t_bool between 0 and 1' + + # tag column where for test union,join | this is not supported + self.t_u_where = ['t1.ts < now +1s' , 't2.ts < now +1s','t1.t_bigint >= -9223372036854775807 and t1.t_bigint <= 9223372036854775807 and t2.t_bigint >= -9223372036854775807 and t2.t_bigint <= 9223372036854775807', + 't1.t_int <= 2147483647 and t1.t_int >= -2147483647 and t2.t_int <= 2147483647 and t2.t_int >= -2147483647', + 't1.t_smallint >= -32767 and t1.t_smallint <= 32767 and t2.t_smallint >= -32767 and t2.t_smallint <= 32767', + 't1.t_tinyint >= -127 and t1.t_tinyint <= 127 and t2.t_tinyint >= -127 and t2.t_tinyint <= 127', + 't1.t_float >= -1.7E308 and t1.t_float <= 1.7E308 and t2.t_float >= -1.7E308 and t2.t_float <= 1.7E308', + 't1.t_double >= -1.7E308 and t1.t_double <= 1.7E308 and t2.t_double >= -1.7E308 and t2.t_double <= 1.7E308', + '(t1.t_binary like \'binary%\' or t1.t_binary = \'0\' or t2.t_binary like \'binary%\' or t2.t_binary = \'0\') ' , + '(t1.t_nchar like \'nchar%\' or t1.t_nchar = \'0\' or t2.t_nchar like \'nchar%\' or t2.t_nchar = \'0\' )' , '(t1.t_bool = true or t1.t_bool = false or t2.t_bool = true or t2.t_bool = false)' , +
't1.t_bool in (0 , 1) and t2.t_bool in (0 , 1)' , 't1.t_bool in ( true , false) and t2.t_bool in ( true , false)' , '(t1.t_bool = 0 or t1.t_bool = 1 or t2.t_bool = 0 or t2.t_bool = 1)', + 't1.t_bigint between -9223372036854775807 and 9223372036854775807 and t2.t_bigint between -9223372036854775807 and 9223372036854775807', + 't1.t_int between -2147483647 and 2147483647 and t2.t_int between -2147483647 and 2147483647', + 't1.t_smallint between -32767 and 32767 and t2.t_smallint between -32767 and 32767', + '(t1.t_tinyint between -127 and 127 and t2.t_tinyint between -127 and 127) ','t1.t_float between -1.7E308 and 1.7E308 and t2.t_float between -1.7E308 and 1.7E308', + '(t1.t_double between -1.7E308 and 1.7E308 and t2.t_double between -1.7E308 and 1.7E308)'] + #TD-6201,'t1.t_bool between 0 and 1 or t2.q_bool between 0 and 1'] + + self.t_u_or_where = ['(t1.t_binary like \'binary%\' or t1.t_binary = \'0\' or t2.t_binary like \'binary%\' or t2.t_binary = \'0\' )' , + '(t1.t_nchar like \'nchar%\' or t1.t_nchar = \'0\' or t2.t_nchar like \'nchar%\' or t2.t_nchar = \'0\' )' , '(t1.t_bool = true or t1.t_bool = false or t2.t_bool = true or t2.t_bool = false)' , + '(t1.t_bool in (0 , 1) or t2.t_bool in (0 , 1))' , '(t1.t_bool in ( true , false) or t2.t_bool in ( true , false))' , '(t1.t_bool = 0 or t1.t_bool = 1 or t2.t_bool = 0 or t2.t_bool = 1)', + '(t1.t_bigint between -9223372036854775807 and 9223372036854775807 or t2.t_bigint between -9223372036854775807 and 9223372036854775807)', + '(t1.t_int between -2147483647 and 2147483647 or t2.t_int between -2147483647 and 2147483647)', + '(t1.t_smallint between -32767 and 32767 or t2.t_smallint between -32767 and 32767)', + '(t1.t_tinyint between -127 and 127 or t2.t_tinyint between -127 and 127 )','(t1.t_float between -1.7E308 and 1.7E308 or t2.t_float between -1.7E308 and 1.7E308)', + '(t1.t_double between -1.7E308 and 1.7E308 or t2.t_double between -1.7E308 and 1.7E308)'] + + # regular and tag column where + self.qt_where = self.q_where + self.t_where + self.qt_u_where = self.q_u_where + self.t_u_where + # qt_u_or_where is not supported for now + self.qt_u_or_where = self.q_u_or_where + self.t_u_or_where + + # tag column where for test super join | this is supported ('t1.t_bool = t2.t_bool' is still unverified)
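+ # For reference, a sketch (assumption, not executed by this file) of how a t_join_where fragment is meant to be spliced into a super-table join; the stable_1/stable_2 names, the t1/t2 aliases and the ts equality follow the pattern used elsewhere in this file: + # select t1.q_int, t2.q_int from stable_1 t1, stable_2 t2 + # where t1.ts = t2.ts and t1.t_bigint = t2.t_bigint ;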
+ self.t_join_where = ['t1.t_bigint = t2.t_bigint ', 't1.t_int = t2.t_int ', 't1.t_smallint = t2.t_smallint ', 't1.t_tinyint = t2.t_tinyint ', + 't1.t_float = t2.t_float ', 't1.t_double = t2.t_double ', 't1.t_binary = t2.t_binary ' , 't1.t_nchar = t2.t_nchar ' ] + + # session && fill + self.session_where = ['session(ts,10a)' , 'session(ts,10s)', 'session(ts,10m)' , 'session(ts,10h)','session(ts,10d)' , 'session(ts,10w)'] + self.session_u_where = ['session(t1.ts,10a)' , 'session(t1.ts,10s)', 'session(t1.ts,10m)' , 'session(t1.ts,10h)','session(t1.ts,10d)' , 'session(t1.ts,10w)', + 'session(t2.ts,10a)' , 'session(t2.ts,10s)', 'session(t2.ts,10m)' , 'session(t2.ts,10h)','session(t2.ts,10d)' , 'session(t2.ts,10w)'] + + self.fill_where = ['FILL(NONE)','FILL(PREV)','FILL(NULL)','FILL(LINEAR)','FILL(NEXT)','FILL(VALUE, 1.23)'] + + self.state_window = ['STATE_WINDOW(q_tinyint)','STATE_WINDOW(q_bigint)','STATE_WINDOW(q_int)','STATE_WINDOW(q_bool)','STATE_WINDOW(q_smallint)'] + self.state_u_window = ['STATE_WINDOW(t1.q_tinyint)','STATE_WINDOW(t1.q_bigint)','STATE_WINDOW(t1.q_int)','STATE_WINDOW(t1.q_bool)','STATE_WINDOW(t1.q_smallint)', + 'STATE_WINDOW(t2.q_tinyint)','STATE_WINDOW(t2.q_bigint)','STATE_WINDOW(t2.q_int)','STATE_WINDOW(t2.q_bool)','STATE_WINDOW(t2.q_smallint)'] + + # order by where + self.order_where = ['order by ts' , 'order by ts asc'] + self.order_u_where = ['order by t1.ts' , 'order by t1.ts asc' , 'order by t2.ts' , 'order by t2.ts asc'] + self.order_desc_where = ['order by ts' , 'order by ts asc' , 'order by ts desc' ] + self.orders_desc_where = ['order by ts' , 'order by ts asc' , 'order by ts desc' , 'order by loc' , 'order by loc asc' , 'order by loc desc'] + + self.group_where = ['group by tbname , loc' , 'group by tbname', 'group by tbname, t_bigint', 'group by tbname,t_int', 'group by tbname, t_smallint', 'group by tbname,t_tinyint', + 'group by tbname,t_float', 'group by tbname,t_double' , 'group by tbname,t_binary', 'group by tbname,t_nchar', 'group by tbname,t_bool' ,'group by tbname ,loc ,t_bigint', + 'group by tbname,t_binary ,t_nchar ,t_bool' , 'group by tbname,t_int ,t_smallint ,t_tinyint' , 'group by tbname,t_float ,t_double ' , + 'PARTITION BY tbname , loc' , 'PARTITION BY tbname', 'PARTITION BY tbname, t_bigint', 'PARTITION BY tbname,t_int', 'PARTITION BY tbname, t_smallint', 'PARTITION BY tbname,t_tinyint', + 'PARTITION BY tbname,t_float', 'PARTITION BY tbname,t_double' , 'PARTITION BY tbname,t_binary', 'PARTITION BY tbname,t_nchar', 'PARTITION BY tbname,t_bool' ,'PARTITION BY tbname ,loc ,t_bigint', + 'PARTITION BY tbname,t_binary ,t_nchar ,t_bool' , 'PARTITION BY tbname,t_int ,t_smallint ,t_tinyint' , 'PARTITION BY tbname,t_float ,t_double '] + self.group_where_j = ['group by t1.loc' , 'group by t1.t_bigint', 'group by t1.t_int', 'group by t1.t_smallint', 'group by t1.t_tinyint', + 'group by t1.t_float', 'group by t1.t_double' , 'group by t1.t_binary', 'group by t1.t_nchar', 'group by t1.t_bool' ,'group by t1.loc ,t1.t_bigint', + 'group by t1.t_binary ,t1.t_nchar ,t1.t_bool' , 'group by t1.t_int ,t1.t_smallint ,t1.t_tinyint' , 'group by t1.t_float ,t1.t_double ' , + 'PARTITION BY t1.loc' , 'PARTITION by t1.t_bigint', 'PARTITION by t1.t_int', 'PARTITION by t1.t_smallint', 'PARTITION by t1.t_tinyint', + 'PARTITION by t1.t_float', 'PARTITION by t1.t_double' , 'PARTITION by t1.t_binary', 'PARTITION by t1.t_nchar', 'PARTITION by t1.t_bool' ,'PARTITION BY t1.loc ,t1.t_bigint', + 'PARTITION by t1.t_binary ,t1.t_nchar ,t1.t_bool' , 'PARTITION by t1.t_int ,t1.t_smallint 
,t1.t_tinyint' , 'PARTITION by t1.t_float ,t1.t_double ', + 'group by t2.loc' , 'group by t2.t_bigint', 'group by t2.t_int', 'group by t2.t_smallint', 'group by t2.t_tinyint', + 'group by t2.t_float', 'group by t2.t_double' , 'group by t2.t_binary', 'group by t2.t_nchar', 'group by t2.t_bool' ,'group by t2.loc ,t2.t_bigint', + 'group by t2.t_binary ,t2.t_nchar ,t2.t_bool' , 'group by t2.t_int ,t2.t_smallint ,t2.t_tinyint' , 'group by t2.t_float ,t2.t_double ' , + 'PARTITION BY t2.loc' , 'PARTITION by t2.t_bigint', 'PARTITION by t2.t_int', 'PARTITION by t2.t_smallint', 'PARTITION by t2.t_tinyint', + 'PARTITION by t2.t_float', 'PARTITION by t2.t_double' , 'PARTITION by t2.t_binary', 'PARTITION by t2.t_nchar', 'PARTITION by t2.t_bool' ,'PARTITION BY t2.loc ,t2.t_bigint', + 'PARTITION by t2.t_binary ,t2.t_nchar ,t2.t_bool' , 'PARTITION by t2.t_int ,t2.t_smallint ,t2.t_tinyint' , 'PARTITION by t2.t_float ,t2.t_double '] + + self.partiton_where = ['PARTITION BY tbname , loc' , 'PARTITION BY tbname', 'PARTITION BY tbname, t_bigint', 'PARTITION BY tbname,t_int', 'PARTITION BY tbname, t_smallint', 'PARTITION BY tbname,t_tinyint', + 'PARTITION BY tbname,t_float', 'PARTITION BY tbname,t_double' , 'PARTITION BY tbname,t_binary', 'PARTITION BY tbname,t_nchar', 'PARTITION BY tbname,t_bool' ,'PARTITION BY tbname ,loc ,t_bigint', + 'PARTITION BY tbname,t_binary ,t_nchar ,t_bool' , 'PARTITION BY tbname,t_int ,t_smallint ,t_tinyint' , 'PARTITION BY tbname,t_float ,t_double '] + self.partiton_where_j = ['PARTITION BY t1.loc' , 'PARTITION by t1.t_bigint', 'PARTITION by t1.t_int', 'PARTITION by t1.t_smallint', 'PARTITION by t1.t_tinyint', + 'PARTITION by t1.t_float', 'PARTITION by t1.t_double' , 'PARTITION by t1.t_binary', 'PARTITION by t1.t_nchar', 'PARTITION by t1.t_bool' ,'PARTITION BY t1.loc ,t1.t_bigint', + 'PARTITION by t1.t_binary ,t1.t_nchar ,t1.t_bool' , 'PARTITION by t1.t_int ,t1.t_smallint ,t1.t_tinyint' , 'PARTITION by t1.t_float ,t1.t_double ', + 'PARTITION BY t2.loc' , 'PARTITION by t2.t_bigint', 'PARTITION by t2.t_int', 'PARTITION by t2.t_smallint', 'PARTITION by t2.t_tinyint', + 'PARTITION by t2.t_float', 'PARTITION by t2.t_double' , 'PARTITION by t2.t_binary', 'PARTITION by t2.t_nchar', 'PARTITION by t2.t_bool' ,'PARTITION BY t2.loc ,t2.t_bigint', + 'PARTITION by t2.t_binary ,t2.t_nchar ,t2.t_bool' , 'PARTITION by t2.t_int ,t2.t_smallint ,t2.t_tinyint' , 'PARTITION by t2.t_float ,t2.t_double '] + + + self.group_where_regular = ['group by tbname ' , 'group by tbname', 'group by tbname, q_bigint', 'group by tbname,q_int', 'group by tbname, q_smallint', 'group by tbname,q_tinyint', + 'group by tbname,q_float', 'group by tbname,q_double' , 'group by tbname,q_binary', 'group by tbname,q_nchar', 'group by tbname,q_bool' ,'group by tbname ,q_bigint', + 'group by tbname,q_binary ,q_nchar ,q_bool' , 'group by tbname,q_int ,q_smallint ,q_tinyint' , 'group by tbname,q_float ,q_double ' , + 'PARTITION BY tbname ' , 'PARTITION BY tbname', 'PARTITION BY tbname, q_bigint', 'PARTITION BY tbname,q_int', 'PARTITION BY tbname, q_smallint', 'PARTITION BY tbname,q_tinyint', + 'PARTITION BY tbname,q_float', 'PARTITION BY tbname,q_double' , 'PARTITION BY tbname,q_binary', 'PARTITION BY tbname,q_nchar', 'PARTITION BY tbname,q_bool' ,'PARTITION BY tbname ,q_bigint', + 'PARTITION BY tbname,q_binary ,q_nchar ,q_bool' , 'PARTITION BY tbname,q_int ,q_smallint ,q_tinyint' , 'PARTITION BY tbname,q_float ,q_double '] + self.group_where_regular_j = ['group by t1.q_bigint', 'group by t1.q_int', 'group by t1.q_smallint', 'group 
by t1.q_tinyint', + 'group by t1.q_float', 'group by t1.q_double' , 'group by t1.q_binary', 'group by t1.q_nchar', 'group by t1.q_bool' ,'group by t1.q_bigint', + 'group by t1.q_binary ,t1.q_nchar ,t1.q_bool' , 'group by t1.q_int ,t1.q_smallint ,t1.q_tinyint' , 'group by t1.q_float ,t1.q_double ' , + 'PARTITION by t1.q_bigint', 'PARTITION by t1.q_int', 'PARTITION by t1.q_smallint', 'PARTITION by t1.q_tinyint', + 'PARTITION by t1.q_float', 'PARTITION by t1.q_double' , 'PARTITION by t1.q_binary', 'PARTITION by t1.q_nchar', 'PARTITION by t1.q_bool' ,'PARTITION BY t1.q_bigint', + 'PARTITION by t1.q_binary ,t1.q_nchar ,t1.q_bool' , 'PARTITION by t1.q_int ,t1.q_smallint ,t1.q_tinyint' , 'PARTITION by t1.q_float ,t1.q_double ', + 'group by t2.q_bigint', 'group by t2.q_int', 'group by t2.q_smallint', 'group by t2.q_tinyint', + 'group by t2.q_float', 'group by t2.q_double' , 'group by t2.q_binary', 'group by t2.q_nchar', 'group by t2.q_bool' ,'group by t2.q_bigint', + 'group by t2.q_binary ,t2.q_nchar ,t2.q_bool' , 'group by t2.q_int ,t2.q_smallint ,t2.q_tinyint' , 'group by t2.q_float ,t2.q_double ' , + 'PARTITION by t2.q_bigint', 'PARTITION by t2.q_int', 'PARTITION by t2.q_smallint', 'PARTITION by t2.q_tinyint', + 'PARTITION by t2.q_float', 'PARTITION by t2.q_double' , 'PARTITION by t2.q_binary', 'PARTITION by t2.q_nchar', 'PARTITION by t2.q_bool' ,'PARTITION BY t2.q_bigint', + 'PARTITION by t2.q_binary ,t2.q_nchar ,t2.q_bool' , 'PARTITION by t2.q_int ,t2.q_smallint ,t2.q_tinyint' , 'PARTITION by t2.q_float ,t2.q_double '] + + self.partiton_where_regular = ['PARTITION BY tbname ' , 'PARTITION BY tbname', 'PARTITION BY tbname, q_bigint', 'PARTITION BY tbname,q_int', 'PARTITION BY tbname, q_smallint', 'PARTITION BY tbname,q_tinyint', + 'PARTITION BY tbname,q_float', 'PARTITION BY tbname,q_double' , 'PARTITION BY tbname,q_binary', 'PARTITION BY tbname,q_nchar', 'PARTITION BY tbname,q_bool' ,'PARTITION BY tbname ,q_bigint', + 'PARTITION BY tbname,q_binary ,q_nchar ,q_bool' , 'PARTITION BY tbname,q_int ,q_smallint ,q_tinyint' , 'PARTITION BY tbname,q_float ,q_double '] + self.partiton_where_regular_j = ['PARTITION by t1.q_bigint', 'PARTITION by t1.q_int', 'PARTITION by t1.q_smallint', 'PARTITION by t1.q_tinyint', + 'PARTITION by t1.q_float', 'PARTITION by t1.q_double' , 'PARTITION by t1.q_binary', 'PARTITION by t1.q_nchar', 'PARTITION by t1.q_bool' ,'PARTITION BY t1.q_bigint', + 'PARTITION by t1.q_binary ,t1.q_nchar ,t1.q_bool' , 'PARTITION by t1.q_int ,t1.q_smallint ,t1.q_tinyint' , 'PARTITION by t1.q_float ,t1.q_double ', + 'PARTITION by t2.q_bigint', 'PARTITION by t2.q_int', 'PARTITION by t2.q_smallint', 'PARTITION by t2.q_tinyint', + 'PARTITION by t2.q_float', 'PARTITION by t2.q_double' , 'PARTITION by t2.q_binary', 'PARTITION by t2.q_nchar', 'PARTITION by t2.q_bool' ,'PARTITION BY t2.q_bigint', + 'PARTITION by t2.q_binary ,t2.q_nchar ,t2.q_bool' , 'PARTITION by t2.q_int ,t2.q_smallint ,t2.q_tinyint' , 'PARTITION by t2.q_float ,t2.q_double '] + + self.having_support = ['having count(q_int) > 0','having count(q_bigint) > 0','having count(q_smallint) > 0','having count(q_tinyint) > 0','having count(q_float) > 0','having count(q_double) > 0','having count(q_bool) > 0', + 'having avg(q_int) > 0','having avg(q_bigint) > 0','having avg(q_smallint) > 0','having avg(q_tinyint) > 0','having avg(q_float) > 0','having avg(q_double) > 0', + 'having sum(q_int) > 0','having sum(q_bigint) > 0','having sum(q_smallint) > 0','having sum(q_tinyint) > 0','having sum(q_float) > 0','having sum(q_double) > 0', + 'having 
STDDEV(q_int) > 0','having STDDEV(q_bigint) > 0','having STDDEV(q_smallint) > 0','having STDDEV(q_tinyint) > 0','having STDDEV(q_float) > 0','having STDDEV(q_double) > 0', + 'having TWA(q_int) > 0','having TWA(q_bigint) > 0','having TWA(q_smallint) > 0','having TWA(q_tinyint) > 0','having TWA(q_float) > 0','having TWA(q_double) > 0', + 'having IRATE(q_int) > 0','having IRATE(q_bigint) > 0','having IRATE(q_smallint) > 0','having IRATE(q_tinyint) > 0','having IRATE(q_float) > 0','having IRATE(q_double) > 0', + 'having MIN(q_int) > 0','having MIN(q_bigint) > 0','having MIN(q_smallint) > 0','having MIN(q_tinyint) > 0','having MIN(q_float) > 0','having MIN(q_double) > 0', + 'having MAX(q_int) > 0','having MAX(q_bigint) > 0','having MAX(q_smallint) > 0','having MAX(q_tinyint) > 0','having MAX(q_float) > 0','having MAX(q_double) > 0', + 'having FIRST(q_int) > 0','having FIRST(q_bigint) > 0','having FIRST(q_smallint) > 0','having FIRST(q_tinyint) > 0','having FIRST(q_float) > 0','having FIRST(q_double) > 0', + 'having LAST(q_int) > 0','having LAST(q_bigint) > 0','having LAST(q_smallint) > 0','having LAST(q_tinyint) > 0','having LAST(q_float) > 0','having LAST(q_double) > 0', + 'having APERCENTILE(q_int,10) > 0','having APERCENTILE(q_bigint,10) > 0','having APERCENTILE(q_smallint,10) > 0','having APERCENTILE(q_tinyint,10) > 0','having APERCENTILE(q_float,10) > 0','having APERCENTILE(q_double,10) > 0'] + self.having_not_support = ['having TOP(q_int,10) > 0','having TOP(q_bigint,10) > 0','having TOP(q_smallint,10) > 0','having TOP(q_tinyint,10) > 0','having TOP(q_float,10) > 0','having TOP(q_double,10) > 0','having TOP(q_bool,10) > 0', + 'having BOTTOM(q_int,10) > 0','having BOTTOM(q_bigint,10) > 0','having BOTTOM(q_smallint,10) > 0','having BOTTOM(q_tinyint,10) > 0','having BOTTOM(q_float,10) > 0','having BOTTOM(q_double,10) > 0','having BOTTOM(q_bool,10) > 0', + 'having LEASTSQUARES(q_int) > 0','having LEASTSQUARES(q_bigint) > 0','having LEASTSQUARES(q_smallint) > 0','having LEASTSQUARES(q_tinyint) > 0','having LEASTSQUARES(q_float) > 0','having LEASTSQUARES(q_double) > 0','having LEASTSQUARES(q_bool) > 0', + 'having FIRST(q_bool) > 0','having IRATE(q_bool) > 0','having PERCENTILE(q_bool,10) > 0','having avg(q_bool) > 0','having LAST_ROW(q_bool) > 0','having sum(q_bool) > 0','having STDDEV(q_bool) > 0','having APERCENTILE(q_bool,10) > 0','having TWA(q_bool) > 0','having LAST(q_bool) > 0', + 'having PERCENTILE(q_int,10) > 0','having PERCENTILE(q_bigint,10) > 0','having PERCENTILE(q_smallint,10) > 0','having PERCENTILE(q_tinyint,10) > 0','having PERCENTILE(q_float,10) > 0','having PERCENTILE(q_double,10) > 0'] + self.having_tagnot_support = ['having LAST_ROW(q_int) > 0','having LAST_ROW(q_bigint) > 0','having LAST_ROW(q_smallint) > 0','having LAST_ROW(q_tinyint) > 0','having LAST_ROW(q_float) > 0','having LAST_ROW(q_double) > 0'] + + self.having_support_j = ['having count(t1.q_int) > 0','having count(t1.q_bigint) > 0','having count(t1.q_smallint) > 0','having count(t1.q_tinyint) > 0','having count(t1.q_float) > 0','having count(t1.q_double) > 0','having count(t1.q_bool) > 0', + 'having avg(t1.q_int) > 0','having avg(t1.q_bigint) > 0','having avg(t1.q_smallint) > 0','having avg(t1.q_tinyint) > 0','having avg(t1.q_float) > 0','having avg(t1.q_double) > 0', + 'having sum(t1.q_int) > 0','having sum(t1.q_bigint) > 0','having sum(t1.q_smallint) > 0','having sum(t1.q_tinyint) > 0','having sum(t1.q_float) > 0','having sum(t1.q_double) > 0', + 'having STDDEV(t1.q_int) > 0','having STDDEV(t1.q_bigint) > 
0','having STDDEV(t1.q_smallint) > 0','having STDDEV(t1.q_tinyint) > 0','having STDDEV(t1.q_float) > 0','having STDDEV(t1.q_double) > 0', + 'having TWA(t1.q_int) > 0','having TWA(t1.q_bigint) > 0','having TWA(t1.q_smallint) > 0','having TWA(t1.q_tinyint) > 0','having TWA(t1.q_float) > 0','having TWA(t1.q_double) > 0', + 'having IRATE(t1.q_int) > 0','having IRATE(t1.q_bigint) > 0','having IRATE(t1.q_smallint) > 0','having IRATE(t1.q_tinyint) > 0','having IRATE(t1.q_float) > 0','having IRATE(t1.q_double) > 0', + 'having MIN(t1.q_int) > 0','having MIN(t1.q_bigint) > 0','having MIN(t1.q_smallint) > 0','having MIN(t1.q_tinyint) > 0','having MIN(t1.q_float) > 0','having MIN(t1.q_double) > 0', + 'having MAX(t1.q_int) > 0','having MAX(t1.q_bigint) > 0','having MAX(t1.q_smallint) > 0','having MAX(t1.q_tinyint) > 0','having MAX(t1.q_float) > 0','having MAX(t1.q_double) > 0', + 'having FIRST(t1.q_int) > 0','having FIRST(t1.q_bigint) > 0','having FIRST(t1.q_smallint) > 0','having FIRST(t1.q_tinyint) > 0','having FIRST(t1.q_float) > 0','having FIRST(t1.q_double) > 0', + 'having LAST(t1.q_int) > 0','having LAST(t1.q_bigint) > 0','having LAST(t1.q_smallint) > 0','having LAST(t1.q_tinyint) > 0','having LAST(t1.q_float) > 0','having LAST(t1.q_double) > 0', + 'having APERCENTILE(t1.q_int,10) > 0','having APERCENTILE(t1.q_bigint,10) > 0','having APERCENTILE(t1.q_smallint,10) > 0','having APERCENTILE(t1.q_tinyint,10) > 0','having APERCENTILE(t1.q_float,10) > 0','having APERCENTILE(t1.q_double,10) > 0'] + + # limit offset where + self.limit_where = ['limit 1 offset 1' , 'limit 1' , 'limit 2 offset 1' , 'limit 2', 'limit 12 offset 1' , 'limit 20', 'limit 20 offset 10' , 'limit 200'] + self.limit1_where = ['limit 1 offset 1' , 'limit 1' ] + self.limit_u_where = ['limit 100 offset 10' , 'limit 50' , 'limit 100' , 'limit 10' ] + + # slimit soffset where + self.slimit_where = ['slimit 1 soffset 1' , 'slimit 1' , 'slimit 2 soffset 1' , 'slimit 2'] + self.slimit1_where = ['slimit 2 soffset 1' , 'slimit 1' ] + + # aggregate function include [all:count(*)\avg\sum\stddev ||regualr:twa\irate\leastsquares ||group by tbname:twa\irate\] + # select function include [all: min\max\first(*)\last(*)\top\bottom\apercentile\last_row(*)(not with interval)\interp(*)(FILL) ||regualr: percentile] + # calculation function include [all:spread\+-*/ ||regualr:diff\derivative ||group by tbname:diff\derivative\] + # **_ns_** express is not support stable, therefore, separated from regular tables + # calc_select_all calc_select_regular calc_select_in_ts calc_select_fill calc_select_not_interval + # calc_aggregate_all calc_aggregate_regular calc_aggregate_groupbytbname + # calc_calculate_all calc_calculate_regular calc_calculate_groupbytbname + + # calc_select_all calc_select_regular calc_select_in_ts calc_select_fill calc_select_not_interval + # select function include [all: min\max\first(*)\last(*)\top\bottom\apercentile\last_row(*)(not with interval)\interp(*)(FILL) ||regualr: percentile] + + self.calc_select_all = ['bottom(q_int,20)' , 'bottom(q_bigint,20)' , 'bottom(q_smallint,20)' , 'bottom(q_tinyint,20)' ,'bottom(q_float,20)' , 'bottom(q_double,20)' , + 'top(q_int,20)' , 'top(q_bigint,20)' , 'top(q_smallint,20)' ,'top(q_tinyint,20)' ,'top(q_float,20)' ,'top(q_double,20)' , + 'first(q_int)' , 'first(q_bigint)' , 'first(q_smallint)' , 'first(q_tinyint)' , 'first(q_float)' ,'first(q_double)' ,'first(q_binary)' ,'first(q_nchar)' ,'first(q_bool)' ,'first(q_ts)' , + 'last(q_int)' , 'last(q_bigint)' , 'last(q_smallint)' , 'last(q_tinyint)' , 
'last(q_float)' ,'last(q_double)' , 'last(q_binary)' ,'last(q_nchar)' ,'last(q_bool)' ,'last(q_ts)' , + 'min(q_int)' , 'min(q_bigint)' , 'min(q_smallint)' , 'min(q_tinyint)' , 'min(q_float)' ,'min(q_double)' , + 'max(q_int)' , 'max(q_bigint)' , 'max(q_smallint)' , 'max(q_tinyint)' ,'max(q_float)' ,'max(q_double)' , + 'apercentile(q_int,20)' , 'apercentile(q_bigint,20)' ,'apercentile(q_smallint,20)' ,'apercentile(q_tinyint,20)' ,'apercentile(q_float,20)' ,'apercentile(q_double,20)' , + 'last_row(q_int)' , 'last_row(q_bigint)' , 'last_row(q_smallint)' , 'last_row(q_tinyint)' , 'last_row(q_float)' , + 'last_row(q_double)' , 'last_row(q_bool)' ,'last_row(q_binary)' ,'last_row(q_nchar)' ,'last_row(q_ts)'] + + self.calc_select_in_ts = ['bottom(q_int,20)' , 'bottom(q_bigint,20)' , 'bottom(q_smallint,20)' , 'bottom(q_tinyint,20)' ,'bottom(q_float,20)' , 'bottom(q_double,20)' , + 'top(q_int,20)' , 'top(q_bigint,20)' , 'top(q_smallint,20)' ,'top(q_tinyint,20)' ,'top(q_float,20)' ,'top(q_double,20)' , + 'first(q_int)' , 'first(q_bigint)' , 'first(q_smallint)' , 'first(q_tinyint)' , 'first(q_float)' ,'first(q_double)' ,'first(q_binary)' ,'first(q_nchar)' ,'first(q_bool)' ,'first(q_ts)' , + 'last(q_int)' , 'last(q_bigint)' , 'last(q_smallint)' , 'last(q_tinyint)' , 'last(q_float)' ,'last(q_double)' , 'last(q_binary)' ,'last(q_nchar)' ,'last(q_bool)' ,'last(q_ts)' ] + + self.calc_select_in = ['min(q_int)' , 'min(q_bigint)' , 'min(q_smallint)' , 'min(q_tinyint)' , 'min(q_float)' ,'min(q_double)' , + 'max(q_int)' , 'max(q_bigint)' , 'max(q_smallint)' , 'max(q_tinyint)' ,'max(q_float)' ,'max(q_double)' , + 'apercentile(q_int,20)' , 'apercentile(q_bigint,20)' ,'apercentile(q_smallint,20)' ,'apercentile(q_tinyint,20)' ,'apercentile(q_float,20)' ,'apercentile(q_double,20)' , + 'last_row(q_int)' , 'last_row(q_bigint)' , 'last_row(q_smallint)' , 'last_row(q_tinyint)' , 'last_row(q_float)' , + 'last_row(q_double)' , 'last_row(q_bool)' ,'last_row(q_binary)' ,'last_row(q_nchar)' ,'last_row(q_ts)'] + + self.calc_select_not_support_ts = ['first(q_int)' , 'first(q_bigint)' , 'first(q_smallint)' , 'first(q_tinyint)' , 'first(q_float)' ,'first(q_double)' ,'first(q_binary)' ,'first(q_nchar)' ,'first(q_bool)' ,'first(q_ts)' , + 'last(q_int)' , 'last(q_bigint)' , 'last(q_smallint)' , 'last(q_tinyint)' , 'last(q_float)' ,'last(q_double)' , 'last(q_binary)' ,'last(q_nchar)' ,'last(q_bool)' ,'last(q_ts)' , + 'last_row(q_int)' , 'last_row(q_bigint)' , 'last_row(q_smallint)' , 'last_row(q_tinyint)' , 'last_row(q_float)' , + 'last_row(q_double)' , 'last_row(q_bool)' ,'last_row(q_binary)' ,'last_row(q_nchar)' ,'last_row(q_ts)', + 'apercentile(q_int,20)' , 'apercentile(q_bigint,20)' ,'apercentile(q_smallint,20)' ,'apercentile(q_tinyint,20)' ,'apercentile(q_float,20)' ,'apercentile(q_double,20)'] + + self.calc_select_support_ts = ['bottom(q_int,20)' , 'bottom(q_bigint,20)' , 'bottom(q_smallint,20)' , 'bottom(q_tinyint,20)' ,'bottom(q_float,20)' , 'bottom(q_double,20)' , + 'top(q_int,20)' , 'top(q_bigint,20)' , 'top(q_smallint,20)' ,'top(q_tinyint,20)' ,'top(q_float,20)' ,'top(q_double,20)' , + 'min(q_int)' , 'min(q_bigint)' , 'min(q_smallint)' , 'min(q_tinyint)' , 'min(q_float)' ,'min(q_double)' , + 'max(q_int)' , 'max(q_bigint)' , 'max(q_smallint)' , 'max(q_tinyint)' ,'max(q_float)' ,'max(q_double)' ] + + self.calc_select_regular = [ 'PERCENTILE(q_int,10)' ,'PERCENTILE(q_bigint,20)' , 'PERCENTILE(q_smallint,30)' ,'PERCENTILE(q_tinyint,40)' ,'PERCENTILE(q_float,50)' ,'PERCENTILE(q_double,60)'] + + + self.calc_select_fill = 
['INTERP(q_int)' ,'INTERP(q_bigint)' ,'INTERP(q_smallint)' ,'INTERP(q_tinyint)', 'INTERP(q_float)' ,'INTERP(q_double)'] + self.interp_where = ['ts = now' , 'ts = \'2020-09-13 20:26:40.000\'' , 'ts = \'2020-09-13 20:26:40.009\'' ,'tbname in (\'table_1\') and ts = now' ,'tbname in (\'table_0\' ,\'table_1\',\'table_2\',\'table_3\',\'table_4\',\'table_5\') and ts = \'2020-09-13 20:26:40.000\'','tbname like \'table%\' and ts = \'2020-09-13 20:26:40.002\''] + + #two table join + self.calc_select_in_ts_j = ['bottom(t1.q_int,20)' , 'bottom(t1.q_bigint,20)' , 'bottom(t1.q_smallint,20)' , 'bottom(t1.q_tinyint,20)' ,'bottom(t1.q_float,20)' , 'bottom(t1.q_double,20)' , + 'top(t1.q_int,20)' , 'top(t1.q_bigint,20)' , 'top(t1.q_smallint,20)' ,'top(t1.q_tinyint,20)' ,'top(t1.q_float,20)' ,'top(t1.q_double,20)' , + 'first(t1.q_int)' , 'first(t1.q_bigint)' , 'first(t1.q_smallint)' , 'first(t1.q_tinyint)' , 'first(t1.q_float)' ,'first(t1.q_double)' ,'first(t1.q_binary)' ,'first(t1.q_nchar)' ,'first(t1.q_bool)' ,'first(t1.q_ts)' , + 'last(t1.q_int)' , 'last(t1.q_bigint)' , 'last(t1.q_smallint)' , 'last(t1.q_tinyint)' , 'last(t1.q_float)' ,'last(t1.q_double)' , 'last(t1.q_binary)' ,'last(t1.q_nchar)' ,'last(t1.q_bool)' ,'last(t1.q_ts)' , + 'bottom(t2.q_int,20)' , 'bottom(t2.q_bigint,20)' , 'bottom(t2.q_smallint,20)' , 'bottom(t2.q_tinyint,20)' ,'bottom(t2.q_float,20)' , 'bottom(t2.q_double,20)' , + 'top(t2.q_int,20)' , 'top(t2.q_bigint,20)' , 'top(t2.q_smallint,20)' ,'top(t2.q_tinyint,20)' ,'top(t2.q_float,20)' ,'top(t2.q_double,20)' , + 'first(t2.q_int)' , 'first(t2.q_bigint)' , 'first(t2.q_smallint)' , 'first(t2.q_tinyint)' , 'first(t2.q_float)' ,'first(t2.q_double)' ,'first(t2.q_binary)' ,'first(t2.q_nchar)' ,'first(t2.q_bool)' ,'first(t2.q_ts)' , + 'last(t2.q_int)' , 'last(t2.q_bigint)' , 'last(t2.q_smallint)' , 'last(t2.q_tinyint)' , 'last(t2.q_float)' ,'last(t2.q_double)' , 'last(t2.q_binary)' ,'last(t2.q_nchar)' ,'last(t2.q_bool)' ,'last(t2.q_ts)'] + + self.calc_select_in_support_ts_j = ['bottom(t1.q_int,20)' , 'bottom(t1.q_bigint,20)' , 'bottom(t1.q_smallint,20)' , 'bottom(t1.q_tinyint,20)' ,'bottom(t1.q_float,20)' , 'bottom(t1.q_double,20)' , + 'top(t1.q_int,20)' , 'top(t1.q_bigint,20)' , 'top(t1.q_smallint,20)' ,'top(t1.q_tinyint,20)' ,'top(t1.q_float,20)' ,'top(t1.q_double,20)' , + 'min(t1.q_int)' , 'min(t1.q_bigint)' , 'min(t1.q_smallint)' , 'min(t1.q_tinyint)' , 'min(t1.q_float)' ,'min(t1.q_double)' , + 'max(t1.q_int)' , 'max(t1.q_bigint)' , 'max(t1.q_smallint)' , 'max(t1.q_tinyint)' ,'max(t1.q_float)' ,'max(t1.q_double)' , + 'bottom(t2.q_int,20)' , 'bottom(t2.q_bigint,20)' , 'bottom(t2.q_smallint,20)' , 'bottom(t2.q_tinyint,20)' ,'bottom(t2.q_float,20)' , 'bottom(t2.q_double,20)' , + 'top(t2.q_int,20)' , 'top(t2.q_bigint,20)' , 'top(t2.q_smallint,20)' ,'top(t2.q_tinyint,20)' ,'top(t2.q_float,20)' ,'top(t2.q_double,20)' , + 'min(t2.q_int)' , 'min(t2.q_bigint)' , 'min(t2.q_smallint)' , 'min(t2.q_tinyint)' , 'min(t2.q_float)' ,'min(t2.q_double)' , + 'max(t2.q_int)' , 'max(t2.q_bigint)' , 'max(t2.q_smallint)' , 'max(t2.q_tinyint)' ,'max(t2.q_float)' ,'max(t2.q_double)' , + ] + + self.calc_select_in_not_support_ts_j = ['apercentile(t1.q_int,20)' , 'apercentile(t1.q_bigint,20)' ,'apercentile(t1.q_smallint,20)' ,'apercentile(t1.q_tinyint,20)' ,'apercentile(t1.q_float,20)' ,'apercentile(t1.q_double,20)' , + 'last_row(t1.q_int)' , 'last_row(t1.q_bigint)' , 'last_row(t1.q_smallint)' , 'last_row(t1.q_tinyint)' , 'last_row(t1.q_float)' , + 'last_row(t1.q_double)' , 'last_row(t1.q_bool)' 
,'last_row(t1.q_binary)' ,'last_row(t1.q_nchar)' ,'last_row(t1.q_ts)' , + 'apercentile(t2.q_int,20)' , 'apercentile(t2.q_bigint,20)' ,'apercentile(t2.q_smallint,20)' ,'apercentile(t2.q_tinyint,20)' ,'apercentile(t2.q_float,20)' ,'apercentile(t2.q_double,20)' , + 'last_row(t2.q_int)' , 'last_row(t2.q_bigint)' , 'last_row(t2.q_smallint)' , 'last_row(t2.q_tinyint)' , 'last_row(t2.q_float)' , + 'last_row(t2.q_double)' , 'last_row(t2.q_bool)' ,'last_row(t2.q_binary)' ,'last_row(t2.q_nchar)' ,'last_row(t2.q_ts)'] + + self.calc_select_in_j = ['min(t1.q_int)' , 'min(t1.q_bigint)' , 'min(t1.q_smallint)' , 'min(t1.q_tinyint)' , 'min(t1.q_float)' ,'min(t1.q_double)' , + 'max(t1.q_int)' , 'max(t1.q_bigint)' , 'max(t1.q_smallint)' , 'max(t1.q_tinyint)' ,'max(t1.q_float)' ,'max(t1.q_double)' , + 'apercentile(t1.q_int,20)' , 'apercentile(t1.q_bigint,20)' ,'apercentile(t1.q_smallint,20)' ,'apercentile(t1.q_tinyint,20)' ,'apercentile(t1.q_float,20)' ,'apercentile(t1.q_double,20)' , + 'last_row(t1.q_int)' , 'last_row(t1.q_bigint)' , 'last_row(t1.q_smallint)' , 'last_row(t1.q_tinyint)' , 'last_row(t1.q_float)' , + 'last_row(t1.q_double)' , 'last_row(t1.q_bool)' ,'last_row(t1.q_binary)' ,'last_row(t1.q_nchar)' ,'last_row(t1.q_ts)' , + 'min(t2.q_int)' , 'min(t2.q_bigint)' , 'min(t2.q_smallint)' , 'min(t2.q_tinyint)' , 'min(t2.q_float)' ,'min(t2.q_double)' , + 'max(t2.q_int)' , 'max(t2.q_bigint)' , 'max(t2.q_smallint)' , 'max(t2.q_tinyint)' ,'max(t2.q_float)' ,'max(t2.q_double)' , + 'apercentile(t2.q_int,20)' , 'apercentile(t2.q_bigint,20)' ,'apercentile(t2.q_smallint,20)' ,'apercentile(t2.q_tinyint,20)' ,'apercentile(t2.q_float,20)' ,'apercentile(t2.q_double,20)' , + 'last_row(t2.q_int)' , 'last_row(t2.q_bigint)' , 'last_row(t2.q_smallint)' , 'last_row(t2.q_tinyint)' , 'last_row(t2.q_float)' , + 'last_row(t2.q_double)' , 'last_row(t2.q_bool)' ,'last_row(t2.q_binary)' ,'last_row(t2.q_nchar)' ,'last_row(t2.q_ts)'] + self.calc_select_all_j = self.calc_select_in_ts_j + self.calc_select_in_j + + self.calc_select_regular_j = [ 'PERCENTILE(t1.q_int,10)' ,'PERCENTILE(t1.q_bigint,20)' , 'PERCENTILE(t1.q_smallint,30)' ,'PERCENTILE(t1.q_tinyint,40)' ,'PERCENTILE(t1.q_float,50)' ,'PERCENTILE(t1.q_double,60)' , + 'PERCENTILE(t2.q_int,10)' ,'PERCENTILE(t2.q_bigint,20)' , 'PERCENTILE(t2.q_smallint,30)' ,'PERCENTILE(t2.q_tinyint,40)' ,'PERCENTILE(t2.q_float,50)' ,'PERCENTILE(t2.q_double,60)'] + + + self.calc_select_fill_j = ['INTERP(t1.q_int)' ,'INTERP(t1.q_bigint)' ,'INTERP(t1.q_smallint)' ,'INTERP(t1.q_tinyint)', 'INTERP(t1.q_float)' ,'INTERP(t1.q_double)' , + 'INTERP(t2.q_int)' ,'INTERP(t2.q_bigint)' ,'INTERP(t2.q_smallint)' ,'INTERP(t2.q_tinyint)', 'INTERP(t2.q_float)' ,'INTERP(t2.q_double)'] + self.interp_where_j = ['t1.ts = now' , 't1.ts = \'2020-09-13 20:26:40.000\'' , 't1.ts = \'2020-09-13 20:26:40.009\'' ,'t2.ts = now' , 't2.ts = \'2020-09-13 20:26:40.000\'' , 't2.ts = \'2020-09-13 20:26:40.009\'' , + 't1.tbname in (\'table_1\') and t1.ts = now' ,'t1.tbname in (\'table_0\' ,\'table_1\',\'table_2\',\'table_3\',\'table_4\',\'table_5\') and t1.ts = \'2020-09-13 20:26:40.000\'','t1.tbname like \'table%\' and t1.ts = \'2020-09-13 20:26:40.002\'', + 't2.tbname in (\'table_1\') and t2.ts = now' ,'t2.tbname in (\'table_0\' ,\'table_1\',\'table_2\',\'table_3\',\'table_4\',\'table_5\') and t2.ts = \'2020-09-13 20:26:40.000\'','t2.tbname like \'table%\' and t2.ts = \'2020-09-13 20:26:40.002\''] + + # calc_aggregate_all calc_aggregate_regular calc_aggregate_groupbytbname APERCENTILE\PERCENTILE + # aggregate function include 
[all:count(*)\avg\sum\stddev ||regualr:twa\irate\leastsquares ||group by tbname:twa\irate\] + self.calc_aggregate_all = ['count(*)' , 'count(q_int)' ,'count(q_bigint)' , 'count(q_smallint)' ,'count(q_tinyint)' ,'count(q_float)' , + 'count(q_double)' ,'count(q_binary)' ,'count(q_nchar)' ,'count(q_bool)' ,'count(q_ts)' , + 'avg(q_int)' ,'avg(q_bigint)' , 'avg(q_smallint)' ,'avg(q_tinyint)' ,'avg(q_float)' ,'avg(q_double)' , + 'sum(q_int)' ,'sum(q_bigint)' , 'sum(q_smallint)' ,'sum(q_tinyint)' ,'sum(q_float)' ,'sum(q_double)' , + 'STDDEV(q_int)' ,'STDDEV(q_bigint)' , 'STDDEV(q_smallint)' ,'STDDEV(q_tinyint)' ,'STDDEV(q_float)' ,'STDDEV(q_double)', + 'APERCENTILE(q_int,10)' ,'APERCENTILE(q_bigint,20)' , 'APERCENTILE(q_smallint,30)' ,'APERCENTILE(q_tinyint,40)' ,'APERCENTILE(q_float,50)' ,'APERCENTILE(q_double,60)'] + + self.calc_aggregate_regular = ['twa(q_int)' ,'twa(q_bigint)' , 'twa(q_smallint)' ,'twa(q_tinyint)' ,'twa (q_float)' ,'twa(q_double)' , + 'IRATE(q_int)' ,'IRATE(q_bigint)' , 'IRATE(q_smallint)' ,'IRATE(q_tinyint)' ,'IRATE (q_float)' ,'IRATE(q_double)' , + 'LEASTSQUARES(q_int,15,3)' , 'LEASTSQUARES(q_bigint,10,1)' , 'LEASTSQUARES(q_smallint,20,3)' ,'LEASTSQUARES(q_tinyint,10,4)' ,'LEASTSQUARES(q_float,6,4)' ,'LEASTSQUARES(q_double,3,1)' , + 'PERCENTILE(q_int,10)' ,'PERCENTILE(q_bigint,20)' , 'PERCENTILE(q_smallint,30)' ,'PERCENTILE(q_tinyint,40)' ,'PERCENTILE(q_float,50)' ,'PERCENTILE(q_double,60)'] + + self.calc_aggregate_groupbytbname = ['twa(q_int)' ,'twa(q_bigint)' , 'twa(q_smallint)' ,'twa(q_tinyint)' ,'twa (q_float)' ,'twa(q_double)' , + 'IRATE(q_int)' ,'IRATE(q_bigint)' , 'IRATE(q_smallint)' ,'IRATE(q_tinyint)' ,'IRATE (q_float)' ,'IRATE(q_double)' ] + + #two table join + self.calc_aggregate_all_j = ['count(t1.*)' , 'count(t1.q_int)' ,'count(t1.q_bigint)' , 'count(t1.q_smallint)' ,'count(t1.q_tinyint)' ,'count(t1.q_float)' , + 'count(t1.q_double)' ,'count(t1.q_binary)' ,'count(t1.q_nchar)' ,'count(t1.q_bool)' ,'count(t1.q_ts)' , + 'avg(t1.q_int)' ,'avg(t1.q_bigint)' , 'avg(t1.q_smallint)' ,'avg(t1.q_tinyint)' ,'avg(t1.q_float)' ,'avg(t1.q_double)' , + 'sum(t1.q_int)' ,'sum(t1.q_bigint)' , 'sum(t1.q_smallint)' ,'sum(t1.q_tinyint)' ,'sum(t1.q_float)' ,'sum(t1.q_double)' , + 'STDDEV(t1.q_int)' ,'STDDEV(t1.q_bigint)' , 'STDDEV(t1.q_smallint)' ,'STDDEV(t1.q_tinyint)' ,'STDDEV(t1.q_float)' ,'STDDEV(t1.q_double)', + 'APERCENTILE(t1.q_int,10)' ,'APERCENTILE(t1.q_bigint,20)' , 'APERCENTILE(t1.q_smallint,30)' ,'APERCENTILE(t1.q_tinyint,40)' ,'APERCENTILE(t1.q_float,50)' ,'APERCENTILE(t1.q_double,60)' , + 'count(t2.*)' , 'count(t2.q_int)' ,'count(t2.q_bigint)' , 'count(t2.q_smallint)' ,'count(t2.q_tinyint)' ,'count(t2.q_float)' , + 'count(t2.q_double)' ,'count(t2.q_binary)' ,'count(t2.q_nchar)' ,'count(t2.q_bool)' ,'count(t2.q_ts)' , + 'avg(t2.q_int)' ,'avg(t2.q_bigint)' , 'avg(t2.q_smallint)' ,'avg(t2.q_tinyint)' ,'avg(t2.q_float)' ,'avg(t2.q_double)' , + 'sum(t2.q_int)' ,'sum(t2.q_bigint)' , 'sum(t2.q_smallint)' ,'sum(t2.q_tinyint)' ,'sum(t2.q_float)' ,'sum(t2.q_double)' , + 'STDDEV(t2.q_int)' ,'STDDEV(t2.q_bigint)' , 'STDDEV(t2.q_smallint)' ,'STDDEV(t2.q_tinyint)' ,'STDDEV(t2.q_float)' ,'STDDEV(t2.q_double)', + 'APERCENTILE(t2.q_int,10)' ,'APERCENTILE(t2.q_bigint,20)' , 'APERCENTILE(t2.q_smallint,30)' ,'APERCENTILE(t2.q_tinyint,40)' ,'APERCENTILE(t2.q_float,50)' ,'APERCENTILE(t2.q_double,60)'] + + self.calc_aggregate_regular_j = ['twa(t1.q_int)' ,'twa(t1.q_bigint)' , 'twa(t1.q_smallint)' ,'twa(t1.q_tinyint)' ,'twa (t1.q_float)' ,'twa(t1.q_double)' , + 'IRATE(t1.q_int)' 
,'IRATE(t1.q_bigint)' , 'IRATE(t1.q_smallint)' ,'IRATE(t1.q_tinyint)' ,'IRATE (t1.q_float)' ,'IRATE(t1.q_double)' , + 'LEASTSQUARES(t1.q_int,15,3)' , 'LEASTSQUARES(t1.q_bigint,10,1)' , 'LEASTSQUARES(t1.q_smallint,20,3)' ,'LEASTSQUARES(t1.q_tinyint,10,4)' ,'LEASTSQUARES(t1.q_float,6,4)' ,'LEASTSQUARES(t1.q_double,3,1)' , + 'PERCENTILE(t1.q_int,10)' ,'PERCENTILE(t1.q_bigint,20)' , 'PERCENTILE(t1.q_smallint,30)' ,'PERCENTILE(t1.q_tinyint,40)' ,'PERCENTILE(t1.q_float,50)' ,'PERCENTILE(t1.q_double,60)' , + 'twa(t2.q_int)' ,'twa(t2.q_bigint)' , 'twa(t2.q_smallint)' ,'twa(t2.q_tinyint)' ,'twa (t2.q_float)' ,'twa(t2.q_double)' , + 'IRATE(t2.q_int)' ,'IRATE(t2.q_bigint)' , 'IRATE(t2.q_smallint)' ,'IRATE(t2.q_tinyint)' ,'IRATE (t2.q_float)' ,'IRATE(t2.q_double)', + 'LEASTSQUARES(t2.q_int,15,3)' , 'LEASTSQUARES(t2.q_bigint,10,1)' , 'LEASTSQUARES(t2.q_smallint,20,3)' ,'LEASTSQUARES(t2.q_tinyint,10,4)' ,'LEASTSQUARES(t2.q_float,6,4)' ,'LEASTSQUARES(t2.q_double,3,1)' , + 'PERCENTILE(t2.q_int,10)' ,'PERCENTILE(t2.q_bigint,20)' , 'PERCENTILE(t2.q_smallint,30)' ,'PERCENTILE(t2.q_tinyint,40)' ,'PERCENTILE(t2.q_float,50)' ,'PERCENTILE(t2.q_double,60)'] + + self.calc_aggregate_groupbytbname_j = ['twa(t1.q_int)' ,'twa(t1.q_bigint)' , 'twa(t1.q_smallint)' ,'twa(t1.q_tinyint)' ,'twa (t1.q_float)' ,'twa(t1.q_double)' , + 'IRATE(t1.q_int)' ,'IRATE(t1.q_bigint)' , 'IRATE(t1.q_smallint)' ,'IRATE(t1.q_tinyint)' ,'IRATE (t1.q_float)' ,'IRATE(t1.q_double)' , + 'twa(t2.q_int)' ,'twa(t2.q_bigint)' , 'twa(t2.q_smallint)' ,'twa(t2.q_tinyint)' ,'twa (t2.q_float)' ,'twa(t2.q_double)' , + 'IRATE(t2.q_int)' ,'IRATE(t2.q_bigint)' , 'IRATE(t2.q_smallint)' ,'IRATE(t2.q_tinyint)' ,'IRATE (t2.q_float)' ,'IRATE(t2.q_double)' ] + + # calc_calculate_all calc_calculate_regular calc_calculate_groupbytbname + # calculation function include [all:spread\+-*/ ||regualr:diff\derivative ||group by tbname:diff\derivative\] + self.calc_calculate_all = ['SPREAD(ts)' , 'SPREAD(q_ts)' , 'SPREAD(q_int)' ,'SPREAD(q_bigint)' , 'SPREAD(q_smallint)' ,'SPREAD(q_tinyint)' ,'SPREAD(q_float)' ,'SPREAD(q_double)' , + '(SPREAD(q_int) + SPREAD(q_bigint))' , '(SPREAD(q_smallint) - SPREAD(q_float))', '(SPREAD(q_double) * SPREAD(q_tinyint))' , '(SPREAD(q_double) / SPREAD(q_float))'] + self.calc_calculate_regular = ['DIFF(q_int)' ,'DIFF(q_bigint)' , 'DIFF(q_smallint)' ,'DIFF(q_tinyint)' ,'DIFF(q_float)' ,'DIFF(q_double)' , + 'DIFF(q_int,0)' ,'DIFF(q_bigint,0)' , 'DIFF(q_smallint,0)' ,'DIFF(q_tinyint,0)' ,'DIFF(q_float,0)' ,'DIFF(q_double,0)' , + 'DIFF(q_int,1)' ,'DIFF(q_bigint,1)' , 'DIFF(q_smallint,1)' ,'DIFF(q_tinyint,1)' ,'DIFF(q_float,1)' ,'DIFF(q_double,1)' , + 'DERIVATIVE(q_int,15s,0)' , 'DERIVATIVE(q_bigint,10s,1)' , 'DERIVATIVE(q_smallint,20s,0)' ,'DERIVATIVE(q_tinyint,10s,1)' ,'DERIVATIVE(q_float,6s,0)' ,'DERIVATIVE(q_double,3s,1)' ] + self.calc_calculate_groupbytbname = self.calc_calculate_regular + + #two table join + self.calc_calculate_all_j = ['SPREAD(t1.ts)' , 'SPREAD(t1.q_ts)' , 'SPREAD(t1.q_int)' ,'SPREAD(t1.q_bigint)' , 'SPREAD(t1.q_smallint)' ,'SPREAD(t1.q_tinyint)' ,'SPREAD(t1.q_float)' ,'SPREAD(t1.q_double)' , + 'SPREAD(t2.ts)' , 'SPREAD(t2.q_ts)' , 'SPREAD(t2.q_int)' ,'SPREAD(t2.q_bigint)' , 'SPREAD(t2.q_smallint)' ,'SPREAD(t2.q_tinyint)' ,'SPREAD(t2.q_float)' ,'SPREAD(t2.q_double)' , + '(SPREAD(t1.q_int) + SPREAD(t1.q_bigint))' , '(SPREAD(t1.q_tinyint) - SPREAD(t1.q_float))', '(SPREAD(t1.q_double) * SPREAD(t1.q_tinyint))' , '(SPREAD(t1.q_double) / SPREAD(t1.q_tinyint))', + '(SPREAD(t2.q_int) + SPREAD(t2.q_bigint))' , '(SPREAD(t2.q_smallint) 
- SPREAD(t2.q_float))', '(SPREAD(t2.q_double) * SPREAD(t2.q_tinyint))' , '(SPREAD(t2.q_double) / SPREAD(t2.q_tinyint))', + '(SPREAD(t1.q_int) + SPREAD(t1.q_smallint))' , '(SPREAD(t2.q_smallint) - SPREAD(t2.q_float))', '(SPREAD(t1.q_double) * SPREAD(t1.q_tinyint))' , '(SPREAD(t1.q_double) / SPREAD(t1.q_float))'] + self.calc_calculate_regular_j = ['DIFF(t1.q_int)' ,'DIFF(t1.q_bigint)' , 'DIFF(t1.q_smallint)' ,'DIFF(t1.q_tinyint)' ,'DIFF(t1.q_float)' ,'DIFF(t1.q_double)' , + 'DIFF(t1.q_int,0)' ,'DIFF(t1.q_bigint,0)' , 'DIFF(t1.q_smallint,0)' ,'DIFF(t1.q_tinyint,0)' ,'DIFF(t1.q_float,0)' ,'DIFF(t1.q_double,0)' , + 'DIFF(t1.q_int,1)' ,'DIFF(t1.q_bigint,1)' , 'DIFF(t1.q_smallint,1)' ,'DIFF(t1.q_tinyint,1)' ,'DIFF(t1.q_float,1)' ,'DIFF(t1.q_double,1)' , + 'DERIVATIVE(t1.q_int,15s,0)' , 'DERIVATIVE(t1.q_bigint,10s,1)' , 'DERIVATIVE(t1.q_smallint,20s,0)' ,'DERIVATIVE(t1.q_tinyint,10s,1)' ,'DERIVATIVE(t1.q_float,6s,0)' ,'DERIVATIVE(t1.q_double,3s,1)' , + 'DIFF(t2.q_int)' ,'DIFF(t2.q_bigint)' , 'DIFF(t2.q_smallint)' ,'DIFF(t2.q_tinyint)' ,'DIFF(t2.q_float)' ,'DIFF(t2.q_double)' , + 'DIFF(t2.q_int,0)' ,'DIFF(t2.q_bigint,0)' , 'DIFF(t2.q_smallint,0)' ,'DIFF(t2.q_tinyint,0)' ,'DIFF(t2.q_float,0)' ,'DIFF(t2.q_double,0)' , + 'DIFF(t2.q_int,1)' ,'DIFF(t2.q_bigint,1)' , 'DIFF(t2.q_smallint,1)' ,'DIFF(t2.q_tinyint,1)' ,'DIFF(t2.q_float,1)' ,'DIFF(t2.q_double,1)' , + 'DERIVATIVE(t2.q_int,15s,0)' , 'DERIVATIVE(t2.q_bigint,10s,1)' , 'DERIVATIVE(t2.q_smallint,20s,0)' ,'DERIVATIVE(t2.q_tinyint,10s,1)' ,'DERIVATIVE(t2.q_float,6s,0)' ,'DERIVATIVE(t2.q_double,3s,1)' ] + self.calc_calculate_groupbytbname_j = self.calc_calculate_regular_j + + #interval && calc_aggregate_all\calc_aggregate_regular\calc_select_all + self.interval_sliding = ['interval(4w) sliding(1w) ','interval(1w) sliding(1d) ','interval(1d) sliding(1h) ' , + 'interval(1h) sliding(1m) ','interval(1m) sliding(1s) ','interval(1s) sliding(10a) ', + 'interval(1y) ','interval(1n) ','interval(1w) ','interval(1d) ','interval(1h) ','interval(1m) ','interval(1s) ' ,'interval(10a)', + 'interval(1y,1n) ','interval(1n,1w) ','interval(1w,1d) ','interval(1d,1h) ','interval(1h,1m) ','interval(1m,1s) ','interval(1s,10a) ' ,'interval(100a,30a)'] + + self.conn1 = taos.connect(host="127.0.0.1", user="root", password="taosdata", config="/etc/taos/") + self.cur1 = self.conn1.cursor() + print(self.cur1) + self.cur1.execute("use %s ;" %self.db_nest) + sql = 'select * from stable_1 limit 5;' + self.cur1.execute(sql) + + + def data_matrix_equal(self, sql1,row1_s,row1_e,col1_s,col1_e, sql2,row2_s,row2_e,col2_s,col2_e): + # ----row1_start----col1_start---- + # - - checks that the data inside - + # - - the two matrices are equal  - + # ----row1_end------col1_end------ + self.sql1 = sql1 + list1 =[] + tdSql.query(sql1) + for i1 in range(row1_s-1,row1_e): + #print("iiii=%d"%i1) + for j1 in range(col1_s-1,col1_e): + #print("jjjj=%d"%j1) + #print("data=%s" %(tdSql.getData(i1,j1))) + list1.append(tdSql.getData(i1,j1)) + print("=====list1-------list1---=%s" %set(list1)) + + tdSql.execute("reset query cache;") + self.sql2 = sql2 + list2 =[] + tdSql.query(sql2) + for i2 in range(row2_s-1,row2_e): + #print("iiii222=%d"%i2) + for j2 in range(col2_s-1,col2_e): + #print("jjjj222=%d"%j2) + #print("data=%s" %(tdSql.getData(i2,j2))) + list2.append(tdSql.getData(i2,j2)) + print("=====list2-------list2---=%s" %set(list2)) + + if (list1 == list2) and len(list2)>0: + # print(("=====matrix===sql1.list1:'%s',sql2.list2:'%s'") %(list1,list2)) + tdLog.info(("===matrix===sql1:'%s' matrix_result = sql2:'%s' matrix_result") %(sql1,sql2))
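+ # NOTE (added): the elif branches below compare the two result sets only approximately: they flatten str(list) by stripping datetime/list punctuation and compare what remains as one float, widening the tolerance step by step (1e-4, then 0.1, then 0.5) to absorb float jitter between the two query plans. A cleaner per-element sketch (hypothetical helper, not used by this test): + # def approx_equal(l1, l2, tol=1e-4): + #     if len(l1) != len(l2): return False + #     return all(abs(a - b) <= tol if isinstance(a, float) and isinstance(b, float) else a == b + #                for a, b in zip(sorted(l1, key=str), sorted(l2, key=str)))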
matrix_result") %(sql1,sql2)) + elif (set(list2)).issubset(set(list1)): + # 解决不同子表排列结果乱序 + # print(("=====list_issubset==matrix2in1-true===sql1.list1:'%s',sql2.list2:'%s'") %(list1,list2)) + tdLog.info(("===matrix_issubset===sql1:'%s' matrix_set_result = sql2:'%s' matrix_set_result") %(sql1,sql2)) + #elif abs(float(str(list1).replace("]","").replace("[","").replace("e+","")) - float(str(list2).replace("]","").replace("[","").replace("e+",""))) <= 0.0001: + elif abs(float(str(list1).replace("datetime.datetime","").replace("]","").replace("[","").replace("e+","").replace(", ","").replace("(","").replace(")","").replace("-","")) - float(str(list2).replace("datetime.datetime","").replace("]","").replace("[","").replace("e+","").replace(", ","").replace("(","").replace(")","").replace("-",""))) <= 0.0001: + print(("=====matrix_abs+e+===sql1.list1:'%s',sql2.list2:'%s'") %(list1,list2)) + print(("=====matrix_abs+e+replace_after===sql1.list1:'%s',sql2.list2:'%s'") %(float(str(list1).replace("datetime.datetime","").replace("]","").replace("[","").replace("e+","").replace(", ","").replace("(","").replace(")","").replace("-","")),float(str(list2).replace("datetime.datetime","").replace("]","").replace("[","").replace("e+","").replace(", ","").replace("(","").replace(")","").replace("-","")))) + tdLog.info(("===matrix_abs+e+===sql1:'%s' matrix_result = sql2:'%s' matrix_result") %(sql1,sql2)) + elif abs(float(str(list1).replace("datetime.datetime","").replace("]","").replace("[","").replace(", ","").replace("(","").replace(")","").replace("-","")) - float(str(list2).replace("datetime.datetime","").replace("]","").replace("[","").replace(", ","").replace("(","").replace(")","").replace("-",""))) <= 0.1: + #{datetime.datetime(2021, 8, 27, 1, 46, 40), -441.46841430664057}replace + print(("=====matrix_abs+replace===sql1.list1:'%s',sql2.list2:'%s'") %(list1,list2)) + print(("=====matrix_abs+replace_after===sql1.list1:'%s',sql2.list2:'%s'") %(float(str(list1).replace("datetime.datetime","").replace("]","").replace("[","").replace(", ","").replace("(","").replace(")","").replace("-","")),float(str(list1).replace("datetime.datetime","").replace("]","").replace("[","").replace(", ","").replace("(","").replace(")","").replace("-","")))) + tdLog.info(("===matrix_abs+replace===sql1:'%s' matrix_result = sql2:'%s' matrix_result") %(sql1,sql2)) + elif abs(float(str(list1).replace("datetime.datetime","").replace("]","").replace("[","").replace(", ","").replace("(","").replace(")","").replace("-","")) - float(str(list2).replace("datetime.datetime","").replace("]","").replace("[","").replace(", ","").replace("(","").replace(")","").replace("-",""))) <= 0.5: + print(("=====matrix_abs===sql1.list1:'%s',sql2.list2:'%s'") %(list1,list2)) + print(("=====matrix_abs===sql1.list1:'%s',sql2.list2:'%s'") %(float(str(list1).replace("datetime.datetime","").replace("]","").replace("[","").replace(", ","").replace("(","").replace(")","").replace("-","")),float(str(list2).replace("datetime.datetime","").replace("]","").replace("[","").replace(", ","").replace("(","").replace(")","").replace("-","")))) + tdLog.info(("===matrix_abs======sql1:'%s' matrix_result = sql2:'%s' matrix_result") %(sql1,sql2)) + else: + print(("=====matrix_error===sql1.list1:'%s',sql2.list2:'%s'") %(list1,list2)) + tdLog.info(("sql1:'%s' matrix_result != sql2:'%s' matrix_result") %(sql1,sql2)) + return tdSql.checkEqual(list1,list2) + + def restartDnodes(self): + pass + # tdDnodes.stop(1) + # tdDnodes.start(1) + + def dropandcreateDB_random(self,database,n): + ts 
= 1630000000000 + num_random = 100 + fake = Faker('zh_CN') + tdSql.execute('''drop database if exists %s ;''' %database) + tdSql.execute('''create database %s keep 36500;'''%database) + tdSql.execute('''use %s;'''%database) + + tdSql.execute('''create stable stable_1 (ts timestamp , q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint , q_float float , q_double double , q_bool bool , q_binary binary(100) , q_nchar nchar(100) , q_ts timestamp , \ + q_binary1 binary(100) , q_nchar1 nchar(100) ,q_binary2 binary(100) , q_nchar2 nchar(100) ,q_binary3 binary(100) , q_nchar3 nchar(100) ,q_binary4 binary(100) , q_nchar4 nchar(100) ,\ + q_binary5 binary(100) , q_nchar5 nchar(100) ,q_binary6 binary(100) , q_nchar6 nchar(100) ,q_binary7 binary(100) , q_nchar7 nchar(100) ,q_binary8 binary(100) , q_nchar8 nchar(100) ,\ + q_int_null int , q_bigint_null bigint , q_smallint_null smallint , q_tinyint_null tinyint, q_float_null float , q_double_null double , q_bool_null bool , q_binary_null binary(20) , q_nchar_null nchar(20) , q_ts_null timestamp) \ + tags(loc nchar(100) , t_int int , t_bigint bigint , t_smallint smallint , t_tinyint tinyint, t_bool bool , t_binary binary(100) , t_nchar nchar(100) ,t_float float , t_double double , t_ts timestamp);''') + tdSql.execute('''create stable stable_2 (ts timestamp , q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint , q_float float , q_double double , q_bool bool , q_binary binary(100) , q_nchar nchar(100) , q_ts timestamp , \ + q_binary1 binary(100) , q_nchar1 nchar(100) ,q_binary2 binary(100) , q_nchar2 nchar(100) ,q_binary3 binary(100) , q_nchar3 nchar(100) ,q_binary4 binary(100) , q_nchar4 nchar(100) ,\ + q_binary5 binary(100) , q_nchar5 nchar(100) ,q_binary6 binary(100) , q_nchar6 nchar(100) ,q_binary7 binary(100) , q_nchar7 nchar(100) ,q_binary8 binary(100) , q_nchar8 nchar(100) ,\ + q_int_null int , q_bigint_null bigint , q_smallint_null smallint , q_tinyint_null tinyint, q_float_null float , q_double_null double , q_bool_null bool , q_binary_null binary(20) , q_nchar_null nchar(20) , q_ts_null timestamp) \ + tags(loc nchar(100) , t_int int , t_bigint bigint , t_smallint smallint , t_tinyint tinyint, t_bool bool , t_binary binary(100) , t_nchar nchar(100) ,t_float float , t_double double , t_ts timestamp);''') + + tdSql.execute('''create stable stable_null_data (ts timestamp , q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint , q_float float , q_double double , q_bool bool , q_binary binary(100) , q_nchar nchar(100) , q_ts timestamp , \ + q_binary1 binary(100) , q_nchar1 nchar(100) ,q_binary2 binary(100) , q_nchar2 nchar(100) ,q_binary3 binary(100) , q_nchar3 nchar(100) ,q_binary4 binary(100) , q_nchar4 nchar(100) ,\ + q_binary5 binary(100) , q_nchar5 nchar(100) ,q_binary6 binary(100) , q_nchar6 nchar(100) ,q_binary7 binary(100) , q_nchar7 nchar(100) ,q_binary8 binary(100) , q_nchar8 nchar(100) ,\ + q_int_null int , q_bigint_null bigint , q_smallint_null smallint , q_tinyint_null tinyint, q_float_null float , q_double_null double , q_bool_null bool , q_binary_null binary(20) , q_nchar_null nchar(20) , q_ts_null timestamp) \ + tags(loc nchar(100) , t_int int , t_bigint bigint , t_smallint smallint , t_tinyint tinyint, t_bool bool , t_binary binary(100) , t_nchar nchar(100) ,t_float float , t_double double , t_ts timestamp);''') + + tdSql.execute('''create stable stable_null_childtable (ts timestamp , q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint , q_float float , q_double double 
, q_bool bool , q_binary binary(100) , q_nchar nchar(100) , q_ts timestamp , \ + q_binary1 binary(100) , q_nchar1 nchar(100) ,q_binary2 binary(100) , q_nchar2 nchar(100) ,q_binary3 binary(100) , q_nchar3 nchar(100) ,q_binary4 binary(100) , q_nchar4 nchar(100) ,\ + q_binary5 binary(100) , q_nchar5 nchar(100) ,q_binary6 binary(100) , q_nchar6 nchar(100) ,q_binary7 binary(100) , q_nchar7 nchar(100) ,q_binary8 binary(100) , q_nchar8 nchar(100) ,\ + q_int_null int , q_bigint_null bigint , q_smallint_null smallint , q_tinyint_null tinyint, q_float_null float , q_double_null double , q_bool_null bool , q_binary_null binary(20) , q_nchar_null nchar(20) , q_ts_null timestamp) \ + tags(loc nchar(100) , t_int int , t_bigint bigint , t_smallint smallint , t_tinyint tinyint, t_bool bool , t_binary binary(100) , t_nchar nchar(100) ,t_float float , t_double double , t_ts timestamp);''') + + #tdSql.execute('''create table stable_1_1 using stable_1 tags('stable_1_1', '0' , '0' , '0' , '0' , 0 , 'binary1' , 'nchar1' , '0' , '0' ,'0') ;''') + tdSql.execute('''create table stable_1_1 using stable_1 tags('stable_1_1', '%d' , '%d', '%d' , '%d' , 0 , 'binary1.%s' , 'nchar1.%s' , '%f', '%f' ,'%d') ;''' + %(fake.random_int(min=-2147483647, max=2147483647, step=1), fake.random_int(min=-9223372036854775807, max=9223372036854775807, step=1), + fake.random_int(min=-32767, max=32767, step=1) , fake.random_int(min=-127, max=127, step=1) , + fake.pystr() ,fake.pystr() ,fake.pyfloat(),fake.pyfloat(),fake.random_int(min=-2147483647, max=2147483647, step=1))) + tdSql.execute('''create table stable_1_2 using stable_1 tags('stable_1_2', '2147483647' , '9223372036854775807' , '32767' , '127' , 1 , 'binary2' , 'nchar2' , '2' , '22' , \'1999-09-09 09:09:09.090\') ;''') + tdSql.execute('''create table stable_1_3 using stable_1 tags('stable_1_3', '-2147483647' , '-9223372036854775807' , '-32767' , '-127' , false , 'binary3' , 'nchar3nchar3' , '-3.3' , '-33.33' , \'2099-09-09 09:09:09.090\') ;''') + #tdSql.execute('''create table stable_1_4 using stable_1 tags('stable_1_4', '0' , '0' , '0' , '0' , 0 , '0' , '0' , '0' , '0' ,'0') ;''') + tdSql.execute('''create table stable_1_4 using stable_1 tags('stable_1_4', '%d' , '%d', '%d' , '%d' , 1 , 'binary1.%s' , 'nchar1.%s' , '%f', '%f' ,'%d') ;''' + %(fake.random_int(min=-2147483647, max=2147483647, step=1), fake.random_int(min=-9223372036854775807, max=9223372036854775807, step=1), + fake.random_int(min=-32767, max=32767, step=1) , fake.random_int(min=-127, max=127, step=1) , + fake.pystr() ,fake.pystr() ,fake.pyfloat(),fake.pyfloat(),fake.random_int(min=-2147483647, max=2147483647, step=1))) + + # tdSql.execute('''create table stable_2_1 using stable_2 tags('stable_2_1' , '0' , '0' , '0' , '0' , 0 , 'binary21' , 'nchar21' , '0' , '0' ,'0') ;''') + # tdSql.execute('''create table stable_2_2 using stable_2 tags('stable_2_2' , '0' , '0' , '0' , '0' , 0 , '0' , '0' , '0' , '0' ,'0') ;''') + + # tdSql.execute('''create table stable_null_data_1 using stable_null_data tags('stable_null_data_1', '0' , '0' , '0' , '0' , 0 , '0' , '0' , '0' , '0' ,'0') ;''') + + tdSql.execute('''create table stable_2_1 using stable_2 tags('stable_2_1' , '0' , '0' , '0' , '0' , 0 , 'binary21' , 'nchar21' , '0' , '0' ,\'2099-09-09 09:09:09.090\') ;''') + tdSql.execute('''create table stable_2_2 using stable_2 tags('stable_2_2' , '%d' , '%d', '%d' , '%d' , 0 , 'binary2.%s' , 'nchar2.%s' , '%f', '%f' ,'%d') ;''' + %(fake.random_int(min=-2147483647, max=2147483647, step=1), fake.random_int(min=-9223372036854775807, 
max=9223372036854775807, step=1), + fake.random_int(min=-32767, max=32767, step=1) , fake.random_int(min=-127, max=127, step=1) , + fake.pystr() ,fake.pystr() ,fake.pyfloat(),fake.pyfloat(),fake.random_int(min=-2147483647, max=2147483647, step=1))) + + tdSql.execute('''create table stable_null_data_1 using stable_null_data tags('stable_null_data_1', '%d' , '%d', '%d' , '%d' , 1 , 'binary1.%s' , 'nchar1.%s' , '%f', '%f' ,'%d') ;''' + %(fake.random_int(min=-2147483647, max=2147483647, step=1), fake.random_int(min=-9223372036854775807, max=9223372036854775807, step=1), + fake.random_int(min=-32767, max=32767, step=1) , fake.random_int(min=-127, max=127, step=1) , + fake.pystr() ,fake.pystr() ,fake.pyfloat(),fake.pyfloat(),fake.random_int(min=-2147483647, max=2147483647, step=1))) + + #regular table + tdSql.execute('''create table regular_table_1 \ + (ts timestamp , q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint , q_float float , q_double double , q_bool bool , q_binary binary(100) , q_nchar nchar(100) , q_ts timestamp , \ + q_binary1 binary(100) , q_nchar1 nchar(100) ,q_binary2 binary(100) , q_nchar2 nchar(100) ,q_binary3 binary(100) , q_nchar3 nchar(100) ,q_binary4 binary(100) , q_nchar4 nchar(100) ,\ + q_binary5 binary(100) , q_nchar5 nchar(100) ,q_binary6 binary(100) , q_nchar6 nchar(100) ,q_binary7 binary(100) , q_nchar7 nchar(100) ,q_binary8 binary(100) , q_nchar8 nchar(100) ,\ + q_int_null int , q_bigint_null bigint , q_smallint_null smallint , q_tinyint_null tinyint, q_float_null float , q_double_null double , q_bool_null bool , q_binary_null binary(20) , q_nchar_null nchar(20) , q_ts_null timestamp) ;''') + tdSql.execute('''create table regular_table_2 \ + (ts timestamp , q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint , q_float float , q_double double , q_bool bool , q_binary binary(100) , q_nchar nchar(100) , q_ts timestamp , \ + q_binary1 binary(100) , q_nchar1 nchar(100) ,q_binary2 binary(100) , q_nchar2 nchar(100) ,q_binary3 binary(100) , q_nchar3 nchar(100) ,q_binary4 binary(100) , q_nchar4 nchar(100) ,\ + q_binary5 binary(100) , q_nchar5 nchar(100) ,q_binary6 binary(100) , q_nchar6 nchar(100) ,q_binary7 binary(100) , q_nchar7 nchar(100) ,q_binary8 binary(100) , q_nchar8 nchar(100) ,\ + q_int_null int , q_bigint_null bigint , q_smallint_null smallint , q_tinyint_null tinyint, q_float_null float , q_double_null double , q_bool_null bool , q_binary_null binary(20) , q_nchar_null nchar(20) , q_ts_null timestamp) ;''') + tdSql.execute('''create table regular_table_3 \ + (ts timestamp , q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint , q_float float , q_double double , q_bool bool , q_binary binary(100) , q_nchar nchar(100) , q_ts timestamp , \ + q_binary1 binary(100) , q_nchar1 nchar(100) ,q_binary2 binary(100) , q_nchar2 nchar(100) ,q_binary3 binary(100) , q_nchar3 nchar(100) ,q_binary4 binary(100) , q_nchar4 nchar(100) ,\ + q_binary5 binary(100) , q_nchar5 nchar(100) ,q_binary6 binary(100) , q_nchar6 nchar(100) ,q_binary7 binary(100) , q_nchar7 nchar(100) ,q_binary8 binary(100) , q_nchar8 nchar(100) ,\ + q_int_null int , q_bigint_null bigint , q_smallint_null smallint , q_tinyint_null tinyint, q_float_null float , q_double_null double , q_bool_null bool , q_binary_null binary(20) , q_nchar_null nchar(20) , q_ts_null timestamp) ;''') + + tdSql.execute('''create table regular_table_null \ + (ts timestamp , q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint , q_float float , q_double double , q_bool 
bool , q_binary binary(100) , q_nchar nchar(100) , q_ts timestamp , \ + q_binary1 binary(100) , q_nchar1 nchar(100) ,q_binary2 binary(100) , q_nchar2 nchar(100) ,q_binary3 binary(100) , q_nchar3 nchar(100) ,q_binary4 binary(100) , q_nchar4 nchar(100) ,\ + q_binary5 binary(100) , q_nchar5 nchar(100) ,q_binary6 binary(100) , q_nchar6 nchar(100) ,q_binary7 binary(100) , q_nchar7 nchar(100) ,q_binary8 binary(100) , q_nchar8 nchar(100) ,\ + q_int_null int , q_bigint_null bigint , q_smallint_null smallint , q_tinyint_null tinyint, q_float_null float , q_double_null double , q_bool_null bool , q_binary_null binary(20) , q_nchar_null nchar(20) , q_ts_null timestamp) ;''') + + + for i in range(num_random*n): + tdSql.execute('''insert into stable_1_1 (ts , q_int , q_bigint , q_smallint , q_tinyint , q_float , q_double , q_bool , q_binary , q_nchar, q_ts,\ + q_binary1 , q_nchar1 , q_binary2 , q_nchar2 , q_binary3 , q_nchar3 , q_binary4 , q_nchar4 , q_binary5 , q_nchar5 , q_binary6 , q_nchar6 , q_binary7 , q_nchar7, q_binary8 , q_nchar8) \ + values(%d, %d, %d, %d, %d, %f, %f, 0, 'binary.%s', 'nchar.%s', %d, 'binary1.%s', 'nchar1.%s', 'binary2.%s', 'nchar2.%s', 'binary3.%s', 'nchar3.%s', \ + 'binary4.%s', 'nchar4.%s', 'binary5.%s', 'nchar5.%s', 'binary6.%s', 'nchar6.%s', 'binary7.%s', 'nchar7.%s', 'binary8.%s', 'nchar8.%s') ;''' + % (ts + i*1000, fake.random_int(min=-2147483647, max=2147483647, step=1), + fake.random_int(min=-9223372036854775807, max=9223372036854775807, step=1), + fake.random_int(min=-32767, max=32767, step=1) , fake.random_int(min=-127, max=127, step=1) , + fake.pyfloat() , fake.pyfloat() , fake.pystr() , fake.address() , ts + i , fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() , + fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address())) + tdSql.execute('''insert into regular_table_1 (ts , q_int , q_bigint , q_smallint , q_tinyint , q_float , q_double, q_bool , q_binary , q_nchar, q_ts,\ + q_binary1 , q_nchar1 , q_binary2 , q_nchar2 , q_binary3 , q_nchar3 , q_binary4 , q_nchar4 , q_binary5 , q_nchar5 , q_binary6 , q_nchar6 , q_binary7 , q_nchar7, q_binary8 , q_nchar8) \ + values(%d, %d, %d, %d, %d, %f, %f, 0, 'binary.%s', 'nchar.%s', %d, 'binary1.%s', 'nchar1.%s', 'binary2.%s', 'nchar2.%s', 'binary3.%s', 'nchar3.%s', \ + 'binary4.%s', 'nchar4.%s', 'binary5.%s', 'nchar5.%s', 'binary6.%s', 'nchar6.%s', 'binary7.%s', 'nchar7.%s', 'binary8.%s', 'nchar8.%s') ;''' + % (ts + i*1000, fake.random_int(min=-2147483647, max=2147483647, step=1) , + fake.random_int(min=-9223372036854775807, max=9223372036854775807, step=1) , + fake.random_int(min=-32767, max=32767, step=1) , fake.random_int(min=-127, max=127, step=1) , + fake.pyfloat() , fake.pyfloat() , fake.pystr() , fake.address() , ts + i, fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() , + fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address())) + + tdSql.execute('''insert into stable_1_2 (ts , q_int , q_bigint , q_smallint , q_tinyint , q_float , q_double, q_bool , q_binary , q_nchar, q_ts,\ + q_binary1 , q_nchar1 , q_binary2 , q_nchar2 , q_binary3 , q_nchar3 , q_binary4 , q_nchar4 , q_binary5 , q_nchar5 , q_binary6 , q_nchar6 , q_binary7 , q_nchar7, q_binary8 , q_nchar8)\ + values(%d, %d, %d, %d, %d, %f, %f, 1, 'binary.%s', 'nchar.%s', %d, 
'binary1.%s', 'nchar1.%s', 'binary2.%s', 'nchar2.%s', 'binary3.%s', 'nchar3.%s', \ + 'binary4.%s', 'nchar4.%s', 'binary5.%s', 'nchar5.%s', 'binary6.%s', 'nchar6.%s', 'binary7.%s', 'nchar7.%s', 'binary8.%s', 'nchar8.%s') ;''' + % (ts + i*1000, fake.random_int(min=0, max=2147483647, step=1), + fake.random_int(min=0, max=9223372036854775807, step=1), + fake.random_int(min=0, max=32767, step=1) , fake.random_int(min=0, max=127, step=1) , + fake.pyfloat() , fake.pyfloat() , fake.pystr() , fake.address() , ts + i, fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() , + fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address())) + tdSql.execute('''insert into regular_table_2 (ts , q_int , q_bigint , q_smallint , q_tinyint , q_float , q_double, q_bool , q_binary , q_nchar, q_ts,\ + q_binary1 , q_nchar1 , q_binary2 , q_nchar2 , q_binary3 , q_nchar3 , q_binary4 , q_nchar4 , q_binary5 , q_nchar5 , q_binary6 , q_nchar6 , q_binary7 , q_nchar7, q_binary8 , q_nchar8) \ + values(%d, %d, %d, %d, %d, %f, %f, 1, 'binary.%s', 'nchar.%s', %d, 'binary1.%s', 'nchar1.%s', 'binary2.%s', 'nchar2.%s', 'binary3.%s', 'nchar3.%s', \ + 'binary4.%s', 'nchar4.%s', 'binary5.%s', 'nchar5.%s', 'binary6.%s', 'nchar6.%s', 'binary7.%s', 'nchar7.%s', 'binary8.%s', 'nchar8.%s') ;''' + % (ts + i*1000, fake.random_int(min=0, max=2147483647, step=1), + fake.random_int(min=0, max=9223372036854775807, step=1), + fake.random_int(min=0, max=32767, step=1) , fake.random_int(min=0, max=127, step=1) , + fake.pyfloat() , fake.pyfloat() , fake.pystr() , fake.address() , ts + i, fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() , + fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address())) + + tdSql.execute('''insert into stable_1_2 (ts , q_int , q_bigint , q_smallint , q_tinyint , q_float , q_double, q_bool , q_binary , q_nchar, q_ts,\ + q_binary1 , q_nchar1 , q_binary2 , q_nchar2 , q_binary3 , q_nchar3 , q_binary4 , q_nchar4 , q_binary5 , q_nchar5 , q_binary6 , q_nchar6 , q_binary7 , q_nchar7, q_binary8 , q_nchar8) \ + values(%d, %d, %d, %d, %d, %f, %f, 1, 'binary.%s', 'nchar.%s', %d, 'binary1.%s', 'nchar1.%s', 'binary2.%s', 'nchar2.%s', 'binary3.%s', 'nchar3.%s', \ + 'binary4.%s', 'nchar4.%s', 'binary5.%s', 'nchar5.%s', 'binary6.%s', 'nchar6.%s', 'binary7.%s', 'nchar7.%s', 'binary8.%s', 'nchar8.%s') ;''' + % (ts + i*1000 +1, fake.random_int(min=-2147483647, max=0, step=1), + fake.random_int(min=-9223372036854775807, max=0, step=1), + fake.random_int(min=-32767, max=0, step=1) , fake.random_int(min=-127, max=0, step=1) , + fake.pyfloat() , fake.pyfloat() , fake.pystr() , fake.address() , ts + i +1, fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() , + fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address())) + tdSql.execute('''insert into regular_table_2 (ts , q_int , q_bigint , q_smallint , q_tinyint , q_float , q_double, q_bool , q_binary , q_nchar, q_ts,\ + q_binary1 , q_nchar1 , q_binary2 , q_nchar2 , q_binary3 , q_nchar3 , q_binary4 , q_nchar4 , q_binary5 , q_nchar5 , q_binary6 , q_nchar6 , q_binary7 , q_nchar7, q_binary8 , q_nchar8) \ + values(%d, %d, %d, %d, %d, %f, %f, 1, 'binary.%s', 'nchar.%s', %d, 
'binary1.%s', 'nchar1.%s', 'binary2.%s', 'nchar2.%s', 'binary3.%s', 'nchar3.%s', \ + 'binary4.%s', 'nchar4.%s', 'binary5.%s', 'nchar5.%s', 'binary6.%s', 'nchar6.%s', 'binary7.%s', 'nchar7.%s', 'binary8.%s', 'nchar8.%s') ;''' + % (ts + i*1000 +1, fake.random_int(min=-2147483647, max=0, step=1), + fake.random_int(min=-9223372036854775807, max=0, step=1), + fake.random_int(min=-32767, max=0, step=1) , fake.random_int(min=-127, max=0, step=1) , + fake.pyfloat() , fake.pyfloat() , fake.pystr() , fake.address() , ts + i +1, fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() , + fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address())) + + tdSql.execute('''insert into stable_2_1 (ts , q_int , q_bigint , q_smallint , q_tinyint , q_float , q_double, q_bool , q_binary , q_nchar, q_ts,\ + q_binary1 , q_nchar1 , q_binary2 , q_nchar2 , q_binary3 , q_nchar3 , q_binary4 , q_nchar4 , q_binary5 , q_nchar5 , q_binary6 , q_nchar6 , q_binary7 , q_nchar7, q_binary8 , q_nchar8) \ + values(%d, %d, %d, %d, %d, %f, %f, 0, 'binary.%s', 'nchar.%s', %d, 'binary1.%s', 'nchar1.%s', 'binary2.%s', 'nchar2.%s', 'binary3.%s', 'nchar3.%s', \ + 'binary4.%s', 'nchar4.%s', 'binary5.%s', 'nchar5.%s', 'binary6.%s', 'nchar6.%s', 'binary7.%s', 'nchar7.%s', 'binary8.%s', 'nchar8.%s') ;''' + % (ts + i*1000, fake.random_int(min=-0, max=2147483647, step=1), + fake.random_int(min=-0, max=9223372036854775807, step=1), + fake.random_int(min=-0, max=32767, step=1) , fake.random_int(min=-0, max=127, step=1) , + fake.pyfloat() , fake.pyfloat() , fake.pystr() , fake.address() , ts + i, fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() , + fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address())) + + tdSql.execute('''insert into stable_2_1 (ts , q_int , q_bigint , q_smallint , q_tinyint , q_float , q_double, q_bool , q_binary , q_nchar, q_ts,\ + q_binary1 , q_nchar1 , q_binary2 , q_nchar2 , q_binary3 , q_nchar3 , q_binary4 , q_nchar4 , q_binary5 , q_nchar5 , q_binary6 , q_nchar6 , q_binary7 , q_nchar7, q_binary8 , q_nchar8) \ + values(%d, %d, %d, %d, %d, %f, %f, 0, 'binary.%s', 'nchar.%s', %d, 'binary1.%s', 'nchar1.%s', 'binary2.%s', 'nchar2.%s', 'binary3.%s', 'nchar3.%s', \ + 'binary4.%s', 'nchar4.%s', 'binary5.%s', 'nchar5.%s', 'binary6.%s', 'nchar6.%s', 'binary7.%s', 'nchar7.%s', 'binary8.%s', 'nchar8.%s') ;''' + % (ts + i*1000 +1, fake.random_int(min=-0, max=2147483647, step=1), + fake.random_int(min=-0, max=9223372036854775807, step=1), + fake.random_int(min=-0, max=32767, step=1) , fake.random_int(min=-0, max=127, step=1) , + fake.pyfloat() , fake.pyfloat() , fake.pystr() , fake.address() , ts + i, fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() , + fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address())) + + tdSql.execute('''insert into stable_2_1 (ts , q_int , q_bigint , q_smallint , q_tinyint , q_float , q_double, q_bool , q_binary , q_nchar, q_ts,\ + q_binary1 , q_nchar1 , q_binary2 , q_nchar2 , q_binary3 , q_nchar3 , q_binary4 , q_nchar4 , q_binary5 , q_nchar5 , q_binary6 , q_nchar6 , q_binary7 , q_nchar7, q_binary8 , q_nchar8) \ + values(%d, %d, %d, %d, %d, %f, %f, 0, 'binary.%s', 'nchar.%s', %d, 
'binary1.%s', 'nchar1.%s', 'binary2.%s', 'nchar2.%s', 'binary3.%s', 'nchar3.%s', \ + 'binary4.%s', 'nchar4.%s', 'binary5.%s', 'nchar5.%s', 'binary6.%s', 'nchar6.%s', 'binary7.%s', 'nchar7.%s', 'binary8.%s', 'nchar8.%s') ;''' + % (ts + i*1000 +10, fake.random_int(min=-0, max=2147483647, step=1), + fake.random_int(min=-0, max=9223372036854775807, step=1), + fake.random_int(min=-0, max=32767, step=1) , fake.random_int(min=-0, max=127, step=1) , + fake.pyfloat() , fake.pyfloat() , fake.pystr() , fake.address() , ts + i, fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() , + fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address())) + + tdSql.query("select count(*) from stable_1;") + tdSql.checkData(0,0,3*num_random*n) + tdSql.query("select count(*) from regular_table_1;") + tdSql.checkData(0,0,num_random*n) + + def math_nest(self,mathlist): + + print("==========%s===start=============" %mathlist) + os.system("rm -rf %s/%s.sql" % (self.testcasePath,self.testcaseFilename)) + + self.dropandcreateDB_random("%s" %self.db_nest, 1) + + if (mathlist == ['ABS','SQRT']) or (mathlist == ['SIN','COS','TAN','ASIN','ACOS','ATAN']) or (mathlist == ['FLOOR','CEIL','ROUND']) \ + or (mathlist == ['CSUM']) or (mathlist == ['']): + math_functions = mathlist + fun_fix_column = ['(q_bigint)','(q_smallint)','(q_tinyint)','(q_int)','(q_float)','(q_double)','(q_bigint_null)','(q_smallint_null)','(q_tinyint_null)','(q_int_null)','(q_float_null)','(q_double_null)'] + fun_column_1 = random.sample(math_functions,1)+random.sample(fun_fix_column,1) + math_fun_1 = str(fun_column_1).replace("[","").replace("]","").replace("'","").replace(", ","") + fun_column_2 = random.sample(math_functions,1)+random.sample(fun_fix_column,1) + math_fun_2 = str(fun_column_2).replace("[","").replace("]","").replace("'","").replace(", ","") + + fun_fix_column_j = ['(t1.q_bigint)','(t1.q_smallint)','(t1.q_tinyint)','(t1.q_int)','(t1.q_float)','(t1.q_double)','(t1.q_bigint_null)','(t1.q_smallint_null)','(t1.q_tinyint_null)','(t1.q_int_null)','(t1.q_float_null)','(t1.q_double_null)', + '(t2.q_bigint)','(t2.q_smallint)','(t2.q_tinyint)','(t2.q_int)','(t2.q_float)','(t2.q_double)','(t2.q_bigint_null)','(t2.q_smallint_null)','(t2.q_tinyint_null)','(t2.q_int_null)','(t2.q_float_null)','(t2.q_double_null)'] + fun_column_join_1 = random.sample(math_functions,1)+random.sample(fun_fix_column_j,1) + math_fun_join_1 = str(fun_column_join_1).replace("[","").replace("]","").replace("'","").replace(", ","") + fun_column_join_2 = random.sample(math_functions,1)+random.sample(fun_fix_column_j,1) + math_fun_join_2 = str(fun_column_join_2).replace("[","").replace("]","").replace("'","").replace(", ","") + + elif (mathlist == ['UNIQUE']) or (mathlist == ['HYPERLOGLOG']): + math_functions = mathlist + fun_fix_column = ['(q_bigint)','(q_smallint)','(q_tinyint)','(q_int)','(q_float)','(q_double)','(q_binary)','(q_nchar)','(q_bool)','(q_ts)', + '(q_bigint_null)','(q_smallint_null)','(q_tinyint_null)','(q_int_null)','(q_float_null)','(q_double_null)','(q_binary_null)','(q_nchar_null)','(q_bool_null)','(q_ts_null)'] + fun_column_1 = random.sample(math_functions,1)+random.sample(fun_fix_column,1) + math_fun_1 = str(fun_column_1).replace("[","").replace("]","").replace("'","").replace(", ","") + fun_column_2 = random.sample(math_functions,1)+random.sample(fun_fix_column,1) + math_fun_2 = 
str(fun_column_2).replace("[","").replace("]","").replace("'","").replace(", ","") + + fun_fix_column_j = ['(t1.q_bigint)','(t1.q_smallint)','(t1.q_tinyint)','(t1.q_int)','(t1.q_float)','(t1.q_double)','(t1.q_bigint_null)','(t1.q_smallint_null)','(t1.q_tinyint_null)','(t1.q_int_null)','(t1.q_float_null)','(t1.q_double_null)', + '(t2.q_bigint)','(t2.q_smallint)','(t2.q_tinyint)','(t2.q_int)','(t2.q_float)','(t2.q_double)','(t2.q_bigint_null)','(t2.q_smallint_null)','(t2.q_tinyint_null)','(t2.q_int_null)','(t2.q_float_null)','(t2.q_double_null)'] + fun_column_join_1 = random.sample(math_functions,1)+random.sample(fun_fix_column_j,1) + math_fun_join_1 = str(fun_column_join_1).replace("[","").replace("]","").replace("'","").replace(", ","") + fun_column_join_2 = random.sample(math_functions,1)+random.sample(fun_fix_column_j,1) + math_fun_join_2 = str(fun_column_join_2).replace("[","").replace("]","").replace("'","").replace(", ","") + + elif (mathlist == ['POW','LOG']) or (mathlist == ['MAVG']) or (mathlist == ['SAMPLE']) or (mathlist == ['TAIL']): + math_functions = mathlist + num = random.randint(0, 1000) + fun_fix_column = ['(q_bigint,num)','(q_smallint,num)','(q_tinyint,num)','(q_int,num)','(q_float,num)','(q_double,num)', + '(q_bigint_null,num)','(q_smallint_null,num)','(q_tinyint_null,num)','(q_int_null,num)','(q_float_null,num)','(q_double_null,num)'] + fun_column_1 = random.sample(math_functions,1)+random.sample(fun_fix_column,1) + math_fun_1 = str(fun_column_1).replace("[","").replace("]","").replace("'","").replace(", ","").replace("num",str(num)) + fun_column_2 = random.sample(math_functions,1)+random.sample(fun_fix_column,1) + math_fun_2 = str(fun_column_2).replace("[","").replace("]","").replace("'","").replace(", ","").replace("num",str(num)) + + fun_fix_column_j = ['(t1.q_bigint,num)','(t1.q_smallint,num)','(t1.q_tinyint,num)','(t1.q_int,num)','(t1.q_float,num)','(t1.q_double,num)', + '(t1.q_bigint_null,num)','(t1.q_smallint_null,num)','(t1.q_tinyint_null,num)','(t1.q_int_null,num)','(t1.q_float_null,num)','(t1.q_double_null,num)', + '(t2.q_bigint,num)','(t2.q_smallint,num)','(t2.q_tinyint,num)','(t2.q_int,num)','(t2.q_float,num)','(t2.q_double,num)', + '(t2.q_bigint_null,num)','(t2.q_smallint_null,num)','(t2.q_tinyint_null,num)','(t2.q_int_null,num)','(t2.q_float_null,num)','(t2.q_double_null,num)'] + fun_column_join_1 = random.sample(math_functions,1)+random.sample(fun_fix_column_j,1) + math_fun_join_1 = str(fun_column_join_1).replace("[","").replace("]","").replace("'","").replace(", ","").replace("num",str(num)) + fun_column_join_2 = random.sample(math_functions,1)+random.sample(fun_fix_column_j,1) + math_fun_join_2 = str(fun_column_join_2).replace("[","").replace("]","").replace("'","").replace(", ","").replace("num",str(num)) + + tdSql.query("select 1-1 as math_nest from stable_1 limit 1;") + for i in range(self.fornum): + if (mathlist == ['SIN','COS','TAN','ASIN','ACOS','ATAN']) or (mathlist == ['ABS','SQRT']) \ + or (mathlist == ['POW','LOG']) or (mathlist == ['FLOOR','CEIL','ROUND']) : + sql = "select ts , floor(asct1) from ( select " + sql += "%s as asct1, " % math_fun_1 + sql += "%s as asct2, " % math_fun_2 + sql += "%s, " % random.choice(self.s_s_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts from regular_table_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.order_where) + sql += ");" + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + tdSql.checkRows(100) + self.cur1.execute(sql) + elif 
(mathlist == ['MAVG']) or (mathlist == ['SAMPLE'])or (mathlist == ['TAIL']) or (mathlist == ['CSUM']) \ + or (mathlist == ['HYPERLOGLOG']) or (mathlist == ['UNIQUE']): + sql = "select floor(asct1) from ( select " + sql += "%s as asct1 " % math_fun_1 + # sql += "%s as asct2, " % math_fun_2 + # sql += "%s, " % random.choice(self.s_s_select) + # sql += "%s, " % random.choice(self.q_select) + sql += " from regular_table_1 where " + sql += "%s " % random.choice(self.q_where) + #sql += "%s " % random.choice(self.order_where) + sql += ");" + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + #tdSql.checkRows(100) + self.cur1.execute(sql) + + tdSql.query("select 1-2 as math_nest from stable_1 limit 1;") + for i in range(self.fornum): + if (mathlist == ['SIN','COS','TAN','ASIN','ACOS','ATAN']) or (mathlist == ['ABS','SQRT']) \ + or (mathlist == ['POW','LOG']) or (mathlist == ['FLOOR','CEIL','ROUND']) : + sql = "select ts , abs(asct1) from ( select " + sql += "%s as asct1, " % math_fun_1 + sql += "%s, " % random.choice(self.s_s_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts ts from regular_table_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s )" % random.choice(self.order_where) + sql += "%s " % random.choice(self.unionall_or_union) + sql += "select ts , asct2 from ( select " + sql += "%s as asct2, " % math_fun_2 + sql += "%s, " % random.choice(self.s_s_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts ts from regular_table_1 where " + sql += "%s " % random.choice(self.q_where) + #sql += "%s " % random.choice(having_support) + sql += "%s " % random.choice(self.order_where) + sql += "%s " % random.choice(self.limit1_where) + sql += ")" + tdLog.info(sql) + tdLog.info(len(sql)) + #TD-15437 tdSql.query(sql) + #TD-15437 self.cur1.execute(sql) + elif (mathlist == ['MAVG']) or (mathlist == ['SAMPLE'])or (mathlist == ['TAIL']) or (mathlist == ['CSUM']) \ + or (mathlist == ['HYPERLOGLOG']) or (mathlist == ['UNIQUE']): + sql = "select abs(asct1) from ( select " + sql += "%s as asct1 " % math_fun_1 + # sql += "%s, " % random.choice(self.s_s_select) + # sql += "%s, " % random.choice(self.q_select) + sql += "from regular_table_1 where " + sql += "%s )" % random.choice(self.q_where) + #sql += "%s )" % random.choice(self.order_where) + sql += "%s " % random.choice(self.unionall_or_union) + sql += "select floor(asct2) from ( select " + sql += "%s as asct2 " % math_fun_2 + # sql += "%s, " % random.choice(self.s_s_select) + # sql += "%s, " % random.choice(self.q_select) + sql += " from regular_table_1 where " + sql += "%s " % random.choice(self.q_where) + #sql += "%s " % random.choice(having_support) + #sql += "%s " % random.choice(self.order_where) + sql += "%s " % random.choice(self.limit1_where) + sql += ")" + tdLog.info(sql) + tdLog.info(len(sql)) + #TD-15473 tdSql.query(sql) + #TD-15473 self.cur1.execute(sql) + + tdSql.query("select 1-3 as math_nest from stable_1 limit 1;") + for i in range(self.fornum): + if (mathlist == ['SIN','COS','TAN','ASIN','ACOS','ATAN']) or (mathlist == ['ABS','SQRT']) \ + or (mathlist == ['POW','LOG']) or (mathlist == ['FLOOR','CEIL','ROUND']) : + sql = "select ts , min(asct1) from ( select " + sql += "%s as asct1, ts ," % math_fun_1 + sql += "%s as asct2, " % math_fun_2 + sql += "%s, " % random.choice(self.s_s_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts from regular_table_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s select " % random.choice(self.unionall_or_union) + sql 
+= "%s as asct2, ts ," % math_fun_2 + sql += "%s as asct1, " % math_fun_1 + sql += "%s, " % random.choice(self.s_s_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts from regular_table_2 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.order_where) + sql += ")" + tdLog.info(sql) + tdLog.info(len(sql)) + #TD-15473 tdSql.query(sql) + #self.cur1.execute(sql) + elif (mathlist == ['MAVG']) or (mathlist == ['SAMPLE'])or (mathlist == ['TAIL']) or (mathlist == ['CSUM']) \ + or (mathlist == ['HYPERLOGLOG']) or (mathlist == ['UNIQUE']): + sql = "select ts , min(asct1) from ( select " + sql += "%s as asct1, ts ," % math_fun_1 + sql += "%s as asct2, " % math_fun_2 + sql += "%s, " % random.choice(self.s_s_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts from regular_table_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s select " % random.choice(self.unionall_or_union) + sql += "%s as asct2, ts ," % math_fun_2 + sql += "%s as asct1, " % math_fun_1 + sql += "%s, " % random.choice(self.s_s_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts from regular_table_2 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.order_where) + sql += ")" + tdLog.info(sql) + tdLog.info(len(sql)) + #TD-15473 tdSql.query(sql) + #self.cur1.execute(sql) + + tdSql.query("select 1-4 as math_nest from stable_1 limit 1;") + for i in range(self.fornum): + if (mathlist == ['SIN','COS','TAN','ASIN','ACOS','ATAN']) or (mathlist == ['ABS','SQRT']) \ + or (mathlist == ['POW','LOG']) or (mathlist == ['FLOOR','CEIL','ROUND']) : + sql = "select ts , asct1 from ( select t1.ts as ts," + sql += "%s, " % math_fun_join_1 + sql += "%s as asct1, " % math_fun_join_2 + sql += "%s, " % math_fun_join_1 + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) + sql += "t2.ts from regular_table_1 t1 , regular_table_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.q_u_where) + sql += "and %s " % random.choice(self.q_u_or_where) + sql += "%s " % random.choice(self.order_u_where) + sql += ");" + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + tdSql.checkRows(100) + self.cur1.execute(sql) + elif (mathlist == ['MAVG']) or (mathlist == ['SAMPLE'])or (mathlist == ['TAIL']) or (mathlist == ['CSUM'])\ + or (mathlist == ['HYPERLOGLOG']) or (mathlist == ['UNIQUE']): + sql = "select count(asct1) from ( select " + sql += "%s as asct1 " % math_fun_join_2 + sql += "from regular_table_1 t1 , regular_table_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.q_u_where) + sql += "and %s " % random.choice(self.q_u_or_where) + sql += ");" + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + + tdSql.query("select 1-5 as math_nest from stable_1 limit 1;") + for i in range(self.fornum): + if (mathlist == ['SIN','COS','TAN','ASIN','ACOS','ATAN']) or (mathlist == ['ABS','SQRT']) \ + or (mathlist == ['POW','LOG']) or (mathlist == ['FLOOR','CEIL','ROUND']) : + sql = "select ts ," + sql += "%s, " % math_fun_1 + sql += "%s, " % random.choice(self.q_select) + sql += "%s, " % random.choice(self.q_select) + sql += "%s " % math_fun_2 + sql += " from ( select * from regular_table_1 ) where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.order_where) + sql += " ;" + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + tdSql.checkRows(100) + self.cur1.execute(sql) + elif (mathlist 
== ['MAVG']) or (mathlist == ['SAMPLE'])or (mathlist == ['TAIL']) or (mathlist == ['CSUM'])\ + or (mathlist == ['HYPERLOGLOG']) or (mathlist == ['UNIQUE']): + sql = "select " + # sql += "%s, " % math_fun_1 + # sql += "%s, " % random.choice(self.q_select) + # sql += "%s, " % random.choice(self.q_select) + sql += "%s " % math_fun_2 + sql += " from ( select * from regular_table_1 ) where " + sql += "%s " % random.choice(self.q_where) + sql += " ;" + tdLog.info(sql) + tdLog.info(len(sql)) + #TD-15973 tdSql.query(sql) + #TD-15973 self.cur1.execute(sql) + + tdSql.query("select 1-6 as math_nest from stable_1 limit 1;") + for i in range(self.fornum): + if (mathlist == ['SIN','COS','TAN','ASIN','ACOS','ATAN']) or (mathlist == ['ABS','SQRT']) \ + or (mathlist == ['POW','LOG']) or (mathlist == ['FLOOR','CEIL','ROUND']) : + sql = "select ts , max(asct1) from ( select t1.ts as ts," + sql += "%s, " % math_fun_join_1 + sql += "%s as asct1, " % math_fun_join_2 + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) + sql += "%s, " % math_fun_join_1 + sql += "t2.ts from regular_table_1 t1 , regular_table_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.q_u_where) + sql += "and %s )" % random.choice(self.q_u_or_where) + sql += "%s " % random.choice(self.order_where) + sql += "%s " % random.choice(self.limit1_where) + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + elif (mathlist == ['MAVG']) or (mathlist == ['SAMPLE'])or (mathlist == ['TAIL']) or (mathlist == ['CSUM']) \ + or (mathlist == ['HYPERLOGLOG']) or (mathlist == ['UNIQUE']): + sql = "select max(asct1) from ( select " + #sql += "%s, " % math_fun_join_1 + sql += "%s as asct1 " % math_fun_join_2 + # sql += "t1.%s, " % random.choice(self.q_select) + # sql += "t2.%s, " % random.choice(self.q_select) + # sql += "%s, " % math_fun_join_1 + sql += "from regular_table_1 t1 , regular_table_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.q_u_where) + sql += "and %s )" % random.choice(self.q_u_or_where) + sql += "%s " % random.choice(self.limit1_where) + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + + tdSql.query("select 1-7 as math_nest from stable_1 limit 1;") + for i in range(self.fornum): + if (mathlist == ['SIN','COS','TAN','ASIN','ACOS','ATAN']) or (mathlist == ['ABS','SQRT']) \ + or (mathlist == ['POW','LOG']) or (mathlist == ['FLOOR','CEIL','ROUND']) : + sql = "select ts , abs(asct1) from ( select " + sql += "%s as asct1, ts ," % math_fun_1 + sql += "%s as asct2, " % math_fun_2 + sql += "%s, " % random.choice(self.q_select) + sql += "%s, " % random.choice(self.t_select) + sql += "ts from stable_1 where " + sql += "%s " % random.choice(self.qt_where) + sql += "%s " % random.choice(self.order_where) + sql += ") ;" + tdLog.info(sql) + tdLog.info(len(sql)) + # tdSql.query(sql) + # tdSql.checkRows(300) + # self.cur1.execute(sql)# TD-16039 + elif (mathlist == ['MAVG']) or (mathlist == ['SAMPLE']) or (mathlist == ['TAIL']) or (mathlist == ['CSUM'])\ + or (mathlist == ['HYPERLOGLOG']) or (mathlist == ['UNIQUE']): + sql = "select abs(asct1) from ( select " + sql += "%s as asct1 " % math_fun_1 + sql += "from stable_1 where " + sql += "%s " % random.choice(self.qt_where) + sql += ") ;" + tdLog.info(sql) + tdLog.info(len(sql)) + # tdSql.query(sql) + # self.cur1.execute(sql)# TD-16039 + + tdSql.query("select 1-8 as math_nest from stable_1 limit 1;") + for i in range(self.fornum): + if (mathlist == 
['SIN','COS','TAN','ASIN','ACOS','ATAN']) or (mathlist == ['ABS','SQRT']) \ + or (mathlist == ['POW','LOG']) or (mathlist == ['FLOOR','CEIL','ROUND']) : + sql = "select ts,floor(asct1) " + sql += "from ( select " + sql += "%s, " % random.choice(self.s_s_select) + sql += "%s as asct1, ts ," % math_fun_1 + sql += "%s as asct2, " % math_fun_2 + sql += "%s, " % random.choice(self.q_select) + sql += "%s, " % random.choice(self.t_select) + sql += "ts from stable_1 where " + sql += "%s " % random.choice(self.qt_where) + sql += "%s " % random.choice(self.order_where) + sql += ") ;" + tdLog.info(sql) + tdLog.info(len(sql)) + #tdSql.query(sql) + # tdSql.checkRows(300) + # self.cur1.execute(sql)# TD-16039 + elif (mathlist == ['MAVG']) or (mathlist == ['SAMPLE'])or (mathlist == ['TAIL']) or (mathlist == ['CSUM'])\ + or (mathlist == ['HYPERLOGLOG']) or (mathlist == ['UNIQUE']): + sql = "select floor(asct1) " + sql += "from ( select " + sql += "%s as asct1 " % math_fun_1 + sql += " from stable_1 where " + sql += "%s " % random.choice(self.qt_where) + sql += ") ;" + tdLog.info(sql) + tdLog.info(len(sql)) + # tdSql.query(sql) + # self.cur1.execute(sql)# TD-16039 + + tdSql.query("select 1-9 as math_nest from stable_1 limit 1;") + for i in range(self.fornum): + if (mathlist == ['SIN','COS','TAN','ASIN','ACOS','ATAN']) or (mathlist == ['ABS','SQRT']) \ + or (mathlist == ['POW','LOG']) or (mathlist == ['FLOOR','CEIL','ROUND']) : + sql = "select ts , max(asct1) from ( select t1.ts as ts," + sql += "%s, " % math_fun_join_1 + sql += "%s as asct1, " % math_fun_join_2 + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) + sql += "t2.ts from stable_1 t1 , stable_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.t_join_where) + sql += "and %s " % random.choice(self.t_u_where) + sql += "and %s " % random.choice(self.t_u_or_where) + sql += "%s " % random.choice(self.order_u_where) + sql += "%s " % random.choice(self.limit1_where) + sql += ") ;" + tdLog.info(sql) + tdLog.info(len(sql)) + # tdSql.query(sql) + # self.cur1.execute(sql)# TD-16039 + elif (mathlist == ['MAVG']) or (mathlist == ['SAMPLE'])or (mathlist == ['TAIL']) or (mathlist == ['CSUM'])\ + or (mathlist == ['HYPERLOGLOG']) or (mathlist == ['UNIQUE']): + sql = "select max(asct1) from ( select " + sql += "%s as asct1 " % math_fun_join_2 + # sql += "t1.%s, " % random.choice(self.q_select) + # sql += "t1.%s, " % random.choice(self.q_select) + # sql += "t2.%s, " % random.choice(self.q_select) + # sql += "t2.%s, " % random.choice(self.q_select) + sql += "from stable_1 t1 , stable_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.t_join_where) + sql += "and %s " % random.choice(self.t_u_where) + sql += "and %s " % random.choice(self.t_u_or_where) + sql += "%s " % random.choice(self.limit1_where) + sql += ") ;" + tdLog.info(sql) + tdLog.info(len(sql)) + # tdSql.query(sql)# TD-16039 + # self.cur1.execute(sql) + + self.restartDnodes() + tdSql.query("select 1-10 as math_nest from stable_1 limit 1;") + for i in range(self.fornum): + if (mathlist == ['SIN','COS','TAN','ASIN','ACOS','ATAN']) or (mathlist == ['ABS','SQRT']) \ + or (mathlist == ['POW','LOG']) or (mathlist == ['FLOOR','CEIL','ROUND']) : + sql = "select ts , min(asct1) from ( select " + sql += "%s as asct1, ts ," % math_fun_1 + sql += "%s as asct2, " % math_fun_2 + sql += "%s, " % random.choice(self.s_r_select) + sql += "%s, " % 
random.choice(self.q_select) + sql += "ts from stable_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.order_where) + sql += ") %s " % random.choice(self.unionall_or_union) + sql += "select ts , max(asct2) from ( select " + sql += "%s as asct1, ts ," % math_fun_1 + sql += "%s as asct2, " % math_fun_2 + sql += "%s, " % random.choice(self.s_r_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts from stable_2 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.order_where) + sql += "%s " % random.choice(self.limit1_where) + sql += ")" + tdLog.info(sql) + tdLog.info(len(sql)) + #TD-15437 tdSql.query(sql) + #TD-15437 self.cur1.execute(sql) + elif (mathlist == ['MAVG']) or (mathlist == ['SAMPLE'])or (mathlist == ['TAIL']) or (mathlist == ['CSUM']) \ + or (mathlist == ['HYPERLOGLOG']) or (mathlist == ['UNIQUE']): + sql = "select min(asct1) from ( select " + sql += "%s as asct1 " % math_fun_1 + sql += " from stable_1 where " + sql += "%s " % random.choice(self.q_where) + sql += ") %s " % random.choice(self.unionall_or_union) + sql += "select max(asct2) from ( select " + sql += "%s as asct2 " % math_fun_2 + sql += "from stable_2 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.limit1_where) + sql += ")" + tdLog.info(sql) + tdLog.info(len(sql)) + #TD-15437 tdSql.query(sql) + #TD-15437 self.cur1.execute(sql) + + #3 inter union not support + tdSql.query("select 1-11 as math_nest from stable_1 limit 1;") + for i in range(self.fornum): + if (mathlist == ['SIN','COS','TAN','ASIN','ACOS','ATAN']) or (mathlist == ['ABS','SQRT']) \ + or (mathlist == ['POW','LOG']) or (mathlist == ['FLOOR','CEIL','ROUND']) : + sql = "select ts , min(asct1), max(asct2) from ( select " + sql += "%s as asct1, ts ," % math_fun_1 + sql += "%s as asct2, " % math_fun_2 + sql += "%s, " % random.choice(self.s_r_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts from stable_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.order_where) + #sql += "%s " % random.choice(limit1_where) + sql += " %s " % random.choice(self.unionall_or_union) + sql += " select " + sql += "%s as asct1, ts ," % math_fun_1 + sql += "%s as asct2, " % math_fun_2 + sql += "%s, " % random.choice(self.s_r_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts from stable_2 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.order_where) + sql += "%s " % random.choice(self.limit1_where) + sql += ")" + tdLog.info(sql) + tdLog.info(len(sql)) + #TD-15837 tdSql.query(sql) + # self.cur1.execute(sql) + elif (mathlist == ['MAVG']) or (mathlist == ['SAMPLE'])or (mathlist == ['TAIL']) or (mathlist == ['CSUM']) \ + or (mathlist == ['HYPERLOGLOG']) or (mathlist == ['UNIQUE']): + sql = "select min(asct1) from ( select " + sql += "%s as asct1 " % math_fun_1 + sql += " from stable_1 where " + sql += "%s " % random.choice(self.q_where) + sql += " %s " % random.choice(self.unionall_or_union) + sql += " select " + sql += "%s as asct2 " % math_fun_2 + sql += " from stable_2 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.limit1_where) + sql += ")" + tdLog.info(sql) + tdLog.info(len(sql)) + #TD-15837 tdSql.query(sql) + #self.cur1.execute(sql) + + tdSql.query("select 1-12 as math_nest from stable_1 limit 1;") + for i in range(self.fornum): + if (mathlist == ['SIN','COS','TAN','ASIN','ACOS','ATAN']) or 
(mathlist == ['ABS','SQRT']) \ + or (mathlist == ['POW','LOG']) or (mathlist == ['FLOOR','CEIL','ROUND']) : + sql = "select ts , max(asct1) from ( select t1.ts as ts," + sql += "%s, " % math_fun_join_1 + sql += "%s as asct1, " % math_fun_join_2 + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) + sql += "t2.ts from stable_1 t1 , stable_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.t_u_where) + sql += "and %s " % random.choice(self.t_u_or_where) + sql += "%s " % random.choice(self.order_u_where) + sql += "%s " % random.choice(self.limit1_where) + sql += ");" + tdLog.info(sql) + tdLog.info(len(sql)) + # tdSql.query(sql) + # self.cur1.execute(sql)# TD-16039 + elif (mathlist == ['MAVG']) or (mathlist == ['SAMPLE'])or (mathlist == ['TAIL']) or (mathlist == ['CSUM'])\ + or (mathlist == ['HYPERLOGLOG']) or (mathlist == ['UNIQUE']): + sql = "select max(asct1) from ( select " + sql += "%s as asct1 " % math_fun_join_2 + sql += " from stable_1 t1 , stable_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.t_u_where) + sql += "and %s " % random.choice(self.t_u_or_where) + sql += "%s " % random.choice(self.limit1_where) + sql += ");" + tdLog.info(sql) + tdLog.info(len(sql)) + # tdSql.query(sql) + # self.cur1.execute(sql)# TD-16039 + + tdSql.query("select 1-13 as math_nest from stable_1 limit 1;") + for i in range(self.fornum): + if (mathlist == ['SIN','COS','TAN','ASIN','ACOS','ATAN']) or (mathlist == ['ABS','SQRT']) \ + or (mathlist == ['POW','LOG']) or (mathlist == ['FLOOR','CEIL','ROUND']) : + sql = "select ts ," + sql += "%s, " % math_fun_1 + sql += "%s, " % random.choice(self.q_select) + sql += "%s, " % random.choice(self.q_select) + sql += "%s " % math_fun_2 + sql += "%s " % random.choice(self.t_select) + sql += " from ( select * from stable_1 where " + sql += "%s " % random.choice(self.qt_where) + sql += "%s " % random.choice(self.order_where) + sql += ") ;" + tdLog.info(sql) + tdLog.info(len(sql)) + # tdSql.query(sql) + # tdSql.checkRows(300) + # self.cur1.execute(sql) # TD-16039 + elif (mathlist == ['MAVG']) or (mathlist == ['SAMPLE'])or (mathlist == ['TAIL']) or (mathlist == ['CSUM']) \ + or (mathlist == ['HYPERLOGLOG']) or (mathlist == ['UNIQUE']): + sql = "select " + sql += "%s " % math_fun_2 + sql += "%s " % random.choice(self.t_select) + sql += " from ( select * from stable_1 where " + sql += "%s " % random.choice(self.qt_where) + sql += ") ;" + tdLog.info(sql) + tdLog.info(len(sql)) + #TD15973 tdSql.query(sql) + #TD15973 self.cur1.execute(sql) + + tdSql.query("select 1-14 as math_nest from stable_1 limit 1;") + for i in range(self.fornum): + if (mathlist == ['SIN','COS','TAN','ASIN','ACOS','ATAN']) or (mathlist == ['ABS','SQRT']) \ + or (mathlist == ['POW','LOG']) or (mathlist == ['FLOOR','CEIL','ROUND']) : + sql = "select avg(asct1),count(asct2) from ( select " + sql += "%s as asct1, " % math_fun_1 + sql += "%s as asct2" % math_fun_2 + sql += " from stable_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.partiton_where) + sql += "%s " % random.choice(self.order_desc_where) + sql += "%s " % random.choice([self.limit_where[2] , self.limit_where[3]] ) + sql += " ) ;" + tdLog.info(sql) + tdLog.info(len(sql)) + # tdSql.query(sql) + # self.cur1.execute(sql) # TD-16039 + elif (mathlist == ['MAVG']) or (mathlist == ['SAMPLE'])or (mathlist == ['TAIL']) or (mathlist == 
['CSUM'])\
+ or (mathlist == ['HYPERLOGLOG']) or (mathlist == ['UNIQUE']):
+ sql = "select avg(asct1) from ( select "
+ sql += "%s as asct1 " % math_fun_1
+ sql += " from stable_1 where "
+ sql += "%s " % random.choice(self.q_where)
+ sql += "%s " % random.choice(self.partiton_where)
+ sql += "%s " % random.choice([self.limit_where[2] , self.limit_where[3]] )
+ sql += " ) ;"
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ # tdSql.query(sql)
+ # self.cur1.execute(sql) # TD-16039
+
+ tdSql.query("select 1-15 as math_nest from stable_1 limit 1;")
+ for i in range(self.fornum):
+ if (mathlist == ['SIN','COS','TAN','ASIN','ACOS','ATAN']) or (mathlist == ['ABS','SQRT']) \
+ or (mathlist == ['POW','LOG']) or (mathlist == ['FLOOR','CEIL','ROUND']) :
+ sql = "select ts , max(asct1) from ( select t1.ts as ts,"
+ sql += "%s, " % math_fun_join_1
+ sql += "%s as asct1, " % math_fun_join_2
+ sql += "t1.%s, " % random.choice(self.q_select)
+ sql += "t1.%s, " % random.choice(self.q_select)
+ sql += "t2.%s, " % random.choice(self.q_select)
+ sql += "t2.%s " % random.choice(self.q_select)
+ sql += "from stable_1 t1, stable_2 t2 where t1.ts = t2.ts and "
+ sql += "%s " % random.choice(self.t_join_where)
+ sql += " and %s " % random.choice(self.qt_u_or_where)
+ sql += "%s " % random.choice(self.partiton_where_j)
+ sql += "%s " % random.choice(self.slimit1_where)
+ sql += ") "
+ sql += "%s " % random.choice(self.order_desc_where)
+ sql += "%s ;" % random.choice(self.limit_u_where)
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ # tdSql.query(sql)
+ # self.cur1.execute(sql) # TD-16039
+ elif (mathlist == ['MAVG']) or (mathlist == ['SAMPLE']) or (mathlist == ['TAIL']) or (mathlist == ['CSUM'])\
+ or (mathlist == ['HYPERLOGLOG']) or (mathlist == ['UNIQUE']):
+ sql = "select max(asct1) from ( select "
+ sql += "%s as asct1 " % math_fun_join_2
+ sql += "from stable_1 t1, stable_2 t2 where t1.ts = t2.ts and "
+ sql += "%s " % random.choice(self.t_join_where)
+ sql += " and %s " % random.choice(self.qt_u_or_where)
+ sql += "%s " % random.choice(self.partiton_where_j)
+ sql += "%s " % random.choice(self.slimit1_where)
+ sql += ") "
+ sql += "%s ;" % random.choice(self.limit_u_where)
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ # tdSql.query(sql)
+ # self.cur1.execute(sql) # TD-16039
+
+ # replay the accumulated .sql file through the taos shell and time it
+ startTime_taos_f = time.time()
+ print("taos -f %s sql start!" %mathlist)
+ taos_cmd1 = "taos -f %s/%s.sql" % (self.testcasePath,self.testcaseFilename)
+ _ = subprocess.check_output(taos_cmd1, shell=True).decode("utf-8")
+ print("taos -f %s sql over!" %mathlist)
+ endTime_taos_f = time.time()
+ print("taos_f total time %ds" % (endTime_taos_f - startTime_taos_f))
+
+ print("=========%s====over=============" %mathlist)
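+ # str_nest below mirrors math_nest for the string functions: LTRIM/RTRIM/LOWER/UPPER,
+ # LENGTH/CHAR_LENGTH, SUBSTR, CONCAT and CONCAT_WS. It builds the same randomized
+ # nested-query shapes, substituting string columns (q_binary*/q_nchar*, plus the loc
+ # tag and tbname in the super-table variants) into the generated select lists.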
+
+
+ def str_nest(self,strlist):
+
+ print("==========%s===start=============" %strlist)
+ os.system("rm -rf %s/%s.sql" % (self.testcasePath,self.testcaseFilename))
+
+ self.dropandcreateDB_random("%s" %self.db_nest, 1)
+
+ if (strlist == ['LTRIM','RTRIM','LOWER','UPPER']) or (strlist == ['LENGTH','CHAR_LENGTH']) \
+ or (strlist == ['']):
+ str_functions = strlist
+ fun_fix_column = ['(q_nchar)','(q_binary)','(q_nchar_null)','(q_binary_null)']
+ fun_column_1 = random.sample(str_functions,1)+random.sample(fun_fix_column,1)
+ str_fun_1 = str(fun_column_1).replace("[","").replace("]","").replace("'","").replace(", ","")
+ fun_column_2 = random.sample(str_functions,1)+random.sample(fun_fix_column,1)
+ str_fun_2 = str(fun_column_2).replace("[","").replace("]","").replace("'","").replace(", ","")
+
+ fun_fix_column_j = ['(t1.q_nchar)','(t1.q_binary)','(t1.q_nchar_null)','(t1.q_binary_null)',
+ '(t2.q_nchar)','(t2.q_binary)','(t2.q_nchar_null)','(t2.q_binary_null)']
+ fun_column_join_1 = random.sample(str_functions,1)+random.sample(fun_fix_column_j,1)
+ str_fun_join_1 = str(fun_column_join_1).replace("[","").replace("]","").replace("'","").replace(", ","")
+ fun_column_join_2 = random.sample(str_functions,1)+random.sample(fun_fix_column_j,1)
+ str_fun_join_2 = str(fun_column_join_2).replace("[","").replace("]","").replace("'","").replace(", ","")
+
+ fun_fix_column_s = ['(q_nchar)','(q_binary)','(q_nchar_null)','(q_binary_null)','(loc)','(tbname)']
+ fun_column_s_1 = random.sample(str_functions,1)+random.sample(fun_fix_column_s,1)
+ str_fun_s_1 = str(fun_column_s_1).replace("[","").replace("]","").replace("'","").replace(", ","")
+ fun_column_s_2 = random.sample(str_functions,1)+random.sample(fun_fix_column_s,1)
+ str_fun_s_2 = str(fun_column_s_2).replace("[","").replace("]","").replace("'","").replace(", ","")
+
+ fun_fix_column_s_j = ['(t1.q_nchar)','(t1.q_binary)','(t1.q_nchar_null)','(t1.q_binary_null)','(t1.loc)','(t1.tbname)',
+ '(t2.q_nchar)','(t2.q_binary)','(t2.q_nchar_null)','(t2.q_binary_null)','(t2.loc)','(t2.tbname)']
+ fun_column_join_s_1 = random.sample(str_functions,1)+random.sample(fun_fix_column_s_j,1)
+ str_fun_join_s_1 = str(fun_column_join_s_1).replace("[","").replace("]","").replace("'","").replace(", ","")
+ fun_column_join_s_2 = random.sample(str_functions,1)+random.sample(fun_fix_column_s_j,1)
+ str_fun_join_s_2 = str(fun_column_join_s_2).replace("[","").replace("]","").replace("'","").replace(", ","")
+
+ elif (strlist == ['SUBSTR']) :
+ str_functions = strlist
+ pos = random.randint(1, 20)
+ sub_len = random.randint(1, 10)
+ fun_fix_column = ['(q_nchar,pos)','(q_binary,pos)','(q_nchar_null,pos)','(q_binary_null,pos)',
+ '(q_nchar,pos,sub_len)','(q_binary,pos,sub_len)','(q_nchar_null,pos,sub_len)','(q_binary_null,pos,sub_len)',]
+ fun_column_1 = random.sample(str_functions,1)+random.sample(fun_fix_column,1)
+ str_fun_1 = str(fun_column_1).replace("[","").replace("]","").replace("'","").replace(", ","").replace("pos",str(pos)).replace("sub_len",str(sub_len))
+ fun_column_2 = random.sample(str_functions,1)+random.sample(fun_fix_column,1)
+ str_fun_2 = str(fun_column_2).replace("[","").replace("]","").replace("'","").replace(", ","").replace("pos",str(pos)).replace("sub_len",str(sub_len))
+
+ fun_fix_column_j = ['(t1.q_nchar,pos)','(t1.q_binary,pos)','(t1.q_nchar_null,pos)','(t1.q_binary_null,pos)',
+            fun_fix_column_j = ['(t1.q_nchar,pos)','(t1.q_binary,pos)','(t1.q_nchar_null,pos)','(t1.q_binary_null,pos)',
+                        '(t1.q_nchar,pos,sub_len)','(t1.q_binary,pos,sub_len)','(t1.q_nchar_null,pos,sub_len)','(t1.q_binary_null,pos,sub_len)',
+                        '(t2.q_nchar,pos)','(t2.q_binary,pos)','(t2.q_nchar_null,pos)','(t2.q_binary_null,pos)',
+                        '(t2.q_nchar,pos,sub_len)','(t2.q_binary,pos,sub_len)','(t2.q_nchar_null,pos,sub_len)','(t2.q_binary_null,pos,sub_len)']
+            fun_column_join_1 = random.sample(str_functions,1)+random.sample(fun_fix_column_j,1)
+            str_fun_join_1 = str(fun_column_join_1).replace("[","").replace("]","").replace("'","").replace(", ","").replace("pos",str(pos)).replace("sub_len",str(sub_len))
+            fun_column_join_2 = random.sample(str_functions,1)+random.sample(fun_fix_column_j,1)
+            str_fun_join_2 = str(fun_column_join_2).replace("[","").replace("]","").replace("'","").replace(", ","").replace("pos",str(pos)).replace("sub_len",str(sub_len))
+
+            fun_fix_column_s = ['(q_nchar,pos)','(q_binary,pos)','(q_nchar_null,pos)','(q_binary_null,pos)','(loc,pos)',
+                        '(q_nchar,pos,sub_len)','(q_binary,pos,sub_len)','(q_nchar_null,pos,sub_len)','(q_binary_null,pos,sub_len)','(loc,pos,sub_len)',]
+            fun_column_s_1 = random.sample(str_functions,1)+random.sample(fun_fix_column_s,1)
+            str_fun_s_1 = str(fun_column_s_1).replace("[","").replace("]","").replace("'","").replace(", ","").replace("pos",str(pos)).replace("sub_len",str(sub_len))
+            fun_column_s_2 = random.sample(str_functions,1)+random.sample(fun_fix_column_s,1)
+            str_fun_s_2 = str(fun_column_s_2).replace("[","").replace("]","").replace("'","").replace(", ","").replace("pos",str(pos)).replace("sub_len",str(sub_len))
+
+            fun_fix_column_s_j = ['(t1.q_nchar,pos)','(t1.q_binary,pos)','(t1.q_nchar_null,pos)','(t1.q_binary_null,pos)','(t1.loc,pos)',
+                        '(t1.q_nchar,pos,sub_len)','(t1.q_binary,pos,sub_len)','(t1.q_nchar_null,pos,sub_len)','(t1.q_binary_null,pos,sub_len)','(t1.loc,pos,sub_len)',
+                        '(t2.q_nchar,pos)','(t2.q_binary,pos)','(t2.q_nchar_null,pos)','(t2.q_binary_null,pos)','(t2.loc,pos)',
+                        '(t2.q_nchar,pos,sub_len)','(t2.q_binary,pos,sub_len)','(t2.q_nchar_null,pos,sub_len)','(t2.q_binary_null,pos,sub_len)','(t2.loc,pos,sub_len)']
+            fun_column_join_s_1 = random.sample(str_functions,1)+random.sample(fun_fix_column_s_j,1)
+            str_fun_join_s_1 = str(fun_column_join_s_1).replace("[","").replace("]","").replace("'","").replace(", ","").replace("pos",str(pos)).replace("sub_len",str(sub_len))
+            fun_column_join_s_2 = random.sample(str_functions,1)+random.sample(fun_fix_column_s_j,1)
+            str_fun_join_s_2 = str(fun_column_join_s_2).replace("[","").replace("]","").replace("'","").replace(", ","").replace("pos",str(pos)).replace("sub_len",str(sub_len))
+
+        elif (strlist == ['CONCAT']) :
+            str_functions = strlist
+            i = random.randint(2,8)
+            fun_fix_column = ['q_nchar','q_nchar1','q_nchar2','q_nchar3','q_nchar4','q_nchar5','q_nchar6','q_nchar7','q_nchar8','q_nchar_null',
+                        'q_binary','q_binary1','q_binary2','q_binary3','q_binary4','q_binary5','q_binary6','q_binary7','q_binary8','q_binary_null']
+
+            column1 = str(random.sample(fun_fix_column,i)).replace("[","").replace("]","").replace("'","")
+            fun_column_1 = str(random.sample(str_functions,1))+'('+column1+')'
+            str_fun_1 = str(fun_column_1).replace("[","").replace("]","").replace("'","")
+
+            column2 = str(random.sample(fun_fix_column,i)).replace("[","").replace("]","").replace("'","")
+            fun_column_2 = str(random.sample(str_functions,1))+'('+column2+')'
+            str_fun_2 = str(fun_column_2).replace("[","").replace("]","").replace("'","")
+
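+            # descriptive note: join-aliased (t1./t2.) operand pool for CONCAT; i (2-8) randomly sampled columns are concatenated per generated query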
+            fun_fix_column_j = ['(t1.q_nchar)','(t1.q_nchar1)','(t1.q_nchar2)','(t1.q_nchar3)','(t1.q_nchar4)','(t1.q_nchar5)','(t1.q_nchar6)','(t1.q_nchar7)','(t1.q_nchar8)','(t1.q_nchar_null)',
+                        '(t2.q_nchar)','(t2.q_nchar1)','(t2.q_nchar2)','(t2.q_nchar3)','(t2.q_nchar4)','(t2.q_nchar5)','(t2.q_nchar6)','(t2.q_nchar7)','(t2.q_nchar8)','(t2.q_nchar_null)',
+                        '(t1.q_binary)','(t1.q_binary1)','(t1.q_binary2)','(t1.q_binary3)','(t1.q_binary4)','(t1.q_binary5)','(t1.q_binary6)','(t1.q_binary7)','(t1.q_binary8)','(t1.q_binary_null)',
+                        '(t2.q_binary)','(t2.q_binary1)','(t2.q_binary2)','(t2.q_binary3)','(t2.q_binary4)','(t2.q_binary5)','(t2.q_binary6)','(t2.q_binary7)','(t2.q_binary8)','(t2.q_binary_null)']
+
+            column_j1 = str(random.sample(fun_fix_column_j,i)).replace("[","").replace("]","").replace("'","")
+            fun_column_join_1 = str(random.sample(str_functions,1))+'('+column_j1+')'
+            str_fun_join_1 = str(fun_column_join_1).replace("[","").replace("]","").replace("'","")
+
+            column_j2 = str(random.sample(fun_fix_column_j,i)).replace("[","").replace("]","").replace("'","")
+            fun_column_join_2 = str(random.sample(str_functions,1))+'('+column_j2+')'
+            str_fun_join_2 = str(fun_column_join_2).replace("[","").replace("]","").replace("'","")
+
+            fun_fix_column_s = ['q_nchar','q_nchar1','q_nchar2','q_nchar3','q_nchar4','q_nchar5','q_nchar6','q_nchar7','q_nchar8','loc','q_nchar_null',
+                        'q_binary','q_binary1','q_binary2','q_binary3','q_binary4','q_binary5','q_binary6','q_binary7','q_binary8','q_binary_null']
+
+            column_s1 = str(random.sample(fun_fix_column_s,i)).replace("[","").replace("]","").replace("'","")
+            fun_column_s_1 = str(random.sample(str_functions,1))+'('+column_s1+')'
+            str_fun_s_1 = str(fun_column_s_1).replace("[","").replace("]","").replace("'","")
+
+            column_s2 = str(random.sample(fun_fix_column_s,i)).replace("[","").replace("]","").replace("'","")
+            fun_column_s_2 = str(random.sample(str_functions,1))+'('+column_s2+')'
+            str_fun_s_2 = str(fun_column_s_2).replace("[","").replace("]","").replace("'","")
+
+            fun_fix_column_s_j = ['(t1.q_nchar)','(t1.q_nchar1)','(t1.q_nchar2)','(t1.q_nchar3)','(t1.q_nchar4)','(t1.q_nchar5)','(t1.q_nchar6)','(t1.q_nchar7)','(t1.q_nchar8)','(t1.q_nchar_null)','(t1.loc)',
+                        '(t2.q_nchar)','(t2.q_nchar1)','(t2.q_nchar2)','(t2.q_nchar3)','(t2.q_nchar4)','(t2.q_nchar5)','(t2.q_nchar6)','(t2.q_nchar7)','(t2.q_nchar8)','(t2.q_nchar_null)','(t2.loc)',
+                        '(t1.q_binary)','(t1.q_binary1)','(t1.q_binary2)','(t1.q_binary3)','(t1.q_binary4)','(t1.q_binary5)','(t1.q_binary6)','(t1.q_binary7)','(t1.q_binary8)','(t1.q_binary_null)',
+                        '(t2.q_binary)','(t2.q_binary1)','(t2.q_binary2)','(t2.q_binary3)','(t2.q_binary4)','(t2.q_binary5)','(t2.q_binary6)','(t2.q_binary7)','(t2.q_binary8)','(t2.q_binary_null)']
+
+            column_j_s1 = str(random.sample(fun_fix_column_s_j,i)).replace("[","").replace("]","").replace("'","")
+            fun_column_join_s_1 = str(random.sample(str_functions,1))+'('+column_j_s1+')'
+            str_fun_join_s_1 = str(fun_column_join_s_1).replace("[","").replace("]","").replace("'","")
+
+            column_j_s2 = str(random.sample(fun_fix_column_s_j,i)).replace("[","").replace("]","").replace("'","")
+            fun_column_join_s_2 = str(random.sample(str_functions,1))+'('+column_j_s2+')'
+            str_fun_join_s_2 = str(fun_column_join_s_2).replace("[","").replace("]","").replace("'","")
+
+        elif (strlist == ['CONCAT_WS']):
+            str_functions = strlist
+            i = random.randint(2,8)
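+            # descriptive note: CONCAT_WS takes a separator string as its first argument; the pools below supply the value columns that follow it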
+            fun_fix_column = ['q_nchar','q_nchar1','q_nchar2','q_nchar3','q_nchar4','q_nchar5','q_nchar6','q_nchar7','q_nchar8','q_nchar_null',
+                        'q_binary','q_binary1','q_binary2','q_binary3','q_binary4','q_binary5','q_binary6','q_binary7','q_binary8','q_binary_null']
+
+            separators = ['',' ','abc','123','!','@','#','$','%','^','&','*','(',')','-','_','+','=','{',
+                        '[','}',']','|',';',':',',','.','<','>','?','/','~','`','taos','涛思']
+            separator = str(random.sample(separators,i)).replace("[","").replace("]","")
+
+            column1 = str(random.sample(fun_fix_column,i)).replace("[","").replace("]","").replace("'","")
+            fun_column_1 = str(random.sample(str_functions,1))+'('+'\"'+separator+'\",'+column1+')'
+            str_fun_1 = str(fun_column_1).replace("[","").replace("]","").replace("'","")
+
+            column2 = str(random.sample(fun_fix_column,i)).replace("[","").replace("]","").replace("'","")
+            fun_column_2 = str(random.sample(str_functions,1))+'('+'\"'+separator+'\",'+column2+')'
+            str_fun_2 = str(fun_column_2).replace("[","").replace("]","").replace("'","")
+
+            fun_fix_column_j = ['(t1.q_nchar)','(t1.q_nchar1)','(t1.q_nchar2)','(t1.q_nchar3)','(t1.q_nchar4)','(t1.q_nchar5)','(t1.q_nchar6)','(t1.q_nchar7)','(t1.q_nchar8)','(t1.q_nchar_null)',
+                        '(t2.q_nchar)','(t2.q_nchar1)','(t2.q_nchar2)','(t2.q_nchar3)','(t2.q_nchar4)','(t2.q_nchar5)','(t2.q_nchar6)','(t2.q_nchar7)','(t2.q_nchar8)','(t2.q_nchar_null)',
+                        '(t1.q_binary)','(t1.q_binary1)','(t1.q_binary2)','(t1.q_binary3)','(t1.q_binary4)','(t1.q_binary5)','(t1.q_binary6)','(t1.q_binary7)','(t1.q_binary8)','(t1.q_binary_null)',
+                        '(t2.q_binary)','(t2.q_binary1)','(t2.q_binary2)','(t2.q_binary3)','(t2.q_binary4)','(t2.q_binary5)','(t2.q_binary6)','(t2.q_binary7)','(t2.q_binary8)','(t2.q_binary_null)']
+
+            column_j1 = str(random.sample(fun_fix_column_j,i)).replace("[","").replace("]","").replace("'","")
+            fun_column_join_1 = str(random.sample(str_functions,1))+'('+'\"'+separator+'\",'+column_j1+')'
+            str_fun_join_1 = str(fun_column_join_1).replace("[","").replace("]","").replace("'","")
+
+            column_j2 = str(random.sample(fun_fix_column_j,i)).replace("[","").replace("]","").replace("'","")
+            fun_column_join_2 = str(random.sample(str_functions,1))+'('+'\"'+separator+'\",'+column_j2+')'
+            str_fun_join_2 = str(fun_column_join_2).replace("[","").replace("]","").replace("'","")
+
+            fun_fix_column_s = ['q_nchar','q_nchar1','q_nchar2','q_nchar3','q_nchar4','q_nchar5','q_nchar6','q_nchar7','q_nchar8','loc','q_nchar_null',
+                        'q_binary','q_binary1','q_binary2','q_binary3','q_binary4','q_binary5','q_binary6','q_binary7','q_binary8','q_binary_null']
+
+            column_s1 = str(random.sample(fun_fix_column_s,i)).replace("[","").replace("]","").replace("'","")
+            fun_column_s_1 = str(random.sample(str_functions,1))+'('+'\"'+separator+'\",'+column_s1+')'
+            str_fun_s_1 = str(fun_column_s_1).replace("[","").replace("]","").replace("'","")
+
+            column_s2 = str(random.sample(fun_fix_column_s,i)).replace("[","").replace("]","").replace("'","")
+            fun_column_s_2 = str(random.sample(str_functions,1))+'('+'\"'+separator+'\",'+column_s2+')'
+            str_fun_s_2 = str(fun_column_s_2).replace("[","").replace("]","").replace("'","")
+
+            fun_fix_column_s_j = ['(t1.q_nchar)','(t1.q_nchar1)','(t1.q_nchar2)','(t1.q_nchar3)','(t1.q_nchar4)','(t1.q_nchar5)','(t1.q_nchar6)','(t1.q_nchar7)','(t1.q_nchar8)','(t1.q_nchar_null)','(t1.loc)',
+                        '(t2.q_nchar)','(t2.q_nchar1)','(t2.q_nchar2)','(t2.q_nchar3)','(t2.q_nchar4)','(t2.q_nchar5)','(t2.q_nchar6)','(t2.q_nchar7)','(t2.q_nchar8)','(t2.q_nchar_null)','(t2.loc)', +
'(t1.q_binary)','(t1.q_binary1)','(t1.q_binary2)','(t1.q_binary3)','(t1.q_binary4)','(t1.q_binary5)','(t1.q_binary6)','(t1.q_binary7)','(t1.q_binary8)','(t1.q_binary_null)', + '(t2.q_binary)','(t2.q_binary1)','(t2.q_binary2)','(t2.q_binary3)','(t2.q_binary4)','(t2.q_binary5)','(t2.q_binary6)','(t2.q_binary7)','(t2.q_binary8)','(t2.q_binary_null)'] + + column_j_s1 = str(random.sample(fun_fix_column_s_j,i)).replace("[","").replace("]","").replace("'","") + fun_column_join_s_1 = str(random.sample(str_functions,1))+'('+'\"'+separator+'\",'+column_j_s1+')' + str_fun_join_s_1 = str(fun_column_join_s_1).replace("[","").replace("]","").replace("'","") + + column_j_s2 = str(random.sample(fun_fix_column_s_j,i)).replace("[","").replace("]","").replace("'","") + fun_column_join_s_2 = str(random.sample(str_functions,1))+'('+'\"'+separator+'\",'+column_j_s2+')' + str_fun_join_s_2 = str(fun_column_join_s_2).replace("[","").replace("]","").replace("'","") + + + tdSql.query("select 1-1 as str_nest from stable_1 limit 1;") + for i in range(self.fornum): + if (strlist == ['LTRIM','RTRIM','LOWER','UPPER']) or (strlist == ['SUBSTR']) or (strlist == ['CONCAT']) or (strlist == ['CONCAT_WS']) : + sql = "select ts , LTRIM(asct1), LOWER(asct1), RTRIM(asct2), UPPER(asct2) from ( select " + sql += "%s as asct1, " % str_fun_1 + sql += "%s as asct2, " % str_fun_2 + sql += "%s, " % random.choice(self.s_s_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts from regular_table_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.order_where) + sql += ");" + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + tdSql.checkRows(100) + self.cur1.execute(sql) + elif (strlist == ['LENGTH','CHAR_LENGTH']): + sql = "select sum(asct1), min(asct1), max(asct2), avg(asct2) from ( select " + sql += "%s as asct1, " % str_fun_1 + sql += "%s as asct2, " % str_fun_2 + sql += "%s, " % random.choice(self.s_s_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts from regular_table_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.order_where) + sql += ");" + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + + tdSql.query("select 1-2 as str_nest from stable_1 limit 1;") + for i in range(self.fornum): + if (strlist == ['LTRIM','RTRIM','LOWER','UPPER']) or (strlist == ['SUBSTR']) or (strlist == ['CONCAT']) or (strlist == ['CONCAT_WS']) : + sql = "select ts , LTRIM(asct1), LOWER(asct1), RTRIM(asct2), UPPER(asct2) from ( select " + sql += "%s as asct1, " % str_fun_1 + sql += "%s, " % random.choice(self.s_s_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts ts from regular_table_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s )" % random.choice(self.order_where) + sql += "%s " % random.choice(self.unionall_or_union) + sql += "select ts , asct2 from ( select " + sql += "%s as asct2, " % str_fun_2 + sql += "%s, " % random.choice(self.s_s_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts ts from regular_table_1 where " + sql += "%s " % random.choice(self.q_where) + #sql += "%s " % random.choice(having_support) + sql += "%s " % random.choice(self.order_where) + sql += "%s " % random.choice(self.limit1_where) + sql += ")" + tdLog.info(sql) + tdLog.info(len(sql)) + #TD-15437 tdSql.query(sql) + #TD-15437 self.cur1.execute(sql) + elif (strlist == ['LENGTH','CHAR_LENGTH']): + sql = "select sum(asct1), min(asct1), max(asct2), avg(asct2) from ( select " + 
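# LENGTH/CHAR_LENGTH return numeric lengths, so this branch aggregates them with sum/min/max/avg in the outer query +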
sql += "%s as asct1, " % str_fun_1 + sql += "%s, " % random.choice(self.s_s_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts ts from regular_table_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s )" % random.choice(self.order_where) + sql += "%s " % random.choice(self.unionall_or_union) + sql += "select ts , asct2 from ( select " + sql += "%s as asct2, " % str_fun_2 + sql += "%s, " % random.choice(self.s_s_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts ts from regular_table_1 where " + sql += "%s " % random.choice(self.q_where) + #sql += "%s " % random.choice(having_support) + sql += "%s " % random.choice(self.order_where) + sql += "%s " % random.choice(self.limit1_where) + sql += ")" + tdLog.info(sql) + tdLog.info(len(sql)) + #TD-15437 tdSql.query(sql) + #TD-15437 self.cur1.execute(sql) + + tdSql.query("select 1-3 as str_nest from stable_1 limit 1;") + for i in range(self.fornum): + if (strlist == ['LTRIM','RTRIM','LOWER','UPPER']) or (strlist == ['SUBSTR']) or (strlist == ['CONCAT']) or (strlist == ['CONCAT_WS']): + sql = "select ts , LTRIM(asct1), LOWER(asct1), RTRIM(asct2), UPPER(asct2) from ( select " + sql += "%s as asct1, ts ," % str_fun_1 + sql += "%s as asct2, " % str_fun_2 + sql += "%s, " % random.choice(self.s_s_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts from regular_table_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s select " % random.choice(self.unionall_or_union) + sql += "%s as asct2, ts ," % str_fun_2 + sql += "%s as asct1, " % str_fun_1 + sql += "%s, " % random.choice(self.s_s_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts from regular_table_2 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.order_where) + sql += ")" + tdLog.info(sql) + tdLog.info(len(sql)) + #TD-15473 tdSql.query(sql) + #self.cur1.execute(sql) + elif (strlist == ['LENGTH','CHAR_LENGTH']): + sql = "select sum(asct1), min(asct1), max(asct2), avg(asct2) from ( select " + sql += "%s as asct1, ts ," % str_fun_1 + sql += "%s as asct2, " % str_fun_2 + sql += "%s, " % random.choice(self.s_s_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts from regular_table_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s select " % random.choice(self.unionall_or_union) + sql += "%s as asct2, ts ," % str_fun_2 + sql += "%s as asct1, " % str_fun_1 + sql += "%s, " % random.choice(self.s_s_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts from regular_table_2 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.order_where) + sql += ")" + tdLog.info(sql) + tdLog.info(len(sql)) + #TD-15473 tdSql.query(sql) + #self.cur1.execute(sql) + + tdSql.query("select 1-4 as str_nest from stable_1 limit 1;") + for i in range(self.fornum): + if (strlist == ['LTRIM','RTRIM','LOWER','UPPER']) or (strlist == ['SUBSTR']) or (strlist == ['CONCAT']) or (strlist == ['CONCAT_WS']): + sql = "select ts , LTRIM(asct1), LOWER(asct1), RTRIM(asct2), UPPER(asct2) from ( select t1.ts as ts," + sql += "%s as asct2, " % str_fun_join_1 + sql += "%s as asct1, " % str_fun_join_2 + sql += "%s, " % str_fun_join_1 + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) + sql += "t2.ts from regular_table_1 t1 , regular_table_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.q_u_where) + sql += "and %s " % random.choice(self.q_u_or_where) + sql += "%s " % 
random.choice(self.order_u_where) + sql += ");" + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + tdSql.checkRows(100) + self.cur1.execute(sql) + elif (strlist == ['LENGTH','CHAR_LENGTH']): + sql = "select sum(asct1), min(asct1), max(asct2), avg(asct2) from ( select t1.ts as ts," + sql += "%s as asct2, " % str_fun_join_1 + sql += "%s as asct1, " % str_fun_join_2 + sql += "%s, " % str_fun_join_1 + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) + sql += "t2.ts from regular_table_1 t1 , regular_table_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.q_u_where) + sql += "and %s " % random.choice(self.q_u_or_where) + sql += "%s " % random.choice(self.order_u_where) + sql += ");" + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + + tdSql.query("select 1-5 as str_nest from stable_1 limit 1;") + for i in range(self.fornum): + if (strlist == ['LTRIM','RTRIM','LOWER','UPPER']) or (strlist == ['SUBSTR']) or (strlist == ['CONCAT']) or (strlist == ['CONCAT_WS']): + sql = "select ts ," + sql += "%s, " % str_fun_1 + sql += "%s, " % random.choice(self.q_select) + sql += "%s, " % random.choice(self.q_select) + sql += "%s " % str_fun_2 + sql += " from ( select * from regular_table_1 ) where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.order_where) + sql += " ;" + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + tdSql.checkRows(100) + self.cur1.execute(sql) + elif (strlist == ['LENGTH','CHAR_LENGTH']): + sql = "select ts ," + sql += "%s, " % str_fun_1 + sql += "%s, " % random.choice(self.q_select) + sql += "%s, " % random.choice(self.q_select) + sql += "%s " % str_fun_2 + sql += " from ( select * from regular_table_1 ) where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.order_where) + sql += " ;" + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + tdSql.checkRows(100) + self.cur1.execute(sql) + + tdSql.query("select 1-6 as str_nest from stable_1 limit 1;") + for i in range(self.fornum): + if (strlist == ['LTRIM','RTRIM','LOWER','UPPER']) or (strlist == ['SUBSTR']) or (strlist == ['CONCAT']) or (strlist == ['CONCAT_WS']): + sql = "select ts , LTRIM(asct1), LOWER(asct1), RTRIM(asct2), UPPER(asct2) from ( select t1.ts as ts," + sql += "%s as asct2, " % str_fun_join_1 + sql += "%s as asct1, " % str_fun_join_2 + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) + sql += "%s, " % str_fun_join_1 + sql += "t2.ts from regular_table_1 t1 , regular_table_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.q_u_where) + sql += "and %s )" % random.choice(self.q_u_or_where) + sql += "%s " % random.choice(self.order_where) + sql += "%s " % random.choice(self.limit1_where) + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + elif (strlist == ['LENGTH','CHAR_LENGTH']): + sql = "select sum(asct1), min(asct1), max(asct2), avg(asct2) from ( select t1.ts as ts," + sql += "%s as asct2, " % str_fun_join_1 + sql += "%s as asct1, " % str_fun_join_2 + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) + sql += "%s, " % str_fun_join_1 + sql += "t2.ts from regular_table_1 t1 , regular_table_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.q_u_where) + sql += "and %s )" % random.choice(self.q_u_or_where) + sql += "%s " % random.choice(self.limit1_where) + tdLog.info(sql) 
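+                # the generated statement and its length are both logged so oversized random SQL can be spotted in the test log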
+ tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + + tdSql.query("select 1-7 as str_nest from stable_1 limit 1;") + for i in range(self.fornum): + if (strlist == ['LTRIM','RTRIM','LOWER','UPPER']) or (strlist == ['SUBSTR']) or (strlist == ['CONCAT']) or (strlist == ['CONCAT_WS']): + sql = "select ts , LTRIM(asct1), LOWER(asct1), RTRIM(asct2), UPPER(asct2) from ( select " + sql += "%s as asct1, ts ," % str_fun_s_1 + sql += "%s as asct2, " % str_fun_s_2 + sql += "%s, " % random.choice(self.q_select) + sql += "%s, " % random.choice(self.t_select) + sql += "ts from stable_1 where " + sql += "%s " % random.choice(self.qt_where) + sql += "%s " % random.choice(self.order_where) + sql += ") ;" + tdLog.info(sql) + tdLog.info(len(sql)) + # tdSql.query(sql) + # tdSql.checkRows(300) + # self.cur1.execute(sql)# TD-16039 + elif (strlist == ['LENGTH','CHAR_LENGTH']): + sql = "select sum(asct1), min(asct1), max(asct2), avg(asct2) from ( select " + sql += "%s as asct1, ts ," % str_fun_s_1 + sql += "%s as asct2, " % str_fun_s_2 + sql += "%s, " % random.choice(self.q_select) + sql += "%s, " % random.choice(self.t_select) + sql += "ts from stable_1 where " + sql += "%s " % random.choice(self.qt_where) + sql += "%s " % random.choice(self.order_where) + sql += ") ;" + tdLog.info(sql) + tdLog.info(len(sql)) + # tdSql.query(sql) + # self.cur1.execute(sql)# TD-16039 + + tdSql.query("select 1-8 as str_nest from stable_1 limit 1;") + for i in range(self.fornum): + if (strlist == ['LTRIM','RTRIM','LOWER','UPPER']) or (strlist == ['SUBSTR']) or (strlist == ['CONCAT']) or (strlist == ['CONCAT_WS']): + sql = "select ts, LTRIM(asct1), LOWER(asct1), RTRIM(asct2), UPPER(asct2) " + sql += "from ( select " + sql += "%s, " % random.choice(self.s_s_select) + sql += "%s as asct1, ts ," % str_fun_s_1 + sql += "%s as asct2, " % str_fun_s_2 + sql += "%s, " % random.choice(self.q_select) + sql += "%s, " % random.choice(self.t_select) + sql += "ts from stable_1 where " + sql += "%s " % random.choice(self.qt_where) + sql += "%s " % random.choice(self.order_where) + sql += ") ;" + tdLog.info(sql) + tdLog.info(len(sql)) + # tdSql.query(sql) + # tdSql.checkRows(300) + # self.cur1.execute(sql)# TD-16039 + elif (strlist == ['LENGTH','CHAR_LENGTH']): + sql = "select sum(asct1), min(asct1), max(asct2), avg(asct2) " + sql += "from ( select " + sql += "%s, " % random.choice(self.s_s_select) + sql += "%s as asct1, ts ," % str_fun_s_1 + sql += "%s as asct2, " % str_fun_s_2 + sql += "%s, " % random.choice(self.q_select) + sql += "%s, " % random.choice(self.t_select) + sql += "ts from stable_1 where " + sql += "%s " % random.choice(self.qt_where) + sql += "%s " % random.choice(self.order_where) + sql += ") ;" + tdLog.info(sql) + tdLog.info(len(sql)) + # tdSql.query(sql) + # self.cur1.execute(sql)# TD-16039 + + tdSql.query("select 1-9 as str_nest from stable_1 limit 1;") + for i in range(self.fornum): + if (strlist == ['LTRIM','RTRIM','LOWER','UPPER']) or (strlist == ['SUBSTR']) or (strlist == ['CONCAT']) or (strlist == ['CONCAT_WS']): + sql = "select ts , LTRIM(asct1), LOWER(asct1), RTRIM(asct2), UPPER(asct2) from ( select t1.ts as ts," + sql += "%s as asct2, " % str_fun_join_s_1 + sql += "%s as asct1, " % str_fun_join_s_2 + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) + sql += "t2.ts from stable_1 t1 , stable_2 t2 where t1.ts = t2.ts and " + sql += "%s " % 
random.choice(self.t_join_where) + sql += "and %s " % random.choice(self.t_u_where) + sql += "and %s " % random.choice(self.t_u_or_where) + sql += "%s " % random.choice(self.order_u_where) + sql += "%s " % random.choice(self.limit1_where) + sql += ") ;" + tdLog.info(sql) + tdLog.info(len(sql)) + # tdSql.query(sql) + # self.cur1.execute(sql) # TD-16039 + elif (strlist == ['LENGTH','CHAR_LENGTH']): + sql = "select sum(asct1), min(asct1), max(asct2), avg(asct2) from ( select t1.ts as ts," + sql += "%s as asct2, " % str_fun_join_s_1 + sql += "%s as asct1, " % str_fun_join_s_2 + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) + sql += "t2.ts from stable_1 t1 , stable_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.t_join_where) + sql += "and %s " % random.choice(self.t_u_where) + sql += "and %s " % random.choice(self.t_u_or_where) + sql += "%s " % random.choice(self.order_u_where) + sql += "%s " % random.choice(self.limit1_where) + sql += ") ;" + tdLog.info(sql) + tdLog.info(len(sql)) + # tdSql.query(sql) + # self.cur1.execute(sql)# TD-16039 + + self.restartDnodes() + tdSql.query("select 1-10 as str_nest from stable_1 limit 1;") + for i in range(self.fornum): + if (strlist == ['LTRIM','RTRIM','LOWER','UPPER']) or (strlist == ['SUBSTR']) or (strlist == ['CONCAT']) or (strlist == ['CONCAT_WS']): + sql = "select ts , LTRIM(asct1), LOWER(asct1), RTRIM(asct2), UPPER(asct2) from ( select " + sql += "%s as asct1, ts ," % str_fun_s_1 + sql += "%s as asct2, " % str_fun_s_2 + sql += "%s, " % random.choice(self.s_r_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts from stable_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.order_where) + sql += ") %s " % random.choice(self.unionall_or_union) + sql += "select ts , max(asct2) from ( select " + sql += "%s as asct1, ts ," % str_fun_1 + sql += "%s as asct2, " % str_fun_2 + sql += "%s, " % random.choice(self.s_r_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts from stable_2 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.order_where) + sql += "%s " % random.choice(self.limit1_where) + sql += ")" + tdLog.info(sql) + tdLog.info(len(sql)) + #TD-15437 tdSql.query(sql) + #TD-15437 self.cur1.execute(sql) + elif (strlist == ['LENGTH','CHAR_LENGTH']): + sql = "select sum(asct1), min(asct1), max(asct2), avg(asct2) from ( select " + sql += "%s as asct1, ts ," % str_fun_s_1 + sql += "%s as asct2, " % str_fun_s_2 + sql += "%s, " % random.choice(self.s_r_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts from stable_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.order_where) + sql += ") %s " % random.choice(self.unionall_or_union) + sql += "select ts , max(asct2) from ( select " + sql += "%s as asct1, ts ," % str_fun_1 + sql += "%s as asct2, " % str_fun_2 + sql += "%s, " % random.choice(self.s_r_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts from stable_2 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.order_where) + sql += "%s " % random.choice(self.limit1_where) + sql += ")" + tdLog.info(sql) + tdLog.info(len(sql)) + #TD-15437 tdSql.query(sql) + #TD-15437 self.cur1.execute(sql) + + #3 inter union not support + tdSql.query("select 1-11 as str_nest from stable_1 limit 
1;") + for i in range(self.fornum): + if (strlist == ['LTRIM','RTRIM','LOWER','UPPER']) or (strlist == ['SUBSTR']) or (strlist == ['CONCAT']) or (strlist == ['CONCAT_WS']): + sql = "select ts , LTRIM(asct1), LOWER(asct1), RTRIM(asct2), UPPER(asct2) from ( select " + sql += "%s as asct1, ts ," % str_fun_s_1 + sql += "%s as asct2, " % str_fun_s_2 + sql += "%s, " % random.choice(self.s_r_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts from stable_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.order_where) + #sql += "%s " % random.choice(limit1_where) + sql += " %s " % random.choice(self.unionall_or_union) + sql += " select " + sql += "%s as asct1, ts ," % str_fun_1 + sql += "%s as asct2, " % str_fun_2 + sql += "%s, " % random.choice(self.s_r_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts from stable_2 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.order_where) + sql += "%s " % random.choice(self.limit1_where) + sql += ")" + tdLog.info(sql) + tdLog.info(len(sql)) + #TD-15837 tdSql.query(sql) + # self.cur1.execute(sql) + elif (strlist == ['LENGTH','CHAR_LENGTH']): + sql = "select sum(asct1), min(asct1), max(asct2), avg(asct2) from ( select " + sql += "%s as asct1, ts ," % str_fun_s_1 + sql += "%s as asct2, " % str_fun_s_2 + sql += "%s, " % random.choice(self.s_r_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts from stable_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.order_where) + #sql += "%s " % random.choice(limit1_where) + sql += " %s " % random.choice(self.unionall_or_union) + sql += " select " + sql += "%s as asct1, ts ," % str_fun_1 + sql += "%s as asct2, " % str_fun_2 + sql += "%s, " % random.choice(self.s_r_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts from stable_2 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.order_where) + sql += "%s " % random.choice(self.limit1_where) + sql += ")" + tdLog.info(sql) + tdLog.info(len(sql)) + #TD-15837 tdSql.query(sql) + # self.cur1.execute(sql) + + tdSql.query("select 1-12 as str_nest from stable_1 limit 1;") + for i in range(self.fornum): + if (strlist == ['LTRIM','RTRIM','LOWER','UPPER']) or (strlist == ['SUBSTR']) or (strlist == ['CONCAT']) or (strlist == ['CONCAT_WS']): + sql = "select ts , LTRIM(asct1), LOWER(asct1), RTRIM(asct2), UPPER(asct2) from ( select t1.ts as ts," + sql += "%s as asct2, " % str_fun_join_s_1 + sql += "%s as asct1, " % str_fun_join_s_2 + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) + sql += "t2.ts from stable_1 t1 , stable_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.t_u_where) + sql += "and %s " % random.choice(self.t_u_or_where) + sql += "%s " % random.choice(self.order_u_where) + sql += "%s " % random.choice(self.limit1_where) + sql += ");" + tdLog.info(sql) + tdLog.info(len(sql)) + # tdSql.query(sql) + # self.cur1.execute(sql)# TD-16039 + elif (strlist == ['LENGTH','CHAR_LENGTH']): + sql = "select sum(asct1), min(asct1), max(asct2), avg(asct2) from ( select t1.ts as ts," + sql += "%s as asct2, " % str_fun_join_s_1 + sql += "%s as asct1, " % str_fun_join_s_2 + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % 
random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) + sql += "t2.ts from stable_1 t1 , stable_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.t_u_where) + sql += "and %s " % random.choice(self.t_u_or_where) + sql += "%s " % random.choice(self.order_u_where) + sql += "%s " % random.choice(self.limit1_where) + sql += ");" + tdLog.info(sql) + tdLog.info(len(sql)) + # tdSql.query(sql) + # self.cur1.execute(sql)# TD-16039 + + tdSql.query("select 1-13 as str_nest from stable_1 limit 1;") + for i in range(self.fornum): + if (strlist == ['LTRIM','RTRIM','LOWER','UPPER']) or (strlist == ['SUBSTR']) or (strlist == ['CONCAT']) or (strlist == ['CONCAT_WS']): + sql = "select ts ," + sql += "%s, " % str_fun_1 + sql += "%s, " % random.choice(self.q_select) + sql += "%s, " % random.choice(self.q_select) + sql += "%s, " % str_fun_2 + sql += "%s " % random.choice(self.t_select) + sql += " from ( select * from stable_1 where " + sql += "%s " % random.choice(self.qt_where) + sql += "%s " % random.choice(self.order_where) + sql += ") ;" + tdLog.info(sql) + tdLog.info(len(sql)) + # tdSql.query(sql) + # tdSql.checkRows(300) + # self.cur1.execute(sql) # TD-16039 + elif (strlist == ['LENGTH','CHAR_LENGTH']): + sql = "select ts ," + sql += "%s, " % str_fun_1 + sql += "%s, " % random.choice(self.q_select) + sql += "%s, " % random.choice(self.q_select) + sql += "%s, " % str_fun_2 + sql += "%s " % random.choice(self.t_select) + sql += " from ( select * from stable_1 where " + sql += "%s " % random.choice(self.qt_where) + sql += "%s " % random.choice(self.order_where) + sql += ") ;" + tdLog.info(sql) + tdLog.info(len(sql)) + # tdSql.query(sql) + # tdSql.checkRows(300) + # self.cur1.execute(sql)# TD-16039 + + tdSql.query("select 1-14 as str_nest from stable_1 limit 1;") + for i in range(self.fornum): + if (strlist == ['LTRIM','RTRIM','LOWER','UPPER']) or (strlist == ['SUBSTR']) or (strlist == ['CONCAT']) or (strlist == ['CONCAT_WS']): + sql = "select LTRIM(asct1), LOWER(asct1), RTRIM(asct2), UPPER(asct2) from ( select " + sql += "%s as asct1, " % str_fun_s_1 + sql += "%s as asct2" % str_fun_s_2 + sql += " from stable_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.partiton_where) + sql += "%s " % random.choice(self.order_desc_where) + sql += "%s " % random.choice([self.limit_where[2] , self.limit_where[3]] ) + sql += " ) ;" + tdLog.info(sql) + tdLog.info(len(sql)) + # tdSql.query(sql) + # self.cur1.execute(sql) # TD-16039 + elif (strlist == ['LENGTH','CHAR_LENGTH']): + sql = "select sum(asct1), min(asct1), max(asct2), avg(asct2) from ( select " + sql += "%s as asct1, " % str_fun_s_1 + sql += "%s as asct2" % str_fun_s_2 + sql += " from stable_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.partiton_where) + sql += "%s " % random.choice(self.order_desc_where) + sql += "%s " % random.choice([self.limit_where[2] , self.limit_where[3]] ) + sql += " ) ;" + tdLog.info(sql) + tdLog.info(len(sql)) + # tdSql.query(sql) + # self.cur1.execute(sql) # TD-16039 + + tdSql.query("select 1-15 as str_nest from stable_1 limit 1;") + for i in range(self.fornum): + if (strlist == ['LTRIM','RTRIM','LOWER','UPPER']) or (strlist == ['SUBSTR']) or (strlist == ['CONCAT']) or (strlist == ['CONCAT_WS']): + sql = "select ts , LTRIM(asct1), LOWER(asct1), RTRIM(asct2), UPPER(asct2) from ( select t1.ts as ts," + sql += "%s as asct2, " % str_fun_join_s_1 + sql += "%s as asct1, " % str_fun_join_s_2 + sql += "t1.%s, " % 
random.choice(self.q_select) + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) + sql += "t2.%s " % random.choice(self.q_select) + sql += "from stable_1 t1, stable_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.t_join_where) + sql += " and %s " % random.choice(self.qt_u_or_where) + sql += "%s " % random.choice(self.partiton_where_j) + sql += "%s " % random.choice(self.slimit1_where) + sql += ") " + sql += "%s " % random.choice(self.order_desc_where) + sql += "%s ;" % random.choice(self.limit_u_where) + tdLog.info(sql) + tdLog.info(len(sql)) + #TD-15955 tdSql.query(sql) + #TD-15955 self.cur1.execute(sql) + elif (strlist == ['LENGTH','CHAR_LENGTH']): + sql = "select sum(asct1), min(asct1), max(asct2), avg(asct2) from ( select t1.ts as ts," + sql += "%s as asct2, " % str_fun_join_s_1 + sql += "%s as asct1, " % str_fun_join_s_2 + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) + sql += "t2.%s " % random.choice(self.q_select) + sql += "from stable_1 t1, stable_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.t_join_where) + sql += " and %s " % random.choice(self.qt_u_or_where) + sql += "%s " % random.choice(self.partiton_where_j) + sql += "%s " % random.choice(self.slimit1_where) + sql += ") " + sql += "%s " % random.choice(self.order_desc_where) + sql += "%s ;" % random.choice(self.limit_u_where) + tdLog.info(sql) + tdLog.info(len(sql)) + #TD-15955 tdSql.query(sql) + #TD-15955 self.cur1.execute(sql) + + #taos -f sql + startTime_taos_f = time.time() + print("taos -f %s sql start!" %strlist) + taos_cmd1 = "taos -f %s/%s.sql" % (self.testcasePath,self.testcaseFilename) + _ = subprocess.check_output(taos_cmd1, shell=True).decode("utf-8") + print("taos -f %s sql over!" 
%strlist) + endTime_taos_f = time.time() + print("taos_f total time %ds" % (endTime_taos_f - startTime_taos_f)) + + print("=========%s====over=============" %strlist) + + def time_nest(self,timelist): + + print("==========%s===start=============" %timelist) + os.system("rm -rf %s/%s.sql" % (self.testcasePath,self.testcaseFilename)) + + self.dropandcreateDB_random("%s" %self.db_nest, 1) + + if (timelist == ['NOW','TODAY']) or (timelist == ['TIMEZONE']): + time_functions = timelist + fun_fix_column = ['()'] + fun_column_1 = random.sample(time_functions,1)+random.sample(fun_fix_column,1) + time_fun_1 = str(fun_column_1).replace("[","").replace("]","").replace("'","").replace(", ","") + fun_column_2 = random.sample(time_functions,1)+random.sample(fun_fix_column,1) + time_fun_2 = str(fun_column_2).replace("[","").replace("]","").replace("'","").replace(", ","") + + fun_fix_column_j = ['()'] + fun_column_join_1 = random.sample(time_functions,1)+random.sample(fun_fix_column_j,1) + time_fun_join_1 = str(fun_column_join_1).replace("[","").replace("]","").replace("'","").replace(", ","") + fun_column_join_2 = random.sample(time_functions,1)+random.sample(fun_fix_column_j,1) + time_fun_join_2 = str(fun_column_join_2).replace("[","").replace("]","").replace("'","").replace(", ","") + + elif (timelist == ['TIMETRUNCATE']): + time_functions = timelist + + t = time.time() + t_to_s = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(t)) + fun_fix_column = ['q_ts','ts','_c0','_C0','_rowts','1600000000000','1600000000000000','1600000000000000000', + '%d' %t, '%d000' %t, '%d000000' %t,'t_to_s'] + + timeunits = ['1u' , '1a' ,'1s', '1m' ,'1h', '1d'] + timeunit = str(random.sample(timeunits,1)).replace("[","").replace("]","").replace("'","") + + column_1 = ['(%s,timeutil)'%(random.sample(fun_fix_column,1))] + fun_column_1 = random.sample(time_functions,1)+random.sample(column_1,1) + time_fun_1 = str(fun_column_1).replace("[","").replace("]","").replace("'","").replace(", ","").replace("\"","").replace("t_to_s","'t_to_s'") + time_fun_1 = str(time_fun_1).replace("timeutil","%s" %timeunit).replace("t_to_s","%s" %t_to_s) + + column_2 = ['(%s,timeutil)'%(random.sample(fun_fix_column,1))] + fun_column_2 = random.sample(time_functions,1)+random.sample(column_2,1) + time_fun_2 = str(fun_column_2).replace("[","").replace("]","").replace("'","").replace(", ","").replace("\"","").replace("t_to_s","'t_to_s'") + time_fun_2 = str(time_fun_2).replace("timeutil","%s" %timeunit).replace("t_to_s","%s" %t_to_s) + + + fun_fix_column_j = ['(t1.q_ts)','(t1.ts)', '(t2.q_ts)','(t2.ts)','(1600000000000)','(1600000000000000)','(1600000000000000000)', + '(%d)' %t, '(%d000)' %t, '(%d000000)' %t,'t_to_s'] + + column_j1 = ['(%s,timeutil)'%(random.sample(fun_fix_column_j,1))] + fun_column_join_1 = random.sample(time_functions,1)+random.sample(column_j1,1) + time_fun_join_1 = str(fun_column_join_1).replace("[","").replace("]","").replace("'","").replace(", ","").replace("\"","").replace("t_to_s","'t_to_s'") + time_fun_join_1 = str(time_fun_join_1).replace("timeutil","%s" %timeunit).replace("t_to_s","%s" %t_to_s) + + column_j2 = ['(%s,timeutil)'%(random.sample(fun_fix_column_j,1))] + fun_column_join_2 = random.sample(time_functions,1)+random.sample(column_j2,1) + time_fun_join_2 = str(fun_column_join_2).replace("[","").replace("]","").replace("'","").replace(", ","").replace("\"","").replace("t_to_s","'t_to_s'") + time_fun_join_2 = str(time_fun_join_2).replace("timeutil","%s" %timeunit).replace("t_to_s","%s" %t_to_s) + + elif (timelist == 
['TO_ISO8601']): + time_functions = timelist + + t = time.time() + fun_fix_column = ['(now())','(ts)','(q_ts)','(_rowts)','(_c0)','(_C0)', + '(1600000000000)','(1600000000000000)','(1600000000000000000)', + '(%d)' %t, '(%d000)' %t, '(%d000000)' %t] + + fun_column_1 = random.sample(time_functions,1)+random.sample(fun_fix_column,1) + time_fun_1 = str(fun_column_1).replace("[","").replace("]","").replace("'","").replace(", ","") + + fun_column_2 = random.sample(time_functions,1)+random.sample(fun_fix_column,1) + time_fun_2 = str(fun_column_2).replace("[","").replace("]","").replace("'","").replace(", ","") + + fun_fix_column_j = ['(t1.q_ts)','(t1.ts)', '(t2.q_ts)','(t2.ts)','(1600000000000)','(1600000000000000)','(1600000000000000000)','(now())', + '(%d)' %t, '(%d000)' %t, '(%d000000)' %t] + + fun_column_join_1 = random.sample(time_functions,1)+random.sample(fun_fix_column_j,1) + time_fun_join_1 = str(fun_column_join_1).replace("[","").replace("]","").replace("'","").replace(", ","") + + fun_column_join_2 = random.sample(time_functions,1)+random.sample(fun_fix_column_j,1) + time_fun_join_2 = str(fun_column_join_2).replace("[","").replace("]","").replace("'","").replace(", ","") + + elif (timelist == ['TO_UNIXTIMESTAMP']): + time_functions = timelist + + t = time.time() + t_to_s = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(t)) + fun_fix_column = ['(q_nchar)','(q_nchar1)','(q_nchar2)','(q_nchar3)','(q_nchar4)','(q_nchar_null)','(q_binary)','(q_binary5)','(q_binary6)','(q_binary7)','(q_binary8)','(q_binary_null)','(t_to_s)'] + + fun_column_1 = random.sample(time_functions,1)+random.sample(fun_fix_column,1) + time_fun_1 = str(fun_column_1).replace("[","").replace("]","").replace("'","").replace(", ","").replace("t_to_s","'t_to_s'") + time_fun_1 = str(time_fun_1).replace("t_to_s","%s" %t_to_s) + + fun_column_2 = random.sample(time_functions,1)+random.sample(fun_fix_column,1) + time_fun_2 = str(fun_column_2).replace("[","").replace("]","").replace("'","").replace(", ","").replace("t_to_s","'t_to_s'") + time_fun_2 = str(time_fun_2).replace("t_to_s","%s" %t_to_s) + + fun_fix_column_j = ['(t1.q_nchar)','(t1.q_binary)', '(t2.q_nchar)','(t2.q_binary)','(t_to_s)'] + + fun_column_join_1 = random.sample(time_functions,1)+random.sample(fun_fix_column_j,1) + time_fun_join_1 = str(fun_column_join_1).replace("[","").replace("]","").replace("'","").replace(", ","").replace("t_to_s","'t_to_s'") + time_fun_join_1 = str(time_fun_join_1).replace("t_to_s","%s" %t_to_s) + + fun_column_join_2 = random.sample(time_functions,1)+random.sample(fun_fix_column_j,1) + time_fun_join_2 = str(fun_column_join_2).replace("[","").replace("]","").replace("'","").replace(", ","").replace("t_to_s","'t_to_s'") + time_fun_join_2 = str(time_fun_join_2).replace("t_to_s","%s" %t_to_s) + + elif (timelist == ['TIMEDIFF']): + time_functions = timelist + + t = time.time() + t_to_s = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(t)) + fun_fix_column = ['(q_nchar)','(q_nchar1)','(q_nchar2)','(q_nchar3)','(q_nchar4)','(q_nchar_null)','(q_binary)','(q_binary5)','(q_binary6)','(q_binary7)','(q_binary8)','(q_binary_null)','(t_to_s)'] + + fun_column_1 = random.sample(time_functions,1)+random.sample(fun_fix_column,1) + time_fun_1 = str(fun_column_1).replace("[","").replace("]","").replace("'","").replace(", ","").replace("t_to_s","'t_to_s'") + time_fun_1 = str(time_fun_1).replace("t_to_s","%s" %t_to_s) + + fun_column_2 = random.sample(time_functions,1)+random.sample(fun_fix_column,1) + time_fun_2 = 
str(fun_column_2).replace("[","").replace("]","").replace("'","").replace(", ","").replace("t_to_s","'t_to_s'")
+            time_fun_2 = str(time_fun_2).replace("t_to_s","%s" %t_to_s)
+
+            fun_fix_column_j = ['(t1.q_nchar)','(t1.q_binary)', '(t2.q_nchar)','(t2.q_binary)','(t_to_s)']
+
+            fun_column_join_1 = random.sample(time_functions,1)+random.sample(fun_fix_column_j,1)
+            time_fun_join_1 = str(fun_column_join_1).replace("[","").replace("]","").replace("'","").replace(", ","").replace("t_to_s","'t_to_s'")
+            time_fun_join_1 = str(time_fun_join_1).replace("t_to_s","%s" %t_to_s)
+
+            fun_column_join_2 = random.sample(time_functions,1)+random.sample(fun_fix_column_j,1)
+            time_fun_join_2 = str(fun_column_join_2).replace("[","").replace("]","").replace("'","").replace(", ","").replace("t_to_s","'t_to_s'")
+            time_fun_join_2 = str(time_fun_join_2).replace("t_to_s","%s" %t_to_s)
+
+        elif (timelist == ['ELAPSED']):
+            time_functions = timelist
+
+            fun_fix_column = ['(ts)','(q_ts)','(_c0)','(_C0)','(_rowts)','(ts,time_unit)','(_c0,time_unit)','(_C0,time_unit)','(_rowts,time_unit)']
+
+            time_units = ['nums','numm','numh','numd','numa']
+            time_unit = str(random.sample(time_units,1)).replace("[","").replace("]","").replace("'","")
+            time_num1 = random.randint(0, 1000)
+            time_unit1 = time_unit.replace("num","%d" %time_num1)
+            time_num2 = random.randint(0, 1000)
+            time_unit2 = time_unit.replace("num","%d" %time_num2)
+
+            fun_column_1 = random.sample(time_functions,1)+random.sample(fun_fix_column,1)
+            time_fun_1 = str(fun_column_1).replace("[","").replace("]","").replace("'","").replace(", ","").replace("time_unit","%s" %time_unit1)
+
+            fun_column_2 = random.sample(time_functions,1)+random.sample(fun_fix_column,1)
+            time_fun_2 = str(fun_column_2).replace("[","").replace("]","").replace("'","").replace(", ","").replace("time_unit","%s" %time_unit2)
+
+
+            fun_fix_column_j = ['(t1.ts)','(t1.q_ts)', '(t2.ts)','(t2.q_ts)','(t1.ts,time_unit)','(t1.q_ts,time_unit)','(t2.ts,time_unit)','(t2.q_ts,time_unit)']
+
+            fun_column_join_1 = random.sample(time_functions,1)+random.sample(fun_fix_column_j,1)
+            time_fun_join_1 = str(fun_column_join_1).replace("[","").replace("]","").replace("'","").replace(", ","").replace("time_unit","%s" %time_unit1)
+
+            fun_column_join_2 = random.sample(time_functions,1)+random.sample(fun_fix_column_j,1)
+            time_fun_join_2 = str(fun_column_join_2).replace("[","").replace("]","").replace("'","").replace(", ","").replace("time_unit","%s" %time_unit2)
+
+
+        elif (timelist == ['CAST']) :
+            str_functions = timelist
+            # the four CAST_1..CAST_4 branches below cover all input classes; this branch randomly picks just one of the four
+            i = random.randint(1,4)
+            if i ==1:
+                print('===========cast_1===========')
+                fun_fix_column = ['q_bool','q_bool_null','q_bigint','q_bigint_null','q_smallint','q_smallint_null',
+                        'q_tinyint','q_tinyint_null','q_int','q_int_null','q_float','q_float_null','q_double','q_double_null']
+                type_names = ['BIGINT','BINARY(100)','TIMESTAMP','NCHAR(100)','BIGINT UNSIGNED']
+
+                type_name1 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","")
+                fun_column_1 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column,1))+' AS '+type_name1+')'
+                time_fun_1 = str(fun_column_1).replace("[","").replace("]","").replace("'","")
+
+                type_name2 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","")
+                fun_column_2 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column,1))+' AS '+type_name2+')'
+                time_fun_2 = str(fun_column_2).replace("[","").replace("]","").replace("'","")
+
+                fun_fix_column_j =
['t1.q_bool','t1.q_bool_null','t1.q_bigint','t1.q_bigint_null','t1.q_smallint','t1.q_smallint_null', + 't1.q_tinyint','t1.q_tinyint_null','t1.q_int','t1.q_int_null','t1.q_float','t1.q_float_null','t1.q_double','t1.q_double_null', + 't2.q_bool','t2.q_bool_null','t2.q_bigint','t2.q_bigint_null','t2.q_smallint','t2.q_smallint_null', + 't2.q_tinyint','t2.q_tinyint_null','t2.q_int','t2.q_int_null','t2.q_float','t2.q_float_null','t2.q_double','t2.q_double_null'] + + type_name_j1 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","") + fun_column_join_1 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column_j,1))+' AS '+type_name_j1+')' + time_fun_join_1 = str(fun_column_join_1).replace("[","").replace("]","").replace("'","") + + type_name_j2 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","") + fun_column_join_2 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column_j,1))+' AS '+type_name_j2+')' + time_fun_join_2 = str(fun_column_join_2).replace("[","").replace("]","").replace("'","") + + elif i==2: + print('===========cast_2===========') + fun_fix_column = ['q_binary','q_binary_null','q_binary1','q_binary2','q_binary3','q_binary4'] + type_names = ['BIGINT','BINARY(100)','NCHAR(100)','BIGINT UNSIGNED'] + + type_name1 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","") + fun_column_1 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column,1))+' AS '+type_name1+')' + time_fun_1 = str(fun_column_1).replace("[","").replace("]","").replace("'","") + + type_name2 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","") + fun_column_2 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column,1))+' AS '+type_name2+')' + time_fun_2 = str(fun_column_2).replace("[","").replace("]","").replace("'","") + + fun_fix_column_j = ['t1.q_binary','t1.q_binary_null','t1.q_binary1','t1.q_binary2','t1.q_binary3','t1.q_smallint_null','t1.q_binary4', + 't2.q_binary','t2.q_binary_null','t2.q_bigint','t2.q_binary1','t2.q_binary2','t2.q_binary3','t2.q_binary4'] + + type_name_j1 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","") + fun_column_join_1 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column_j,1))+' AS '+type_name_j1+')' + time_fun_join_1 = str(fun_column_join_1).replace("[","").replace("]","").replace("'","") + + type_name_j2 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","") + fun_column_join_2 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column_j,1))+' AS '+type_name_j2+')' + time_fun_join_2 = str(fun_column_join_2).replace("[","").replace("]","").replace("'","") + + elif i==3: + print('===========cast_3===========') + fun_fix_column = ['q_nchar','q_nchar_null','q_nchar5','q_nchar6','q_nchar7','q_nchar8'] + type_names = ['BIGINT','NCHAR(100)','BIGINT UNSIGNED'] + + type_name1 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","") + fun_column_1 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column,1))+' AS '+type_name1+')' + time_fun_1 = str(fun_column_1).replace("[","").replace("]","").replace("'","") + + type_name2 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","") + fun_column_2 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column,1))+' AS '+type_name2+')' + time_fun_2 = 
str(fun_column_2).replace("[","").replace("]","").replace("'","") + + fun_fix_column_j = ['t1.q_nchar','t1.q_nchar_null','t1.q_nchar5','t1.q_nchar6','t1.q_nchar7','t1.q_nchar8', + 't2.q_nchar','t2.q_nchar_null','t2.q_nchar5','t2.q_nchar6','t2.q_nchar7','t2.q_nchar8'] + + type_name_j1 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","") + fun_column_join_1 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column_j,1))+' AS '+type_name_j1+')' + time_fun_join_1 = str(fun_column_join_1).replace("[","").replace("]","").replace("'","") + + type_name_j2 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","") + fun_column_join_2 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column_j,1))+' AS '+type_name_j2+')' + time_fun_join_2 = str(fun_column_join_2).replace("[","").replace("]","").replace("'","") + + elif i==4: + print('===========cast_4===========') + fun_fix_column = ['q_ts','q_ts_null','_C0','_c0','ts','_rowts'] + type_names = ['BIGINT','TIMESTAMP','BIGINT UNSIGNED'] + + type_name1 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","") + fun_column_1 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column,1))+' AS '+type_name1+')' + time_fun_1 = str(fun_column_1).replace("[","").replace("]","").replace("'","") + + type_name2 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","") + fun_column_2 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column,1))+' AS '+type_name2+')' + time_fun_2 = str(fun_column_2).replace("[","").replace("]","").replace("'","") + + fun_fix_column_j = ['t1.q_ts','t1.q_ts_null','t1.ts','t2.q_ts','t2.q_ts_null','t2.ts'] + + type_name_j1 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","") + fun_column_join_1 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column_j,1))+' AS '+type_name_j1+')' + time_fun_join_1 = str(fun_column_join_1).replace("[","").replace("]","").replace("'","") + + type_name_j2 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","") + fun_column_join_2 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column_j,1))+' AS '+type_name_j2+')' + time_fun_join_2 = str(fun_column_join_2).replace("[","").replace("]","").replace("'","") + + elif (timelist == ['CAST_1']) : + str_functions = timelist + + print('===========cast_1===========') + fun_fix_column = ['q_bool','q_bool_null','q_bigint','q_bigint_null','q_smallint','q_smallint_null', + 'q_tinyint','q_tinyint_null','q_int','q_int_null','q_float','q_float_null','q_double','q_double_null'] + type_names = ['BIGINT','BINARY(100)','TIMESTAMP','NCHAR(100)','BIGINT UNSIGNED'] + + type_name1 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","") + fun_column_1 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column,1))+' AS '+type_name1+')' + time_fun_1 = str(fun_column_1).replace("[","").replace("]","").replace("'","").replace("_1","") + + type_name2 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","") + fun_column_2 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column,1))+' AS '+type_name2+')' + time_fun_2 = str(fun_column_2).replace("[","").replace("]","").replace("'","").replace("_1","") + + fun_fix_column_j = ['t1.q_bool','t1.q_bool_null','t1.q_bigint','t1.q_bigint_null','t1.q_smallint','t1.q_smallint_null', + 
't1.q_tinyint','t1.q_tinyint_null','t1.q_int','t1.q_int_null','t1.q_float','t1.q_float_null','t1.q_double','t1.q_double_null', + 't2.q_bool','t2.q_bool_null','t2.q_bigint','t2.q_bigint_null','t2.q_smallint','t2.q_smallint_null', + 't2.q_tinyint','t2.q_tinyint_null','t2.q_int','t2.q_int_null','t2.q_float','t2.q_float_null','t2.q_double','t2.q_double_null'] + + type_name_j1 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","") + fun_column_join_1 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column_j,1))+' AS '+type_name_j1+')' + time_fun_join_1 = str(fun_column_join_1).replace("[","").replace("]","").replace("'","").replace("_1","") + + type_name_j2 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","") + fun_column_join_2 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column_j,1))+' AS '+type_name_j2+')' + time_fun_join_2 = str(fun_column_join_2).replace("[","").replace("]","").replace("'","").replace("_1","") + + elif (timelist == ['CAST_2']) : + str_functions = timelist + print('===========cast_2===========') + fun_fix_column = ['q_binary','q_binary_null','q_binary1','q_binary2','q_binary3','q_binary4'] + type_names = ['BIGINT','BINARY(100)','NCHAR(100)','BIGINT UNSIGNED'] + + type_name1 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","") + fun_column_1 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column,1))+' AS '+type_name1+')' + time_fun_1 = str(fun_column_1).replace("[","").replace("]","").replace("'","").replace("_2","") + + type_name2 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","") + fun_column_2 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column,1))+' AS '+type_name2+')' + time_fun_2 = str(fun_column_2).replace("[","").replace("]","").replace("'","").replace("_2","") + + fun_fix_column_j = ['t1.q_binary','t1.q_binary_null','t1.q_binary1','t1.q_binary2','t1.q_binary3','t1.q_smallint_null','t1.q_binary4', + 't2.q_binary','t2.q_binary_null','t2.q_bigint','t2.q_binary1','t2.q_binary2','t2.q_binary3','t2.q_binary4'] + + type_name_j1 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","") + fun_column_join_1 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column_j,1))+' AS '+type_name_j1+')' + time_fun_join_1 = str(fun_column_join_1).replace("[","").replace("]","").replace("'","").replace("_2","") + + type_name_j2 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","") + fun_column_join_2 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column_j,1))+' AS '+type_name_j2+')' + time_fun_join_2 = str(fun_column_join_2).replace("[","").replace("]","").replace("'","").replace("_2","") + + elif (timelist == ['CAST_3']) : + str_functions = timelist + print('===========cast_3===========') + fun_fix_column = ['q_nchar','q_nchar_null','q_nchar5','q_nchar6','q_nchar7','q_nchar8'] + type_names = ['BIGINT','NCHAR(100)','BIGINT UNSIGNED'] + + type_name1 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","") + fun_column_1 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column,1))+' AS '+type_name1+')' + time_fun_1 = str(fun_column_1).replace("[","").replace("]","").replace("'","").replace("_3","") + + type_name2 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","") + fun_column_2 = 
str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column,1))+' AS '+type_name2+')' + time_fun_2 = str(fun_column_2).replace("[","").replace("]","").replace("'","").replace("_3","") + + fun_fix_column_j = ['t1.q_nchar','t1.q_nchar_null','t1.q_nchar5','t1.q_nchar6','t1.q_nchar7','t1.q_nchar8', + 't2.q_nchar','t2.q_nchar_null','t2.q_nchar5','t2.q_nchar6','t2.q_nchar7','t2.q_nchar8'] + + type_name_j1 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","") + fun_column_join_1 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column_j,1))+' AS '+type_name_j1+')' + time_fun_join_1 = str(fun_column_join_1).replace("[","").replace("]","").replace("'","").replace("_3","") + + type_name_j2 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","") + fun_column_join_2 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column_j,1))+' AS '+type_name_j2+')' + time_fun_join_2 = str(fun_column_join_2).replace("[","").replace("]","").replace("'","").replace("_3","") + + elif (timelist == ['CAST_4']) : + str_functions = timelist + print('===========cast_4===========') + fun_fix_column = ['q_ts','q_ts_null','_C0','_c0','ts','_rowts'] + type_names = ['BIGINT','TIMESTAMP','BIGINT UNSIGNED'] + + type_name1 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","") + fun_column_1 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column,1))+' AS '+type_name1+')' + time_fun_1 = str(fun_column_1).replace("[","").replace("]","").replace("'","").replace("_4","") + + type_name2 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","") + fun_column_2 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column,1))+' AS '+type_name2+')' + time_fun_2 = str(fun_column_2).replace("[","").replace("]","").replace("'","").replace("_4","") + + fun_fix_column_j = ['t1.q_ts','t1.q_ts_null','t1.ts','t2.q_ts','t2.q_ts_null','t2.ts'] + + type_name_j1 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","") + fun_column_join_1 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column_j,1))+' AS '+type_name_j1+')' + time_fun_join_1 = str(fun_column_join_1).replace("[","").replace("]","").replace("'","").replace("_4","") + + type_name_j2 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","") + fun_column_join_2 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column_j,1))+' AS '+type_name_j2+')' + time_fun_join_2 = str(fun_column_join_2).replace("[","").replace("]","").replace("'","").replace("_4","") + + tdSql.query("select 1-1 as time_nest from stable_1 limit 1;") + for i in range(self.fornum): + if (timelist == ['NOW','TODAY']) or (timelist == ['TIMETRUNCATE']) or (timelist == ['TO_ISO8601'])\ + or (timelist == ['TO_UNIXTIMESTAMP']): + sql = "select ts , timediff(asct1,now) from ( select " + sql += "%s as asct1, " % time_fun_1 + sql += "%s as asct2, " % time_fun_2 + sql += "%s, " % random.choice(self.s_s_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts from regular_table_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.order_where) + sql += ");" + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + tdSql.checkRows(100) + self.cur1.execute(sql) + elif (timelist == ['TIMEZONE']) \ + or (timelist == ['CAST']) or (timelist == ['CAST_1']) or (timelist == ['CAST_2']) or (timelist == ['CAST_3']) or 
(timelist == ['CAST_4']): + sql = "select ts , asct1,now(),today(),timezone() from ( select " + sql += "%s as asct1, " % time_fun_1 + sql += "%s as asct2, " % time_fun_2 + sql += "%s, " % random.choice(self.s_s_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts from regular_table_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.order_where) + sql += ");" + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + tdSql.checkRows(100) + self.cur1.execute(sql) + elif (timelist == ['ELAPSED']) : + sql = "select max(asct1),now(),today(),timezone() from ( select " + sql += "%s as asct1, " % time_fun_1 + sql += "%s as asct2 " % time_fun_2 + sql += "from regular_table_1 where " + sql += "%s " % random.choice(self.q_where) + sql += ");" + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + + tdSql.query("select 1-2 as time_nest from stable_1 limit 1;") + for i in range(self.fornum): + if (timelist == ['NOW','TODAY']) or (timelist == ['TIMETRUNCATE']) or (timelist == ['TO_ISO8601'])\ + or (timelist == ['TO_UNIXTIMESTAMP']): + sql = "select ts , timediff(asct1,now),now(),today(),timezone() from ( select " + sql += "%s as asct1, " % time_fun_1 + sql += "%s, " % random.choice(self.s_s_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts ts from regular_table_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s )" % random.choice(self.order_where) + sql += "%s " % random.choice(self.unionall_or_union) + sql += "select ts , timediff(asct2,now),now(),today(),timezone() from ( select " + sql += "%s as asct2, " % time_fun_2 + sql += "%s, " % random.choice(self.s_s_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts ts from regular_table_1 where " + sql += "%s " % random.choice(self.q_where) + #sql += "%s " % random.choice(having_support) + sql += "%s " % random.choice(self.order_where) + sql += "%s " % random.choice(self.limit1_where) + sql += ")" + tdLog.info(sql) + tdLog.info(len(sql)) + #TD-15437 tdSql.query(sql) + #TD-15437 self.cur1.execute(sql) + elif (timelist == ['TIMEZONE']) or (timelist == ['CAST']) or (timelist == ['CAST_1']) or (timelist == ['CAST_2']) or (timelist == ['CAST_3']) or (timelist == ['CAST_4']): + sql = "select ts , (asct1),now(),today(),timezone() from ( select " + sql += "%s as asct1, " % time_fun_1 + sql += "%s, " % random.choice(self.s_s_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts ts from regular_table_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s )" % random.choice(self.order_where) + sql += "%s " % random.choice(self.unionall_or_union) + sql += "select ts , asct2,now(),today(),timezone() from ( select " + sql += "%s as asct2, " % time_fun_2 + sql += "%s, " % random.choice(self.s_s_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts ts from regular_table_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.order_where) + sql += "%s " % random.choice(self.limit1_where) + sql += ")" + tdLog.info(sql) + tdLog.info(len(sql)) + #TD-15437 tdSql.query(sql) + #TD-15437 self.cur1.execute(sql) + elif (timelist == ['ELAPSED']) : + sql = "select min(asct1),now(),today(),timezone() from ( select " + sql += "%s as asct1 " % time_fun_1 + sql += " from regular_table_1 where " + sql += "%s )" % random.choice(self.q_where) + sql += "%s " % random.choice(self.unionall_or_union) + sql += "select avg(asct2),now(),today(),timezone() from ( select " + sql += "%s 
as asct2 " % time_fun_2 + sql += " from regular_table_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.limit1_where) + sql += ")" + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + + tdSql.query("select 1-3 as time_nest from stable_1 limit 1;") + for i in range(self.fornum): + if (timelist == ['NOW','TODAY']) or (timelist == ['TIMETRUNCATE']) or (timelist == ['TO_ISO8601'])\ + or (timelist == ['TO_UNIXTIMESTAMP']): + sql = "select ts , timediff(asct1,now) from ( select " + sql += "%s as asct1, ts ," % time_fun_1 + sql += "%s as asct2, " % time_fun_2 + sql += "%s, " % random.choice(self.s_s_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts from regular_table_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s select " % random.choice(self.unionall_or_union) + sql += "%s as asct2, ts ," % time_fun_2 + sql += "%s as asct1, " % time_fun_1 + sql += "%s, " % random.choice(self.s_s_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts from regular_table_2 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.order_where) + sql += ")" + tdLog.info(sql) + tdLog.info(len(sql)) + #TD-15473 tdSql.query(sql) + #self.cur1.execute(sql) + elif (timelist == ['TIMEZONE']) or (timelist == ['CAST']) or (timelist == ['CAST_1']) or (timelist == ['CAST_2']) or (timelist == ['CAST_3']) or (timelist == ['CAST_4']): + sql = "select ts , (asct1),now(),today(),timezone() from ( select " + sql += "%s as asct1, ts ," % time_fun_1 + sql += "%s as asct2, " % time_fun_2 + sql += "%s, " % random.choice(self.s_s_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts from regular_table_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s select " % random.choice(self.unionall_or_union) + sql += "%s as asct2, ts ," % time_fun_2 + sql += "%s as asct1, " % time_fun_1 + sql += "%s, " % random.choice(self.s_s_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts from regular_table_2 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.order_where) + sql += ")" + tdLog.info(sql) + tdLog.info(len(sql)) + #TD-15473 tdSql.query(sql) + #self.cur1.execute(sql) + elif (timelist == ['ELAPSED']) : + sql = "select abs(asct1),now(),today(),timezone() from ( select " + sql += "%s as asct1," % time_fun_1 + sql += "%s as asct2 " % time_fun_2 + sql += "from regular_table_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s select " % random.choice(self.unionall_or_union) + sql += "%s as asct2," % time_fun_2 + sql += "%s as asct1 " % time_fun_1 + sql += "from regular_table_2 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.order_where) + sql += ")" + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + + tdSql.query("select 1-4 as time_nest from stable_1 limit 1;") + for i in range(self.fornum): + if (timelist == ['NOW','TODAY']) or (timelist == ['TIMETRUNCATE']) or (timelist == ['TO_ISO8601'])\ + or (timelist == ['TO_UNIXTIMESTAMP']): + sql = "select ts , timediff(asct1,now) from ( select t1.ts as ts," + sql += "%s, " % time_fun_join_1 + sql += "%s as asct1, " % time_fun_join_2 + sql += "%s, " % time_fun_join_1 + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) + sql += "t2.ts from regular_table_1 t1 , regular_table_2 t2 where t1.ts = t2.ts and " + sql += "%s " % 
random.choice(self.q_u_where) + sql += "and %s " % random.choice(self.q_u_or_where) + sql += "%s " % random.choice(self.order_u_where) + sql += ");" + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + tdSql.checkRows(100) + self.cur1.execute(sql) + elif (timelist == ['TIMEZONE']) or (timelist == ['CAST']) or (timelist == ['CAST_1']) or (timelist == ['CAST_2']) or (timelist == ['CAST_3']) or (timelist == ['CAST_4']): + sql = "select ts , (asct1) from ( select t1.ts as ts," + sql += "%s, " % time_fun_join_1 + sql += "%s as asct1, " % time_fun_join_2 + sql += "%s, " % time_fun_join_1 + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) + sql += "t2.ts from regular_table_1 t1 , regular_table_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.q_u_where) + sql += "and %s " % random.choice(self.q_u_or_where) + sql += "%s " % random.choice(self.order_u_where) + sql += ");" + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + tdSql.checkRows(100) + self.cur1.execute(sql) + elif (timelist == ['ELAPSED']) : + sql = "select floor(asct1) from ( select " + sql += "%s, " % time_fun_join_1 + sql += "%s as asct1, " % time_fun_join_2 + sql += "%s " % time_fun_join_1 + sql += " from regular_table_1 t1 , regular_table_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.q_u_where) + sql += "and %s " % random.choice(self.q_u_or_where) + sql += ");" + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + + tdSql.query("select 1-5 as time_nest from stable_1 limit 1;") + for i in range(self.fornum): + if (timelist == ['ELAPSED']) : + sql = "select now(),today(),timezone(), " + sql += "%s, " % time_fun_1 + sql += "%s " % time_fun_2 + sql += " from ( select * from regular_table_1 ) where " + sql += "%s " % random.choice(self.q_where) + sql += " ;" + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + else: + sql = "select ts ,now(),today(),timezone(), " + sql += "%s, " % time_fun_1 + sql += "%s, " % random.choice(self.q_select) + sql += "%s, " % random.choice(self.q_select) + sql += "%s " % time_fun_2 + sql += " from ( select * from regular_table_1 ) where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.order_where) + sql += " ;" + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + tdSql.checkRows(100) + self.cur1.execute(sql) + + tdSql.query("select 1-6 as time_nest from stable_1 limit 1;") + for i in range(self.fornum): + if (timelist == ['NOW','TODAY']) or (timelist == ['TIMETRUNCATE']) or (timelist == ['TO_ISO8601'])\ + or (timelist == ['TO_UNIXTIMESTAMP']): + sql = "select ts , timediff(asct1,now) from ( select t1.ts as ts," + sql += "%s, " % time_fun_join_1 + sql += "%s as asct1, " % time_fun_join_2 + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) + sql += "%s, " % time_fun_join_1 + sql += "t2.ts from regular_table_1 t1 , regular_table_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.q_u_where) + sql += "and %s )" % random.choice(self.q_u_or_where) + sql += "%s " % random.choice(self.order_where) + sql += "%s " % random.choice(self.limit1_where) + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + elif (timelist == ['TIMEZONE']) or (timelist == ['CAST']) or (timelist == ['CAST_1']) or (timelist == ['CAST_2']) or (timelist == ['CAST_3']) or (timelist == ['CAST_4']): + sql = "select ts , (asct1) from ( select 
t1.ts as ts," + sql += "%s, " % time_fun_join_1 + sql += "%s as asct1, " % time_fun_join_2 + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) + sql += "%s, " % time_fun_join_1 + sql += "t2.ts from regular_table_1 t1 , regular_table_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.q_u_where) + sql += "and %s )" % random.choice(self.q_u_or_where) + sql += "%s " % random.choice(self.order_where) + sql += "%s " % random.choice(self.limit1_where) + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + elif (timelist == ['ELAPSED']) : + sql = "select (asct1)*111 from ( select " + sql += "%s, " % time_fun_join_1 + sql += "%s as asct1, " % time_fun_join_2 + sql += "%s " % time_fun_join_1 + sql += " from regular_table_1 t1 , regular_table_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.q_u_where) + sql += "and %s )" % random.choice(self.q_u_or_where) + sql += "%s " % random.choice(self.limit1_where) + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + + tdSql.query("select 1-7 as time_nest from stable_1 limit 1;") + for i in range(self.fornum): + if (timelist == ['NOW','TODAY']) or (timelist == ['TIMETRUNCATE']) or (timelist == ['TO_ISO8601'])\ + or (timelist == ['TO_UNIXTIMESTAMP']): + sql = "select ts , timediff(asct1,now) from ( select " + sql += "%s as asct1, ts ," % time_fun_1 + sql += "%s as asct2, " % time_fun_2 + sql += "%s, " % random.choice(self.q_select) + sql += "%s, " % random.choice(self.t_select) + sql += "ts from stable_1 where " + sql += "%s " % random.choice(self.qt_where) + sql += "%s " % random.choice(self.order_where) + sql += ") ;" + tdLog.info(sql) + tdLog.info(len(sql)) + # tdSql.query(sql) # TD-16039 + # tdSql.checkRows(300) + # self.cur1.execute(sql) + elif (timelist == ['TIMEZONE']) or (timelist == ['CAST']) or (timelist == ['CAST_1']) or (timelist == ['CAST_2']) or (timelist == ['CAST_3']) or (timelist == ['CAST_4']): + sql = "select ts , (asct1),now(),today(),timezone() from ( select " + sql += "%s as asct1, ts ," % time_fun_1 + sql += "%s as asct2, " % time_fun_2 + sql += "%s, " % random.choice(self.q_select) + sql += "%s, " % random.choice(self.t_select) + sql += "ts from stable_1 where " + sql += "%s " % random.choice(self.qt_where) + sql += "%s " % random.choice(self.order_where) + sql += ") ;" + tdLog.info(sql) + tdLog.info(len(sql)) + # tdSql.query(sql) # TD-16039 + # tdSql.checkRows(300) + # self.cur1.execute(sql) + elif (timelist == ['ELAPSED']) : + sql = "select (asct1)/asct2 ,now(),today(),timezone() from ( select " + sql += "%s as asct1, " % time_fun_1 + sql += "%s as asct2 " % time_fun_2 + sql += "from stable_1 where " + sql += "%s " % random.choice(self.qt_where) + sql += ") ;" + tdLog.info(sql) + tdLog.info(len(sql)) + # tdSql.query(sql) #同时出现core:TD-16095和TD-16042 + # self.cur1.execute(sql) + + tdSql.query("select 1-8 as time_nest from stable_1 limit 1;") + for i in range(self.fornum): + if (timelist == ['NOW','TODAY']) or (timelist == ['TIMETRUNCATE']) or (timelist == ['TO_ISO8601'])\ + or (timelist == ['TO_UNIXTIMESTAMP']): + sql = "select ts , timediff(asct1,now) " + sql += "from ( select " + sql += "%s, " % random.choice(self.s_s_select) + sql += "%s as asct1, ts ," % time_fun_1 + sql += "%s as asct2, " % time_fun_2 + sql += "%s, " % random.choice(self.q_select) + sql += "%s, " % random.choice(self.t_select) + sql += "ts from stable_1 where " + sql += "%s " % random.choice(self.qt_where) + sql += "%s " 
% random.choice(self.order_where) + sql += ") ;" + tdLog.info(sql) + tdLog.info(len(sql)) + # tdSql.query(sql) # TD-16039 + # tdSql.checkRows(300) + # self.cur1.execute(sql) + elif (timelist == ['TIMEZONE']) or (timelist == ['CAST']) or (timelist == ['CAST_1']) or (timelist == ['CAST_2']) or (timelist == ['CAST_3']) or (timelist == ['CAST_4']): + sql = "select ts , (asct1),now(),today(),timezone() " + sql += "from ( select " + sql += "%s, " % random.choice(self.s_s_select) + sql += "%s as asct1, ts ," % time_fun_1 + sql += "%s as asct2, " % time_fun_2 + sql += "%s, " % random.choice(self.q_select) + sql += "%s, " % random.choice(self.t_select) + sql += "ts from stable_1 where " + sql += "%s " % random.choice(self.qt_where) + sql += "%s " % random.choice(self.order_where) + sql += ") ;" + tdLog.info(sql) + tdLog.info(len(sql)) + # tdSql.query(sql) # TD-16039 + # tdSql.checkRows(300) + # self.cur1.execute(sql) + elif (timelist == ['ELAPSED']) : + sql = "select floor(abs(asct1)),now(),today(),timezone() " + sql += "from ( select " + sql += "%s, " % random.choice(self.s_s_select) + sql += "%s as asct1, " % time_fun_1 + sql += "%s as asct2, " % time_fun_2 + sql += "from stable_1 where " + sql += "%s " % random.choice(self.qt_where) + sql += "%s " % random.choice(self.order_where) + sql += ") ;" + tdLog.info(sql) + tdLog.info(len(sql)) + # tdSql.query(sql) # TD-16039 + # self.cur1.execute(sql) + + tdSql.query("select 1-9 as time_nest from stable_1 limit 1;") + for i in range(self.fornum): + if (timelist == ['NOW','TODAY']) or (timelist == ['TIMETRUNCATE']) or (timelist == ['TO_ISO8601'])\ + or (timelist == ['TO_UNIXTIMESTAMP']): + sql = "select ts , timediff(asct1,now) from ( select t1.ts as ts," + sql += "%s, " % time_fun_join_1 + sql += "%s as asct1, " % time_fun_join_2 + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) + sql += "t2.ts from stable_1 t1 , stable_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.t_join_where) + sql += "and %s " % random.choice(self.t_u_where) + sql += "and %s " % random.choice(self.t_u_or_where) + sql += "%s " % random.choice(self.order_u_where) + sql += "%s " % random.choice(self.limit1_where) + sql += ") ;" + tdLog.info(sql) + tdLog.info(len(sql)) + # tdSql.query(sql) TD-16039 + # self.cur1.execute(sql) TD-16039 + elif (timelist == ['TIMEZONE']) or (timelist == ['CAST']) or (timelist == ['CAST_1']) or (timelist == ['CAST_2']) or (timelist == ['CAST_3']) or (timelist == ['CAST_4']): + sql = "select ts , asct1 from ( select t1.ts as ts," + sql += "%s, " % time_fun_join_1 + sql += "%s as asct1, " % time_fun_join_2 + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) + sql += "t2.ts from stable_1 t1 , stable_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.t_join_where) + sql += "and %s " % random.choice(self.t_u_where) + sql += "and %s " % random.choice(self.t_u_or_where) + sql += "%s " % random.choice(self.order_u_where) + sql += "%s " % random.choice(self.limit1_where) + sql += ") ;" + tdLog.info(sql) + tdLog.info(len(sql)) + # tdSql.query(sql) + # self.cur1.execute(sql) # TD-16039 + elif (timelist == ['ELAPSED']) : + sql = "select min(asct1*110) from ( select " + sql += "%s, " % time_fun_join_1 + sql += "%s as asct1 " % time_fun_join_2 
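Every CAST_* branch above builds its expressions the same way: sample a one-element list, strip the `[`, `]`, and `'` characters that `str()` adds to the list repr, and finally drop the `_1`..`_4` suffix so that the branch tag `CAST_4` becomes a legal `CAST(...)` call. A minimal standalone sketch of the same idea — the helper name `cast_expr` is ours, not part of the suite:

```python
import random

# Minimal sketch (cast_expr is our name, not the suite's).
# random.choice returns the element itself, so the repeated
# str(random.sample(...)).replace("[","").replace("]","").replace("'","")
# cleanup used above is unnecessary here.
def cast_expr(columns, type_names):
    """Compose a random CAST expression such as CAST(q_int AS BIGINT)."""
    return "CAST(%s AS %s)" % (random.choice(columns), random.choice(type_names))

# Pools copied from the CAST_4 branch above
fun_fix_column = ['q_ts', 'q_ts_null', '_C0', '_c0', 'ts', '_rowts']
type_names = ['BIGINT', 'TIMESTAMP', 'BIGINT UNSIGNED']
print(cast_expr(fun_fix_column, type_names))  # e.g. CAST(_rowts AS BIGINT)
```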
+ sql += "from stable_1 t1 , stable_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.t_join_where) + sql += "and %s " % random.choice(self.t_u_where) + sql += "and %s " % random.choice(self.t_u_or_where) + sql += "%s " % random.choice(self.limit1_where) + sql += ") ;" + tdLog.info(sql) + tdLog.info(len(sql)) + # tdSql.query(sql) + # self.cur1.execute(sql) # TD-16039 + + self.restartDnodes() + tdSql.query("select 1-10 as time_nest from stable_1 limit 1;") + for i in range(self.fornum): + if (timelist == ['NOW','TODAY']) or (timelist == ['TIMETRUNCATE']) or (timelist == ['TO_ISO8601'])\ + or (timelist == ['TO_UNIXTIMESTAMP']): + sql = "select ts , timediff(asct1,now) from ( select " + sql += "%s as asct1, ts ," % time_fun_1 + sql += "%s as asct2, " % time_fun_2 + sql += "%s, " % random.choice(self.s_r_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts from stable_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.order_where) + sql += ") %s " % random.choice(self.unionall_or_union) + sql += "select ts , max(asct2) from ( select " + sql += "%s as asct1, ts ," % time_fun_1 + sql += "%s as asct2, " % time_fun_2 + sql += "%s, " % random.choice(self.s_r_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts from stable_2 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.order_where) + sql += "%s " % random.choice(self.limit1_where) + sql += ")" + tdLog.info(sql) + tdLog.info(len(sql)) + #TD-15437 tdSql.query(sql) + #TD-15437 self.cur1.execute(sql) + elif (timelist == ['TIMEZONE']) or (timelist == ['CAST']) or (timelist == ['CAST_1']) or (timelist == ['CAST_2']) or (timelist == ['CAST_3']) or (timelist == ['CAST_4']): + sql = "select ts , (asct1),now(),today(),timezone() from ( select " + sql += "%s as asct1, ts ," % time_fun_1 + sql += "%s as asct2, " % time_fun_2 + sql += "%s, " % random.choice(self.s_r_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts from stable_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.order_where) + sql += ") %s " % random.choice(self.unionall_or_union) + sql += "select ts , max(asct2),now(),today(),timezone() from ( select " + sql += "%s as asct1, ts ," % time_fun_1 + sql += "%s as asct2, " % time_fun_2 + sql += "%s, " % random.choice(self.s_r_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts from stable_2 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.order_where) + sql += "%s " % random.choice(self.limit1_where) + sql += ")" + tdLog.info(sql) + tdLog.info(len(sql)) + #TD-15437 tdSql.query(sql) + #TD-15437 self.cur1.execute(sql) + elif (timelist == ['ELAPSED']) : + sql = "select abs(asct1),now(),today(),timezone() from ( select " + sql += "%s as asct1 ," % time_fun_1 + sql += "%s as asct2 " % time_fun_2 + sql += " from stable_1 where " + sql += "%s " % random.choice(self.q_where) + sql += ") %s " % random.choice(self.unionall_or_union) + sql += "select max(asct2),now(),today(),timezone() from ( select " + sql += "%s as asct1 ," % time_fun_1 + sql += "%s as asct2 " % time_fun_2 + sql += "from stable_2 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.limit1_where) + sql += ")" + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + #TD-15437 self.cur1.execute(sql) + + #3 inter union not support + tdSql.query("select 1-11 as time_nest from stable_1 limit 1;") + for i in 
range(self.fornum): + if (timelist == ['NOW','TODAY']) or (timelist == ['TIMETRUNCATE']) or (timelist == ['TO_ISO8601'])\ + or (timelist == ['TO_UNIXTIMESTAMP']): + sql = "select ts , timediff(asct1,now), timediff(now,asct2) from ( select " + sql += "%s as asct1, ts ," % time_fun_1 + sql += "%s as asct2, " % time_fun_2 + sql += "%s, " % random.choice(self.s_r_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts from stable_1 where " + sql += "%s " % random.choice(self.q_where) + sql += " %s " % random.choice(self.unionall_or_union) + sql += " select " + sql += "%s as asct1, ts ," % time_fun_1 + sql += "%s as asct2, " % time_fun_2 + sql += "%s, " % random.choice(self.s_r_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts from stable_2 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.order_where) + sql += "%s " % random.choice(self.limit1_where) + sql += ")" + tdLog.info(sql) + tdLog.info(len(sql)) + # tdSql.query(sql)#TD-15473 + # self.cur1.execute(sql)#TD-15473 + elif (timelist == ['TIMEZONE']) or (timelist == ['CAST']) or (timelist == ['CAST_1']) or (timelist == ['CAST_2']) or (timelist == ['CAST_3']) or (timelist == ['CAST_4']): + sql = "select ts , (asct1,now()),(now(),asct2) from ( select " + sql += "%s as asct1, ts ," % time_fun_1 + sql += "%s as asct2, " % time_fun_2 + sql += "%s, " % random.choice(self.s_r_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts from stable_1 where " + sql += "%s " % random.choice(self.q_where) + sql += " %s " % random.choice(self.unionall_or_union) + sql += " select " + sql += "%s as asct1, ts ," % time_fun_1 + sql += "%s as asct2, " % time_fun_2 + sql += "%s, " % random.choice(self.s_r_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts from stable_2 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.order_where) + sql += "%s " % random.choice(self.limit1_where) + sql += ")" + tdLog.info(sql) + tdLog.info(len(sql)) + # tdSql.query(sql)#TD-15473 + # self.cur1.execute(sql)#TD-15473 + elif (timelist == ['ELAPSED']) : + sql = "select asct1+asct2,now(),today(),timezone() from ( select " + sql += "%s as asct1, " % time_fun_1 + sql += "%s as asct2 " % time_fun_2 + sql += " from stable_1 where " + sql += "%s " % random.choice(self.q_where) + sql += " %s " % random.choice(self.unionall_or_union) + sql += " select " + sql += "%s as asct1 ," % time_fun_1 + sql += "%s as asct2 " % time_fun_2 + sql += " from stable_2 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.order_where) + sql += "%s " % random.choice(self.limit1_where) + sql += ")" + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql)#TD-15473 + self.cur1.execute(sql)#TD-15473 + + tdSql.query("select 1-12 as time_nest from stable_1 limit 1;") + for i in range(self.fornum): + if (timelist == ['NOW','TODAY']) or (timelist == ['TIMETRUNCATE']) or (timelist == ['TO_ISO8601'])\ + or (timelist == ['TO_UNIXTIMESTAMP']): + sql = "select ts , timediff(asct1,now) from ( select t1.ts as ts," + sql += "%s, " % time_fun_join_1 + sql += "%s as asct1, " % time_fun_join_2 + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) + sql += "t2.ts from stable_1 t1 , stable_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.t_u_where) + sql += "and %s " % random.choice(self.t_u_or_where) 
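Cases 1-10 and 1-11 above stitch two randomized derived tables together with an operator drawn from `self.unionall_or_union`, with several combinations still parked behind TD-15437/TD-15473. A standalone sketch of the query shape being generated; the column expression and where-clause pool stand in for the suite's `time_fun_*` and `q_where` pools:

```python
import random

# Sketch of the UNION shape from cases 1-10/1-11 (illustrative pools).
def union_nest(expr, where_pool):
    op = random.choice(["union", "union all"])
    left = ("select ts, asct1 from ( select %s as asct1, ts "
            "from stable_1 where %s )" % (expr, random.choice(where_pool)))
    right = ("select ts, asct1 from ( select %s as asct1, ts "
             "from stable_2 where %s limit 10 )" % (expr, random.choice(where_pool)))
    return "%s %s %s;" % (left, op, right)

print(union_nest("cast(q_int as bigint)", ["q_int != 0", "q_double is not null"]))
```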
+ sql += "%s " % random.choice(self.order_u_where) + sql += "%s " % random.choice(self.limit1_where) + sql += ");" + tdLog.info(sql) + tdLog.info(len(sql)) + # tdSql.query(sql) + # self.cur1.execute(sql)# TD-16039 + elif (timelist == ['TIMEZONE']) or (timelist == ['CAST']) or (timelist == ['CAST_1']) or (timelist == ['CAST_2']) or (timelist == ['CAST_3']) or (timelist == ['CAST_4']): + sql = "select ts , asct1,now() from ( select t1.ts as ts," + sql += "%s, " % time_fun_join_1 + sql += "%s as asct1, " % time_fun_join_2 + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) + sql += "t2.ts from stable_1 t1 , stable_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.t_u_where) + sql += "and %s " % random.choice(self.t_u_or_where) + sql += "%s " % random.choice(self.order_u_where) + sql += "%s " % random.choice(self.limit1_where) + sql += ");" + tdLog.info(sql) + tdLog.info(len(sql)) + # tdSql.query(sql) + # self.cur1.execute(sql)# TD-16039 + elif (timelist == ['ELAPSED']) : + sql = "select min(floor(asct1)),now() from ( select " + sql += "%s, " % time_fun_join_1 + sql += "%s as asct1 " % time_fun_join_2 + sql += " from stable_1 t1 , stable_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.t_u_where) + sql += "and %s " % random.choice(self.t_u_or_where) + sql += "%s " % random.choice(self.limit1_where) + sql += ");" + tdLog.info(sql) + tdLog.info(len(sql)) + # tdSql.query(sql) + # self.cur1.execute(sql)# TD-16039 + + tdSql.query("select 1-13 as time_nest from stable_1 limit 1;") + for i in range(self.fornum): + if (timelist == ['NOW','TODAY']) or (timelist == ['TIMETRUNCATE']) or (timelist == ['TO_ISO8601'])\ + or (timelist == ['TO_UNIXTIMESTAMP']): + sql = "select ts , timediff(%s,now)," % time_fun_2 + sql += "%s as asct1, " % time_fun_1 + sql += "%s, " % random.choice(self.q_select) + sql += "%s, " % random.choice(self.q_select) + sql += "%s " % time_fun_2 + sql += "%s " % random.choice(self.t_select) + sql += " from ( select * from stable_1 where " + sql += "%s " % random.choice(self.qt_where) + sql += "%s " % random.choice(self.order_where) + sql += ") ;" + tdLog.info(sql) + tdLog.info(len(sql)) + # tdSql.query(sql) + # tdSql.checkRows(300) + # self.cur1.execute(sql) # TD-16039 + elif (timelist == ['TIMEZONE']) or (timelist == ['CAST']) or (timelist == ['CAST_1']) or (timelist == ['CAST_2']) or (timelist == ['CAST_3']) or (timelist == ['CAST_4']): + sql = "select ts ,now(),today(),timezone(), " + sql += "%s as asct1, " % time_fun_1 + sql += "%s, " % random.choice(self.q_select) + sql += "%s, " % random.choice(self.q_select) + sql += "%s " % time_fun_2 + sql += "%s " % random.choice(self.t_select) + sql += " from ( select * from stable_1 where " + sql += "%s " % random.choice(self.qt_where) + sql += "%s " % random.choice(self.order_where) + sql += ") ;" + tdLog.info(sql) + tdLog.info(len(sql)) + # tdSql.query(sql) + # tdSql.checkRows(300) + # self.cur1.execute(sql) # TD-16039 + elif (timelist == ['ELAPSED']) : + sql = "select now(),today(),timezone(), " + sql += "%s as asct1, " % time_fun_1 + sql += "%s " % time_fun_2 + sql += " from ( select * from stable_1 where " + sql += "%s " % random.choice(self.qt_where) + sql += "%s " % random.choice(self.order_where) + sql += ") ;" + tdLog.info(sql) + tdLog.info(len(sql)) + # tdSql.query(sql) + # self.cur1.execute(sql) # TD-16039 + + tdSql.query("select 1-14 as time_nest 
from stable_1 limit 1;") + for i in range(self.fornum): + if (timelist == ['NOW','TODAY']) or (timelist == ['TIMETRUNCATE']) or (timelist == ['TO_ISO8601'])\ + or (timelist == ['TO_UNIXTIMESTAMP']): + sql = "select ts , timediff(asct1,now),timediff(now,asct2) from ( select ts ts ," + sql += "%s as asct1, " % time_fun_1 + sql += "%s as asct2" % time_fun_2 + sql += " from stable_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.partiton_where) + sql += "%s " % random.choice(self.order_desc_where) + sql += "%s " % random.choice([self.limit_where[2] , self.limit_where[3]] ) + sql += " ) ;" + tdLog.info(sql) + tdLog.info(len(sql)) + # tdSql.query(sql) + # self.cur1.execute(sql) # TD-16039 + elif (timelist == ['TIMEZONE']) or (timelist == ['CAST']) or (timelist == ['CAST_1']) or (timelist == ['CAST_2']) or (timelist == ['CAST_3']) or (timelist == ['CAST_4']): + sql = "select ts , (asct1),now(),(now()),asct2 from ( select ts ts ," + sql += "%s as asct1, " % time_fun_1 + sql += "%s as asct2" % time_fun_2 + sql += " from stable_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.partiton_where) + sql += "%s " % random.choice(self.order_desc_where) + sql += "%s " % random.choice([self.limit_where[2] , self.limit_where[3]] ) + sql += " ) ;" + tdLog.info(sql) + tdLog.info(len(sql)) + # tdSql.query(sql) + # self.cur1.execute(sql) # TD-16039 + elif (timelist == ['ELAPSED']) : + sql = "select ts , (asct1)*asct2,now(),(now()) from ( select " + sql += "%s as asct1, " % time_fun_1 + sql += "%s as asct2" % time_fun_2 + sql += " from stable_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.partiton_where) + sql += "%s " % random.choice([self.limit_where[2] , self.limit_where[3]] ) + sql += " ) ;" + tdLog.info(sql) + tdLog.info(len(sql)) + # tdSql.query(sql) + # self.cur1.execute(sql) # TD-16039 + + tdSql.query("select 1-15 as time_nest from stable_1 limit 1;") + for i in range(self.fornum): + if (timelist == ['NOW','TODAY']) or (timelist == ['TIMETRUNCATE']) or (timelist == ['TO_ISO8601'])\ + or (timelist == ['TO_UNIXTIMESTAMP']): + sql = "select ts , timediff(asct1,now),timediff(now,asct2) from ( select t1.ts as ts," + sql += "%s as asct2, " % time_fun_join_1 + sql += "%s as asct1, " % time_fun_join_2 + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) + sql += "t2.%s " % random.choice(self.q_select) + sql += "from stable_1 t1, stable_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.t_join_where) + sql += " and %s " % random.choice(self.qt_u_or_where) + sql += "%s " % random.choice(self.partiton_where_j) + sql += "%s " % random.choice(self.slimit1_where) + sql += ") " + sql += "%s " % random.choice(self.order_desc_where) + sql += "%s ;" % random.choice(self.limit_u_where) + tdLog.info(sql) + tdLog.info(len(sql)) + # tdSql.query(sql) + # self.cur1.execute(sql) # TD-16039 + elif (timelist == ['TIMEZONE']) or (timelist == ['CAST']) or (timelist == ['CAST_1']) or (timelist == ['CAST_2']) or (timelist == ['CAST_3']) or (timelist == ['CAST_4']): + sql = "select ts , asct1,(now()),(now()),asct2 ,now(),today(),timezone() from ( select t1.ts as ts," + sql += "%s as asct2, " % time_fun_join_1 + sql += "%s as asct1, " % time_fun_join_2 + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) + 
sql += "t2.%s " % random.choice(self.q_select) + sql += "from stable_1 t1, stable_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.t_join_where) + sql += " and %s " % random.choice(self.qt_u_or_where) + sql += "%s " % random.choice(self.partiton_where_j) + sql += "%s " % random.choice(self.slimit1_where) + sql += ") " + sql += "%s " % random.choice(self.order_desc_where) + sql += "%s ;" % random.choice(self.limit_u_where) + tdLog.info(sql) + tdLog.info(len(sql)) + # tdSql.query(sql) + # self.cur1.execute(sql) # TD-16039 + elif (timelist == ['ELAPSED']) : + sql = "select asct1,(now()),(now()),asct2 ,now(),today(),timezone() from ( select " + sql += "%s as asct2, " % time_fun_join_1 + sql += "%s as asct1 " % time_fun_join_2 + sql += "from stable_1 t1, stable_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.t_join_where) + sql += " and %s " % random.choice(self.qt_u_or_where) + sql += "%s " % random.choice(self.partiton_where_j) + sql += "%s " % random.choice(self.slimit1_where) + sql += ") " + sql += "%s ;" % random.choice(self.limit_u_where) + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) # TD-16039 + + #taos -f sql + startTime_taos_f = time.time() + print("taos -f %s sql start!" %timelist) + taos_cmd1 = "taos -f %s/%s.sql" % (self.testcasePath,self.testcaseFilename) + _ = subprocess.check_output(taos_cmd1, shell=True).decode("utf-8") + print("taos -f %s sql over!" %timelist) + endTime_taos_f = time.time() + print("taos_f total time %ds" % (endTime_taos_f - startTime_taos_f)) + + print("=========%s====over=============" %timelist) + + def base_nest(self,baselist): + + print("==========%s===start=============" %baselist) + os.system("rm -rf %s/%s.sql" % (self.testcasePath,self.testcaseFilename)) + + self.dropandcreateDB_random("%s" %self.db_nest, 1) + + if (baselist == ['A']) or (baselist == ['S']) or (baselist == ['F']) \ + or (baselist == ['C']): + base_functions = baselist + fun_fix_column = ['(q_bigint)','(q_smallint)','(q_tinyint)','(q_int)','(q_float)','(q_double)','(q_bigint_null)','(q_smallint_null)','(q_tinyint_null)','(q_int_null)','(q_float_null)','(q_double_null)'] + fun_column_1 = random.sample(base_functions,1)+random.sample(fun_fix_column,1) + base_fun_1 = str(fun_column_1).replace("[","").replace("]","").replace("'","").replace(", ","") + fun_column_2 = random.sample(base_functions,1)+random.sample(fun_fix_column,1) + base_fun_2 = str(fun_column_2).replace("[","").replace("]","").replace("'","").replace(", ","") + + fun_fix_column_j = ['(t1.q_bigint)','(t1.q_smallint)','(t1.q_tinyint)','(t1.q_int)','(t1.q_float)','(t1.q_double)','(t1.q_bigint_null)','(t1.q_smallint_null)','(t1.q_tinyint_null)','(t1.q_int_null)','(t1.q_float_null)','(t1.q_double_null)', + '(t2.q_bigint)','(t2.q_smallint)','(t2.q_tinyint)','(t2.q_int)','(t2.q_float)','(t2.q_double)','(t2.q_bigint_null)','(t2.q_smallint_null)','(t2.q_tinyint_null)','(t2.q_int_null)','(t2.q_float_null)','(t2.q_double_null)'] + fun_column_join_1 = random.sample(base_functions,1)+random.sample(fun_fix_column_j,1) + base_fun_join_1 = str(fun_column_join_1).replace("[","").replace("]","").replace("'","").replace(", ","") + fun_column_join_2 = random.sample(base_functions,1)+random.sample(fun_fix_column_j,1) + base_fun_join_2 = str(fun_column_join_2).replace("[","").replace("]","").replace("'","").replace(", ","") + + elif (baselist == ['P']) or (baselist == ['M']) or (baselist == ['S'])or (baselist == ['T']): + base_functions = baselist + num = random.randint(0, 
1000) + fun_fix_column = ['(q_bigint,num)','(q_smallint,num)','(q_tinyint,num)','(q_int,num)','(q_float,num)','(q_double,num)', + '(q_bigint_null,num)','(q_smallint_null,num)','(q_tinyint_null,num)','(q_int_null,num)','(q_float_null,num)','(q_double_null,num)'] + fun_column_1 = random.sample(base_functions,1)+random.sample(fun_fix_column,1) + base_fun_1 = str(fun_column_1).replace("[","").replace("]","").replace("'","").replace(", ","").replace("num",base(num)) + fun_column_2 = random.sample(base_functions,1)+random.sample(fun_fix_column,1) + base_fun_2 = str(fun_column_2).replace("[","").replace("]","").replace("'","").replace(", ","").replace("num",base(num)) + + fun_fix_column_j = ['(t1.q_bigint,num)','(t1.q_smallint,num)','(t1.q_tinyint,num)','(t1.q_int,num)','(t1.q_float,num)','(t1.q_double,num)', + '(t1.q_bigint_null,num)','(t1.q_smallint_null,num)','(t1.q_tinyint_null,num)','(t1.q_int_null,num)','(t1.q_float_null,num)','(t1.q_double_null,num)', + '(t2.q_bigint,num)','(t2.q_smallint,num)','(t2.q_tinyint,num)','(t2.q_int,num)','(t2.q_float,num)','(t2.q_double,num)', + '(t2.q_bigint_null,num)','(t2.q_smallint_null,num)','(t2.q_tinyint_null,num)','(t2.q_int_null,num)','(t2.q_float_null,num)','(t2.q_double_null,num)'] + fun_column_join_1 = random.sample(base_functions,1)+random.sample(fun_fix_column_j,1) + base_fun_join_1 = str(fun_column_join_1).replace("[","").replace("]","").replace("'","").replace(", ","").replace("num",base(num)) + fun_column_join_2 = random.sample(base_functions,1)+random.sample(fun_fix_column_j,1) + base_fun_join_2 = str(fun_column_join_2).replace("[","").replace("]","").replace("'","").replace(", ","").replace("num",base(num)) + + tdSql.query("select 1-1 as base_nest from stable_1 limit 1;") + for i in range(self.fornum): + sql = "select ts , floor(asct1) from ( select " + sql += "%s as asct1, " % base_fun_1 + sql += "%s as asct2, " % base_fun_2 + sql += "%s, " % random.choice(self.s_s_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts from regular_table_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.order_where) + sql += ");" + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + tdSql.checkRows(100) + self.cur1.execute(sql) + + tdSql.query("select 1-2 as base_nest from stable_1 limit 1;") + for i in range(self.fornum): + sql = "select ts , abs(asct1) from ( select " + sql += "%s as asct1, " % base_fun_1 + sql += "%s, " % random.choice(self.s_s_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts ts from regular_table_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s )" % random.choice(self.order_where) + sql += "%s " % random.choice(self.unionall_or_union) + sql += "select ts , asct2 from ( select " + sql += "%s as asct2, " % base_fun_2 + sql += "%s, " % random.choice(self.s_s_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts ts from regular_table_1 where " + sql += "%s " % random.choice(self.q_where) + #sql += "%s " % random.choice(having_support) + sql += "%s " % random.choice(self.order_where) + sql += "%s " % random.choice(self.limit1_where) + sql += ")" + tdLog.info(sql) + tdLog.info(len(sql)) + #TD-15437 tdSql.query(sql) + #TD-15437 self.cur1.execute(sql) + + tdSql.query("select 1-3 as base_nest from stable_1 limit 1;") + for i in range(self.fornum): + sql = "select ts , min(asct1) from ( select " + sql += "%s as asct1, ts ," % base_fun_1 + sql += "%s as asct2, " % base_fun_2 + sql += "%s, " % random.choice(self.s_s_select) + sql += "%s, 
" % random.choice(self.q_select) + sql += "ts from regular_table_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s select " % random.choice(self.unionall_or_union) + sql += "%s as asct2, ts ," % base_fun_2 + sql += "%s as asct1, " % base_fun_1 + sql += "%s, " % random.choice(self.s_s_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts from regular_table_2 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.order_where) + sql += ")" + tdLog.info(sql) + tdLog.info(len(sql)) + #TD-15473 tdSql.query(sql) + #self.cur1.execute(sql) + + tdSql.query("select 1-4 as base_nest from stable_1 limit 1;") + for i in range(self.fornum): + sql = "select ts , asct1 from ( select t1.ts as ts," + sql += "%s, " % base_fun_join_1 + sql += "%s as asct1, " % base_fun_join_2 + sql += "%s, " % base_fun_join_1 + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) + sql += "t2.ts from regular_table_1 t1 , regular_table_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.q_u_where) + sql += "and %s " % random.choice(self.q_u_or_where) + sql += "%s " % random.choice(self.order_u_where) + sql += ");" + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + tdSql.checkRows(100) + self.cur1.execute(sql) + + tdSql.query("select 1-5 as base_nest from stable_1 limit 1;") + for i in range(self.fornum): + sql = "select ts ," + sql += "%s, " % base_fun_1 + sql += "%s, " % random.choice(self.q_select) + sql += "%s, " % random.choice(self.q_select) + sql += "%s " % base_fun_2 + sql += " from ( select * from regular_table_1 ) where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.order_where) + sql += " ;" + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + tdSql.checkRows(100) + self.cur1.execute(sql) + + tdSql.query("select 1-6 as base_nest from stable_1 limit 1;") + for i in range(self.fornum): + sql = "select ts , max(asct1) from ( select t1.ts as ts," + sql += "%s, " % base_fun_join_1 + sql += "%s as asct1, " % base_fun_join_2 + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) + sql += "%s, " % base_fun_join_1 + sql += "t2.ts from regular_table_1 t1 , regular_table_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.q_u_where) + sql += "and %s )" % random.choice(self.q_u_or_where) + sql += "%s " % random.choice(self.order_where) + sql += "%s " % random.choice(self.limit1_where) + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + + tdSql.query("select 1-7 as base_nest from stable_1 limit 1;") + for i in range(self.fornum): + sql = "select ts , abs(asct1) from ( select " + sql += "%s as asct1, ts ," % base_fun_1 + sql += "%s as asct2, " % base_fun_2 + sql += "%s, " % random.choice(self.q_select) + sql += "%s, " % random.choice(self.t_select) + sql += "ts from stable_1 where " + sql += "%s " % random.choice(self.qt_where) + sql += "%s " % random.choice(self.order_where) + sql += ") ;" + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + tdSql.checkRows(300) + self.cur1.execute(sql) + + tdSql.query("select 1-8 as base_nest from stable_1 limit 1;") + for i in range(self.fornum): + sql = "select ts,floor(asct1) " + sql += "from ( select " + sql += "%s, " % random.choice(self.s_s_select) + sql += "%s as asct1, ts ," % base_fun_1 + sql += "%s as asct2, " % base_fun_2 + sql += "%s, " % random.choice(self.q_select) + sql += "%s, " % 
random.choice(self.t_select) + sql += "ts from stable_1 where " + sql += "%s " % random.choice(self.qt_where) + sql += "%s " % random.choice(self.order_where) + sql += ") ;" + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + tdSql.checkRows(300) + self.cur1.execute(sql) + + tdSql.query("select 1-9 as base_nest from stable_1 limit 1;") + for i in range(self.fornum): + sql = "select ts , max(asct1) from ( select t1.ts as ts," + sql += "%s, " % base_fun_join_1 + sql += "%s as asct1, " % base_fun_join_2 + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) + sql += "t2.ts from stable_1 t1 , stable_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.t_join_where) + sql += "and %s " % random.choice(self.t_u_where) + sql += "and %s " % random.choice(self.t_u_or_where) + sql += "%s " % random.choice(self.order_u_where) + sql += "%s " % random.choice(self.limit1_where) + sql += ") ;" + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + + self.restartDnodes() + tdSql.query("select 1-10 as base_nest from stable_1 limit 1;") + for i in range(self.fornum): + sql = "select ts , min(asct1) from ( select " + sql += "%s as asct1, ts ," % base_fun_1 + sql += "%s as asct2, " % base_fun_2 + sql += "%s, " % random.choice(self.s_r_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts from stable_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.order_where) + sql += ") %s " % random.choice(self.unionall_or_union) + sql += "select ts , max(asct2) from ( select " + sql += "%s as asct1, ts ," % base_fun_1 + sql += "%s as asct2, " % base_fun_2 + sql += "%s, " % random.choice(self.s_r_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts from stable_2 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.order_where) + sql += "%s " % random.choice(self.limit1_where) + sql += ")" + tdLog.info(sql) + tdLog.info(len(sql)) + #TD-15437 tdSql.query(sql) + #TD-15437 self.cur1.execute(sql) + + #3 inter union not support + tdSql.query("select 1-11 as base_nest from stable_1 limit 1;") + for i in range(self.fornum): + sql = "select ts , min(asct1), max(asct2) from ( select " + sql += "%s as asct1, ts ," % base_fun_1 + sql += "%s as asct2, " % base_fun_2 + sql += "%s, " % random.choice(self.s_r_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts from stable_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.order_where) + #sql += "%s " % random.choice(limit1_where) + sql += " %s " % random.choice(self.unionall_or_union) + sql += " select " + sql += "%s as asct1, ts ," % base_fun_1 + sql += "%s as asct2, " % base_fun_2 + sql += "%s, " % random.choice(self.s_r_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts from stable_2 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.order_where) + sql += "%s " % random.choice(self.limit1_where) + sql += ")" + tdLog.info(sql) + tdLog.info(len(sql)) + #TD-15837 tdSql.query(sql) + # self.cur1.execute(sql) + + tdSql.query("select 1-12 as base_nest from stable_1 limit 1;") + for i in range(self.fornum): + sql = "select ts , max(asct1) from ( select t1.ts as ts," + sql += "%s, " % base_fun_join_1 + sql += "%s as asct1, " % base_fun_join_2 + sql += "t1.%s, " % 
random.choice(self.q_select) + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) + sql += "t2.ts from stable_1 t1 , stable_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.t_u_where) + sql += "and %s " % random.choice(self.t_u_or_where) + sql += "%s " % random.choice(self.order_u_where) + sql += "%s " % random.choice(self.limit1_where) + sql += ");" + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + + tdSql.query("select 1-13 as base_nest from stable_1 limit 1;") + for i in range(self.fornum): + sql = "select ts ," + sql += "%s, " % base_fun_1 + sql += "%s, " % random.choice(self.q_select) + sql += "%s, " % random.choice(self.q_select) + sql += "%s " % base_fun_2 + sql += "%s " % random.choice(self.t_select) + sql += " from ( select * from stable_1 where " + sql += "%s " % random.choice(self.qt_where) + sql += "%s " % random.choice(self.order_where) + sql += ") ;" + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + tdSql.checkRows(300) + self.cur1.execute(sql) + + tdSql.query("select 1-14 as base_nest from stable_1 limit 1;") + for i in range(self.fornum): + sql = "select avg(asct1),count(asct2) from ( select " + sql += "%s as asct1, " % base_fun_1 + sql += "%s as asct2" % base_fun_2 + sql += " from stable_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.partiton_where) + sql += "%s " % random.choice(self.order_desc_where) + sql += "%s " % random.choice([self.limit_where[2] , self.limit_where[3]] ) + sql += " ) ;" + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + + tdSql.query("select 1-15 as base_nest from stable_1 limit 1;") + for i in range(self.fornum): + sql = "select ts , max(asct1) from ( select t1.ts as ts," + sql += "%s, " % base_fun_join_1 + sql += "%s as asct1, " % base_fun_join_2 + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) + sql += "t2.%s " % random.choice(self.q_select) + sql += "from stable_1 t1, stable_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.t_join_where) + sql += " and %s " % random.choice(self.qt_u_or_where) + sql += "%s " % random.choice(self.partiton_where_j) + sql += "%s " % random.choice(self.slimit1_where) + sql += ") " + sql += "%s " % random.choice(self.order_desc_where) + sql += "%s ;" % random.choice(self.limit_u_where) + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + + #taos -f sql + startTime_taos_f = time.time() + print("taos -f %s sql start!" %baselist) + taos_cmd1 = "taos -f %s/%s.sql" % (self.testcasePath,self.testcaseFilename) + _ = subprocess.check_output(taos_cmd1, shell=True).decode("utf-8") + print("taos -f %s sql over!" 
%baselist) + endTime_taos_f = time.time() + print("taos_f total time %ds" % (endTime_taos_f - startTime_taos_f)) + + print("=========%s====over=============" %baselist) + + def function_before_26(self): + + print('=====================2.6 old function start ===========') + os.system("rm -rf %s/%s.sql" % (self.testcasePath,self.testcaseFilename)) + + self.dropandcreateDB_random("%s" %self.db_nest, 1) + + #1 select * from (select column form regular_table where <\>\in\and\or order by) + tdSql.query("select 1-1 from stable_1;") + for i in range(self.fornum): + #sql = "select ts , * from ( select " ===暂时不支持select * ,用下面这一行 + sql = "select ts from ( select " + sql += "%s, " % random.choice(self.s_s_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts from regular_table_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.order_where) + sql += ");" + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + tdSql.checkRows(100) + self.cur1.execute(sql) + + #1 outer union not support + #self.dropandcreateDB_random("%s" %db, 1) + tdSql.query("select 1-2 from stable_1;") + for i in range(self.fornum): + #sql = "select ts , * from ( select " + sql = "select ts from ( select " + sql += "%s, " % random.choice(self.s_r_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts from regular_table_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.order_where) + sql += ") union " + #sql += "select ts , * from ( select " + sql += "select ts from ( select " + sql += "%s, " % random.choice(self.s_r_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts from regular_table_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.order_where) + sql += ")" + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + tdSql.checkRows(100) + self.cur1.execute(sql) + + #self.dropandcreateDB_random("%s" %db, 1) + tdSql.query("select 1-2 from stable_1;") + for i in range(self.fornum): + #sql = "select ts , * from ( select " + sql = "select ts from ( select " + sql += "%s, " % random.choice(self.s_r_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts from regular_table_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.order_where) + sql += ") union all " + #sql += "select ts , * from ( select " + sql += "select ts from ( select " + sql += "%s, " % random.choice(self.s_r_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts from regular_table_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.order_where) + sql += ")" + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + tdSql.checkRows(200) + self.cur1.execute(sql) + + #1 inter union not support + tdSql.query("select 1-3 from stable_1;") + for i in range(self.fornum): + #sql = "select ts , * from ( select " + sql = "select ts from ( select " + sql += "%s, " % random.choice(self.s_r_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts from regular_table_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "" + sql += " union select " + sql += "%s, " % random.choice(self.s_r_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts from regular_table_2 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.order_where) + sql += ")" + tdLog.info(sql) + tdLog.info(len(sql)) + #TD-15607 tdSql.query(sql) + #tdSql.checkRows(200) + 
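Both drivers end the same way: the per-case `.sql` file accumulated during the run is replayed through the taos shell with `subprocess.check_output`, and the wall-clock time is printed. A self-contained sketch of that step — the file path is illustrative, and the `taos` CLI is assumed to be on PATH:

```python
import subprocess
import time

# Sketch of the replay step above: run the accumulated .sql file through the
# taos CLI and report wall-clock time. Path is illustrative; the taos binary
# is assumed to be installed and on PATH.
def replay_sql_file(path="/tmp/nested_query.sql"):
    start = time.time()
    out = subprocess.check_output("taos -f %s" % path, shell=True).decode("utf-8")
    print("taos -f total time %ds" % (time.time() - start))
    return out
```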
#self.cur1.execute(sql) + + tdSql.query("select 1-3 from stable_1;") + for i in range(self.fornum): + #sql = "select ts , * from ( select " + sql = "select ts from ( select " + sql += "%s, " % random.choice(self.s_r_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts from regular_table_1 where " + sql += "%s " % random.choice(self.q_where) + sql += " union all select " + sql += "%s, " % random.choice(self.s_r_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts from regular_table_2 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.order_where) + sql += ")" + tdLog.info(sql) + tdLog.info(len(sql)) + #TD-15607 tdSql.query(sql) + # tdSql.checkRows(300) + #self.cur1.execute(sql) + + #join:select * from (select column form regular_table1,regular_table2 where t1.ts=t2.ts and <\>\in\and\or order by) + #self.dropandcreateDB_random("%s" %db, 1) + tdSql.query("select 1-4 from stable_1;") + for i in range(self.fornum): + #sql = "select ts , * from ( select t1.ts ," + sql = "select * from ( select t1.ts ," + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) + sql += "t2.ts from regular_table_1 t1 , regular_table_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.q_u_where) + sql += "and %s " % random.choice(self.q_u_or_where) + sql += "%s " % random.choice(self.order_u_where) + sql += ");" + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + tdSql.checkRows(100) + self.cur1.execute(sql) + + + #2 select column from (select * form regular_table ) where <\>\in\and\or order by + #self.dropandcreateDB_random("%s" %db, 1) + tdSql.query("select 2-1 from stable_1;") + for i in range(self.fornum): + sql = "select ts ," + sql += "%s, " % random.choice(self.s_r_select) + sql += "%s " % random.choice(self.q_select) + sql += " from ( select * from regular_table_1 ) where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.order_where) + sql += " ;" + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + tdSql.checkRows(100) + self.cur1.execute(sql) + + #join: select column from (select column form regular_table1,regular_table2 )where t1.ts=t2.ts and <\>\in\and\or order by + #cross join not supported yet + tdSql.query("select 2-2 from stable_1;") + for i in range(self.fornum): + sql = "select ts , * from ( select t1.ts ," + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t1.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) + sql += "t2.%s, " % random.choice(self.q_select) + sql += "t2.ts from regular_table_1 t1 , regular_table_2 t2 ) where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.q_u_where) + sql += "%s " % random.choice(self.order_u_where) + #sql += ");" + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.error(sql) + + #3 select * from (select column\tag form stable where <\>\in\and\or order by ) + #self.dropandcreateDB_random("%s" %db, 1) + tdSql.query("select 3-1 from stable_1;") + for i in range(self.fornum): + sql = "select * from ( select " + sql += "%s, " % random.choice(self.s_s_select) + sql += "%s, " % random.choice(self.q_select) + sql += "%s, " % random.choice(self.t_select) + sql += "ts from stable_1 where " + sql += "%s " % random.choice(self.qt_where) + sql += "%s " % random.choice(self.order_where) + sql += ") ;" + tdLog.info(sql) + tdLog.info(len(sql)) + 
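Case 2-2 above is the one deliberate negative test in this stretch: the derived table is a comma join with no join condition ("cross join not supported yet", per the in-line note) and the outer WHERE clause references the inner aliases `t1`/`t2`, so the statement is handed to `tdSql.error` instead of `tdSql.query`. A minimal illustration of the rejected shape, with illustrative column names:

```python
# Shape that case 2-2 expects TDengine to reject: t1/t2 are aliases local to
# the derived table and are out of scope in the outer WHERE clause.
bad_sql = (
    "select ts, * from "
    "( select t1.ts, t1.q_int, t2.q_int, t2.ts "
    "from regular_table_1 t1, regular_table_2 t2 ) "
    "where t1.ts = t2.ts;"
)
print(bad_sql)  # the suite feeds statements like this to tdSql.error(sql)
```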
+        tdSql.query("select 3-1 from stable_1;")
+        for i in range(self.fornum):
+            sql = "select ts, "
+            sql += "%s " % random.choice(self.s_r_select)
+            sql += "from ( select "
+            sql += "%s, " % random.choice(self.s_s_select)
+            sql += "%s, " % random.choice(self.q_select)
+            sql += "%s, " % random.choice(self.t_select)
+            sql += "ts from stable_1 where "
+            sql += "%s " % random.choice(self.qt_where)
+            sql += "%s " % random.choice(self.order_where)
+            sql += ") ;"
+            tdLog.info(sql)
+            tdLog.info(len(sql))
+            tdSql.query(sql)
+            tdSql.checkRows(300)
+            self.cur1.execute(sql)
+
+        # select ts,* from (select column\tag from stable1,stable2 where t1.ts = t2.ts and <\>\in\and\or order by )
+        #self.dropandcreateDB_random("%s" %db, 1)
+        tdSql.query("select 3-2 from stable_1;")
+        for i in range(self.fornum):
+            sql = "select ts , * from ( select t1.ts , "
+            sql += "t1.%s, " % random.choice(self.s_s_select)
+            sql += "t1.%s, " % random.choice(self.q_select)
+            sql += "t2.%s, " % random.choice(self.s_s_select)
+            sql += "t2.%s, " % random.choice(self.q_select)
+            sql += "t2.ts from stable_1 t1 , stable_2 t2 where t1.ts = t2.ts and "
+            sql += "%s " % random.choice(self.t_join_where)
+            sql += "%s " % random.choice(self.order_u_where)
+            sql += ") ;"
+            tdLog.info(sql)
+            tdLog.info(len(sql))
+            # TD-15609 tdSql.query(sql)
+            # tdSql.checkRows(100)
+            #self.cur1.execute(sql)
+
+        #3 outer union, not supported
+        self.restartDnodes()
+        tdSql.query("select 3-3 from stable_1;")
+        for i in range(self.fornum):
+            #sql = "select ts , * from ( select "
+            sql = "select ts from ( select "
+            sql += "%s, " % random.choice(self.s_r_select)
+            sql += "%s, " % random.choice(self.q_select)
+            sql += "ts from stable_1 where "
+            sql += "%s " % random.choice(self.q_where)
+            sql += "%s " % random.choice(self.order_where)
+            sql += ") union "
+            sql += "select ts from ( select "
+            sql += "%s, " % random.choice(self.s_r_select)
+            sql += "%s, " % random.choice(self.q_select)
+            sql += "ts from stable_2 where "
+            sql += "%s " % random.choice(self.q_where)
+            sql += "%s " % random.choice(self.order_where)
+            sql += ")"
+            tdLog.info(sql)
+            tdLog.info(len(sql))
+            tdSql.query(sql)
+            tdSql.checkRows(300)
+            self.cur1.execute(sql)
+        for i in range(self.fornum):
+            #sql = "select ts , * from ( select "
+            sql = "select ts from ( select "
+            sql += "%s, " % random.choice(self.s_r_select)
+            sql += "%s, " % random.choice(self.q_select)
+            sql += "ts from stable_1 where "
+            sql += "%s " % random.choice(self.q_where)
+            sql += "%s " % random.choice(self.order_where)
+            sql += ") union all "
+            sql += "select ts from ( select "
+            sql += "%s, " % random.choice(self.s_r_select)
+            sql += "%s, " % random.choice(self.q_select)
+            sql += "ts from stable_2 where "
+            sql += "%s " % random.choice(self.q_where)
+            sql += "%s " % random.choice(self.order_where)
+            sql += ")"
+            tdLog.info(sql)
+            tdLog.info(len(sql))
+            tdSql.query(sql)
+            tdSql.checkRows(600)
+            self.cur1.execute(sql)
+
+        #3 inner union, not supported
+        tdSql.query("select 3-4 from stable_1;")
+        for i in range(self.fornum):
+            sql = "select ts , * from ( select "
+            sql += "%s, " % random.choice(self.s_r_select)
+            sql += "%s, " % random.choice(self.q_select)
+            sql += "ts from stable_1 where "
+            sql += "%s " % random.choice(self.q_where)
+            sql += "%s " % random.choice(self.order_where)
+            sql += " %s " % random.choice(self.unionall_or_union)
+            sql += " select "
+            sql += "%s, " % random.choice(self.s_r_select)
+            sql += "%s, " % random.choice(self.q_select)
+            sql += "ts from stable_2 where "
+            sql += "%s " % random.choice(self.q_where)
+            sql += "%s " % random.choice(self.order_where)
+            sql += ")"
+            tdLog.info(sql)
+            tdLog.info(len(sql))
+            #TD-15837 tdSql.query(sql)
+            # self.cur1.execute(sql)
+
+        #join: select * from (select column from stable1,stable2 where t1.ts=t2.ts and <\>\in\and\or order by)
+        tdSql.query("select 3-5 from stable_1;")
+        for i in range(self.fornum):
+            sql = "select * from ( select t1.ts ,"
+            sql += "t1.%s, " % random.choice(self.q_select)
+            sql += "t1.%s, " % random.choice(self.q_select)
+            sql += "t2.%s, " % random.choice(self.q_select)
+            sql += "t2.%s, " % random.choice(self.q_select)
+            sql += "t2.ts from stable_1 t1 , stable_2 t2 where t1.ts = t2.ts and "
+            sql += "%s " % random.choice(self.t_u_where)
+            sql += "%s " % random.choice(self.order_u_where)
+            sql += ");"
+            tdLog.info(sql)
+            tdLog.info(len(sql))
+            # TD-15609 tdSql.query(sql)
+            # tdSql.checkRows(100)
+            #self.cur1.execute(sql)
+
+        tdSql.query("select 3-6 from stable_1;")
+        for i in range(self.fornum):
+            sql = "select * from ( select t1.ts ,"
+            sql += "t1.%s, " % random.choice(self.q_select)
+            sql += "t1.%s, " % random.choice(self.q_select)
+            sql += "t2.%s, " % random.choice(self.q_select)
+            sql += "t2.%s, " % random.choice(self.q_select)
+            sql += "t2.ts from stable_1 t1 , stable_2 t2 where t1.ts = t2.ts and "
+            sql += "%s " % random.choice(self.t_u_or_where)
+            sql += "%s " % random.choice(self.order_u_where)
+            sql += ");"
+            tdLog.info(sql)
+            tdLog.info(len(sql))
+            # TD-15609, same as above: tdSql.query(sql)
+            # tdSql.checkRows(100)
+            #self.cur1.execute(sql)
+
+        #4 select column from (select * from stable where <\>\in\and\or order by )
+        #self.dropandcreateDB_random("%s" %db, 1)
+        tdSql.query("select 4-1 from stable_1;")
+        for i in range(self.fornum):
+            sql = "select ts , "
+            sql += "%s, " % random.choice(self.q_select)
+            sql += "%s, " % random.choice(self.q_select)
+            sql += "%s " % random.choice(self.t_select)
+            sql += " from ( select * from stable_1 where "
+            sql += "%s " % random.choice(self.qt_where)
+            sql += "%s " % random.choice(self.order_where)
+            sql += ") ;"
+            tdLog.info(sql)
+            tdLog.info(len(sql))
+            tdSql.query(sql)
+            tdSql.checkRows(300)
+            self.cur1.execute(sql)
+
+        #5 select distinct column\tag from (select * from stable where <\>\in\and\or order by limit offset )
+        tdSql.query("select 5-1 from stable_1;")
+        for i in range(self.fornum):
+            sql = "select "
+            sql += "%s " % random.choice(self.dqt_select)
+            sql += " from ( select * from stable_1 where "
+            sql += "%s " % random.choice(self.qt_where)
+            sql += "%s " % random.choice(self.order_where)
+            sql += ") ;"
+            tdLog.info(sql)
+            tdLog.info(len(sql))
+            #TD-15500 tdSql.query(sql)
+            #self.cur1.execute(sql)
+
+        #5-1 select distinct column\tag from (select calc from stable where <\>\in\and\or order by limit offset )
+        tdSql.query("select 5-2 from stable_1;")
+        for i in range(self.fornum):
+            sql = "select distinct c5_1 "
+            sql += " from ( select "
+            sql += "%s " % random.choice(self.calc_select_in_ts)
+            sql += " as c5_1 from stable_1 where "
+            sql += "%s " % random.choice(self.qt_where)
+            #sql += "%s " % random.choice(order_where)
+            sql += ") ;"
+            tdLog.info(sql)
+            tdLog.info(len(sql))
+            tdSql.query(sql)
+            #tdSql.checkRows(1)  # some functions are not merged yet and may return no result, so skip this check for now
+            self.cur1.execute(sql)
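+
+        # Illustrative only: calc_select_in_ts is assumed to pick expressions such as
+        # "bottom(q_int,20)", so test 5-2 checks distinct over a computed column, e.g.
+        #   select distinct c5_1 from ( select bottom(q_int,20) as c5_1
+        #     from stable_1 where q_bool = true ) ;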
+
+        #6-error select * from (select distinct(tag) from stable where <\>\in\and\or order by limit )
+        tdSql.query("select 6-1 from stable_1;")
+        for i in range(self.fornum):
+            sql = "select * from ( select "
+            sql += "%s " % random.choice(self.dt_select)
+            sql += " from stable_1 where "
+            sql += "%s " % random.choice(self.qt_where)
+            sql += "%s " % random.choice(self.order_desc_where)
+            sql += ") ;"
+            tdLog.info(sql)
+            tdLog.info(len(sql))
+            tdSql.error(sql)
+        tdSql.query("select 6-1 from stable_1;")
+        for i in range(self.fornum):
+            sql = "select * from ( select "
+            sql += "%s " % random.choice(self.dt_select)
+            sql += " from stable_1 where "
+            sql += "%s ) ;" % random.choice(self.qt_where)
+            tdLog.info(sql)
+            tdLog.info(len(sql))
+            tdSql.query(sql)
+            self.cur1.execute(sql)
+
+        #7-error select * from (select distinct(tag) from stable where <\>\in\and\or order by limit )
+        tdSql.query("select 7-1 from stable_1;")
+        for i in range(self.fornum):
+            sql = "select * from ( select "
+            sql += "%s " % random.choice(self.dq_select)
+            sql += " from stable_1 where "
+            sql += "%s " % random.choice(self.qt_where)
+            sql += "%s " % random.choice(self.order_desc_where)
+            sql += "%s " % random.choice([self.limit_where[0] , self.limit_where[1]] )
+            sql += ") ;"
+            tdLog.info(sql)
+            tdLog.info(len(sql))
+            tdSql.error(sql)  # distinct and order by cannot be mixed
+        tdSql.query("select 7-1 from stable_1;")
+        for i in range(self.fornum):
+            sql = "select * from ( select "
+            sql += "%s " % random.choice(self.dq_select)
+            sql += " from stable_1 where "
+            sql += "%s " % random.choice(self.qt_where)
+            #sql += "%s " % random.choice(order_desc_where)
+            sql += "%s " % random.choice([self.limit_where[0] , self.limit_where[1]] )
+            sql += ") ;"
+            tdLog.info(sql)
+            tdLog.info(len(sql))
+            tdSql.query(sql)
+            tdSql.checkRows(1)
+            self.cur1.execute(sql)
+
+        #calc_select: TWA/Diff/Derivative/Irate are not allowed to apply to super table directly
+        #8 select * from (select ts,calc from regular_table where <\>\in\and\or order by )
+
+        # dcDB = self.dropandcreateDB_random("%s" %db, 1)
+        tdSql.query("select 8-1 from stable_1;")
+        for i in range(self.fornum):
+            sql = "select * from ( select ts ,"
+            sql += "%s " % random.choice(self.calc_select_support_ts)
+            sql += "from regular_table_1 where "
+            sql += "%s " % random.choice(self.q_where)
+            sql += "%s " % random.choice(self.order_where)
+            sql += "%s " % random.choice(self.limit1_where)
+            sql += ") ;"
+            tdLog.info(sql)
+            tdLog.info(len(sql))
+            tdSql.query(sql)  # aggregate functions can no longer be used together with ts; DB error: Not a single-group group function
+            self.cur1.execute(sql)
+        tdSql.query("select 8-1 from stable_1;")
+        for i in range(self.fornum):
+            sql = "select * from ( select "
+            sql += "%s " % random.choice(self.calc_select_not_support_ts)
+            sql += "from regular_table_1 where "
+            sql += "%s " % random.choice(self.q_where)
+            sql += "%s " % random.choice(self.limit1_where)
+            sql += ") ;"
+            tdLog.info(sql)
+            tdLog.info(len(sql))
+            #TD-15651 tdSql.query(sql)  # aggregate functions can no longer be used together with ts; DB error: Not a single-group group function
+            #self.cur1.execute(sql)
+
+        for i in range(self.fornum):
+            sql = "select * from ( select "
+            sql += "%s " % random.choice(self.calc_select_in_ts)
+            sql += "from regular_table_1 where "
+            sql += "%s " % random.choice(self.q_where)
+            #sql += "%s " % random.choice(order_where)
+            sql += "%s " % random.choice(self.limit1_where)
+            sql += ") ;"
+            tdLog.info(sql)
+            tdLog.info(len(sql))
+            tdSql.query(sql)
+            self.cur1.execute(sql)
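+
+        # Illustrative only: calc_select_support_ts is assumed to hold selection
+        # functions that may sit next to ts (e.g. "top(q_int,1)"), while
+        # calc_select_not_support_ts holds aggregates that may not, so test 8-1
+        # exercises shapes like
+        #   select * from ( select ts, top(q_int,1) from regular_table_1
+        #     where q_bool = true order by ts limit 1 ) ;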
+
+        tdSql.query("select 8-2 from stable_1;")
+        for i in range(self.fornum):
+            sql = "select * from ( select t1.ts, "
+            sql += "%s " % random.choice(self.calc_select_in_support_ts_j)
+            sql += "from regular_table_1 t1, regular_table_2 t2 where t1.ts = t2.ts and "
+            sql += "%s " % random.choice(self.q_u_where)
+            sql += "%s " % random.choice(self.order_u_where)
+            sql += "%s " % random.choice(self.limit1_where)
+            sql += ") ;"
+            tdLog.info(sql)
+            tdLog.info(len(sql))
+            tdSql.query(sql)  # aggregate functions can no longer be used together with ts; DB error: Not a single-group group function
+            self.cur1.execute(sql)
+        for i in range(self.fornum):
+            sql = "select * from ( select "
+            sql += "%s " % random.choice(self.calc_select_in_not_support_ts_j)
+            sql += "from regular_table_1 t1, regular_table_2 t2 where t1.ts = t2.ts and "
+            sql += "%s " % random.choice(self.q_u_where)
+            sql += "%s " % random.choice(self.limit1_where)
+            sql += ") ;"
+            tdLog.info(sql)
+            tdLog.info(len(sql))
+            #TD-15651 tdSql.query(sql)
+            ## top returns wrong results: tdSql.checkRows(1)
+            #self.cur1.execute(sql)
+
+        #9 select * from (select ts,calc from stable where <\>\in\and\or order by )
+        # self.dropandcreateDB_random("%s" %db, 1)
+        tdSql.query("select 9-1 from stable_1;")
+        for i in range(self.fornum):
+            sql = "select * from ( select "
+            sql += "%s " % random.choice(self.calc_select_not_support_ts)
+            sql += "from stable_1 where "
+            sql += "%s " % random.choice(self.qt_where)
+            sql += "%s " % random.choice(self.limit1_where)
+            sql += ") ;"
+            tdLog.info(sql)
+            tdLog.info(len(sql))
+            #TD-15651 tdSql.query(sql)
+            # self.cur1.execute(sql)
+        tdSql.query("select 9-2 from stable_1;")
+        for i in range(self.fornum):
+            sql = "select * from ( select ts ,"
+            sql += "%s " % random.choice(self.calc_select_support_ts)
+            sql += "from stable_1 where "
+            sql += "%s " % random.choice(self.qt_where)
+            sql += "%s " % random.choice(self.order_where)
+            sql += "%s " % random.choice(self.limit1_where)
+            sql += ") ;"
+            tdLog.info(sql)
+            tdLog.info(len(sql))
+            tdSql.query(sql)
+            self.cur1.execute(sql)
+
+        tdSql.query("select 9-3 from stable_1;")
+        for i in range(self.fornum):
+            sql = "select * from ( select "
+            sql += "%s " % random.choice(self.calc_select_in_not_support_ts_j)
+            sql += "from stable_1 t1, stable_2 t2 where t1.ts = t2.ts and "
+            sql += "%s " % random.choice(self.t_join_where)
+            sql += " and %s " % random.choice(self.qt_u_or_where)
+            sql += "%s " % random.choice(self.limit1_where)
+            sql += ") ;"
+            tdLog.info(sql)
+            tdLog.info(len(sql))
+            #TD-15651 tdSql.query(sql)
+            #self.cur1.execute(sql)
+        tdSql.query("select 9-4 from stable_1;")
+        for i in range(self.fornum):
+            sql = "select * from ( select t1.ts,"
+            sql += "%s " % random.choice(self.calc_select_in_support_ts_j)
+            sql += "from stable_1 t1, stable_2 t2 where t1.ts = t2.ts and "
+            sql += "%s " % random.choice(self.t_join_where)
+            sql += " and %s " % random.choice(self.qt_u_or_where)
+            sql += "%s " % random.choice(self.order_u_where)
+            sql += "%s " % random.choice(self.limit1_where)
+            sql += ") ;"
+            tdLog.info(sql)
+            tdLog.info(len(sql))
+            tdSql.query(sql)
+            self.cur1.execute(sql)
+
+        #10 select calc from (select * from regular_table where <\>\in\and\or order by )
+        tdSql.query("select 10-1 from stable_1;")
+        for i in range(self.fornum):
+            sql = "select "
+            sql += "%s " % random.choice(self.calc_select_in_ts)
+            sql += "as calc10_1 from ( select * from regular_table_1 where "
+            sql += "%s " % random.choice(self.q_where)
+            sql += "%s " % random.choice(self.order_desc_where)
+            sql += "%s " % random.choice(self.limit1_where)
+            sql += ") ;"
+            tdLog.info(sql)
+            tdLog.info(len(sql))
+            tdSql.query(sql)
+            tdSql.checkRows(1)
+            self.cur1.execute(sql)
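+
+        # Illustrative only: test 10 applies a computed selection to a subquery over a
+        # regular table; with a hypothetical calc_select_in_ts pick it reads e.g.
+        #   select last(q_int) as calc10_1 from ( select * from regular_table_1
+        #     where q_bool = true order by ts desc limit 1 ) ;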
+
+        #10-1 select calc from (select * from regular_table where <\>\in\and\or order by )
+        # rsDn = self.restartDnodes()
+        # self.dropandcreateDB_random("%s" %db, 1)
+        # rsDn = self.restartDnodes()
+        tdSql.query("select 10-2 from stable_1;")
+        for i in range(self.fornum):
+            sql = "select "
+            sql += "%s " % random.choice(self.calc_select_all)
+            sql += "as calc10_2 from ( select * from regular_table_1 where "
+            sql += "%s " % random.choice(self.q_where)
+            sql += "%s " % random.choice(self.order_desc_where)
+            sql += "%s " % random.choice(self.limit1_where)
+            sql += ") ;"
+            tdLog.info(sql)
+            tdLog.info(len(sql))
+            #TD-15651 tdSql.query(sql)
+            # tdSql.checkRows(1)
+            #self.cur1.execute(sql)
+
+        #10-2 select calc from (select * from regular_tables where <\>\in\and\or order by )
+        tdSql.query("select 10-3 from stable_1;")
+        for i in range(self.fornum):
+            sql = "select "
+            sql += "%s as calc10_3 " % random.choice(self.calc_select_all)
+            sql += " from ( select * from regular_table_1 t1, regular_table_2 t2 where t1.ts = t2.ts and "
+            sql += "%s " % random.choice(self.q_u_where)
+            sql += " and %s " % random.choice(self.q_u_or_where)
+            sql += "%s " % random.choice(self.order_u_where)
+            sql += "%s " % random.choice(self.limit_u_where)
+            sql += ") "
+            sql += "%s ;" % random.choice(self.limit1_where)
+            tdLog.info(sql)
+            tdLog.info(len(sql))
+            #TD-15651 tdSql.query(sql)
+            #self.cur1.execute(sql)
+
+        tdSql.query("select 10-4 from stable_1;")
+        for i in range(self.fornum):
+            sql = "select "
+            sql += "%s as calc10_4 " % random.choice(self.calc_select_all)
+            sql += " from ( select * from regular_table_1 t1, regular_table_2 t2 where t1.ts = t2.ts and "
+            sql += "%s " % random.choice(self.q_u_or_where)
+            sql += " and %s " % random.choice(self.q_u_or_where)
+            sql += "%s " % random.choice(self.order_u_where)
+            sql += "%s " % random.choice(self.limit_u_where)
+            sql += ") "
+            sql += "%s ;" % random.choice(self.limit_u_where)
+            tdLog.info(sql)
+            tdLog.info(len(sql))
+            #TD-15651 tdSql.query(sql)
+            # tdSql.checkRows(1)
+            #self.cur1.execute(sql)
+
+        #11 select calc from (select * from stable where <\>\in\and\or order by limit )
+        tdSql.query("select 11-1 from stable_1;")
+        for i in range(self.fornum):
+            sql = "select "
+            sql += "%s " % random.choice(self.calc_select_in_ts)
+            sql += "as calc11_1 from ( select * from stable_1 where "
+            sql += "%s " % random.choice(self.qt_where)
+            sql += "%s " % random.choice(self.order_desc_where)
+            sql += "%s " % random.choice(self.limit1_where)
+            sql += ") ;"
+            tdLog.info(sql)
+            tdLog.info(len(sql))
+            tdSql.query(sql)
+            tdSql.checkRows(1)
+            self.cur1.execute(sql)
+
+        #11-1 select calc from (select * from stable where <\>\in\and\or order by limit )
+        tdSql.query("select 11-2 from stable_1;")
+        for i in range(self.fornum):
+            sql = "select "
+            sql += "%s " % random.choice(self.calc_select_all)
+            sql += "as calc11_1 from ( select * from stable_1 where "
+            sql += "%s " % random.choice(self.qt_where)
+            sql += "%s " % random.choice(self.order_desc_where)
+            sql += "%s " % random.choice(self.limit1_where)
+            sql += ") ;"
+            tdLog.info(sql)
+            tdLog.info(len(sql))
+            #TD-15651 tdSql.query(sql)
+            #self.cur1.execute(sql)
+            # the result is hard to compute, so skip: tdSql.checkRows(1)
+
+        #11-2 select calc from (select * from stables where <\>\in\and\or order by limit )
+        tdSql.query("select 11-3 from stable_1;")
+        for i in range(self.fornum):
+            sql = "select "
+            sql += "%s " % random.choice(self.calc_select_all)
+            sql += "as calc11_1 from ( select * from stable_1 t1, stable_2 t2 where t1.ts = t2.ts and "
+            sql += "%s " % random.choice(self.t_join_where)
+            sql += "%s " % random.choice(self.order_u_where)
+            sql += "%s " % random.choice(self.limit_u_where)
+            sql += ") "
+            sql += "%s ;" % random.choice(self.limit_u_where)
+            tdLog.info(sql)
+            tdLog.info(len(sql))
+            #TD-15651 tdSql.query(sql)
+            #self.cur1.execute(sql)
+
+        tdSql.query("select 11-4 from stable_1;")
+        for i in range(self.fornum):
+            sql = "select "
+            sql += "%s " % random.choice(self.calc_select_all)
+            sql += "as calc11_1 from ( select * from stable_1 t1, stable_2 t2 where t1.ts = t2.ts and "
+            sql += "%s " % random.choice(self.qt_u_or_where)
+            sql += "%s " % random.choice(self.order_u_where)
+            sql += "%s " % random.choice(self.limit_u_where)
+            sql += ") "
+            sql += "%s ;" % random.choice(self.limit_u_where)
+            tdLog.info(sql)
+            tdLog.info(len(sql))
+            #TD-15651 tdSql.query(sql)
+            #self.cur1.execute(sql)
+
+        #12 select calc-diff from (select * from regular_table where <\>\in\and\or order by limit )
+        ##self.dropandcreateDB_random("%s" %db, 1)
+        tdSql.query("select 12-1 from stable_1;")
+        for i in range(self.fornum):
+            sql = "select "
+            sql += "%s " % random.choice(self.calc_calculate_regular)
+            sql += " from ( select * from regular_table_1 where "
+            sql += "%s " % random.choice(self.q_where)
+            sql += "%s " % random.choice(self.order_desc_where)
+            sql += "%s " % random.choice([self.limit_where[2] , self.limit_where[3]] )
+            sql += ") ;"
+            tdLog.info(sql)
+            tdLog.info(len(sql))
+            ## derivative is not supported yet: tdSql.query(sql)
+            # tdSql.checkRows(1)
+            #self.cur1.execute(sql)
+
+        tdSql.query("select 12-2 from stable_1;")
+        for i in range(self.fornum):
+            sql = "select "
+            sql += "%s " % random.choice(self.calc_calculate_regular)
+            sql += " from ( select * from regular_table_1 t1, regular_table_2 t2 where t1.ts = t2.ts and "
+            sql += "%s " % random.choice(self.q_u_where)
+            sql += "%s " % random.choice(self.order_u_where)
+            sql += "%s " % random.choice([self.limit_where[2] , self.limit_where[3]] )
+            sql += ") ;"
+            tdLog.info(sql)
+            tdLog.info(len(sql))
+            # derivative is not supported yet: tdSql.query(sql)
+            # tdSql.checkRows(1)
+            #self.cur1.execute(sql)
+
+        tdSql.query("select 12-2.2 from stable_1;")
+        for i in range(self.fornum):
+            sql = "select "
+            sql += "%s " % random.choice(self.calc_calculate_regular)
+            sql += " from ( select * from regular_table_1 t1, regular_table_2 t2 where t1.ts = t2.ts and "
+            sql += "%s " % random.choice(self.q_u_or_where)
+            sql += "%s " % random.choice(self.order_u_where)
+            sql += "%s " % random.choice([self.limit_where[2] , self.limit_where[3]] )
+            sql += ") ;"
+            tdLog.info(sql)
+            tdLog.info(len(sql))
+            # derivative is not supported yet: tdSql.query(sql)
+            #self.cur1.execute(sql)
+
+        #12-1 select calc-diff from (select * from stable where <\>\in\and\or order by limit )
+        tdSql.query("select 12-3 from stable_1;")
+        self.restartDnodes()
+        for i in range(self.fornum):
+            sql = "select * from ( select "
+            sql += "%s " % random.choice(self.calc_calculate_regular)
+            sql += " from stable_1 where "
+            sql += "%s " % random.choice(self.q_where)
+            sql += "%s " % random.choice(self.group_where)
+            sql += ") "
+            sql += "%s " % random.choice(self.order_desc_where)
+            sql += "%s " % random.choice([self.limit_where[2] , self.limit_where[3]] )
+            sql += " ;"
+            tdLog.info(sql)
+            tdLog.info(len(sql))
+            # derivative is not supported yet: tdSql.query(sql)
+            #self.cur1.execute(sql)
+
+        tdSql.query("select 12-4 from stable_1;")
+        #join query does not support group by
+        for i in range(self.fornum):
+            sql = "select * from ( select "
+            sql += "%s " % random.choice(self.calc_calculate_regular_j)
+            sql += " from stable_1 t1, stable_2 t2 where t1.ts = t2.ts and "
+            sql += "%s " % random.choice(self.t_join_where)
+            sql += "%s " % random.choice(self.group_where_j)
+            sql += ") "
+            #sql += "%s " % random.choice(self.order_desc_where)
+            sql += "%s " % random.choice([self.limit_where[2] , self.limit_where[3]] )
+            sql += " ;"
+            tdLog.info(sql)
+            tdLog.info(len(sql))
+            #tdSql.query(sql)  # the derivative function is not supported yet; also check whether group by and partition by need to be handled separately
+            #self.cur1.execute(sql)
+
+        tdSql.query("select 12-5 from stable_1;")
+        #join query does not support group by
+        for i in range(self.fornum):
+            sql = "select * from ( select "
+            sql += "%s " % random.choice(self.calc_calculate_regular_j)
+            sql += " from stable_1 t1, stable_2 t2 where t1.ts = t2.ts and "
+            sql += "%s " % random.choice(self.qt_u_or_where)
+            sql += "%s " % random.choice(self.group_where_j)
+            sql += ") "
+            sql += "%s " % random.choice(self.order_desc_where)
+            sql += "%s " % random.choice([self.limit_where[2] , self.limit_where[3]] )
+            sql += " ;"
+            tdLog.info(sql)
+            tdLog.info(len(sql))
+            #derivative not supported: tdSql.query(sql)
+            #self.cur1.execute(sql)
+
+
+        #13 select calc-diff as diffns from (select * from stable where <\>\in\and\or order by limit )
+        tdSql.query("select 13-1 from stable_1;")
+        for i in range(self.fornum):
+            sql = "select "
+            sql += "%s " % random.choice(self.calc_calculate_regular)
+            sql += " as calc13_1 from ( select * from stable_1 where "
+            sql += "%s " % random.choice(self.qt_where)
+            sql += "%s " % random.choice(self.orders_desc_where)
+            sql += "%s " % random.choice([self.limit_where[2] , self.limit_where[3]] )
+            sql += ") ;"
+            tdLog.info(sql)
+            tdLog.info(len(sql))
+            #derivative not supported: tdSql.query(sql)
+            #self.cur1.execute(sql)
+
+        #14 select * from (select calc_aggregate_alls as agg from stable where <\>\in\and\or group by order by slimit soffset )
+        # TD-5955 select * from ( select count (q_double) from stable_1 where t_bool = true or t_bool = false group by loc order by ts asc slimit 1 ) ;
+        tdSql.query("select 14-1 from stable_1;")
+        for i in range(self.fornum):
+            sql = "select * from ( select "
+            sql += "%s as calc14_1, " % random.choice(self.calc_aggregate_all)
+            sql += "%s as calc14_2, " % random.choice(self.calc_aggregate_all)
+            sql += "%s " % random.choice(self.calc_aggregate_all)
+            sql += " as calc14_3 from stable_1 where "
+            sql += "%s " % random.choice(self.qt_where)
+            sql += "%s " % random.choice(self.group_where)
+            sql += "%s " % random.choice(self.order_desc_where)
+            sql += "%s " % random.choice(self.slimit1_where)
+            sql += ") ;"
+            tdLog.info(sql)
+            tdLog.info(len(sql))
+            #TD-15678 tdSql.query(sql)
+            # tdSql.checkRows(1)
+            #self.cur1.execute(sql)
+
+        # error: group by in the outer query
+        tdSql.query("select 14-2 from stable_1;")
+        for i in range(self.fornum):
+            sql = "select * from ( select "
+            sql += "%s as calc14_1, " % random.choice(self.calc_aggregate_all)
+            sql += "%s as calc14_2, " % random.choice(self.calc_aggregate_all)
+            sql += "%s " % random.choice(self.calc_aggregate_all)
+            sql += " as calc14_3 from stable_1 where "
+            sql += "%s " % random.choice(self.qt_where)
+            sql += "%s " % random.choice(self.group_where)
+            sql += "%s " % random.choice(self.having_support)
+            sql += "%s " % random.choice(self.orders_desc_where)
+            sql += "%s " % random.choice(self.slimit1_where)
+            sql += ") "
+            sql += "%s " % random.choice(self.group_where)
+            tdLog.info(sql)
+            tdLog.info(len(sql))
+            #TD-15678 tdSql.query(sql)
+            # tdSql.checkRows(1)
+            #self.cur1.execute(sql)
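+
+        # Illustrative only: tests 14-x wrap "agg ... group by tag slimit N" in a
+        # subquery; with hypothetical picks the generated query reads e.g.
+        #   select * from ( select count(q_int) as calc14_1, sum(q_double) as calc14_2,
+        #     avg(q_float) as calc14_3 from stable_1 where t_bool = true
+        #     group by loc slimit 1 ) ;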
+
+        #14-2 select * from (select calc_aggregate_all_js as agg from stables where <\>\in\and\or group by order by slimit soffset )
+        tdSql.query("select 14-3 from stable_1;")
+        for i in range(self.fornum):
+            sql = "select * from ( select "
+            sql += "%s as calc14_1, " % random.choice(self.calc_aggregate_all_j)
+            sql += "%s as calc14_2, " % random.choice(self.calc_aggregate_all_j)
+            sql += "%s " % random.choice(self.calc_aggregate_all_j)
+            sql += " as calc14_3 from stable_1 t1, stable_2 t2 where t1.ts = t2.ts and "
+            sql += "%s " % random.choice(self.t_join_where)
+            sql += "%s " % random.choice(self.partiton_where_j)
+            sql += "%s " % random.choice(self.slimit1_where)
+            sql += ") "
+            sql += "%s ;" % random.choice(self.limit_u_where)
+            tdLog.info(sql)
+            tdLog.info(len(sql))
+            tdSql.query(sql)
+            self.cur1.execute(sql)
+
+        tdSql.query("select 14-4 from stable_1;")
+        for i in range(self.fornum):
+            sql = "select * from ( select "
+            sql += "%s as calc14_1, " % random.choice(self.calc_aggregate_all_j)
+            sql += "%s as calc14_2, " % random.choice(self.calc_aggregate_all_j)
+            sql += "%s " % random.choice(self.calc_aggregate_all_j)
+            sql += " as calc14_3 from stable_1 t1, stable_2 t2 where t1.ts = t2.ts and "
+            sql += "%s " % random.choice(self.qt_u_or_where)
+            sql += "%s " % random.choice(self.partiton_where_j)
+            sql += "%s " % random.choice(self.slimit1_where)
+            sql += ") "
+            sql += "%s ;" % random.choice(self.limit_u_where)
+            tdLog.info(sql)
+            tdLog.info(len(sql))
+            tdSql.query(sql)
+            self.cur1.execute(sql)
+
+        #15 select * from (select calc_aggregate_regulars as agg from regular_table where <\>\in\and\or order by slimit soffset )
+        tdSql.query("select 15-1 from stable_1;")
+        for i in range(self.fornum):
+            sql = "select * from ( select "
+            sql += "%s as calc15_1, " % random.choice(self.calc_aggregate_regular)
+            sql += "%s as calc15_2, " % random.choice(self.calc_aggregate_regular)
+            sql += "%s " % random.choice(self.calc_aggregate_regular)
+            sql += " as calc15_3 from regular_table_1 where "
+            sql += "%s " % random.choice(self.q_where)
+            sql += "%s " % random.choice(self.group_where_regular)
+            sql += ") ;"
+            tdLog.info(sql)
+            tdLog.info(len(sql))
+            #tdSql.query(sql)  # Invalid function name: twa
+            # tdSql.checkRows(1)
+            #self.cur1.execute(sql)
+
+        tdSql.query("select 15-2 from stable_1;")
+        for i in range(self.fornum):
+            sql = "select * from ( select "
+            sql += "%s as calc15_1, " % random.choice(self.calc_aggregate_regular_j)
+            sql += "%s as calc15_2, " % random.choice(self.calc_aggregate_regular_j)
+            sql += "%s " % random.choice(self.calc_aggregate_regular_j)
+            sql += " as calc15_3 from regular_table_1 t1, regular_table_2 t2 where t1.ts = t2.ts and "
+            sql += "%s " % random.choice(self.q_u_where)
+            sql += "%s " % random.choice(self.group_where_regular_j)
+            sql += "%s " % random.choice(self.limit_u_where)
+            sql += ") "
+            sql += "%s ;" % random.choice(self.limit_u_where)
+            tdLog.info(sql)
+            tdLog.info(len(sql))
+            #tdSql.query(sql)  # Invalid function name: twa
+            #self.cur1.execute(sql)
+
+        tdSql.query("select 15-2.2 from stable_1;")
+        for i in range(self.fornum):
+            sql = "select * from ( select "
+            sql += "%s as calc15_1, " % random.choice(self.calc_aggregate_regular_j)
+            sql += "%s as calc15_2, " % random.choice(self.calc_aggregate_regular_j)
+            sql += "%s " % random.choice(self.calc_aggregate_regular_j)
+            sql += " as calc15_3 from regular_table_1 t1, regular_table_2 t2 where t1.ts = t2.ts and "
+            sql += "%s " % random.choice(self.q_u_or_where)
+            sql += "%s " % random.choice(self.group_where_regular_j)
+            sql += "%s " % random.choice(self.limit_u_where)
+            sql += ") "
+            sql += "%s ;" % random.choice(self.limit_u_where)
+            tdLog.info(sql)
+            tdLog.info(len(sql))
+            #tdSql.query(sql)  # Invalid function name: twa
+            #self.cur1.execute(sql)
+
+        self.restartDnodes()
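+        # Illustrative only: calc_aggregate_groupbytbname is assumed to draw
+        # twa()/irate()-style aggregates; these stay commented out below until the
+        # functions are supported, with a representative target query of
+        #   select * from ( select twa(q_int) as calc15_1 from stable_1
+        #     group by tbname ) order by calc15_1 ;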
+        tdSql.query("select 15-3 from stable_1;")
+        for i in range(self.fornum):
+            sql = "select * from ( select "
+            sql += "%s as calc15_1, " % random.choice(self.calc_aggregate_groupbytbname)
+            sql += "%s as calc15_2, " % random.choice(self.calc_aggregate_groupbytbname)
+            sql += "%s " % random.choice(self.calc_aggregate_groupbytbname)
+            sql += " as calc15_3 from stable_1 where "
+            sql += "%s " % random.choice(self.q_where)
+            sql += "%s " % random.choice(self.group_where)
+            sql += "%s " % random.choice(self.having_support)
+            sql += "%s " % random.choice(self.order_desc_where)
+            sql += ") "
+            sql += "order by calc15_1 "
+            sql += "%s " % random.choice(self.limit_where)
+            tdLog.info(sql)
+            tdLog.info(len(sql))
+            #tdSql.query(sql)  # Invalid function name: twa; the order by may also have to go
+            #self.cur1.execute(sql)
+
+        tdSql.query("select 15-4 from stable_1;")
+        for i in range(self.fornum):
+            sql = "select * from ( select "
+            sql += "%s as calc15_1, " % random.choice(self.calc_aggregate_groupbytbname_j)
+            sql += "%s as calc15_2, " % random.choice(self.calc_aggregate_groupbytbname_j)
+            sql += "%s " % random.choice(self.calc_aggregate_groupbytbname_j)
+            sql += " as calc15_3 from stable_1 t1, stable_2 t2 where t1.ts = t2.ts and "
+            sql += "%s " % random.choice(self.t_join_where)
+            sql += "%s " % random.choice(self.group_where_j)
+            sql += "%s " % random.choice(self.having_support_j)
+            #sql += "%s " % random.choice(orders_desc_where)
+            sql += ") "
+            sql += "order by calc15_1 "
+            sql += "%s " % random.choice(self.limit_u_where)
+            tdLog.info(sql)
+            tdLog.info(len(sql))
+            #tdSql.query(sql)  # Invalid function name: irate
+            #self.cur1.execute(sql)
+
+        tdSql.query("select 15-4.2 from stable_1;")
+        for i in range(self.fornum):
+            sql = "select * from ( select "
+            sql += "%s as calc15_1, " % random.choice(self.calc_aggregate_groupbytbname_j)
+            sql += "%s as calc15_2, " % random.choice(self.calc_aggregate_groupbytbname_j)
+            sql += "%s " % random.choice(self.calc_aggregate_groupbytbname_j)
+            sql += " as calc15_3 from stable_1 t1, stable_2 t2 where t1.ts = t2.ts and "
+            sql += "%s " % random.choice(self.qt_u_or_where)
+            sql += "%s " % random.choice(self.group_where_j)
+            sql += "%s " % random.choice(self.having_support_j)
+            sql += "%s " % random.choice(self.orders_desc_where)
+            sql += ") "
+            sql += "order by calc15_1 "
+            sql += "%s " % random.choice(self.limit_u_where)
+            tdLog.info(sql)
+            tdLog.info(len(sql))
+            #TD-15678 #tdSql.query(sql)
+            #self.cur1.execute(sql)
+
+        tdSql.query("select 15-5 from stable_1;")
+        for i in range(self.fornum):
+            sql = "select * from ( select "
+            sql += "%s as calc15_1, " % random.choice(self.calc_aggregate_groupbytbname)
+            sql += "%s as calc15_2, " % random.choice(self.calc_aggregate_groupbytbname)
+            sql += "%s " % random.choice(self.calc_aggregate_groupbytbname)
+            sql += " as calc15_3 from stable_1 where "
+            sql += "%s " % random.choice(self.q_where)
+            sql += "%s " % random.choice(self.group_where)
+            sql += ") "
+            sql += "order by calc15_1 "
+            sql += "%s " % random.choice(self.limit_where)
+            tdLog.info(sql)
+            tdLog.info(len(sql))
+            #tdSql.query(sql)  # Invalid function name: irate
+            #self.cur1.execute(sql)
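+
+        # Illustrative only: tests 16-x order the outer query by an alias defined in
+        # the subquery, e.g.
+        #   select * from ( select spread(q_int) as calc16_0, count(q_int) as calc16_1
+        #     from stable_1 where q_bool = true ) order by calc16_0 limit 1 ;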
+
+        #16 select * from (select calc_aggregate_regulars as agg from regular_table where <\>\in\and\or order by limit offset )
+        #self.dropandcreateDB_random("%s" %db, 1)
+        tdSql.query("select 16-1 from stable_1;")
+        for i in range(self.fornum):
+            sql = "select * from ( select "
+            sql += "%s as calc16_0 , " % random.choice(self.calc_calculate_all)
+            sql += "%s as calc16_1 , " % random.choice(self.calc_aggregate_all)
+            sql += "%s as calc16_2 " % random.choice(self.calc_select_in)
+            sql += " from stable_1 where "
+            sql += "%s " % random.choice(self.q_where)
+            sql += "%s " % random.choice(self.group_where)
+            #sql += "%s " % random.choice(having_support)  # having and partition cannot be mixed
+            sql += ") "
+            sql += "order by calc16_0 "
+            sql += "%s " % random.choice(self.limit1_where)
+            tdLog.info(sql)
+            tdLog.info(len(sql))
+            #TD-15651 tdSql.query(sql)
+            #self.cur1.execute(sql)
+
+        tdSql.query("select 16-2 from stable_1;")
+        for i in range(self.fornum):
+            sql = "select * from ( select "
+            sql += "%s as calc16_0 " % random.choice(self.calc_calculate_all_j)
+            sql += ", %s as calc16_1 " % random.choice(self.calc_aggregate_all_j)
+            #sql += ", %s as calc16_2 " % random.choice(self.calc_select_in_j)
+            sql += " from stable_1 t1, stable_2 t2 where t1.ts = t2.ts and "
+            sql += "%s " % random.choice(self.t_join_where)
+            sql += ") "
+            sql += "order by calc16_0 "
+            sql += "%s " % random.choice(self.limit1_where)
+            tdLog.info(sql)
+            tdLog.info(len(sql))
+            tdSql.query(sql)
+            self.cur1.execute(sql)
+
+        tdSql.query("select 16-2.2 from stable_1;")
+        for i in range(self.fornum):
+            sql = "select * from ( select "
+            sql += "%s as calc16_0 " % random.choice(self.calc_calculate_all_j)
+            sql += ", %s as calc16_1 " % random.choice(self.calc_aggregate_all_j)
+            sql += " from stable_1 t1, stable_2 t2 where t1.ts = t2.ts and "
+            sql += "%s " % random.choice(self.qt_u_or_where)
+            sql += ") "
+            sql += "order by calc16_0 "
+            sql += "%s " % random.choice(self.limit1_where)
+            tdLog.info(sql)
+            tdLog.info(len(sql))
+            tdSql.query(sql)
+            self.cur1.execute(sql)
+
+        tdSql.query("select 16-3 from stable_1;")
+        for i in range(self.fornum):
+            sql = "select * from ( select "
+            sql += "%s as calc16_1 " % random.choice(self.calc_calculate_regular)
+            sql += " from regular_table_1 where "
+            sql += "%s " % random.choice(self.q_where)
+            sql += "limit 2 ) "
+            sql += "%s " % random.choice(self.limit1_where)
+            tdLog.info(sql)
+            tdLog.info(len(sql))
+            #tdSql.query(sql)  # Invalid function name: derivative
+            #self.cur1.execute(sql)
+
+        tdSql.query("select 16-4 from stable_1;")
+        for i in range(self.fornum):
+            sql = "select * from ( select "
+            sql += "%s as calc16_1 " % random.choice(self.calc_calculate_regular_j)
+            sql += " from regular_table_1 t1, regular_table_2 t2 where t1.ts = t2.ts and "
+            sql += "%s " % random.choice(self.q_u_where)
+            sql += "limit 2 ) "
+            sql += "%s " % random.choice(self.limit1_where)
+            tdLog.info(sql)
+            tdLog.info(len(sql))
+            #tdSql.query(sql)  # Invalid function name: derivative
+            #self.cur1.execute(sql)
+
+        tdSql.query("select 16-4.2 from stable_1;")
+        for i in range(self.fornum):
+            sql = "select * from ( select "
+            sql += "%s as calc16_1 " % random.choice(self.calc_calculate_regular_j)
+            sql += " from regular_table_1 t1, regular_table_2 t2 where t1.ts = t2.ts and "
+            sql += "%s " % random.choice(self.q_u_or_where)
+            sql += "limit 2 ) "
+            sql += "%s " % random.choice(self.limit1_where)
+            tdLog.info(sql)
+            tdLog.info(len(sql))
+            #tdSql.query(sql)  # Invalid function name: derivative
+            #self.cur1.execute(sql)
+
+        tdSql.query("select 16-5 from stable_1;")
+        for i in range(self.fornum):
+            sql = "select * from ( select "
+            sql += "%s as calc16_1 , " % random.choice(self.calc_calculate_all)
+            sql += "%s as calc16_1 , " % random.choice(self.calc_calculate_regular)
+            sql += "%s as calc16_2 " % random.choice(self.calc_select_all)
+            sql += " from stable_1 where "
+            sql += "%s " % random.choice(self.q_where)
+            sql += "%s " % random.choice(self.group_where)
+            #sql += "%s " % random.choice(having_support)
+            sql += ") "
+            sql += "order by calc16_1 "
+            sql += "%s " % random.choice(self.limit1_where)
+            tdLog.info(sql)
+            tdLog.info(len(sql))
+            # tdSql.query(sql)
+            #self.cur1.execute(sql)
+
+        tdSql.query("select 16-6 from stable_1;")
+        for i in range(self.fornum):
+            sql = "select * from ( select "
+            sql += "%s as calc16_1 " % random.choice(self.calc_calculate_groupbytbname)
+            sql += " from stable_1 where "
+            sql += "%s " % random.choice(self.q_where)
+            sql += "%s " % random.choice(self.group_where)
+            sql += "limit 2 ) "
+            sql += "%s " % random.choice(self.limit1_where)
+            tdLog.info(sql)
+            tdLog.info(len(sql))
+            # Invalid function name: derivative: tdSql.query(sql)
+            #self.cur1.execute(sql)
+
+        tdSql.query("select 16-7 from stable_1;")
+        for i in range(self.fornum):
+            sql = "select * from ( select "
+            sql += "%s as calc16_1 " % random.choice(self.calc_calculate_groupbytbname_j)
+            sql += " from stable_1 t1, stable_2 t2 where t1.ts = t2.ts and "
+            sql += "%s " % random.choice(self.t_join_where)
+            sql += "limit 2 ) "
+            sql += "%s " % random.choice(self.limit1_where)
+            tdLog.info(sql)
+            tdLog.info(len(sql))
+            # Invalid function name: derivative: tdSql.query(sql)
+            #self.cur1.execute(sql)
+
+        tdSql.query("select 16-8 from stable_1;")
+        for i in range(self.fornum):
+            sql = "select * from ( select "
+            sql += "%s as calc16_1 " % random.choice(self.calc_calculate_groupbytbname_j)
+            sql += " from stable_1 t1, stable_2 t2 where t1.ts = t2.ts and "
+            sql += "%s " % random.choice(self.qt_u_or_where)
+            sql += "limit 2 ) "
+            sql += "%s " % random.choice(self.limit1_where)
+            tdLog.info(sql)
+            tdLog.info(len(sql))
+            # Invalid function name: derivative: tdSql.query(sql)
+            #self.cur1.execute(sql)
+
+        #17 select apercentile from (select calc_aggregate_alls from regular_table or stable where <\>\in\and\or interval_sliding group by having order by limit offset )interval_sliding
+        #self.dropandcreateDB_random("%s" %db, 1)
+        tdSql.query("select 17-1 from stable_1;")
+        for i in range(self.fornum):
+            #this is having_support, but tag-select cannot mix with last_row; other selects can
+            sql = "select apercentile(cal17_0, %d)/10 ,apercentile(cal17_1, %d)/1000 ,apercentile(cal17_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000))
+            sql += "%s as cal17_0 , " % random.choice(self.calc_calculate_all)
+            sql += "%s as cal17_1 ," % random.choice(self.calc_aggregate_all)
+            sql += "%s as cal17_2 " % random.choice(self.calc_aggregate_all)
+            sql += " from stable_1 where "
+            sql += "%s " % random.choice(self.qt_where)
+            sql += "%s " % random.choice(self.partiton_where)
+            sql += "%s " % random.choice(self.interval_sliding)
+            #sql += "%s " % random.choice(having_support)
+            #sql += "%s " % random.choice(order_where)
+            sql += "%s " % random.choice(self.limit1_where)
+            sql += ") "
+            #sql += "%s " % random.choice(interval_sliding)
+            tdLog.info(sql)
+            tdLog.info(len(sql))
+            tdSql.query(sql)
+            self.cur1.execute(sql)
+
+        tdSql.query("select 17-2 from stable_1;")
+        for i in range(self.fornum):
+            #this is having_support, but tag-select cannot mix with last_row; other selects can
+            sql = "select apercentile(cal17_0, %d)/10 ,apercentile(cal17_1, %d)/1000 ,apercentile(cal17_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000))
+            sql += "%s as cal17_0 , " % random.choice(self.calc_calculate_all_j)
+            sql += "%s as cal17_1 ," % random.choice(self.calc_aggregate_all_j)
+            sql += "%s as cal17_2 " % random.choice(self.calc_aggregate_all_j)
"%s as cal17_2 " % random.choice(self.calc_aggregate_all_j) + sql += " from stable_1 t1, stable_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.t_join_where) + sql += "%s " % random.choice(self.interval_sliding) + sql += "%s " % random.choice(self.limit_u_where) + sql += ") " + #sql += "%s " % random.choice(interval_sliding) + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + + tdSql.query("select 17-2.2 from stable_1;") + for i in range(self.fornum): + #this is having_support , but tag-select cannot mix with last_row,other select can + sql = "select apercentile(cal17_0, %d)/10 ,apercentile(cal17_1, %d)/1000 ,apercentile(cal17_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000)) + sql += "%s as cal17_0 , " % random.choice(self.calc_calculate_all_j) + sql += "%s as cal17_1 ," % random.choice(self.calc_aggregate_all_j) + sql += "%s as cal17_2 " % random.choice(self.calc_aggregate_all_j) + sql += " from stable_1 t1, stable_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.qt_u_or_where) + sql += "%s " % random.choice(self.interval_sliding) + #sql += "%s " % random.choice(self.order_u_where) + sql += "%s " % random.choice(self.limit_u_where) + sql += ") " + #sql += "%s " % random.choice(self.interval_sliding) + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + + self.restartDnodes() + tdSql.query("select 17-3 from stable_1;") + for i in range(self.fornum): + #this is having_tagnot_support , because tag-select cannot mix with last_row... + sql = "select apercentile(cal17_1, %d)/1000 ,apercentile(cal17_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000)) + sql += "%s as cal17_1 ," % random.choice(self.calc_aggregate_all) + sql += "%s as cal17_2 " % random.choice(self.calc_aggregate_all) + sql += " from stable_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.partiton_where) + sql += "%s " % random.choice(self.interval_sliding) + #sql += "%s " % random.choice(self.having_tagnot_support) + #sql += "%s " % random.choice(self.order_where) + sql += "%s " % random.choice(self.limit1_where) + sql += ") " + #sql += "%s " % random.choice(self.interval_sliding) + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + + tdSql.query("select 17-4 from stable_1;") + for i in range(self.fornum): + #this is having_tagnot_support , because tag-select cannot mix with last_row... + sql = "select apercentile(cal17_1, %d)/1000 ,apercentile(cal17_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000)) + sql += "%s as cal17_1 ," % random.choice(self.calc_aggregate_all_j) + sql += "%s as cal17_2 " % random.choice(self.calc_aggregate_all_j) + sql += " from stable_1 t1, stable_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.t_join_where) + sql += "%s " % random.choice(self.interval_sliding) + #sql += "%s " % random.choice(order_u_where) + sql += "%s " % random.choice(self.limit_u_where) + sql += ") " + #sql += "%s " % random.choice(interval_sliding) + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + + tdSql.query("select 17-4.2 from stable_1;") + for i in range(self.fornum): + #this is having_tagnot_support , because tag-select cannot mix with last_row... 
+            sql = "select apercentile(cal17_1, %d)/1000 ,apercentile(cal17_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000))
+            sql += "%s as cal17_1 ," % random.choice(self.calc_aggregate_all_j)
+            sql += "%s as cal17_2 " % random.choice(self.calc_aggregate_all_j)
+            sql += " from stable_1 t1, stable_2 t2 where t1.ts = t2.ts and "
+            sql += "%s " % random.choice(self.qt_u_or_where)
+            sql += "%s " % random.choice(self.interval_sliding)
+            #sql += "%s " % random.choice(self.order_u_where)
+            sql += "%s " % random.choice(self.limit_u_where)
+            sql += ") "
+            #sql += "%s " % random.choice(self.interval_sliding)
+            tdLog.info(sql)
+            tdLog.info(len(sql))
+            tdSql.query(sql)
+            self.cur1.execute(sql)
+
+        tdSql.query("select 17-5 from stable_1;")
+        for i in range(self.fornum):
+            #having_not_support
+            sql = "select apercentile(cal17_1, %d)/1000 ,apercentile(cal17_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000))
+            sql += "%s as cal17_1 ," % random.choice(self.calc_aggregate_all)
+            sql += "%s as cal17_2 " % random.choice(self.calc_aggregate_all)
+            sql += " from stable_1 where "
+            sql += "%s " % random.choice(self.qt_where)
+            sql += "%s " % random.choice(self.partiton_where)
+            sql += "%s " % random.choice(self.interval_sliding)
+            # sql += "%s " % random.choice(self.having_not_support)
+            # sql += "%s " % random.choice(self.order_where)
+            sql += "%s " % random.choice(self.limit1_where)
+            sql += ") "
+            #sql += "%s " % random.choice(self.interval_sliding)
+            tdLog.info(sql)
+            tdLog.info(len(sql))
+            tdSql.query(sql)
+            self.cur1.execute(sql)
+
+        tdSql.query("select 17-6 from stable_1;")
+        for i in range(self.fornum):
+            sql = "select apercentile(cal17_1, %d)/1000 ,apercentile(cal17_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000))
+            sql += "%s as cal17_1 ," % random.choice(self.calc_aggregate_all)
+            sql += "%s as cal17_2 " % random.choice(self.calc_aggregate_all)
+            sql += " from stable_1 where "
+            sql += "%s " % random.choice(self.q_where)
+            sql += "%s " % random.choice(self.interval_sliding)
+            #sql += "%s " % random.choice(self.order_where)
+            sql += "%s " % random.choice(self.limit1_where)
+            sql += ") "
+            #sql += "%s " % random.choice(interval_sliding)
+            tdLog.info(sql)
+            tdLog.info(len(sql))
+            tdSql.query(sql)
+            self.cur1.execute(sql)
+
+        tdSql.query("select 17-7 from stable_1;")
+        for i in range(self.fornum):
+            sql = "select apercentile(cal17_1, %d)/1000 ,apercentile(cal17_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000))
+            sql += "%s as cal17_1 ," % random.choice(self.calc_aggregate_all_j)
+            sql += "%s as cal17_2 " % random.choice(self.calc_aggregate_all_j)
+            sql += " from stable_1_1 t1, stable_1_2 t2 where t1.ts = t2.ts and "
+            sql += "%s " % random.choice(self.q_u_where)
+            sql += "%s " % random.choice(self.interval_sliding)
+            #sql += "%s " % random.choice(self.order_u_where)
+            sql += "%s " % random.choice(self.limit1_where)
+            sql += ") "
+            #sql += "%s " % random.choice(interval_sliding)
+            tdLog.info(sql)
+            tdLog.info(len(sql))
+            tdSql.query(sql)
+            self.cur1.execute(sql)
+
+        tdSql.query("select 17-7.2 from stable_1;")
+        for i in range(self.fornum):
+            sql = "select apercentile(cal17_1, %d)/1000 ,apercentile(cal17_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000))
+            sql += "%s as cal17_1 ," % random.choice(self.calc_aggregate_all_j)
+            sql += "%s as cal17_2 " % random.choice(self.calc_aggregate_all_j)
+            sql += " from stable_1_1 t1, stable_1_2 t2 where t1.ts = t2.ts and "
+            sql += "%s " % random.choice(self.q_u_or_where)
+            sql += "%s " % random.choice(self.interval_sliding)
+            #sql += "%s " % random.choice(self.order_u_where)
+            sql += "%s " % random.choice(self.limit1_where)
+            sql += ") "
+            #sql += "%s " % random.choice(interval_sliding)
+            tdLog.info(sql)
+            tdLog.info(len(sql))
+            tdSql.query(sql)
+            self.cur1.execute(sql)
+
+        self.restartDnodes()
+        tdSql.query("select 17-8 from stable_1;")
+        for i in range(self.fornum):
+            sql = "select apercentile(cal17_1, %d)/1000 ,apercentile(cal17_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000))
+            sql += "%s as cal17_1 ," % random.choice(self.calc_aggregate_all)
+            sql += "%s as cal17_2 " % random.choice(self.calc_aggregate_all)
+            sql += " from regular_table_1 where "
+            sql += "%s " % random.choice(self.q_where)
+            sql += "%s " % random.choice(self.interval_sliding)
+            #sql += "%s " % random.choice(self.order_where)
+            sql += "%s " % random.choice(self.limit1_where)
+            sql += ") "
+            #sql += "%s " % random.choice(interval_sliding)
+            tdLog.info(sql)
+            tdLog.info(len(sql))
+            tdSql.query(sql)
+            self.cur1.execute(sql)
+
+        tdSql.query("select 17-9 from stable_1;")
+        for i in range(self.fornum):
+            sql = "select apercentile(cal17_1, %d)/1000 ,apercentile(cal17_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000))
+            sql += "%s as cal17_1 ," % random.choice(self.calc_aggregate_all_j)
+            sql += "%s as cal17_2 " % random.choice(self.calc_aggregate_all_j)
+            sql += " from regular_table_1 t1, regular_table_2 t2 where t1.ts = t2.ts and "
+            sql += "%s " % random.choice(self.q_u_where)
+            sql += "%s " % random.choice(self.interval_sliding)
+            #sql += "%s " % random.choice(self.order_u_where)
+            sql += "%s " % random.choice(self.limit_u_where)
+            sql += ") "
+            #sql += "%s " % random.choice(interval_sliding)
+            tdLog.info(sql)
+            tdLog.info(len(sql))
+            tdSql.query(sql)
+            self.cur1.execute(sql)
+
+        tdSql.query("select 17-10 from stable_1;")
+        for i in range(self.fornum):
+            sql = "select apercentile(cal17_1, %d)/1000 ,apercentile(cal17_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000))
+            sql += "%s as cal17_1 ," % random.choice(self.calc_aggregate_all_j)
+            sql += "%s as cal17_2 " % random.choice(self.calc_aggregate_all_j)
+            sql += " from regular_table_1 t1, regular_table_2 t2 where t1.ts = t2.ts and "
+            sql += "%s " % random.choice(self.q_u_or_where)
+            sql += "%s " % random.choice(self.interval_sliding)
+            #sql += "%s " % random.choice(self.order_u_where)
+            sql += "%s " % random.choice(self.limit_u_where)
+            sql += ") "
+            #sql += "%s " % random.choice(interval_sliding)
+            tdLog.info(sql)
+            tdLog.info(len(sql))
+            tdSql.query(sql)
+            self.cur1.execute(sql)
+
+        #18 select apercentile from (select calc_aggregate_alls from regular_table or stable where <\>\in\and\or session order by limit )interval_sliding
+        tdSql.query("select 18-1 from stable_1;")
+        for i in range(self.fornum):
+            sql = "select apercentile(cal18_1, %d)/1000 ,apercentile(cal18_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000))
+            sql += "%s as cal18_1 ," % random.choice(self.calc_aggregate_all)
+            sql += "%s as cal18_2 " % random.choice(self.calc_aggregate_all)
+            sql += " from regular_table_1 where "
+            sql += "%s " % random.choice(self.q_where)
+            sql += "%s " % random.choice(self.session_where)
+            #sql += "%s " % random.choice(self.fill_where)
+            #sql += "%s " % random.choice(self.order_where)
+            sql += "%s " % random.choice(self.limit1_where)
+            sql += ") "
+            #sql += "%s " % random.choice(interval_sliding)
+            tdLog.info(sql)
+            tdLog.info(len(sql))
+            tdSql.query(sql)
+            self.cur1.execute(sql)
+
+        tdSql.query("select 18-2 from stable_1;")
+        for i in range(self.fornum):
+            sql = "select apercentile(cal18_1, %d)/1000 ,apercentile(cal18_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000))
+            sql += "%s as cal18_1 ," % random.choice(self.calc_aggregate_all_j)
+            sql += "%s as cal18_2 " % random.choice(self.calc_aggregate_all_j)
+            sql += " from regular_table_1 t1, regular_table_2 t2 where t1.ts = t2.ts and "
+            sql += "%s " % random.choice(self.q_u_where)
+            sql += "%s " % random.choice(self.session_u_where)
+            #sql += "%s " % random.choice(self.order_u_where)
+            sql += "%s " % random.choice(self.limit_u_where)
+            sql += ") "
+            #sql += "%s " % random.choice(interval_sliding)
+            tdLog.info(sql)
+            tdLog.info(len(sql))
+            tdSql.query(sql)
+            self.cur1.execute(sql)
+
+        tdSql.query("select 18-2.2 from stable_1;")
+        for i in range(self.fornum):
+            sql = "select apercentile(cal18_1, %d)/1000 ,apercentile(cal18_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000))
+            sql += "%s as cal18_1 ," % random.choice(self.calc_aggregate_all_j)
+            sql += "%s as cal18_2 " % random.choice(self.calc_aggregate_all_j)
+            sql += " from regular_table_1 t1, regular_table_2 t2 where t1.ts = t2.ts and "
+            sql += "%s " % random.choice(self.q_u_or_where)
+            sql += "%s " % random.choice(self.session_u_where)
+            #sql += "%s " % random.choice(self.order_u_where)
+            sql += "%s " % random.choice(self.limit_u_where)
+            sql += ") "
+            #sql += "%s " % random.choice(self.interval_sliding)
+            tdLog.info(sql)
+            tdLog.info(len(sql))
+            tdSql.query(sql)
+            self.cur1.execute(sql)
+
+        self.restartDnodes()
+        tdSql.query("select 18-3 from stable_1;")
+        for i in range(self.fornum):
+            sql = "select apercentile(cal18_1, %d)/1000 ,apercentile(cal18_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000))
+            sql += "%s as cal18_1 ," % random.choice(self.calc_aggregate_all)
+            sql += "%s as cal18_2 " % random.choice(self.calc_aggregate_all)
+            sql += " from stable_1_1 where "
+            sql += "%s " % random.choice(self.q_where)
+            sql += "%s " % random.choice(self.session_where)
+            #sql += "%s " % random.choice(self.fill_where)
+            #sql += "%s " % random.choice(self.order_where)
+            sql += "%s " % random.choice(self.limit1_where)
+            sql += ") "
+            #sql += "%s " % random.choice(self.interval_sliding)
+            tdLog.info(sql)
+            tdLog.info(len(sql))
+            tdSql.query(sql)
+            self.cur1.execute(sql)
+
+        tdSql.query("select 18-4 from stable_1;")
+        for i in range(self.fornum):
+            sql = "select apercentile(cal18_1, %d)/1000 ,apercentile(cal18_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000))
+            sql += "%s as cal18_1 ," % random.choice(self.calc_aggregate_all_j)
+            sql += "%s as cal18_2 " % random.choice(self.calc_aggregate_all_j)
+            sql += " from stable_1_1 t1, regular_table_2 t2 where t1.ts = t2.ts and "
+            sql += "%s " % random.choice(self.q_u_where)
+            sql += "%s " % random.choice(self.session_u_where)
+            #sql += "%s " % random.choice(self.order_u_where)
+            sql += "%s " % random.choice(self.limit_u_where)
+            sql += ") "
+            #sql += "%s " % random.choice(self.interval_sliding)
+            tdLog.info(sql)
+            tdLog.info(len(sql))
+            tdSql.query(sql)
+            self.cur1.execute(sql)
+
+        tdSql.query("select 18-4.2 from stable_1;")
+        for i in range(self.fornum):
+            sql = "select apercentile(cal18_1, %d)/1000 ,apercentile(cal18_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000))
+            sql += "%s as cal18_1 ," % random.choice(self.calc_aggregate_all_j)
+            sql += "%s as cal18_2 " % random.choice(self.calc_aggregate_all_j)
+            sql += " from stable_1_1 t1, regular_table_2 t2 where t1.ts = t2.ts and "
+            sql += "%s " % random.choice(self.q_u_or_where)
+            sql += "%s " % random.choice(self.session_u_where)
+            #sql += "%s " % random.choice(self.order_u_where)
+            sql += "%s " % random.choice(self.limit_u_where)
+            sql += ") "
+            #sql += "%s " % random.choice(self.interval_sliding)
+            tdLog.info(sql)
+            tdLog.info(len(sql))
+            tdSql.query(sql)
+            self.cur1.execute(sql)
+
+        tdSql.query("select 18-5 from stable_1;")
+        for i in range(self.fornum):
+            sql = "select apercentile(cal18_1, %d)/1000 ,apercentile(cal18_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000))
+            sql += "%s as cal18_1 ," % random.choice(self.calc_aggregate_all)
+            sql += "%s as cal18_2 " % random.choice(self.calc_aggregate_all)
+            sql += " from stable_1 where "
+            sql += "%s " % random.choice(self.q_where)
+            sql += "%s " % random.choice(self.session_where)
+            #sql += "%s " % random.choice(self.fill_where)
+            #sql += "%s " % random.choice(self.order_where)
+            sql += "%s " % random.choice(self.limit1_where)
+            sql += ") "
+            #sql += "%s " % random.choice(interval_sliding)
+            tdLog.info(sql)
+            tdLog.info(len(sql))
+            tdSql.query(sql)
+            self.cur1.execute(sql)
+
+        tdSql.query("select 18-6 from stable_1;")
+        for i in range(self.fornum):
+            sql = "select apercentile(cal18_1, %d)/1000 ,apercentile(cal18_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000))
+            sql += "%s as cal18_1 ," % random.choice(self.calc_aggregate_all_j)
+            sql += "%s as cal18_2 " % random.choice(self.calc_aggregate_all_j)
+            sql += " from stable_1 t1, stable_2 t2 where t1.ts = t2.ts and "
+            sql += "%s " % random.choice(self.t_join_where)
+            sql += "%s " % random.choice(self.session_u_where)
+            #sql += "%s " % random.choice(self.order_u_where)
+            sql += "%s " % random.choice(self.limit_u_where)
+            sql += ") "
+            #sql += "%s " % random.choice(self.interval_sliding)
+            tdLog.info(sql)
+            tdLog.info(len(sql))
+            tdSql.query(sql)
+            self.cur1.execute(sql)
+
+        tdSql.query("select 18-7 from stable_1;")
+        for i in range(self.fornum):
+            sql = "select apercentile(cal18_1, %d)/1000 ,apercentile(cal18_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000))
+            sql += "%s as cal18_1 ," % random.choice(self.calc_aggregate_all_j)
+            sql += "%s as cal18_2 " % random.choice(self.calc_aggregate_all_j)
+            sql += " from stable_1 t1, stable_2 t2 where t1.ts = t2.ts and "
+            sql += "%s " % random.choice(self.qt_u_or_where)
+            sql += "%s " % random.choice(self.session_u_where)
+            #sql += "%s " % random.choice(self.order_u_where)
+            sql += "%s " % random.choice(self.limit_u_where)
+            sql += ") "
+            #sql += "%s " % random.choice(self.interval_sliding)
+            tdLog.info(sql)
+            tdLog.info(len(sql))
+            tdSql.query(sql)
+            self.cur1.execute(sql)
+
+        #19 select apercentile from (select calc_aggregate_alls from regular_table or stable where <\>\in\and\or state_window order by limit )interval_sliding
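+        # Illustrative only: tests 19-x use a state window inside the subquery; with
+        # hypothetical picks test 19-1 aims at e.g.
+        #   select apercentile(cal19_1, 50)/1000 ,apercentile(cal19_2, 50)*10+1 from
+        #     ( select count(q_int) as cal19_1 ,sum(q_double) as cal19_2
+        #       from regular_table_1 where q_bool = true state_window(q_int) limit 1 ) ;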
+        #self.dropandcreateDB_random("%s" %db, 1)
+        tdSql.query("select 19-1 from stable_1;")
+        for i in range(self.fornum):
+            sql = "select apercentile(cal19_1, %d)/1000 ,apercentile(cal19_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000))
+            sql += "%s as cal19_1 ," % random.choice(self.calc_aggregate_all)
+            sql += "%s as cal19_2 " % random.choice(self.calc_aggregate_all)
+            sql += " from regular_table_1 where "
+            sql += "%s " % random.choice(self.q_where)
+            sql += "%s " % random.choice(self.state_window)
+            #sql += "%s " % random.choice(self.order_where)
+            sql += "%s " % random.choice(self.limit1_where)
+            sql += ") "
+            tdLog.info(sql)
+            tdLog.info(len(sql))
+            tdSql.query(sql)
+            self.cur1.execute(sql)
+
+        tdSql.query("select 19-2 from stable_1;")
+        for i in range(self.fornum):
+            sql = "select apercentile(cal19_1, %d)/1000 ,apercentile(cal19_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000))
+            sql += "%s as cal19_1 ," % random.choice(self.calc_aggregate_all_j)
+            sql += "%s as cal19_2 " % random.choice(self.calc_aggregate_all_j)
+            sql += " from regular_table_1 t1, regular_table_2 t2 where t1.ts = t2.ts and "
+            sql += "%s " % random.choice(self.q_u_where)
+            sql += "%s " % random.choice(self.state_u_window)
+            #sql += "%s " % random.choice(self.order_u_where)
+            sql += "%s " % random.choice(self.limit_u_where)
+            sql += ") "
+            tdLog.info(sql)
+            tdLog.info(len(sql))
+            tdSql.query(sql)
+            self.cur1.execute(sql)
+
+        tdSql.query("select 19-2.2 from stable_1;")
+        for i in range(self.fornum):
+            sql = "select apercentile(cal19_1, %d)/1000 ,apercentile(cal19_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000))
+            sql += "%s as cal19_1 ," % random.choice(self.calc_aggregate_all_j)
+            sql += "%s as cal19_2 " % random.choice(self.calc_aggregate_all_j)
+            sql += " from regular_table_1 t1, regular_table_2 t2 where t1.ts = t2.ts and "
+            sql += "%s " % random.choice(self.q_u_or_where)
+            sql += "%s " % random.choice(self.state_u_window)
+            #sql += "%s " % random.choice(self.order_u_where)
+            sql += "%s " % random.choice(self.limit_u_where)
+            sql += ") "
+            tdLog.info(sql)
+            tdLog.info(len(sql))
+            tdSql.query(sql)
+            self.cur1.execute(sql)
+
+        tdSql.query("select 19-3 from stable_1;")
+        for i in range(self.fornum):
+            sql = "select apercentile(cal19_1, %d)/1000 ,apercentile(cal19_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000))
+            sql += "%s as cal19_1 ," % random.choice(self.calc_aggregate_all)
+            sql += "%s as cal19_2 " % random.choice(self.calc_aggregate_all)
+            sql += " from stable_1_1 where "
+            sql += "%s " % random.choice(self.q_where)
+            sql += "%s " % random.choice(self.state_window)
+            #sql += "%s " % random.choice(self.order_where)
+            sql += "%s " % random.choice(self.limit1_where)
+            sql += ") "
+            tdLog.info(sql)
+            tdLog.info(len(sql))
+            tdSql.query(sql)
+            self.cur1.execute(sql)
+
+        tdSql.query("select 19-4 from stable_1;")
+        for i in range(self.fornum):
+            sql = "select apercentile(cal19_1, %d)/1000 ,apercentile(cal19_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000))
+            sql += "%s as cal19_1 ," % random.choice(self.calc_aggregate_all_j)
+            sql += "%s as cal19_2 " % random.choice(self.calc_aggregate_all_j)
+            sql += " from stable_1_1 t1, stable_1_2 t2 where t1.ts = t2.ts and "
+            sql += "%s " % random.choice(self.q_u_where)
+            #sql += "%s " % random.choice(self.state_window)
random.choice(self.state_window) + #sql += "%s " % random.choice(self.order_u_where) + sql += "%s " % random.choice(self.limit_u_where) + sql += ") " + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + + tdSql.query("select 19-4.2 from stable_1;") + for i in range(self.fornum): + sql = "select apercentile(cal19_1, %d)/1000 ,apercentile(cal19_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000)) + sql += "%s as cal19_1 ," % random.choice(self.calc_aggregate_all_j) + sql += "%s as cal19_2 " % random.choice(self.calc_aggregate_all_j) + sql += " from stable_1_1 t1, stable_1_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.q_u_or_where) + #sql += "%s " % random.choice(self.order_u_where) + sql += "%s " % random.choice(self.limit_u_where) + sql += ") " + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + + tdSql.query("select 19-5 from stable_1;") + for i in range(self.fornum): + sql = "select apercentile(cal19_1, %d)/1000 ,apercentile(cal19_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000)) + sql += "%s as cal19_1 ," % random.choice(self.calc_aggregate_all) + sql += "%s as cal19_2 " % random.choice(self.calc_aggregate_all) + sql += " from stable_1 where " + sql += "%s " % random.choice(self.q_where) + sql += "%s " % random.choice(self.state_window) + sql += "%s " % random.choice(self.order_where) + sql += "%s " % random.choice(self.limit1_where) + sql += ") " + sql += "%s " % random.choice(self.interval_sliding) + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.error(sql) #'STATE_WINDOW not support for super table query' + + tdSql.query("select 19-6 from stable_1;") + for i in range(self.fornum): + sql = "select apercentile(cal19_1, %d)/1000 ,apercentile(cal19_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000)) + sql += "%s as cal19_1 ," % random.choice(self.calc_aggregate_all_j) + sql += "%s as cal19_2 " % random.choice(self.calc_aggregate_all_j) + sql += " from stable_1 t1 , stable_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.q_u_where) + #sql += "%s " % random.choice(self.state_window) + #sql += "%s " % random.choice(self.order_u_where) + sql += "%s " % random.choice(self.limit_u_where) + sql += ") " + #sql += "%s " % random.choice(self.interval_sliding) + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + + tdSql.query("select 19-7 from stable_1;") + for i in range(self.fornum): + sql = "select apercentile(cal19_1, %d)/1000 ,apercentile(cal19_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000)) + sql += "%s as cal19_1 ," % random.choice(self.calc_aggregate_all_j) + sql += "%s as cal19_2 " % random.choice(self.calc_aggregate_all_j) + sql += " from stable_1 t1 , stable_2 t2 where t1.ts = t2.ts and " + sql += "%s " % random.choice(self.qt_u_or_where) + #sql += "%s " % random.choice(self.order_u_where) + sql += "%s " % random.choice(self.limit_u_where) + sql += ") " + #sql += "%s " % random.choice(self.interval_sliding) + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + + #20 select * from (select calc_select_fills from regular_table or stable where <\>\in\and\or fill_where group by order by limit offset ) + #self.dropandcreateDB_random("%s" %db, 1) + tdSql.query("select 20-1 from stable_1;") + for i in 
range(self.fornum): + sql = "select * from ( select " + sql += "%s , " % random.choice(self.calc_select_fill) + sql += "%s ," % random.choice(self.calc_select_fill) + sql += "%s " % random.choice(self.calc_select_fill) + sql += " from stable_1 where " + sql += "%s " % random.choice(self.interp_where) + sql += "%s " % random.choice(self.fill_where) + sql += "%s " % random.choice(self.group_where) + sql += "%s " % random.choice(self.order_where) + sql += "%s " % random.choice(self.limit_where) + sql += ") " + tdLog.info(sql) + tdLog.info(len(sql)) + #interp not supported tdSql.query(sql) + #self.cur1.execute(sql) + + rsDn = self.restartDnodes() + tdSql.query("select 20-2 from stable_1;") + for i in range(self.fornum): + sql = "select * from ( select " + sql += "%s , " % random.choice(self.calc_select_fill_j) + sql += "%s ," % random.choice(self.calc_select_fill_j) + sql += "%s " % random.choice(self.calc_select_fill_j) + sql += " from stable_1 t1 , stable_2 t2 where t1.ts = t2.ts and " + sql += "%s and " % random.choice(self.t_join_where) + sql += "%s " % random.choice(self.interp_where_j) + sql += "%s " % random.choice(self.order_u_where) + sql += "%s " % random.choice(self.limit_u_where) + sql += ") " + tdLog.info(sql) + tdLog.info(len(sql)) + #interp not supported tdSql.query(sql) + #self.cur1.execute(sql) + + tdSql.query("select 20-2.2 from stable_1;") + for i in range(self.fornum): + sql = "select * from ( select " + sql += "%s , " % random.choice(self.calc_select_fill_j) + sql += "%s ," % random.choice(self.calc_select_fill_j) + sql += "%s " % random.choice(self.calc_select_fill_j) + sql += " from stable_1 t1 , stable_2 t2 where t1.ts = t2.ts and " + sql += "%s and " % random.choice(self.qt_u_or_where) + sql += "%s " % random.choice(self.interp_where_j) + sql += "%s " % random.choice(self.order_u_where) + sql += "%s " % random.choice(self.limit_u_where) + sql += ") " + tdLog.info(sql) + tdLog.info(len(sql)) + #interp not supported tdSql.query(sql) + #self.cur1.execute(sql) + + tdSql.query("select 20-3 from stable_1;") + for i in range(self.fornum): + sql = "select * from ( select " + sql += "%s , " % random.choice(self.calc_select_fill) + sql += "%s ," % random.choice(self.calc_select_fill) + sql += "%s " % random.choice(self.calc_select_fill) + sql += " from stable_1 where " + sql += "%s " % self.interp_where[2] + sql += "%s " % random.choice(self.fill_where) + sql += "%s " % random.choice(self.order_where) + sql += "%s " % random.choice(self.limit_where) + sql += ") " + tdLog.info(sql) + tdLog.info(len(sql)) + #interp not supported tdSql.query(sql) + #self.cur1.execute(sql) + + tdSql.query("select 20-4 from stable_1;") + for i in range(self.fornum): + sql = "select * from ( select " + sql += "%s , " % random.choice(self.calc_select_fill_j) + sql += "%s ," % random.choice(self.calc_select_fill_j) + sql += "%s " % random.choice(self.calc_select_fill_j) + sql += " from stable_1 t1, table_1 t2 where t1.ts = t2.ts and " + #sql += "%s and " % random.choice(self.t_join_where) + sql += "%s " % self.interp_where_j[random.randint(0,5)] + sql += "%s " % random.choice(self.order_u_where) + sql += "%s " % random.choice(self.limit_u_where) + sql += ") " + tdLog.info(sql) + tdLog.info(len(sql)) + #interp not supported tdSql.query(sql) + #self.cur1.execute(sql) + + tdSql.query("select 20-4.2 from stable_1;") + for i in range(self.fornum): + sql = "select * from ( select " + sql += "%s , " % random.choice(self.calc_select_fill_j) + sql += "%s ," % random.choice(self.calc_select_fill_j) + sql += "%s " % random.choice(self.calc_select_fill_j) + sql += " 
from stable_1 t1, stable_1_1 t2 where t1.ts = t2.ts and " + sql += "%s and " % random.choice(self.qt_u_or_where) + sql += "%s " % self.interp_where_j[random.randint(0,5)] + sql += "%s " % random.choice(self.fill_where) + sql += "%s " % random.choice(self.order_u_where) + sql += "%s " % random.choice(self.limit_u_where) + sql += ") " + tdLog.info(sql) + tdLog.info(len(sql)) + ##interp not supported tdSql.error(sql) + #self.cur1.execute(sql) + + tdSql.query("select 20-5 from stable_1;") + for i in range(self.fornum): + sql = "select * from ( select " + sql += "%s , " % random.choice(self.calc_select_fill) + sql += "%s ," % random.choice(self.calc_select_fill) + sql += "%s " % random.choice(self.calc_select_fill) + sql += " from regular_table_1 where " + sql += "%s " % self.interp_where[1] + sql += "%s " % random.choice(self.fill_where) + sql += "%s " % random.choice(self.order_where) + sql += "%s " % random.choice(self.limit_where) + sql += ") " + tdLog.info(sql) + tdLog.info(len(sql)) + ##interp not supported tdSql.query(sql) + #self.cur1.execute(sql) + + tdSql.query("select 20-6 from stable_1;") + for i in range(self.fornum): + sql = "select * from ( select " + sql += "%s , " % random.choice(self.calc_select_fill_j) + sql += "%s ," % random.choice(self.calc_select_fill_j) + sql += "%s " % random.choice(self.calc_select_fill_j) + sql += " from regular_table_1 t1, regular_table_2 t2 where t1.ts = t2.ts and " + #sql += "%s " % random.choice(self.interp_where_j) + sql += "%s " % self.interp_where_j[random.randint(0,5)] + sql += "%s " % random.choice(self.order_u_where) + sql += "%s " % random.choice(self.limit_u_where) + sql += ") " + tdLog.info(sql) + tdLog.info(len(sql)) + ##interp not supported tdSql.query(sql) + #self.cur1.execute(sql) + + #1 select * from (select * from (select * from regular_table where <\>\in\and\or order by limit )) + tdSql.query("select 1-1 from stable_1;") + for i in range(self.fornum): + # sql_start = "select * from ( " + # sql_end = ")" + for_num = random.randint(1, 15) + sql = "select * from (" * for_num + sql += "select * from ( select * from ( select " + sql += "%s, " % random.choice(self.s_r_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts from regular_table_1 where " + sql += "%s " % random.choice(self.q_where) + sql += ")) " + sql += ")" * for_num + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + + sql2 = "select * from ( select * from ( select " + sql2 += "%s, " % random.choice(self.s_r_select) + sql2 += "%s, " % random.choice(self.q_select) + sql2 += "ts from regular_table_1 where " + sql2 += "%s " % random.choice(self.q_where) + sql2 += ")) " + tdLog.info(sql2) + tdLog.info(len(sql2)) + tdSql.query(sql2) + self.cur1.execute(sql2) + + self.data_matrix_equal('%s' %sql ,1,10,1,1,'%s' %sql2 ,1,10,1,1) + self.data_matrix_equal('%s' %sql ,1,10,1,1,'%s' %sql ,1,10,3,3) + self.data_matrix_equal('%s' %sql ,1,10,3,3,'%s' %sql2 ,1,10,3,3) + + for i in range(self.fornum): + for_num = random.randint(1, 15) + sql = "select ts from (" * for_num + sql += "select * from ( select * from ( select " + sql += "%s, " % random.choice(self.s_r_select) + sql += "%s, " % random.choice(self.q_select) + sql += "ts from regular_table_1 where " + sql += "%s " % random.choice(self.q_where) + sql += ")) " + sql += ")" * for_num + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + + sql2 = "select * from ( select * from ( select " + sql2 += "%s, " % random.choice(self.s_r_select) + sql2 += "%s, " % random.choice(self.q_select) + 
sql2 += "ts from regular_table_1 where " + sql2 += "%s " % random.choice(self.q_where) + sql2 += ")) " + tdLog.info(sql2) + tdLog.info(len(sql2)) + tdSql.query(sql2) + self.cur1.execute(sql2) + + self.data_matrix_equal('%s' %sql ,1,10,1,1,'%s' %sql2 ,1,10,1,1) + + #2 select * from (select * from (select * form stable where <\>\in\and\or order by limit )) + tdSql.query("select 2-1 from stable_1;") + for i in range(self.fornum): + for_num = random.randint(1, 15); + sql = "select * from (" * for_num + sql += "select * from ( select * from ( select " + sql += "%s, " % random.choice(self.s_s_select) + sql += "%s, " % random.choice(self.qt_select) + sql += "ts from stable_1 where " + sql += "%s " % random.choice(self.q_where) + sql += ")) " + sql += ")" * for_num + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + + sql2 = "select * from ( select * from ( select " + sql2 += "%s, " % random.choice(self.s_s_select) + sql2 += "%s, " % random.choice(self.qt_select) + sql2 += "ts from stable_1 where " + sql2 += "%s " % random.choice(self.q_where) + sql2 += ")) " + tdLog.info(sql2) + tdLog.info(len(sql2)) + tdSql.query(sql2) + self.cur1.execute(sql2) + + self.data_matrix_equal('%s' %sql ,1,10,3,3,'%s' %sql2 ,1,10,3,3) + + for i in range(self.fornum): + for_num = random.randint(1, 15); + sql = "select ts from (" * for_num + sql += "select * from ( select * from ( select " + sql += "%s, " % random.choice(self.s_s_select) + sql += "%s, " % random.choice(self.qt_select) + sql += "ts from stable_1 where " + sql += "%s " % random.choice(self.q_where) + sql += ")) " + sql += ")" * for_num + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + + sql2 = "select ts from ( select * from ( select " + sql2 += "%s, " % random.choice(self.s_s_select) + sql2 += "%s, " % random.choice(self.qt_select) + sql2 += "ts from stable_1 where " + sql2 += "%s " % random.choice(self.q_where) + sql2 += ")) " + tdLog.info(sql2) + tdLog.info(len(sql2)) + tdSql.query(sql2) + self.cur1.execute(sql2) + + self.data_matrix_equal('%s' %sql ,1,10,1,1,'%s' %sql2 ,1,10,1,1) + + #3 select ts ,calc from (select * form stable where <\>\in\and\or order by limit ) + #self.dropandcreateDB_random("%s" %db, 1) + tdSql.query("select 3-1 from stable_1;") + for i in range(self.fornum): + sql = "select " + sql += "%s " % random.choice(self.calc_calculate_regular) + sql += " from ( select * from stable_1 where " + sql += "%s " % random.choice(self.qt_where) + sql += "%s " % random.choice(self.orders_desc_where) + sql += "%s " % random.choice(self.limit_where) + sql += ") ;" + tdLog.info(sql) + tdLog.info(len(sql)) + #'Invalid function name: derivative' tdSql.query(sql) + #self.cur1.execute(sql) + + #4 select * from (select calc form stable where <\>\in\and\or order by limit ) + tdSql.query("select 4-1 from stable_1;") + for i in range(self.fornum): + sql = "select * from ( select " + sql += "%s " % random.choice(self.calc_select_in_ts) + sql += "from stable_1 where " + sql += "%s " % random.choice(self.qt_where) + #sql += "%s " % random.choice(self.order_desc_where) + sql += "%s " % random.choice(self.limit_where) + sql += ") ;" + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.query(sql) + self.cur1.execute(sql) + + #5 select ts ,tbname from (select * form stable where <\>\in\and\or order by limit ) + tdSql.query("select 5-1 from stable_1;") + for i in range(self.fornum): + sql = "select ts , tbname , " + sql += "%s ," % random.choice(self.calc_calculate_regular) + sql += "%s ," % 
random.choice(self.dqt_select) + sql += "%s " % random.choice(self.qt_select) + sql += " from ( select * from stable_1 where " + sql += "%s " % random.choice(self.qt_where) + sql += "%s " % random.choice(self.orders_desc_where) + sql += "%s " % random.choice(self.limit_where) + sql += ") ;" + tdLog.info(sql) + tdLog.info(len(sql)) + tdSql.error(sql) + + #special sql + tdSql.query("select 6-1 from stable_1;") + for i in range(self.fornum): + sql = "select * from ( select _block_dist() from stable_1);" + # tdSql.query(sql) + # tdSql.checkRows(1) + sql = "select _block_dist() from (select * from stable_1);" + tdSql.error(sql) + sql = "select * from (select database());" + tdSql.error(sql) + sql = "select * from (select client_version());" + tdSql.error(sql) + sql = "select * from (select client_version() as version);" + tdSql.error(sql) + sql = "select * from (select server_version());" + tdSql.error(sql) + sql = "select * from (select server_version() as version);" + tdSql.error(sql) + sql = "select * from (select server_status());" + tdSql.error(sql) + sql = "select * from (select server_status() as status);" + tdSql.error(sql) + + #taos -f sql + startTime_taos_f = time.time() + print("taos -f sql start!") + taos_cmd1 = "taos -f %s/%s.sql" % (self.testcasePath,self.testcaseFilename) + _ = subprocess.check_output(taos_cmd1, shell=True).decode("utf-8") + print("taos -f sql over!") + endTime_taos_f = time.time() + print("taos_f total time %ds" % (endTime_taos_f - startTime_taos_f)) + + print('=====================2.6 old function end ===========') + + + + def run(self): + tdSql.prepare() + + startTime = time.time() + + # + + + #self.math_nest(['TAIL']) #TD-16009 + # self.math_nest(['HYPERLOGLOG']) #TD-16038 + # self.math_nest(['UNIQUE']) + + + + # # + #self.function_before_26() #TD-16031 + + # self.math_nest(['ABS','SQRT']) #TD-16042 + # self.math_nest(['SIN','COS','TAN','ASIN','ACOS','ATAN']) + # self.math_nest(['POW','LOG']) #TD-16039 + # self.math_nest(['FLOOR','CEIL','ROUND']) + # #self.math_nest(['SAMPLE']) #TD-16017 + # #self.math_nest(['CSUM']) #TD-15936 crash + # self.math_nest(['MAVG']) + + self.str_nest(['LTRIM','RTRIM','LOWER','UPPER']) + self.str_nest(['LENGTH','CHAR_LENGTH']) + self.str_nest(['SUBSTR']) #TD-16042 + self.str_nest(['CONCAT']) #TD-16002 occasional failure + self.str_nest(['CONCAT_WS']) #TD-16002 occasional failure + # self.time_nest(['CAST']) #TD-16017 occasional failure, covered by the CAST_x cases below + self.time_nest(['CAST_1']) + self.time_nest(['CAST_2']) + self.time_nest(['CAST_3']) + self.time_nest(['CAST_4']) + + + + # self.time_nest(['NOW','TODAY']) # + # self.time_nest(['TIMEZONE']) # + # self.time_nest(['TIMETRUNCATE']) #TD-16039 + # self.time_nest(['TO_ISO8601']) + # self.time_nest(['TO_UNIXTIMESTAMP']) # frequent core dumps + # self.time_nest(['ELAPSED']) + + + endTime = time.time() + print("total time %ds" % (endTime - startTime)) + + + + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/system-test/2-query/percentile.py b/tests/system-test/2-query/percentile.py index 2122197ad2cfbe2996266840b4fcd615627179b9..8df9bcb9ce4df065a151d33116f1331298ee35fd 100644 --- a/tests/system-test/2-query/percentile.py +++ b/tests/system-test/2-query/percentile.py @@ -11,6 +11,7 @@ # -*- coding: utf-8 -*- +from platform import java_ver from util.log import * from util.cases import * from util.sql import * @@ -41,147 +42,21 @@ class TDTestCase: # percentile verifacation tdSql.error("select percentile(ts ,20) 
from test") - tdSql.error("select apercentile(ts ,20) from test") tdSql.error("select percentile(col7 ,20) from test") - tdSql.error("select apercentile(col7 ,20) from test") tdSql.error("select percentile(col8 ,20) from test") - tdSql.error("select apercentile(col8 ,20) from test") - tdSql.error("select percentile(col9 ,20) from test") - tdSql.error("select apercentile(col9 ,20) from test") - - tdSql.query("select percentile(col1, 0) from test") - tdSql.checkData(0, 0, np.percentile(intData, 0)) - tdSql.query("select apercentile(col1, 0) from test") - print("apercentile result: %s" % tdSql.getData(0, 0)) - tdSql.query("select percentile(col1, 50) from test") - tdSql.checkData(0, 0, np.percentile(intData, 50)) - tdSql.query("select apercentile(col1, 50) from test") - print("apercentile result: %s" % tdSql.getData(0, 0)) - tdSql.query("select percentile(col1, 100) from test") - tdSql.checkData(0, 0, np.percentile(intData, 100)) - tdSql.query("select apercentile(col1, 100) from test") - print("apercentile result: %s" % tdSql.getData(0, 0)) - - tdSql.query("select percentile(col2, 0) from test") - tdSql.checkData(0, 0, np.percentile(intData, 0)) - tdSql.query("select apercentile(col2, 0) from test") - print("apercentile result: %s" % tdSql.getData(0, 0)) - tdSql.query("select percentile(col2, 50) from test") - tdSql.checkData(0, 0, np.percentile(intData, 50)) - tdSql.query("select apercentile(col2, 50) from test") - print("apercentile result: %s" % tdSql.getData(0, 0)) - tdSql.query("select percentile(col2, 100) from test") - tdSql.checkData(0, 0, np.percentile(intData, 100)) - tdSql.query("select apercentile(col2, 100) from test") - print("apercentile result: %s" % tdSql.getData(0, 0)) - - tdSql.query("select percentile(col3, 0) from test") - tdSql.checkData(0, 0, np.percentile(intData, 0)) - tdSql.query("select apercentile(col3, 0) from test") - print("apercentile result: %s" % tdSql.getData(0, 0)) - tdSql.query("select percentile(col3, 50) from test") - tdSql.checkData(0, 0, np.percentile(intData, 50)) - tdSql.query("select apercentile(col3, 50) from test") - print("apercentile result: %s" % tdSql.getData(0, 0)) - tdSql.query("select percentile(col3, 100) from test") - tdSql.checkData(0, 0, np.percentile(intData, 100)) - tdSql.query("select apercentile(col3, 100) from test") - print("apercentile result: %s" % tdSql.getData(0, 0)) - - tdSql.query("select percentile(col4, 0) from test") - tdSql.checkData(0, 0, np.percentile(intData, 0)) - tdSql.query("select apercentile(col4, 0) from test") - print("apercentile result: %s" % tdSql.getData(0, 0)) - tdSql.query("select percentile(col4, 50) from test") - tdSql.checkData(0, 0, np.percentile(intData, 50)) - tdSql.query("select apercentile(col4, 50) from test") - print("apercentile result: %s" % tdSql.getData(0, 0)) - tdSql.query("select percentile(col4, 100) from test") - tdSql.checkData(0, 0, np.percentile(intData, 100)) - tdSql.query("select apercentile(col4, 100) from test") - print("apercentile result: %s" % tdSql.getData(0, 0)) - - tdSql.query("select percentile(col11, 0) from test") - tdSql.checkData(0, 0, np.percentile(intData, 0)) - tdSql.query("select apercentile(col11, 0) from test") - print("apercentile result: %s" % tdSql.getData(0, 0)) - tdSql.query("select percentile(col11, 50) from test") - tdSql.checkData(0, 0, np.percentile(intData, 50)) - tdSql.query("select apercentile(col11, 50) from test") - print("apercentile result: %s" % tdSql.getData(0, 0)) - tdSql.query("select percentile(col11, 100) from test") - tdSql.checkData(0, 0, 
np.percentile(intData, 100)) - tdSql.query("select apercentile(col11, 100) from test") - print("apercentile result: %s" % tdSql.getData(0, 0)) - - tdSql.query("select percentile(col12, 0) from test") - tdSql.checkData(0, 0, np.percentile(intData, 0)) - tdSql.query("select apercentile(col12, 0) from test") - print("apercentile result: %s" % tdSql.getData(0, 0)) - tdSql.query("select percentile(col12, 50) from test") - tdSql.checkData(0, 0, np.percentile(intData, 50)) - tdSql.query("select apercentile(col12, 50) from test") - print("apercentile result: %s" % tdSql.getData(0, 0)) - tdSql.query("select percentile(col12, 100) from test") - tdSql.checkData(0, 0, np.percentile(intData, 100)) - tdSql.query("select apercentile(col12, 100) from test") - print("apercentile result: %s" % tdSql.getData(0, 0)) - - tdSql.query("select percentile(col13, 0) from test") - tdSql.checkData(0, 0, np.percentile(intData, 0)) - tdSql.query("select apercentile(col13, 0) from test") - print("apercentile result: %s" % tdSql.getData(0, 0)) - tdSql.query("select percentile(col13, 50) from test") - tdSql.checkData(0, 0, np.percentile(intData, 50)) - tdSql.query("select apercentile(col13, 50) from test") - print("apercentile result: %s" % tdSql.getData(0, 0)) - tdSql.query("select percentile(col13, 100) from test") - tdSql.checkData(0, 0, np.percentile(intData, 100)) - tdSql.query("select apercentile(col13, 100) from test") - print("apercentile result: %s" % tdSql.getData(0, 0)) - - tdSql.query("select percentile(col14, 0) from test") - tdSql.checkData(0, 0, np.percentile(intData, 0)) - tdSql.query("select apercentile(col14, 0) from test") - print("apercentile result: %s" % tdSql.getData(0, 0)) - tdSql.query("select percentile(col14, 50) from test") - tdSql.checkData(0, 0, np.percentile(intData, 50)) - tdSql.query("select apercentile(col14, 50) from test") - print("apercentile result: %s" % tdSql.getData(0, 0)) - tdSql.query("select percentile(col14, 100) from test") - tdSql.checkData(0, 0, np.percentile(intData, 100)) - tdSql.query("select apercentile(col14, 100) from test") - print("apercentile result: %s" % tdSql.getData(0, 0)) - - tdSql.query("select percentile(col5, 0) from test") - print("query result: %s" % tdSql.getData(0, 0)) - print("array result: %s" % np.percentile(floatData, 0)) - tdSql.query("select apercentile(col5, 0) from test") - print("apercentile result: %s" % tdSql.getData(0, 0)) - tdSql.query("select percentile(col5, 50) from test") - print("query result: %s" % tdSql.getData(0, 0)) - print("array result: %s" % np.percentile(floatData, 50)) - tdSql.query("select apercentile(col5, 50) from test") - print("apercentile result: %s" % tdSql.getData(0, 0)) - tdSql.query("select percentile(col5, 100) from test") - print("query result: %s" % tdSql.getData(0, 0)) - print("array result: %s" % np.percentile(floatData, 100)) - tdSql.query("select apercentile(col5, 100) from test") - print("apercentile result: %s" % tdSql.getData(0, 0)) - - tdSql.query("select percentile(col6, 0) from test") - tdSql.checkData(0, 0, np.percentile(floatData, 0)) - tdSql.query("select apercentile(col6, 0) from test") - print("apercentile result: %s" % tdSql.getData(0, 0)) - tdSql.query("select percentile(col6, 50) from test") - tdSql.checkData(0, 0, np.percentile(floatData, 50)) - tdSql.query("select apercentile(col6, 50) from test") - print("apercentile result: %s" % tdSql.getData(0, 0)) - tdSql.query("select percentile(col6, 100) from test") - tdSql.checkData(0, 0, np.percentile(floatData, 100)) - tdSql.query("select 
apercentile(col6, 100) from test") - print("apercentile result: %s" % tdSql.getData(0, 0)) - + tdSql.error("select percentile(col9 ,20) from test") + column_list = [1,2,3,4,11,12,13,14] + percent_list = [0,50,100] + for i in column_list: + for j in percent_list: + tdSql.query(f"select percentile(col{i}, {j}) from test") + tdSql.checkData(0, 0, np.percentile(intData, j)) + + for i in [5,6]: + for j in percent_list: + tdSql.query(f"select percentile(col{i}, {j}) from test") + tdSql.checkData(0, 0, np.percentile(floatData, j)) + tdSql.execute("create table meters (ts timestamp, voltage int) tags(loc nchar(20))") tdSql.execute("create table t0 using meters tags('beijing')") tdSql.execute("create table t1 using meters tags('shanghai')") @@ -189,9 +64,8 @@ class TDTestCase: tdSql.execute("insert into t0 values(%d, %d)" % (self.ts + i, i + 1)) tdSql.execute("insert into t1 values(%d, %d)" % (self.ts + i, i + 1)) - tdSql.error("select percentile(voltage, 20) from meters") - tdSql.query("select apercentile(voltage, 20) from meters") - print("apercentile result: %s" % tdSql.getData(0, 0)) + # tdSql.error("select percentile(voltage, 20) from meters") + tdSql.execute("create table st(ts timestamp, k int)") diff --git a/tests/system-test/2-query/sample.py b/tests/system-test/2-query/sample.py new file mode 100644 index 0000000000000000000000000000000000000000..94e06347d2923fc60d99768c667b927dde5dfd83 --- /dev/null +++ b/tests/system-test/2-query/sample.py @@ -0,0 +1,863 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import time +import sys +import subprocess +import random +import math +import numpy as np +import inspect +import re +import taos + +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor()) + + def sample_query_form(self, sel="select", func="sample(", col="c1", m_comm =",", k=1,r_comm=")", alias="", fr="from",table_expr="t1", condition=""): + ''' + sample function: + + :param sel: string, must be "select", required parameter; + :param func: string, must be "sample(" in this case, otherwise a different function is generated, required parameter; + :param col: string, column name, required parameter; + :param m_comm: string, comma between col and k , required parameter; + :param k: int/float, the number of rows to sample, [1,1000], required parameter; + :param r_comm: string, must be ")", use with "(" in func, required parameter; + :param alias: string, an alias for the result column, or an extra selected expression; + :param fr: string, must be "from", required parameter; + :param table_expr: string or expression, data source (e.g. table/stable name, result set), required parameter; + :param condition: expression; + :return: sample query statement; default: select sample(c1, 1) from t1 + ''' + + return f"{sel} {func} {col} {m_comm} {k} {r_comm} {alias} {fr} {table_expr} {condition}" + + def checksample(self,sel="select", func="sample(", col="c1", m_comm =",", k=1,r_comm=")", 
alias="", fr="from",table_expr="t1", condition=""): + # print(self.sample_query_form(sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + # table_expr=table_expr, condition=condition)) + line = sys._getframe().f_back.f_lineno + + if not all([sel , func , col , m_comm , k , r_comm , fr , table_expr]): + print(f"case in {line}: ", end='') + return tdSql.error(self.sample_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + + + sql = "select * from t1" + collist = tdSql.getColNameList(sql) + + if not isinstance(col, str): + print(f"case in {line}: ", end='') + return tdSql.error(self.sample_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + + if len([x for x in col.split(",") if x.strip()]) != 1: + print(f"case in {line}: ", end='') + return tdSql.error(self.sample_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + + col = col.replace(",", "").replace(" ","") + + if any([re.compile('^[a-zA-Z]{1}.*$').match(col) is None , not col.replace(".","").isalnum()]): + print(f"case in {line}: ", end='') + return tdSql.error(self.sample_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + + if '.' in col: + if any([col.split(".")[0] not in table_expr, col.split(".")[1] not in collist]): + print(f"case in {line}: ", end='') + return tdSql.error(self.sample_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + pass + + if "." not in col: + if col not in collist: + print(f"case in {line}: ", end='') + return tdSql.error(self.sample_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + pass + + # colname = col if "." 
not in col else col.split(".")[1] + # col_index = collist.index(colname) + # if any([tdSql.cursor.istype(col_index, "TIMESTAMP"), tdSql.cursor.istype(col_index, "BOOL")]): + # print(f"case in {line}: ", end='') + # return tdSql.error(self.sample_query_form( + # sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + # table_expr=table_expr, condition=condition + # )) + # + # if any([tdSql.cursor.istype(col_index, "BINARY") , tdSql.cursor.istype(col_index,"NCHAR")]): + # print(f"case in {line}: ", end='') + # return tdSql.error(self.sample_query_form( + # sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + # table_expr=table_expr, condition=condition + # )) + + if any( [func != "sample(" , r_comm != ")" , fr != "from", sel != "select"]): + print(f"case in {line}: ", end='') + return tdSql.error(self.sample_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + + if all(["(" not in table_expr, "stb" in table_expr, "group" not in condition.lower()]): + print(f"case in {line}: ", end='') + return tdSql.error(self.sample_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + + if all(["group" in condition.lower(), "tbname" not in condition.lower()]): + print(f"case in {line}: ", end='') + return tdSql.error(self.sample_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + + alias_list = ["tbname", "_c0", "st", "ts"] + if all([alias, "," not in alias]): + if any([ not alias.isalnum(), re.compile('^[a-zA-Z]{1}.*$').match(alias) is None ]): + # column aliases actually also support "_", but that is forbidden in this case. + print(f"case in {line}: ", end='') + return tdSql.error(self.sample_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + + if all([alias, "," in alias]): + if all(parm != alias.lower().split(",")[1].strip() for parm in alias_list): + print(f"case in {line}: ", end='') + return tdSql.error(self.sample_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + pass + + condition_exception = [ "-", "+", "/", "*", "~", "^", "insert", "distinct", + "count", "avg", "twa", "irate", "sum", "stddev", "leastsquares", + "min", "max", "first", "last", "top", "bottom", "percentile", + "apercentile", "last_row", "interp", "diff", "derivative", + "spread", "ceil", "floor", "round", "interval", "fill", "slimit", "soffset"] + if "union" not in condition.lower(): + if any(parm in condition.lower().strip() for parm in condition_exception): + + print(f"case in {line}: ", end='') + return tdSql.error(self.sample_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + pass + + if not any([isinstance(k, int) , isinstance(k, float)]) : + print(f"case in {line}: ", end='') + return tdSql.error(self.sample_query_form( + col=col, k=k, alias=alias, table_expr=table_expr, condition=condition + )) + + if not(1 <= k < 1001): + print(f"case in {line}: ", end='') + return tdSql.error(self.sample_query_form( + col=col, k=k, alias=alias, table_expr=table_expr, condition=condition + )) + + k = int(k 
// 1) + pre_sql = re.sub(r"sample\([a-z0-9 .,]*\)", f"count({col})", self.sample_query_form( + col=col, table_expr=table_expr, condition=condition + )) + tdSql.query(pre_sql) + if tdSql.queryRows == 0: + tdSql.query(self.sample_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + print(f"case in {line}: ", end='') + tdSql.checkRows(0) + return + + tdSql.query(self.sample_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + + sample_result = tdSql.queryResult + sample_len = tdSql.queryRows + + if "group" in condition: + tb_condition = condition.split("group by")[1].split(" ")[1] + tdSql.query(f"select distinct {tb_condition} from {table_expr}") + query_result = tdSql.queryResult + query_rows = tdSql.queryRows + clear_condition = re.sub('order by [0-9a-z]*|slimit [0-9]*|soffset [0-9]*', "", condition) + + pre_row = 0 + for i in range(query_rows): + group_name = query_result[i][0] + if "where" in clear_condition: + pre_condition = re.sub('group by [0-9a-z]*', f"and {tb_condition}='{group_name}' and {col} is not null", clear_condition) + else: + pre_condition = "where " + re.sub('group by [0-9a-z]*',f"{tb_condition}='{group_name}' and {col} is not null", clear_condition) + + tdSql.query(f"select ts, {col} {alias} from {table_expr} {pre_condition}") + # pre_data = np.array(tdSql.queryResult)[np.array(tdSql.queryResult) != None] + # pre_sample = np.convolve(pre_data, np.ones(k), "valid")/k + pre_sample = tdSql.queryResult + pre_len = tdSql.queryRows + step = pre_len if pre_len < k else k + # tdSql.query(self.sample_query_form( + # sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + # table_expr=table_expr, condition=condition + # )) + for j in range(step): + if sample_result[pre_row:pre_row+step][j] not in pre_sample: + tdLog.exit(f"case in {line} failed: sample data is not in group {group_name}") + else: + tdLog.info(f"case in {line} succeeded: sample data is in group {group_name}") + + # for j in range(len(pre_sample)): + # print(f"case in {line}:", end='') + # tdSql.checkData(pre_row+j, 1, pre_sample[j]) + pre_row += step + return + elif "union" in condition: + union_sql_0 = self.sample_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + ).split("union all")[0] + + union_sql_1 = self.sample_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + ).split("union all")[1] + + tdSql.query(union_sql_0) + # union_sample_0 = tdSql.queryResult + row_union_0 = tdSql.queryRows + + tdSql.query(union_sql_1) + # union_sample_1 = tdSql.queryResult + row_union_1 = tdSql.queryRows + + tdSql.query(self.sample_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + # for i in range(tdSql.queryRows): + # print(f"case in {line}: ", end='') + # if i < row_union_0: + # tdSql.checkData(i, 1, union_sample_0[i][1]) + # else: + # tdSql.checkData(i, 1, union_sample_1[i-row_union_0][1]) + if row_union_0 + row_union_1 != sample_len: + tdLog.exit(f"case in {line} failed: sample row count does not match the two union parts") + else: + tdLog.info(f"case in {line} succeeded: sample row count matches the two union parts") + return + + else: + if "where" in condition: + condition = 
re.sub('where', f"where {col} is not null and ", condition) + else: + condition = f"where {col} is not null" + condition + print(f"select ts, {col} {alias} from {table_expr} {re.sub('limit [0-9]*|offset [0-9]*','',condition)}") + tdSql.query(f"select ts, {col} {alias} from {table_expr} {re.sub('limit [0-9]*|offset [0-9]*','',condition)}") + # offset_val = condition.split("offset")[1].split(" ")[1] if "offset" in condition else 0 + pre_sample = tdSql.queryResult + # pre_len = tdSql.queryRows + # for i in range(sample_len): + # if sample_result[pre_row:pre_row + step][i] not in pre_sample: + # tdLog.exit(f"case in {line} is failed: sample data is not in {group_name}") + # else: + # tdLog.info(f"case in {line} is success: sample data is in {group_name}") + + pass + + def sample_current_query(self) : + + # table schema :ts timestamp, c1 int, c2 float, c3 timestamp, c4 binary(16), c5 double, c6 bool + # c7 bigint, c8 smallint, c9 tinyint, c10 nchar(16) + + # case1~6: numeric col:int/bigint/tinyint/smallint/float/double + self.checksample() + case2 = {"col": "c2"} + self.checksample(**case2) + case3 = {"col": "c5"} + self.checksample(**case3) + case4 = {"col": "c7"} + self.checksample(**case4) + case5 = {"col": "c8"} + self.checksample(**case5) + case6 = {"col": "c9"} + self.checksample(**case6) + + # # case7~8: nested query + # case7 = {"table_expr": "(select c1 from stb1)"} + # self.checksample(**case7) + # case8 = {"table_expr": "(select sample(c1, 1) c1 from stb1 group by tbname)"} + # self.checksample(**case8) + + # case9~10: mix with tbname/ts/tag/col + # case9 = {"alias": ", tbname"} + # self.checksample(**case9) + # case10 = {"alias": ", _c0"} + # self.checksample(**case10) + case11 = {"alias": ", st1"} + self.checksample(**case11) + case12 = {"alias": ", c1"} + self.checksample(**case12) + + # case13~15: with single condition + case13 = {"condition": "where c1 <= 10"} + self.checksample(**case13) + case14 = {"condition": "where c6 in (0, 1)"} + self.checksample(**case14) + case15 = {"condition": "where c1 between 1 and 10"} + self.checksample(**case15) + + # case16: with multi-condition + case16 = {"condition": "where c6=1 or c6 =0"} + self.checksample(**case16) + + # # case17: only support normal table join + # case17 = { + # "col": "t1.c1", + # "table_expr": "t1, t2", + # "condition": "where t1.ts=t2.ts" + # } + # self.checksample(**case17) + # # case18~19: with group by + # case19 = { + # "table_expr": "stb1", + # "condition": "partition by tbname" + # } + # self.checksample(**case19) + + # # case20~21: with order by + # case20 = {"condition": "order by ts"} + # self.checksample(**case20) + # case21 = { + # "table_expr": "stb1", + # "condition": "partition by tbname order by tbname" + # } + # self.checksample(**case21) + + # case22: with union + case22 = { + "condition": "union all select sample( c1 , 1 ) from t2" + } + self.checksample(**case22) + + # case23: with limit/slimit + case23 = { + "condition": "limit 1" + } + self.checksample(**case23) + + # case24: value k range[1, 1000], can be int or float, k = floor(k) + case24 = {"k": 3} + self.checksample(**case24) + case25 = {"k": 2.999} + self.checksample(**case25) + case26 = {"k": 1000} + self.checksample(**case26) + case27 = { + "table_expr": "stb1", + "condition": "group by tbname slimit 1 " + } + self.checksample(**case27) # with slimit + case28 = { + "table_expr": "stb1", + "condition": "group by tbname slimit 1 soffset 1" + } + self.checksample(**case28) # with soffset + + pass + + def sample_error_query(self) -> None : + # 
error cases + + # form test + err1 = {"col": ""} + self.checksample(**err1) # no col + err2 = {"sel": ""} + self.checksample(**err2) # no select + err3 = {"func": "sample", "col": "", "m_comm": "", "k": "", "r_comm": ""} + self.checksample(**err3) # no sample condition: select sample from + err4 = {"col": "", "m_comm": "", "k": ""} + self.checksample(**err4) # no sample condition: select sample() from + err5 = {"func": "sample", "r_comm": ""} + self.checksample(**err5) # no brackets: select sample col, k from + err6 = {"fr": ""} + self.checksample(**err6) # no from + err7 = {"k": ""} + self.checksample(**err7) # no k + err8 = {"table_expr": ""} + self.checksample(**err8) # no table_expr + + # err9 = {"col": "st1"} + # self.checksample(**err9) # col: tag + tdSql.query(" select sample(st1 ,1) from t1 ") + err10 = {"col": 1} + self.checksample(**err10) # col: value + err11 = {"col": "NULL"} + self.checksample(**err11) # col: NULL + err12 = {"col": "%_"} + self.checksample(**err12) # col: %_ + err13 = {"col": "c3"} + self.checksample(**err13) # col: timestamp col + err14 = {"col": "_c0"} + # self.checksample(**err14) # col: Primary key + err15 = {"col": "avg(c1)"} + # self.checksample(**err15) # expr col + err16 = {"col": "c4"} + self.checksample(**err16) # binary col + err17 = {"col": "c10"} + self.checksample(**err17) # nchar col + err18 = {"col": "c6"} + self.checksample(**err18) # bool col + err19 = {"col": "'c1'"} + self.checksample(**err19) # col: string + err20 = {"col": None} + self.checksample(**err20) # col: None + err21 = {"col": "''"} + self.checksample(**err21) # col: '' + err22 = {"col": "tt1.c1"} + self.checksample(**err22) # not table_expr col + err23 = {"col": "t1"} + self.checksample(**err23) # tbname + err24 = {"col": "stb1"} + self.checksample(**err24) # stbname + err25 = {"col": "db"} + self.checksample(**err25) # database name + err26 = {"col": "True"} + self.checksample(**err26) # col: BOOL 1 + err27 = {"col": True} + self.checksample(**err27) # col: BOOL 2 + err28 = {"col": "*"} + self.checksample(**err28) # col: all col + err29 = {"func": "sample[", "r_comm": "]"} + self.checksample(**err29) # form: sample[col, k] + err30 = {"func": "sample{", "r_comm": "}"} + self.checksample(**err30) # form: sample{col, k} + err31 = {"col": "[c1]"} + self.checksample(**err31) # form: sample([col], k) + err32 = {"col": "c1, c2"} + self.checksample(**err32) # form: sample(col, col2, k) + err33 = {"col": "c1, 2"} + self.checksample(**err33) # form: sample(col, k1, k2) + err34 = {"alias": ", count(c1)"} + self.checksample(**err34) # mix with aggregate function 1 + err35 = {"alias": ", avg(c1)"} + self.checksample(**err35) # mix with aggregate function 2 + err36 = {"alias": ", min(c1)"} + self.checksample(**err36) # mix with select function 1 + err37 = {"alias": ", top(c1, 5)"} + self.checksample(**err37) # mix with select function 2 + err38 = {"alias": ", spread(c1)"} + self.checksample(**err38) # mix with calculation function 1 + err39 = {"alias": ", diff(c1)"} + self.checksample(**err39) # mix with calculation function 2 + # err40 = {"alias": "+ 2"} + # self.checksample(**err40) # mix with arithmetic 1 + # tdSql.query(" select sample(c1 , 1) + 2 from t1 ") + err41 = {"alias": "+ avg(c1)"} + self.checksample(**err41) # mix with arithmetic 2 + err42 = {"alias": ", c1"} + self.checksample(**err42) # mix with other col + # err43 = {"table_expr": "stb1"} + # self.checksample(**err43) # select stb directly + err44 = { + "col": "stb1.c1", + "table_expr": "stb1, stb2", + "condition": "where 
stb1.ts=stb2.ts and stb1.st1=stb2.st2 order by stb1.ts" + } + self.checksample(**err44) # stb join + err45 = { + "condition": "where ts>0 and ts < now interval(1h) fill(next)" + } + self.checksample(**err45) # interval + err46 = { + "table_expr": "t1", + "condition": "group by c6" + } + # self.checksample(**err46) # group by normal col + + err49 = {"k": "2021-01-01 00:00:00.000"} + self.checksample(**err49) # k: timestamp + err50 = {"k": False} + self.checksample(**err50) # k: False + err51 = {"k": "%"} + self.checksample(**err51) # k: special char + err52 = {"k": ""} + self.checksample(**err52) # k: "" + err53 = {"k": None} + self.checksample(**err53) # k: None + err54 = {"k": "NULL"} + self.checksample(**err54) # k: null + err55 = {"k": "binary(4)"} + self.checksample(**err55) # k: string + err56 = {"k": "c1"} + self.checksample(**err56) # k: string, col name + err57 = {"col": "c1, 1, c2"} + self.checksample(**err57) # form: sample(col1, k1, col2, k2) + err58 = {"col": "c1 cc1"} + self.checksample(**err58) # form: sample(col newname, k) + err59 = {"k": "'1'"} + # self.checksample(**err59) # form: sample(col, "1") + err60 = {"k": "-1-(-2)"} + # self.checksample(**err60) # form: sample(col, -1-(-2)) + err61 = {"k": 1001} + self.checksample(**err61) # k: right out of [1, 1000] + err62 = {"k": -1} + self.checksample(**err62) # k: negative number + err63 = {"k": 0} + self.checksample(**err63) # k: 0 + err64 = {"k": 2**63-1} + self.checksample(**err64) # k: max(bigint) + err65 = {"k": 1-2**63} + # self.checksample(**err65) # k: min(bigint) + err66 = {"k": -2**63} + self.checksample(**err66) # k: -2^63 (treated as bigint NULL) + err67 = {"k": 0.999999} + self.checksample(**err67) # k: left out of [1, 1000] + + pass + + def sample_test_data(self, tbnum:int, data_row:int, basetime:int) -> None : + for i in range(tbnum): + for j in range(data_row): + tdSql.execute( + f"insert into t{i} values (" + f"{basetime + (j+1)*10}, {random.randint(-200, -1)}, {random.uniform(200, -1)}, {basetime + random.randint(-200, -1)}, " + f"'binary_{j}', {random.uniform(-200, -1)}, {random.choice([0,1])}, {random.randint(-200,-1)}, " + f"{random.randint(-200, -1)}, {random.randint(-127, -1)}, 'nchar_{j}' )" + ) + + tdSql.execute( + f"insert into t{i} values (" + f"{basetime - (j+1) * 10}, {random.randint(1, 200)}, {random.uniform(1, 200)}, {basetime - random.randint(1, 200)}, " + f"'binary_{j}_1', {random.uniform(1, 200)}, {random.choice([0, 1])}, {random.randint(1,200)}, " + f"{random.randint(1,200)}, {random.randint(1,127)}, 'nchar_{j}_1' )" + ) + tdSql.execute( + f"insert into tt{i} values ( {basetime-(j+1) * 10}, {random.randint(1, 200)} )" + ) + + pass + + def sample_test_table(self,tbnum: int) -> None : + tdSql.execute("drop database if exists db") + tdSql.execute("create database if not exists db keep 3650") + tdSql.execute("use db") + + tdSql.execute( + "create stable db.stb1 (\ + ts timestamp, c1 int, c2 float, c3 timestamp, c4 binary(16), c5 double, c6 bool, \ + c7 bigint, c8 smallint, c9 tinyint, c10 nchar(16)\ + ) \ + tags(st1 int)" + ) + tdSql.execute( + "create stable db.stb2 (ts timestamp, c1 int) tags(st2 int)" + ) + for i in range(tbnum): + tdSql.execute(f"create table t{i} using stb1 tags({i})") + tdSql.execute(f"create table tt{i} using stb2 tags({i})") + + pass + + + def check_sample(self , sample_query , origin_query ): + + tdSql.query(origin_query) + + origin_datas = tdSql.queryResult + + tdSql.query(sample_query) + + sample_datas = tdSql.queryResult + status = True + for ind , sample_data in enumerate(sample_datas): + if 
sample_data not in origin_datas: + status = False + + if status: + tdLog.info(" sample data is within the original rows, sql succeeded: %s" % sample_query ) + else: + tdLog.exit(" sample data is not within the original rows, sql failed: %s" % sample_query ) + + + def basic_sample_query(self): + tdSql.execute(" drop database if exists db ") + tdSql.execute(" create database if not exists db days 300 ") + tdSql.execute(" use db ") + tdSql.execute( + '''create table stb1 + (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) + tags (t1 int) + ''' + ) + + tdSql.execute( + ''' + create table t1 + (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) + ''' + ) + for i in range(4): + tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )') + + for i in range(9): + tdSql.execute( + f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )" + ) + tdSql.execute( + f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )" + ) + tdSql.execute("insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )") + tdSql.execute("insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )") + tdSql.execute("insert into ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )") + tdSql.execute("insert into ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )") + + tdSql.execute("insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + tdSql.execute("insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + tdSql.execute("insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + + tdSql.execute( + f'''insert into t1 values + ( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a ) + ( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a ) + ( '2021-01-01 01:01:06.000', 3, 33333, 333, 33, 3.33, 33.33, 0, "binary3", "nchar3", now()+3a ) + ( '2021-05-07 01:01:10.000', 4, 44444, 444, 44, 4.44, 44.44, 1, "binary4", "nchar4", now()+4a ) + ( '2021-07-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( '2021-09-30 01:01:16.000', 5, 55555, 555, 55, 5.55, 55.55, 0, "binary5", "nchar5", now()+5a ) + ( '2022-02-01 01:01:20.000', 6, 66666, 666, 66, 6.66, 66.66, 1, "binary6", "nchar6", now()+6a ) + ( '2022-10-28 01:01:26.000', 7, 00000, 000, 00, 0.00, 00.00, 1, "binary7", "nchar7", "1970-01-01 08:00:00.000" ) + ( '2022-12-01 01:01:30.000', 8, -88888, -888, -88, -8.88, -88.88, 0, "binary8", "nchar8", "1969-01-01 01:00:00.000" ) + ( '2022-12-31 01:01:36.000', 9, -99999999999999999, -999, -99, -9.99, -999999999999999999999.99, 1, "binary9", "nchar9", "1900-01-01 00:00:00.000" ) + ( '2023-02-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ''' + ) + + # basic query for sample + + # params test for all + tdSql.error(" select sample(c1,c1) from t1 ") + tdSql.error(" select sample(c1,now) from t1 ") 
+ tdSql.error(" select sample(c1,tbname) from t1 ") + tdSql.error(" select sample(c1,ts) from t1 ") + tdSql.error(" select sample(c1,false) from t1 ") + tdSql.error(" select sample(123,1) from t1 ") + + tdSql.query(" select sample(c1,2) from t1 ") + tdSql.checkRows(2) + tdSql.query(" select sample(c1,10) from t1 ") + tdSql.checkRows(9) + tdSql.query(" select sample(c8,10) from t1 ") + tdSql.checkRows(9) + tdSql.query(" select sample(c1,999) from t1 ") + tdSql.checkRows(9) + tdSql.query(" select sample(c1,1000) from t1 ") + tdSql.checkRows(9) + tdSql.query(" select sample(c8,1000) from t1 ") + tdSql.checkRows(9) + tdSql.error(" select sample(c1,-1) from t1 ") + + # bug, needs fixing + # tdSql.query("select sample(c1 ,2) , 123 from stb1;") + + # all types supported + tdSql.query(" select sample(c1 , 20 ) from ct4 ") + tdSql.checkRows(9) + + tdSql.query(" select sample(c2 , 20 ) from ct4 ") + tdSql.checkRows(9) + + tdSql.query(" select sample(c3 , 20 ) from ct4 ") + tdSql.checkRows(9) + + tdSql.query(" select sample(c4 , 20 ) from ct4 ") + tdSql.checkRows(9) + + tdSql.query(" select sample(c5 , 20 ) from ct4 ") + tdSql.checkRows(9) + + tdSql.query(" select sample(c6 , 20 ) from ct4 ") + tdSql.checkRows(9) + + tdSql.query(" select sample(c7 , 20 ) from ct4 ") + tdSql.checkRows(9) + + tdSql.query(" select sample(c8 , 20 ) from ct4 ") + tdSql.checkRows(9) + + tdSql.query(" select sample(c9 , 20 ) from ct4 ") + tdSql.checkRows(9) + + tdSql.query(" select sample(c10 , 20 ) from ct4 ") + tdSql.checkRows(9) + + tdSql.query(" select sample(t1 , 20 ) from ct1 ") + tdSql.checkRows(13) + # filter data + + tdSql.query(" select sample(c1, 20 ) from t1 where c1 is null ") + tdSql.checkRows(0) + + tdSql.query(" select sample(c1, 20 ) from t1 where c1 =6 ") + tdSql.checkRows(1) + + tdSql.query(" select sample(c1, 20 ) from t1 where c1 > 6 ") + tdSql.checkRows(3) + + self.check_sample("select sample(c1, 20 ) from t1 where c1 > 6" , "select c1 from t1 where c1 > 6") + + tdSql.query(" select sample( c1 , 1 ) from t1 where c1 in (0, 1,2) ") + tdSql.checkRows(1) + + tdSql.query("select sample( c1 ,3 ) from t1 where c1 between 1 and 10 ") + tdSql.checkRows(3) + + self.check_sample("select sample( c1 ,3 ) from t1 where c1 between 1 and 10" ,"select c1 from t1 where c1 between 1 and 10") + + # join + + tdSql.query("select sample( ct4.c1 , 1 ) from ct1, ct4 where ct4.ts=ct1.ts") + + # partition by tbname + + tdSql.query("select sample(c1,2) from stb1 partition by tbname") + tdSql.checkRows(4) + + self.check_sample("select sample(c1,2) from stb1 partition by tbname" , "select c1 from stb1 partition by tbname") + + # nested query + # tdSql.query("select sample(c1,2) from (select c1 from t1); ") + # tdSql.checkRows(2) + + # union all + tdSql.query("select sample(c1,2) from t1 union all select sample(c1,3) from t1") + tdSql.checkRows(5) + + # fill interval + + # mixing with other functions is not supported + tdSql.error("select top(c1,2) , sample(c1,2) from ct1") + tdSql.error("select max(c1) , sample(c1,2) from ct1") + tdSql.error("select c1 , sample(c1,2) from ct1") + + # bugs when mixed with scalar expressions + # tdSql.error("select 123 , sample(c1,100) from ct1") + # tdSql.error("select sample(c1,100)+2 from ct1") + # tdSql.error("select abs(sample(c1,100)) from ct1") + + def sample_test_run(self) : + tdLog.printNoPrefix("==========TD-10594==========") + tbnum = 10 + nowtime = int(round(time.time() * 1000)) + per_table_rows = 10 + self.sample_test_table(tbnum) + + tdLog.printNoPrefix("######## no data test:") + self.sample_current_query() + 
self.sample_error_query() + + tdLog.printNoPrefix("######## insert only NULL test:") + for i in range(tbnum): + tdSql.execute(f"insert into t{i}(ts) values ({nowtime - 5})") + tdSql.execute(f"insert into t{i}(ts) values ({nowtime + 5})") + self.sample_current_query() + self.sample_error_query() + + tdLog.printNoPrefix("######## insert data in the range near the max(bigint/double):") + # self.sample_test_table(tbnum) + # tdSql.execute(f"insert into t1(ts, c1,c2,c5,c7) values " + # f"({nowtime - (per_table_rows + 1) * 10}, {2**31-1}, {3.4*10**38}, {1.7*10**308}, {2**63-1})") + # tdSql.execute(f"insert into t1(ts, c1,c2,c5,c7) values " + # f"({nowtime - (per_table_rows + 2) * 10}, {2**31-1}, {3.4*10**38}, {1.7*10**308}, {2**63-1})") + # self.sample_current_query() + # self.sample_error_query() + + tdLog.printNoPrefix("######## insert data in the range near the min(bigint/double):") + # self.sample_test_table(tbnum) + # tdSql.execute(f"insert into t1(ts, c1,c2,c5,c7) values " + # f"({nowtime - (per_table_rows + 1) * 10}, {1-2**31}, {-3.4*10**38}, {-1.7*10**308}, {1-2**63})") + # tdSql.execute(f"insert into t1(ts, c1,c2,c5,c7) values " + # f"({nowtime - (per_table_rows + 2) * 10}, {1-2**31}, {-3.4*10**38}, {-1.7*10**308}, {512-2**63})") + # self.sample_current_query() + # self.sample_error_query() + + tdLog.printNoPrefix("######## insert data without NULL data test:") + self.sample_test_table(tbnum) + self.sample_test_data(tbnum, per_table_rows, nowtime) + self.sample_current_query() + self.sample_error_query() + + + tdLog.printNoPrefix("######## insert data mix with NULL test:") + for i in range(tbnum): + tdSql.execute(f"insert into t{i}(ts) values ({nowtime})") + tdSql.execute(f"insert into t{i}(ts) values ({nowtime-(per_table_rows+3)*10})") + tdSql.execute(f"insert into t{i}(ts) values ({nowtime+(per_table_rows+3)*10})") + self.sample_current_query() + self.sample_error_query() + + + + tdLog.printNoPrefix("######## check after WAL test:") + tdSql.query("show dnodes") + index = tdSql.getData(0, 0) + tdDnodes.stop(index) + tdDnodes.start(index) + self.sample_current_query() + self.sample_error_query() + + self.basic_sample_query() + + def run(self): + import traceback + try: + # run in develop branch + self.sample_test_run() + pass + except Exception as e: + traceback.print_exc() + raise e + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) \ No newline at end of file diff --git a/tests/system-test/2-query/stateduration.py b/tests/system-test/2-query/stateduration.py new file mode 100644 index 0000000000000000000000000000000000000000..fa71009ef210e6a14c5abe04fbfe0f0b95c6598a --- /dev/null +++ b/tests/system-test/2-query/stateduration.py @@ -0,0 +1,265 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +from util.log import * +from util.cases import * +from util.sql import * + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor()) + self.ts = 1537146000000 + self.param_list = ['LT','lt','Lt','lT','GT','gt','Gt','gT','LE','le','Le','lE','GE','ge','Ge','gE','NE','ne','Ne','nE','EQ','eq','Eq','eQ'] + self.row_num = 10 + def run(self): + tdSql.prepare() + # timestamp = 1ms , time_unit = 1s + tdSql.execute('''create table test(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double, + col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned)''') + for i in range(self.row_num): + tdSql.execute("insert into test values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)" + % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1)) + integer_list = [1,2,3,4,11,12,13,14] + float_list = [5,6] + + for i in integer_list: + for j in self.param_list: + tdSql.query(f"select stateduration(col{i},'{j}',5) from test") + tdSql.checkRows(10) + if j in ['LT' ,'lt','Lt','lT']: + tdSql.checkEqual(tdSql.queryResult,[(0,), (0,), (0,), (0,), (-1,), (-1,), (-1,), (-1,), (-1,), (-1,)]) + elif j in ['GT','gt', 'Gt','gT']: + tdSql.checkEqual(tdSql.queryResult,[(-1,), (-1,), (-1,), (-1,), (-1,), (0,), (0,), (0,), (0,), (0,)]) + elif j in ['LE','le','Le','lE']: + tdSql.checkEqual(tdSql.queryResult,[(0,), (0,), (0,), (0,), (0,), (-1,), (-1,), (-1,), (-1,), (-1,)]) + elif j in [ 'GE','ge','Ge','gE']: + tdSql.checkEqual(tdSql.queryResult,[(-1,), (-1,), (-1,), (-1,), (0,), (0,), (0,), (0,), (0,), (0,)]) + elif j in ['NE','ne','Ne','nE']: + tdSql.checkEqual(tdSql.queryResult,[(0,), (0,), (0,), (0,), (-1,), (0,), (0,), (0,), (0,), (0,)]) + elif j in ['EQ','eq','Eq','eQ']: + tdSql.checkEqual(tdSql.queryResult,[(-1,), (-1,), (-1,), (-1,), (0,), (-1,), (-1,), (-1,), (-1,), (-1,)]) + for i in float_list: + for j in self.param_list: + tdSql.query(f"select stateduration(col{i},'{j}',5) from test") + tdSql.checkRows(10) + if j in ['LT','lt','Lt','lT','LE','le','Le','lE']: + tdSql.checkEqual(tdSql.queryResult,[(0,), (0,), (0,), (0,), (0,), (-1,), (-1,), (-1,), (-1,), (-1,)]) + elif j in ['GE','ge','Ge','gE','GT','gt','Gt','gT']: + tdSql.checkEqual(tdSql.queryResult,[(-1,), (-1,), (-1,), (-1,), (-1,), (0,), (0,), (0,), (0,), (0,)]) + elif j in ['NE','ne','Ne','nE']: + tdSql.checkEqual(tdSql.queryResult,[(0,), (0,), (0,), (0,), (0,), (0,), (0,), (0,), (0,), (0,)]) + elif j in ['EQ','eq','Eq','eQ']: + tdSql.checkEqual(tdSql.queryResult,[(-1,), (-1,), (-1,), (-1,), (-1,), (-1,), (-1,), (-1,), (-1,), (-1,)]) + + error_column_list = ['ts','col7','col8','col9','a',1] + for i in error_column_list: + for j in self.param_list: + tdSql.error(f"select stateduration({i},{j},5) from test") + + error_param_list = ['a',1] + for i in error_param_list: + tdSql.error(f"select stateduration(col1,{i},5) from test") + + # timestamp = 1s, time_unit =1s + tdSql.execute('''create table test1(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double, + col7 bool, col8 binary(20), col9 
nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned)''')
+        for i in range(self.row_num):
+            tdSql.execute("insert into test1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)"
+                          % (self.ts + i*1000, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1))
+
+        for i in integer_list:
+            for j in self.param_list:
+                tdSql.query(f"select stateduration(col{i},'{j}',5) from test1")
+                tdSql.checkRows(10)
+                # print(tdSql.queryResult)
+                if j in ['LT','lt','Lt','lT']:
+                    tdSql.checkEqual(tdSql.queryResult,[(0,), (1,), (2,), (3,), (-1,), (-1,), (-1,), (-1,), (-1,), (-1,)])
+                elif j in ['GT','gt','Gt','gT']:
+                    tdSql.checkEqual(tdSql.queryResult,[(-1,), (-1,), (-1,), (-1,), (-1,), (0,), (1,), (2,), (3,), (4,)])
+                elif j in ['LE','le','Le','lE']:
+                    tdSql.checkEqual(tdSql.queryResult,[(0,), (1,), (2,), (3,), (4,), (-1,), (-1,), (-1,), (-1,), (-1,)])
+                elif j in ['GE','ge','Ge','gE']:
+                    tdSql.checkEqual(tdSql.queryResult,[(-1,), (-1,), (-1,), (-1,), (0,), (1,), (2,), (3,), (4,), (5,)])
+                elif j in ['NE','ne','Ne','nE']:
+                    tdSql.checkEqual(tdSql.queryResult,[(0,), (1,), (2,), (3,), (-1,), (0,), (1,), (2,), (3,), (4,)])
+                elif j in ['EQ','eq','Eq','eQ']:
+                    tdSql.checkEqual(tdSql.queryResult,[(-1,), (-1,), (-1,), (-1,), (0,), (-1,), (-1,), (-1,), (-1,), (-1,)])
+        for i in float_list:
+            for j in self.param_list:
+                tdSql.query(f"select stateduration(col{i},'{j}',5) from test1")
+                tdSql.checkRows(10)
+                # print(tdSql.queryResult)
+                if j in ['LT','lt','Lt','lT','LE','le','Le','lE']:
+                    tdSql.checkEqual(tdSql.queryResult,[(0,), (1,), (2,), (3,), (4,), (-1,), (-1,), (-1,), (-1,), (-1,)])
+                elif j in ['GE','ge','Ge','gE','GT','gt','Gt','gT']:
+                    tdSql.checkEqual(tdSql.queryResult,[(-1,), (-1,), (-1,), (-1,), (-1,), (0,), (1,), (2,), (3,), (4,)])
+                elif j in ['NE','ne','Ne','nE']:
+                    tdSql.checkEqual(tdSql.queryResult,[(0,), (1,), (2,), (3,), (4,), (5,), (6,), (7,), (8,), (9,)])
+                elif j in ['EQ','eq','Eq','eQ']:
+                    tdSql.checkEqual(tdSql.queryResult,[(-1,), (-1,), (-1,), (-1,), (-1,), (-1,), (-1,), (-1,), (-1,), (-1,)])
+
+
+        # timestamp = 1m, time_unit = 1m
+        tdSql.execute('''create table test2(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double,
+                    col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned)''')
+        for i in range(self.row_num):
+            tdSql.execute("insert into test2 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)"
+                          % (self.ts + i*1000*60, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1))
+
+        for i in integer_list:
+            for j in self.param_list:
+                tdSql.query(f"select stateduration(col{i},'{j}',5,1m) from test2")
+                tdSql.checkRows(10)
+                # print(tdSql.queryResult)
+                if j in ['LT','lt','Lt','lT']:
+                    tdSql.checkEqual(tdSql.queryResult,[(0,), (1,), (2,), (3,), (-1,), (-1,), (-1,), (-1,), (-1,), (-1,)])
+                elif j in ['GT','gt','Gt','gT']:
+                    tdSql.checkEqual(tdSql.queryResult,[(-1,), (-1,), (-1,), (-1,), (-1,), (0,), (1,), (2,), (3,), (4,)])
+                elif j in ['LE','le','Le','lE']:
+                    tdSql.checkEqual(tdSql.queryResult,[(0,), (1,), (2,), (3,), (4,), (-1,), (-1,), (-1,), (-1,), (-1,)])
+                elif j in ['GE','ge','Ge','gE']:
+                    tdSql.checkEqual(tdSql.queryResult,[(-1,), (-1,), (-1,), (-1,), (0,), (1,), (2,), (3,), (4,), (5,)])
+                elif j in ['NE','ne','Ne','nE']:
+                    tdSql.checkEqual(tdSql.queryResult,[(0,), (1,), (2,), (3,), (-1,), (0,), (1,), (2,), (3,), (4,)])
+                elif j in ['EQ','eq','Eq','eQ']:
+                    tdSql.checkEqual(tdSql.queryResult,[(-1,), (-1,), (-1,), (-1,), (0,), (-1,), (-1,), (-1,), (-1,), (-1,)])
+        for i in float_list:
+            for j in self.param_list:
+                tdSql.query(f"select stateduration(col{i},'{j}',5,1m) from test2")
+                tdSql.checkRows(10)
+                # print(tdSql.queryResult)
+                if j in ['LT','lt','Lt','lT','LE','le','Le','lE']:
+                    tdSql.checkEqual(tdSql.queryResult,[(0,), (1,), (2,), (3,), (4,), (-1,), (-1,), (-1,), (-1,), (-1,)])
+                elif j in ['GE','ge','Ge','gE','GT','gt','Gt','gT']:
+                    tdSql.checkEqual(tdSql.queryResult,[(-1,), (-1,), (-1,), (-1,), (-1,), (0,), (1,), (2,), (3,), (4,)])
+                elif j in ['NE','ne','Ne','nE']:
+                    tdSql.checkEqual(tdSql.queryResult,[(0,), (1,), (2,), (3,), (4,), (5,), (6,), (7,), (8,), (9,)])
+                elif j in ['EQ','eq','Eq','eQ']:
+                    tdSql.checkEqual(tdSql.queryResult,[(-1,), (-1,), (-1,), (-1,), (-1,), (-1,), (-1,), (-1,), (-1,), (-1,)])
+
+        # timestamp = 1h, time_unit = 1h
+        tdSql.execute('''create table test3(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double,
+                    col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned)''')
+        for i in range(self.row_num):
+            tdSql.execute("insert into test3 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)"
+                          % (self.ts + i*1000*60*60, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1))
+
+        for i in integer_list:
+            for j in self.param_list:
+                tdSql.query(f"select stateduration(col{i},'{j}',5,1h) from test3")
+                tdSql.checkRows(10)
+                # print(tdSql.queryResult)
+                if j in ['LT','lt','Lt','lT']:
+                    tdSql.checkEqual(tdSql.queryResult,[(0,), (1,), (2,), (3,), (-1,), (-1,), (-1,), (-1,), (-1,), (-1,)])
+                elif j in ['GT','gt','Gt','gT']:
+                    tdSql.checkEqual(tdSql.queryResult,[(-1,), (-1,), (-1,), (-1,), (-1,), (0,), (1,), (2,), (3,), (4,)])
+                elif j in ['LE','le','Le','lE']:
+                    tdSql.checkEqual(tdSql.queryResult,[(0,), (1,), (2,), (3,), (4,), (-1,), (-1,), (-1,), (-1,), (-1,)])
+                elif j in ['GE','ge','Ge','gE']:
+                    tdSql.checkEqual(tdSql.queryResult,[(-1,), (-1,), (-1,), (-1,), (0,), (1,), (2,), (3,), (4,), (5,)])
+                elif j in ['NE','ne','Ne','nE']:
+                    tdSql.checkEqual(tdSql.queryResult,[(0,), (1,), (2,), (3,), (-1,), (0,), (1,), (2,), (3,), (4,)])
+                elif j in ['EQ','eq','Eq','eQ']:
+                    tdSql.checkEqual(tdSql.queryResult,[(-1,), (-1,), (-1,), (-1,), (0,), (-1,), (-1,), (-1,), (-1,), (-1,)])
+        for i in float_list:
+            for j in self.param_list:
+                tdSql.query(f"select stateduration(col{i},'{j}',5,1h) from test3")
+                tdSql.checkRows(10)
+                # print(tdSql.queryResult)
+                if j in ['LT','lt','Lt','lT','LE','le','Le','lE']:
+                    tdSql.checkEqual(tdSql.queryResult,[(0,), (1,), (2,), (3,), (4,), (-1,), (-1,), (-1,), (-1,), (-1,)])
+                elif j in ['GE','ge','Ge','gE','GT','gt','Gt','gT']:
+                    tdSql.checkEqual(tdSql.queryResult,[(-1,), (-1,), (-1,), (-1,), (-1,), (0,), (1,), (2,), (3,), (4,)])
+                elif j in ['NE','ne','Ne','nE']:
+                    tdSql.checkEqual(tdSql.queryResult,[(0,), (1,), (2,), (3,), (4,), (5,), (6,), (7,), (8,), (9,)])
+                elif j in ['EQ','eq','Eq','eQ']:
+                    tdSql.checkEqual(tdSql.queryResult,[(-1,), (-1,), (-1,), (-1,), (-1,), (-1,), (-1,), (-1,), (-1,), (-1,)])
+
+        # timestamp = 1h, time_unit = 1m
+        for i in integer_list:
+            for j in self.param_list:
+                tdSql.query(f"select stateduration(col{i},'{j}',5,1m) from test3")
+                tdSql.checkRows(10)
+                # print(tdSql.queryResult)
+                if j in ['LT','lt','Lt','lT']:
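+                    # rows in test3 are 1h apart, so with time_unit=1m each duration step is a multiple of 60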
+                    tdSql.checkEqual(tdSql.queryResult,[(0,), (60,), (120,), (180,), (-1,), (-1,), (-1,), (-1,), (-1,), (-1,)])
+                elif j in ['GT','gt','Gt','gT']:
+                    tdSql.checkEqual(tdSql.queryResult,[(-1,), (-1,), (-1,), (-1,), (-1,), (0,), (60,), (120,), (180,), (240,)])
+                elif j in ['LE','le','Le','lE']:
+                    tdSql.checkEqual(tdSql.queryResult,[(0,), (60,), (120,), (180,), (240,), (-1,), (-1,), (-1,), (-1,), (-1,)])
+                elif j in ['GE','ge','Ge','gE']:
+                    tdSql.checkEqual(tdSql.queryResult,[(-1,), (-1,), (-1,), (-1,), (0,), (60,), (120,), (180,), (240,), (300,)])
+                elif j in ['NE','ne','Ne','nE']:
+                    tdSql.checkEqual(tdSql.queryResult,[(0,), (60,), (120,), (180,), (-1,), (0,), (60,), (120,), (180,), (240,)])
+                elif j in ['EQ','eq','Eq','eQ']:
+                    tdSql.checkEqual(tdSql.queryResult,[(-1,), (-1,), (-1,), (-1,), (0,), (-1,), (-1,), (-1,), (-1,), (-1,)])
+        for i in float_list:
+            for j in self.param_list:
+                tdSql.query(f"select stateduration(col{i},'{j}',5,1m) from test3")
+                tdSql.checkRows(10)
+                # print(tdSql.queryResult)
+                if j in ['LT','lt','Lt','lT','LE','le','Le','lE']:
+                    tdSql.checkEqual(tdSql.queryResult,[(0,), (60,), (120,), (180,), (240,), (-1,), (-1,), (-1,), (-1,), (-1,)])
+                elif j in ['GE','ge','Ge','gE','GT','gt','Gt','gT']:
+                    tdSql.checkEqual(tdSql.queryResult,[(-1,), (-1,), (-1,), (-1,), (-1,), (0,), (60,), (120,), (180,), (240,)])
+                elif j in ['NE','ne','Ne','nE']:
+                    tdSql.checkEqual(tdSql.queryResult,[(0,), (60,), (120,), (180,), (240,), (300,), (360,), (420,), (480,), (540,)])
+                elif j in ['EQ','eq','Eq','eQ']:
+                    tdSql.checkEqual(tdSql.queryResult,[(-1,), (-1,), (-1,), (-1,), (-1,), (-1,), (-1,), (-1,), (-1,), (-1,)])
+
+        # for stb
+        tdSql.execute('''create table stb(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double,
+                    col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned) tags(t0 int)''')
+        tdSql.execute('create table stb_1 using stb tags(1)')
+        for i in range(self.row_num):
+            tdSql.execute("insert into stb_1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)"
+                          % (self.ts + i*1000*60*60, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1))
+
+        for i in integer_list:
+            for j in self.param_list:
+                tdSql.query(f"select stateduration(col{i},'{j}',5,1h) from stb")
+                tdSql.checkRows(10)
+                # print(tdSql.queryResult)
+                if j in ['LT','lt','Lt','lT']:
+                    tdSql.checkEqual(tdSql.queryResult,[(0,), (1,), (2,), (3,), (-1,), (-1,), (-1,), (-1,), (-1,), (-1,)])
+                elif j in ['GT','gt','Gt','gT']:
+                    tdSql.checkEqual(tdSql.queryResult,[(-1,), (-1,), (-1,), (-1,), (-1,), (0,), (1,), (2,), (3,), (4,)])
+                elif j in ['LE','le','Le','lE']:
+                    tdSql.checkEqual(tdSql.queryResult,[(0,), (1,), (2,), (3,), (4,), (-1,), (-1,), (-1,), (-1,), (-1,)])
+                elif j in ['GE','ge','Ge','gE']:
+                    tdSql.checkEqual(tdSql.queryResult,[(-1,), (-1,), (-1,), (-1,), (0,), (1,), (2,), (3,), (4,), (5,)])
+                elif j in ['NE','ne','Ne','nE']:
+                    tdSql.checkEqual(tdSql.queryResult,[(0,), (1,), (2,), (3,), (-1,), (0,), (1,), (2,), (3,), (4,)])
+                elif j in ['EQ','eq','Eq','eQ']:
+                    tdSql.checkEqual(tdSql.queryResult,[(-1,), (-1,), (-1,), (-1,), (0,), (-1,), (-1,), (-1,), (-1,), (-1,)])
+        for i in float_list:
+            for j in self.param_list:
+                tdSql.query(f"select stateduration(col{i},'{j}',5,1h) from stb")
+                tdSql.checkRows(10)
+                # print(tdSql.queryResult)
+                if j in ['LT','lt','Lt','lT','LE','le','Le','lE']:
+                    tdSql.checkEqual(tdSql.queryResult,[(0,), (1,), (2,), (3,), (4,), (-1,), (-1,), (-1,), (-1,), (-1,)])
+                elif j in ['GE','ge','Ge','gE','GT','gt','Gt','gT']:
+                    tdSql.checkEqual(tdSql.queryResult,[(-1,), (-1,), (-1,), (-1,), (-1,), (0,), (1,), (2,), (3,), (4,)])
+                elif j in ['NE','ne','Ne','nE']:
+                    tdSql.checkEqual(tdSql.queryResult,[(0,), (1,), (2,), (3,), (4,), (5,), (6,), (7,), (8,), (9,)])
+                elif j in ['EQ','eq','Eq','eQ']:
+                    tdSql.checkEqual(tdSql.queryResult,[(-1,), (-1,), (-1,), (-1,), (-1,), (-1,), (-1,), (-1,), (-1,), (-1,)])
+
+    def stop(self):
+        tdSql.close()
+        tdLog.success("%s successfully executed" % __file__)
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
\ No newline at end of file
diff --git a/tests/system-test/2-query/top.py b/tests/system-test/2-query/top.py
index 12e81fa1900ffe2633520359f0051a21434611b6..fbbbb2c99acc2cce1b0cb53a0dafd7f18ec01011 100644
--- a/tests/system-test/2-query/top.py
+++ b/tests/system-test/2-query/top.py
@@ -89,14 +89,15 @@ class TDTestCase:
         tdSql.checkEqual(tdSql.queryResult,[(9,),(10,)])
         tdSql.query("select ts,top(col1, 2),ts from test1")
         tdSql.checkRows(2)
-
+        tdSql.query("select top(col14, 100) from test")
+        tdSql.checkRows(10)
         tdSql.query("select ts,top(col1, 2),ts from test group by tbname")
         tdSql.checkRows(2)
 
         tdSql.query('select top(col2,1) from test interval(1y) order by col2')
         tdSql.checkData(0,0,10)
-        tdSql.error('select * from test where bottom(col2,1)=1')
-
+        tdSql.error("select * from test where bottom(col2,1)=1")
+        tdSql.error("select top(col14, 0) from test;")
     def stop(self):
         tdSql.close()
         tdLog.success("%s successfully executed" % __file__)
diff --git a/tests/system-test/2-query/unique.py b/tests/system-test/2-query/unique.py
new file mode 100644
index 0000000000000000000000000000000000000000..227efa6f9ceda24df73830bd46838fd657b67d48
--- /dev/null
+++ b/tests/system-test/2-query/unique.py
@@ -0,0 +1,457 @@
+from math import floor
+from random import randint, random
+from numpy import equal
+import taos
+import sys
+import datetime
+import inspect
+
+from util.log import *
+from util.sql import *
+from util.cases import *
+
+class TDTestCase:
+    updatecfgDict = {'debugFlag': 143, "cDebugFlag": 143, "uDebugFlag": 143, "rpcDebugFlag": 143, "tmrDebugFlag": 143,
+                     "jniDebugFlag": 143, "simDebugFlag": 143, "dDebugFlag": 143, "vDebugFlag": 143, "mDebugFlag": 143, "qDebugFlag": 143,
+                     "wDebugFlag": 143, "sDebugFlag": 143, "tsdbDebugFlag": 143, "tqDebugFlag": 143, "fsDebugFlag": 143, "fnDebugFlag": 143}
+
+    def init(self, conn, logSql):
+        tdLog.debug(f"start to execute {__file__}")
+        tdSql.init(conn.cursor())
+
+    def prepare_datas(self):
+        tdSql.execute(
+            '''create table stb1
+            (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
+            tags (t1 int)
+            '''
+        )
+
+        tdSql.execute(
+            '''
+            create table t1
+            (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
+            '''
+        )
+        for i in range(4):
+            tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
+
+        for i in range(9):
+            tdSql.execute(
+                f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
+            )
+            tdSql.execute(
+                f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
+            )
+        tdSql.execute("insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
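+        # extra rows: duplicated values, NULL fields and future timestamps to cover edge cases
+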
tdSql.execute("insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )") + tdSql.execute("insert into ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )") + tdSql.execute("insert into ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )") + + tdSql.execute("insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + tdSql.execute("insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + tdSql.execute("insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + + tdSql.execute( + f'''insert into t1 values + ( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a ) + ( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a ) + ( '2021-01-01 01:01:06.000', 3, 33333, 333, 33, 3.33, 33.33, 0, "binary3", "nchar3", now()+3a ) + ( '2021-05-07 01:01:10.000', 4, 44444, 444, 44, 4.44, 44.44, 1, "binary4", "nchar4", now()+4a ) + ( '2021-07-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( '2021-09-30 01:01:16.000', 5, 55555, 555, 55, 5.55, 55.55, 0, "binary5", "nchar5", now()+5a ) + ( '2022-02-01 01:01:20.000', 6, 66666, 666, 66, 6.66, 66.66, 1, "binary6", "nchar6", now()+6a ) + ( '2022-10-28 01:01:26.000', 7, 00000, 000, 00, 0.00, 00.00, 1, "binary7", "nchar7", "1970-01-01 08:00:00.000" ) + ( '2022-12-01 01:01:30.000', 8, -88888, -888, -88, -8.88, -88.88, 0, "binary8", "nchar8", "1969-01-01 01:00:00.000" ) + ( '2022-12-31 01:01:36.000', 9, -99999999999999999, -999, -99, -9.99, -999999999999999999999.99, 1, "binary9", "nchar9", "1900-01-01 00:00:00.000" ) + ( '2023-02-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ''' + ) + + def test_errors(self): + error_sql_lists = [ + "select unique from t1", + "select unique(123--123)==1 from t1", + "select unique(123,123) from t1", + "select unique(c1,ts) from t1", + "select unique(c1,c1,ts) from t1", + "select unique(c1) as 'd1' from t1", + "select unique(c1 ,c2 ) from t1", + "select unique(c1 ,NULL) from t1", + "select unique(,) from t1;", + "select unique(floor(c1) ab from t1)", + "select unique(c1) as int from t1", + "select unique('c1') from t1", + "select unique(NULL) from t1", + "select unique('') from t1", + "select unique(c%) from t1", + "select unique(t1) from t1", + "select unique(True) from t1", + "select unique(c1) , count(c1) from t1", + "select unique(c1) , avg(c1) from t1", + "select unique(c1) , min(c1) from t1", + "select unique(c1) , spread(c1) from t1", + "select unique(c1) , diff(c1) from t1", + "select unique(c1) , abs(c1) from t1", + "select unique(c1) , c1 from t1", + "select unique from stb1 partition by tbname", + "select unique(123--123)==1 from stb1 partition by tbname", + "select unique(123) from stb1 partition by tbname", + "select unique(c1,ts) from stb1 partition by tbname", + "select unique(c1,c1,ts) from stb1 partition by tbname", + "select unique(c1) as 'd1' from stb1 partition by tbname", + "select unique(c1 ,c2 ) from stb1 partition by tbname", + "select unique(c1 ,NULL) from stb1 partition by tbname", + "select unique(,) from stb1 partition by tbname;", + "select unique(floor(c1) ab from stb1 partition by tbname)", + "select unique(c1) 
as int from stb1 partition by tbname", + "select unique('c1') from stb1 partition by tbname", + "select unique(NULL) from stb1 partition by tbname", + "select unique('') from stb1 partition by tbname", + "select unique(c%) from stb1 partition by tbname", + #"select unique(t1) from stb1 partition by tbname", + "select unique(True) from stb1 partition by tbname", + "select unique(c1) , count(c1) from stb1 partition by tbname", + "select unique(c1) , avg(c1) from stb1 partition by tbname", + "select unique(c1) , min(c1) from stb1 partition by tbname", + "select unique(c1) , spread(c1) from stb1 partition by tbname", + "select unique(c1) , diff(c1) from stb1 partition by tbname", + "select unique(c1) , abs(c1) from stb1 partition by tbname", + "select unique(c1) , c1 from stb1 partition by tbname" + + ] + for error_sql in error_sql_lists: + tdSql.error(error_sql) + pass + + def support_types(self): + other_no_value_types = [ + "select unique(ts) from t1" , + "select unique(c7) from t1", + "select unique(c8) from t1", + "select unique(c9) from t1", + "select unique(ts) from ct1" , + "select unique(c7) from ct1", + "select unique(c8) from ct1", + "select unique(c9) from ct1", + "select unique(ts) from ct3" , + "select unique(c7) from ct3", + "select unique(c8) from ct3", + "select unique(c9) from ct3", + "select unique(ts) from ct4" , + "select unique(c7) from ct4", + "select unique(c8) from ct4", + "select unique(c9) from ct4", + "select unique(ts) from stb1 partition by tbname" , + "select unique(c7) from stb1 partition by tbname", + "select unique(c8) from stb1 partition by tbname", + "select unique(c9) from stb1 partition by tbname" + ] + + for type_sql in other_no_value_types: + tdSql.query(type_sql) + tdLog.info("support type ok , sql is : %s"%type_sql) + + type_sql_lists = [ + "select unique(c1) from t1", + "select unique(c2) from t1", + "select unique(c3) from t1", + "select unique(c4) from t1", + "select unique(c5) from t1", + "select unique(c6) from t1", + + "select unique(c1) from ct1", + "select unique(c2) from ct1", + "select unique(c3) from ct1", + "select unique(c4) from ct1", + "select unique(c5) from ct1", + "select unique(c6) from ct1", + + "select unique(c1) from ct3", + "select unique(c2) from ct3", + "select unique(c3) from ct3", + "select unique(c4) from ct3", + "select unique(c5) from ct3", + "select unique(c6) from ct3", + + "select unique(c1) from stb1 partition by tbname", + "select unique(c2) from stb1 partition by tbname", + "select unique(c3) from stb1 partition by tbname", + "select unique(c4) from stb1 partition by tbname", + "select unique(c5) from stb1 partition by tbname", + "select unique(c6) from stb1 partition by tbname", + + "select unique(c6) as alisb from stb1 partition by tbname", + "select unique(c6) alisb from stb1 partition by tbname", + ] + + for type_sql in type_sql_lists: + tdSql.query(type_sql) + + def check_unique_table(self , unique_sql): + # unique_sql = "select unique(c1) from ct1" + origin_sql = unique_sql.replace("unique(","").replace(")","") + tdSql.query(unique_sql) + unique_result = tdSql.queryResult + + unique_datas = [] + for elem in unique_result: + unique_datas.append(elem[0]) + + + tdSql.query(origin_sql) + origin_result = tdSql.queryResult + origin_datas = [] + for elem in origin_result: + origin_datas.append(elem[0]) + + pre_unique = [] + for elem in origin_datas: + if elem in pre_unique: + continue + else: + pre_unique.append(elem) + + if pre_unique == unique_datas: + tdLog.info(" unique query check pass , unique sql is: %s" 
%unique_sql) + else: + tdLog.exit(" unique query check fail , unique sql is: %s " %unique_sql) + + def basic_unique_function(self): + + # basic query + tdSql.query("select c1 from ct3") + tdSql.checkRows(0) + tdSql.query("select c1 from t1") + tdSql.checkRows(12) + tdSql.query("select c1 from stb1") + tdSql.checkRows(25) + + # used for empty table , ct3 is empty + tdSql.query("select unique(c1) from ct3") + tdSql.checkRows(0) + tdSql.query("select unique(c2) from ct3") + tdSql.checkRows(0) + tdSql.query("select unique(c3) from ct3") + tdSql.checkRows(0) + tdSql.query("select unique(c4) from ct3") + tdSql.checkRows(0) + tdSql.query("select unique(c5) from ct3") + tdSql.checkRows(0) + tdSql.query("select unique(c6) from ct3") + + # will support _rowts mix with + # tdSql.query("select unique(c6),_rowts from ct3") + + # auto check for t1 table + # used for regular table + tdSql.query("select unique(c1) from t1") + + tdSql.query("desc t1") + col_lists_rows = tdSql.queryResult + col_lists = [] + for col_name in col_lists_rows: + col_lists.append(col_name[0]) + + for col in col_lists: + self.check_unique_table(f"select unique({col}) from t1") + + # unique with super tags + + tdSql.query("select unique(c1) from ct1") + tdSql.checkRows(10) + + tdSql.query("select unique(c1) from ct4") + tdSql.checkRows(10) + + tdSql.error("select unique(c1),tbname from ct1") + tdSql.error("select unique(c1),t1 from ct1") + + # unique with common col + tdSql.error("select unique(c1) ,ts from ct1") + tdSql.error("select unique(c1) ,c1 from ct1") + + # unique with scalar function + tdSql.error("select unique(c1) ,abs(c1) from ct1") + tdSql.error("select unique(c1) , unique(c2) from ct1") + tdSql.error("select unique(c1) , abs(c2)+2 from ct1") + + + # unique with aggregate function + tdSql.error("select unique(c1) ,sum(c1) from ct1") + tdSql.error("select unique(c1) ,max(c1) from ct1") + tdSql.error("select unique(c1) ,csum(c1) from ct1") + tdSql.error("select unique(c1) ,count(c1) from ct1") + + # unique with filter where + tdSql.query("select unique(c1) from ct4 where c1 is null") + tdSql.checkData(0, 0, None) + + tdSql.query("select unique(c1) from ct4 where c1 >2 ") + tdSql.checkData(0, 0, 8) + tdSql.checkData(1, 0, 7) + tdSql.checkData(2, 0, 6) + tdSql.checkData(5, 0, 3) + + tdSql.query("select unique(c1) from ct4 where c2 between 0 and 99999") + tdSql.checkData(0, 0, 8) + tdSql.checkData(1, 0, 7) + tdSql.checkData(2, 0, 6) + tdSql.checkData(3, 0, 5) + tdSql.checkData(4, 0, 4) + tdSql.checkData(5, 0, 3) + tdSql.checkData(6, 0, 2) + tdSql.checkData(7, 0, 1) + tdSql.checkData(8, 0, 0) + + # unique with union all + tdSql.query("select unique(c1) from ct4 union all select c1 from ct1") + tdSql.checkRows(23) + tdSql.query("select unique(c1) from ct4 union all select distinct(c1) from ct4") + tdSql.checkRows(20) + tdSql.query("select unique(c2) from ct4 union all select abs(c2)/2 from ct4") + tdSql.checkRows(22) + + # unique with join + # prepare join datas with same ts + + tdSql.execute(" use db ") + tdSql.execute(" create stable st1 (ts timestamp , num int) tags(ind int)") + tdSql.execute(" create table tb1 using st1 tags(1)") + tdSql.execute(" create table tb2 using st1 tags(2)") + + tdSql.execute(" create stable st2 (ts timestamp , num int) tags(ind int)") + tdSql.execute(" create table ttb1 using st2 tags(1)") + tdSql.execute(" create table ttb2 using st2 tags(2)") + + start_ts = 1622369635000 # 2021-05-30 18:13:55 + + for i in range(10): + ts_value = start_ts+i*1000 + tdSql.execute(f" insert into tb1 
values({ts_value} , {i})") + tdSql.execute(f" insert into tb2 values({ts_value} , {i})") + + tdSql.execute(f" insert into ttb1 values({ts_value} , {i})") + tdSql.execute(f" insert into ttb2 values({ts_value} , {i})") + + tdSql.query("select unique(tb2.num) from tb1, tb2 where tb1.ts=tb2.ts ") + tdSql.checkRows(10) + tdSql.checkData(0,0,0) + tdSql.checkData(1,0,1) + tdSql.checkData(2,0,2) + tdSql.checkData(9,0,9) + + tdSql.query("select unique(tb2.num) from tb1, tb2 where tb1.ts=tb2.ts union all select unique(tb1.num) from tb1, tb2 where tb1.ts=tb2.ts ") + tdSql.checkRows(20) + tdSql.checkData(0,0,0) + tdSql.checkData(1,0,1) + tdSql.checkData(2,0,2) + tdSql.checkData(9,0,9) + + # nest query + # tdSql.query("select unique(c1) from (select c1 from ct1)") + tdSql.query("select c1 from (select unique(c1) c1 from ct4)") + tdSql.checkRows(10) + tdSql.checkData(0, 0, None) + tdSql.checkData(1, 0, 8) + tdSql.checkData(9, 0, 0) + + tdSql.query("select sum(c1) from (select unique(c1) c1 from ct1)") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 45) + + tdSql.query("select sum(c1) from (select distinct(c1) c1 from ct1) union all select sum(c1) from (select unique(c1) c1 from ct1)") + tdSql.checkRows(2) + tdSql.checkData(0, 0, 45) + tdSql.checkData(1, 0, 45) + + tdSql.query("select 1-abs(c1) from (select unique(c1) c1 from ct4)") + tdSql.checkRows(10) + tdSql.checkData(0, 0, None) + tdSql.checkData(1, 0, -7.000000000) + + + # bug for stable + #partition by tbname + # tdSql.query(" select unique(c1) from stb1 partition by tbname ") + # tdSql.checkRows(21) + + # tdSql.query(" select unique(c1) from stb1 partition by tbname ") + # tdSql.checkRows(21) + + # group by + tdSql.error("select unique(c1) from ct1 group by c1") + tdSql.error("select unique(c1) from ct1 group by tbname") + + # super table + + + + + def check_boundary_values(self): + + tdSql.execute("drop database if exists bound_test") + tdSql.execute("create database if not exists bound_test") + tdSql.execute("use bound_test") + tdSql.execute( + "create table stb_bound (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp) tags (t1 int);" + ) + tdSql.execute(f'create table sub1_bound using stb_bound tags ( 1 )') + tdSql.execute( + f"insert into sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" + ) + tdSql.execute( + f"insert into sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" + ) + + tdSql.execute( + f"insert into sub1_bound values ( now(), -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" + ) + + tdSql.execute( + f"insert into sub1_bound values ( now(), 2147483643, 9223372036854775803, 32763, 123, 3.39E+38, 1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )" + ) + + tdSql.execute( + f"insert into sub1_bound values ( now(), -2147483643, -9223372036854775803, -32763, -123, -3.39E+38, -1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )" + ) + + tdSql.error( + f"insert into sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )" + ) + + tdSql.query("select unique(c2) from sub1_bound") + tdSql.checkRows(5) + tdSql.checkData(0,0,9223372036854775807) + + def run(self): # sourcery skip: extract-duplicate-method, remove-redundant-fstring + tdSql.prepare() + + 
tdLog.printNoPrefix("==========step1:create table ==============") + + self.prepare_datas() + + tdLog.printNoPrefix("==========step2:test errors ==============") + + self.test_errors() + + tdLog.printNoPrefix("==========step3:support types ============") + + self.support_types() + + tdLog.printNoPrefix("==========step4: floor basic query ============") + + self.basic_unique_function() + + tdLog.printNoPrefix("==========step5: floor boundary query ============") + + self.check_boundary_values() + + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/7-tmq/subscribeDb.py b/tests/system-test/7-tmq/subscribeDb.py index 157bc7928b2800c5ba68c5f1b65ec601274dc4b9..43b707e65127586124caa16f8e5ec060d57a9f11 100644 --- a/tests/system-test/7-tmq/subscribeDb.py +++ b/tests/system-test/7-tmq/subscribeDb.py @@ -182,7 +182,7 @@ class TDTestCase: tdLog.info("create topics from db") topicName1 = 'topic_db1' - tdSql.execute("create topic %s as %s" %(topicName1, parameterDict['dbName'])) + tdSql.execute("create topic %s as database %s" %(topicName1, parameterDict['dbName'])) consumerId = 0 expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] topicList = topicName1 @@ -223,7 +223,7 @@ class TDTestCase: tdLog.info("create topics from db") topicName1 = 'topic_db1' - tdSql.execute("create topic %s as %s" %(topicName1, parameterDict['dbName'])) + tdSql.execute("create topic %s as database %s" %(topicName1, parameterDict['dbName'])) consumerId = 0 expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] topicList = topicName1 @@ -279,7 +279,7 @@ class TDTestCase: tdLog.info("create topics from db") topicName1 = 'topic_db1' - tdSql.execute("create topic %s as %s" %(topicName1, parameterDict['dbName'])) + tdSql.execute("create topic %s as database %s" %(topicName1, parameterDict['dbName'])) consumerId = 0 expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] @@ -343,7 +343,7 @@ class TDTestCase: tdLog.info("create topics from db") topicName1 = 'topic_db1' - tdSql.execute("create topic %s as %s" %(topicName1, parameterDict['dbName'])) + tdSql.execute("create topic %s as database %s" %(topicName1, parameterDict['dbName'])) consumerId = 0 expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] @@ -427,7 +427,7 @@ class TDTestCase: tdLog.info("create topics from db") topicName1 = 'topic_db1' - tdSql.execute("create topic %s as %s" %(topicName1, parameterDict['dbName'])) + tdSql.execute("create topic %s as database %s" %(topicName1, parameterDict['dbName'])) consumerId = 0 expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] + parameterDict2["rowsPerTbl"] * parameterDict2["ctbNum"] diff --git a/tests/system-test/7-tmq/subscribeDb0.py b/tests/system-test/7-tmq/subscribeDb0.py index d6f93acfd6599c60708f0726caf26b7fec01a0f3..ce273367c75d014d4a6d4228f97e50fd7f3b7df6 100644 --- a/tests/system-test/7-tmq/subscribeDb0.py +++ b/tests/system-test/7-tmq/subscribeDb0.py @@ -195,7 +195,7 @@ class TDTestCase: tdLog.info("create topics from db") topicName1 = 'topic_db1' - tdSql.execute("create topic %s as %s" %(topicName1, parameterDict['dbName'])) + tdSql.execute("create topic %s as database %s" %(topicName1, parameterDict['dbName'])) consumerId = 0 expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] + parameterDict2["rowsPerTbl"] * parameterDict2["ctbNum"] @@ -272,7 +272,7 @@ class TDTestCase: 
tdLog.info("create topics from db") topicName1 = 'topic_db1' - tdSql.execute("create topic %s as %s" %(topicName1, parameterDict['dbName'])) + tdSql.execute("create topic %s as database %s" %(topicName1, parameterDict['dbName'])) consumerId = 0 expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] + parameterDict2["rowsPerTbl"] * parameterDict2["ctbNum"] @@ -358,8 +358,8 @@ class TDTestCase: topicName1 = 'topic_db60' topicName2 = 'topic_db61' - tdSql.execute("create topic %s as %s" %(topicName1, parameterDict['dbName'])) - tdSql.execute("create topic %s as %s" %(topicName2, parameterDict2['dbName'])) + tdSql.execute("create topic %s as database %s" %(topicName1, parameterDict['dbName'])) + tdSql.execute("create topic %s as database %s" %(topicName2, parameterDict2['dbName'])) consumerId = 0 expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] + parameterDict2["rowsPerTbl"] * parameterDict2["ctbNum"] @@ -443,8 +443,8 @@ class TDTestCase: topicName1 = 'topic_db60' topicName2 = 'topic_db61' - tdSql.execute("create topic %s as %s" %(topicName1, parameterDict['dbName'])) - tdSql.execute("create topic %s as %s" %(topicName2, parameterDict2['dbName'])) + tdSql.execute("create topic %s as database %s" %(topicName1, parameterDict['dbName'])) + tdSql.execute("create topic %s as database %s" %(topicName2, parameterDict2['dbName'])) consumerId = 0 expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] + parameterDict2["rowsPerTbl"] * parameterDict2["ctbNum"] diff --git a/tests/system-test/7-tmq/subscribeDb1.py b/tests/system-test/7-tmq/subscribeDb1.py index 56db157ab849f609eb22debde6936d2de406ee06..ca87f0dba533404aaf14a6cd2437417d962260ed 100644 --- a/tests/system-test/7-tmq/subscribeDb1.py +++ b/tests/system-test/7-tmq/subscribeDb1.py @@ -183,7 +183,7 @@ class TDTestCase: tdLog.info("create topics from db") topicName1 = 'topic_db1' - tdSql.execute("create topic %s as %s" %(topicName1, parameterDict['dbName'])) + tdSql.execute("create topic %s as database %s" %(topicName1, parameterDict['dbName'])) consumerId = 0 expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] / 2 topicList = topicName1 @@ -261,7 +261,7 @@ class TDTestCase: tdLog.info("create topics from db") topicName1 = 'topic_db1' - tdSql.execute("create topic %s as %s" %(topicName1, parameterDict['dbName'])) + tdSql.execute("create topic %s as database %s" %(topicName1, parameterDict['dbName'])) consumerId = 0 expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] / 2 topicList = topicName1 @@ -339,7 +339,7 @@ class TDTestCase: tdLog.info("create topics from db") topicName1 = 'topic_db1' - tdSql.execute("create topic %s as %s" %(topicName1, parameterDict['dbName'])) + tdSql.execute("create topic %s as database %s" %(topicName1, parameterDict['dbName'])) consumerId = 0 expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] topicList = topicName1 @@ -382,6 +382,7 @@ class TDTestCase: tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) tdLog.exit("tmq consume rows error!") + time.sleep(15) tdSql.query("drop topic %s"%topicName1) tdLog.printNoPrefix("======== test case 10 end ...... 
") @@ -410,7 +411,7 @@ class TDTestCase: tdLog.info("create topics from db") topicName1 = 'topic_db1' - tdSql.execute("create topic %s as %s" %(topicName1, parameterDict['dbName'])) + tdSql.execute("create topic %s as database %s" %(topicName1, parameterDict['dbName'])) consumerId = 0 expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] topicList = topicName1 @@ -453,6 +454,7 @@ class TDTestCase: tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) tdLog.exit("tmq consume rows error!") + time.sleep(15) tdSql.query("drop topic %s"%topicName1) tdLog.printNoPrefix("======== test case 11 end ...... ") diff --git a/tests/system-test/fulltest.bat b/tests/system-test/fulltest.bat index f9dfdf9a5486bb13d0ad6ed16b8edeece083a6d7..871c93c9824333acb6ba05474d9249fb9f8d8ed7 100644 --- a/tests/system-test/fulltest.bat +++ b/tests/system-test/fulltest.bat @@ -1,3 +1,4 @@ +python3 .\test.py -f 0-others\taosShell.py python3 .\test.py -f 0-others\taosShellError.py python3 .\test.py -f 0-others\taosShellNetChk.py \ No newline at end of file diff --git a/tests/system-test/fulltest.sh b/tests/system-test/fulltest.sh old mode 100755 new mode 100644 index 37c7f18177d27a7b56f4d6219b0151f973cd8cbb..a3770b381cab021ced958d12face0352ffc51067 --- a/tests/system-test/fulltest.sh +++ b/tests/system-test/fulltest.sh @@ -14,6 +14,9 @@ python3 ./test.py -f 0-others/udf_restart_taosd.py python3 ./test.py -f 0-others/user_control.py python3 ./test.py -f 0-others/fsync.py +python3 ./test.py -f 1-insert/opentsdb_telnet_line_taosc_insert.py +python3 ./test.py -f 1-insert/test_stmt_muti_insert_query.py + python3 ./test.py -f 2-query/between.py python3 ./test.py -f 2-query/distinct.py python3 ./test.py -f 2-query/varchar.py @@ -53,8 +56,8 @@ python3 ./test.py -f 2-query/Timediff.py python3 ./test.py -f 2-query/top.py python3 ./test.py -f 2-query/bottom.py - - +python3 ./test.py -f 2-query/percentile.py +python3 ./test.py -f 2-query/apercentile.py python3 ./test.py -f 2-query/abs.py python3 ./test.py -f 2-query/ceil.py python3 ./test.py -f 2-query/floor.py @@ -70,6 +73,18 @@ python3 ./test.py -f 2-query/arccos.py python3 ./test.py -f 2-query/arctan.py python3 ./test.py -f 2-query/query_cols_tags_and_or.py # python3 ./test.py -f 2-query/nestedQuery.py +# TD-15983 subquery output duplicate name column. 
+# Xiangyang Guo, please modify the following script
+# python3 ./test.py -f 2-query/nestedQuery_str.py
+python3 ./test.py -f 2-query/avg.py
+python3 ./test.py -f 2-query/elapsed.py
+python3 ./test.py -f 2-query/csum.py
+python3 ./test.py -f 2-query/mavg.py
+python3 ./test.py -f 2-query/diff.py
+python3 ./test.py -f 2-query/sample.py
+python3 ./test.py -f 2-query/function_diff.py
+python3 ./test.py -f 2-query/unique.py
+python3 ./test.py -f 2-query/stateduration.py
 
 python3 ./test.py -f 7-tmq/basic5.py
 python3 ./test.py -f 7-tmq/subscribeDb.py
@@ -81,4 +96,3 @@ python3 ./test.py -f 7-tmq/subscribeStb1.py
 python3 ./test.py -f 7-tmq/subscribeStb2.py
 python3 ./test.py -f 7-tmq/subscribeStb3.py
 python3 ./test.py -f 7-tmq/subscribeStb4.py
-python3 ./test.py -f 7-tmq/subscribeStb2.py
diff --git a/tests/system-test/insert.json b/tests/system-test/insert.json
deleted file mode 100644
index 5dea9eabfef35de733e70c7a7ac251b53f5c3563..0000000000000000000000000000000000000000
--- a/tests/system-test/insert.json
+++ /dev/null
@@ -1,76 +0,0 @@
-{
-    "filetype": "insert",
-    "cfgdir": "/etc/taos",
-    "host": "127.0.0.1",
-    "port": 6030,
-    "user": "root",
-    "password": "taosdata",
-    "thread_count": 16,
-    "create_table_thread_count": 1,
-    "result_file": "./insert_res.txt",
-    "confirm_parameter_prompt": "no",
-    "insert_interval": 0,
-    "interlace_rows": 0,
-    "num_of_records_per_req": 10000,
-    "prepared_rand": 10000,
-    "chinese": "no",
-    "databases": [
-        {
-            "dbinfo": {
-                "name": "db",
-                "drop": "yes",
-                "vgroups": 4,
-                "replica": 1,
-                "precision": "ms"
-            },
-            "super_tables": [
-                {
-                    "name": "stb",
-                    "child_table_exists": "no",
-                    "childtable_count": 1000,
-                    "childtable_prefix": "stb_",
-                    "escape_character": "no",
-                    "auto_create_table": "no",
-                    "batch_create_tbl_num": 10,
-                    "data_source": "rand",
-                    "insert_mode": "taosc",
-                    "non_stop_mode": "no",
-                    "line_protocol": "line",
-                    "insert_rows": 100000,
-                    "interlace_rows": 0,
-                    "insert_interval": 0,
-                    "disorder_ratio": 0,
-                    "timestamp_step": 1,
-                    "start_timestamp": "2020-10-01 00:00:00.000",
-                    "use_sample_ts": "no",
-                    "tags_file": "",
-                    "columns": [
-                        {
-                            "type": "FLOAT",
-                            "name": "current",
-                            "count": 4,
-                            "max": 12,
-                            "min": 8
-                        },
-                        { "type": "INT", "name": "voltage", "max": 225, "min": 215 },
-                        { "type": "FLOAT", "name": "phase", "max": 1, "min": 0 }
-                    ],
-                    "tags": [
-                        {
-                            "type": "TINYINT",
-                            "name": "groupid",
-                            "max": 10,
-                            "min": 1
-                        },
-                        {
-                            "name": "location",
-                            "type": "BINARY",
-                            "len": 16,
-                            "values": ["beijing", "shanghai"]
-                        }
-                    ]
-                }
-            ]
-        }
-    ]
-}
diff --git a/tests/system-test/test.py b/tests/system-test/test.py
index 6fd7237b339dc1d2eeeee1d1f5965ec77d03b53d..a11085708c42ec63672129d37636e30fb9140598 100644
--- a/tests/system-test/test.py
+++ b/tests/system-test/test.py
@@ -37,6 +37,7 @@ if __name__ == "__main__":
     masterIp = ""
     testCluster = False
     valgrind = 0
+    killValgrind = 1
     logSql = True
     stop = 0
     restart = False
@@ -45,8 +46,8 @@ if __name__ == "__main__":
     windows = 1
     updateCfgDict = {}
     execCmd = ""
-    opts, args = getopt.gnu_getopt(sys.argv[1:], 'f:p:m:l:scghrd:e:', [
-        'file=', 'path=', 'master', 'logSql', 'stop', 'cluster', 'valgrind', 'help', 'restart', 'updateCfgDict', 'execCmd'])
+    opts, args = getopt.gnu_getopt(sys.argv[1:], 'f:p:m:l:scghrd:ke:', [
+        'file=', 'path=', 'master', 'logSql', 'stop', 'cluster', 'valgrind', 'help', 'restart', 'updateCfgDict', 'killValgrind', 'execCmd'])
     for key, value in opts:
         if key in ['-h', '--help']:
             tdLog.printNoPrefix(
@@ -60,6 +61,7 @@ if __name__ == "__main__":
             tdLog.printNoPrefix('-g valgrind Test Flag')
             tdLog.printNoPrefix('-r taosd restart test')
             tdLog.printNoPrefix('-d update cfg dict, base64 json str')
+            tdLog.printNoPrefix('-k do not kill the valgrind process')
             tdLog.printNoPrefix('-e eval str to run')
             sys.exit(0)
 
@@ -100,6 +102,9 @@ if __name__ == "__main__":
             print('updateCfgDict convert fail.')
             sys.exit(0)
 
+        if key in ['-k', '--killValgrind']:
+            killValgrind = 0
+
         if key in ['-e', '--execCmd']:
             try:
                 execCmd = base64.b64decode(value.encode()).decode()
@@ -145,7 +150,11 @@ if __name__ == "__main__":
     if masterIp == "":
         host = '127.0.0.1'
     else:
-        host = masterIp
+        try:
+            config = eval(masterIp)
+            host = config["host"]
+        except Exception as r:
+            host = masterIp
 
     tdLog.info("Procedures for tdengine deployed in %s" % (host))
     if windows:
@@ -185,6 +194,7 @@ if __name__ == "__main__":
         else:
             tdCases.runAllWindows(conn)
     else:
+        tdDnodes.setKillValgrind(killValgrind)
         tdDnodes.init(deployPath, masterIp)
         tdDnodes.setTestCluster(testCluster)
         tdDnodes.setValgrind(valgrind)
diff --git a/tests/test/c/CMakeLists.txt b/tests/test/c/CMakeLists.txt
index 964f9fee4abbdc244b83f50390c2b660be6b476c..505c290f2a7b45cdba530fcfbad3e42adefdea90 100644
--- a/tests/test/c/CMakeLists.txt
+++ b/tests/test/c/CMakeLists.txt
@@ -23,20 +23,18 @@ target_link_libraries(
   PUBLIC os
 )
-if(NOT TD_WINDOWS)
-  add_executable(sdbDump sdbDump.c)
-  target_link_libraries(
-    sdbDump
-    PUBLIC dnode
-    PUBLIC mnode
-    PUBLIC sdb
-    PUBLIC os
-  )
-  target_include_directories(
-    sdbDump
-    PUBLIC "${TD_SOURCE_DIR}/include/dnode/mnode"
-    PRIVATE "${TD_SOURCE_DIR}/source/dnode/mnode/impl/inc"
-    PRIVATE "${TD_SOURCE_DIR}/source/dnode/mnode/sdb/inc"
-    PRIVATE "${TD_SOURCE_DIR}/source/dnode/mgmt/node_mgmt/inc"
-  )
-ENDIF ()
\ No newline at end of file
+add_executable(sdbDump sdbDump.c)
+target_link_libraries(
+  sdbDump
+  PUBLIC dnode
+  PUBLIC mnode
+  PUBLIC sdb
+  PUBLIC os
+)
+target_include_directories(
+  sdbDump
+  PUBLIC "${TD_SOURCE_DIR}/include/dnode/mnode"
+  PRIVATE "${TD_SOURCE_DIR}/source/dnode/mnode/impl/inc"
+  PRIVATE "${TD_SOURCE_DIR}/source/dnode/mnode/sdb/inc"
+  PRIVATE "${TD_SOURCE_DIR}/source/dnode/mgmt/node_mgmt/inc"
+)
\ No newline at end of file
diff --git a/tests/test/c/sdbDump.c b/tests/test/c/sdbDump.c
index 8be2822c0ae8d6c2176895d0a6e51449d81ea44b..3b3a9fc85ec7c7e20e6b91574034bb4a196e9876 100644
--- a/tests/test/c/sdbDump.c
+++ b/tests/test/c/sdbDump.c
@@ -21,12 +21,12 @@
 #include "tjson.h"
 
 #define TMP_DNODE_DIR           TD_TMP_DIR_PATH "dumpsdb"
-#define TMP_MNODE_DIR           TD_TMP_DIR_PATH "dumpsdb/mnode"
-#define TMP_SDB_DATA_DIR        TD_TMP_DIR_PATH "dumpsdb/mnode/data"
-#define TMP_SDB_SYNC_DIR        TD_TMP_DIR_PATH "dumpsdb/mnode/sync"
-#define TMP_SDB_DATA_FILE       TD_TMP_DIR_PATH "dumpsdb/mnode/data/sdb.data"
-#define TMP_SDB_RAFT_CFG_FILE   TD_TMP_DIR_PATH "dumpsdb/mnode/sync/raft_config.json"
-#define TMP_SDB_RAFT_STORE_FILE TD_TMP_DIR_PATH "dumpsdb/mnode/sync/raft_store.json"
+#define TMP_MNODE_DIR           TD_TMP_DIR_PATH "dumpsdb" TD_DIRSEP "mnode"
+#define TMP_SDB_DATA_DIR        TD_TMP_DIR_PATH "dumpsdb" TD_DIRSEP "mnode" TD_DIRSEP "data"
+#define TMP_SDB_SYNC_DIR        TD_TMP_DIR_PATH "dumpsdb" TD_DIRSEP "mnode" TD_DIRSEP "sync"
+#define TMP_SDB_DATA_FILE       TD_TMP_DIR_PATH "dumpsdb" TD_DIRSEP "mnode" TD_DIRSEP "data" TD_DIRSEP "sdb.data"
+#define TMP_SDB_RAFT_CFG_FILE   TD_TMP_DIR_PATH "dumpsdb" TD_DIRSEP "mnode" TD_DIRSEP "sync" TD_DIRSEP "raft_config.json"
+#define TMP_SDB_RAFT_STORE_FILE TD_TMP_DIR_PATH "dumpsdb" TD_DIRSEP "mnode" TD_DIRSEP "sync" TD_DIRSEP "raft_store.json"
 
 void reportStartup(const char *name, const char *desc) {}
 
@@ -283,9 +283,7 @@ void dumpTrans(SSdb
*pSdb, SJson *json) { tjsonAddStringToObject(item, "createdTime", i642str(pObj->createdTime)); tjsonAddStringToObject(item, "dbUid", i642str(pObj->dbUid)); tjsonAddStringToObject(item, "dbname", pObj->dbname); - tjsonAddIntegerToObject(item, "redoLogNum", taosArrayGetSize(pObj->redoLogs)); - tjsonAddIntegerToObject(item, "undoLogNum", taosArrayGetSize(pObj->undoLogs)); - tjsonAddIntegerToObject(item, "commitLogNum", taosArrayGetSize(pObj->commitLogs)); + tjsonAddIntegerToObject(item, "commitLogNum", taosArrayGetSize(pObj->commitActions)); tjsonAddIntegerToObject(item, "redoActionNum", taosArrayGetSize(pObj->redoActions)); tjsonAddIntegerToObject(item, "undoActionNum", taosArrayGetSize(pObj->undoActions)); @@ -412,13 +410,23 @@ int32_t parseArgs(int32_t argc, char *argv[]) { char dataFile[PATH_MAX] = {0}; char raftCfgFile[PATH_MAX] = {0}; char raftStoreFile[PATH_MAX] = {0}; - snprintf(dataFile, PATH_MAX, "%s/mnode/data/sdb.data", tsDataDir); - snprintf(raftCfgFile, PATH_MAX, "%s/mnode/sync/raft_config.json", tsDataDir); - snprintf(raftStoreFile, PATH_MAX, "%s/mnode/sync/raft_store.json", tsDataDir); + snprintf(dataFile, PATH_MAX, "%s" TD_DIRSEP "mnode" TD_DIRSEP "data" TD_DIRSEP "sdb.data", tsDataDir); + snprintf(raftCfgFile, PATH_MAX, "%s" TD_DIRSEP "mnode" TD_DIRSEP "sync" TD_DIRSEP "raft_config.json", tsDataDir); + snprintf(raftStoreFile, PATH_MAX, "%s" TD_DIRSEP "mnode" TD_DIRSEP "sync" TD_DIRSEP "raft_store.json", tsDataDir); char cmd[PATH_MAX * 2] = {0}; snprintf(cmd, sizeof(cmd), "rm -rf %s", TMP_DNODE_DIR); system(cmd); +#ifdef WINDOWS + taosMulMkDir(TMP_SDB_DATA_DIR); + taosMulMkDir(TMP_SDB_SYNC_DIR); + snprintf(cmd, sizeof(cmd), "cp %s %s 2>nul", dataFile, TMP_SDB_DATA_FILE); + system(cmd); + snprintf(cmd, sizeof(cmd), "cp %s %s 2>nul", raftCfgFile, TMP_SDB_RAFT_CFG_FILE); + system(cmd); + snprintf(cmd, sizeof(cmd), "cp %s %s 2>nul", raftStoreFile, TMP_SDB_RAFT_STORE_FILE); + system(cmd); +#else snprintf(cmd, sizeof(cmd), "mkdir -p %s", TMP_SDB_DATA_DIR); system(cmd); snprintf(cmd, sizeof(cmd), "mkdir -p %s", TMP_SDB_SYNC_DIR); @@ -429,6 +437,7 @@ int32_t parseArgs(int32_t argc, char *argv[]) { system(cmd); snprintf(cmd, sizeof(cmd), "cp %s %s 2>/dev/null", raftStoreFile, TMP_SDB_RAFT_STORE_FILE); system(cmd); +#endif strcpy(tsDataDir, TMP_DNODE_DIR); return 0; diff --git a/tools/shell/src/shellEngine.c b/tools/shell/src/shellEngine.c index a866488d3ad2d239c47b3279f506e755737b88bf..851d9a2070b75f7863f8e55f5779e9bac90607db 100644 --- a/tools/shell/src/shellEngine.c +++ b/tools/shell/src/shellEngine.c @@ -587,6 +587,8 @@ int32_t shellCalcColWidth(TAOS_FIELD *field, int32_t precision) { int32_t width = (int32_t)strlen(field->name); switch (field->type) { + case TSDB_DATA_TYPE_NULL: + return TMAX(4, width); // null case TSDB_DATA_TYPE_BOOL: return TMAX(5, width); // 'false' diff --git a/tools/taos-tools b/tools/taos-tools index 2f3dfddd4d9a869e706ba3cf98fb6d769404cd7c..4d83d8c62973506f760bcaa3a33f4665ed9046d0 160000 --- a/tools/taos-tools +++ b/tools/taos-tools @@ -1 +1 @@ -Subproject commit 2f3dfddd4d9a869e706ba3cf98fb6d769404cd7c +Subproject commit 4d83d8c62973506f760bcaa3a33f4665ed9046d0