diff --git a/Jenkinsfile2 b/Jenkinsfile2
index db49ab27d76f03bbaab0e0bf4aeba74b2f7ae361..a2b55e3acca0c141a2d550ccabb5bb129adb3d7e 100644
--- a/Jenkinsfile2
+++ b/Jenkinsfile2
@@ -269,7 +269,7 @@ pipeline {
}
}
stage('linux test') {
- agent{label " slave3_0 || slave15 || slave16 || slave17 "}
+ agent{label " worker03 || slave215 || slave217 || slave219 "}
options { skipDefaultCheckout() }
when {
changeRequest()
@@ -287,9 +287,9 @@ pipeline {
'''
sh '''
cd ${WKC}/tests/parallel_test
- export DEFAULT_RETRY_TIME=1
+ export DEFAULT_RETRY_TIME=2
date
- timeout 2100 time ./run.sh -e -m /home/m.json -t /tmp/cases.task -b ${BRANCH_NAME} -l ${WKDIR}/log -o 480
+ timeout 2100 time ./run.sh -e -m /home/m.json -t /tmp/cases.task -b ${BRANCH_NAME}_${BUILD_ID} -l ${WKDIR}/log -o 480
'''
}
}
diff --git a/cmake/cmake.options b/cmake/cmake.options
index cb6fd1400d43b6073d81ab43e46140343b277512..ab3c5ac1ad08b98ee2dbe09692584be63e477d71 100644
--- a/cmake/cmake.options
+++ b/cmake/cmake.options
@@ -49,7 +49,7 @@ IF(${TD_WINDOWS})
option(
BUILD_TEST
"If build unit tests using googletest"
- OFF
+ ON
)
ELSE ()
diff --git a/contrib/test/craft/raftMain.c b/contrib/test/craft/raftMain.c
index 12be3deb2e33aba9be9b45acd1595a749ab1b2c5..e1c66422b3b90b23ff8c6f01cf07aa8adace5983 100644
--- a/contrib/test/craft/raftMain.c
+++ b/contrib/test/craft/raftMain.c
@@ -243,7 +243,7 @@ void console(SRaftServer *pRaftServer) {
} else if (strcmp(cmd, "dropnode") == 0) {
- char host[HOST_LEN];
+ char host[HOST_LEN] = {0};
uint32_t port;
parseAddr(param1, host, HOST_LEN, &port);
uint64_t rid = raftId(host, port);
@@ -258,7 +258,7 @@ void console(SRaftServer *pRaftServer) {
} else if (strcmp(cmd, "put") == 0) {
- char buf[256];
+ char buf[256] = {0};
snprintf(buf, sizeof(buf), "%s--%s", param1, param2);
putValue(&pRaftServer->raft, buf);
diff --git a/docs-cn/07-develop/06-subscribe.mdx b/docs-cn/07-develop/06-subscribe.mdx
index ad5561fa09087c4c562ac340506f56d756bd98b2..0f531e07c9dce7dbb03bacebf8e5cbefae82671f 100644
--- a/docs-cn/07-develop/06-subscribe.mdx
+++ b/docs-cn/07-develop/06-subscribe.mdx
@@ -145,7 +145,7 @@ void subscribe_callback(TAOS_SUB* tsub, TAOS_RES *res, void* param, int code) {
taos_unsubscribe(tsub, keep);
```
-其第二个参数,用于决定是否在客户端保留订阅的进度信息。如果这个参数是**false**(**0**),那无论下次调用 `taos_subscribe` 时的 `restart` 参数是什么,订阅都只能重新开始。另外,进度信息的保存位置是 _{DataDir}/subscribe/_ 这个目录下,每个订阅有一个与其 `topic` 同名的文件,删掉某个文件,同样会导致下次创建其对应的订阅时只能重新开始。
+其第二个参数,用于决定是否在客户端保留订阅的进度信息。如果这个参数是**false**(**0**),那无论下次调用 `taos_subscribe` 时的 `restart` 参数是什么,订阅都只能重新开始。另外,进度信息的保存位置是 _{DataDir}/subscribe/_ 这个目录下(注:`taos.cfg` 配置文件中 `DataDir` 参数值默认为 **/var/lib/taos/**,但是 Windows 服务器上本身不存在该目录,所以需要在 Windows 的配置文件中修改 `DataDir` 参数值为相应的已存在目录),每个订阅有一个与其 `topic` 同名的文件,删掉某个文件,同样会导致下次创建其对应的订阅时只能重新开始。
代码介绍完毕,我们来看一下实际的运行效果。假设:
diff --git a/docs-cn/12-taos-sql/07-function.md b/docs-cn/12-taos-sql/07-function.md
index f6e564419ddaa18931b0f0e0e4e7b5b3219a92f6..87929e884e7ce735fd9841d520afe7e897cb860b 100644
--- a/docs-cn/12-taos-sql/07-function.md
+++ b/docs-cn/12-taos-sql/07-function.md
@@ -1766,6 +1766,8 @@ SELECT TIMEDIFF(ts_val1 | datetime_string1 | ts_col1, ts_val2 | datetime_string2
1u(微秒),1a(毫秒),1s(秒),1m(分),1h(小时),1d(天)。
- 如果时间单位 time_unit 未指定, 返回的时间差值精度与当前 DATABASE 设置的时间精度一致。
+**支持的版本**:2.6.0.0 及以后的版本。
+
**示例**:
```sql
diff --git a/docs-cn/27-train-faq/01-faq.md b/docs-cn/27-train-faq/01-faq.md
index a657a95e8d0087eb50265adb86fb34f04d43d501..b16e24d4343c146471ccf1df8ebf3111c92cd3ba 100644
--- a/docs-cn/27-train-faq/01-faq.md
+++ b/docs-cn/27-train-faq/01-faq.md
@@ -33,15 +33,15 @@ title: 常见问题及反馈
### 2. Windows 平台下 JDBCDriver 找不到动态链接库,怎么办?
-请看为此问题撰写的[技术博客](https://www.taosdata.com/blog/2019/12/03/950.html)。
+请看为此问题撰写的 [技术博客](https://www.taosdata.com/blog/2019/12/03/950.html)。
### 3. 创建数据表时提示 more dnodes are needed
-请看为此问题撰写的[技术博客](https://www.taosdata.com/blog/2019/12/03/965.html)。
+请看为此问题撰写的 [技术博客](https://www.taosdata.com/blog/2019/12/03/965.html)。
### 4. 如何让 TDengine crash 时生成 core 文件?
-请看为此问题撰写的[技术博客](https://www.taosdata.com/blog/2019/12/06/974.html)。
+请看为此问题撰写的 [技术博客](https://www.taosdata.com/blog/2019/12/06/974.html)。
### 5. 遇到错误“Unable to establish connection” 怎么办?
@@ -128,19 +128,30 @@ properties.setProperty(TSDBDriver.LOCALE_KEY, "UTF-8");
Connection = DriverManager.getConnection(url, properties);
```
-### 13.JDBC 报错: the executed SQL is not a DML or a DDL?
+### 13. Windows 系统下客户端无法正常显示中文字符?
+
+Windows 系统中一般是采用 GBK/GB18030 存储中文字符,而 TDengine 的默认字符集为 UTF-8 ,在 Windows 系统中使用 TDengine 客户端时,客户端驱动会将字符统一转换为 UTF-8 编码后发送到服务端存储,因此在应用开发过程中,调用接口时正确配置当前的中文字符集即可。
+
+【 v2.2.1.5以后版本 】在 Windows 10 环境下运行 TDengine 客户端命令行工具 taos 时,若无法正常输入、显示中文,可以对客户端 taos.cfg 做如下配置:
+
+```
+locale C
+charset UTF-8
+```
+
+### 14. JDBC 报错: the executed SQL is not a DML or a DDL?
请更新至最新的 JDBC 驱动,参考 [Java 连接器](/reference/connector/java)
-### 14. taos connect failed, reason: invalid timestamp
+### 15. taos connect failed, reason: invalid timestamp
常见原因是服务器和客户端时间没有校准,可以通过和时间服务器同步的方式(Linux 下使用 ntpdate 命令,Windows 在系统时间设置中选择自动同步)校准。
-### 15. 表名显示不全
+### 16. 表名显示不全
由于 taos shell 在终端中显示宽度有限,有可能比较长的表名显示不全,如果按照显示的不全的表名进行相关操作会发生 Table does not exist 错误。解决方法可以是通过修改 taos.cfg 文件中的设置项 maxBinaryDisplayWidth, 或者直接输入命令 set max_binary_display_width 100。或者在命令结尾使用 \G 参数来调整结果的显示方式。
-### 16. 如何进行数据迁移?
+### 17. 如何进行数据迁移?
TDengine 是根据 hostname 唯一标志一台机器的,在数据文件从机器 A 移动机器 B 时,注意如下两件事:
@@ -148,7 +159,7 @@ TDengine 是根据 hostname 唯一标志一台机器的,在数据文件从机
- 2.0.7.0 及以后的版本,到/var/lib/taos/dnode 下,修复 dnodeEps.json 的 dnodeId 对应的 FQDN,重启。确保机器内所有机器的此文件是完全相同的。
- 1.x 和 2.x 版本的存储结构不兼容,需要使用迁移工具或者自己开发应用导出导入数据。
-### 17. 如何在命令行程序 taos 中临时调整日志级别
+### 18. 如何在命令行程序 taos 中临时调整日志级别
为了调试方便,从 2.0.16 版本开始,命令行程序 taos 新增了与日志记录相关的两条指令:
@@ -169,7 +180,7 @@ ALTER LOCAL RESETLOG;
-### 18. go 语言编写组件编译失败怎样解决?
+### 19. go 语言编写组件编译失败怎样解决?
TDengine 2.3.0.0 及之后的版本包含一个使用 go 语言开发的 taosAdapter 独立组件,需要单独运行,取代之前 taosd 内置的 httpd ,提供包含原 httpd 功能以及支持多种其他软件(Prometheus、Telegraf、collectd、StatsD 等)的数据接入功能。
使用最新 develop 分支代码编译需要先 `git submodule update --init --recursive` 下载 taosAdapter 仓库代码后再编译。
@@ -184,7 +195,7 @@ go env -w GOPROXY=https://goproxy.cn,direct
如果希望继续使用之前的内置 httpd,可以关闭 taosAdapter 编译,使用
`cmake .. -DBUILD_HTTP=true` 使用原来内置的 httpd。
-### 19. 如何查询数据占用的存储空间大小?
+### 20. 如何查询数据占用的存储空间大小?
默认情况下,TDengine 的数据文件存储在 /var/lib/taos ,日志文件存储在 /var/log/taos 。
@@ -193,3 +204,50 @@ go env -w GOPROXY=https://goproxy.cn,direct
若想查看单个数据库占用的大小,可在命令行程序 taos 内指定要查看的数据库后执行 `show vgroups;` ,通过得到的 VGroup id 去 /var/lib/taos/vnode 下查看包含的文件夹大小。
若仅仅想查看指定(超级)表的数据块分布及大小,可查看[_block_dist 函数](https://docs.taosdata.com/taos-sql/select/#_block_dist-%E5%87%BD%E6%95%B0)
+
+### 21. 客户端连接串如何保证高可用?
+
+请看为此问题撰写的 [技术博客](https://www.taosdata.com/blog/2021/04/16/2287.html)
+
+### 22. 时间戳的时区信息是怎样处理的?
+
+TDengine 中时间戳的时区总是由客户端进行处理,而与服务端无关。具体来说,客户端会对 SQL 语句中的时间戳进行时区转换,转为 UTC 时区(即 Unix 时间戳——Unix Timestamp)再交由服务端进行写入和查询;在读取数据时,服务端也是采用 UTC 时区提供原始数据,客户端收到后再根据本地设置,把时间戳转换为本地系统所要求的时区进行显示。
+
+客户端在处理时间戳字符串时,会采取如下逻辑:
+
+1. 在未做特殊设置的情况下,客户端默认使用所在操作系统的时区设置。
+2. 如果在 taos.cfg 中设置了 timezone 参数,则客户端会以这个配置文件中的设置为准。
+3. 如果在 C/C++/Java/Python 等各种编程语言的 Connector Driver 中,在建立数据库连接时显式指定了 timezone,那么会以这个指定的时区设置为准。例如 Java Connector 的 JDBC URL 中就有 timezone 参数。
+4. 在书写 SQL 语句时,也可以直接使用 Unix 时间戳(例如 `1554984068000`)或带有时区的时间戳字符串,也即以 RFC 3339 格式(例如 `2013-04-12T15:52:01.123+08:00`)或 ISO-8601 格式(例如 `2013-04-12T15:52:01.123+0800`)来书写时间戳,此时这些时间戳的取值将不再受其他时区设置的影响。
+
+### 23. TDengine 2.0 都会用到哪些网络端口?
+
+在 TDengine 2.0 版本中,会用到以下这些网络端口(以默认端口 6030 为前提进行说明,如果修改了配置文件中的设置,那么这里列举的端口都会随之出现变化),管理员可以参考这里的信息调整防火墙设置:
+
+| 协议 | 默认端口 | 用途说明 | 修改方法 |
+| :--- | :-------- | :---------------------------------- | :------------------------------- |
+| TCP | 6030 | 客户端与服务端之间通讯。 | 由配置文件设置 serverPort 决定。 |
+| TCP | 6035 | 多节点集群的节点间通讯。 | 随 serverPort 端口变化。 |
+| TCP | 6040 | 多节点集群的节点间数据同步。 | 随 serverPort 端口变化。 |
+| TCP | 6041 | 客户端与服务端之间的 RESTful 通讯。 | 随 serverPort 端口变化。2.4.0.0 及以上版本由 taosAdapter 配置。 |
+| TCP | 6042 | Arbitrator 的服务端口。 | 随 Arbitrator 启动参数设置变化。 |
+| TCP | 6043 | TaosKeeper 监控服务端口。 | 随 TaosKeeper 启动参数设置变化。 |
+| TCP | 6044 | 支持 StatsD 的数据接入端口。 | 随 taosAdapter 启动参数设置变化( 2.4.0.0 及以上版本)。 |
+| UDP | 6045 | 支持 collectd 数据接入端口。 | 随 taosAdapter 启动参数设置变化( 2.4.0.0 及以上版本)。 |
+| TCP | 6060 | 企业版内 Monitor 服务的网络端口。 | |
+| UDP | 6030-6034 | 客户端与服务端之间通讯。 | 随 serverPort 端口变化。 |
+| UDP | 6035-6039 | 多节点集群的节点间通讯。 | 随 serverPort 端口变化。 |
+
+### 24. 为什么 RESTful 接口无响应、Grafana 无法添加 TDengine 为数据源、TDengineGUI 选了 6041 端口还是无法连接成功??
+
+taosAdapter 从 TDengine 2.4.0.0 版本开始成为 TDengine 服务端软件的组成部分,是 TDengine 集群和应用程序之间的桥梁和适配器。在此之前 RESTful 接口等功能是由 taosd 内置的 HTTP 服务提供的,而如今要实现上述功能需要执行:```systemctl start taosadapter``` 命令来启动 taosAdapter 服务。
+
+需要说明的是,taosAdapter 的日志路径 path 需要单独配置,默认路径是 /var/log/taos ;日志等级 logLevel 有 8 个等级,默认等级是 info ,配置成 panic 可关闭日志输出。请注意操作系统 / 目录的空间大小,可通过命令行参数、环境变量或配置文件来修改配置,默认配置文件是 /etc/taos/taosadapter.toml 。
+
+有关 taosAdapter 组件的详细介绍请看文档:[taosAdapter](https://docs.taosdata.com/reference/taosadapter/)
+
+### 25. 发生了 OOM 怎么办?
+
+OOM 是操作系统的保护机制,当操作系统内存(包括 SWAP )不足时,会杀掉某些进程,从而保证操作系统的稳定运行。通常内存不足主要是如下两个原因导致,一是剩余内存小于 vm.min_free_kbytes ;二是程序请求的内存大于剩余内存。还有一种情况是内存充足但程序占用了特殊的内存地址,也会触发 OOM 。
+
+TDengine 会预先为每个 VNode 分配好内存,每个 Database 的 VNode 个数受 maxVgroupsPerDb 影响,每个 VNode 占用的内存大小受 Blocks 和 Cache 影响。要防止 OOM,需要在项目建设之初合理规划内存,并合理设置 SWAP ,除此之外查询过量的数据也有可能导致内存暴涨,这取决于具体的查询语句。TDengine 企业版对内存管理做了优化,采用了新的内存分配器,对稳定性有更高要求的用户可以考虑选择企业版。
diff --git a/docs-en/07-develop/01-connect/index.md b/docs-en/07-develop/01-connect/index.md
index 21b2149f4451e8e5d388a41f1a0a06b6adc00a96..b9217b828d0d08c4ff1eacd27406d4e3bfba8eac 100644
--- a/docs-en/07-develop/01-connect/index.md
+++ b/docs-en/07-develop/01-connect/index.md
@@ -1,6 +1,6 @@
---
-sidebar_label: Connection
-title: Connect to TDengine
+sidebar_label: Connect
+title: Connect
description: "This document explains how to establish connections to TDengine, and briefly introduces how to install and use TDengine connectors."
---
diff --git a/docs-en/07-develop/03-insert-data/01-sql-writing.mdx b/docs-en/07-develop/03-insert-data/01-sql-writing.mdx
index ae170a2bef3496c49026e05d7d60399cc88e90a7..498bcc8c423d64ee2847d2748c6f14d2d4c07e74 100644
--- a/docs-en/07-develop/03-insert-data/01-sql-writing.mdx
+++ b/docs-en/07-develop/03-insert-data/01-sql-writing.mdx
@@ -1,5 +1,5 @@
---
-sidebar_label: SQL
+sidebar_label: Insert Using SQL
title: Insert Using SQL
---
diff --git a/docs-en/07-develop/03-insert-data/index.md b/docs-en/07-develop/03-insert-data/index.md
index ba31a951ff0805b48f90c87ddc635c04978d3cd2..1a71e719a56448e4b535632e570ce8a04d2282bb 100644
--- a/docs-en/07-develop/03-insert-data/index.md
+++ b/docs-en/07-develop/03-insert-data/index.md
@@ -1,5 +1,5 @@
---
-title: Insert
+title: Insert Data
---
TDengine supports multiple protocols of inserting data, including SQL, InfluxDB Line protocol, OpenTSDB Telnet protocol, and OpenTSDB JSON protocol. Data can be inserted row by row, or in batches. Data from one or more collection points can be inserted simultaneously. Data can be inserted with multiple threads, and out of order data and historical data can be inserted as well. InfluxDB Line protocol, OpenTSDB Telnet protocol and OpenTSDB JSON protocol are the 3 kinds of schemaless insert protocols supported by TDengine. It's not necessary to create STables and tables in advance if using schemaless protocols, and the schemas can be adjusted automatically based on the data being inserted.
diff --git a/docs-en/07-develop/04-query-data/_category_.yml b/docs-en/07-develop/04-query-data/_category_.yml
index 5912a48fc31ed36235c0d34d8b0909bf3b518aaa..809db34621a63505ceace7ba182e07c698bdbddb 100644
--- a/docs-en/07-develop/04-query-data/_category_.yml
+++ b/docs-en/07-develop/04-query-data/_category_.yml
@@ -1 +1 @@
-label: Select Data
+label: Query Data
diff --git a/docs-en/07-develop/04-query-data/index.mdx b/docs-en/07-develop/04-query-data/index.mdx
index 74562c88232afc2f41fdbe5d4c34d582b0b141bd..a212fa9529215fc24c55c95a166cfc1a407359b2 100644
--- a/docs-en/07-develop/04-query-data/index.mdx
+++ b/docs-en/07-develop/04-query-data/index.mdx
@@ -1,6 +1,6 @@
---
-Sidebar_label: Select
-title: Select
+sidebar_label: Query Data
+title: Query Data
description: "This chapter introduces major query functionalities and how to perform sync and async query using connectors."
---
diff --git a/docs-en/07-develop/06-subscribe.mdx b/docs-en/07-develop/06-subscribe.mdx
index 474841ff8932216d327f39a4f0cb39ba26e6615b..782fcdbaf221419dd231bd10958e26b8f4f856e5 100644
--- a/docs-en/07-develop/06-subscribe.mdx
+++ b/docs-en/07-develop/06-subscribe.mdx
@@ -1,5 +1,5 @@
---
-sidebar_label: Subscription
+sidebar_label: Data Subscription
description: "Lightweight service for data subscription and publishing. Time series data inserted into TDengine continuously can be pushed automatically to subscribing clients."
title: Data Subscription
---
@@ -151,7 +151,7 @@ void subscribe_callback(TAOS_SUB* tsub, TAOS_RES *res, void* param, int code) {
taos_unsubscribe(tsub, keep);
```
-The second parameter `keep` is used to specify whether to keep the subscription progress on the client sde. If it is **false**, i.e. **0**, then subscription will be restarted from beginning regardless of the `restart` parameter's value when `taos_subscribe` is invoked again. The subscription progress information is stored in _{DataDir}/subscribe/_ , under which there is a file with the same name as `topic` for each subscription, the subscription will be restarted from the beginning if the corresponding progress file is removed.
+The second parameter `keep` is used to specify whether to keep the subscription progress on the client side. If it is **false**, i.e. **0**, then subscription will be restarted from beginning regardless of the `restart` parameter's value when `taos_subscribe` is invoked again. The subscription progress information is stored in _{DataDir}/subscribe/_ , under which there is a file with the same name as `topic` for each subscription (Note: The default value of `DataDir` in the `taos.cfg` file is **/var/lib/taos/**. However, **/var/lib/taos/** does not exist on the Windows server. So you need to change the `DataDir` value to the corresponding existing directory.), the subscription will be restarted from the beginning if the corresponding progress file is removed.
Now let's see the effect of the above sample code, assuming below prerequisites have been done.
diff --git a/docs-en/07-develop/08-udf.md b/docs-en/07-develop/08-udf.md
index 0ee61740cc8b8aad7dd39707a1153b022822f0a9..49bc95bd91a4c31d42d2b21ef05d69225f1bd963 100644
--- a/docs-en/07-develop/08-udf.md
+++ b/docs-en/07-develop/08-udf.md
@@ -1,6 +1,6 @@
---
sidebar_label: UDF
-title: User Defined Functions
+title: User Defined Functions(UDF)
description: "Scalar functions and aggregate functions developed by users can be utilized by the query framework to expand query capability"
---
diff --git a/docs-en/12-taos-sql/07-function.md b/docs-en/12-taos-sql/07-function.md
index 0d6e7f25649872f514dce21bcba38a3af4ba7a5d..825aeea354fb684e47ceed7afb2bc66d97b23c09 100644
--- a/docs-en/12-taos-sql/07-function.md
+++ b/docs-en/12-taos-sql/07-function.md
@@ -22,8 +22,8 @@ SELECT COUNT([*|field_name]) FROM tb_name [WHERE clause];
**More explanation**:
-- Wildcard (\*) can be used to represent all columns, it's used to get the number of all rows
-- The number of non-NULL values will be returned if this function is used on a specific column
+- Wildcard (\*) is used to represent all columns. The `COUNT` function is used to get the total number of all rows.
+- The number of non-NULL values will be returned if this function is used on a specific column.
**Examples**:
@@ -87,7 +87,7 @@ SELECT TWA(field_name) FROM tb_name WHERE clause;
**More explanations**:
-- From version 2.1.3.0, function TWA can be used on stable with `GROUP BY`, i.e. timelines generated by `GROUP BY tbname` on a STable.
+- Since version 2.1.3.0, function TWA can be used on a STable with `GROUP BY`, i.e. timelines generated by `GROUP BY tbname` on a STable.
### IRATE
@@ -105,7 +105,7 @@ SELECT IRATE(field_name) FROM tb_name WHERE clause;
**More explanations**:
-- From version 2.1.3.0, function IRATE can be used on stble with `GROUP BY`, i.e. timelines generated by `GROUP BY tbname` on a STable.
+- Since version 2.1.3.0, function IRATE can be used on a STable with `GROUP BY`, i.e. timelines generated by `GROUP BY tbname` on a STable.
### SUM
@@ -149,7 +149,7 @@ SELECT STDDEV(field_name) FROM tb_name [WHERE clause];
**Applicable column types**: Data types except for timestamp, binary, nchar and bool
-**Applicable table types**: table, STable (starting from version 2.0.15.1)
+**Applicable table types**: table, STable (since version 2.0.15.1)
**Examples**:
@@ -193,13 +193,13 @@ SELECT MODE(field_name) FROM tb_name [WHERE clause];
**Description**:The value which has the highest frequency of occurrence. NULL is returned if there are multiple values which have highest frequency of occurrence. It can't be used on timestamp column or tags.
-**Return value type**:Same as the data type of the column being operated
+**Return value type**:Same as the data type of the column being operated upon
**Applicable column types**:Data types except for timestamp
**More explanations**:Considering the number of returned result set is unpredictable, it's suggested to limit the number of unique values to 100,000, otherwise error will be returned.
-**Applicable version**:From version 2.6.0.0
+**Applicable version**:Since version 2.6.0.0
**Examples**:
@@ -234,7 +234,7 @@ SELECT HYPERLOGLOG(field_name) FROM { tb_name | stb_name } [WHERE clause];
**More explanations**: The benefit of using hyperloglog algorithm is that the memory usage is under control when the data volume is huge. However, when the data volume is very small, the result may be not accurate, it's recommented to use `select count(data) from (select unique(col) as data from table)` in this case.
-**Applicable versions**:From version 2.6.0.0
+**Applicable versions**:Since version 2.6.0.0
**Examples**:
@@ -271,7 +271,7 @@ SELECT MIN(field_name) FROM {tb_name | stb_name} [WHERE clause];
**Description**: The minimum value of a specific column in a table or STable
-**Return value type**: Same as the data type of the column being operated
+**Return value type**: Same as the data type of the column being operated upon
**Applicable column types**: Data types except for timestamp, binary, nchar and bool
@@ -301,7 +301,7 @@ SELECT MAX(field_name) FROM { tb_name | stb_name } [WHERE clause];
**Description**: The maximum value of a specific column of a table or STable
-**Return value type**: Same as the data type of the column being operated
+**Return value type**: Same as the data type of the column being operated upon
**Applicable column types**: Data types except for timestamp, binary, nchar and bool
@@ -331,7 +331,7 @@ SELECT FIRST(field_name) FROM { tb_name | stb_name } [WHERE clause];
**Description**: The first non-null value of a specific column in a table or STable
-**Return value type**: Same as the column being operated
+**Return value type**: Same as the column being operated upon
**Applicable column types**: Any data type
@@ -341,7 +341,7 @@ SELECT FIRST(field_name) FROM { tb_name | stb_name } [WHERE clause];
- FIRST(\*) can be used to get the first non-null value of all columns
- NULL will be returned if all the values of the specified column are all NULL
-- No result will NOT be returned if all the columns in the result set are all NULL
+- A result will NOT be returned if all the columns in the result set are all NULL
**Examples**:
@@ -367,7 +367,7 @@ SELECT LAST(field_name) FROM { tb_name | stb_name } [WHERE clause];
**Description**: The last non-NULL value of a specific column in a table or STable
-**Return value type**: Same as the column being operated
+**Return value type**: Same as the column being operated upon
**Applicable column types**: Any data type
@@ -403,7 +403,7 @@ SELECT TOP(field_name, K) FROM { tb_name | stb_name } [WHERE clause];
**Description**: The greatest _k_ values of a specific column in a table or STable. If a value has multiple occurrences in the column but counting all of them in will exceed the upper limit _k_, then a part of them will be returned randomly.
-**Return value type**: Same as the column being operated
+**Return value type**: Same as the column being operated upon
**Applicable column types**: Data types except for timestamp, binary, nchar and bool
@@ -442,7 +442,7 @@ SELECT BOTTOM(field_name, K) FROM { tb_name | stb_name } [WHERE clause];
**Description**: The least _k_ values of a specific column in a table or STable. If a value has multiple occurrences in the column but counting all of them in will exceed the upper limit _k_, then a part of them will be returned randomly.
-**Return value type**: Same as the column being operated
+**Return value type**: Same as the column being operated upon
**Applicable column types**: Data types except for timestamp, binary, nchar and bool
@@ -549,7 +549,7 @@ SELECT LAST_ROW(field_name) FROM { tb_name | stb_name };
**Description**: The last row of a table or STable
-**Return value type**: Same as the column being operated
+**Return value type**: Same as the column being operated upon
**Applicable column types**: Any data type
@@ -576,7 +576,7 @@ SELECT LAST_ROW(field_name) FROM { tb_name | stb_name };
Query OK, 1 row(s) in set (0.001042s)
```
-### INTERP [From version 2.3.1]
+### INTERP [Since version 2.3.1]
```
SELECT INTERP(field_name) FROM { tb_name | stb_name } [WHERE where_condition] [ RANGE(timestamp1,timestamp2) ] [EVERY(interval)] [FILL ({ VALUE | PREV | NULL | LINEAR | NEXT})];
@@ -584,7 +584,7 @@ SELECT INTERP(field_name) FROM { tb_name | stb_name } [WHERE where_condition] [
**Description**: The value that matches the specified timestamp range is returned, if existing; or an interpolation value is returned.
-**Return value type**: Same as the column being operated
+**Return value type**: Same as the column being operated upon
**Applicable column types**: Numeric data types
@@ -593,7 +593,7 @@ SELECT INTERP(field_name) FROM { tb_name | stb_name } [WHERE where_condition] [
**More explanations**
- `INTERP` is used to get the value that matches the specified time slice from a column. If no such value exists an interpolation value will be returned based on `FILL` parameter.
-- The input data of `INTERP` is the value of the specified column, `where` can be used to filter the original data. If no `where` condition is specified then all original data is the input.
+- The input data of `INTERP` is the value of the specified column and a `where` clause can be used to filter the original data. If no `where` condition is specified then all original data is the input.
- The output time range of `INTERP` is specified by `RANGE(timestamp1,timestamp2)` parameter, with timestamp1<=timestamp2. timestamp1 is the starting point of the output time range and must be specified. timestamp2 is the ending point of the output time range and must be specified. If `RANGE` is not specified, then the timestamp of the first row that matches the filter condition is treated as timestamp1, the timestamp of the last row that matches the filter condition is treated as timestamp2.
- The number of rows in the result set of `INTERP` is determined by the parameter `EVERY`. Starting from timestamp1, one interpolation is performed for every time interval specified `EVERY` parameter. If `EVERY` parameter is not used, the time windows will be considered as no ending timestamp, i.e. there is only one time window from timestamp1.
- Interpolation is performed based on `FILL` parameter. No interpolation is performed if `FILL` is not used, that means either the original data that matches is returned or nothing is returned.
@@ -632,7 +632,7 @@ SELECT INTERP(field_name) FROM { tb_name | stb_name } [WHERE where_condition] [
taos> SELECT INTERP(current) FROM t1 where ts >= '2017-07-14 17:00:00' and ts <= '2017-07-14 20:00:00' RANGE('2017-7-14 18:00:00','2017-7-14 19:00:00') EVERY(5s) FILL(LINEAR);
```
-### INTERP [Prior to version 2.3.1]
+### INTERP [Since version 2.0.15.0]
```
SELECT INTERP(field_name) FROM { tb_name | stb_name } WHERE ts='timestamp' [FILL ({ VALUE | PREV | NULL | LINEAR | NEXT})];
@@ -640,7 +640,7 @@ SELECT INTERP(field_name) FROM { tb_name | stb_name } WHERE ts='timestamp' [FILL
**Description**: The value of a specific column that matches the specified time slice
-**Return value type**: Same as the column being operated
+**Return value type**: Same as the column being operated upon
**Applicable column types**: Numeric data type
@@ -648,7 +648,6 @@ SELECT INTERP(field_name) FROM { tb_name | stb_name } WHERE ts='timestamp' [FILL
**More explanations**:
-- It can be used from version 2.0.15.0
- Time slice must be specified. If there is no data matching the specified time slice, interpolation is performed based on `FILL` parameter. Conditions such as tags or `tbname` can be used `Where` clause can be used to filter data.
- The timestamp specified must be within the time range of the data rows of the table or STable. If it is beyond the valid time range, nothing is returned even with `FILL` parameter.
- `INTERP` can be used to query only single time point once. `INTERP` can be used with `EVERY` to get the interpolation value every time interval.
@@ -696,11 +695,11 @@ SELECT TAIL(field_name, k, offset_val) FROM {tb_name | stb_name} [WHERE clause];
**Parameter value range**: k: [1,100] offset_val: [0,100]
-**Return value type**: Same as the column being operated
+**Return value type**: Same as the column being operated upon
**Applicable column types**: Any data type except form timestamp, i.e. the primary key
-**Applicable versions**: From version 2.6.0.0
+**Applicable versions**: Since version 2.6.0.0
**Examples**:
@@ -732,11 +731,11 @@ SELECT UNIQUE(field_name) FROM {tb_name | stb_name} [WHERE clause];
**Description**: The values that occur the first time in the specified column. The effect is similar to `distinct` keyword, but it can also be used to match tags or timestamp.
-**Return value type**: Same as the column or tag being operated
+**Return value type**: Same as the column or tag being operated upon
**Applicable column types**: Any data types except for timestamp
-**Applicable versions**: From version 2.6.0.0
+**Applicable versions**: Since version 2.6.0.0
**More explanations**:
@@ -780,7 +779,7 @@ SELECT {DIFF(field_name, ignore_negative) | DIFF(field_name)} FROM tb_name [WHER
**Description**: The different of each row with its previous row for a specific column. `ignore_negative` can be specified as 0 or 1, the default value is 1 if it's not specified. `1` means negative values are ignored.
-**Return value type**: Same as the column being operated
+**Return value type**: Same as the column being operated upon
**Applicable column types**: Data types except for timestamp, binary, nchar and bool
@@ -789,8 +788,8 @@ SELECT {DIFF(field_name, ignore_negative) | DIFF(field_name)} FROM tb_name [WHER
**More explanations**:
- The number of result rows is the number of rows subtracted by one, no output for the first row
-- From version 2.1.30, `DIFF` can be used on STable with `GROUP by tbname`
-- From version 2.6.0, `ignore_negative` parameter is supported
+- Since version 2.1.30, `DIFF` can be used on STable with `GROUP by tbname`
+- Since version 2.6.0, `ignore_negative` parameter is supported
**Examples**:
@@ -874,7 +873,7 @@ Query OK, 1 row(s) in set (0.000836s)
SELECT CEIL(field_name) FROM { tb_name | stb_name } [WHERE clause];
```
-**Description**: The round up value of a specific column
+**Description**: The rounded up value of a specific column
**Return value type**: Same as the column being used
@@ -896,9 +895,9 @@ SELECT CEIL(field_name) FROM { tb_name | stb_name } [WHERE clause];
SELECT FLOOR(field_name) FROM { tb_name | stb_name } [WHERE clause];
```
-**Description**: The round down value of a specific column
+**Description**: The rounded down value of a specific column
-**More explanations**: The restrictions are same as `CEIL` function.
+**More explanations**: The restrictions are same as those of the `CEIL` function.
### ROUND
@@ -906,7 +905,7 @@ SELECT FLOOR(field_name) FROM { tb_name | stb_name } [WHERE clause];
SELECT ROUND(field_name) FROM { tb_name | stb_name } [WHERE clause];
```
-**Description**: The round value of a specific column.
+**Description**: The rounded value of a specific column.
**More explanations**: The restrictions are same as `CEIL` function.
@@ -933,7 +932,7 @@ SELECT ROUND(field_name) FROM { tb_name | stb_name } [WHERE clause];
- Can only be used with aggregate functions
- `Group by tbname` must be used together on a STable to force the result on a single timeline
-**Applicable versions**: From 2.3.0.x
+**Applicable versions**: Since 2.3.0.x
### MAVG
@@ -958,7 +957,7 @@ SELECT ROUND(field_name) FROM { tb_name | stb_name } [WHERE clause];
- Can't be used with aggregate functions.
- Must be used with `GROUP BY tbname` when it's used on a STable to force the result on each single timeline.
-**Applicable versions**: From 2.3.0.x
+**Applicable versions**: Since 2.3.0.x
### SAMPLE
@@ -981,7 +980,7 @@ SELECT ROUND(field_name) FROM { tb_name | stb_name } [WHERE clause];
- Arithmetic operation can't be operated on the result of `SAMPLE` function
- Must be used with `Group by tbname` when it's used on a STable to force the result on each single timeline
-**Applicable versions**: From 2.3.0.x
+**Applicable versions**: Since 2.3.0.x
### ASIN
@@ -1460,8 +1459,8 @@ SELECT field_name [+|-|*|/|%][Value|field_name] FROM { tb_name | stb_name } [WH
**More explanations**:
-- Arithmetic operations can be performed on two or more columns, `()` can be used to control the precedence
-- NULL doesn't participate the operation, if one of the operands is NULL then result is NULL
+- Arithmetic operations can be performed on two or more columns. Parentheses `()` can be used to control the order of precedence.
+- NULL doesn't participate in the operation, i.e. if one of the operands is NULL then the result is NULL.
**Examples**:
@@ -1586,7 +1585,7 @@ Query OK, 6 row(s) in set (0.002613s)
## Time Functions
-From version 2.6.0.0, below time related functions can be used in TDengine.
+Since version 2.6.0.0, below time related functions can be used in TDengine.
### NOW
@@ -1840,6 +1839,8 @@ SELECT TIMEDIFF(ts_val1 | datetime_string1 | ts_col1, ts_val2 | datetime_string2
1u(microsecond),1a(millisecond),1s(second),1m(minute),1h(hour),1d(day).
- The precision of the returned timestamp is same as the precision set for the current data base in use
+**Applicable versions**:Since version 2.6.0.0
+
**Examples**:
```sql
diff --git a/docs-en/12-taos-sql/08-interval.md b/docs-en/12-taos-sql/08-interval.md
index 1b5265b44b6b63f8f5472e1e8760d1f45401fc21..acfb0de0e1521fd8c6a068497a3df7a17941524c 100644
--- a/docs-en/12-taos-sql/08-interval.md
+++ b/docs-en/12-taos-sql/08-interval.md
@@ -3,36 +3,36 @@ sidebar_label: Interval
title: Aggregate by Time Window
---
-Aggregate by time window is supported in TDengine. For example, each temperature sensor reports the temperature every second, the average temperature every 10 minutes can be retrieved by query with time window.
-Window related clauses are used to divide the data set to be queried into subsets and then aggregate. There are three kinds of windows, time window, status window, and session window. There are two kinds of time windows, sliding window and flip time window.
+Aggregation by time window is supported in TDengine. For example, in the case where temperature sensors report the temperature every second, the average temperature for every 10 minutes can be retrieved by performing a query with a time window.
+Window related clauses are used to divide the data set to be queried into subsets and then aggregation is performed across the subsets. There are three kinds of windows: time window, status window, and session window. There are two kinds of time windows: sliding window and flip time/tumbling window.
## Time Window
-`INTERVAL` clause is used to generate time windows of the same time interval, `SLIDING` is used to specify the time step for which the time window moves forward. The query is performed on one time window each time, and the time window moves forward with time. When defining continuous query both the size of time window and the step of forward sliding time need to be specified. As shown in the figure blow, [t0s, t0e] ,[t1s , t1e], [t2s, t2e] are respectively the time ranges of three time windows on which continuous queries are executed. The time step for which time window moves forward is marked by `sliding time`. Query, filter and aggregate operations are executed on each time window respectively. When the time step specified by `SLIDING` is same as the time interval specified by `INTERVAL`, the sliding time window is actually a flip time window.
+The `INTERVAL` clause is used to generate time windows of the same time interval. The `SLIDING` parameter is used to specify the time step for which the time window moves forward. The query is performed on one time window each time, and the time window moves forward with time. When defining a continuous query, both the size of the time window and the step of forward sliding time need to be specified. As shown in the figure below, [t0s, t0e] ,[t1s , t1e], [t2s, t2e] are respectively the time ranges of three time windows on which continuous queries are executed. The time step for which time window moves forward is marked by `sliding time`. Query, filter and aggregate operations are executed on each time window respectively. When the time step specified by `SLIDING` is same as the time interval specified by `INTERVAL`, the sliding time window is actually a flip time/tumbling window.

-`INTERVAL` and `SLIDING` should be used with aggregate functions and select functions. Below SQL statement is illegal because no aggregate or selection function is used with `INTERVAL`.
+`INTERVAL` and `SLIDING` should be used with aggregate functions and select functions. The SQL statement below is illegal because no aggregate or selection function is used with `INTERVAL`.
```
SELECT * FROM temp_tb_1 INTERVAL(1m);
```
-The time step specified by `SLIDING` can't exceed the time interval specified by `INTERVAL`. Below SQL statement is illegal because the time length specified by `SLIDING` exceeds that specified by `INTERVAL`.
+The time step specified by `SLIDING` cannot exceed the time interval specified by `INTERVAL`. The SQL statement below is illegal because the time length specified by `SLIDING` exceeds that specified by `INTERVAL`.
```
SELECT COUNT(*) FROM temp_tb_1 INTERVAL(1m) SLIDING(2m);
```
-When the time length specified by `SLIDING` is the same as that specified by `INTERVAL`, the sliding window is actually a flip window. The minimum time range specified by `INTERVAL` is 10 milliseconds (10a) prior to version 2.1.5.0. From version 2.1.5.0, the minimum time range by `INTERVAL` can be 1 microsecond (1u). However, if the DB precision is millisecond, the minimum time range is 1 millisecond (1a). Please note that the `timezone` parameter should be configured to be the same value in the `taos.cfg` configuration file on client side and server side.
+When the time length specified by `SLIDING` is the same as that specified by `INTERVAL`, the sliding window is actually a flip/tumbling window. The minimum time range specified by `INTERVAL` is 10 milliseconds (10a) prior to version 2.1.5.0. Since version 2.1.5.0, the minimum time range by `INTERVAL` can be 1 microsecond (1u). However, if the DB precision is millisecond, the minimum time range is 1 millisecond (1a). Please note that the `timezone` parameter should be configured to be the same value in the `taos.cfg` configuration file on client side and server side.
## Status Window
-In case of using integer, bool, or string to represent the device status at a moment, the continuous rows with same status belong to same status window. Once the status changes, the status window closes. As shown in the following figure, there are two status windows according to status, [2019-04-28 14:22:07,2019-04-28 14:22:10] and [2019-04-28 14:22:11,2019-04-28 14:22:12]. Status window is not applicable to STable for now.
+In case of using integer, bool, or string to represent the status of a device at any given moment, continuous rows with the same status belong to a status window. Once the status changes, the status window closes. As shown in the following figure, there are two status windows according to status, [2019-04-28 14:22:07,2019-04-28 14:22:10] and [2019-04-28 14:22:11,2019-04-28 14:22:12]. Status window is not applicable to STable for now.

-`STATE_WINDOW` is used to specify the column based on which to define status window, for example:
+`STATE_WINDOW` is used to specify the column on which the status window will be based. For example:
```
SELECT COUNT(*), FIRST(ts), status FROM temp_tb_1 STATE_WINDOW(status);
@@ -44,7 +44,7 @@ SELECT COUNT(*), FIRST(ts), status FROM temp_tb_1 STATE_WINDOW(status);
SELECT COUNT(*), FIRST(ts) FROM temp_tb_1 SESSION(ts, tol_val);
```
-The primary key, i.e. timestamp, is used to determine which session window the row belongs to. If the time interval between two adjacent rows is within the time range specified by `tol_val`, they belong to the same session window; otherwise they belong to two different time windows. As shown in the figure below, if the limit of time interval for the session window is specified as 12 seconds, then the 6 rows in the figure constitutes 2 time windows, [2019-04-28 14:22:10,2019-04-28 14:22:30] and [2019-04-28 14:23:10,2019-04-28 14:23:30], because the time difference between 2019-04-28 14:22:30 and 2019-04-28 14:23:10 is 40 seconds, which exceeds the time interval limit of 12 seconds.
+The primary key, i.e. timestamp, is used to determine which session window a row belongs to. If the time interval between two adjacent rows is within the time range specified by `tol_val`, they belong to the same session window; otherwise they belong to two different session windows. As shown in the figure below, if the limit of time interval for the session window is specified as 12 seconds, then the 6 rows in the figure constitute 2 session windows, [2019-04-28 14:22:10,2019-04-28 14:22:30] and [2019-04-28 14:23:10,2019-04-28 14:23:30], because the time difference between 2019-04-28 14:22:30 and 2019-04-28 14:23:10 is 40 seconds, which exceeds the time interval limit of 12 seconds.

@@ -73,7 +73,7 @@ SELECT function_list FROM stb_name
### Restrictions
-- Aggregate functions and select functions can be used in `function_list`, with each function having only one output, for example COUNT, AVG, SUM, STDDEV, LEASTSQUARES, PERCENTILE, MIN, MAX, FIRST, LAST. Functions having multiple output can't be used, for example DIFF or arithmetic operations.
+- Aggregate functions and select functions can be used in `function_list`, with each function having only one output. For example COUNT, AVG, SUM, STDDEV, LEASTSQUARES, PERCENTILE, MIN, MAX, FIRST, LAST. Functions having multiple outputs, such as DIFF or arithmetic operations, can't be used.
- `LAST_ROW` can't be used together with window aggregate.
- Scalar functions, like CEIL/FLOOR, can't be used with window aggregate.
- `WHERE` clause can be used to specify the starting and ending time and other filter conditions
@@ -87,8 +87,8 @@ SELECT function_list FROM stb_name
:::info
-1. Huge volume of interpolation output may be returned using `FILL`, so it's recommended to specify the time range when using `FILL`. The maximum interpolation values that can be returned in single query is 10,000,000.
-2. The result set is in ascending order of timestamp in aggregate by time window aggregate.
+1. A huge volume of interpolation output may be returned using `FILL`, so it's recommended to specify the time range when using `FILL`. The maximum number of interpolation values that can be returned in a single query is 10,000,000.
+2. The result set is in ascending order of timestamp when you aggregate by time window.
3. If aggregate by window is used on STable, the aggregate function is performed on all the rows matching the filter conditions. If `GROUP BY` is not used in the query, the result set will be returned in ascending order of timestamp; otherwise the result set is not exactly in the order of ascending timestamp in each group.
:::
@@ -97,13 +97,13 @@ Aggregate by time window is also used in continuous query, please refer to [Cont
## Examples
-The table of intelligent meters can be created by the SQL statement below:
+A table of intelligent meters can be created by the SQL statement below:
```sql
CREATE TABLE meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT);
```
-The average current, maximum current and median of current in every 10 minutes for the past 24 hours can be calculated using the below SQL statement, with missing values filled with the previous non-NULL values.
+The average current, maximum current and median of current in every 10 minutes for the past 24 hours can be calculated using the SQL statement below, with missing values filled with the previous non-NULL values.
```
SELECT AVG(current), MAX(current), APERCENTILE(current, 50) FROM meters
diff --git a/docs-en/12-taos-sql/09-limit.md b/docs-en/12-taos-sql/09-limit.md
index b987cbcb7886dd35d4fbfefb945d8f36f8d4f399..db55cdd69e7bd29ca66ee15b61f28991568d9556 100644
--- a/docs-en/12-taos-sql/09-limit.md
+++ b/docs-en/12-taos-sql/09-limit.md
@@ -4,8 +4,8 @@ title: Limits & Restrictions
## Naming Rules
-1. Only English characters, digits and underscore are allowed
-2. Can't start with a digit
+1. Only characters from the English alphabet, digits and underscore are allowed
+2. Names cannot start with a digit
3. Case insensitive without escape character "\`"
4. Identifier with escape character "\`"
To support more flexible table or column names, a new escape character "\`" is introduced. For more details please refer to [escape](/taos-sql/escape).
@@ -16,38 +16,38 @@ The legal character set is `[a-zA-Z0-9!?$%^&*()_–+={[}]:;@~#|<,>.?/]`.
## General Limits
-- Maximum length of database name is 32 bytes
-- Maximum length of table name is 192 bytes, excluding the database name prefix and the separator
-- Maximum length of each data row is 48K bytes from version 2.1.7.0 , before which the limit is 16K bytes. Please note that the upper limit includes the extra 2 bytes consumed by each column of BINARY/NCHAR type.
-- Maximum of column name is 64.
+- Maximum length of database name is 32 bytes.
+- Maximum length of table name is 192 bytes, excluding the database name prefix and the separator.
+- Maximum length of each data row is 48K bytes since version 2.1.7.0, before which the limit was 16K bytes. Please note that the upper limit includes the extra 2 bytes consumed by each column of BINARY/NCHAR type.
+- Maximum length of column name is 64.
- Maximum number of columns is 4096. There must be at least 2 columns, and the first column must be timestamp.
- Maximum length of tag name is 64.
- Maximum number of tags is 128. There must be at least 1 tag. The total length of tag values should not exceed 16K bytes.
-- Maximum length of singe SQL statement is 1048576, i.e. 1 MB bytes. It can be configured in the parameter `maxSQLLength` in the client side, the applicable range is [65480, 1048576].
-- At most 4096 columns (or 1024 prior to 2.1.7.0) can be returned by `SELECT`, functions in the query statement may constitute columns. Error will be returned if the limit is exceeded.
-- Maximum numbers of databases, STables, tables are only depending on the system resources.
+- Maximum length of single SQL statement is 1048576, i.e. 1 MB. It can be configured in the parameter `maxSQLLength` in the client side, the applicable range is [65480, 1048576].
+- At most 4096 columns (or 1024 prior to 2.1.7.0) can be returned by `SELECT`. Functions in the query statement may constitute columns. An error is returned if the limit is exceeded.
+- Maximum numbers of databases, STables, tables are dependent only on the system resources.
- Maximum of database name is 32 bytes, and it can't include "." or special characters.
-- Maximum replica number of database is 3
-- Maximum length of user name is 23 bytes
-- Maximum length of password is 15 bytes
-- Maximum number of rows depends on the storage space only.
-- Maximum number of tables depends on the number of nodes only.
-- Maximum number of databases depends on the number of nodes only.
-- Maximum number of vnodes for single database is 64.
+- Maximum number of replicas for a database is 3.
+- Maximum length of user name is 23 bytes.
+- Maximum length of password is 15 bytes.
+- Maximum number of rows depends only on the storage space.
+- Maximum number of tables depends only on the number of nodes.
+- Maximum number of databases depends only on the number of nodes.
+- Maximum number of vnodes for a single database is 64.
## Restrictions of `GROUP BY`
-`GROUP BY` can be performed on tags and `TBNAME`. It can be performed on data columns too, with one restriction that only one column and the number of unique values on that column is lower than 100,000. Please note that `GROUP BY` can't be performed on float or double types.
+`GROUP BY` can be performed on tags and `TBNAME`. It can be performed on data columns too, with the only restriction being that it can only be performed on one data column and the number of unique values in that column must be lower than 100,000. Please note that `GROUP BY` cannot be performed on float or double types.
## Restrictions of `IS NOT NULL`
-`IS NOT NULL` can be used on any data type of columns. The non-empty string evaluation expression, i.e. `<\>""` can only be used on non-numeric data types.
+`IS NOT NULL` can be used on any data type of columns. The non-empty string evaluation expression, i.e. `<> ""` can only be used on non-numeric data types.
## Restrictions of `ORDER BY`
- Only one `order by` is allowed for normal table and subtable.
- At most two `order by` are allowed for STable, and the second one must be `ts`.
-- `order by tag` must be used with `group by tag` on same tag, this rule is also applicable to `tbname`.
+- `order by tag` must be used with `group by tag` on same tag. This rule is also applicable to `tbname`.
- `order by column` must be used with `group by column` or `top/bottom` on same column. This rule is applicable to table and STable.
- `order by ts` is applicable to table and STable.
- If `order by ts` is used with `group by`, the result set is sorted using `ts` in each group.
@@ -56,7 +56,7 @@ The legal character set is `[a-zA-Z0-9!?$%^&*()_–+={[}]:;@~#|<,>.?/]`.
### Name Restrictions of Table/Column
-The name of a table or column can only be composed of ASCII characters, digits and underscore, while it can't start with a digit. The maximum length is 192 bytes. Names are case insensitive. The name mentioned in this rule doesn't include the database name prefix and the separator.
+The name of a table or column can only be composed of ASCII characters, digits and underscore and it cannot start with a digit. The maximum length is 192 bytes. Names are case insensitive. The name mentioned in this rule doesn't include the database name prefix and the separator.
### Name Restrictions After Escaping
diff --git a/docs-en/12-taos-sql/10-json.md b/docs-en/12-taos-sql/10-json.md
index abe6649330618eb3df45f5bed03335a65f93a434..7460a5e0ba3ce78ee7744569cda460c477cac19c 100644
--- a/docs-en/12-taos-sql/10-json.md
+++ b/docs-en/12-taos-sql/10-json.md
@@ -4,7 +4,7 @@ title: JSON Type
## Syntax
-1. Tag of JSON type
+1. Tag of type JSON
```sql
create STable s1 (ts timestamp, v1 int) tags (info json);
@@ -12,7 +12,7 @@ title: JSON Type
create table s1_1 using s1 tags ('{"k1": "v1"}');
```
-2. -> Operator of JSON
+2. "->" Operator of JSON
```sql
select * from s1 where info->'k1' = 'v1';
@@ -20,7 +20,7 @@ title: JSON Type
select info->'k1' from s1;
```
-3. contains Operator of JSON
+3. "contains" Operator of JSON
```sql
select * from s1 where info contains 'k2';
@@ -30,7 +30,7 @@ title: JSON Type
## Applicable Operations
-1. When JSON data type is used in `where`, `match/nmatch/between and/like/and/or/is null/is no null` can be used but `in` can't be used.
+1. When a JSON data type is used in `where`, `match/nmatch/between and/like/and/or/is null/is not null` can be used but `in` can't be used.
```sql
select * from s1 where info->'k1' match 'v*';
@@ -42,9 +42,9 @@ title: JSON Type
select * from s1 where info->'k1' is not null;
```
-2. Tag of JSON type can be used in `group by`, `order by`, `join`, `union all` and sub query, for example `group by json->'key'`
+2. A tag of JSON type can be used in `group by`, `order by`, `join`, `union all` and sub query; for example `group by json->'key'`
-3. `Distinct` can be used with tag of JSON type
+3. `Distinct` can be used with a tag of type JSON
```sql
select distinct info->'k1' from s1;
@@ -52,9 +52,9 @@ title: JSON Type
4. Tag Operations
- The value of JSON tag can be altered. Please note that the full JSON will be overriden when doing this.
+ The value of a JSON tag can be altered. Please note that the full JSON will be overridden when doing this.
- The name of JSON tag can be altered. A tag of JSON type can't be added or removed. The column length of a JSON tag can't be changed.
+ The name of a JSON tag can be altered. A tag of JSON type can't be added or removed. The column length of a JSON tag can't be changed.
## Other Restrictions
@@ -64,17 +64,17 @@ title: JSON Type
- JSON format:
- - The input string for JSON can be empty, i.e. "", "\t", or NULL, but can't be non-NULL string, bool or array.
- - object can be {}, and the whole JSON is empty if so. Key can be "", and it's ignored if so.
- - value can be int, double, string, boll or NULL, can't be array. Nesting is not allowed, that means value can't be another JSON.
+ - The input string for JSON can be empty, i.e. "", "\t", or NULL, but it can't be non-NULL string, bool or array.
+ - object can be {}, and the entire JSON is empty if so. Key can be "", and it's ignored if so.
+ - value can be int, double, string, bool or NULL, and it can't be an array. Nesting is not allowed, which means that the value of a key can't be JSON.
- If one key occurs twice in JSON, only the first one is valid.
- Escape characters are not allowed in JSON.
-- NULL is returned if querying a key that doesn't exist in JSON.
+- NULL is returned when querying a key that doesn't exist in JSON.
- If a tag of JSON is the result of inner query, it can't be parsed and queried in the outer query.
-For example, the below SQL statements are not supported.
+For example, the SQL statements below are not supported.
```sql;
select jtag->'key' from (select jtag from STable);
diff --git a/docs-en/13-operation/01-pkg-install.md b/docs-en/13-operation/01-pkg-install.md
index 8dd6de34280ee3702bc955d00dfb24fcb73e940e..c098002962d62aa0acc7a94462c052303cb2ed90 100644
--- a/docs-en/13-operation/01-pkg-install.md
+++ b/docs-en/13-operation/01-pkg-install.md
@@ -6,7 +6,7 @@ description: Install, Uninstall, Start, Stop and Upgrade
import Tabs from "@theme/Tabs";
import TabItem from "@theme/TabItem";
-TDengine community version provides dev and rpm packages for users to choose based on the system environment. deb supports Debian, Ubuntu and systems derived from them. rpm supports CentOS, RHEL, SUSE and systems derived from them. Furthermore, tar.gz package is provided for enterprise customers.
+TDengine community version provides deb and rpm packages for users to choose from, based on their system environment. The deb package supports Debian, Ubuntu and derivative systems. The rpm package supports CentOS, RHEL, SUSE and derivative systems. Furthermore, a tar.gz package is provided for TDengine Enterprise customers.
## Install
@@ -124,7 +124,7 @@ taoskeeper is installed, enable it by `systemctl enable taoskeeper`
```
:::info
-Some configuration will be prompted for users to provide when install.sh is executing, the interactive mode can be disabled by executing `./install.sh -e no`. `./install -h` can show all parameters and detailed explanation.
+Users will be prompted to enter some configuration information when install.sh is executing. The interactive mode can be disabled by executing `./install.sh -e no`. `./install.sh -h` can show all parameters with detailed explanation.
:::
@@ -132,7 +132,7 @@ Some configuration will be prompted for users to provide when install.sh is exec
:::note
-When installing on the first node in the cluster, when "Enter FQDN:" is prompted, nothing needs to be provided. When installing on following nodes, when "Enter FQDN:" is prompted, the end point of the first dnode in the cluster can be input if it is already up; or just ignore it and configure later after installation is done.
+When installing on the first node in the cluster, at the "Enter FQDN:" prompt, nothing needs to be provided. When installing on subsequent nodes, at the "Enter FQDN:" prompt, you must enter the end point of the first dnode in the cluster if it is already up. You can also just ignore it and configure it later after installation is finished.
:::
@@ -181,14 +181,14 @@ taosKeeper is removed successfully!
:::note
-- It's strongly suggested not to use multiple kinds of installation packages on a single host TDengine
-- After deb package is installed, if the installation directory is removed manually so that uninstall or reinstall can't succeed, it can be resolved by cleaning up TDengine package information as in the command below and then reinstalling.
+- We strongly recommend not to use multiple kinds of TDengine installation packages on a single host.
+- After deb package is installed, if the installation directory is removed manually, uninstall or reinstall will not work. This issue can be resolved by using the command below which cleans up TDengine package information. You can then reinstall if needed.
```bash
$ sudo rm -f /var/lib/dpkg/info/tdengine*
```
-- After rpm package is installed, if the installation directory is removed manually so that uninstall or reinstall can't succeed, it can be resolved by cleaning up TDengine package information as in the command below and then reinstalling.
+- After rpm package is installed, if the installation directory is removed manually, uninstall or reinstall will not work. This issue can be resolved by using the command below which cleans up TDengine package information. You can then reinstall if needed.
```bash
$ sudo rpm -e --noscripts tdengine
@@ -219,7 +219,7 @@ lrwxrwxrwx 1 root root 13 Feb 22 09:34 log -> /var/log/taos/
During the installation process:
- Configuration directory, data directory, and log directory are created automatically if they don't exist
-- The default configuration file is located at /etc/taos/taos.cfg, which is a copy of /usr/local/taos/cfg/taos.cfg if not existing
+- The default configuration file is located at /etc/taos/taos.cfg, which is a copy of /usr/local/taos/cfg/taos.cfg
- The default data directory is /var/lib/taos, which is a soft link to /usr/local/taos/data
- The default log directory is /var/log/taos, which is a soft link to /usr/local/taos/log
- The executables at /usr/local/taos/bin are linked to /usr/bin
@@ -228,7 +228,7 @@ During the installation process:
:::note
-- When TDengine is uninstalled, the configuration /etc/taos/taos.cfg, data directory /var/lib/taos, log directory /var/log/taos are kept. They can be deleted manually with caution because data can't be recovered
+- When TDengine is uninstalled, the configuration /etc/taos/taos.cfg, data directory /var/lib/taos, log directory /var/log/taos are kept. They can be deleted manually with caution, because data can't be recovered. Please follow data integrity, security, backup or relevant SOPs before deleting any data.
- When reinstalling TDengine, if the default configuration file /etc/taos/taos.cfg exists, it will be kept and the configuration file in the installation package will be renamed to taos.cfg.orig and stored at /usr/local/taos/cfg to be used as configuration sample. Otherwise the configuration file in the installation package will be installed to /etc/taos/taos.cfg and used.
## Start and Stop
@@ -263,18 +263,19 @@ Active: inactive (dead)
There are two aspects in upgrade operation: upgrade installation package and upgrade a running server.
-Upgrading package should follow the steps mentioned previously to first uninstall the old version then install the new version.
+To upgrade a package, follow the steps mentioned previously to first uninstall the old version then install the new version.
-Upgrading a running server is much more complex. First please check the version number of the old version and the new version. The version number of TDengine consists of 4 sections, only if the first 3 section match can the old version be upgraded to the new version. The steps of upgrading a running server are as below:
+Upgrading a running server is much more complex. First please check the version number of the old version and the new version. The version number of TDengine consists of 4 sections, only if the first 3 sections match can the old version be upgraded to the new version. The steps of upgrading a running server are as below:
- Stop inserting data
-- Make sure all data are persisted into disk
+- Make sure all data is persisted to disk
+- Make some simple queries (Such as total rows in stables, tables and so on. Note down the values. Follow best practices and relevant SOPs.)
- Stop the cluster of TDengine
- Uninstall old version and install new version
- Start the cluster of TDengine
-- Make some simple queries to make sure no data loss
-- Make some simple data insertion to make sure the cluster works well
-- Restore business data
+- Execute simple queries, such as the ones executed prior to installing the new package, to make sure there is no data loss
+- Run some simple data insertion statements to make sure the cluster works well
+- Restore business services
:::warning
diff --git a/docs-en/13-operation/02-planning.mdx b/docs-en/13-operation/02-planning.mdx
index 4b8ed1f1b893446a521425b9eb1f6ec32b112505..c1baf92dbfa8d93f83174c05c2ea631d1a469739 100644
--- a/docs-en/13-operation/02-planning.mdx
+++ b/docs-en/13-operation/02-planning.mdx
@@ -2,17 +2,17 @@
title: Resource Planning
---
-The computing and storage resources need to be planned if using TDengine to build an IoT platform. How to plan the CPU, memory and disk required will be described in this chapter.
+It is important to plan computing and storage resources if using TDengine to build an IoT, time-series or Big Data platform. How to plan the CPU, memory and disk resources required will be described in this chapter.
## Memory Requirement of Server Side
-The number of vgroups created for each database is the same as the number of CPU cores by default and can be configured by parameter `maxVgroupsPerDb`, each vnode in a vgroup stores one replica. Each vnode consumes a fixed size of memory, i.e. `blocks` \* `cache`. Besides, some memory is required for tag values associated with each table. A fixed amount of memory is required for each cluster. So, the memory required for each DB can be calculated using the formula below:
+By default, the number of vgroups created for each database is the same as the number of CPU cores. This can be configured by the parameter `maxVgroupsPerDb`. Each vnode in a vgroup stores one replica. Each vnode consumes a fixed amount of memory, i.e. `blocks` \* `cache`. In addition, some memory is required for tag values associated with each table. A fixed amount of memory is required for each cluster. So, the memory required for each DB can be calculated using the formula below:
```
Database Memory Size = maxVgroupsPerDb * replica * (blocks * cache + 10MB) + numOfTables * (tagSizePerTable + 0.5KB)
```
-For example, assuming the default value of `maxVgroupPerDB` is 64, the default value of `cache` 16M, the default value of `blocks` is 6, there are 100,000 tables in a DB, the replica number is 1, total length of tag values is 256 bytes, the total memory required for this DB is: 64 \* 1 \* (16 \* 6 + 10) + 100000 \* (0.25 + 0.5) / 1000 = 6792M.
+For example, assuming the default value of `maxVgroupPerDB` is 64, the default value of `cache` is 16M, the default value of `blocks` is 6, there are 100,000 tables in a DB, the replica number is 1, total length of tag values is 256 bytes, the total memory required for this DB is: 64 \* 1 \* (16 \* 6 + 10) + 100000 \* (0.25 + 0.5) / 1000 = 6792M.
In the real operation of TDengine, we are more concerned about the memory used by each TDengine server process `taosd`.
@@ -22,10 +22,10 @@ In the real operation of TDengine, we are more concerned about the memory used b
In the above formula:
-1. "vnode_memory" of a `taosd` process is the memory used by all vnodes hosted by this `taosd` process. It can be roughly calculated by firstly adding up the total memory of all DBs whose memory usage can be derived according to the formula mentioned previously then dividing by number of dnodes and multiplying the number of replicas.
+1. "vnode_memory" of a `taosd` process is the memory used by all vnodes hosted by this `taosd` process. It can be roughly calculated by firstly adding up the total memory of all DBs whose memory usage can be derived according to the formula for Database Memory Size, mentioned above, then dividing by number of dnodes and multiplying by the number of replicas.
```
- vnode_memory = sum(Database memory) / number_of_dnodes * replica
+ vnode_memory = (sum(Database Memory Size) / number_of_dnodes) * replica
```
2. "mnode_memory" of a `taosd` process is the memory consumed by a mnode. If there is one (and only one) mnode hosted in a `taosd` process, the memory consumed by "mnode" is "0.2KB \* the total number of tables in the cluster".
@@ -56,8 +56,8 @@ So, at least 3GB needs to be reserved for such a client.
The CPU resources required depend on two aspects:
-- **Data Insertion** Each dnode of TDengine can process at least 10,000 insertion requests in one second, while each insertion request can have multiple rows. The computing resource consumed between inserting 1 row one time and inserting 10 rows one time is very small. So, the more the rows to insert one time, the higher the efficiency. Inserting in bach also exposes requirements for the client side which needs to cache rows and insert in batch once the cached rows reaches a threshold.
-- **Data Query** High efficiency query is provided in TDengine, but it's hard to estimate the CPU resource required because the queries used in different use cases and the frequency of queries vary significantly. It can only be verified with the query statements, query frequency, data size to be queried, etc provided by user.
+- **Data Insertion** Each dnode of TDengine can process at least 10,000 insertion requests in one second, while each insertion request can have multiple rows. The difference in computing resource consumed, between inserting 1 row at a time, and inserting 10 rows at a time is very small. So, the more the number of rows that can be inserted one time, the higher the efficiency. Inserting in batch also imposes requirements on the client side which needs to cache rows to insert in batch once the number of cached rows reaches a threshold.
+- **Data Query** High efficiency query is provided in TDengine, but it's hard to estimate the CPU resource required because the queries used in different use cases and the frequency of queries vary significantly. It can only be verified with the query statements, query frequency, data size to be queried, and other requirements provided by users.
In short, the CPU resource required for data insertion can be estimated but it's hard to do so for query use cases. In real operation, it's suggested to control CPU usage below 50%. If this threshold is exceeded, it's a reminder for system operator to add more nodes in the cluster to expand resources.
@@ -71,12 +71,12 @@ Raw DataSize = numOfTables * rowSizePerTable * rowsPerTable
For example, there are 10,000,000 meters, while each meter collects data every 15 minutes and the data size of each collection is 128 bytes, so the raw data size of one year is: 10000000 \* 128 \* 24 \* 60 / 15 \* 365 = 44.8512(TB). Assuming compression ratio is 5, the actual disk size is: 44.851 / 5 = 8.97024(TB).
-Parameter `keep` can be used to set how long the data will be kept on disk. To further reduce storage cost, multiple storage levels can be enabled in TDengine, with the coldest data stored on the cheapest storage device, and this is transparent to application programs.
+Parameter `keep` can be used to set how long the data will be kept on disk. To further reduce storage cost, multiple storage levels can be enabled in TDengine, with the coldest data stored on the cheapest storage device. This is completely transparent to application programs.
-To increase the performance, multiple disks can be setup for parallel data reading or data inserting. Please note that an expensive disk array is not necessary because replications are used in TDengine to provide high availability.
+To increase performance, multiple disks can be setup for parallel data reading or data inserting. Please note that an expensive disk array is not necessary because replications are used in TDengine to provide high availability.
## Number of Hosts
-A host can be either physical or virtual. The total memory, total CPU, total disk required can be estimated according to the formulas mentioned previously. Then, according to the system resources that a single host can provide, assuming all hosts have the same resources, the number of hosts can be derived easily.
+A host can be either physical or virtual. The total memory, total CPU, total disk required can be estimated according to the formulae mentioned previously. Then, according to the system resources that a single host can provide, assuming all hosts have the same resources, the number of hosts can be derived easily.
**Quick Estimation for CPU, Memory and Disk** Please refer to [Resource Estimate](https://www.taosdata.com/config/config.html).
diff --git a/docs-en/13-operation/03-tolerance.md b/docs-en/13-operation/03-tolerance.md
index 9f74760278cd34a50c232f528549e90842631e18..d4d48d7fcdc2c990b6ea0821e2347c70a809ed79 100644
--- a/docs-en/13-operation/03-tolerance.md
+++ b/docs-en/13-operation/03-tolerance.md
@@ -7,26 +7,26 @@ title: Fault Tolerance & Disaster Recovery
TDengine uses **WAL**, i.e. Write Ahead Log, to achieve fault tolerance and high reliability.
-When a data block is received by TDengine, the original data block is first written into WAL. The log in WAL will be deleted only after the data has been written into data files in the database. Data can be recovered from WAL in case the server is stopped abnormally due to any reason and then restarted.
+When a data block is received by TDengine, the original data block is first written into WAL. The log in WAL will be deleted only after the data has been written into data files in the database. Data can be recovered from WAL in case the server is stopped abnormally for any reason and then restarted.
There are 2 configuration parameters related to WAL:
- walLevel:
- - 0:wal is disabled;
- - 1:wal is enabled without fsync;
- - 2:wal is enabled with fsync.
-- fsync:only valid when walLevel is set to 2, it specifies the interval of invoking fsync. If set to 0, it means fsync is invoked immediately once WAL is written.
+ - 0:wal is disabled
+ - 1:wal is enabled without fsync
+ - 2:wal is enabled with fsync
+- fsync:This parameter is only valid when walLevel is set to 2. It specifies the interval, in milliseconds, of invoking fsync. If set to 0, it means fsync is invoked immediately once WAL is written.
-To achieve absolutely no data loss, walLevel needs to be set to 2 and fsync needs to be set to 1. The penalty is the performance of data ingestion downgrades. However, if the concurrent threads of data insertion on the client side can reach a big enough number, for example 50, the data ingestion performance would be still good enough, our verification shows that the drop is only 30% compared to fsync is set to 3,000 milliseconds.
+To achieve absolutely no data loss, walLevel should be set to 2 and fsync should be set to 1. There is a performance penalty to the data ingestion rate. However, if the concurrent data insertion threads on the client side can reach a big enough number, for example 50, the data ingestion performance will be still good enough. Our verification shows that the drop is only 30% when fsync is set to 3,000 milliseconds.
## Disaster Recovery
-TDengine uses replications to provide high availability and disaster recovery capability.
+TDengine uses replication to provide high availability and disaster recovery capability.
-TDengine cluster is managed by mnode. To make sure the high availability of mnode, multiple replicas can be configured by the system parameter `numOfMnodes`. The data replication between mnode replicas is performed in a synchronous way to guarantee the metadata consistency.
+A TDengine cluster is managed by mnode. To ensure the high availability of mnode, multiple replicas can be configured by the system parameter `numOfMnodes`. The data replication between mnode replicas is performed in a synchronous way to guarantee metadata consistency.
-The number of replicas for the time series data in TDengine is associated with each database, there can be a lot of databases in a cluster while each database can be configured with a different number of replicas. When creating a database, parameter `replica` is used to configure the number of replications. To achieve high availability, `replica` needs to be higher than 1.
+The number of replicas for time series data in TDengine is associated with each database. There can be many databases in a cluster and each database can be configured with a different number of replicas. When creating a database, parameter `replica` is used to configure the number of replications. To achieve high availability, `replica` needs to be higher than 1.
The number of dnodes in a TDengine cluster must NOT be lower than the number of replicas for any database, otherwise it would fail when trying to create a table.
-As long as the dnodes of a TDengine cluster are deployed on different physical machines and the replica number is set to bigger than 1, high availability can be achieved without any other assistance. If dnodes of TDengine cluster are deployed in geographically different data centers, disaster recovery can be achieved too.
+As long as the dnodes of a TDengine cluster are deployed on different physical machines and the replica number is higher than 1, high availability can be achieved without any other assistance. For disaster recovery, dnodes of a TDengine cluster should be deployed in geographically different data centers.
diff --git a/docs-en/13-operation/08-export.md b/docs-en/13-operation/08-export.md
index fa9625a7c5f6b0e6706d726bff410cee647286bb..5780de42faeaedbc1c985ad2aa2f52fe56c76971 100644
--- a/docs-en/13-operation/08-export.md
+++ b/docs-en/13-operation/08-export.md
@@ -2,11 +2,13 @@
title: Data Export
---
-There are two ways of exporting data from a TDengine cluster, one is SQL statement in TDengine CLI, the other one is `taosdump`.
+There are two ways of exporting data from a TDengine cluster:
+- Using a SQL statement in TDengine CLI
+- Using the `taosdump` tool
## Export Using SQL
-If you want to export the data of a table or a STable, please execute below SQL statement in TDengine CLI.
+If you want to export the data of a table or a STable, please execute the SQL statement below, in the TDengine CLI.
```sql
select * from >> data.csv;
@@ -16,4 +18,4 @@ The data of table or STable specified by `tb_name` will be exported into a file
## Export Using taosdump
-With `taosdump`, you can choose to export the data of all databases, a database, a table or a STable, you can also choose export the data within a time range, or even only export the schema definition of a table. For the details of using `taosdump` please refer to [Tool for exporting and importing data: taosdump](/reference/taosdump).
+With `taosdump`, you can choose to export the data of all databases, a database, a table or a STable, you can also choose to export the data within a time range, or even only export the schema definition of a table. For the details of using `taosdump` please refer to [Tool for exporting and importing data: taosdump](/reference/taosdump).
diff --git a/docs-en/13-operation/09-status.md b/docs-en/13-operation/09-status.md
index ca8974bb8f4efec4c6d7c87c60b3ca67ad35c613..51396524ea281ae665c9fdf61d2e6e6202995537 100644
--- a/docs-en/13-operation/09-status.md
+++ b/docs-en/13-operation/09-status.md
@@ -3,7 +3,7 @@ sidebar_label: Connections & Tasks
title: Manage Connections and Query Tasks
---
-A system operator can use TDengine CLI to show the connections, ongoing queries, stream computing, and can close connection or stop ongoing query task or stream computing.
+A system operator can use the TDengine CLI to show connections, ongoing queries, stream computing, and can close connections or stop ongoing query tasks or stream computing.
## Show Connections
@@ -13,7 +13,7 @@ SHOW CONNECTIONS;
One column of the output of the above SQL command is "ip:port", which is the end point of the client.
-## Close Connections Forcedly
+## Force Close Connections
```sql
KILL CONNECTION ;
@@ -27,9 +27,9 @@ In the above SQL command, `connection-id` is from the first column of the output
SHOW QUERIES;
```
-The first column of the output is query ID, which is composed of the corresponding connection ID and the sequence number of the current query task started on this connection, in format of "connection-id:query-no".
+The first column of the output is query ID, which is composed of the corresponding connection ID and the sequence number of the current query task started on this connection. The format is "connection-id:query-no".
-## Close Queries Forcedly
+## Force Close Queries
```sql
KILL QUERY ;
@@ -43,9 +43,9 @@ In the above SQL command, `query-id` is from the first column of the output of `
SHOW STREAMS;
```
-The first column of the output is stream ID, which is composed of the connection ID and the sequence number of the current stream started on this connection, in the format of "connection-id:stream-no".
+The first column of the output is stream ID, which is composed of the connection ID and the sequence number of the current stream started on this connection. The format is "connection-id:stream-no".
-## Close Continuous Query Forcedly
+## Force Close Continuous Query
```sql
KILL STREAM ;
diff --git a/docs-en/13-operation/10-monitor.md b/docs-en/13-operation/10-monitor.md
index 615f79ca73f25115f5b4f19863c0f152f4fecf69..a4679983f2bc77bb4e438f5d43fa1b8beb39b120 100644
--- a/docs-en/13-operation/10-monitor.md
+++ b/docs-en/13-operation/10-monitor.md
@@ -2,13 +2,13 @@
title: TDengine Monitoring
---
-After TDengine is started, a database named `log` for monitoring is created automatically. The information about CPU, memory, disk, bandwidth, number of requests, disk I/O speed, slow query is written into `log` database on the basis of a predefined interval. Additionally, some important system operations, like logon, create user, drop database, and alerts and warnings generated in TDengine are written into the `log` database too. A system operator can view the data in `log` database from TDengine CLI or from a web console.
+After TDengine is started, a database named `log` is created automatically to help with monitoring. Information that includes CPU, memory and disk usage, bandwidth, number of requests, disk I/O speed, and slow queries is written into the `log` database at a predefined interval. Additionally, some important system operations, like logon, create user, drop database, and alerts and warnings generated in TDengine are written into the `log` database too. A system operator can view the data in `log` database from TDengine CLI or from a web console.
The collection of the monitoring information is enabled by default, but can be disabled by parameter `monitor` in the configuration file.
## TDinsight
-TDinsight is a complete solution which uses the monitor database `log` mentioned previously and Grafana to monitor a TDengine cluster.
+TDinsight is a complete solution which uses the monitoring database `log` mentioned previously, and Grafana, to monitor a TDengine cluster.
From version 2.3.3.0, more monitoring data has been added in the `log` database. Please refer to [TDinsight Grafana Dashboard](https://grafana.com/grafana/dashboards/15167) to learn more details about using TDinsight to monitor TDengine.
diff --git a/docs-en/13-operation/17-diagnose.md b/docs-en/13-operation/17-diagnose.md
index 53d808ef511b72acbf7cff22dc8c0d5a5b05408e..2b474fddba4af5ba0c29103cd8ab1249d10d055b 100644
--- a/docs-en/13-operation/17-diagnose.md
+++ b/docs-en/13-operation/17-diagnose.md
@@ -4,13 +4,13 @@ title: Problem Diagnostics
## Network Connection Diagnostics
-When the client is unable to access the server, the network connection between the client side and the server side needs to be checked to find out the root cause and resolve problems.
+When a TDengine client is unable to access a TDengine server, the network connection between the client side and the server side must be checked to find the root cause and resolve problems.
-The diagnostic for network connection can be executed between Linux and Linux or between Linux and Windows.
+Diagnostics for network connections can be executed between Linux and Linux or between Linux and Windows.
Diagnostic steps:
-1. If the port range to be diagnosed are being occupied by a `taosd` server process, please first stop `taosd.
+1. If the port range to be diagnosed is being occupied by a `taosd` server process, please first stop `taosd`.
2. On the server side, execute command `taos -n server -P -l ` to monitor the port range starting from the port specified by `-P` parameter with the role of "server".
3. On the client side, execute command `taos -n client -h -P -l ` to send a testing package to the specified server and port.
@@ -65,13 +65,13 @@ Output of the client side for the example is below:
12/21 14:50:22.721274 0x7fc95d859200 UTL successed to test UDP port:6011
```
-The output needs to be checked carefully for the system operator to find out the root cause and solve the problem.
+The output needs to be checked carefully for the system operator to find the root cause and resolve the problem.
## Startup Status and RPC Diagnostic
-`taos -n startup -h ` can be used to check the startup status of a `taosd` process. This is a comman task for a system operator to do to determine whether `taosd` has been started successfully, especially in case of cluster.
+`taos -n startup -h ` can be used to check the startup status of a `taosd` process. This is a common task which should be performed by a system operator, especially in the case of a cluster, to determine whether `taosd` has been started successfully.
-`taos -n rpc -h ` can be used to check whether the port of a started `taosd` can be accessed or not. If `taosd` process doesn't respond or is working abnormally, this command can be used to initiate a rpc communication with the specified fqdn to determine whether it's a network problem or `taosd` is abnormal.
+`taos -n rpc -h ` can be used to check whether the port of a started `taosd` can be accessed or not. If `taosd` process doesn't respond or is working abnormally, this command can be used to initiate a rpc communication with the specified fqdn to determine whether it's a network problem or whether `taosd` is abnormal.
## Sync and Arbitrator Diagnostic
@@ -80,13 +80,13 @@ taos -n sync -P 6040 -h
taos -n sync -P 6042 -h
```
-The above commands can be executed on Linux Shell to check whether the port for sync is working well and whether the sync module on the server side is working well. Additionally, `-P 6042` is used to check whether the arbitrator is configured properly and is working well.
+The above commands can be executed in a Linux shell to check whether the port for sync is working well and whether the sync module on the server side is working well. Additionally, `-P 6042` is used to check whether the arbitrator is configured properly and is working well.
## Network Speed Diagnostic
`taos -n speed -h -P 6030 -N 10 -l 10000000 -S TCP`
-From version 2.2.0.0, the above command can be executed on Linux Shell to test the network speed, it sends uncompressed package to a running `taosd` server process or a simulated server process started by `taos -n server` to test the network speed. Parameters can be used when testing network speed are as below:
+From version 2.2.0.0 onwards, the above command can be executed in a Linux shell to test network speed. The command sends uncompressed packages to a running `taosd` server process or a simulated server process started by `taos -n server` to test the network speed. The parameters that can be used when testing network speed are listed below:
-n:When set to "speed", it means testing network speed.
-h:The FQDN or IP of the server process to be connected to; if not set, the FQDN configured in `taos.cfg` is used.
@@ -99,23 +99,23 @@ From version 2.2.0.0, the above command can be executed on Linux Shell to test t
`taos -n fqdn -h `
-From version 2.2.0.0, the above command can be executed on Linux Shell to test the resolution speed of FQDN. It can be used to try to resolve a FQDN to an IP address and record the time spent in this process. The parameters that can be used for this purpose are as below:
+From version 2.2.0.0 onward, the above command can be executed in a Linux shell to test the resolution speed of FQDN. It can be used to try to resolve a FQDN to an IP address and record the time spent in this process. The parameters that can be used for this purpose are as below:
-n:When set to "fqdn", it means testing the speed of resolving FQDN.
-h:The FQDN to be resolved. If not set, the `FQDN` parameter in `taos.cfg` is used by default.
## Server Log
-The parameter `debugFlag` is used to control the log level of the `taosd` server process. The default value is 131, for debug purpose it needs to be escalated to 135 or 143.
+The parameter `debugFlag` is used to control the log level of the `taosd` server process. The default value is 131. For debugging and tracing, it needs to be set to either 135 or 143 respectively.
-Once this parameter is set to 135 or 143, the log file grows very quickly especially when there is a huge volume of data insertion and data query requests. If all the logs are stored together, some important information may be missed very easily, so on server side important information is stored at different place from other logs.
+Once this parameter is set to 135 or 143, the log file grows very quickly especially when there is a huge volume of data insertion and data query requests. If all the logs are stored together, some important information may be missed very easily and so on the server side, important information is stored in a different place from other logs.
- The log at level of INFO, WARNING and ERROR is stored in `taosinfo` so that it is easy to find important information
- The log at level of DEBUG (135) and TRACE (143) and other information not handled by `taosinfo` are stored in `taosdlog`
## Client Log
-An independent log file, named as "taoslog+" is generated for each client program, i.e. a client process. The default value of `debugFlag` is also 131 and only logs at level of INFO/ERROR/WARNING are recorded, for debugging purposes it needs to be changed to 135 or 143 so that logs at DEBUG or TRACE level can be recorded.
+An independent log file, named as "taoslog+" is generated for each client program, i.e. a client process. The default value of `debugFlag` is also 131 and only logs at level of INFO/ERROR/WARNING are recorded. As stated above, for debugging and tracing, it needs to be changed to 135 or 143 respectively, so that logs at DEBUG or TRACE level can be recorded.
The maximum length of a single log file is controlled by parameter `numOfLogLines` and only 2 log files are kept for each `taosd` server process.
diff --git a/docs-en/13-operation/index.md b/docs-en/13-operation/index.md
index a9801c0390f294d6b39b1219cc4055149871ef9c..c64749c40e26f091e4a25e0238827ebceff4b069 100644
--- a/docs-en/13-operation/index.md
+++ b/docs-en/13-operation/index.md
@@ -2,7 +2,7 @@
title: Administration
---
-This chapter is mainly written for system administrators, covering download, install/uninstall, data import/export, system monitoring, user management, connection management, etc. Capacity planning and system optimization are also covered.
+This chapter is mainly written for system administrators. It covers download, install/uninstall, data import/export, system monitoring, user management, connection management, capacity planning and system optimization.
```mdx-code-block
import DocCardList from '@theme/DocCardList';
diff --git a/docs-en/14-reference/02-rest-api/02-rest-api.mdx b/docs-en/14-reference/02-rest-api/02-rest-api.mdx
index 0edc901bc373683a49dfde061f796dc0ae79ab4f..990af861961e9daf4ac775462e21d6d9852d17c1 100644
--- a/docs-en/14-reference/02-rest-api/02-rest-api.mdx
+++ b/docs-en/14-reference/02-rest-api/02-rest-api.mdx
@@ -2,10 +2,10 @@
title: REST API
---
-To support the development of various types of platforms, TDengine provides an API that conforms to the REST principle, namely REST API. To minimize the learning cost, different from the other database REST APIs, TDengine directly requests the SQL command contained in the request BODY through HTTP POST to operate the database and only requires a URL.
+To support the development of various types of applications and platforms, TDengine provides an API that conforms to REST principles; namely REST API. To minimize the learning cost, unlike REST APIs for other database engines, TDengine allows insertion of SQL commands in the BODY of an HTTP POST request, to operate the database.
:::note
-One difference from the native connector is that the REST interface is stateless, so the `USE db_name` command has no effect. All references to table names and super table names need to specify the database name prefix. (Since version 2.2.0.0, it is supported to specify db_name in RESTful URL. If the database name prefix is not specified in the SQL command, the `db_name` specified in the URL will be used. Since version 2.4.0.0, REST service is provided by taosAdapter by default. And it requires that the `db_name` must be specified in the URL.)
+One difference from the native connector is that the REST interface is stateless and so the `USE db_name` command has no effect. All references to table names and super table names need to specify the database name in the prefix. (Since version 2.2.0.0, TDengine supports specification of the db_name in RESTful URL. If the database name prefix is not specified in the SQL command, the `db_name` specified in the URL will be used. Since version 2.4.0.0, REST service is provided by taosAdapter by default and it requires that the `db_name` must be specified in the URL.)
:::
## Installation
@@ -16,9 +16,9 @@ The REST interface does not rely on any TDengine native library, so the client a
If the TDengine server is already installed, it can be verified as follows:
-The following is an Ubuntu environment using the `curl` tool (to confirm that it is installed) to verify that the REST interface is working.
+The following example is in an Ubuntu environment and uses the `curl` tool to verify that the REST interface is working. Note that the `curl` tool may need to be installed in your environment.
-The following example lists all databases, replacing `h1.taosdata.com` and `6041` (the default port) with the actual running TDengine service FQDN and port number.
+The following example lists all databases on the host h1.taosdata.com. To use it in your environment, replace `h1.taosdata.com` and `6041` (the default port) with the actual running TDengine service FQDN and port number.
```html
curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'show databases;' h1.taosdata.com:6041/rest/sql
@@ -89,7 +89,7 @@ For example, `http://h1.taos.com:6041/rest/sql/test` is a URL to `h1.taos.com:60
TDengine supports both Basic authentication and custom authentication mechanisms, and subsequent versions will provide a standard secure digital signature mechanism for authentication.
-- The custom authentication information is as follows (Let's introduce token later)
+- The custom authentication information is as follows. More details about "token" later.
```
Authorization: Taosd
@@ -136,7 +136,7 @@ The return result is in JSON format, as follows:
Description:
-- status: tell if the operation result is success or failure.
+- status: tells you whether the operation result is success or failure.
- head: the definition of the table, or just one column "affected_rows" if no result set is returned. (As of version 2.0.17.0, it is recommended not to rely on the head return value to determine the data column type but rather use column_meta. In later versions, the head item may be removed from the return value.)
- column_meta: this item is added to the return value to indicate the data type of each column in the data with version 2.0.17.0 and later versions. Each column is described by three values: column name, column type, and type length. For example, `["current",6,4]` means that the column name is "current", the column type is 6, which is the float type, and the type length is 4, which is the float type with 4 bytes. If the column type is binary or nchar, the type length indicates the maximum length of content stored in the column, not the length of the specific data in this return value. When the column type is nchar, the type length indicates the number of Unicode characters that can be saved, not bytes.
- data: The exact data returned, presented row by row, or just [[affected_rows]] if no result set is returned. The order of the data columns in each row of data is the same as that of the data columns described in column_meta.
diff --git a/docs-en/14-reference/08-taos-shell.md b/docs-en/14-reference/08-taos-shell.md
index 9bb5178300931e4b3808716badf06c85a4bbf396..002b515093258152e85dd9d7437e424dfa98c874 100644
--- a/docs-en/14-reference/08-taos-shell.md
+++ b/docs-en/14-reference/08-taos-shell.md
@@ -1,10 +1,10 @@
---
-title: TDengine Command Line (CLI)
-sidebar_label: TDengine CLI
+title: TDengine Command Line Interface (CLI)
+sidebar_label: Command Line Interface
description: Instructions and tips for using the TDengine CLI
---
-The TDengine command-line application (hereafter referred to as `TDengine CLI`) is the simplest way for users to manipulate and interact with TDengine instances.
+The TDengine command-line interface (hereafter referred to as `TDengine CLI`) is the simplest way for users to manipulate and interact with TDengine instances.
## Installation
diff --git a/docs-en/14-reference/index.md b/docs-en/14-reference/index.md
index 89f675902d01ba2d2c1b322408c372429d6bda1c..f350eebfc1a1ca2feaedc18c4b4fa798742e31b4 100644
--- a/docs-en/14-reference/index.md
+++ b/docs-en/14-reference/index.md
@@ -2,11 +2,11 @@
title: Reference
---
-The reference guide is the detailed introduction to TDengine, various TDengine's connectors in different languages, and the tools that come with it.
+The reference guide is a detailed introduction to TDengine including various TDengine connectors in different languages, and the tools that come with TDengine.
```mdx-code-block
import DocCardList from '@theme/DocCardList';
import {useCurrentSidebarCategory} from '@docusaurus/theme-common';
-```
\ No newline at end of file
+```
diff --git a/docs-examples/go/connect/cgoexample/main.go b/docs-examples/go/connect/cgoexample/main.go
index 8b9aba4ce4217c00605bc8796c788f3dd52805e6..ba7ed0f728a1cd546dbc3199ce4c0dc854ebee91 100644
--- a/docs-examples/go/connect/cgoexample/main.go
+++ b/docs-examples/go/connect/cgoexample/main.go
@@ -20,4 +20,4 @@ func main() {
// use
// var taosDSN = "root:taosdata@tcp(localhost:6030)/dbName"
-// if you want to connect to a default database.
+// if you want to connect a specified database named "dbName".
diff --git a/docs-examples/go/connect/restexample/main.go b/docs-examples/go/connect/restexample/main.go
index 9c05e7eed80dee4ae7e6b20637d265f388d7438d..1efc98b988c183c4c680884057bf2a72a9dd19e9 100644
--- a/docs-examples/go/connect/restexample/main.go
+++ b/docs-examples/go/connect/restexample/main.go
@@ -18,6 +18,6 @@ func main() {
defer taos.Close()
}
-// use
+// use
// var taosDSN = "root:taosdata@http(localhost:6041)/dbName"
-// if you want to connect to a default database.
+// if you want to connect a specified database named "dbName".
diff --git a/docs-examples/java/src/main/java/com/taos/example/JNIConnectExample.java b/docs-examples/java/src/main/java/com/taos/example/JNIConnectExample.java
index c6ce2ef9785a010daa55ad29415f81711760cd57..84292f7e8682dbb8171c807da74a603f4ae8256e 100644
--- a/docs-examples/java/src/main/java/com/taos/example/JNIConnectExample.java
+++ b/docs-examples/java/src/main/java/com/taos/example/JNIConnectExample.java
@@ -22,4 +22,4 @@ public class JNIConnectExample {
// use
// String jdbcUrl = "jdbc:TAOS://localhost:6030/dbName?user=root&password=taosdata";
-// if you want to connect to a default database.
\ No newline at end of file
+// if you want to connect a specified database named "dbName".
\ No newline at end of file
diff --git a/include/dnode/qnode/qnode.h b/include/dnode/qnode/qnode.h
index 1ab101f705ac3f71fad134c200a22f903e4a8e86..90a952939577fc9cd945d0dc9fd8bde8d906667f 100644
--- a/include/dnode/qnode/qnode.h
+++ b/include/dnode/qnode/qnode.h
@@ -26,14 +26,17 @@ extern "C" {
typedef struct SQnode SQnode;
typedef struct {
- int64_t numOfStartTask;
- int64_t numOfStopTask;
- int64_t numOfRecvedFetch;
- int64_t numOfSentHb;
- int64_t numOfSentFetch;
- int64_t numOfTaskInQueue;
+ int64_t numOfProcessedQuery;
+ int64_t numOfProcessedCQuery;
+ int64_t numOfProcessedFetch;
+ int64_t numOfProcessedDrop;
+ int64_t memSizeInCache;
+ int64_t dataSizeSend;
+ int64_t dataSizeRecv;
+ int64_t numOfQueryInQueue;
int64_t numOfFetchInQueue;
- int64_t numOfErrors;
+ int64_t waitTimeInQueryQUeue;
+ int64_t waitTimeInFetchQUeue;
} SQnodeLoad;
typedef struct {
@@ -71,10 +74,10 @@ int32_t qndGetLoad(SQnode *pQnode, SQnodeLoad *pLoad);
* @param pQnode The qnode object.
* @param pMsg The request message
*/
-int32_t qndProcessQueryMsg(SQnode *pQnode, SRpcMsg *pMsg);
+int32_t qndProcessQueryMsg(SQnode *pQnode, int64_t ts, SRpcMsg *pMsg);
#ifdef __cplusplus
}
#endif
-#endif /*_TD_QNODE_H_*/
\ No newline at end of file
+#endif /*_TD_QNODE_H_*/
diff --git a/include/libs/function/functionMgt.h b/include/libs/function/functionMgt.h
index 3d86adb573cd27dfce3b93409b96a11b47b7aaf5..922136b590cb007c6acd040c7ce81d135c0dad4f 100644
--- a/include/libs/function/functionMgt.h
+++ b/include/libs/function/functionMgt.h
@@ -23,6 +23,9 @@ extern "C" {
#include "function.h"
#include "querynodes.h"
+#define FUNC_AGGREGATE_UDF_ID 5001
+#define FUNC_SCALAR_UDF_ID 5002
+
typedef enum EFunctionType {
// aggregate function
FUNCTION_TYPE_APERCENTILE = 1,
@@ -126,21 +129,12 @@ typedef enum EFunctionType {
struct SqlFunctionCtx;
struct SResultRowEntryInfo;
struct STimeWindow;
-struct SCatalog;
-
-typedef struct SFmGetFuncInfoParam {
- struct SCatalog* pCtg;
- void* pRpc;
- const SEpSet* pMgmtEps;
- char* pErrBuf;
- int32_t errBufLen;
-} SFmGetFuncInfoParam;
int32_t fmFuncMgtInit();
void fmFuncMgtDestroy();
-int32_t fmGetFuncInfo(SFmGetFuncInfoParam* pParam, SFunctionNode* pFunc);
+int32_t fmGetFuncInfo(SFunctionNode* pFunc, char* pMsg, int32_t msgLen);
bool fmIsBuiltinFunc(const char* pFunc);
diff --git a/include/libs/index/index.h b/include/libs/index/index.h
index 05db99db0f199169ce71e4a76d56899361aa403b..c3d31ffe3853d76d6ab6803dfc10f54dad2445c6 100644
--- a/include/libs/index/index.h
+++ b/include/libs/index/index.h
@@ -192,11 +192,16 @@ void indexTermDestroy(SIndexTerm* p);
void indexInit();
/* index filter */
+typedef struct SIndexMetaArg {
+ void* metaHandle;
+ uint64_t suid;
+} SIndexMetaArg;
+
typedef enum { SFLT_NOT_INDEX, SFLT_COARSE_INDEX, SFLT_ACCURATE_INDEX } SIdxFltStatus;
SIdxFltStatus idxGetFltStatus(SNode* pFilterNode);
-int32_t doFilterTag(const SNode* pFilterNode, SArray* result);
+int32_t doFilterTag(const SNode* pFilterNode, SIndexMetaArg* metaArg, SArray* result);
/*
* destory index env
*
diff --git a/include/libs/nodes/plannodes.h b/include/libs/nodes/plannodes.h
index 3ae2d18e5dab941c7693667719d2a87de80a8724..2648a468dd3fa82fe91825d60b739387d9255bd7 100644
--- a/include/libs/nodes/plannodes.h
+++ b/include/libs/nodes/plannodes.h
@@ -56,6 +56,9 @@ typedef struct SScanLogicNode {
int8_t intervalUnit;
int8_t slidingUnit;
SNode* pTagCond;
+ int8_t triggerType;
+ int64_t watermark;
+ int16_t tsColId;
} SScanLogicNode;
typedef struct SJoinLogicNode {
@@ -216,6 +219,9 @@ typedef struct STableScanPhysiNode {
int64_t sliding;
int8_t intervalUnit;
int8_t slidingUnit;
+ int8_t triggerType;
+ int64_t watermark;
+ int16_t tsColId;
} STableScanPhysiNode;
typedef STableScanPhysiNode STableSeqScanPhysiNode;
diff --git a/include/libs/qworker/qworker.h b/include/libs/qworker/qworker.h
index 9e3b318019e6a689ed8b976870659f4890bcec44..5942d00cb212002d5309cec4cba253dc7e3d7388 100644
--- a/include/libs/qworker/qworker.h
+++ b/include/libs/qworker/qworker.h
@@ -52,22 +52,24 @@ typedef struct {
int32_t qWorkerInit(int8_t nodeType, int32_t nodeId, SQWorkerCfg *cfg, void **qWorkerMgmt, const SMsgCb *pMsgCb);
-int32_t qWorkerProcessQueryMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg);
+int32_t qWorkerProcessQueryMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, int64_t ts);
-int32_t qWorkerProcessCQueryMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg);
+int32_t qWorkerProcessCQueryMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, int64_t ts);
-int32_t qWorkerProcessFetchMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg);
+int32_t qWorkerProcessFetchMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, int64_t ts);
-int32_t qWorkerProcessFetchRsp(void *node, void *qWorkerMgmt, SRpcMsg *pMsg);
+int32_t qWorkerProcessFetchRsp(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, int64_t ts);
-int32_t qWorkerProcessCancelMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg);
+int32_t qWorkerProcessCancelMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, int64_t ts);
-int32_t qWorkerProcessDropMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg);
+int32_t qWorkerProcessDropMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, int64_t ts);
-int32_t qWorkerProcessHbMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg);
+int32_t qWorkerProcessHbMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, int64_t ts);
void qWorkerDestroy(void **qWorkerMgmt);
+int64_t qWorkerGetWaitTimeInQueue(void *qWorkerMgmt, EQueueType type);
+
#ifdef __cplusplus
}
#endif
diff --git a/include/libs/sync/sync.h b/include/libs/sync/sync.h
index 2e04afdbdc8d06029808da29398392f481832d75..a587ad6ef22fb80538147a61980ae4cdadd8ec03 100644
--- a/include/libs/sync/sync.h
+++ b/include/libs/sync/sync.h
@@ -66,12 +66,6 @@ typedef struct SSyncCfg {
SNodeInfo nodeInfo[TSDB_MAX_REPLICA];
} SSyncCfg;
-typedef struct SSnapshot {
- void* data;
- SyncIndex lastApplyIndex;
- SyncTerm lastApplyTerm;
-} SSnapshot;
-
typedef struct SFsmCbMeta {
SyncIndex index;
bool isWeak;
@@ -93,6 +87,12 @@ typedef struct SReConfigCbMeta {
uint64_t flag;
} SReConfigCbMeta;
+typedef struct SSnapshot {
+ void *data;
+ SyncIndex lastApplyIndex;
+ SyncTerm lastApplyTerm;
+} SSnapshot;
+
typedef struct SSyncFSM {
void* data;
@@ -101,23 +101,17 @@ typedef struct SSyncFSM {
void (*FpRollBackCb)(struct SSyncFSM* pFsm, const SRpcMsg* pMsg, SFsmCbMeta cbMeta);
void (*FpRestoreFinishCb)(struct SSyncFSM* pFsm);
- int32_t (*FpGetSnapshot)(struct SSyncFSM* pFsm, SSnapshot* pSnapshot);
-
- // if (*ppIter == NULL)
- // *ppIter = new iter;
- // else
- // *ppIter.next();
- //
- // if success, return 0. else return error code
- int32_t (*FpSnapshotRead)(struct SSyncFSM* pFsm, const SSnapshot* pSnapshot, void** ppIter, char** ppBuf,
- int32_t* len);
+ void (*FpReConfigCb)(struct SSyncFSM* pFsm, SSyncCfg newCfg, SReConfigCbMeta cbMeta);
- // apply data into fsm
- int32_t (*FpSnapshotApply)(struct SSyncFSM* pFsm, const SSnapshot* pSnapshot, char* pBuf, int32_t len);
+ int32_t (*FpGetSnapshot)(struct SSyncFSM* pFsm, SSnapshot* pSnapshot);
- void (*FpReConfigCb)(struct SSyncFSM* pFsm, SSyncCfg newCfg, SReConfigCbMeta cbMeta);
+ int32_t (*FpSnapshotStartRead)(struct SSyncFSM* pFsm, void** ppReader);
+ int32_t (*FpSnapshotStopRead)(struct SSyncFSM* pFsm, void* pReader);
+ int32_t (*FpSnapshotDoRead)(struct SSyncFSM* pFsm, void* pReader, void** ppBuf, int32_t* len);
- // int32_t (*FpRestoreSnapshot)(struct SSyncFSM* pFsm, const SSnapshot* snapshot);
+ int32_t (*FpSnapshotStartWrite)(struct SSyncFSM* pFsm, void** ppWriter);
+ int32_t (*FpSnapshotStopWrite)(struct SSyncFSM* pFsm, void* pWriter, bool isApply);
+ int32_t (*FpSnapshotDoWrite)(struct SSyncFSM* pFsm, void* pWriter, void* pBuf, int32_t len);
} SSyncFSM;
diff --git a/include/util/taoserror.h b/include/util/taoserror.h
index c63d8668b592921efbebf6cac913468a904c6608..65cfe8de0be9e387cecba70141c0bab513d6fc63 100644
--- a/include/util/taoserror.h
+++ b/include/util/taoserror.h
@@ -69,6 +69,7 @@ int32_t* taosGetErrno();
#define TSDB_CODE_DUP_KEY TAOS_DEF_ERROR_CODE(0, 0x0027)
#define TSDB_CODE_NEED_RETRY TAOS_DEF_ERROR_CODE(0, 0x0028)
#define TSDB_CODE_OUT_OF_RPC_MEMORY_QUEUE TAOS_DEF_ERROR_CODE(0, 0x0029)
+#define TSDB_CODE_INVALID_TIMESTAMP TAOS_DEF_ERROR_CODE(0, 0x0030)
#define TSDB_CODE_REF_NO_MEMORY TAOS_DEF_ERROR_CODE(0, 0x0040)
#define TSDB_CODE_REF_FULL TAOS_DEF_ERROR_CODE(0, 0x0041)
@@ -655,7 +656,7 @@ int32_t* taosGetErrno();
#define TSDB_CODE_FUNC_FUNTION_PARA_NUM TAOS_DEF_ERROR_CODE(0, 0x2801)
#define TSDB_CODE_FUNC_FUNTION_PARA_TYPE TAOS_DEF_ERROR_CODE(0, 0x2802)
#define TSDB_CODE_FUNC_FUNTION_PARA_VALUE TAOS_DEF_ERROR_CODE(0, 0x2803)
-#define TSDB_CODE_FUNC_INVALID_FUNTION TAOS_DEF_ERROR_CODE(0, 0x2804)
+#define TSDB_CODE_FUNC_NOT_BUILTIN_FUNTION TAOS_DEF_ERROR_CODE(0, 0x2804)
//udf
#define TSDB_CODE_UDF_STOPPING TAOS_DEF_ERROR_CODE(0, 0x2901)
diff --git a/include/util/tqueue.h b/include/util/tqueue.h
index dbc4d03177e4c489240c04aac37710ce995102d4..466c577c0079d07774722ff2efdd30bf207e0fc3 100644
--- a/include/util/tqueue.h
+++ b/include/util/tqueue.h
@@ -46,6 +46,7 @@ typedef struct {
void *ahandle;
int32_t workerId;
int32_t threadNum;
+ int64_t timestamp;
} SQueueInfo;
typedef enum {
@@ -80,7 +81,7 @@ int32_t taosAddIntoQset(STaosQset *qset, STaosQueue *queue, void *ahandle);
void taosRemoveFromQset(STaosQset *qset, STaosQueue *queue);
int32_t taosGetQueueNumber(STaosQset *qset);
-int32_t taosReadQitemFromQset(STaosQset *qset, void **ppItem, void **ahandle, FItem *itemFp);
+int32_t taosReadQitemFromQset(STaosQset *qset, void **ppItem, int64_t *ts, void **ahandle, FItem *itemFp);
int32_t taosReadAllQitemsFromQset(STaosQset *qset, STaosQall *qall, void **ahandle, FItems *itemsFp);
void taosResetQsetThread(STaosQset *qset, void *pItem);
diff --git a/source/client/src/clientImpl.c b/source/client/src/clientImpl.c
index 0c5c72906b9963fe4847b5ef3c6b1c42c29cea0f..eb4c4cb59feac8c8a0db6cd85f45f3482b31e96f 100644
--- a/source/client/src/clientImpl.c
+++ b/source/client/src/clientImpl.c
@@ -1249,6 +1249,8 @@ void resetConnectDB(STscObj* pTscObj) {
int32_t setQueryResultFromRsp(SReqResultInfo* pResultInfo, const SRetrieveTableRsp* pRsp, bool convertUcs4) {
assert(pResultInfo != NULL && pRsp != NULL);
+ taosMemoryFreeClear(pResultInfo->pRspMsg);
+
pResultInfo->pRspMsg = (const char*)pRsp;
pResultInfo->pData = (void*)pRsp->data;
pResultInfo->numOfRows = htonl(pRsp->numOfRows);
diff --git a/source/common/src/tdatablock.c b/source/common/src/tdatablock.c
index 0f34d52d35c1610f8236fcdc8b1b758fd839ed88..349564ae28b8601565d57533ae57b5a1b49e51f5 100644
--- a/source/common/src/tdatablock.c
+++ b/source/common/src/tdatablock.c
@@ -611,6 +611,7 @@ int32_t blockDataFromBuf1(SSDataBlock* pBlock, const char* buf, size_t capacity)
for (int32_t i = 0; i < numOfCols; ++i) {
SColumnInfoData* pCol = taosArrayGet(pBlock->pDataBlock, i);
+ pCol->hasNull = true;
if (IS_VAR_DATA_TYPE(pCol->info.type)) {
size_t metaSize = capacity * sizeof(int32_t);
@@ -1153,7 +1154,9 @@ void colInfoDataCleanup(SColumnInfoData* pColumn, uint32_t numOfRows) {
if (IS_VAR_DATA_TYPE(pColumn->info.type)) {
pColumn->varmeta.length = 0;
} else {
- memset(pColumn->nullbitmap, 0, BitmapLen(numOfRows));
+ if (pColumn->nullbitmap != NULL) {
+ memset(pColumn->nullbitmap, 0, BitmapLen(numOfRows));
+ }
}
}
@@ -1290,8 +1293,8 @@ static void doShiftBitmap(char* nullBitmap, size_t n, size_t total) {
static void colDataTrimFirstNRows(SColumnInfoData* pColInfoData, size_t n, size_t total) {
if (IS_VAR_DATA_TYPE(pColInfoData->info.type)) {
- memmove(pColInfoData->varmeta.offset, &pColInfoData->varmeta.offset[n], (total - n));
- memset(&pColInfoData->varmeta.offset[total - n - 1], 0, n);
+ memmove(pColInfoData->varmeta.offset, &pColInfoData->varmeta.offset[n], (total - n) * sizeof(int32_t));
+ memset(&pColInfoData->varmeta.offset[total - n], 0, n);
} else {
int32_t bytes = pColInfoData->info.bytes;
memmove(pColInfoData->pData, ((char*)pColInfoData->pData + n * bytes), (total - n) * bytes);
@@ -1460,7 +1463,7 @@ static char* formatTimestamp(char* buf, int64_t val, int precision) {
}
void blockDebugShowData(const SArray* dataBlocks) {
- char pBuf[128];
+ char pBuf[128] = {0};
int32_t sz = taosArrayGetSize(dataBlocks);
for (int32_t i = 0; i < sz; i++) {
SSDataBlock* pDataBlock = taosArrayGet(dataBlocks, i);
diff --git a/source/common/src/tglobal.c b/source/common/src/tglobal.c
index d0a2ddd9bb6379d702b8c4d46c60085d3fa05b0c..141ec4f03b76238d6c15695c7ea3a8ea112d9e4b 100644
--- a/source/common/src/tglobal.c
+++ b/source/common/src/tglobal.c
@@ -293,7 +293,7 @@ int32_t taosAddClientLogCfg(SConfig *pCfg) {
if (cfgAddInt32(pCfg, "jniDebugFlag", jniDebugFlag, 0, 255, 1) != 0) return -1;
if (cfgAddInt32(pCfg, "simDebugFlag", 143, 0, 255, 1) != 0) return -1;
if (cfgAddInt32(pCfg, "debugFlag", 0, 0, 255, 1) != 0) return -1;
- if (cfgAddInt32(pCfg, "idxDebugFlag", 0, 0, 255, 1) != 0) return -1;
+ if (cfgAddInt32(pCfg, "idxDebugFlag", idxDebugFlag, 0, 255, 1) != 0) return -1;
return 0;
}
diff --git a/source/common/src/ttime.c b/source/common/src/ttime.c
index 685ee168939925a05309006e2475ab3a9c22b2d3..10ba58af298c59306badc2e299e588e3ec46874f 100644
--- a/source/common/src/ttime.c
+++ b/source/common/src/ttime.c
@@ -521,10 +521,10 @@ int32_t convertStringToTimestamp(int16_t type, char *inputData, int64_t timePrec
if (type == TSDB_DATA_TYPE_BINARY || type == TSDB_DATA_TYPE_VARBINARY) {
newColData = taosMemoryCalloc(1, charLen + 1);
memcpy(newColData, varDataVal(inputData), charLen);
- bool ret = taosParseTime(newColData, timeVal, charLen, (int32_t)timePrec, tsDaylight);
+ int32_t ret = taosParseTime(newColData, timeVal, charLen, (int32_t)timePrec, tsDaylight);
if (ret != TSDB_CODE_SUCCESS) {
taosMemoryFree(newColData);
- return ret;
+ return TSDB_CODE_INVALID_TIMESTAMP;
}
taosMemoryFree(newColData);
} else if (type == TSDB_DATA_TYPE_NCHAR) {
@@ -783,7 +783,7 @@ int64_t taosTimeTruncate(int64_t t, const SInterval* pInterval, int32_t precisio
// 2020-07-03 17:48:42
// and the parameter can also be a variable.
const char* fmtts(int64_t ts) {
- static char buf[96];
+ static char buf[96] = {0};
size_t pos = 0;
struct tm tm;
diff --git a/source/dnode/mgmt/mgmt_qnode/src/qmHandle.c b/source/dnode/mgmt/mgmt_qnode/src/qmHandle.c
index 916973b4ca596ce7b6ee9d5bd89a4840161c6b86..65794b7b8136f0d6314880399ac08a195eecd22a 100644
--- a/source/dnode/mgmt/mgmt_qnode/src/qmHandle.c
+++ b/source/dnode/mgmt/mgmt_qnode/src/qmHandle.c
@@ -16,7 +16,11 @@
#define _DEFAULT_SOURCE
#include "qmInt.h"
-void qmGetMonitorInfo(SQnodeMgmt *pMgmt, SMonQmInfo *qmInfo) {}
+void qmGetMonitorInfo(SQnodeMgmt *pMgmt, SMonQmInfo *qmInfo) {
+ SQnodeLoad qload = {0};
+ qndGetLoad(pMgmt->pQnode, &qload);
+
+}
int32_t qmProcessGetMonitorInfoReq(SQnodeMgmt *pMgmt, SRpcMsg *pMsg) {
SMonQmInfo qmInfo = {0};
diff --git a/source/dnode/mgmt/mgmt_qnode/src/qmWorker.c b/source/dnode/mgmt/mgmt_qnode/src/qmWorker.c
index 35c94b7fbe786434cfb59191c8899949099d0325..e7fc261b67a8a6416cdbafae07552a5c9576bc22 100644
--- a/source/dnode/mgmt/mgmt_qnode/src/qmWorker.c
+++ b/source/dnode/mgmt/mgmt_qnode/src/qmWorker.c
@@ -36,7 +36,7 @@ static void qmProcessQueue(SQueueInfo *pInfo, SRpcMsg *pMsg) {
code = qmProcessGetMonitorInfoReq(pMgmt, pMsg);
break;
default:
- code = qndProcessQueryMsg(pMgmt->pQnode, pMsg);
+ code = qndProcessQueryMsg(pMgmt->pQnode, pInfo->timestamp, pMsg);
break;
}
diff --git a/source/dnode/mgmt/node_mgmt/src/dmTransport.c b/source/dnode/mgmt/node_mgmt/src/dmTransport.c
index c7509eb9d8a7e1ed47bbc65f8b8e1e2d15364ebc..987fc5441653a09c27d889b03af30150622f96a3 100644
--- a/source/dnode/mgmt/node_mgmt/src/dmTransport.c
+++ b/source/dnode/mgmt/node_mgmt/src/dmTransport.c
@@ -62,7 +62,7 @@ static void dmProcessRpcMsg(SDnode *pDnode, SRpcMsg *pRpc, SEpSet *pEpSet) {
dmProcessNetTestReq(pDnode, pRpc);
return;
} else if (pRpc->msgType == TDMT_MND_SYSTABLE_RETRIEVE_RSP || pRpc->msgType == TDMT_VND_FETCH_RSP) {
- qWorkerProcessFetchRsp(NULL, NULL, pRpc);
+ qWorkerProcessFetchRsp(NULL, NULL, pRpc, 0);
return;
} else if (pRpc->msgType == TDMT_MND_STATUS_RSP && pEpSet != NULL) {
dmSetMnodeEpSet(&pDnode->data, pEpSet);
diff --git a/source/dnode/mnode/impl/src/mndMain.c b/source/dnode/mnode/impl/src/mndMain.c
index 995fe83cc5a69502a99a1b807ceffb6c4ec80a52..0ac36c20ed3463a5bd9e794c83edab22b1675408 100644
--- a/source/dnode/mnode/impl/src/mndMain.c
+++ b/source/dnode/mnode/impl/src/mndMain.c
@@ -369,8 +369,8 @@ int32_t mndProcessSyncMsg(SRpcMsg *pMsg) {
mError("failed to process sync msg:%p type:%s since %s", pMsg, TMSG_INFO(pMsg->msgType), terrstr());
return TAOS_SYNC_PROPOSE_OTHER_ERROR;
}
-
- char logBuf[512];
+
+ char logBuf[512] = {0};
char *syncNodeStr = sync2SimpleStr(pMgmt->sync);
snprintf(logBuf, sizeof(logBuf), "==vnodeProcessSyncReq== msgType:%d, syncNode: %s", pMsg->msgType, syncNodeStr);
syncRpcMsgLog2(logBuf, pMsg);
diff --git a/source/dnode/mnode/impl/src/mndQuery.c b/source/dnode/mnode/impl/src/mndQuery.c
index 78b70c9a74133b859b4175b195d4a939c37ebccc..97594f2b913334ac17e2bd5e6c8fc95e19a03e9e 100644
--- a/source/dnode/mnode/impl/src/mndQuery.c
+++ b/source/dnode/mnode/impl/src/mndQuery.c
@@ -26,19 +26,19 @@ int32_t mndProcessQueryMsg(SRpcMsg *pMsg) {
mTrace("msg:%p, in query queue is processing", pMsg);
switch (pMsg->msgType) {
case TDMT_VND_QUERY:
- code = qWorkerProcessQueryMsg(&handle, pMnode->pQuery, pMsg);
+ code = qWorkerProcessQueryMsg(&handle, pMnode->pQuery, pMsg, 0);
break;
case TDMT_VND_QUERY_CONTINUE:
- code = qWorkerProcessCQueryMsg(&handle, pMnode->pQuery, pMsg);
+ code = qWorkerProcessCQueryMsg(&handle, pMnode->pQuery, pMsg, 0);
break;
case TDMT_VND_FETCH:
- code = qWorkerProcessFetchMsg(pMnode, pMnode->pQuery, pMsg);
+ code = qWorkerProcessFetchMsg(pMnode, pMnode->pQuery, pMsg, 0);
break;
case TDMT_VND_DROP_TASK:
- code = qWorkerProcessDropMsg(pMnode, pMnode->pQuery, pMsg);
+ code = qWorkerProcessDropMsg(pMnode, pMnode->pQuery, pMsg, 0);
break;
case TDMT_VND_QUERY_HEARTBEAT:
- code = qWorkerProcessHbMsg(pMnode, pMnode->pQuery, pMsg);
+ code = qWorkerProcessHbMsg(pMnode, pMnode->pQuery, pMsg, 0);
break;
default:
terrno = TSDB_CODE_VND_APP_ERROR;
diff --git a/source/dnode/mnode/impl/src/mndSubscribe.c b/source/dnode/mnode/impl/src/mndSubscribe.c
index 17cf5d43b575dbe2840bec24a29e97dc399ccd7d..3f3f4f5b5d70dbb70f88f395b86d84833010c873 100644
--- a/source/dnode/mnode/impl/src/mndSubscribe.c
+++ b/source/dnode/mnode/impl/src/mndSubscribe.c
@@ -940,7 +940,7 @@ static int32_t mndRetrieveSubscribe(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock
}
// do not show for cleared subscription
-#if 0
+#if 1
int32_t sz = taosArrayGetSize(pSub->unassignedVgs);
for (int32_t i = 0; i < sz; i++) {
SMqVgEp *pVgEp = taosArrayGetP(pSub->unassignedVgs, i);
diff --git a/source/dnode/mnode/impl/src/mndSync.c b/source/dnode/mnode/impl/src/mndSync.c
index c6ab916ee178e1ff7bbfc7561a06d8e6ee1e3598..8b602d796c47f29efa8dcfb059d2aff5b3b9de40 100644
--- a/source/dnode/mnode/impl/src/mndSync.c
+++ b/source/dnode/mnode/impl/src/mndSync.c
@@ -48,6 +48,10 @@ void mndSyncCommitMsg(struct SSyncFSM *pFsm, const SRpcMsg *pMsg, SFsmCbMeta cbM
mError("trans:%d, failed to propose since %s", transId, tstrerror(pMgmt->errCode));
}
tsem_post(&pMgmt->syncSem);
+ } else {
+ if (cbMeta.index - sdbGetApplyIndex(pMnode->pSdb) > 100) {
+ sdbWriteFile(pMnode->pSdb);
+ }
}
}
@@ -64,40 +68,9 @@ void mndRestoreFinish(struct SSyncFSM *pFsm) {
mInfo("mnode sync restore finished");
mndTransPullup(pMnode);
mndSetRestore(pMnode, true);
- }
-}
-
-int32_t mndSnapshotRead(struct SSyncFSM *pFsm, const SSnapshot *pSnapshot, void **ppIter, char **ppBuf, int32_t *len) {
- SMnode *pMnode = pFsm->data;
- mInfo("start to read snapshot from sdb");
-
- int32_t code = sdbReadSnapshot(pMnode->pSdb, (SSdbIter **)ppIter, ppBuf, len);
- if (code != 0) {
- mError("failed to read snapshot from sdb since %s", terrstr());
- } else {
- if (*ppIter == NULL) {
- mInfo("successfully to read snapshot from sdb");
- }
- }
-
- return code;
-}
-
-int32_t mndSnapshotApply(struct SSyncFSM *pFsm, const SSnapshot *pSnapshot, char *pBuf, int32_t len) {
- SMnode *pMnode = pFsm->data;
- mndSetRestore(pMnode, false);
- mInfo("start to apply snapshot to sdb, len:%d", len);
-
- int32_t code = sdbApplySnapshot(pMnode->pSdb, pBuf, len);
- if (code != 0) {
- mError("failed to apply snapshot to sdb, len:%d", len);
} else {
- mInfo("successfully to apply snapshot to sdb, len:%d", len);
- mndSetRestore(pMnode, true);
+ mInfo("mnode sync restore finished, and will set ready after first deploy");
}
-
- // taosMemoryFree(pBuf);
- return code;
}
void mndReConfig(struct SSyncFSM *pFsm, SSyncCfg newCfg, SReConfigCbMeta cbMeta) {
@@ -116,20 +89,55 @@ void mndReConfig(struct SSyncFSM *pFsm, SSyncCfg newCfg, SReConfigCbMeta cbMeta)
}
}
+int32_t mndSnapshotStartRead(struct SSyncFSM *pFsm, void **ppReader) {
+ mInfo("start to read snapshot from sdb");
+ SMnode *pMnode = pFsm->data;
+ return sdbStartRead(pMnode->pSdb, (SSdbIter **)ppReader);
+}
+
+int32_t mndSnapshotStopRead(struct SSyncFSM *pFsm, void *pReader) {
+ mInfo("stop to read snapshot from sdb");
+ SMnode *pMnode = pFsm->data;
+ return sdbStopRead(pMnode->pSdb, pReader);
+}
+
+int32_t mndSnapshotDoRead(struct SSyncFSM *pFsm, void *pReader, void **ppBuf, int32_t *len) {
+ SMnode *pMnode = pFsm->data;
+ return sdbDoRead(pMnode->pSdb, pReader, ppBuf, len);
+}
+
+int32_t mndSnapshotStartWrite(struct SSyncFSM *pFsm, void **ppWriter) {
+ mInfo("start to apply snapshot to sdb");
+ SMnode *pMnode = pFsm->data;
+ return sdbStartWrite(pMnode->pSdb, (SSdbIter **)ppWriter);
+}
+
+int32_t mndSnapshotStopWrite(struct SSyncFSM *pFsm, void *pWriter, bool isApply) {
+ mInfo("stop to apply snapshot to sdb, apply:%d", isApply);
+ SMnode *pMnode = pFsm->data;
+ return sdbStopWrite(pMnode->pSdb, pWriter, isApply);
+}
+
+int32_t mndSnapshotDoWrite(struct SSyncFSM *pFsm, void *pWriter, void *pBuf, int32_t len) {
+ SMnode *pMnode = pFsm->data;
+ return sdbDoWrite(pMnode->pSdb, pWriter, pBuf, len);
+}
+
SSyncFSM *mndSyncMakeFsm(SMnode *pMnode) {
SSyncFSM *pFsm = taosMemoryCalloc(1, sizeof(SSyncFSM));
pFsm->data = pMnode;
-
pFsm->FpCommitCb = mndSyncCommitMsg;
pFsm->FpPreCommitCb = NULL;
pFsm->FpRollBackCb = NULL;
-
- pFsm->FpGetSnapshot = mndSyncGetSnapshot;
pFsm->FpRestoreFinishCb = mndRestoreFinish;
- pFsm->FpSnapshotRead = mndSnapshotRead;
- pFsm->FpSnapshotApply = mndSnapshotApply;
pFsm->FpReConfigCb = mndReConfig;
-
+ pFsm->FpGetSnapshot = mndSyncGetSnapshot;
+ pFsm->FpSnapshotStartRead = mndSnapshotStartRead;
+ pFsm->FpSnapshotStopRead = mndSnapshotStopRead;
+ pFsm->FpSnapshotDoRead = mndSnapshotDoRead;
+ pFsm->FpSnapshotStartWrite = mndSnapshotStartWrite;
+ pFsm->FpSnapshotStopWrite = mndSnapshotStopWrite;
+ pFsm->FpSnapshotDoWrite = mndSnapshotDoWrite;
return pFsm;
}
diff --git a/source/dnode/mnode/impl/test/acct/CMakeLists.txt b/source/dnode/mnode/impl/test/acct/CMakeLists.txt
index 40f8b0726e28446170a71bbbccde979376448fbb..d72292e34bd605ec91b16788fadd9f1ff1c68cc4 100644
--- a/source/dnode/mnode/impl/test/acct/CMakeLists.txt
+++ b/source/dnode/mnode/impl/test/acct/CMakeLists.txt
@@ -5,7 +5,9 @@ target_link_libraries(
PUBLIC sut
)
-add_test(
- NAME acctTest
- COMMAND acctTest
-)
+if(NOT TD_WINDOWS)
+ add_test(
+ NAME acctTest
+ COMMAND acctTest
+ )
+endif(NOT TD_WINDOWS)
diff --git a/source/dnode/mnode/impl/test/func/CMakeLists.txt b/source/dnode/mnode/impl/test/func/CMakeLists.txt
index ecb4f851be9d95a7c894d1e2ef2b3d9ce83067d3..2a8eb0a39d89275ae204e6405de2b774b4412619 100644
--- a/source/dnode/mnode/impl/test/func/CMakeLists.txt
+++ b/source/dnode/mnode/impl/test/func/CMakeLists.txt
@@ -5,7 +5,9 @@ target_link_libraries(
PUBLIC sut
)
-add_test(
- NAME funcTest
- COMMAND funcTest
-)
+if(NOT TD_WINDOWS)
+ add_test(
+ NAME funcTest
+ COMMAND funcTest
+ )
+endif(NOT TD_WINDOWS)
diff --git a/source/dnode/mnode/impl/test/profile/CMakeLists.txt b/source/dnode/mnode/impl/test/profile/CMakeLists.txt
index 8b811ebfed3a56ab139ecfc81f3556af2f9bb032..b6586192b2b4c6e428c2f00fddb11527a1747707 100644
--- a/source/dnode/mnode/impl/test/profile/CMakeLists.txt
+++ b/source/dnode/mnode/impl/test/profile/CMakeLists.txt
@@ -5,7 +5,9 @@ target_link_libraries(
PUBLIC sut
)
-add_test(
- NAME profileTest
- COMMAND profileTest
-)
+if(NOT TD_WINDOWS)
+ add_test(
+ NAME profileTest
+ COMMAND profileTest
+ )
+endif(NOT TD_WINDOWS)
diff --git a/source/dnode/mnode/impl/test/sdb/sdbTest.cpp b/source/dnode/mnode/impl/test/sdb/sdbTest.cpp
index df535c4456615b8b501236f2c7ad1684c2f4ac6f..43be55dd1de822d098475747a7b5b6452f379058 100644
--- a/source/dnode/mnode/impl/test/sdb/sdbTest.cpp
+++ b/source/dnode/mnode/impl/test/sdb/sdbTest.cpp
@@ -492,7 +492,7 @@ TEST_F(MndTestSdb, 01_Write_Str) {
ASSERT_EQ(sdbGetSize(pSdb, SDB_USER), 2);
ASSERT_EQ(sdbGetMaxId(pSdb, SDB_USER), -1);
- ASSERT_EQ(sdbGetTableVer(pSdb, SDB_USER), 2 );
+ ASSERT_EQ(sdbGetTableVer(pSdb, SDB_USER), 2);
sdbSetApplyIndex(pSdb, -1);
ASSERT_EQ(sdbGetApplyIndex(pSdb), -1);
ASSERT_EQ(mnode.insertTimes, 2);
@@ -895,7 +895,35 @@ TEST_F(MndTestSdb, 01_Read_Str) {
ASSERT_EQ(code, TSDB_CODE_SDB_OBJ_CREATING);
}
+ {
+ SSdbIter *pReader = NULL;
+ SSdbIter *pWritter = NULL;
+ void *pBuf = NULL;
+ int32_t len = 0;
+ int32_t code = 0;
+
+ code = sdbStartRead(pSdb, &pReader);
+ ASSERT_EQ(code, 0);
+ code = sdbStartWrite(pSdb, &pWritter);
+ ASSERT_EQ(code, 0);
+
+ while (sdbDoRead(pSdb, pReader, &pBuf, &len) == 0) {
+ if (pBuf != NULL && len != 0) {
+ sdbDoWrite(pSdb, pWritter, pBuf, len);
+ taosMemoryFree(pBuf);
+ } else {
+ break;
+ }
+ }
+
+ sdbStopRead(pSdb, pReader);
+ sdbStopWrite(pSdb, pWritter, true);
+ }
+
+ ASSERT_EQ(sdbGetSize(pSdb, SDB_CONSUMER), 1);
+ ASSERT_EQ(sdbGetTableVer(pSdb, SDB_CONSUMER), 4);
+
sdbCleanup(pSdb);
- ASSERT_EQ(mnode.insertTimes, 5);
- ASSERT_EQ(mnode.deleteTimes, 5);
+ ASSERT_EQ(mnode.insertTimes, 9);
+ ASSERT_EQ(mnode.deleteTimes, 9);
}
\ No newline at end of file
diff --git a/source/dnode/mnode/impl/test/show/CMakeLists.txt b/source/dnode/mnode/impl/test/show/CMakeLists.txt
index 69e93e7086147de77676ea02017a6ce5533acf42..9b4e21501ed478e527adfa69a5a2297e173876e1 100644
--- a/source/dnode/mnode/impl/test/show/CMakeLists.txt
+++ b/source/dnode/mnode/impl/test/show/CMakeLists.txt
@@ -5,7 +5,9 @@ target_link_libraries(
PUBLIC sut
)
-add_test(
- NAME showTest
- COMMAND showTest
-)
+if(NOT TD_WINDOWS)
+ add_test(
+ NAME showTest
+ COMMAND showTest
+ )
+endif(NOT TD_WINDOWS)
diff --git a/source/dnode/mnode/sdb/inc/sdb.h b/source/dnode/mnode/sdb/inc/sdb.h
index 411d4c59ea88333cfe37d104c2dba83c78b4378e..c66b47a24b13f0c9efd55dc965743416737177ea 100644
--- a/source/dnode/mnode/sdb/inc/sdb.h
+++ b/source/dnode/mnode/sdb/inc/sdb.h
@@ -187,6 +187,7 @@ typedef struct SSdb {
typedef struct SSdbIter {
TdFilePtr file;
int64_t total;
+ char *name;
} SSdbIter;
typedef struct {
@@ -380,8 +381,13 @@ SSdbRow *sdbAllocRow(int32_t objSize);
void *sdbGetRowObj(SSdbRow *pRow);
void sdbFreeRow(SSdb *pSdb, SSdbRow *pRow, bool callFunc);
-int32_t sdbReadSnapshot(SSdb *pSdb, SSdbIter **ppIter, char **ppBuf, int32_t *len);
-int32_t sdbApplySnapshot(SSdb *pSdb, char *pBuf, int32_t len);
+int32_t sdbStartRead(SSdb *pSdb, SSdbIter **ppIter);
+int32_t sdbStopRead(SSdb *pSdb, SSdbIter *pIter);
+int32_t sdbDoRead(SSdb *pSdb, SSdbIter *pIter, void **ppBuf, int32_t *len);
+
+int32_t sdbStartWrite(SSdb *pSdb, SSdbIter **ppIter);
+int32_t sdbStopWrite(SSdb *pSdb, SSdbIter *pIter, bool isApply);
+int32_t sdbDoWrite(SSdb *pSdb, SSdbIter *pIter, void *pBuf, int32_t len);
const char *sdbTableName(ESdbType type);
void sdbPrintOper(SSdb *pSdb, SSdbRow *pRow, const char *oper);
diff --git a/source/dnode/mnode/sdb/src/sdb.c b/source/dnode/mnode/sdb/src/sdb.c
index aef3476440b960c3e87ac8f8e4a9a83420438218..485b729deb52ffcdf4c5b76c1999124a5157f5b2 100644
--- a/source/dnode/mnode/sdb/src/sdb.c
+++ b/source/dnode/mnode/sdb/src/sdb.c
@@ -71,6 +71,7 @@ void sdbCleanup(SSdb *pSdb) {
}
if (pSdb->tmpDir != NULL) {
+ taosRemoveDir(pSdb->tmpDir);
taosMemoryFreeClear(pSdb->tmpDir);
}
diff --git a/source/dnode/mnode/sdb/src/sdbFile.c b/source/dnode/mnode/sdb/src/sdbFile.c
index b2dcbd68e3fa58e57f51689795fead4f982357f0..834e7a00c8c58638a9ac51ec498f87d66abe2b1e 100644
--- a/source/dnode/mnode/sdb/src/sdbFile.c
+++ b/source/dnode/mnode/sdb/src/sdbFile.c
@@ -445,168 +445,166 @@ int32_t sdbDeploy(SSdb *pSdb) {
return 0;
}
-static SSdbIter *sdbOpenIter(SSdb *pSdb) {
- char datafile[PATH_MAX] = {0};
- char tmpfile[PATH_MAX] = {0};
- snprintf(datafile, sizeof(datafile), "%s%ssdb.data", pSdb->currDir, TD_DIRSEP);
- snprintf(tmpfile, sizeof(tmpfile), "%s%ssdb.data", pSdb->tmpDir, TD_DIRSEP);
-
- taosThreadMutexLock(&pSdb->filelock);
- if (taosCopyFile(datafile, tmpfile) != 0) {
- taosThreadMutexUnlock(&pSdb->filelock);
- terrno = TAOS_SYSTEM_ERROR(errno);
- mError("failed to copy file %s to %s since %s", datafile, tmpfile, terrstr());
- return NULL;
- }
- taosThreadMutexUnlock(&pSdb->filelock);
-
+static SSdbIter *sdbCreateIter(SSdb *pSdb) {
SSdbIter *pIter = taosMemoryCalloc(1, sizeof(SSdbIter));
if (pIter == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
return NULL;
}
- pIter->file = taosOpenFile(tmpfile, TD_FILE_READ);
- if (pIter->file == NULL) {
- terrno = TAOS_SYSTEM_ERROR(errno);
- mError("failed to read file:%s since %s", tmpfile, terrstr());
+ char name[PATH_MAX + 100] = {0};
+ snprintf(name, sizeof(name), "%s%ssdb.data.%" PRIu64, pSdb->tmpDir, TD_DIRSEP, (uint64_t)pIter);
+ pIter->name = strdup(name);
+ if (pIter->name == NULL) {
taosMemoryFree(pIter);
+ terrno = TSDB_CODE_OUT_OF_MEMORY;
return NULL;
}
return pIter;
}
-static void sdbCloseIter(SSdb *pSdb, SSdbIter *pIter) {
+static void sdbCloseIter(SSdbIter *pIter) {
if (pIter == NULL) return;
+
if (pIter->file != NULL) {
taosCloseFile(&pIter->file);
+ pIter->file = NULL;
}
- char tmpfile[PATH_MAX] = {0};
- snprintf(tmpfile, sizeof(tmpfile), "%s%ssdb.data", pSdb->tmpDir, TD_DIRSEP);
- taosRemoveFile(tmpfile);
+ if (pIter->name != NULL) {
+ taosRemoveFile(pIter->name);
+ taosMemoryFree(pIter->name);
+ pIter->name = NULL;
+ }
+ mInfo("sdbiter:%p, is closed, total:%" PRId64, pIter, pIter->total);
taosMemoryFree(pIter);
- mInfo("sdbiter:%p, is closed", pIter);
}
-static SSdbIter *sdbGetIter(SSdb *pSdb, SSdbIter **ppIter) {
- SSdbIter *pIter = NULL;
- if (ppIter != NULL) pIter = *ppIter;
+int32_t sdbStartRead(SSdb *pSdb, SSdbIter **ppIter) {
+ SSdbIter *pIter = sdbCreateIter(pSdb);
+ if (pIter == NULL) return -1;
- if (pIter == NULL) {
- pIter = sdbOpenIter(pSdb);
- if (pIter != NULL) {
- mInfo("sdbiter:%p, is created to read snapshot", pIter);
- *ppIter = pIter;
- } else {
- mError("failed to create sdbiter to read snapshot since %s", terrstr());
- *ppIter = NULL;
- return NULL;
- }
- } else {
- mInfo("sdbiter:%p, continue to read snapshot, total:%" PRId64, pIter, pIter->total);
+ char datafile[PATH_MAX] = {0};
+ snprintf(datafile, sizeof(datafile), "%s%ssdb.data", pSdb->currDir, TD_DIRSEP);
+
+ taosThreadMutexLock(&pSdb->filelock);
+ if (taosCopyFile(datafile, pIter->name) < 0) {
+ taosThreadMutexUnlock(&pSdb->filelock);
+ terrno = TAOS_SYSTEM_ERROR(errno);
+ mError("failed to copy file %s to %s since %s", datafile, pIter->name, terrstr());
+ sdbCloseIter(pIter);
+ return -1;
+ }
+ taosThreadMutexUnlock(&pSdb->filelock);
+
+ pIter->file = taosOpenFile(pIter->name, TD_FILE_READ);
+ if (pIter->file == NULL) {
+ terrno = TAOS_SYSTEM_ERROR(errno);
+ mError("failed to open file:%s since %s", pIter->name, terrstr());
+ sdbCloseIter(pIter);
+ return -1;
}
- return pIter;
+ *ppIter = pIter;
+ mInfo("sdbiter:%p, is created to read snapshot, file:%s", pIter, pIter->name);
+ return 0;
}
-int32_t sdbReadSnapshot(SSdb *pSdb, SSdbIter **ppIter, char **ppBuf, int32_t *len) {
- SSdbIter *pIter = sdbGetIter(pSdb, ppIter);
- if (pIter == NULL) return -1;
+int32_t sdbStopRead(SSdb *pSdb, SSdbIter *pIter) {
+ sdbCloseIter(pIter);
+ return 0;
+}
+int32_t sdbDoRead(SSdb *pSdb, SSdbIter *pIter, void **ppBuf, int32_t *len) {
int32_t maxlen = 100;
- char *pBuf = taosMemoryCalloc(1, maxlen);
+ void *pBuf = taosMemoryCalloc(1, maxlen);
if (pBuf == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
- sdbCloseIter(pSdb, pIter);
return -1;
}
int32_t readlen = taosReadFile(pIter->file, pBuf, maxlen);
- if (readlen < 0 || (readlen == 0 && errno != 0)) {
+ if (readlen < 0 || readlen > maxlen) {
terrno = TAOS_SYSTEM_ERROR(errno);
mError("sdbiter:%p, failed to read snapshot since %s, total:%" PRId64, pIter, terrstr(), pIter->total);
*ppBuf = NULL;
*len = 0;
- *ppIter = NULL;
- sdbCloseIter(pSdb, pIter);
taosMemoryFree(pBuf);
return -1;
} else if (readlen == 0) {
mInfo("sdbiter:%p, read snapshot to the end, total:%" PRId64, pIter, pIter->total);
*ppBuf = NULL;
*len = 0;
- *ppIter = NULL;
- sdbCloseIter(pSdb, pIter);
taosMemoryFree(pBuf);
return 0;
- } else if ((readlen < maxlen && errno != 0) || readlen == maxlen) {
+ } else { // (readlen <= maxlen)
pIter->total += readlen;
mInfo("sdbiter:%p, read:%d bytes from snapshot, total:%" PRId64, pIter, readlen, pIter->total);
*ppBuf = pBuf;
*len = readlen;
return 0;
- } else if (readlen < maxlen && errno == 0) {
- mInfo("sdbiter:%p, read snapshot to the end, total:%" PRId64, pIter, pIter->total);
- *ppBuf = pBuf;
- *len = readlen;
- *ppIter = NULL;
- sdbCloseIter(pSdb, pIter);
- return 0;
- } else {
- // impossible
- mError("sdbiter:%p, read:%d bytes from snapshot, total:%" PRId64, pIter, readlen, pIter->total);
- *ppBuf = NULL;
- *len = 0;
- *ppIter = NULL;
- sdbCloseIter(pSdb, pIter);
- taosMemoryFree(pBuf);
- return -1;
}
}
-int32_t sdbApplySnapshot(SSdb *pSdb, char *pBuf, int32_t len) {
- char datafile[PATH_MAX] = {0};
- char tmpfile[PATH_MAX] = {0};
- snprintf(datafile, sizeof(datafile), "%s%ssdb.data", pSdb->currDir, TD_DIRSEP);
- snprintf(tmpfile, sizeof(tmpfile), "%s%ssdb.data", pSdb->tmpDir, TD_DIRSEP);
+int32_t sdbStartWrite(SSdb *pSdb, SSdbIter **ppIter) {
+ SSdbIter *pIter = sdbCreateIter(pSdb);
+ if (pIter == NULL) return -1;
- TdFilePtr pFile = taosOpenFile(tmpfile, TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_TRUNC);
- if (pFile == NULL) {
+ pIter->file = taosOpenFile(pIter->name, TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_TRUNC);
+ if (pIter->file == NULL) {
terrno = TAOS_SYSTEM_ERROR(errno);
- mError("failed to write %s since %s", tmpfile, terrstr());
+ mError("failed to open %s since %s", pIter->name, terrstr());
return -1;
}
- int32_t writelen = taosWriteFile(pFile, pBuf, len);
- if (writelen != len) {
- terrno = TAOS_SYSTEM_ERROR(errno);
- mError("failed to write %s since %s", tmpfile, terrstr());
- taosCloseFile(&pFile);
- return -1;
- }
+ *ppIter = pIter;
+ mInfo("sdbiter:%p, is created to write snapshot, file:%s", pIter, pIter->name);
+ return 0;
+}
- if (taosFsyncFile(pFile) != 0) {
- terrno = TAOS_SYSTEM_ERROR(errno);
- mError("failed to fsync %s since %s", tmpfile, terrstr());
- taosCloseFile(&pFile);
- return -1;
+int32_t sdbStopWrite(SSdb *pSdb, SSdbIter *pIter, bool isApply) {
+ int32_t code = 0;
+
+ if (!isApply) {
+ sdbCloseIter(pIter);
+ mInfo("sdbiter:%p, not apply to sdb", pIter);
+ return 0;
}
- (void)taosCloseFile(&pFile);
+ taosFsyncFile(pIter->file);
+ taosCloseFile(&pIter->file);
+ pIter->file = NULL;
- if (taosRenameFile(tmpfile, datafile) != 0) {
+ char datafile[PATH_MAX] = {0};
+ snprintf(datafile, sizeof(datafile), "%s%ssdb.data", pSdb->currDir, TD_DIRSEP);
+ if (taosRenameFile(pIter->name, datafile) != 0) {
terrno = TAOS_SYSTEM_ERROR(errno);
- mError("failed to rename file %s to %s since %s", tmpfile, datafile, terrstr());
+ mError("sdbiter:%p, failed to rename file %s to %s since %s", pIter, pIter->name, datafile, terrstr());
+ sdbCloseIter(pIter);
return -1;
}
+ sdbCloseIter(pIter);
if (sdbReadFile(pSdb) != 0) {
- mError("failed to read from %s since %s", datafile, terrstr());
+ mError("sdbiter:%p, failed to read from %s since %s", pIter, datafile, terrstr());
+ return -1;
+ }
+
+ mInfo("sdbiter:%p, successfully applyed to sdb", pIter);
+ return 0;
+}
+
+int32_t sdbDoWrite(SSdb *pSdb, SSdbIter *pIter, void *pBuf, int32_t len) {
+ int32_t writelen = taosWriteFile(pIter->file, pBuf, len);
+ if (writelen != len) {
+ terrno = TAOS_SYSTEM_ERROR(errno);
+ mError("failed to write len:%d since %s, total:%" PRId64, len, terrstr(), pIter->total);
return -1;
}
+ pIter->total += writelen;
+ mInfo("sdbiter:%p, write:%d bytes to snapshot, total:%" PRId64, pIter, writelen, pIter->total);
return 0;
}
\ No newline at end of file
diff --git a/source/dnode/qnode/src/qnode.c b/source/dnode/qnode/src/qnode.c
index 6d31e20d9bc03908025b100dd135c7e706a0b647..40aa572a56709a97e454cdc82cb7e97852356b27 100644
--- a/source/dnode/qnode/src/qnode.c
+++ b/source/dnode/qnode/src/qnode.c
@@ -40,37 +40,46 @@ void qndClose(SQnode *pQnode) {
taosMemoryFree(pQnode);
}
-int32_t qndGetLoad(SQnode *pQnode, SQnodeLoad *pLoad) { return 0; }
+int32_t qndGetLoad(SQnode *pQnode, SQnodeLoad *pLoad) {
+ SMsgCb* pCb = &pQnode->msgCb;
-int32_t qndProcessQueryMsg(SQnode *pQnode, SRpcMsg *pMsg) {
+ pLoad->numOfQueryInQueue = pCb->qsizeFp(pCb->mgmt, pQnode->qndId, QUERY_QUEUE);
+ pLoad->numOfFetchInQueue = pCb->qsizeFp(pCb->mgmt, pQnode->qndId, FETCH_QUEUE);
+ pLoad->waitTimeInQueryQUeue = qWorkerGetWaitTimeInQueue(pQnode->pQuery, QUERY_QUEUE);
+ pLoad->waitTimeInFetchQUeue = qWorkerGetWaitTimeInQueue(pQnode->pQuery, FETCH_QUEUE);
+
+ return 0;
+}
+
+int32_t qndProcessQueryMsg(SQnode *pQnode, int64_t ts, SRpcMsg *pMsg) {
int32_t code = -1;
SReadHandle handle = {.pMsgCb = &pQnode->msgCb};
qTrace("message in qnode queue is processing");
switch (pMsg->msgType) {
case TDMT_VND_QUERY:
- code = qWorkerProcessQueryMsg(&handle, pQnode->pQuery, pMsg);
+ code = qWorkerProcessQueryMsg(&handle, pQnode->pQuery, pMsg, ts);
break;
case TDMT_VND_QUERY_CONTINUE:
- code = qWorkerProcessCQueryMsg(&handle, pQnode->pQuery, pMsg);
+ code = qWorkerProcessCQueryMsg(&handle, pQnode->pQuery, pMsg, ts);
break;
case TDMT_VND_FETCH:
- code = qWorkerProcessFetchMsg(pQnode, pQnode->pQuery, pMsg);
+ code = qWorkerProcessFetchMsg(pQnode, pQnode->pQuery, pMsg, ts);
break;
case TDMT_VND_FETCH_RSP:
- code = qWorkerProcessFetchRsp(pQnode, pQnode->pQuery, pMsg);
+ code = qWorkerProcessFetchRsp(pQnode, pQnode->pQuery, pMsg, ts);
break;
case TDMT_VND_CANCEL_TASK:
- code = qWorkerProcessCancelMsg(pQnode, pQnode->pQuery, pMsg);
+ code = qWorkerProcessCancelMsg(pQnode, pQnode->pQuery, pMsg, ts);
break;
case TDMT_VND_DROP_TASK:
- code = qWorkerProcessDropMsg(pQnode, pQnode->pQuery, pMsg);
+ code = qWorkerProcessDropMsg(pQnode, pQnode->pQuery, pMsg, ts);
break;
case TDMT_VND_CONSUME:
// code = tqProcessConsumeReq(pQnode->pTq, pMsg);
// break;
case TDMT_VND_QUERY_HEARTBEAT:
- code = qWorkerProcessHbMsg(pQnode, pQnode->pQuery, pMsg);
+ code = qWorkerProcessHbMsg(pQnode, pQnode->pQuery, pMsg, ts);
break;
default:
qError("unknown msg type:%d in qnode queue", pMsg->msgType);
diff --git a/source/dnode/vnode/inc/vnode.h b/source/dnode/vnode/inc/vnode.h
index 1973bedb0c46f02397e16972c1f8bb0ed6230089..e4343e3bbf63a9dd847cc1bd2f79e2ef35721cd3 100644
--- a/source/dnode/vnode/inc/vnode.h
+++ b/source/dnode/vnode/inc/vnode.h
@@ -105,10 +105,12 @@ tsdbReaderT tsdbQueryCacheLast(SVnode *pVnode, SQueryTableDataCond *pCond, STab
void *pMemRef);
int32_t tsdbGetFileBlocksDistInfo(tsdbReaderT *pReader, STableBlockDistInfo *pTableBlockInfo);
bool isTsdbCacheLastRow(tsdbReaderT *pReader);
-int32_t tsdbGetAllTableList(SMeta* pMeta, uint64_t uid, SArray* list);
+int32_t tsdbGetAllTableList(SMeta *pMeta, uint64_t uid, SArray *list);
+void * tsdbGetIdx(SMeta *pMeta);
int64_t tsdbGetNumOfRowsInMemTable(tsdbReaderT *pHandle);
-bool tsdbNextDataBlock(tsdbReaderT pTsdbReadHandle);
-void tsdbRetrieveDataBlockInfo(tsdbReaderT *pTsdbReadHandle, SDataBlockInfo *pBlockInfo);
+
+bool tsdbNextDataBlock(tsdbReaderT pTsdbReadHandle);
+void tsdbRetrieveDataBlockInfo(tsdbReaderT *pTsdbReadHandle, SDataBlockInfo *pBlockInfo);
int32_t tsdbRetrieveDataBlockStatisInfo(tsdbReaderT *pTsdbReadHandle, SColumnDataAgg ***pBlockStatis, bool *allHave);
SArray *tsdbRetrieveDataBlock(tsdbReaderT *pTsdbReadHandle, SArray *pColumnIdList);
void tsdbResetReadHandle(tsdbReaderT queryHandle, SQueryTableDataCond *pCond, int32_t tWinIdx);
@@ -174,7 +176,7 @@ struct SMetaEntry {
int64_t version;
int8_t type;
tb_uid_t uid;
- char *name;
+ char * name;
union {
struct {
SSchemaWrapper schemaRow;
@@ -202,17 +204,17 @@ struct SMetaEntry {
struct SMetaReader {
int32_t flags;
- SMeta *pMeta;
+ SMeta * pMeta;
SDecoder coder;
SMetaEntry me;
- void *pBuf;
+ void * pBuf;
int32_t szBuf;
};
struct SMTbCursor {
- TBC *pDbc;
- void *pKey;
- void *pVal;
+ TBC * pDbc;
+ void * pKey;
+ void * pVal;
int32_t kLen;
int32_t vLen;
SMetaReader mr;
diff --git a/source/dnode/vnode/src/inc/vnodeInt.h b/source/dnode/vnode/src/inc/vnodeInt.h
index ba25c5e2866995d71c1c7cdee2473a87b609d2fe..0e67d9e426f1b708e927d986f7c9d797acc8759d 100644
--- a/source/dnode/vnode/src/inc/vnodeInt.h
+++ b/source/dnode/vnode/src/inc/vnodeInt.h
@@ -103,6 +103,7 @@ SArray* metaGetSmaTbUids(SMeta* pMeta);
int32_t metaSnapshotReaderOpen(SMeta* pMeta, SMetaSnapshotReader** ppReader, int64_t sver, int64_t ever);
int32_t metaSnapshotReaderClose(SMetaSnapshotReader* pReader);
int32_t metaSnapshotRead(SMetaSnapshotReader* pReader, void** ppData, uint32_t* nData);
+void* metaGetIdx(SMeta* pMeta);
int32_t metaCreateTSma(SMeta* pMeta, int64_t version, SSmaCfg* pCfg);
int32_t metaDropTSma(SMeta* pMeta, int64_t indexUid);
diff --git a/source/dnode/vnode/src/meta/metaTable.c b/source/dnode/vnode/src/meta/metaTable.c
index 7182f496c4d6410a705a82dba1c92ff6561a5faf..f610f18126ef86a268801f73f5a951c97a380867 100644
--- a/source/dnode/vnode/src/meta/metaTable.c
+++ b/source/dnode/vnode/src/meta/metaTable.c
@@ -31,9 +31,9 @@ int metaCreateSTable(SMeta *pMeta, int64_t version, SVCreateStbReq *pReq) {
int vLen = 0;
const void *pKey = NULL;
const void *pVal = NULL;
- void *pBuf = NULL;
+ void * pBuf = NULL;
int32_t szBuf = 0;
- void *p = NULL;
+ void * p = NULL;
SMetaReader mr = {0};
// validate req
@@ -87,7 +87,7 @@ int metaDropSTable(SMeta *pMeta, int64_t verison, SVDropStbReq *pReq) {
}
// drop all child tables
- TBC *pCtbIdxc = NULL;
+ TBC * pCtbIdxc = NULL;
SArray *pArray = taosArrayInit(8, sizeof(tb_uid_t));
tdbTbcOpen(pMeta->pCtbIdx, &pCtbIdxc, &pMeta->txn);
@@ -142,8 +142,8 @@ _exit:
int metaAlterSTable(SMeta *pMeta, int64_t version, SVCreateStbReq *pReq) {
SMetaEntry oStbEntry = {0};
SMetaEntry nStbEntry = {0};
- TBC *pUidIdxc = NULL;
- TBC *pTbDbc = NULL;
+ TBC * pUidIdxc = NULL;
+ TBC * pTbDbc = NULL;
const void *pData;
int nData;
int64_t oversion;
@@ -262,7 +262,7 @@ _err:
}
int metaDropTable(SMeta *pMeta, int64_t version, SVDropTbReq *pReq, SArray *tbUids) {
- void *pData = NULL;
+ void * pData = NULL;
int nData = 0;
int rc = 0;
tb_uid_t uid;
@@ -288,7 +288,7 @@ int metaDropTable(SMeta *pMeta, int64_t version, SVDropTbReq *pReq, SArray *tbUi
}
static int metaDropTableByUid(SMeta *pMeta, tb_uid_t uid, int *type) {
- void *pData = NULL;
+ void * pData = NULL;
int nData = 0;
int rc = 0;
int64_t version;
@@ -324,14 +324,14 @@ static int metaDropTableByUid(SMeta *pMeta, tb_uid_t uid, int *type) {
}
static int metaAlterTableColumn(SMeta *pMeta, int64_t version, SVAlterTbReq *pAlterTbReq) {
- void *pVal = NULL;
+ void * pVal = NULL;
int nVal = 0;
- const void *pData = NULL;
+ const void * pData = NULL;
int nData = 0;
int ret = 0;
tb_uid_t uid;
int64_t oversion;
- SSchema *pColumn = NULL;
+ SSchema * pColumn = NULL;
SMetaEntry entry = {0};
SSchemaWrapper *pSchema;
int c;
@@ -479,7 +479,7 @@ _err:
static int metaUpdateTableTagVal(SMeta *pMeta, int64_t version, SVAlterTbReq *pAlterTbReq) {
SMetaEntry ctbEntry = {0};
SMetaEntry stbEntry = {0};
- void *pVal = NULL;
+ void * pVal = NULL;
int nVal = 0;
int ret;
int c;
@@ -510,7 +510,7 @@ static int metaUpdateTableTagVal(SMeta *pMeta, int64_t version, SVAlterTbReq *pA
oversion = *(int64_t *)pData;
// search table.db
- TBC *pTbDbc = NULL;
+ TBC * pTbDbc = NULL;
SDecoder dc1 = {0};
SDecoder dc2 = {0};
@@ -534,7 +534,7 @@ static int metaUpdateTableTagVal(SMeta *pMeta, int64_t version, SVAlterTbReq *pA
metaDecodeEntry(&dc2, &stbEntry);
SSchemaWrapper *pTagSchema = &stbEntry.stbEntry.schemaTag;
- SSchema *pColumn = NULL;
+ SSchema * pColumn = NULL;
int32_t iCol = 0;
for (;;) {
pColumn = NULL;
@@ -639,8 +639,8 @@ int metaAlterTable(SMeta *pMeta, int64_t version, SVAlterTbReq *pReq) {
static int metaSaveToTbDb(SMeta *pMeta, const SMetaEntry *pME) {
STbDbKey tbDbKey;
- void *pKey = NULL;
- void *pVal = NULL;
+ void * pKey = NULL;
+ void * pVal = NULL;
int kLen = 0;
int vLen = 0;
SEncoder coder = {0};
@@ -755,14 +755,14 @@ static void metaDestroyTagIdxKey(STagIdxKey *pTagIdxKey) {
}
static int metaUpdateTagIdx(SMeta *pMeta, const SMetaEntry *pCtbEntry) {
- void *pData = NULL;
+ void * pData = NULL;
int nData = 0;
STbDbKey tbDbKey = {0};
SMetaEntry stbEntry = {0};
- STagIdxKey *pTagIdxKey = NULL;
+ STagIdxKey * pTagIdxKey = NULL;
int32_t nTagIdxKey;
const SSchema *pTagColumn; // = &stbEntry.stbEntry.schema.pSchema[0];
- const void *pTagData = NULL; //
+ const void * pTagData = NULL; //
SDecoder dc = {0};
// get super table
@@ -804,7 +804,7 @@ static int metaUpdateTagIdx(SMeta *pMeta, const SMetaEntry *pCtbEntry) {
static int metaSaveToSkmDb(SMeta *pMeta, const SMetaEntry *pME) {
SEncoder coder = {0};
- void *pVal = NULL;
+ void * pVal = NULL;
int vLen = 0;
int rcode = 0;
SSkmDbKey skmDbKey = {0};
@@ -880,3 +880,11 @@ _err:
metaULock(pMeta);
return -1;
}
+// refactor later
+void *metaGetIdx(SMeta *pMeta) {
+#ifdef USE_INVERTED_INDEX
+ return pMeta->pTagIvtIdx;
+#else
+ return pMeta->pTagIdx;
+#endif
+}
diff --git a/source/dnode/vnode/src/tq/tq.c b/source/dnode/vnode/src/tq/tq.c
index 192016166a4d386aa6873955d9411efe32df2412..96ce6e8eeeeaf17243d8e29baa733c369437c931 100644
--- a/source/dnode/vnode/src/tq/tq.c
+++ b/source/dnode/vnode/src/tq/tq.c
@@ -235,6 +235,15 @@ int32_t tqUpdateTbUidList(STQ* pTq, const SArray* tbUidList, bool isAdd) {
}
}
}
+ while (1) {
+ pIter = taosHashIterate(pTq->pStreamTasks, pIter);
+ if (pIter == NULL) break;
+ SStreamTask* pTask = (SStreamTask*)pIter;
+ if (pTask->inputType == STREAM_INPUT__DATA_SUBMIT) {
+ int32_t code = qUpdateQualifiedTableId(pTask->exec.executor, tbUidList, isAdd);
+ ASSERT(code == 0);
+ }
+ }
return 0;
}
diff --git a/source/dnode/vnode/src/tsdb/tsdbRead.c b/source/dnode/vnode/src/tsdb/tsdbRead.c
index 34d2f7efdfc047f108e06914737c3a284d5cf8a9..fbfa70c1176a163ef3a3995ab48fe6010762dc77 100644
--- a/source/dnode/vnode/src/tsdb/tsdbRead.c
+++ b/source/dnode/vnode/src/tsdb/tsdbRead.c
@@ -248,8 +248,8 @@ static SArray* createCheckInfoFromTableGroup(STsdbReadHandle* pTsdbReadHandle, S
}
taosArrayPush(pTableCheckInfo, &info);
- tsdbDebug("%p check table uid:%" PRId64 " from lastKey:%" PRId64 " %s", pTsdbReadHandle, info.tableId,
- info.lastKey, pTsdbReadHandle->idStr);
+ tsdbDebug("%p check table uid:%" PRId64 " from lastKey:%" PRId64 " %s", pTsdbReadHandle, info.tableId, info.lastKey,
+ pTsdbReadHandle->idStr);
}
// TODO group table according to the tag value.
@@ -352,13 +352,16 @@ static STsdb* getTsdbByRetentions(SVnode* pVnode, STsdbReadHandle* pReadHandle,
}
if (level == TSDB_RETENTION_L0) {
- tsdbDebug("vgId:%d read handle %p rsma level %d is selected to query", TD_VID(pVnode), pReadHandle, TSDB_RETENTION_L0);
+ tsdbDebug("vgId:%d read handle %p rsma level %d is selected to query", TD_VID(pVnode), pReadHandle,
+ TSDB_RETENTION_L0);
return VND_RSMA0(pVnode);
} else if (level == TSDB_RETENTION_L1) {
- tsdbDebug("vgId:%d read handle %p rsma level %d is selected to query", TD_VID(pVnode), pReadHandle, TSDB_RETENTION_L1);
+ tsdbDebug("vgId:%d read handle %p rsma level %d is selected to query", TD_VID(pVnode), pReadHandle,
+ TSDB_RETENTION_L1);
return VND_RSMA1(pVnode);
} else {
- tsdbDebug("vgId:%d read handle %p rsma level %d is selected to query", TD_VID(pVnode), pReadHandle, TSDB_RETENTION_L2);
+ tsdbDebug("vgId:%d read handle %p rsma level %d is selected to query", TD_VID(pVnode), pReadHandle,
+ TSDB_RETENTION_L2);
return VND_RSMA2(pVnode);
}
}
@@ -401,7 +404,7 @@ static STsdbReadHandle* tsdbQueryTablesImpl(SVnode* pVnode, SQueryTableDataCond*
if (pCond->numOfCols > 0) {
int32_t rowLen = 0;
- for(int32_t i = 0; i < pCond->numOfCols; ++i) {
+ for (int32_t i = 0; i < pCond->numOfCols; ++i) {
rowLen += pCond->colList[i].bytes;
}
@@ -685,7 +688,7 @@ SArray* tsdbGetQueriedTableList(tsdbReaderT* pHandle) {
}
// leave only one table for each group
-//static STableGroupInfo* trimTableGroup(STimeWindow* window, STableGroupInfo* pGroupList) {
+// static STableGroupInfo* trimTableGroup(STimeWindow* window, STableGroupInfo* pGroupList) {
// assert(pGroupList);
// size_t numOfGroup = taosArrayGetSize(pGroupList->pGroupList);
//
@@ -717,7 +720,7 @@ SArray* tsdbGetQueriedTableList(tsdbReaderT* pHandle) {
// return pNew;
//}
-//tsdbReaderT tsdbQueryRowsInExternalWindow(SVnode* pVnode, SQueryTableDataCond* pCond, STableGroupInfo* groupList,
+// tsdbReaderT tsdbQueryRowsInExternalWindow(SVnode* pVnode, SQueryTableDataCond* pCond, STableGroupInfo* groupList,
// uint64_t qId, uint64_t taskId) {
// STableGroupInfo* pNew = trimTableGroup(&pCond->twindow, groupList);
//
@@ -1324,7 +1327,6 @@ static int32_t handleDataMergeIfNeeded(STsdbReadHandle* pTsdbReadHandle, SBlock*
if ((ascScan && (key != TSKEY_INITIAL_VAL && key <= binfo.window.ekey)) ||
(!ascScan && (key != TSKEY_INITIAL_VAL && key >= binfo.window.skey))) {
-
bool cacheDataInFileBlockHole = (ascScan && (key != TSKEY_INITIAL_VAL && key < binfo.window.skey)) ||
(!ascScan && (key != TSKEY_INITIAL_VAL && key > binfo.window.ekey));
if (cacheDataInFileBlockHole) {
@@ -1367,7 +1369,7 @@ static int32_t handleDataMergeIfNeeded(STsdbReadHandle* pTsdbReadHandle, SBlock*
pTsdbReadHandle->realNumOfRows = binfo.rows;
cur->rows = binfo.rows;
- cur->win = binfo.window;
+ cur->win = binfo.window;
cur->mixBlock = false;
cur->blockCompleted = true;
@@ -1378,9 +1380,9 @@ static int32_t handleDataMergeIfNeeded(STsdbReadHandle* pTsdbReadHandle, SBlock*
cur->lastKey = binfo.window.skey - 1;
cur->pos = -1;
}
- } else { // partially copy to dest buffer
+ } else { // partially copy to dest buffer
// make sure to only load once
- bool firstTimeExtract = ((cur->pos == 0 && ascScan) || (cur->pos == binfo.rows -1 && (!ascScan)));
+ bool firstTimeExtract = ((cur->pos == 0 && ascScan) || (cur->pos == binfo.rows - 1 && (!ascScan)));
if (pTsdbReadHandle->outputCapacity < binfo.rows && firstTimeExtract) {
code = doLoadFileDataBlock(pTsdbReadHandle, pBlock, pCheckInfo, cur->slot);
if (code != TSDB_CODE_SUCCESS) {
@@ -1889,7 +1891,7 @@ static void copyAllRemainRowsFromFileBlock(STsdbReadHandle* pTsdbReadHandle, STa
bool ascScan = ASCENDING_TRAVERSE(pTsdbReadHandle->order);
- int32_t step = ascScan? 1 : -1;
+ int32_t step = ascScan ? 1 : -1;
int32_t start = cur->pos;
int32_t end = endPos;
@@ -1904,8 +1906,8 @@ static void copyAllRemainRowsFromFileBlock(STsdbReadHandle* pTsdbReadHandle, STa
// the time window should always be ascending order: skey <= ekey
cur->win = (STimeWindow){.skey = tsArray[start], .ekey = tsArray[end]};
cur->mixBlock = (numOfRows != pBlockInfo->rows);
- cur->lastKey = tsArray[endPos] + step;
- cur->blockCompleted = (ascScan? (endPos == pBlockInfo->rows - 1):(endPos == 0));
+ cur->lastKey = tsArray[endPos] + step;
+ cur->blockCompleted = (ascScan ? (endPos == pBlockInfo->rows - 1) : (endPos == 0));
// The value of pos may be -1 or pBlockInfo->rows, and it is invalid in both cases.
int32_t pos = endPos + step;
@@ -1921,7 +1923,7 @@ int32_t getEndPosInDataBlock(STsdbReadHandle* pTsdbReadHandle, SDataBlockInfo* p
// NOTE: reverse the order to find the end position in data block
int32_t endPos = -1;
bool ascScan = ASCENDING_TRAVERSE(pTsdbReadHandle->order);
- int32_t order = ascScan? TSDB_ORDER_DESC : TSDB_ORDER_ASC;
+ int32_t order = ascScan ? TSDB_ORDER_DESC : TSDB_ORDER_ASC;
SQueryFilePos* cur = &pTsdbReadHandle->cur;
SDataCols* pCols = pTsdbReadHandle->rhelper.pDCols[0];
@@ -1981,7 +1983,7 @@ static void doMergeTwoLevelData(STsdbReadHandle* pTsdbReadHandle, STableCheckInf
assert(pCols->numOfRows == pBlock->numOfRows && tsArray[0] == pBlock->keyFirst &&
tsArray[pBlock->numOfRows - 1] == pBlock->keyLast);
- bool ascScan = ASCENDING_TRAVERSE(pTsdbReadHandle->order);
+ bool ascScan = ASCENDING_TRAVERSE(pTsdbReadHandle->order);
int32_t step = ascScan ? 1 : -1;
// for search the endPos, so the order needs to reverse
@@ -1992,8 +1994,9 @@ static void doMergeTwoLevelData(STsdbReadHandle* pTsdbReadHandle, STableCheckInf
STimeWindow* pWin = &blockInfo.window;
tsdbDebug("%p uid:%" PRIu64 " start merge data block, file block range:%" PRIu64 "-%" PRIu64
- " rows:%d, start:%d, end:%d, %s", pTsdbReadHandle, pCheckInfo->tableId, pWin->skey, pWin->ekey, blockInfo.rows,
- cur->pos, endPos, pTsdbReadHandle->idStr);
+ " rows:%d, start:%d, end:%d, %s",
+ pTsdbReadHandle, pCheckInfo->tableId, pWin->skey, pWin->ekey, blockInfo.rows, cur->pos, endPos,
+ pTsdbReadHandle->idStr);
// compared with the data from in-memory buffer, to generate the correct timestamp array list
int32_t numOfRows = 0;
@@ -2112,8 +2115,9 @@ static void doMergeTwoLevelData(STsdbReadHandle* pTsdbReadHandle, STableCheckInf
}
// still assign data into current row
- numOfRows += mergeTwoRowFromMem(pTsdbReadHandle, pTsdbReadHandle->outputCapacity, &curRow, row1, row2, numOfCols,
- pCheckInfo->tableId, pSchema1, pSchema2, pCfg->update, &lastKeyAppend);
+ numOfRows +=
+ mergeTwoRowFromMem(pTsdbReadHandle, pTsdbReadHandle->outputCapacity, &curRow, row1, row2, numOfCols,
+ pCheckInfo->tableId, pSchema1, pSchema2, pCfg->update, &lastKeyAppend);
if (cur->win.skey == TSKEY_INITIAL_VAL) {
cur->win.skey = key;
@@ -2178,8 +2182,7 @@ static void doMergeTwoLevelData(STsdbReadHandle* pTsdbReadHandle, STableCheckInf
* if cache is empty, load remain file block data. In contrast, if there are remain data in cache, do NOT
* copy them all to result buffer, since it may be overlapped with file data block.
*/
- if (node == NULL ||
- ((TD_ROW_KEY((STSRow*)SL_GET_NODE_DATA(node)) > pTsdbReadHandle->window.ekey) && ascScan) ||
+ if (node == NULL || ((TD_ROW_KEY((STSRow*)SL_GET_NODE_DATA(node)) > pTsdbReadHandle->window.ekey) && ascScan) ||
((TD_ROW_KEY((STSRow*)SL_GET_NODE_DATA(node)) < pTsdbReadHandle->window.ekey) && !ascScan)) {
// no data in cache or data in cache is greater than the ekey of time window, load data from file block
if (cur->win.skey == TSKEY_INITIAL_VAL) {
@@ -2200,7 +2203,7 @@ static void doMergeTwoLevelData(STsdbReadHandle* pTsdbReadHandle, STableCheckInf
}
cur->blockCompleted = (((pos > endPos || cur->lastKey > pTsdbReadHandle->window.ekey) && ascScan) ||
- ((pos < endPos || cur->lastKey < pTsdbReadHandle->window.ekey) && !ascScan));
+ ((pos < endPos || cur->lastKey < pTsdbReadHandle->window.ekey) && !ascScan));
if (!ascScan) {
TSWAP(cur->win.skey, cur->win.ekey);
@@ -2819,6 +2822,12 @@ static int tsdbReadRowsFromCache(STableCheckInfo* pCheckInfo, TSKEY maxKey, int
return numOfRows;
}
+void* tsdbGetIdx(SMeta* pMeta) {
+ if (pMeta == NULL) {
+ return NULL;
+ }
+ return metaGetIdx(pMeta);
+}
int32_t tsdbGetAllTableList(SMeta* pMeta, uint64_t uid, SArray* list) {
SMCtbCursor* pCur = metaOpenCtbCursor(pMeta, uid);
@@ -3407,65 +3416,65 @@ int32_t checkForCachedLast(STsdbReadHandle* pTsdbReadHandle) {
STimeWindow updateLastrowForEachGroup(STableListInfo* pList) {
STimeWindow window = {INT64_MAX, INT64_MIN};
-// int32_t totalNumOfTable = 0;
-// SArray* emptyGroup = taosArrayInit(16, sizeof(int32_t));
-//
-// // NOTE: starts from the buffer in case of descending timestamp order check data blocks
-// size_t numOfGroups = taosArrayGetSize(groupList->pGroupList);
-// for (int32_t j = 0; j < numOfGroups; ++j) {
-// SArray* pGroup = taosArrayGetP(groupList->pGroupList, j);
-// TSKEY key = TSKEY_INITIAL_VAL;
-//
-// STableKeyInfo keyInfo = {0};
-//
-// size_t numOfTables = taosArrayGetSize(pGroup);
-// for (int32_t i = 0; i < numOfTables; ++i) {
-// STableKeyInfo* pInfo = (STableKeyInfo*)taosArrayGet(pGroup, i);
-//
-// // if the lastKey equals to INT64_MIN, there is no data in this table
-// TSKEY lastKey = 0; //((STable*)(pInfo->pTable))->lastKey;
-// if (key < lastKey) {
-// key = lastKey;
-//
-// // keyInfo.pTable = pInfo->pTable;
-// keyInfo.lastKey = key;
-// pInfo->lastKey = key;
-//
-// if (key < window.skey) {
-// window.skey = key;
-// }
-//
-// if (key > window.ekey) {
-// window.ekey = key;
-// }
-// }
-// }
-//
-// // more than one table in each group, only one table left for each group
-// // if (keyInfo.pTable != NULL) {
-// // totalNumOfTable++;
-// // if (taosArrayGetSize(pGroup) == 1) {
-// // // do nothing
-// // } else {
-// // taosArrayClear(pGroup);
-// // taosArrayPush(pGroup, &keyInfo);
-// // }
-// // } else { // mark all the empty groups, and remove it later
-// // taosArrayDestroy(pGroup);
-// // taosArrayPush(emptyGroup, &j);
-// // }
-// }
-//
-// // window does not being updated, so set the original
-// if (window.skey == INT64_MAX && window.ekey == INT64_MIN) {
-// window = TSWINDOW_INITIALIZER;
-// assert(totalNumOfTable == 0 && taosArrayGetSize(groupList->pGroupList) == numOfGroups);
-// }
-//
-// taosArrayRemoveBatch(groupList->pGroupList, TARRAY_GET_START(emptyGroup), (int32_t)taosArrayGetSize(emptyGroup));
-// taosArrayDestroy(emptyGroup);
-//
-// groupList->numOfTables = totalNumOfTable;
+ // int32_t totalNumOfTable = 0;
+ // SArray* emptyGroup = taosArrayInit(16, sizeof(int32_t));
+ //
+ // // NOTE: starts from the buffer in case of descending timestamp order check data blocks
+ // size_t numOfGroups = taosArrayGetSize(groupList->pGroupList);
+ // for (int32_t j = 0; j < numOfGroups; ++j) {
+ // SArray* pGroup = taosArrayGetP(groupList->pGroupList, j);
+ // TSKEY key = TSKEY_INITIAL_VAL;
+ //
+ // STableKeyInfo keyInfo = {0};
+ //
+ // size_t numOfTables = taosArrayGetSize(pGroup);
+ // for (int32_t i = 0; i < numOfTables; ++i) {
+ // STableKeyInfo* pInfo = (STableKeyInfo*)taosArrayGet(pGroup, i);
+ //
+ // // if the lastKey equals to INT64_MIN, there is no data in this table
+ // TSKEY lastKey = 0; //((STable*)(pInfo->pTable))->lastKey;
+ // if (key < lastKey) {
+ // key = lastKey;
+ //
+ // // keyInfo.pTable = pInfo->pTable;
+ // keyInfo.lastKey = key;
+ // pInfo->lastKey = key;
+ //
+ // if (key < window.skey) {
+ // window.skey = key;
+ // }
+ //
+ // if (key > window.ekey) {
+ // window.ekey = key;
+ // }
+ // }
+ // }
+ //
+ // // more than one table in each group, only one table left for each group
+ // // if (keyInfo.pTable != NULL) {
+ // // totalNumOfTable++;
+ // // if (taosArrayGetSize(pGroup) == 1) {
+ // // // do nothing
+ // // } else {
+ // // taosArrayClear(pGroup);
+ // // taosArrayPush(pGroup, &keyInfo);
+ // // }
+ // // } else { // mark all the empty groups, and remove it later
+ // // taosArrayDestroy(pGroup);
+ // // taosArrayPush(emptyGroup, &j);
+ // // }
+ // }
+ //
+ // // window does not being updated, so set the original
+ // if (window.skey == INT64_MAX && window.ekey == INT64_MIN) {
+ // window = TSWINDOW_INITIALIZER;
+ // assert(totalNumOfTable == 0 && taosArrayGetSize(groupList->pGroupList) == numOfGroups);
+ // }
+ //
+ // taosArrayRemoveBatch(groupList->pGroupList, TARRAY_GET_START(emptyGroup), (int32_t)taosArrayGetSize(emptyGroup));
+ // taosArrayDestroy(emptyGroup);
+ //
+ // groupList->numOfTables = totalNumOfTable;
return window;
}
diff --git a/source/dnode/vnode/src/vnd/vnodeSvr.c b/source/dnode/vnode/src/vnd/vnodeSvr.c
index 40f75804dd36e23c06f4bcc189f355aea6b71a56..74b6982008da95f55448b699e5896b86fefeb1b1 100644
--- a/source/dnode/vnode/src/vnd/vnodeSvr.c
+++ b/source/dnode/vnode/src/vnd/vnodeSvr.c
@@ -191,9 +191,9 @@ int vnodeProcessQueryMsg(SVnode *pVnode, SRpcMsg *pMsg) {
SReadHandle handle = {.meta = pVnode->pMeta, .config = &pVnode->config, .vnode = pVnode, .pMsgCb = &pVnode->msgCb};
switch (pMsg->msgType) {
case TDMT_VND_QUERY:
- return qWorkerProcessQueryMsg(&handle, pVnode->pQuery, pMsg);
+ return qWorkerProcessQueryMsg(&handle, pVnode->pQuery, pMsg, 0);
case TDMT_VND_QUERY_CONTINUE:
- return qWorkerProcessCQueryMsg(&handle, pVnode->pQuery, pMsg);
+ return qWorkerProcessCQueryMsg(&handle, pVnode->pQuery, pMsg, 0);
default:
vError("unknown msg type:%d in query queue", pMsg->msgType);
return TSDB_CODE_VND_APP_ERROR;
@@ -206,13 +206,16 @@ int vnodeProcessFetchMsg(SVnode *pVnode, SRpcMsg *pMsg, SQueueInfo *pInfo) {
int32_t msgLen = pMsg->contLen - sizeof(SMsgHead);
switch (pMsg->msgType) {
case TDMT_VND_FETCH:
- return qWorkerProcessFetchMsg(pVnode, pVnode->pQuery, pMsg);
+ return qWorkerProcessFetchMsg(pVnode, pVnode->pQuery, pMsg, 0);
case TDMT_VND_FETCH_RSP:
- return qWorkerProcessFetchRsp(pVnode, pVnode->pQuery, pMsg);
+ return qWorkerProcessFetchRsp(pVnode, pVnode->pQuery, pMsg, 0);
case TDMT_VND_CANCEL_TASK:
- return qWorkerProcessCancelMsg(pVnode, pVnode->pQuery, pMsg);
+ return qWorkerProcessCancelMsg(pVnode, pVnode->pQuery, pMsg, 0);
case TDMT_VND_DROP_TASK:
- return qWorkerProcessDropMsg(pVnode, pVnode->pQuery, pMsg);
+ return qWorkerProcessDropMsg(pVnode, pVnode->pQuery, pMsg, 0);
+ case TDMT_VND_QUERY_HEARTBEAT:
+ return qWorkerProcessHbMsg(pVnode, pVnode->pQuery, pMsg, 0);
+
case TDMT_VND_TABLE_META:
return vnodeGetTableMeta(pVnode, pMsg);
case TDMT_VND_CONSUME:
@@ -231,9 +234,6 @@ int vnodeProcessFetchMsg(SVnode *pVnode, SRpcMsg *pMsg, SQueueInfo *pInfo) {
return tqProcessTaskDispatchRsp(pVnode->pTq, pMsg);
case TDMT_VND_TASK_RECOVER_RSP:
return tqProcessTaskRecoverRsp(pVnode->pTq, pMsg);
-
- case TDMT_VND_QUERY_HEARTBEAT:
- return qWorkerProcessHbMsg(pVnode, pVnode->pQuery, pMsg);
default:
vError("unknown msg type:%d in fetch queue", pMsg->msgType);
return TSDB_CODE_VND_APP_ERROR;
@@ -260,7 +260,7 @@ int vnodeProcessSyncReq(SVnode *pVnode, SRpcMsg *pMsg, SRpcMsg **pRsp) {
SMsgHead *pHead = pMsg->pCont;
- char logBuf[512];
+ char logBuf[512] = {0};
char *syncNodeStr = sync2SimpleStr(pVnode->sync);
snprintf(logBuf, sizeof(logBuf), "==vnodeProcessSyncReq== msgType:%d, syncNode: %s", pMsg->msgType, syncNodeStr);
syncRpcMsgLog2(logBuf, pMsg);
@@ -678,6 +678,7 @@ static int vnodeProcessSubmitReq(SVnode *pVnode, int64_t version, void *pReq, in
int32_t nRows;
int32_t tsize, ret;
SEncoder encoder = {0};
+ SArray *newTbUids = NULL;
terrno = TSDB_CODE_SUCCESS;
pRsp->code = 0;
@@ -698,6 +699,7 @@ static int vnodeProcessSubmitReq(SVnode *pVnode, int64_t version, void *pReq, in
}
submitRsp.pArray = taosArrayInit(pSubmitReq->numOfBlocks, sizeof(SSubmitBlkRsp));
+ newTbUids = taosArrayInit(pSubmitReq->numOfBlocks, sizeof(int64_t));
if (!submitRsp.pArray) {
pRsp->code = TSDB_CODE_OUT_OF_MEMORY;
goto _exit;
@@ -727,6 +729,7 @@ static int vnodeProcessSubmitReq(SVnode *pVnode, int64_t version, void *pReq, in
goto _exit;
}
}
+ taosArrayPush(newTbUids, &createTbReq.uid);
submitBlkRsp.uid = createTbReq.uid;
submitBlkRsp.tblFName = taosMemoryMalloc(strlen(pVnode->config.dbname) + strlen(createTbReq.name) + 2);
@@ -754,8 +757,10 @@ static int vnodeProcessSubmitReq(SVnode *pVnode, int64_t version, void *pReq, in
submitRsp.affectedRows += submitBlkRsp.affectedRows;
taosArrayPush(submitRsp.pArray, &submitBlkRsp);
}
+ tqUpdateTbUidList(pVnode->pTq, newTbUids, true);
_exit:
+ taosArrayDestroy(newTbUids);
tEncodeSize(tEncodeSSubmitRsp, &submitRsp, tsize, ret);
pRsp->pCont = rpcMallocCont(tsize);
pRsp->contLen = tsize;
diff --git a/source/dnode/vnode/src/vnd/vnodeSync.c b/source/dnode/vnode/src/vnd/vnodeSync.c
index d8f3110a16fbd118e966a34d2d8d8d8c58519f54..d1468778531d08cb8f2744c7e953b452a28df810 100644
--- a/source/dnode/vnode/src/vnd/vnodeSync.c
+++ b/source/dnode/vnode/src/vnd/vnodeSync.c
@@ -80,7 +80,7 @@ void vnodeSyncCommitMsg(SSyncFSM *pFsm, const SRpcMsg *pMsg, SFsmCbMeta cbMeta)
}
if (cbMeta.index > beginIndex) {
- char logBuf[256];
+ char logBuf[256] = {0};
snprintf(
logBuf, sizeof(logBuf),
"==callback== ==CommitCb== execute, pFsm:%p, index:%ld, isWeak:%d, code:%d, state:%d %s, beginIndex :%ld\n",
@@ -115,7 +115,7 @@ void vnodeSyncCommitMsg(SSyncFSM *pFsm, const SRpcMsg *pMsg, SFsmCbMeta cbMeta)
tmsgPutToQueue(&(pVnode->msgCb), APPLY_QUEUE, &applyMsg);
} else {
- char logBuf[256];
+ char logBuf[256] = {0};
snprintf(logBuf, sizeof(logBuf),
"==callback== ==CommitCb== do not execute, pFsm:%p, index:%ld, isWeak:%d, code:%d, state:%d %s, "
"beginIndex :%ld\n",
@@ -126,7 +126,7 @@ void vnodeSyncCommitMsg(SSyncFSM *pFsm, const SRpcMsg *pMsg, SFsmCbMeta cbMeta)
}
void vnodeSyncPreCommitMsg(SSyncFSM *pFsm, const SRpcMsg *pMsg, SFsmCbMeta cbMeta) {
- char logBuf[256];
+ char logBuf[256] = {0};
snprintf(logBuf, sizeof(logBuf),
"==callback== ==PreCommitCb== pFsm:%p, index:%ld, isWeak:%d, code:%d, state:%d %s \n", pFsm, cbMeta.index,
cbMeta.isWeak, cbMeta.code, cbMeta.state, syncUtilState2String(cbMeta.state));
@@ -134,7 +134,7 @@ void vnodeSyncPreCommitMsg(SSyncFSM *pFsm, const SRpcMsg *pMsg, SFsmCbMeta cbMet
}
void vnodeSyncRollBackMsg(SSyncFSM *pFsm, const SRpcMsg *pMsg, SFsmCbMeta cbMeta) {
- char logBuf[256];
+ char logBuf[256] = {0};
snprintf(logBuf, sizeof(logBuf), "==callback== ==RollBackCb== pFsm:%p, index:%ld, isWeak:%d, code:%d, state:%d %s \n",
pFsm, cbMeta.index, cbMeta.isWeak, cbMeta.code, cbMeta.state, syncUtilState2String(cbMeta.state));
syncRpcMsgLog2(logBuf, (SRpcMsg *)pMsg);
@@ -142,14 +142,13 @@ void vnodeSyncRollBackMsg(SSyncFSM *pFsm, const SRpcMsg *pMsg, SFsmCbMeta cbMeta
SSyncFSM *vnodeSyncMakeFsm(SVnode *pVnode) {
SSyncFSM *pFsm = taosMemoryCalloc(1, sizeof(SSyncFSM));
+ memset(pFsm, 0, sizeof(*pFsm));
pFsm->data = pVnode;
pFsm->FpCommitCb = vnodeSyncCommitMsg;
pFsm->FpPreCommitCb = vnodeSyncPreCommitMsg;
pFsm->FpRollBackCb = vnodeSyncRollBackMsg;
pFsm->FpGetSnapshot = vnodeSyncGetSnapshot;
pFsm->FpRestoreFinishCb = NULL;
- pFsm->FpSnapshotRead = NULL;
- pFsm->FpSnapshotApply = NULL;
pFsm->FpReConfigCb = NULL;
return pFsm;
diff --git a/source/libs/catalog/src/ctgDbg.c b/source/libs/catalog/src/ctgDbg.c
index 849c66fd126dcbb0b0bdee1de1ec54ea8bd3697c..fdab50db0f65fd67d16d6f5b134f847dc0f882bc 100644
--- a/source/libs/catalog/src/ctgDbg.c
+++ b/source/libs/catalog/src/ctgDbg.c
@@ -71,6 +71,16 @@ void ctgdUserCallback(SMetaData* pResult, void* param, int32_t code) {
qDebug("empty db vgroup");
}
+ if (pResult->pDbInfo && taosArrayGetSize(pResult->pDbInfo) > 0) {
+ num = taosArrayGetSize(pResult->pDbInfo);
+ for (int32_t i = 0; i < num; ++i) {
+ SDbInfo *pDb = taosArrayGet(pResult->pDbInfo, i);
+ qDebug("db %d dbInfo: vgVer:%d, tbNum:%d, dbId:%" PRIx64, i, pDb->vgVer, pDb->tbNum, pDb->dbId);
+ }
+ } else {
+ qDebug("empty db info");
+ }
+
if (pResult->pTableHash && taosArrayGetSize(pResult->pTableHash) > 0) {
num = taosArrayGetSize(pResult->pTableHash);
for (int32_t i = 0; i < num; ++i) {
@@ -127,6 +137,7 @@ int32_t ctgdLaunchAsyncCall(SCatalog* pCtg, void *pTrans, const SEpSet* pMgmtEps
SCatalogReq req = {0};
req.pTableMeta = taosArrayInit(2, sizeof(SName));
req.pDbVgroup = taosArrayInit(2, TSDB_DB_FNAME_LEN);
+ req.pDbInfo = taosArrayInit(2, TSDB_DB_FNAME_LEN);
req.pTableHash = taosArrayInit(2, sizeof(SName));
req.pUdf = taosArrayInit(2, TSDB_FUNC_NAME_LEN);
req.pDbCfg = taosArrayInit(2, TSDB_DB_FNAME_LEN);
@@ -149,9 +160,11 @@ int32_t ctgdLaunchAsyncCall(SCatalog* pCtg, void *pTrans, const SEpSet* pMgmtEps
strcpy(dbFName, "1.db1");
taosArrayPush(req.pDbVgroup, dbFName);
taosArrayPush(req.pDbCfg, dbFName);
+ taosArrayPush(req.pDbInfo, dbFName);
strcpy(dbFName, "1.db2");
taosArrayPush(req.pDbVgroup, dbFName);
taosArrayPush(req.pDbCfg, dbFName);
+ taosArrayPush(req.pDbInfo, dbFName);
strcpy(funcName, "udf1");
taosArrayPush(req.pUdf, funcName);
diff --git a/source/libs/catalog/test/catalogTests.cpp b/source/libs/catalog/test/catalogTests.cpp
index 4409f2c3217fa6e23dbe466cfd7f8dc77c05de8c..81d206a0f3fee7f33f24b9740c973ab8d89b10d1 100644
--- a/source/libs/catalog/test/catalogTests.cpp
+++ b/source/libs/catalog/test/catalogTests.cpp
@@ -41,7 +41,6 @@
namespace {
extern "C" int32_t ctgdGetClusterCacheNum(struct SCatalog* pCatalog, int32_t type);
-extern "C" int32_t ctgOpUpdateTbMeta(SCtgCacheOperation *action);
extern "C" int32_t ctgdEnableDebug(char *option);
extern "C" int32_t ctgdGetStatNum(char *option, void *res);
diff --git a/source/libs/command/src/explain.c b/source/libs/command/src/explain.c
index 26a0f3bf6cf85bfe4d81a0ab5d8913d7e1767eeb..831b7017b2632a3e52e3050c08b2c29ffa463eeb 100644
--- a/source/libs/command/src/explain.c
+++ b/source/libs/command/src/explain.c
@@ -560,8 +560,10 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i
QRY_ERR_RET(qExplainBufAppendExecInfo(pResNode->pExecInfo, tbuf, &tlen));
EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT);
}
- EXPLAIN_ROW_APPEND(EXPLAIN_FUNCTIONS_FORMAT, pAggNode->pAggFuncs->length);
- EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT);
+ if (pAggNode->pAggFuncs) {
+ EXPLAIN_ROW_APPEND(EXPLAIN_FUNCTIONS_FORMAT, pAggNode->pAggFuncs->length);
+ EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT);
+ }
EXPLAIN_ROW_APPEND(EXPLAIN_WIDTH_FORMAT, pAggNode->node.pOutputDataBlockDesc->totalRowSize);
if (pAggNode->pGroupKeys) {
EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT);
diff --git a/source/libs/executor/inc/executorimpl.h b/source/libs/executor/inc/executorimpl.h
index ecc3ca4aa327512cba5a87ed52ce1ff44c306374..88f4bdbd3db6aef21f8df64744128a9e89743466 100644
--- a/source/libs/executor/inc/executorimpl.h
+++ b/source/libs/executor/inc/executorimpl.h
@@ -156,13 +156,11 @@ typedef struct STaskAttr {
} STaskAttr;
struct SOperatorInfo;
-struct SAggSupporter;
-struct SOptrBasicInfo;
+//struct SAggSupporter;
+//struct SOptrBasicInfo;
-typedef void (*__optr_encode_fn_t)(struct SOperatorInfo* pOperator, struct SAggSupporter* pSup,
- struct SOptrBasicInfo* pInfo, char** result, int32_t* length);
-typedef bool (*__optr_decode_fn_t)(struct SOperatorInfo* pOperator, struct SAggSupporter* pSup,
- struct SOptrBasicInfo* pInfo, char* result, int32_t length);
+typedef int32_t (*__optr_encode_fn_t)(struct SOperatorInfo* pOperator, char** result, int32_t* length);
+typedef int32_t (*__optr_decode_fn_t)(struct SOperatorInfo* pOperator, char* result, int32_t length);
typedef int32_t (*__optr_open_fn_t)(struct SOperatorInfo* pOptr);
typedef SSDataBlock* (*__optr_fn_t)(struct SOperatorInfo* pOptr);
@@ -375,30 +373,33 @@ typedef struct SessionWindowSupporter {
} SessionWindowSupporter;
typedef struct SStreamBlockScanInfo {
- SArray* pBlockLists; // multiple SSDatablock.
- SSDataBlock* pRes; // result SSDataBlock
- SSDataBlock* pUpdateRes; // update SSDataBlock
- int32_t updateResIndex;
- int32_t blockType; // current block type
- int32_t validBlockIndex; // Is current data has returned?
- SColumnInfo* pCols; // the output column info
- uint64_t numOfExec; // execution times
- void* streamBlockReader;// stream block reader handle
- SArray* pColMatchInfo; //
- SNode* pCondition;
- SArray* tsArray;
- SUpdateInfo* pUpdateInfo;
- int32_t primaryTsIndex; // primary time stamp slot id
- void* pDataReader;
- SReadHandle readHandle;
- uint64_t tableUid; // queried super table uid
+ SArray* pBlockLists; // multiple SSDatablock.
+ SSDataBlock* pRes; // result SSDataBlock
+ SSDataBlock* pUpdateRes; // update SSDataBlock
+ int32_t updateResIndex;
+ int32_t blockType; // current block type
+ int32_t validBlockIndex; // Is current data has returned?
+ SColumnInfo* pCols; // the output column info
+ uint64_t numOfExec; // execution times
+ void* streamBlockReader;// stream block reader handle
+ SArray* pColMatchInfo; //
+ SNode* pCondition;
+ SArray* tsArray;
+ SUpdateInfo* pUpdateInfo;
+
+ SExprInfo* pPseudoExpr;
+ int32_t numOfPseudoExpr;
+
+ int32_t primaryTsIndex; // primary time stamp slot id
+ void* pDataReader;
+ SReadHandle readHandle;
+ uint64_t tableUid; // queried super table uid
EStreamScanMode scanMode;
SOperatorInfo* pOperatorDumy;
SInterval interval; // if the upstream is an interval operator, the interval info is also kept here.
- SCatchSupporter childAggSup;
- SArray* childIds;
+ SArray* childIds;
SessionWindowSupporter sessionSup;
- bool assignBlockUid; // assign block uid to groupId, temporarily used for generating rollup SMA.
+ bool assignBlockUid; // assign block uid to groupId, temporarily used for generating rollup SMA.
} SStreamBlockScanInfo;
typedef struct SSysTableScanInfo {
@@ -437,18 +438,21 @@ typedef struct SAggSupporter {
typedef struct STimeWindowSupp {
int8_t calTrigger;
int64_t waterMark;
+ TSKEY maxTs;
SColumnInfoData timeWindowData; // query time window info for scalar function execution.
} STimeWindowAggSupp;
typedef struct SIntervalAggOperatorInfo {
+ // SOptrBasicInfo should be first, SAggSupporter should be second for stream encode
SOptrBasicInfo binfo; // basic info
+ SAggSupporter aggSup; // aggregate supporter
+
SGroupResInfo groupResInfo; // multiple results build supporter
SInterval interval; // interval info
int32_t primaryTsIndex; // primary time stamp slot id from result of downstream operator.
STimeWindow win; // query time range
bool timeWindowInterpo; // interpolation needed or not
char** pRow; // previous row/tuple of already processed datablock
- SAggSupporter aggSup; // aggregate supporter
STableQueryInfo* pCurrent; // current tableQueryInfo struct
int32_t order; // current SSDataBlock scan order
EOPTR_EXEC_MODEL execModel; // operator execution model [batch model|stream model]
@@ -459,19 +463,23 @@ typedef struct SIntervalAggOperatorInfo {
} SIntervalAggOperatorInfo;
typedef struct SStreamFinalIntervalOperatorInfo {
+ // SOptrBasicInfo should be first, SAggSupporter should be second for stream encode
SOptrBasicInfo binfo; // basic info
+ SAggSupporter aggSup; // aggregate supporter
+
SGroupResInfo groupResInfo; // multiple results build supporter
SInterval interval; // interval info
int32_t primaryTsIndex; // primary time stamp slot id from result of downstream operator.
- SAggSupporter aggSup; // aggregate supporter
int32_t order; // current SSDataBlock scan order
STimeWindowAggSupp twAggSup;
SArray* pChildren;
} SStreamFinalIntervalOperatorInfo;
typedef struct SAggOperatorInfo {
+ // SOptrBasicInfo should be first, SAggSupporter should be second for stream encode
SOptrBasicInfo binfo;
SAggSupporter aggSup;
+
STableQueryInfo *current;
uint64_t groupId;
SGroupResInfo groupResInfo;
@@ -484,8 +492,10 @@ typedef struct SAggOperatorInfo {
} SAggOperatorInfo;
typedef struct SProjectOperatorInfo {
+ // SOptrBasicInfo should be first, SAggSupporter should be second for stream encode
SOptrBasicInfo binfo;
SAggSupporter aggSup;
+
SSDataBlock* existDataBlock;
SArray* pPseudoColInfo;
SLimit limit;
@@ -509,7 +519,10 @@ typedef struct SFillOperatorInfo {
} SFillOperatorInfo;
typedef struct SGroupbyOperatorInfo {
+ // SOptrBasicInfo should be first, SAggSupporter should be second for stream encode
SOptrBasicInfo binfo;
+ SAggSupporter aggSup;
+
SArray* pGroupCols; // group by columns, SArray
SArray* pGroupColVals; // current group column values, SArray
SNode* pCondition;
@@ -517,7 +530,6 @@ typedef struct SGroupbyOperatorInfo {
char* keyBuf; // group by keys for hash
int32_t groupKeyLen; // total group by column width
SGroupResInfo groupResInfo;
- SAggSupporter aggSup;
SExprInfo* pScalarExprInfo;
int32_t numOfScalarExpr; // the number of scalar expression in group operator
SqlFunctionCtx* pScalarFuncCtx;
@@ -554,8 +566,10 @@ typedef struct SWindowRowsSup {
} SWindowRowsSup;
typedef struct SSessionAggOperatorInfo {
+ // SOptrBasicInfo should be first, SAggSupporter should be second for stream encode
SOptrBasicInfo binfo;
SAggSupporter aggSup;
+
SGroupResInfo groupResInfo;
SWindowRowsSup winSup;
bool reptScan; // next round scan
@@ -568,6 +582,7 @@ typedef struct SResultWindowInfo {
SResultRowPosition pos;
STimeWindow win;
bool isOutput;
+ bool isClosed;
} SResultWindowInfo;
typedef struct SStreamSessionAggOperatorInfo {
@@ -593,8 +608,10 @@ typedef struct STimeSliceOperatorInfo {
} STimeSliceOperatorInfo;
typedef struct SStateWindowOperatorInfo {
+ // SOptrBasicInfo should be first, SAggSupporter should be second for stream encode
SOptrBasicInfo binfo;
SAggSupporter aggSup;
+
SGroupResInfo groupResInfo;
SWindowRowsSup winSup;
SColumn stateCol; // start row index
@@ -606,8 +623,10 @@ typedef struct SStateWindowOperatorInfo {
} SStateWindowOperatorInfo;
typedef struct SSortedMergeOperatorInfo {
-
+ // SOptrBasicInfo should be first, SAggSupporter should be second for stream encode
SOptrBasicInfo binfo;
+ SAggSupporter aggSup;
+
SArray* pSortInfo;
int32_t numOfSources;
SSortHandle *pSortHandle;
@@ -619,7 +638,6 @@ typedef struct SSortedMergeOperatorInfo {
int32_t numOfResPerPage;
char** groupVal;
SArray *groupInfo;
- SAggSupporter aggSup;
} SSortedMergeOperatorInfo;
typedef struct SSortOperatorInfo {
@@ -737,10 +755,11 @@ SOperatorInfo* createGroupOperatorInfo(SOperatorInfo* downstream, SExprInfo* pEx
SSDataBlock* pResultBlock, SArray* pGroupColList, SNode* pCondition,
SExprInfo* pScalarExprInfo, int32_t numOfScalarExpr, SExecTaskInfo* pTaskInfo);
SOperatorInfo* createDataBlockInfoScanOperator(void* dataReader, SExecTaskInfo* pTaskInfo);
-SOperatorInfo* createStreamScanOperatorInfo(void* streamReadHandle, void* pDataReader, SReadHandle* pHandle,
- uint64_t uid, SSDataBlock* pResBlock, SArray* pColList,
- SArray* pTableIdList, SExecTaskInfo* pTaskInfo, SNode* pCondition,
- SOperatorInfo* pOperatorDumy);
+
+SOperatorInfo* createStreamScanOperatorInfo(void* pDataReader, SReadHandle* pHandle,
+ SArray* pTableIdList, STableScanPhysiNode* pTableScanNode, SExecTaskInfo* pTaskInfo,
+ STimeWindowAggSupp* pTwSup, int16_t tsColId);
+
SOperatorInfo* createFillOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExpr, int32_t numOfCols,
SInterval* pInterval, STimeWindow* pWindow, SSDataBlock* pResBlock, int32_t fillType, SNodeListNode* fillVal,
@@ -780,24 +799,37 @@ void queryCostStatis(SExecTaskInfo* pTaskInfo);
void doDestroyTask(SExecTaskInfo* pTaskInfo);
int32_t getMaximumIdleDurationSec();
+/*
+ * ops: root operator
+ * data: *data save the result of encode, need to be freed by caller
+ * length: *length save the length of *data
+ * return: result code, 0 means success
+ */
+int32_t encodeOperator(SOperatorInfo* ops, char** data, int32_t *length);
+
+/*
+ * ops: root operator, created by caller
+ * data: save the result of decode
+ * length: the length of data
+ * return: result code, 0 means success
+ */
+int32_t decodeOperator(SOperatorInfo* ops, char* data, int32_t length);
+
void setTaskStatus(SExecTaskInfo* pTaskInfo, int8_t status);
int32_t createExecTaskInfoImpl(SSubplan* pPlan, SExecTaskInfo** pTaskInfo, SReadHandle* pHandle, uint64_t taskId,
EOPTR_EXEC_MODEL model);
int32_t getOperatorExplainExecInfo(SOperatorInfo* operatorInfo, SExplainExecInfo** pRes, int32_t* capacity,
int32_t* resNum);
-bool aggDecodeResultRow(SOperatorInfo* pOperator, SAggSupporter* pSup, SOptrBasicInfo* pInfo, char* result,
- int32_t length);
-void aggEncodeResultRow(SOperatorInfo* pOperator, SAggSupporter* pSup, SOptrBasicInfo* pInfo, char** result,
- int32_t* length);
+int32_t aggDecodeResultRow(SOperatorInfo* pOperator, char* result, int32_t length);
+int32_t aggEncodeResultRow(SOperatorInfo* pOperator, char** result, int32_t* length);
+
STimeWindow getActiveTimeWindow(SDiskbasedBuf* pBuf, SResultRowInfo* pResultRowInfo, int64_t ts,
SInterval* pInterval, int32_t precision, STimeWindow* win);
int32_t getNumOfRowsInTimeWindow(SDataBlockInfo* pDataBlockInfo, TSKEY* pPrimaryColumn,
int32_t startPos, TSKEY ekey, __block_search_fn_t searchFn, STableQueryInfo* item,
int32_t order);
int32_t binarySearchForKey(char* pValue, int num, TSKEY key, int order);
-int32_t initCacheSupporter(SCatchSupporter* pCatchSup, size_t rowSize, const char* pKey,
- const char* pDir);
int32_t initStreamAggSupporter(SStreamAggSupporter* pSup, const char* pKey);
SResultRow* getNewResultRow_rv(SDiskbasedBuf* pResultBuf, int64_t tableGroupId, int32_t interBufSize);
SResultWindowInfo* getSessionTimeWindow(SArray* pWinInfos, TSKEY ts, int64_t gap,
diff --git a/source/libs/executor/src/executil.c b/source/libs/executor/src/executil.c
index 5a02547f58aa4cf73c5297dda771ba0900bce141..aea9d70f316806286fbd69f0d13dc72ccae54dfe 100644
--- a/source/libs/executor/src/executil.c
+++ b/source/libs/executor/src/executil.c
@@ -233,7 +233,7 @@ void initGroupedResultInfo(SGroupResInfo* pGroupResInfo, SHashObj* pHashmap, int
void initMultiResInfoFromArrayList(SGroupResInfo* pGroupResInfo, SArray* pArrayList) {
if (pGroupResInfo->pRows != NULL) {
- taosArrayDestroy(pGroupResInfo->pRows);
+ taosArrayDestroyP(pGroupResInfo->pRows, taosMemoryFree);
}
pGroupResInfo->pRows = pArrayList;
diff --git a/source/libs/executor/src/executorimpl.c b/source/libs/executor/src/executorimpl.c
index 2208c94e9c0aecc377a5502ebe9f639ea28a2562..ce46573830e8b1a25a1893191710166e78a6a89d 100644
--- a/source/libs/executor/src/executorimpl.c
+++ b/source/libs/executor/src/executorimpl.c
@@ -28,13 +28,13 @@
#include "ttime.h"
#include "executorimpl.h"
+#include "index.h"
#include "query.h"
#include "tcompare.h"
#include "tcompression.h"
#include "thash.h"
#include "ttypes.h"
#include "vnode.h"
-#include "index.h"
#define IS_MAIN_SCAN(runtime) ((runtime)->scanFlag == MAIN_SCAN)
#define IS_REVERSE_SCAN(runtime) ((runtime)->scanFlag == REVERSE_SCAN)
@@ -87,7 +87,7 @@ static UNUSED_FUNC void* u_realloc(void* p, size_t __size) {
#define realloc u_realloc
#endif
-#define CLEAR_QUERY_STATUS(q, st) ((q)->status &= (~(st)))
+#define CLEAR_QUERY_STATUS(q, st) ((q)->status &= (~(st)))
//#define GET_NUM_OF_TABLEGROUP(q) taosArrayGetSize((q)->tableqinfoGroupInfo.pGroupList)
#define QUERY_IS_INTERVAL_QUERY(_q) ((_q)->interval.interval > 0)
@@ -2601,6 +2601,7 @@ int32_t setSDataBlockFromFetchRsp(SSDataBlock* pRes, SLoadRemoteDataInfo* pLoadI
pStart += sizeof(int32_t) * numOfRows;
if (colLen[i] > 0) {
+ taosMemoryFreeClear(pColInfoData->pData);
pColInfoData->pData = taosMemoryMalloc(colLen[i]);
}
} else {
@@ -2758,6 +2759,7 @@ static SSDataBlock* concurrentlyLoadRemoteDataImpl(SOperatorInfo* pOperator, SEx
pExchangeInfo->loadInfo.totalRows);
pDataInfo->status = EX_SOURCE_DATA_EXHAUSTED;
completed += 1;
+ taosMemoryFreeClear(pDataInfo->pRsp);
continue;
}
@@ -2765,6 +2767,7 @@ static SSDataBlock* concurrentlyLoadRemoteDataImpl(SOperatorInfo* pOperator, SEx
code = setSDataBlockFromFetchRsp(pExchangeInfo->pResult, pLoadInfo, pTableRsp->numOfRows, pTableRsp->data,
pTableRsp->compLen, pTableRsp->numOfCols, startTs, &pDataInfo->totalRows, NULL);
if (code != 0) {
+ taosMemoryFreeClear(pDataInfo->pRsp);
goto _error;
}
@@ -2785,10 +2788,12 @@ static SSDataBlock* concurrentlyLoadRemoteDataImpl(SOperatorInfo* pOperator, SEx
pDataInfo->status = EX_SOURCE_DATA_NOT_READY;
code = doSendFetchDataRequest(pExchangeInfo, pTaskInfo, i);
if (code != TSDB_CODE_SUCCESS) {
+ taosMemoryFreeClear(pDataInfo->pRsp);
goto _error;
}
}
+ taosMemoryFreeClear(pDataInfo->pRsp);
return pExchangeInfo->pResult;
}
@@ -2890,7 +2895,8 @@ static SSDataBlock* seqLoadRemoteData(SOperatorInfo* pOperator) {
pDataInfo->totalRows, pLoadInfo->totalRows);
pDataInfo->status = EX_SOURCE_DATA_EXHAUSTED;
- pExchangeInfo->current += 1;
+ pExchangeInfo->current += 1;
+ taosMemoryFreeClear(pDataInfo->pRsp);
continue;
}
@@ -2916,6 +2922,7 @@ static SSDataBlock* seqLoadRemoteData(SOperatorInfo* pOperator) {
}
pOperator->resultInfo.totalRows += pRes->info.rows;
+ taosMemoryFreeClear(pDataInfo->pRsp);
return pExchangeInfo->pResult;
}
}
@@ -3491,17 +3498,24 @@ static SSDataBlock* getAggregateResult(SOperatorInfo* pOperator) {
return (rows == 0) ? NULL : pInfo->pRes;
}
-void aggEncodeResultRow(SOperatorInfo* pOperator, SAggSupporter* pSup, SOptrBasicInfo* pInfo, char** result,
- int32_t* length) {
+int32_t aggEncodeResultRow(SOperatorInfo* pOperator, char** result, int32_t* length) {
+ if(result == NULL || length == NULL){
+ return TSDB_CODE_TSC_INVALID_INPUT;
+ }
+ SOptrBasicInfo* pInfo = (SOptrBasicInfo*)(pOperator->info);
+ SAggSupporter* pSup = (SAggSupporter*)POINTER_SHIFT(pOperator->info, sizeof(SOptrBasicInfo));
int32_t size = taosHashGetSize(pSup->pResultRowHashTable);
size_t keyLen = sizeof(uint64_t) * 2; // estimate the key length
- int32_t totalSize = sizeof(int32_t) + size * (sizeof(int32_t) + keyLen + sizeof(int32_t) + pSup->resultRowSize);
- *result = taosMemoryCalloc(1, totalSize);
+ int32_t totalSize = sizeof(int32_t) + sizeof(int32_t) + size * (sizeof(int32_t) + keyLen + sizeof(int32_t) + pSup->resultRowSize);
+
+ *result = (char*)taosMemoryCalloc(1, totalSize);
if (*result == NULL) {
- longjmp(pOperator->pTaskInfo->env, TSDB_CODE_OUT_OF_MEMORY);
+ return TSDB_CODE_OUT_OF_MEMORY;
}
- *(int32_t*)(*result) = size;
+
int32_t offset = sizeof(int32_t);
+ *(int32_t*)(*result + offset) = size;
+ offset += sizeof(int32_t);
// prepare memory
SResultRowPosition* pos = &pInfo->resultRowInfo.cur;
@@ -3523,12 +3537,11 @@ void aggEncodeResultRow(SOperatorInfo* pOperator, SAggSupporter* pSup, SOptrBasi
// recalculate the result size
int32_t realTotalSize = offset + sizeof(int32_t) + keyLen + sizeof(int32_t) + pSup->resultRowSize;
if (realTotalSize > totalSize) {
- char* tmp = taosMemoryRealloc(*result, realTotalSize);
+ char* tmp = (char*)taosMemoryRealloc(*result, realTotalSize);
if (tmp == NULL) {
- terrno = TSDB_CODE_OUT_OF_MEMORY;
taosMemoryFree(*result);
*result = NULL;
- longjmp(pOperator->pTaskInfo->env, TSDB_CODE_OUT_OF_MEMORY);
+ return TSDB_CODE_OUT_OF_MEMORY;
} else {
*result = tmp;
}
@@ -3548,17 +3561,18 @@ void aggEncodeResultRow(SOperatorInfo* pOperator, SAggSupporter* pSup, SOptrBasi
pIter = taosHashIterate(pSup->pResultRowHashTable, pIter);
}
- if (length) {
- *length = offset;
- }
- return;
+ *(int32_t*)(*result) = offset;
+ *length = offset;
+
+ return TDB_CODE_SUCCESS;
}
-bool aggDecodeResultRow(SOperatorInfo* pOperator, SAggSupporter* pSup, SOptrBasicInfo* pInfo, char* result,
- int32_t length) {
- if (!result || length <= 0) {
- return false;
+int32_t aggDecodeResultRow(SOperatorInfo* pOperator, char* result, int32_t length) {
+ if(result == NULL || length <= 0){
+ return TSDB_CODE_TSC_INVALID_INPUT;
}
+ SOptrBasicInfo* pInfo = (SOptrBasicInfo*)(pOperator->info);
+ SAggSupporter* pSup = (SAggSupporter*)POINTER_SHIFT(pOperator->info, sizeof(SOptrBasicInfo));
// int32_t size = taosHashGetSize(pSup->pResultRowHashTable);
int32_t count = *(int32_t*)(result);
@@ -3571,7 +3585,7 @@ bool aggDecodeResultRow(SOperatorInfo* pOperator, SAggSupporter* pSup, SOptrBasi
uint64_t tableGroupId = *(uint64_t*)(result + offset);
SResultRow* resultRow = getNewResultRow_rv(pSup->pResultBuf, tableGroupId, pSup->resultRowSize);
if (!resultRow) {
- longjmp(pOperator->pTaskInfo->env, TSDB_CODE_TSC_INVALID_INPUT);
+ return TSDB_CODE_TSC_INVALID_INPUT;
}
// add a new result set for a new group
@@ -3581,7 +3595,7 @@ bool aggDecodeResultRow(SOperatorInfo* pOperator, SAggSupporter* pSup, SOptrBasi
offset += keyLen;
int32_t valueLen = *(int32_t*)(result + offset);
if (valueLen != pSup->resultRowSize) {
- longjmp(pOperator->pTaskInfo->env, TSDB_CODE_TSC_INVALID_INPUT);
+ return TSDB_CODE_TSC_INVALID_INPUT;
}
offset += sizeof(int32_t);
int32_t pageId = resultRow->pageId;
@@ -3600,9 +3614,9 @@ bool aggDecodeResultRow(SOperatorInfo* pOperator, SAggSupporter* pSup, SOptrBasi
}
if (offset != length) {
- longjmp(pOperator->pTaskInfo->env, TSDB_CODE_TSC_INVALID_INPUT);
+ return TSDB_CODE_TSC_INVALID_INPUT;
}
- return true;
+ return TDB_CODE_SUCCESS;
}
enum {
@@ -3960,11 +3974,11 @@ int32_t doInitAggInfoSup(SAggSupporter* pAggSup, SqlFunctionCtx* pCtx, int32_t n
return TSDB_CODE_OUT_OF_MEMORY;
}
- uint32_t defaultPgsz = 0;
+ uint32_t defaultPgsz = 0;
uint32_t defaultBufsz = 0;
getBufferPgSize(pAggSup->resultRowSize, &defaultPgsz, &defaultBufsz);
- int32_t code = createDiskbasedBuf(&pAggSup->pResultBuf, defaultPgsz, defaultBufsz, pKey, TD_TMP_DIR_PATH);
+ int32_t code = createDiskbasedBuf(&pAggSup->pResultBuf, defaultPgsz, defaultBufsz, pKey, TD_TMP_DIR_PATH);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
@@ -4001,7 +4015,7 @@ void initResultSizeInfo(SOperatorInfo* pOperator, int32_t numOfRows) {
}
}
-//static STableQueryInfo* initTableQueryInfo(const STableListInfo* pTableListInfo) {
+// static STableQueryInfo* initTableQueryInfo(const STableListInfo* pTableListInfo) {
// int32_t size = taosArrayGetSize(pTableListInfo->pTableList);
// if (size == 0) {
// return NULL;
@@ -4434,9 +4448,11 @@ static SExecTaskInfo* createExecTaskInfo(uint64_t queryId, uint64_t taskId, EOPT
}
static tsdbReaderT doCreateDataReader(STableScanPhysiNode* pTableScanNode, SReadHandle* pHandle,
- STableListInfo* pTableGroupInfo, uint64_t queryId, uint64_t taskId, SNode* pTagCond);
+ STableListInfo* pTableGroupInfo, uint64_t queryId, uint64_t taskId,
+ SNode* pTagCond);
-static int32_t getTableList(void* metaHandle, int32_t tableType, uint64_t tableUid, STableListInfo* pListInfo, SNode* pTagCond);
+static int32_t getTableList(void* metaHandle, int32_t tableType, uint64_t tableUid, STableListInfo* pListInfo,
+ SNode* pTagCond);
static SArray* extractTableIdList(const STableListInfo* pTableGroupInfo);
static SArray* extractColumnInfo(SNodeList* pNodeList);
@@ -4473,7 +4489,8 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo
if (QUERY_NODE_PHYSICAL_PLAN_TABLE_SCAN == type) {
STableScanPhysiNode* pTableScanNode = (STableScanPhysiNode*)pPhyNode;
- tsdbReaderT pDataReader = doCreateDataReader(pTableScanNode, pHandle, pTableListInfo, (uint64_t)queryId, taskId, pTagCond);
+ tsdbReaderT pDataReader =
+ doCreateDataReader(pTableScanNode, pHandle, pTableListInfo, (uint64_t)queryId, taskId, pTagCond);
if (pDataReader == NULL && terrno != 0) {
return NULL;
}
@@ -4492,9 +4509,8 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo
} else if (QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN == type) {
SScanPhysiNode* pScanPhyNode = (SScanPhysiNode*)pPhyNode; // simple child table.
STableScanPhysiNode* pTableScanNode = (STableScanPhysiNode*)pPhyNode;
-
- int32_t numOfCols = 0;
-
+ STimeWindowAggSupp twSup = {.waterMark = pTableScanNode->watermark,
+ .calTrigger = pTableScanNode->triggerType, .maxTs = INT64_MIN};
tsdbReaderT pDataReader = NULL;
if (pHandle->vnode) {
pDataReader = doCreateDataReader(pTableScanNode, pHandle, pTableListInfo, (uint64_t)queryId, taskId, pTagCond);
@@ -4503,24 +4519,15 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo
}
if (pDataReader == NULL && terrno != 0) {
- qDebug("pDataReader is NULL");
+ qDebug("%s pDataReader is NULL", GET_TASKID(pTaskInfo));
// return NULL;
} else {
- qDebug("pDataReader is not NULL");
+ qDebug("%s pDataReader is not NULL", GET_TASKID(pTaskInfo));
}
-
- SDataBlockDescNode* pDescNode = pScanPhyNode->node.pOutputDataBlockDesc;
- SOperatorInfo* pOperatorDumy = createTableScanOperatorInfo(pTableScanNode, pDataReader, pHandle, pTaskInfo);
-
SArray* tableIdList = extractTableIdList(pTableListInfo);
+ SOperatorInfo* pOperator = createStreamScanOperatorInfo(pDataReader, pHandle,
+ tableIdList, pTableScanNode, pTaskInfo, &twSup, pTableScanNode->tsColId);
- SSDataBlock* pResBlock = createResDataBlock(pDescNode);
- SArray* pCols =
- extractColMatchInfo(pScanPhyNode->pScanCols, pDescNode, &numOfCols, pTaskInfo, COL_MATCH_FROM_COL_ID);
-
- SOperatorInfo* pOperator =
- createStreamScanOperatorInfo(pHandle->reader, pDataReader, pHandle, pScanPhyNode->uid, pResBlock, pCols,
- tableIdList, pTaskInfo, pScanPhyNode->node.pConditions, pOperatorDumy);
taosArrayDestroy(tableIdList);
return pOperator;
} else if (QUERY_NODE_PHYSICAL_PLAN_SYSTABLE_SCAN == type) {
@@ -4602,8 +4609,8 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo
pOptr = createGroupOperatorInfo(ops[0], pExprInfo, num, pResBlock, pColList, pAggNode->node.pConditions,
pScalarExprInfo, numOfScalarExpr, pTaskInfo);
} else {
- pOptr = createAggregateOperatorInfo(ops[0], pExprInfo, num, pResBlock, pScalarExprInfo, numOfScalarExpr,
- pTaskInfo);
+ pOptr =
+ createAggregateOperatorInfo(ops[0], pExprInfo, num, pResBlock, pScalarExprInfo, numOfScalarExpr, pTaskInfo);
}
} else if (QUERY_NODE_PHYSICAL_PLAN_INTERVAL == type || QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERVAL == type) {
SIntervalPhysiNode* pIntervalPhyNode = (SIntervalPhysiNode*)pPhyNode;
@@ -4619,7 +4626,8 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo
.precision = ((SColumnNode*)pIntervalPhyNode->window.pTspk)->node.resType.precision};
STimeWindowAggSupp as = {.waterMark = pIntervalPhyNode->window.watermark,
- .calTrigger = pIntervalPhyNode->window.triggerType};
+ .calTrigger = pIntervalPhyNode->window.triggerType,
+ .maxTs = INT64_MIN};
int32_t tsSlotId = ((SColumnNode*)pIntervalPhyNode->window.pTspk)->slotId;
pOptr = createIntervalOperatorInfo(ops[0], pExprInfo, num, pResBlock, &interval, tsSlotId, &as, pTaskInfo);
@@ -4707,9 +4715,9 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo
}
int32_t compareTimeWindow(const void* p1, const void* p2, const void* param) {
- const SQueryTableDataCond *pCond = param;
- const STimeWindow *pWin1 = p1;
- const STimeWindow *pWin2 = p2;
+ const SQueryTableDataCond* pCond = param;
+ const STimeWindow* pWin1 = p1;
+ const STimeWindow* pWin2 = p2;
if (pCond->order == TSDB_ORDER_ASC) {
return pWin1->skey - pWin2->skey;
} else if (pCond->order == TSDB_ORDER_DESC) {
@@ -4729,8 +4737,8 @@ int32_t initQueryTableDataCond(SQueryTableDataCond* pCond, const STableScanPhysi
return terrno;
}
- //pCond->twindow = pTableScanNode->scanRange;
- //TODO: get it from stable scan node
+ // pCond->twindow = pTableScanNode->scanRange;
+ // TODO: get it from stable scan node
pCond->numOfTWindows = 1;
pCond->twindows = taosMemoryCalloc(pCond->numOfTWindows, sizeof(STimeWindow));
pCond->twindows[0] = pTableScanNode->scanRange;
@@ -4751,11 +4759,7 @@ int32_t initQueryTableDataCond(SQueryTableDataCond* pCond, const STableScanPhysi
TSWAP(pCond->twindows[i].skey, pCond->twindows[i].ekey);
}
}
- taosqsort(pCond->twindows,
- pCond->numOfTWindows,
- sizeof(STimeWindow),
- pCond,
- compareTimeWindow);
+ taosqsort(pCond->twindows, pCond->numOfTWindows, sizeof(STimeWindow), pCond, compareTimeWindow);
pCond->type = BLOCK_LOAD_OFFSET_SEQ_ORDER;
// pCond->type = pTableScanNode->scanFlag;
@@ -4915,27 +4919,31 @@ SArray* extractColMatchInfo(SNodeList* pNodeList, SDataBlockDescNode* pOutputNod
return pList;
}
-int32_t getTableList(void* metaHandle, int32_t tableType, uint64_t tableUid,
- STableListInfo* pListInfo, SNode* pTagCond) {
+int32_t getTableList(void* metaHandle, int32_t tableType, uint64_t tableUid, STableListInfo* pListInfo,
+ SNode* pTagCond) {
int32_t code = TSDB_CODE_SUCCESS;
pListInfo->pTableList = taosArrayInit(8, sizeof(STableKeyInfo));
if (tableType == TSDB_SUPER_TABLE) {
- if(pTagCond){
+ if (pTagCond) {
+ SIndexMetaArg metaArg = {.metaHandle = tsdbGetIdx(metaHandle), .suid = tableUid};
+
SArray* res = taosArrayInit(8, sizeof(uint64_t));
- code = doFilterTag(pTagCond, res);
+ code = doFilterTag(pTagCond, &metaArg, res);
if (code != TSDB_CODE_SUCCESS) {
- qError("doFilterTag error:%d", code);
+ qError("failed to get tableIds, reason: %s, suid: %" PRIu64 "", tstrerror(code), tableUid);
taosArrayDestroy(res);
terrno = code;
return code;
+ } else {
+ qDebug("sucess to get tableIds, size: %d, suid: %" PRIu64 "", (int)taosArrayGetSize(res), tableUid);
}
- for(int i = 0; i < taosArrayGetSize(res); i++){
+ for (int i = 0; i < taosArrayGetSize(res); i++) {
STableKeyInfo info = {.lastKey = TSKEY_INITIAL_VAL, .uid = *(uint64_t*)taosArrayGet(res, i)};
taosArrayPush(pListInfo->pTableList, &info);
}
taosArrayDestroy(res);
- }else{
+ } else {
code = tsdbGetAllTableList(metaHandle, tableUid, pListInfo->pTableList);
}
} else { // Create one table group.
@@ -4960,7 +4968,8 @@ SArray* extractTableIdList(const STableListInfo* pTableGroupInfo) {
tsdbReaderT doCreateDataReader(STableScanPhysiNode* pTableScanNode, SReadHandle* pHandle,
STableListInfo* pTableListInfo, uint64_t queryId, uint64_t taskId, SNode* pTagCond) {
- int32_t code = getTableList(pHandle->meta, pTableScanNode->scan.tableType, pTableScanNode->scan.uid, pTableListInfo, pTagCond);
+ int32_t code =
+ getTableList(pHandle->meta, pTableScanNode->scan.tableType, pTableScanNode->scan.uid, pTableListInfo, pTagCond);
if (code != TSDB_CODE_SUCCESS) {
goto _error;
}
@@ -4984,6 +4993,91 @@ _error:
return NULL;
}
+int32_t encodeOperator(SOperatorInfo* ops, char** result, int32_t *length){
+ int32_t code = TDB_CODE_SUCCESS;
+ char *pCurrent = NULL;
+ int32_t currLength = 0;
+ if(ops->fpSet.encodeResultRow){
+ if(result == NULL || length == NULL){
+ return TSDB_CODE_TSC_INVALID_INPUT;
+ }
+ code = ops->fpSet.encodeResultRow(ops, &pCurrent, &currLength);
+
+ if(code != TDB_CODE_SUCCESS){
+ if(*result != NULL){
+ taosMemoryFree(*result);
+ *result = NULL;
+ }
+ return code;
+ }
+
+ if(*result == NULL){
+ *result = (char*)taosMemoryCalloc(1, currLength + sizeof(int32_t));
+ if (*result == NULL) {
+ taosMemoryFree(pCurrent);
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
+ memcpy(*result + sizeof(int32_t), pCurrent, currLength);
+ *(int32_t*)(*result) = currLength + sizeof(int32_t);
+ }else{
+ int32_t sizePre = *(int32_t*)(*result);
+ char* tmp = (char*)taosMemoryRealloc(*result, sizePre + currLength);
+ if (tmp == NULL) {
+ taosMemoryFree(pCurrent);
+ taosMemoryFree(*result);
+ *result = NULL;
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
+ *result = tmp;
+ memcpy(*result + sizePre, pCurrent, currLength);
+ *(int32_t*)(*result) += currLength;
+ }
+ taosMemoryFree(pCurrent);
+ *length = *(int32_t*)(*result);
+ }
+
+ for (int32_t i = 0; i < ops->numOfDownstream; ++i) {
+ code = encodeOperator(ops->pDownstream[i], result, length);
+ if(code != TDB_CODE_SUCCESS){
+ return code;
+ }
+ }
+ return TDB_CODE_SUCCESS;
+}
+
+int32_t decodeOperator(SOperatorInfo* ops, char* result, int32_t length){
+ int32_t code = TDB_CODE_SUCCESS;
+ if(ops->fpSet.decodeResultRow){
+ if(result == NULL || length <= 0){
+ return TSDB_CODE_TSC_INVALID_INPUT;
+ }
+ char* data = result + 2 * sizeof(int32_t);
+ int32_t dataLength = *(int32_t*)(result + sizeof(int32_t));
+ code = ops->fpSet.decodeResultRow(ops, data, dataLength - sizeof(int32_t));
+ if(code != TDB_CODE_SUCCESS){
+ return code;
+ }
+
+ int32_t totalLength = *(int32_t*)result;
+ if(totalLength == dataLength + sizeof(int32_t)) { // the last data
+ result = NULL;
+ length = 0;
+ }else{
+ result += dataLength;
+ *(int32_t*)(result) = totalLength - dataLength;
+ length = totalLength - dataLength;
+ }
+ }
+
+ for (int32_t i = 0; i < ops->numOfDownstream; ++i) {
+ code = decodeOperator(ops->pDownstream[i], result, length);
+ if(code != TDB_CODE_SUCCESS){
+ return code;
+ }
+ }
+ return TDB_CODE_SUCCESS;
+}
+
int32_t createExecTaskInfoImpl(SSubplan* pPlan, SExecTaskInfo** pTaskInfo, SReadHandle* pHandle, uint64_t taskId,
EOPTR_EXEC_MODEL model) {
uint64_t queryId = pPlan->id.queryId;
@@ -4995,8 +5089,8 @@ int32_t createExecTaskInfoImpl(SSubplan* pPlan, SExecTaskInfo** pTaskInfo, SRead
goto _complete;
}
- (*pTaskInfo)->pRoot =
- createOperatorTree(pPlan->pNode, *pTaskInfo, pHandle, queryId, taskId, &(*pTaskInfo)->tableqinfoList, pPlan->pTagCond);
+ (*pTaskInfo)->pRoot = createOperatorTree(pPlan->pNode, *pTaskInfo, pHandle, queryId, taskId,
+ &(*pTaskInfo)->tableqinfoList, pPlan->pTagCond);
if (NULL == (*pTaskInfo)->pRoot) {
code = terrno;
goto _complete;
@@ -5181,20 +5275,6 @@ int32_t getOperatorExplainExecInfo(SOperatorInfo* operatorInfo, SExplainExecInfo
return TSDB_CODE_SUCCESS;
}
-int32_t initCacheSupporter(SCatchSupporter* pCatchSup, size_t rowSize, const char* pKey, const char* pDir) {
- pCatchSup->keySize = sizeof(int64_t) + sizeof(int64_t) + sizeof(TSKEY);
- pCatchSup->pKeyBuf = taosMemoryCalloc(1, pCatchSup->keySize);
- _hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY);
- pCatchSup->pWindowHashTable = taosHashInit(10000, hashFn, true, HASH_NO_LOCK);
- if (pCatchSup->pKeyBuf == NULL || pCatchSup->pWindowHashTable == NULL) {
- return TSDB_CODE_OUT_OF_MEMORY;
- }
-
- int32_t pageSize = rowSize * 32;
- int32_t bufSize = pageSize * 4096;
- return createDiskbasedBuf(&pCatchSup->pDataBuf, pageSize, bufSize, pKey, pDir);
-}
-
int32_t initStreamAggSupporter(SStreamAggSupporter* pSup, const char* pKey) {
pSup->keySize = sizeof(int64_t) + sizeof(TSKEY);
pSup->pKeyBuf = taosMemoryCalloc(1, pSup->keySize);
@@ -5212,5 +5292,5 @@ int32_t initStreamAggSupporter(SStreamAggSupporter* pSup, const char* pKey) {
if (bufSize <= pageSize) {
bufSize = pageSize * 4;
}
- return createDiskbasedBuf(&pSup->pResultBuf, pageSize, bufSize, pKey, "/tmp/");
+ return createDiskbasedBuf(&pSup->pResultBuf, pageSize, bufSize, pKey, TD_TMP_DIR_PATH);
}
diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c
index bbdb3b2b7e00ca32da52a7b7b794abb63a5e64b8..b954eb3a221a187bc4fe96a3088125e149304ece 100644
--- a/source/libs/executor/src/scanoperator.c
+++ b/source/libs/executor/src/scanoperator.c
@@ -158,7 +158,7 @@ static bool overlapWithTimeWindow(SInterval* pInterval, SDataBlockInfo* pBlockIn
return false;
}
-static void addTagPseudoColumnData(STableScanInfo* pTableScanInfo, SSDataBlock* pBlock);
+static void addTagPseudoColumnData(SReadHandle *pHandle, SExprInfo* pPseudoExpr, int32_t numOfPseudoExpr, SSDataBlock* pBlock);
static int32_t loadDataBlock(SOperatorInfo* pOperator, STableScanInfo* pTableScanInfo, SSDataBlock* pBlock,
uint32_t* status) {
@@ -250,7 +250,7 @@ static int32_t loadDataBlock(SOperatorInfo* pOperator, STableScanInfo* pTableSca
// currently only the tbname pseudo column
if (pTableScanInfo->numOfPseudoExpr > 0) {
- addTagPseudoColumnData(pTableScanInfo, pBlock);
+ addTagPseudoColumnData(&pTableScanInfo->readHandle, pTableScanInfo->pPseudoExpr, pTableScanInfo->numOfPseudoExpr, pBlock);
}
int64_t st = taosGetTimestampMs();
@@ -287,18 +287,18 @@ static void prepareForDescendingScan(STableScanInfo* pTableScanInfo, SqlFunction
compareTimeWindow);
}
-void addTagPseudoColumnData(STableScanInfo* pTableScanInfo, SSDataBlock* pBlock) {
+void addTagPseudoColumnData(SReadHandle *pHandle, SExprInfo* pPseudoExpr, int32_t numOfPseudoExpr, SSDataBlock* pBlock) {
// currently only the tbname pseudo column
- if (pTableScanInfo->numOfPseudoExpr == 0) {
+ if (numOfPseudoExpr == 0) {
return;
}
SMetaReader mr = {0};
- metaReaderInit(&mr, pTableScanInfo->readHandle.meta, 0);
+ metaReaderInit(&mr, pHandle->meta, 0);
metaGetTableEntryByUid(&mr, pBlock->info.uid);
- for (int32_t j = 0; j < pTableScanInfo->numOfPseudoExpr; ++j) {
- SExprInfo* pExpr = &pTableScanInfo->pPseudoExpr[j];
+ for (int32_t j = 0; j < numOfPseudoExpr; ++j) {
+ SExprInfo* pExpr = &pPseudoExpr[j];
int32_t dstSlotId = pExpr->base.resSchema.slotId;
@@ -309,7 +309,7 @@ void addTagPseudoColumnData(STableScanInfo* pTableScanInfo, SSDataBlock* pBlock)
// this is to handle the tbname
if (fmIsScanPseudoColumnFunc(functionId)) {
- setTbNameColData(pTableScanInfo->readHandle.meta, pBlock, pColInfoData, functionId);
+ setTbNameColData(pHandle->meta, pBlock, pColInfoData, functionId);
} else { // these are tags
const char* p = NULL;
if (pColInfoData->info.type == TSDB_DATA_TYPE_JSON) {
@@ -755,91 +755,6 @@ static SSDataBlock* getUpdateDataBlock(SStreamBlockScanInfo* pInfo, bool inverti
return NULL;
}
-void static setSupKeyBuf(SCatchSupporter* pSup, int64_t groupId, int64_t childId, TSKEY ts) {
- int64_t* pKey = (int64_t*)pSup->pKeyBuf;
- pKey[0] = groupId;
- pKey[1] = childId;
- pKey[2] = ts;
-}
-
-static int32_t catchWidonwInfo(SSDataBlock* pDataBlock, SCatchSupporter* pSup, int32_t pageId, int32_t tsIndex,
- int64_t childId) {
- SColumnInfoData* pColDataInfo = taosArrayGet(pDataBlock->pDataBlock, tsIndex);
- TSKEY* tsCols = (int64_t*)pColDataInfo->pData;
- for (int32_t i = 0; i < pDataBlock->info.rows; i++) {
- setSupKeyBuf(pSup, pDataBlock->info.groupId, childId, tsCols[i]);
- SWindowPosition* p1 = (SWindowPosition*)taosHashGet(pSup->pWindowHashTable, pSup->pKeyBuf, pSup->keySize);
- if (p1 == NULL) {
- SWindowPosition pos = {.pageId = pageId, .rowId = i};
- int32_t code = taosHashPut(pSup->pWindowHashTable, pSup->pKeyBuf, pSup->keySize, &pos, sizeof(SWindowPosition));
- if (code != TSDB_CODE_SUCCESS) {
- return code;
- }
- } else {
- p1->pageId = pageId;
- p1->rowId = i;
- }
- }
- return TSDB_CODE_SUCCESS;
-}
-
-static int32_t catchDatablock(SSDataBlock* pDataBlock, SCatchSupporter* pSup, int32_t tsIndex, int64_t childId) {
- int32_t start = 0;
- int32_t stop = 0;
- int32_t pageSize = getBufPageSize(pSup->pDataBuf);
- while (start < pDataBlock->info.rows) {
- blockDataSplitRows(pDataBlock, pDataBlock->info.hasVarCol, start, &stop, pageSize);
- SSDataBlock* pDB = blockDataExtractBlock(pDataBlock, start, stop - start + 1);
- if (pDB == NULL) {
- return terrno;
- }
- int32_t pageId = -1;
- void* pPage = getNewBufPage(pSup->pDataBuf, pDataBlock->info.groupId, &pageId);
- if (pPage == NULL) {
- blockDataDestroy(pDB);
- return terrno;
- }
- int32_t size = blockDataGetSize(pDB) + sizeof(int32_t) + pDB->info.numOfCols * sizeof(int32_t);
- assert(size <= pageSize);
- blockDataToBuf(pPage, pDB);
- setBufPageDirty(pPage, true);
- releaseBufPage(pSup->pDataBuf, pPage);
- blockDataDestroy(pDB);
- start = stop + 1;
- int32_t code = catchWidonwInfo(pDB, pSup, pageId, tsIndex, childId);
- if (code != TSDB_CODE_SUCCESS) {
- return code;
- }
- }
- return TSDB_CODE_SUCCESS;
-}
-
-static SSDataBlock* getDataFromCatch(SStreamBlockScanInfo* pInfo) {
- SSDataBlock* pBlock = pInfo->pUpdateRes;
- if (pInfo->updateResIndex < pBlock->info.rows) {
- blockDataCleanup(pInfo->pRes);
- SCatchSupporter* pCSup = &pInfo->childAggSup;
- SColumnInfoData* pColDataInfo = taosArrayGet(pBlock->pDataBlock, 0);
- TSKEY* tsCols = (TSKEY*)pColDataInfo->pData;
- int32_t size = taosArrayGetSize(pInfo->childIds);
- for (int32_t i = 0; i < size; i++) {
- int64_t id = *(int64_t*)taosArrayGet(pInfo->childIds, i);
- setSupKeyBuf(pCSup, pBlock->info.groupId, id, tsCols[pInfo->updateResIndex]);
- SWindowPosition* pos = (SWindowPosition*)taosHashGet(pCSup->pWindowHashTable, pCSup->pKeyBuf, pCSup->keySize);
- void* buf = getBufPage(pCSup->pDataBuf, pos->pageId);
- SSDataBlock* pDB = createOneDataBlock(pInfo->pRes, false);
- blockDataFromBuf(pDB, buf);
- SSDataBlock* pSub = blockDataExtractBlock(pDB, pos->rowId, 1);
- blockDataMerge(pInfo->pRes, pSub);
- blockDataDestroy(pDB);
- blockDataDestroy(pSub);
- }
- pInfo->updateResIndex++;
- return pInfo->pRes;
- }
- return NULL;
-}
-
static SSDataBlock* doStreamBlockScan(SOperatorInfo* pOperator) {
// NOTE: this operator does never check if current status is done or not
SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
@@ -910,8 +825,7 @@ static SSDataBlock* doStreamBlockScan(SOperatorInfo* pOperator) {
pInfo->pRes->info.groupId = groupId;
}
- int32_t numOfCols = pInfo->pRes->info.numOfCols;
- for (int32_t i = 0; i < numOfCols; ++i) {
+ for (int32_t i = 0; i < taosArrayGetSize(pInfo->pColMatchInfo); ++i) {
SColMatchInfo* pColMatchInfo = taosArrayGet(pInfo->pColMatchInfo, i);
if (!pColMatchInfo->output) {
continue;
@@ -941,10 +855,16 @@ static SSDataBlock* doStreamBlockScan(SOperatorInfo* pOperator) {
pTaskInfo->code = terrno;
return NULL;
}
+
rows = pBlockInfo->rows;
+
+ // currently only the tbname pseudo column
+ if (pInfo->numOfPseudoExpr > 0) {
+ addTagPseudoColumnData(&pInfo->readHandle, pInfo->pPseudoExpr, pInfo->numOfPseudoExpr, pInfo->pRes);
+ }
+
doFilter(pInfo->pCondition, pInfo->pRes, NULL);
blockDataUpdateTsWindow(pInfo->pRes, 0);
-
break;
}
@@ -972,10 +892,9 @@ static SSDataBlock* doStreamBlockScan(SOperatorInfo* pOperator) {
}
}
-SOperatorInfo* createStreamScanOperatorInfo(void* streamReadHandle, void* pDataReader, SReadHandle* pHandle,
- uint64_t uid, SSDataBlock* pResBlock, SArray* pColList,
- SArray* pTableIdList, SExecTaskInfo* pTaskInfo, SNode* pCondition,
- SOperatorInfo* pOperatorDumy) {
+SOperatorInfo* createStreamScanOperatorInfo(void* pDataReader, SReadHandle* pHandle,
+ SArray* pTableIdList, STableScanPhysiNode* pTableScanNode, SExecTaskInfo* pTaskInfo,
+ STimeWindowAggSupp* pTwSup, int16_t tsColId) {
SStreamBlockScanInfo* pInfo = taosMemoryCalloc(1, sizeof(SStreamBlockScanInfo));
SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo));
if (pInfo == NULL || pOperator == NULL) {
@@ -983,22 +902,28 @@ SOperatorInfo* createStreamScanOperatorInfo(void* streamReadHandle, void* pDataR
goto _error;
}
- STableScanInfo* pSTInfo = (STableScanInfo*)pOperatorDumy->info;
+ SScanPhysiNode* pScanPhyNode = &pTableScanNode->scan;
+
+ SDataBlockDescNode* pDescNode = pScanPhyNode->node.pOutputDataBlockDesc;
+ SOperatorInfo* pTableScanDummy = createTableScanOperatorInfo(pTableScanNode, pDataReader, pHandle, pTaskInfo);
- int32_t numOfOutput = taosArrayGetSize(pColList);
+ STableScanInfo* pSTInfo = (STableScanInfo*)pTableScanDummy->info;
- SArray* pColIds = taosArrayInit(4, sizeof(int16_t));
+ int32_t numOfCols = 0;
+ pInfo->pColMatchInfo = extractColMatchInfo(pScanPhyNode->pScanCols, pDescNode, &numOfCols, pTaskInfo, COL_MATCH_FROM_COL_ID);
+
+ int32_t numOfOutput = taosArrayGetSize(pInfo->pColMatchInfo);
+ SArray* pColIds = taosArrayInit(numOfOutput, sizeof(int16_t));
for (int32_t i = 0; i < numOfOutput; ++i) {
- SColMatchInfo* id = taosArrayGet(pColList, i);
- int16_t colId = id->colId;
+ SColMatchInfo* id = taosArrayGet(pInfo->pColMatchInfo, i);
+
+ int16_t colId = id->colId;
taosArrayPush(pColIds, &colId);
}
- pInfo->pColMatchInfo = pColList;
-
// set the extract column id to streamHandle
- tqReadHandleSetColIdList((STqReadHandle*)streamReadHandle, pColIds);
- int32_t code = tqReadHandleSetTbUidList(streamReadHandle, pTableIdList);
+ tqReadHandleSetColIdList((STqReadHandle*)pHandle->reader, pColIds);
+ int32_t code = tqReadHandleSetTbUidList(pHandle->reader, pTableIdList);
if (code != 0) {
goto _error;
}
@@ -1014,37 +939,39 @@ SOperatorInfo* createStreamScanOperatorInfo(void* streamReadHandle, void* pDataR
goto _error;
}
- pInfo->primaryTsIndex = 0; // TODO(liuyao) get it from physical plan
+ pInfo->primaryTsIndex = tsColId;
if (pSTInfo->interval.interval > 0) {
- pInfo->pUpdateInfo = updateInfoInitP(&pSTInfo->interval, 10000); // TODO(liuyao) get watermark from physical plan
+ pInfo->pUpdateInfo = updateInfoInitP(&pSTInfo->interval, pTwSup->waterMark);
} else {
pInfo->pUpdateInfo = NULL;
}
- pInfo->readHandle = *pHandle;
- pInfo->tableUid = uid;
- pInfo->streamBlockReader = streamReadHandle;
- pInfo->pRes = pResBlock;
- pInfo->pCondition = pCondition;
- pInfo->pDataReader = pDataReader;
- pInfo->scanMode = STREAM_SCAN_FROM_READERHANDLE;
- pInfo->pOperatorDumy = pOperatorDumy;
- pInfo->interval = pSTInfo->interval;
- pInfo->sessionSup = (SessionWindowSupporter){.pStreamAggSup = NULL, .gap = -1};
-
- initCacheSupporter(&pInfo->childAggSup, 1024, "StreamFinalInterval",
- "/tmp/"); // TODO(liuyao) get row size from phy plan
+  // create the pseudo columns info
+ if (pTableScanNode->scan.pScanPseudoCols != NULL) {
+ pInfo->pPseudoExpr = createExprInfo(pTableScanNode->scan.pScanPseudoCols, NULL, &pInfo->numOfPseudoExpr);
+ }
- pOperator->name = "StreamBlockScanOperator";
+ pInfo->readHandle = *pHandle;
+ pInfo->tableUid = pScanPhyNode->uid;
+ pInfo->streamBlockReader = pHandle->reader;
+ pInfo->pRes = createResDataBlock(pDescNode);
+ pInfo->pCondition = pScanPhyNode->node.pConditions;
+ pInfo->pDataReader = pDataReader;
+ pInfo->scanMode = STREAM_SCAN_FROM_READERHANDLE;
+ pInfo->pOperatorDumy = pTableScanDummy;
+ pInfo->interval = pSTInfo->interval;
+ pInfo->sessionSup = (SessionWindowSupporter){.pStreamAggSup = NULL, .gap = -1};
+
+ pOperator->name = "StreamBlockScanOperator";
pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN;
- pOperator->blocking = false;
- pOperator->status = OP_NOT_OPENED;
- pOperator->info = pInfo;
- pOperator->numOfExprs = pResBlock->info.numOfCols;
- pOperator->pTaskInfo = pTaskInfo;
-
- pOperator->fpSet =
- createOperatorFpSet(operatorDummyOpenFn, doStreamBlockScan, NULL, NULL, operatorDummyCloseFn, NULL, NULL, NULL);
+ pOperator->blocking = false;
+ pOperator->status = OP_NOT_OPENED;
+ pOperator->info = pInfo;
+ pOperator->numOfExprs = pInfo->pRes->info.numOfCols;
+ pOperator->pTaskInfo = pTaskInfo;
+
+ pOperator->fpSet = createOperatorFpSet(operatorDummyOpenFn, doStreamBlockScan, NULL,
+ NULL, operatorDummyCloseFn, NULL, NULL, NULL);
return pOperator;
diff --git a/source/libs/executor/src/timewindowoperator.c b/source/libs/executor/src/timewindowoperator.c
index 5d0a0a0270da9a54508be98f11a146d18201849b..829968d37f9a8a97cf1f256b493035ad0129f71a 100644
--- a/source/libs/executor/src/timewindowoperator.c
+++ b/source/libs/executor/src/timewindowoperator.c
@@ -622,18 +622,103 @@ static void saveDataBlockLastRow(char** pRow, SArray* pDataBlock, int32_t rowInd
}
}
-static SArray* hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pResultRowInfo, SSDataBlock* pBlock,
- uint64_t tableGroupId) {
+typedef int64_t (*__get_value_fn_t)(void* data, int32_t index);
+
+int32_t binarySearch(void* keyList, int num, TSKEY key, int order,
+ __get_value_fn_t getValuefn) {
+ int firstPos = 0, lastPos = num - 1, midPos = -1;
+ int numOfRows = 0;
+
+ if (num <= 0) return -1;
+ if (order == TSDB_ORDER_DESC) {
+    // find the first position which is smaller than or equal to the key
+ while (1) {
+ if (key >= getValuefn(keyList, lastPos)) return lastPos;
+ if (key == getValuefn(keyList, firstPos)) return firstPos;
+ if (key < getValuefn(keyList, firstPos)) return firstPos - 1;
+
+ numOfRows = lastPos - firstPos + 1;
+ midPos = (numOfRows >> 1) + firstPos;
+
+ if (key < getValuefn(keyList, midPos)) {
+ lastPos = midPos - 1;
+ } else if (key > getValuefn(keyList, midPos)) {
+ firstPos = midPos + 1;
+ } else {
+ break;
+ }
+ }
+
+ } else {
+    // find the first position which is greater than or equal to the key
+ while (1) {
+ if (key <= getValuefn(keyList, firstPos)) return firstPos;
+ if (key == getValuefn(keyList, lastPos)) return lastPos;
+
+ if (key > getValuefn(keyList, lastPos)) {
+ lastPos = lastPos + 1;
+ if (lastPos >= num)
+ return -1;
+ else
+ return lastPos;
+ }
+
+ numOfRows = lastPos - firstPos + 1;
+ midPos = (numOfRows >> 1) + firstPos;
+
+ if (key < getValuefn(keyList, midPos)) {
+ lastPos = midPos - 1;
+ } else if (key > getValuefn(keyList, midPos)) {
+ firstPos = midPos + 1;
+ } else {
+ break;
+ }
+ }
+ }
+
+ return midPos;
+}
+
+int64_t getReskey(void* data, int32_t index) {
+ SArray* res = (SArray*) data;
+ SResKeyPos* pos = taosArrayGetP(res, index);
+ return *(int64_t*)pos->key;
+}
+
+static int32_t saveResult(SResultRow* result, uint64_t groupId, SArray* pUpdated) {
+ int32_t size = taosArrayGetSize(pUpdated);
+ int32_t index = binarySearch(pUpdated, size, result->win.skey, TSDB_ORDER_DESC, getReskey);
+ if (index == -1) {
+ index = 0;
+ } else {
+ TSKEY resTs = getReskey(pUpdated, index);
+ if (resTs < result->win.skey) {
+ index++;
+ } else {
+ return TSDB_CODE_SUCCESS;
+ }
+ }
+
+ SResKeyPos* newPos = taosMemoryMalloc(sizeof(SResKeyPos) + sizeof(uint64_t));
+ if (newPos == NULL) {
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
+ newPos->groupId = groupId;
+ newPos->pos = (SResultRowPosition){.pageId = result->pageId, .offset = result->offset};
+ *(int64_t*)newPos->key = result->win.skey;
+ if (taosArrayInsert(pUpdated, index, &newPos) == NULL ){
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
+ return TSDB_CODE_SUCCESS;
+}
+
+static void hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pResultRowInfo, SSDataBlock* pBlock,
+ uint64_t tableGroupId, SArray* pUpdated) {
SIntervalAggOperatorInfo* pInfo = (SIntervalAggOperatorInfo*)pOperatorInfo->info;
SExecTaskInfo* pTaskInfo = pOperatorInfo->pTaskInfo;
int32_t numOfOutput = pOperatorInfo->numOfExprs;
- SArray* pUpdated = NULL;
- if (pInfo->execModel == OPTR_EXEC_MODEL_STREAM) {
- pUpdated = taosArrayInit(4, POINTER_BYTES);
- }
-
int32_t step = 1;
bool ascScan = (pInfo->order == TSDB_ORDER_ASC);
@@ -663,13 +748,10 @@ static SArray* hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pRe
longjmp(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
}
- if (pInfo->execModel == OPTR_EXEC_MODEL_STREAM) {
- SResKeyPos* pos = taosMemoryMalloc(sizeof(SResKeyPos) + sizeof(uint64_t));
- pos->groupId = tableGroupId;
- pos->pos = (SResultRowPosition){.pageId = pResult->pageId, .offset = pResult->offset};
- *(int64_t*)pos->key = pResult->win.skey;
-
- taosArrayPush(pUpdated, &pos);
+ if (pInfo->execModel == OPTR_EXEC_MODEL_STREAM &&
+ (pInfo->twAggSup.calTrigger == STREAM_TRIGGER_AT_ONCE ||
+ pInfo->twAggSup.calTrigger == 0) ) {
+ saveResult(pResult, tableGroupId, pUpdated);
}
int32_t forwardStep = 0;
@@ -742,13 +824,10 @@ static SArray* hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pRe
longjmp(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
}
- if (pInfo->execModel == OPTR_EXEC_MODEL_STREAM) {
- SResKeyPos* pos = taosMemoryMalloc(sizeof(SResKeyPos) + sizeof(uint64_t));
- pos->groupId = tableGroupId;
- pos->pos = (SResultRowPosition){.pageId = pResult->pageId, .offset = pResult->offset};
- *(int64_t*)pos->key = pResult->win.skey;
-
- taosArrayPush(pUpdated, &pos);
+ if (pInfo->execModel == OPTR_EXEC_MODEL_STREAM &&
+ (pInfo->twAggSup.calTrigger == STREAM_TRIGGER_AT_ONCE ||
+ pInfo->twAggSup.calTrigger == 0) ) {
+ saveResult(pResult, tableGroupId, pUpdated);
}
ekey = ascScan? nextWin.ekey:nextWin.skey;
@@ -769,7 +848,6 @@ static SArray* hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pRe
saveDataBlockLastRow(pInfo->pRow, pBlock->pDataBlock, rowIndex, pBlock->info.numOfCols);
}
- return pUpdated;
// updateResultRowInfoActiveIndex(pResultRowInfo, &pInfo->win, pRuntimeEnv->current->lastKey, true, false);
}
@@ -799,7 +877,7 @@ static int32_t doOpenIntervalAgg(SOperatorInfo* pOperator) {
STableQueryInfo* pTableQueryInfo = pInfo->pCurrent;
setIntervalQueryRange(pTableQueryInfo, pBlock->info.window.skey, &pTaskInfo->window);
- hashIntervalAgg(pOperator, &pInfo->binfo.resultRowInfo, pBlock, pBlock->info.groupId);
+ hashIntervalAgg(pOperator, &pInfo->binfo.resultRowInfo, pBlock, pBlock->info.groupId, NULL);
#if 0 // test for encode/decode result info
if(pOperator->encodeResultRow){
@@ -1067,7 +1145,7 @@ void doClearWindow(SAggSupporter* pSup, SOptrBasicInfo* pBinfo, char* pData,
}
static void doClearWindows(SAggSupporter* pSup, SOptrBasicInfo* pBinfo,
- SInterval* pIntrerval, int32_t tsIndex, int32_t numOfOutput, SSDataBlock* pBlock,
+ SInterval* pInterval, int32_t tsIndex, int32_t numOfOutput, SSDataBlock* pBlock,
SArray* pUpWins) {
SColumnInfoData* pColDataInfo = taosArrayGet(pBlock->pDataBlock, tsIndex);
TSKEY *tsCols = (TSKEY*)pColDataInfo->pData;
@@ -1075,8 +1153,8 @@ static void doClearWindows(SAggSupporter* pSup, SOptrBasicInfo* pBinfo,
for (int32_t i = 0; i < pBlock->info.rows; i += step) {
SResultRowInfo dumyInfo;
dumyInfo.cur.pageId = -1;
- STimeWindow win = getActiveTimeWindow(NULL, &dumyInfo, tsCols[i], pIntrerval,
- pIntrerval->precision, NULL);
+ STimeWindow win = getActiveTimeWindow(NULL, &dumyInfo, tsCols[i], pInterval,
+ pInterval->precision, NULL);
step = getNumOfRowsInTimeWindow(&pBlock->info, tsCols, i,
win.ekey, binarySearchForKey, NULL, TSDB_ORDER_ASC);
doClearWindow(pSup, pBinfo, (char*)&win.skey, sizeof(TKEY), pBlock->info.groupId, numOfOutput);
@@ -1086,6 +1164,39 @@ static void doClearWindows(SAggSupporter* pSup, SOptrBasicInfo* pBinfo,
}
}
+static int32_t closeIntervalWindow(SHashObj *pHashMap, STimeWindowAggSupp *pSup,
+ SInterval* pInterval, SArray* closeWins) {
+ void *pIte = NULL;
+ size_t keyLen = 0;
+ while((pIte = taosHashIterate(pHashMap, pIte)) != NULL) {
+ void* key = taosHashGetKey(pIte, &keyLen);
+ uint64_t groupId = *(uint64_t*) key;
+ ASSERT(keyLen == GET_RES_WINDOW_KEY_LEN(sizeof(TSKEY)));
+ TSKEY ts = *(uint64_t*) ((char*)key + sizeof(uint64_t));
+ SResultRowInfo dumyInfo;
+ dumyInfo.cur.pageId = -1;
+ STimeWindow win = getActiveTimeWindow(NULL, &dumyInfo, ts, pInterval,
+ pInterval->precision, NULL);
+ if (win.ekey < pSup->maxTs - pSup->waterMark) {
+ char keyBuf[GET_RES_WINDOW_KEY_LEN(sizeof(TSKEY))];
+ SET_RES_WINDOW_KEY(keyBuf, &ts, sizeof(TSKEY), groupId);
+ taosHashRemove(pHashMap, keyBuf, keyLen);
+ SResKeyPos* pos = taosMemoryMalloc(sizeof(SResKeyPos) + sizeof(uint64_t));
+ if (pos == NULL) {
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
+ pos->groupId = groupId;
+ pos->pos = *(SResultRowPosition*) pIte;
+ *(int64_t*)pos->key = ts;
+ if (!taosArrayPush(closeWins, &pos)) {
+ taosMemoryFree(pos);
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
+ }
+ }
+ return TSDB_CODE_SUCCESS;
+}
+
static SSDataBlock* doStreamIntervalAgg(SOperatorInfo* pOperator) {
SIntervalAggOperatorInfo* pInfo = pOperator->info;
SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
@@ -1106,7 +1217,9 @@ static SSDataBlock* doStreamIntervalAgg(SOperatorInfo* pOperator) {
SOperatorInfo* downstream = pOperator->pDownstream[0];
- SArray* pUpdated = NULL;
+ SArray* pUpdated = taosArrayInit(4, POINTER_BYTES);
+ SArray* pClosed = taosArrayInit(4, POINTER_BYTES);
+
while (1) {
SSDataBlock* pBlock = downstream->fpSet.getNextFn(downstream);
if (pBlock == NULL) {
@@ -1128,10 +1241,19 @@ static SSDataBlock* doStreamIntervalAgg(SOperatorInfo* pOperator) {
continue;
}
- pUpdated = hashIntervalAgg(pOperator, &pInfo->binfo.resultRowInfo, pBlock, pBlock->info.groupId);
+ hashIntervalAgg(pOperator, &pInfo->binfo.resultRowInfo, pBlock, pBlock->info.groupId, pUpdated);
+ pInfo->twAggSup.maxTs = TMAX(pInfo->twAggSup.maxTs, pBlock->info.window.ekey);
}
-
- finalizeUpdatedResult(pOperator->numOfExprs, pInfo->aggSup.pResultBuf, pUpdated, pInfo->binfo.rowCellInfoOffset);
+ closeIntervalWindow(pInfo->aggSup.pResultRowHashTable, &pInfo->twAggSup,
+ &pInfo->interval, pClosed);
+ finalizeUpdatedResult(pOperator->numOfExprs, pInfo->aggSup.pResultBuf, pClosed,
+ pInfo->binfo.rowCellInfoOffset);
+ if (pInfo->twAggSup.calTrigger == STREAM_TRIGGER__WINDOW_CLOSE) {
+ taosArrayAddAll(pUpdated, pClosed);
+ }
+ taosArrayDestroy(pClosed);
+ finalizeUpdatedResult(pOperator->numOfExprs, pInfo->aggSup.pResultBuf, pUpdated,
+ pInfo->binfo.rowCellInfoOffset);
initMultiResInfoFromArrayList(&pInfo->groupResInfo, pUpdated);
blockDataEnsureCapacity(pInfo->binfo.pRes, pOperator->resultInfo.capacity);
@@ -1935,63 +2057,6 @@ _error:
return NULL;
}
-typedef int64_t (*__get_value_fn_t)(void* data, int32_t index);
-
-int32_t binarySearch(void* keyList, int num, TSKEY key, int order,
- __get_value_fn_t getValuefn) {
- int firstPos = 0, lastPos = num - 1, midPos = -1;
- int numOfRows = 0;
-
- if (num <= 0) return -1;
- if (order == TSDB_ORDER_DESC) {
- // find the first position which is smaller than the key
- while (1) {
- if (key >= getValuefn(keyList, lastPos)) return lastPos;
- if (key == getValuefn(keyList, firstPos)) return firstPos;
- if (key < getValuefn(keyList, firstPos)) return firstPos - 1;
-
- numOfRows = lastPos - firstPos + 1;
- midPos = (numOfRows >> 1) + firstPos;
-
- if (key < getValuefn(keyList, midPos)) {
- lastPos = midPos - 1;
- } else if (key > getValuefn(keyList, midPos)) {
- firstPos = midPos + 1;
- } else {
- break;
- }
- }
-
- } else {
- // find the first position which is bigger than the key
- while (1) {
- if (key <= getValuefn(keyList, firstPos)) return firstPos;
- if (key == getValuefn(keyList, lastPos)) return lastPos;
-
- if (key > getValuefn(keyList, lastPos)) {
- lastPos = lastPos + 1;
- if (lastPos >= num)
- return -1;
- else
- return lastPos;
- }
-
- numOfRows = lastPos - firstPos + 1;
- midPos = (numOfRows >> 1) + firstPos;
-
- if (key < getValuefn(keyList, midPos)) {
- lastPos = midPos - 1;
- } else if (key > getValuefn(keyList, midPos)) {
- firstPos = midPos + 1;
- } else {
- break;
- }
- }
- }
-
- return midPos;
-}
-
int64_t getSessionWindowEndkey(void* data, int32_t index) {
SArray* pWinInfos = (SArray*) data;
SResultWindowInfo* pWin = taosArrayGet(pWinInfos, index);
@@ -2223,12 +2288,14 @@ static void doStreamSessionWindowAggImpl(SOperatorInfo* pOperator,
if (winNum > 0) {
compactTimeWindow(pInfo, winIndex, winNum, groupId, numOfOutput, pTaskInfo, pStUpdated, pStDeleted);
}
-
- code = taosHashPut(pStUpdated, &pCurWin->pos, sizeof(SResultRowPosition), &(pCurWin->win.skey), sizeof(TSKEY));
- if (code != TSDB_CODE_SUCCESS) {
- longjmp(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
+ pCurWin->isClosed = false;
+ if (pInfo->twAggSup.calTrigger == STREAM_TRIGGER_AT_ONCE) {
+ code = taosHashPut(pStUpdated, &pCurWin->pos, sizeof(SResultRowPosition), &(pCurWin->win.skey), sizeof(TSKEY));
+ if (code != TSDB_CODE_SUCCESS) {
+ longjmp(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
+ }
+ pCurWin->isOutput = true;
}
- pCurWin->isOutput = true;
i += winRows;
}
}
@@ -2325,6 +2392,37 @@ bool isFinalSession(SStreamSessionAggOperatorInfo* pInfo) {
return pInfo->pChildren != NULL;
}
+int32_t closeSessionWindow(SArray *pWins, STimeWindowAggSupp *pTwSup, SArray *pClosed,
+ int8_t calTrigger) {
+  // TODO(liuyao): save window to tdb
+ int32_t size = taosArrayGetSize(pWins);
+ for (int32_t i = 0; i < size; i++) {
+ SResultWindowInfo *pSeWin = taosArrayGet(pWins, i);
+ if (pSeWin->win.ekey < pTwSup->maxTs - pTwSup->waterMark) {
+ if (!pSeWin->isClosed) {
+ SResKeyPos* pos = taosMemoryMalloc(sizeof(SResKeyPos) + sizeof(uint64_t));
+ if (pos == NULL) {
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
+ pos->groupId = 0;
+ pos->pos = pSeWin->pos;
+ *(int64_t*)pos->key = pSeWin->win.ekey;
+ if (!taosArrayPush(pClosed, &pos)) {
+ taosMemoryFree(pos);
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
+ pSeWin->isClosed = true;
+ if (calTrigger == STREAM_TRIGGER__WINDOW_CLOSE) {
+ pSeWin->isOutput = true;
+ }
+ }
+ continue;
+ }
+ break;
+ }
+ return TSDB_CODE_SUCCESS;
+}
+
static SSDataBlock* doStreamSessionWindowAgg(SOperatorInfo* pOperator) {
if (pOperator->status == OP_EXEC_DONE) {
return NULL;
@@ -2377,13 +2475,21 @@ static SSDataBlock* doStreamSessionWindowAgg(SOperatorInfo* pOperator) {
doStreamSessionWindowAggImpl(pOperator, pBlock, NULL, NULL);
}
doStreamSessionWindowAggImpl(pOperator, pBlock, pStUpdated, pInfo->pStDeleted);
+ pInfo->twAggSup.maxTs = TMAX(pInfo->twAggSup.maxTs, pBlock->info.window.ekey);
}
-
// restore the value
pOperator->status = OP_RES_TO_RETURN;
+
+ SArray* pClosed = taosArrayInit(16, POINTER_BYTES);
+ closeSessionWindow(pInfo->streamAggSup.pResultRows, &pInfo->twAggSup, pClosed,
+ pInfo->twAggSup.calTrigger);
SArray* pUpdated = taosArrayInit(16, POINTER_BYTES);
copyUpdateResult(pStUpdated, pUpdated, pBInfo->pRes->info.groupId);
taosHashCleanup(pStUpdated);
+ if (pInfo->twAggSup.calTrigger == STREAM_TRIGGER__WINDOW_CLOSE) {
+ taosArrayAddAll(pUpdated, pClosed);
+ }
+
finalizeUpdatedResult(pOperator->numOfExprs, pInfo->streamAggSup.pResultBuf, pUpdated,
pInfo->binfo.rowCellInfoOffset);
initMultiResInfoFromArrayList(&pInfo->groupResInfo, pUpdated);
diff --git a/source/libs/function/CMakeLists.txt b/source/libs/function/CMakeLists.txt
index 7a4cd8092205786065015252432dcb4de0a1db41..ea401e56e5c6585b93344af99280bb450137f98f 100644
--- a/source/libs/function/CMakeLists.txt
+++ b/source/libs/function/CMakeLists.txt
@@ -14,7 +14,7 @@ target_include_directories(
target_link_libraries(
function
- PRIVATE os util common nodes scalar catalog qcom transport
+ PRIVATE os util common nodes scalar qcom transport
PUBLIC uv_a
)
diff --git a/source/libs/function/inc/functionMgtInt.h b/source/libs/function/inc/functionMgtInt.h
index 21d277665872fc520ecea0fe6157b8338789499b..29dd0bcd90d6297ca539bad8a5c5cd78ff151d1d 100644
--- a/source/libs/function/inc/functionMgtInt.h
+++ b/source/libs/function/inc/functionMgtInt.h
@@ -44,9 +44,7 @@ extern "C" {
#define FUNC_MGT_TEST_MASK(val, mask) (((val) & (mask)) != 0)
-#define FUNC_UDF_ID_START 5000
-#define FUNC_AGGREGATE_UDF_ID 5001
-#define FUNC_SCALAR_UDF_ID 5002
+#define FUNC_UDF_ID_START 5000
extern const int funcMgtUdfNum;
diff --git a/source/libs/function/src/builtins.c b/source/libs/function/src/builtins.c
index 10526e1a1c6ad6e5beddc0c38ad028e16e05f92c..24a781855ac2337863a381e7d01d22159ee78937 100644
--- a/source/libs/function/src/builtins.c
+++ b/source/libs/function/src/builtins.c
@@ -746,7 +746,7 @@ static int32_t translateDiff(SFunctionNode* pFunc, char* pErrBuf, int32_t len) {
}
uint8_t resType;
- if (IS_SIGNED_NUMERIC_TYPE(colType)) {
+ if (IS_SIGNED_NUMERIC_TYPE(colType) || TSDB_DATA_TYPE_BOOL == colType) {
resType = TSDB_DATA_TYPE_BIGINT;
} else {
resType = TSDB_DATA_TYPE_DOUBLE;
diff --git a/source/libs/function/src/functionMgt.c b/source/libs/function/src/functionMgt.c
index 506b0eb8da98444491b2f86f0e9951b71193de75..c2b325bc928be50ac908c103bb6a14a907156b39 100644
--- a/source/libs/function/src/functionMgt.c
+++ b/source/libs/function/src/functionMgt.c
@@ -16,7 +16,6 @@
#include "functionMgt.h"
#include "builtins.h"
-#include "catalog.h"
#include "functionMgtInt.h"
#include "taos.h"
#include "taoserror.h"
@@ -65,35 +64,19 @@ static bool isSpecificClassifyFunc(int32_t funcId, uint64_t classification) {
return FUNC_MGT_TEST_MASK(funcMgtBuiltins[funcId].classification, classification);
}
-static int32_t getUdfInfo(SFmGetFuncInfoParam* pParam, SFunctionNode* pFunc) {
- SFuncInfo funcInfo = {0};
- int32_t code = catalogGetUdfInfo(pParam->pCtg, pParam->pRpc, pParam->pMgmtEps, pFunc->functionName, &funcInfo);
- if (TSDB_CODE_SUCCESS != code) {
- return code;
- }
-
- pFunc->funcType = FUNCTION_TYPE_UDF;
- pFunc->funcId = TSDB_FUNC_TYPE_AGGREGATE == funcInfo.funcType ? FUNC_AGGREGATE_UDF_ID : FUNC_SCALAR_UDF_ID;
- pFunc->node.resType.type = funcInfo.outputType;
- pFunc->node.resType.bytes = funcInfo.outputLen;
- pFunc->udfBufSize = funcInfo.bufSize;
- tFreeSFuncInfo(&funcInfo);
- return TSDB_CODE_SUCCESS;
-}
-
int32_t fmFuncMgtInit() {
taosThreadOnce(&functionHashTableInit, doInitFunctionTable);
return initFunctionCode;
}
-int32_t fmGetFuncInfo(SFmGetFuncInfoParam* pParam, SFunctionNode* pFunc) {
+int32_t fmGetFuncInfo(SFunctionNode* pFunc, char* pMsg, int32_t msgLen) {
void* pVal = taosHashGet(gFunMgtService.pFuncNameHashTable, pFunc->functionName, strlen(pFunc->functionName));
if (NULL != pVal) {
pFunc->funcId = *(int32_t*)pVal;
pFunc->funcType = funcMgtBuiltins[pFunc->funcId].type;
- return funcMgtBuiltins[pFunc->funcId].translateFunc(pFunc, pParam->pErrBuf, pParam->errBufLen);
+ return funcMgtBuiltins[pFunc->funcId].translateFunc(pFunc, pMsg, msgLen);
}
- return getUdfInfo(pParam, pFunc);
+ return TSDB_CODE_FUNC_NOT_BUILTIN_FUNTION;
}
bool fmIsBuiltinFunc(const char* pFunc) {
diff --git a/source/libs/index/inc/indexCache.h b/source/libs/index/inc/indexCache.h
index 6cbe2532cc5b7532e011f14f76dea49437087006..1046a04db34062367fb84bef2c6b292da6b147d5 100644
--- a/source/libs/index/inc/indexCache.h
+++ b/source/libs/index/inc/indexCache.h
@@ -74,7 +74,7 @@ void indexCacheIteratorDestroy(Iterate* iiter);
int indexCachePut(void* cache, SIndexTerm* term, uint64_t uid);
// int indexCacheGet(void *cache, uint64_t *rst);
-int indexCacheSearch(void* cache, SIndexTermQuery* query, SIdxTempResult* tr, STermValueType* s);
+int indexCacheSearch(void* cache, SIndexTermQuery* query, SIdxTRslt* tr, STermValueType* s);
void indexCacheRef(IndexCache* cache);
void indexCacheUnRef(IndexCache* cache);
diff --git a/source/libs/index/inc/indexTfile.h b/source/libs/index/inc/indexTfile.h
index af32caa8219016cd6562423466d5f8a44eeb0229..ca55aa93da5a47bcefa26bf880d115abeb46b8c8 100644
--- a/source/libs/index/inc/indexTfile.h
+++ b/source/libs/index/inc/indexTfile.h
@@ -105,7 +105,7 @@ TFileReader* tfileGetReaderByCol(IndexTFile* tf, uint64_t suid, char* colName);
TFileReader* tfileReaderOpen(char* path, uint64_t suid, int64_t version, const char* colName);
TFileReader* tfileReaderCreate(WriterCtx* ctx);
void tfileReaderDestroy(TFileReader* reader);
-int tfileReaderSearch(TFileReader* reader, SIndexTermQuery* query, SIdxTempResult* tr);
+int tfileReaderSearch(TFileReader* reader, SIndexTermQuery* query, SIdxTRslt* tr);
void tfileReaderRef(TFileReader* reader);
void tfileReaderUnRef(TFileReader* reader);
@@ -120,7 +120,7 @@ int tfileWriterFinish(TFileWriter* tw);
IndexTFile* indexTFileCreate(const char* path);
void indexTFileDestroy(IndexTFile* tfile);
int indexTFilePut(void* tfile, SIndexTerm* term, uint64_t uid);
-int indexTFileSearch(void* tfile, SIndexTermQuery* query, SIdxTempResult* tr);
+int indexTFileSearch(void* tfile, SIndexTermQuery* query, SIdxTRslt* tr);
Iterate* tfileIteratorCreate(TFileReader* reader);
void tfileIteratorDestroy(Iterate* iterator);
diff --git a/source/libs/index/inc/indexUtil.h b/source/libs/index/inc/indexUtil.h
index f1676ed411a5e2074667816d1746dc607dc0f44d..dbaecaa9630b04b8b50f108c1a59e499f04899dc 100644
--- a/source/libs/index/inc/indexUtil.h
+++ b/source/libs/index/inc/indexUtil.h
@@ -66,7 +66,7 @@ extern "C" {
* [1, 4, 5]
* output:[4, 5]
*/
-void iIntersection(SArray *interResults, SArray *finalResult);
+void iIntersection(SArray *in, SArray *out);
/* multi sorted result union
* input: [1, 2, 4, 5]
@@ -74,7 +74,7 @@ void iIntersection(SArray *interResults, SArray *finalResult);
* [1, 4, 5]
* output:[1, 2, 3, 4, 5]
*/
-void iUnion(SArray *interResults, SArray *finalResult);
+void iUnion(SArray *in, SArray *out);
/* see example
* total: [1, 2, 4, 5, 7, 8]
@@ -92,19 +92,24 @@ typedef struct {
uint64_t data;
} SIdxVerdata;
+/*
+ * index temp result
+ *
+ */
typedef struct {
SArray *total;
- SArray *added;
- SArray *deled;
-} SIdxTempResult;
+ SArray *add;
+ SArray *del;
+} SIdxTRslt;
+
+SIdxTRslt *idxTRsltCreate();
-SIdxTempResult *sIdxTempResultCreate();
+void idxTRsltClear(SIdxTRslt *tr);
-void sIdxTempResultClear(SIdxTempResult *tr);
+void idxTRsltDestroy(SIdxTRslt *tr);
-void sIdxTempResultDestroy(SIdxTempResult *tr);
+void idxTRsltMergeTo(SIdxTRslt *tr, SArray *out);
-void sIdxTempResultMergeTo(SArray *result, SIdxTempResult *tr);
#ifdef __cplusplus
}
#endif
diff --git a/source/libs/index/src/index.c b/source/libs/index/src/index.c
index 500f5706491b61e05deea65d567b68ecc8cb1694..8584d95bf26bc2a586e0e5842ab8c4e5b5572bbd 100644
--- a/source/libs/index/src/index.c
+++ b/source/libs/index/src/index.c
@@ -29,7 +29,7 @@
#include "lucene++/Lucene_c.h"
#endif
-#define INDEX_NUM_OF_THREADS 1
+#define INDEX_NUM_OF_THREADS 5
#define INDEX_QUEUE_SIZE 200
#define INDEX_DATA_BOOL_NULL 0x02
@@ -85,7 +85,7 @@ static int indexMergeFinalResults(SArray* interResults, EIndexOperatorType oTyp
static int indexGenTFile(SIndex* index, IndexCache* cache, SArray* batch);
// merge cache and tfile by opera type
-static void indexMergeCacheAndTFile(SArray* result, IterateValue* icache, IterateValue* iTfv, SIdxTempResult* helper);
+static void indexMergeCacheAndTFile(SArray* result, IterateValue* icache, IterateValue* iTfv, SIdxTRslt* helper);
// static int32_t indexSerialTermKey(SIndexTerm* itm, char* buf);
// int32_t indexSerialKey(ICacheKey* key, char* buf);
@@ -201,6 +201,7 @@ int indexPut(SIndex* index, SIndexMultiTerm* fVals, uint64_t uid) {
char buf[128] = {0};
ICacheKey key = {.suid = p->suid, .colName = p->colName, .nColName = strlen(p->colName), .colType = p->colType};
int32_t sz = indexSerialCacheKey(&key, buf);
+ indexDebug("suid: %" PRIu64 ", colName: %s, colType: %d", key.suid, key.colName, key.colType);
IndexCache** cache = taosHashGet(index->colObj, buf, sz);
assert(*cache != NULL);
@@ -328,6 +329,7 @@ static int indexTermSearch(SIndex* sIdx, SIndexTermQuery* query, SArray** result
char buf[128] = {0};
ICacheKey key = {
.suid = term->suid, .colName = term->colName, .nColName = strlen(term->colName), .colType = term->colType};
+ indexDebug("suid: %" PRIu64 ", colName: %s, colType: %d", key.suid, key.colName, key.colType);
int32_t sz = indexSerialCacheKey(&key, buf);
taosThreadMutexLock(&sIdx->mtx);
@@ -341,7 +343,7 @@ static int indexTermSearch(SIndex* sIdx, SIndexTermQuery* query, SArray** result
int64_t st = taosGetTimestampUs();
- SIdxTempResult* tr = sIdxTempResultCreate();
+ SIdxTRslt* tr = idxTRsltCreate();
if (0 == indexCacheSearch(cache, query, tr, &s)) {
if (s == kTypeDeletion) {
indexInfo("col: %s already drop by", term->colName);
@@ -363,12 +365,12 @@ static int indexTermSearch(SIndex* sIdx, SIndexTermQuery* query, SArray** result
int64_t cost = taosGetTimestampUs() - st;
indexInfo("search cost: %" PRIu64 "us", cost);
- sIdxTempResultMergeTo(*result, tr);
+ idxTRsltMergeTo(tr, *result);
- sIdxTempResultDestroy(tr);
+ idxTRsltDestroy(tr);
return 0;
END:
- sIdxTempResultDestroy(tr);
+ idxTRsltDestroy(tr);
return -1;
}
static void indexInterResultsDestroy(SArray* results) {
@@ -404,18 +406,18 @@ static int indexMergeFinalResults(SArray* interResults, EIndexOperatorType oType
return 0;
}
-static void indexMayMergeTempToFinalResult(SArray* result, TFileValue* tfv, SIdxTempResult* tr) {
+static void indexMayMergeTempToFinalResult(SArray* result, TFileValue* tfv, SIdxTRslt* tr) {
int32_t sz = taosArrayGetSize(result);
if (sz > 0) {
TFileValue* lv = taosArrayGetP(result, sz - 1);
if (tfv != NULL && strcmp(lv->colVal, tfv->colVal) != 0) {
- sIdxTempResultMergeTo(lv->tableId, tr);
- sIdxTempResultClear(tr);
+ idxTRsltMergeTo(tr, lv->tableId);
+ idxTRsltClear(tr);
taosArrayPush(result, &tfv);
} else if (tfv == NULL) {
// handle last iterator
- sIdxTempResultMergeTo(lv->tableId, tr);
+ idxTRsltMergeTo(tr, lv->tableId);
} else {
// temp result saved in help
tfileValueDestroy(tfv);
@@ -424,7 +426,7 @@ static void indexMayMergeTempToFinalResult(SArray* result, TFileValue* tfv, SIdx
taosArrayPush(result, &tfv);
}
}
-static void indexMergeCacheAndTFile(SArray* result, IterateValue* cv, IterateValue* tv, SIdxTempResult* tr) {
+static void indexMergeCacheAndTFile(SArray* result, IterateValue* cv, IterateValue* tv, SIdxTRslt* tr) {
char* colVal = (cv != NULL) ? cv->colVal : tv->colVal;
TFileValue* tfv = tfileValueCreate(colVal);
@@ -434,9 +436,9 @@ static void indexMergeCacheAndTFile(SArray* result, IterateValue* cv, IterateVal
uint64_t id = *(uint64_t*)taosArrayGet(cv->val, 0);
uint32_t ver = cv->ver;
if (cv->type == ADD_VALUE) {
- INDEX_MERGE_ADD_DEL(tr->deled, tr->added, id)
+ INDEX_MERGE_ADD_DEL(tr->del, tr->add, id)
} else if (cv->type == DEL_VALUE) {
- INDEX_MERGE_ADD_DEL(tr->added, tr->deled, id)
+ INDEX_MERGE_ADD_DEL(tr->add, tr->del, id)
}
}
if (tv != NULL) {
@@ -489,7 +491,7 @@ int indexFlushCacheToTFile(SIndex* sIdx, void* cache) {
bool cn = cacheIter ? cacheIter->next(cacheIter) : false;
bool tn = tfileIter ? tfileIter->next(tfileIter) : false;
- SIdxTempResult* tr = sIdxTempResultCreate();
+ SIdxTRslt* tr = idxTRsltCreate();
while (cn == true || tn == true) {
IterateValue* cv = (cn == true) ? cacheIter->getValue(cacheIter) : NULL;
IterateValue* tv = (tn == true) ? tfileIter->getValue(tfileIter) : NULL;
@@ -515,7 +517,7 @@ int indexFlushCacheToTFile(SIndex* sIdx, void* cache) {
}
}
indexMayMergeTempToFinalResult(result, NULL, tr);
- sIdxTempResultDestroy(tr);
+ idxTRsltDestroy(tr);
int ret = indexGenTFile(sIdx, pCache, result);
indexDestroyFinalResult(result);
diff --git a/source/libs/index/src/indexCache.c b/source/libs/index/src/indexCache.c
index 6e52c4b1ba03ecd77cc4476022d61d160ae34890..3b33006452989fbe8f69155f30041d6345b1d1e0 100644
--- a/source/libs/index/src/indexCache.c
+++ b/source/libs/index/src/indexCache.c
@@ -36,32 +36,31 @@ static char* indexCacheTermGet(const void* pData);
static MemTable* indexInternalCacheCreate(int8_t type);
-static int32_t cacheSearchTerm(void* cache, SIndexTerm* ct, SIdxTempResult* tr, STermValueType* s);
-static int32_t cacheSearchPrefix(void* cache, SIndexTerm* ct, SIdxTempResult* tr, STermValueType* s);
-static int32_t cacheSearchSuffix(void* cache, SIndexTerm* ct, SIdxTempResult* tr, STermValueType* s);
-static int32_t cacheSearchRegex(void* cache, SIndexTerm* ct, SIdxTempResult* tr, STermValueType* s);
-static int32_t cacheSearchLessThan(void* cache, SIndexTerm* ct, SIdxTempResult* tr, STermValueType* s);
-static int32_t cacheSearchLessEqual(void* cache, SIndexTerm* ct, SIdxTempResult* tr, STermValueType* s);
-static int32_t cacheSearchGreaterThan(void* cache, SIndexTerm* ct, SIdxTempResult* tr, STermValueType* s);
-static int32_t cacheSearchGreaterEqual(void* cache, SIndexTerm* ct, SIdxTempResult* tr, STermValueType* s);
-static int32_t cacheSearchRange(void* cache, SIndexTerm* ct, SIdxTempResult* tr, STermValueType* s);
+static int32_t cacheSearchTerm(void* cache, SIndexTerm* ct, SIdxTRslt* tr, STermValueType* s);
+static int32_t cacheSearchPrefix(void* cache, SIndexTerm* ct, SIdxTRslt* tr, STermValueType* s);
+static int32_t cacheSearchSuffix(void* cache, SIndexTerm* ct, SIdxTRslt* tr, STermValueType* s);
+static int32_t cacheSearchRegex(void* cache, SIndexTerm* ct, SIdxTRslt* tr, STermValueType* s);
+static int32_t cacheSearchLessThan(void* cache, SIndexTerm* ct, SIdxTRslt* tr, STermValueType* s);
+static int32_t cacheSearchLessEqual(void* cache, SIndexTerm* ct, SIdxTRslt* tr, STermValueType* s);
+static int32_t cacheSearchGreaterThan(void* cache, SIndexTerm* ct, SIdxTRslt* tr, STermValueType* s);
+static int32_t cacheSearchGreaterEqual(void* cache, SIndexTerm* ct, SIdxTRslt* tr, STermValueType* s);
+static int32_t cacheSearchRange(void* cache, SIndexTerm* ct, SIdxTRslt* tr, STermValueType* s);
/*comm func of compare, used in (LE/LT/GE/GT compare)*/
-static int32_t cacheSearchCompareFunc(void* cache, SIndexTerm* ct, SIdxTempResult* tr, STermValueType* s,
- RangeType type);
-static int32_t cacheSearchTerm_JSON(void* cache, SIndexTerm* ct, SIdxTempResult* tr, STermValueType* s);
-static int32_t cacheSearchPrefix_JSON(void* cache, SIndexTerm* ct, SIdxTempResult* tr, STermValueType* s);
-static int32_t cacheSearchSuffix_JSON(void* cache, SIndexTerm* ct, SIdxTempResult* tr, STermValueType* s);
-static int32_t cacheSearchRegex_JSON(void* cache, SIndexTerm* ct, SIdxTempResult* tr, STermValueType* s);
-static int32_t cacheSearchLessThan_JSON(void* cache, SIndexTerm* ct, SIdxTempResult* tr, STermValueType* s);
-static int32_t cacheSearchLessEqual_JSON(void* cache, SIndexTerm* ct, SIdxTempResult* tr, STermValueType* s);
-static int32_t cacheSearchGreaterThan_JSON(void* cache, SIndexTerm* ct, SIdxTempResult* tr, STermValueType* s);
-static int32_t cacheSearchGreaterEqual_JSON(void* cache, SIndexTerm* ct, SIdxTempResult* tr, STermValueType* s);
-static int32_t cacheSearchRange_JSON(void* cache, SIndexTerm* ct, SIdxTempResult* tr, STermValueType* s);
-
-static int32_t cacheSearchCompareFunc_JSON(void* cache, SIndexTerm* term, SIdxTempResult* tr, STermValueType* s,
+static int32_t cacheSearchCompareFunc(void* cache, SIndexTerm* ct, SIdxTRslt* tr, STermValueType* s, RangeType type);
+static int32_t cacheSearchTerm_JSON(void* cache, SIndexTerm* ct, SIdxTRslt* tr, STermValueType* s);
+static int32_t cacheSearchPrefix_JSON(void* cache, SIndexTerm* ct, SIdxTRslt* tr, STermValueType* s);
+static int32_t cacheSearchSuffix_JSON(void* cache, SIndexTerm* ct, SIdxTRslt* tr, STermValueType* s);
+static int32_t cacheSearchRegex_JSON(void* cache, SIndexTerm* ct, SIdxTRslt* tr, STermValueType* s);
+static int32_t cacheSearchLessThan_JSON(void* cache, SIndexTerm* ct, SIdxTRslt* tr, STermValueType* s);
+static int32_t cacheSearchLessEqual_JSON(void* cache, SIndexTerm* ct, SIdxTRslt* tr, STermValueType* s);
+static int32_t cacheSearchGreaterThan_JSON(void* cache, SIndexTerm* ct, SIdxTRslt* tr, STermValueType* s);
+static int32_t cacheSearchGreaterEqual_JSON(void* cache, SIndexTerm* ct, SIdxTRslt* tr, STermValueType* s);
+static int32_t cacheSearchRange_JSON(void* cache, SIndexTerm* ct, SIdxTRslt* tr, STermValueType* s);
+
+static int32_t cacheSearchCompareFunc_JSON(void* cache, SIndexTerm* term, SIdxTRslt* tr, STermValueType* s,
RangeType type);
-static int32_t (*cacheSearch[][QUERY_MAX])(void* cache, SIndexTerm* ct, SIdxTempResult* tr, STermValueType* s) = {
+static int32_t (*cacheSearch[][QUERY_MAX])(void* cache, SIndexTerm* ct, SIdxTRslt* tr, STermValueType* s) = {
{cacheSearchTerm, cacheSearchPrefix, cacheSearchSuffix, cacheSearchRegex, cacheSearchLessThan, cacheSearchLessEqual,
cacheSearchGreaterThan, cacheSearchGreaterEqual, cacheSearchRange},
{cacheSearchTerm_JSON, cacheSearchPrefix_JSON, cacheSearchSuffix_JSON, cacheSearchRegex_JSON,
@@ -71,7 +70,7 @@ static int32_t (*cacheSearch[][QUERY_MAX])(void* cache, SIndexTerm* ct, SIdxTemp
static void doMergeWork(SSchedMsg* msg);
static bool indexCacheIteratorNext(Iterate* itera);
-static int32_t cacheSearchTerm(void* cache, SIndexTerm* term, SIdxTempResult* tr, STermValueType* s) {
+static int32_t cacheSearchTerm(void* cache, SIndexTerm* term, SIdxTRslt* tr, STermValueType* s) {
if (cache == NULL) {
return 0;
}
@@ -93,11 +92,11 @@ static int32_t cacheSearchTerm(void* cache, SIndexTerm* term, SIdxTempResult* tr
CacheTerm* c = (CacheTerm*)SL_GET_NODE_DATA(node);
if (0 == strcmp(c->colVal, pCt->colVal)) {
if (c->operaType == ADD_VALUE) {
- INDEX_MERGE_ADD_DEL(tr->deled, tr->added, c->uid)
+ INDEX_MERGE_ADD_DEL(tr->del, tr->add, c->uid)
// taosArrayPush(result, &c->uid);
*s = kTypeValue;
} else if (c->operaType == DEL_VALUE) {
- INDEX_MERGE_ADD_DEL(tr->added, tr->deled, c->uid)
+ INDEX_MERGE_ADD_DEL(tr->add, tr->del, c->uid)
}
} else {
break;
@@ -108,20 +107,19 @@ static int32_t cacheSearchTerm(void* cache, SIndexTerm* term, SIdxTempResult* tr
tSkipListDestroyIter(iter);
return 0;
}
-static int32_t cacheSearchPrefix(void* cache, SIndexTerm* term, SIdxTempResult* tr, STermValueType* s) {
+static int32_t cacheSearchPrefix(void* cache, SIndexTerm* term, SIdxTRslt* tr, STermValueType* s) {
// impl later
return 0;
}
-static int32_t cacheSearchSuffix(void* cache, SIndexTerm* term, SIdxTempResult* tr, STermValueType* s) {
+static int32_t cacheSearchSuffix(void* cache, SIndexTerm* term, SIdxTRslt* tr, STermValueType* s) {
// impl later
return 0;
}
-static int32_t cacheSearchRegex(void* cache, SIndexTerm* term, SIdxTempResult* tr, STermValueType* s) {
+static int32_t cacheSearchRegex(void* cache, SIndexTerm* term, SIdxTRslt* tr, STermValueType* s) {
// impl later
return 0;
}
-static int32_t cacheSearchCompareFunc(void* cache, SIndexTerm* term, SIdxTempResult* tr, STermValueType* s,
- RangeType type) {
+static int32_t cacheSearchCompareFunc(void* cache, SIndexTerm* term, SIdxTRslt* tr, STermValueType* s, RangeType type) {
if (cache == NULL) {
return 0;
}
@@ -133,6 +131,7 @@ static int32_t cacheSearchCompareFunc(void* cache, SIndexTerm* term, SIdxTempRes
CacheTerm* pCt = taosMemoryCalloc(1, sizeof(CacheTerm));
pCt->colVal = term->colVal;
+ pCt->colType = term->colType;
pCt->version = atomic_load_64(&pCache->version);
char* key = indexCacheTermGet(pCt);
@@ -147,11 +146,11 @@ static int32_t cacheSearchCompareFunc(void* cache, SIndexTerm* term, SIdxTempRes
TExeCond cond = cmpFn(c->colVal, pCt->colVal, pCt->colType);
if (cond == MATCH) {
if (c->operaType == ADD_VALUE) {
- INDEX_MERGE_ADD_DEL(tr->deled, tr->added, c->uid)
+ INDEX_MERGE_ADD_DEL(tr->del, tr->add, c->uid)
// taosArrayPush(result, &c->uid);
*s = kTypeValue;
} else if (c->operaType == DEL_VALUE) {
- INDEX_MERGE_ADD_DEL(tr->added, tr->deled, c->uid)
+ INDEX_MERGE_ADD_DEL(tr->add, tr->del, c->uid)
}
} else if (cond == CONTINUE) {
continue;
@@ -163,20 +162,20 @@ static int32_t cacheSearchCompareFunc(void* cache, SIndexTerm* term, SIdxTempRes
tSkipListDestroyIter(iter);
return TSDB_CODE_SUCCESS;
}
-static int32_t cacheSearchLessThan(void* cache, SIndexTerm* term, SIdxTempResult* tr, STermValueType* s) {
+static int32_t cacheSearchLessThan(void* cache, SIndexTerm* term, SIdxTRslt* tr, STermValueType* s) {
return cacheSearchCompareFunc(cache, term, tr, s, LT);
}
-static int32_t cacheSearchLessEqual(void* cache, SIndexTerm* term, SIdxTempResult* tr, STermValueType* s) {
+static int32_t cacheSearchLessEqual(void* cache, SIndexTerm* term, SIdxTRslt* tr, STermValueType* s) {
return cacheSearchCompareFunc(cache, term, tr, s, LE);
}
-static int32_t cacheSearchGreaterThan(void* cache, SIndexTerm* term, SIdxTempResult* tr, STermValueType* s) {
+static int32_t cacheSearchGreaterThan(void* cache, SIndexTerm* term, SIdxTRslt* tr, STermValueType* s) {
return cacheSearchCompareFunc(cache, term, tr, s, GT);
}
-static int32_t cacheSearchGreaterEqual(void* cache, SIndexTerm* term, SIdxTempResult* tr, STermValueType* s) {
+static int32_t cacheSearchGreaterEqual(void* cache, SIndexTerm* term, SIdxTRslt* tr, STermValueType* s) {
return cacheSearchCompareFunc(cache, term, tr, s, GE);
}
-static int32_t cacheSearchTerm_JSON(void* cache, SIndexTerm* term, SIdxTempResult* tr, STermValueType* s) {
+static int32_t cacheSearchTerm_JSON(void* cache, SIndexTerm* term, SIdxTRslt* tr, STermValueType* s) {
if (cache == NULL) {
return 0;
}
@@ -204,11 +203,11 @@ static int32_t cacheSearchTerm_JSON(void* cache, SIndexTerm* term, SIdxTempResul
if (0 == strcmp(c->colVal, pCt->colVal)) {
if (c->operaType == ADD_VALUE) {
- INDEX_MERGE_ADD_DEL(tr->deled, tr->added, c->uid)
+ INDEX_MERGE_ADD_DEL(tr->del, tr->add, c->uid)
// taosArrayPush(result, &c->uid);
*s = kTypeValue;
} else if (c->operaType == DEL_VALUE) {
- INDEX_MERGE_ADD_DEL(tr->added, tr->deled, c->uid)
+ INDEX_MERGE_ADD_DEL(tr->add, tr->del, c->uid)
}
} else {
break;
@@ -222,32 +221,32 @@ static int32_t cacheSearchTerm_JSON(void* cache, SIndexTerm* term, SIdxTempResul
return TSDB_CODE_SUCCESS;
}
-static int32_t cacheSearchPrefix_JSON(void* cache, SIndexTerm* term, SIdxTempResult* tr, STermValueType* s) {
+static int32_t cacheSearchPrefix_JSON(void* cache, SIndexTerm* term, SIdxTRslt* tr, STermValueType* s) {
return TSDB_CODE_SUCCESS;
}
-static int32_t cacheSearchSuffix_JSON(void* cache, SIndexTerm* term, SIdxTempResult* tr, STermValueType* s) {
+static int32_t cacheSearchSuffix_JSON(void* cache, SIndexTerm* term, SIdxTRslt* tr, STermValueType* s) {
return TSDB_CODE_SUCCESS;
}
-static int32_t cacheSearchRegex_JSON(void* cache, SIndexTerm* term, SIdxTempResult* tr, STermValueType* s) {
+static int32_t cacheSearchRegex_JSON(void* cache, SIndexTerm* term, SIdxTRslt* tr, STermValueType* s) {
return TSDB_CODE_SUCCESS;
}
-static int32_t cacheSearchLessThan_JSON(void* cache, SIndexTerm* term, SIdxTempResult* tr, STermValueType* s) {
+static int32_t cacheSearchLessThan_JSON(void* cache, SIndexTerm* term, SIdxTRslt* tr, STermValueType* s) {
return cacheSearchCompareFunc_JSON(cache, term, tr, s, LT);
}
-static int32_t cacheSearchLessEqual_JSON(void* cache, SIndexTerm* term, SIdxTempResult* tr, STermValueType* s) {
+static int32_t cacheSearchLessEqual_JSON(void* cache, SIndexTerm* term, SIdxTRslt* tr, STermValueType* s) {
return cacheSearchCompareFunc_JSON(cache, term, tr, s, LE);
}
-static int32_t cacheSearchGreaterThan_JSON(void* cache, SIndexTerm* term, SIdxTempResult* tr, STermValueType* s) {
+static int32_t cacheSearchGreaterThan_JSON(void* cache, SIndexTerm* term, SIdxTRslt* tr, STermValueType* s) {
return cacheSearchCompareFunc_JSON(cache, term, tr, s, GT);
}
-static int32_t cacheSearchGreaterEqual_JSON(void* cache, SIndexTerm* term, SIdxTempResult* tr, STermValueType* s) {
+static int32_t cacheSearchGreaterEqual_JSON(void* cache, SIndexTerm* term, SIdxTRslt* tr, STermValueType* s) {
return cacheSearchCompareFunc_JSON(cache, term, tr, s, GE);
}
-static int32_t cacheSearchRange_JSON(void* cache, SIndexTerm* term, SIdxTempResult* tr, STermValueType* s) {
+static int32_t cacheSearchRange_JSON(void* cache, SIndexTerm* term, SIdxTRslt* tr, STermValueType* s) {
return TSDB_CODE_SUCCESS;
}
-static int32_t cacheSearchCompareFunc_JSON(void* cache, SIndexTerm* term, SIdxTempResult* tr, STermValueType* s,
+static int32_t cacheSearchCompareFunc_JSON(void* cache, SIndexTerm* term, SIdxTRslt* tr, STermValueType* s,
RangeType type) {
if (cache == NULL) {
return 0;
@@ -289,11 +288,11 @@ static int32_t cacheSearchCompareFunc_JSON(void* cache, SIndexTerm* term, SIdxTe
TExeCond cond = cmpFn(p + skip, term->colVal, dType);
if (cond == MATCH) {
if (c->operaType == ADD_VALUE) {
- INDEX_MERGE_ADD_DEL(tr->deled, tr->added, c->uid)
+ INDEX_MERGE_ADD_DEL(tr->del, tr->add, c->uid)
// taosArrayPush(result, &c->uid);
*s = kTypeValue;
} else if (c->operaType == DEL_VALUE) {
- INDEX_MERGE_ADD_DEL(tr->added, tr->deled, c->uid)
+ INDEX_MERGE_ADD_DEL(tr->add, tr->del, c->uid)
}
} else if (cond == CONTINUE) {
continue;
@@ -309,7 +308,7 @@ static int32_t cacheSearchCompareFunc_JSON(void* cache, SIndexTerm* term, SIdxTe
return TSDB_CODE_SUCCESS;
}
-static int32_t cacheSearchRange(void* cache, SIndexTerm* term, SIdxTempResult* tr, STermValueType* s) {
+static int32_t cacheSearchRange(void* cache, SIndexTerm* term, SIdxTRslt* tr, STermValueType* s) {
// impl later
return 0;
}
@@ -568,7 +567,7 @@ int indexCacheDel(void* cache, const char* fieldValue, int32_t fvlen, uint64_t u
return 0;
}
-static int32_t indexQueryMem(MemTable* mem, SIndexTermQuery* query, SIdxTempResult* tr, STermValueType* s) {
+static int32_t indexQueryMem(MemTable* mem, SIndexTermQuery* query, SIdxTRslt* tr, STermValueType* s) {
if (mem == NULL) {
return 0;
}
@@ -582,7 +581,7 @@ static int32_t indexQueryMem(MemTable* mem, SIndexTermQuery* query, SIdxTempResu
return cacheSearch[0][qtype](mem, term, tr, s);
}
}
-int indexCacheSearch(void* cache, SIndexTermQuery* query, SIdxTempResult* result, STermValueType* s) {
+int indexCacheSearch(void* cache, SIndexTermQuery* query, SIdxTRslt* result, STermValueType* s) {
int64_t st = taosGetTimestampUs();
if (cache == NULL) {
return 0;
@@ -597,10 +596,10 @@ int indexCacheSearch(void* cache, SIndexTermQuery* query, SIdxTempResult* result
indexMemRef(imm);
taosThreadMutexUnlock(&pCache->mtx);
- int ret = indexQueryMem(mem, query, result, s);
+ int ret = (mem && mem->mem) ? indexQueryMem(mem, query, result, s) : 0;
if (ret == 0 && *s != kTypeDeletion) {
// continue search in imm
- ret = indexQueryMem(imm, query, result, s);
+ ret = (imm && imm->mem) ? indexQueryMem(imm, query, result, s) : 0;
}
indexMemUnRef(mem);
@@ -709,7 +708,7 @@ static int32_t indexCacheJsonTermCompare(const void* l, const void* r) {
return cmp;
}
static MemTable* indexInternalCacheCreate(int8_t type) {
- int ttype = INDEX_TYPE_CONTAIN_EXTERN_TYPE(type, TSDB_DATA_TYPE_JSON) ? TSDB_DATA_TYPE_BINARY : type;
+ int ttype = INDEX_TYPE_CONTAIN_EXTERN_TYPE(type, TSDB_DATA_TYPE_JSON) ? TSDB_DATA_TYPE_BINARY : TSDB_DATA_TYPE_BINARY;
int32_t (*cmpFn)(const void* l, const void* r) =
INDEX_TYPE_CONTAIN_EXTERN_TYPE(type, TSDB_DATA_TYPE_JSON) ? indexCacheJsonTermCompare : indexCacheTermCompare;
diff --git a/source/libs/index/src/indexFilter.c b/source/libs/index/src/indexFilter.c
index 0273867ccf040f3d3344066270ef3b8aa6a3bae2..b882caa168a3b89dcd037ee34eefa2f8b82bd904 100644
--- a/source/libs/index/src/indexFilter.c
+++ b/source/libs/index/src/indexFilter.c
@@ -37,12 +37,15 @@ typedef struct SIFParam {
int64_t suid; // add later
char dbName[TSDB_DB_NAME_LEN];
char colName[TSDB_COL_NAME_LEN];
+
+ SIndexMetaArg arg;
} SIFParam;
typedef struct SIFCtx {
- int32_t code;
- SHashObj *pRes; /* element is SIFParam */
- bool noExec; // true: just iterate condition tree, and add hint to executor plan
+ int32_t code;
+ SHashObj * pRes; /* element is SIFParam */
+ bool noExec; // true: just iterate condition tree, and add hint to executor plan
+ SIndexMetaArg arg;
// SIdxFltStatus st;
} SIFCtx;
@@ -257,7 +260,9 @@ static int32_t sifExecFunction(SFunctionNode *node, SIFCtx *ctx, SIFParam *outpu
return TSDB_CODE_QRY_INVALID_INPUT;
}
static int32_t sifDoIndex(SIFParam *left, SIFParam *right, int8_t operType, SIFParam *output) {
- SIndexTerm *tm = indexTermCreate(left->suid, DEFAULT, left->colValType, left->colName, strlen(left->colName),
+#ifdef USE_INVERTED_INDEX
+ SIndexMetaArg *arg = &output->arg;
+ SIndexTerm * tm = indexTermCreate(arg->suid, DEFAULT, left->colValType, left->colName, strlen(left->colName),
right->condValue, strlen(right->condValue));
if (tm == NULL) {
return TSDB_CODE_QRY_OUT_OF_MEMORY;
@@ -268,9 +273,13 @@ static int32_t sifDoIndex(SIFParam *left, SIFParam *right, int8_t operType, SIFP
SIndexMultiTermQuery *mtm = indexMultiTermQueryCreate(MUST);
indexMultiTermQueryAdd(mtm, tm, qtype);
- int ret = indexSearch(NULL, mtm, output->result);
+ int ret = indexSearch(arg->metaHandle, mtm, output->result);
+ indexDebug("index filter data size: %d", (int)taosArrayGetSize(output->result));
indexMultiTermQueryDestroy(mtm);
return ret;
+#else
+ return 0;
+#endif
}
static int32_t sifLessThanFunc(SIFParam *left, SIFParam *right, SIFParam *output) {
@@ -372,6 +381,8 @@ static int32_t sifExecOper(SOperatorNode *node, SIFCtx *ctx, SIFParam *output) {
SIFParam *params = NULL;
SIF_ERR_RET(sifInitOperParams(¶ms, node, ctx));
+ // ugly code, refactor later
+ output->arg = ctx->arg;
sif_func_t operFn = sifGetOperFn(node->opType);
if (ctx->noExec && operFn == NULL) {
output->status = SFLT_NOT_INDEX;
@@ -423,7 +434,7 @@ _return:
static EDealRes sifWalkFunction(SNode *pNode, void *context) {
SFunctionNode *node = (SFunctionNode *)pNode;
- SIFParam output = {0};
+ SIFParam output = {.result = taosArrayInit(8, sizeof(uint64_t))};
SIFCtx *ctx = context;
ctx->code = sifExecFunction(node, ctx, &output);
@@ -439,7 +450,8 @@ static EDealRes sifWalkFunction(SNode *pNode, void *context) {
}
static EDealRes sifWalkLogic(SNode *pNode, void *context) {
SLogicConditionNode *node = (SLogicConditionNode *)pNode;
- SIFParam output = {0};
+
+ SIFParam output = {.result = taosArrayInit(8, sizeof(uint64_t))};
SIFCtx *ctx = context;
ctx->code = sifExecLogic(node, ctx, &output);
@@ -455,7 +467,7 @@ static EDealRes sifWalkLogic(SNode *pNode, void *context) {
}
static EDealRes sifWalkOper(SNode *pNode, void *context) {
SOperatorNode *node = (SOperatorNode *)pNode;
- SIFParam output = {0};
+ SIFParam output = {.result = taosArrayInit(8, sizeof(uint64_t))};
SIFCtx *ctx = context;
ctx->code = sifExecOper(node, ctx, &output);
@@ -507,8 +519,9 @@ static int32_t sifCalculate(SNode *pNode, SIFParam *pDst) {
return TSDB_CODE_QRY_INVALID_INPUT;
}
int32_t code = 0;
- SIFCtx ctx = {.code = 0, .noExec = false};
+ SIFCtx ctx = {.code = 0, .noExec = false, .arg = pDst->arg};
ctx.pRes = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK);
+
if (NULL == ctx.pRes) {
indexError("index-filter failed to taosHashInit");
return TSDB_CODE_QRY_OUT_OF_MEMORY;
@@ -523,7 +536,9 @@ static int32_t sifCalculate(SNode *pNode, SIFParam *pDst) {
indexError("no valid res in hash, node:(%p), type(%d)", (void *)&pNode, nodeType(pNode));
SIF_ERR_RET(TSDB_CODE_QRY_APP_ERROR);
}
- taosArrayAddAll(pDst->result, res->result);
+ if (res->result != NULL) {
+ taosArrayAddAll(pDst->result, res->result);
+ }
sifFreeParam(res);
taosHashRemove(ctx.pRes, (void *)&pNode, POINTER_BYTES);
@@ -561,7 +576,7 @@ static int32_t sifGetFltHint(SNode *pNode, SIdxFltStatus *status) {
SIF_RET(code);
}
-int32_t doFilterTag(const SNode *pFilterNode, SArray *result) {
+int32_t doFilterTag(const SNode *pFilterNode, SIndexMetaArg *metaArg, SArray *result) {
if (pFilterNode == NULL) {
return TSDB_CODE_SUCCESS;
}
@@ -570,10 +585,12 @@ int32_t doFilterTag(const SNode *pFilterNode, SArray *result) {
// todo move to the initialization function
// SIF_ERR_RET(filterInitFromNode((SNode *)pFilterNode, &filter, 0));
- SIFParam param = {0};
+ SArray * output = taosArrayInit(8, sizeof(uint64_t));
+ SIFParam param = {.arg = *metaArg, .result = output};
SIF_ERR_RET(sifCalculate((SNode *)pFilterNode, ¶m));
taosArrayAddAll(result, param.result);
+ // taosArrayAddAll(result, param.result);
sifFreeParam(¶m);
SIF_RET(TSDB_CODE_SUCCESS);
}
diff --git a/source/libs/index/src/indexFst.c b/source/libs/index/src/indexFst.c
index 335b0865269604432259847de072a53854286c2c..892716f38708fed46bc755548436f2477d1e91e5 100644
--- a/source/libs/index/src/indexFst.c
+++ b/source/libs/index/src/indexFst.c
@@ -1324,7 +1324,7 @@ StreamWithStateResult* streamWithStateNextWith(StreamWithState* sws, StreamCallb
if (FST_NODE_ADDR(p->node) != fstGetRootAddr(sws->fst)) {
taosArrayPop(sws->inp);
}
- // streamStateDestroy(p);
+ streamStateDestroy(p);
continue;
}
FstTransition trn;
diff --git a/source/libs/index/src/indexFstUtil.c b/source/libs/index/src/indexFstUtil.c
index a980c6b740ab4f5b0e128479de342ce84c159c3c..5760b24900ef47e6a52419ade3d91cee9870709a 100644
--- a/source/libs/index/src/indexFstUtil.c
+++ b/source/libs/index/src/indexFstUtil.c
@@ -93,14 +93,15 @@ FstSlice fstSliceCreate(uint8_t* data, uint64_t len) {
// just shallow copy
FstSlice fstSliceCopy(FstSlice* s, int32_t start, int32_t end) {
FstString* str = s->str;
- str->ref++;
+ atomic_add_fetch_32(&str->ref, 1);
FstSlice t = {.str = str, .start = start + s->start, .end = end + s->start};
return t;
}
FstSlice fstSliceDeepCopy(FstSlice* s, int32_t start, int32_t end) {
- int32_t tlen = end - start + 1;
- int32_t slen;
+ int32_t tlen = end - start + 1;
+ int32_t slen;
+
uint8_t* data = fstSliceData(s, &slen);
assert(tlen <= slen);
@@ -129,8 +130,9 @@ uint8_t* fstSliceData(FstSlice* s, int32_t* size) {
}
void fstSliceDestroy(FstSlice* s) {
FstString* str = s->str;
- str->ref--;
- if (str->ref == 0) {
+
+ int32_t ref = atomic_sub_fetch_32(&str->ref, 1);
+ if (ref == 0) {
taosMemoryFree(str->data);
taosMemoryFree(str);
s->str = NULL;
diff --git a/source/libs/index/src/indexTfile.c b/source/libs/index/src/indexTfile.c
index 3de556e8b50c27f11687ea6b45fcf5da9675fed3..53dd2923ac8c1f07b62098a3663c030016b46a72 100644
--- a/source/libs/index/src/indexTfile.c
+++ b/source/libs/index/src/indexTfile.c
@@ -60,31 +60,31 @@ static void tfileGenFileFullName(char* fullname, const char* path, uint64_t s
/*
* search from tfile
*/
-static int32_t tfSearchTerm(void* reader, SIndexTerm* tem, SIdxTempResult* tr);
-static int32_t tfSearchPrefix(void* reader, SIndexTerm* tem, SIdxTempResult* tr);
-static int32_t tfSearchSuffix(void* reader, SIndexTerm* tem, SIdxTempResult* tr);
-static int32_t tfSearchRegex(void* reader, SIndexTerm* tem, SIdxTempResult* tr);
-static int32_t tfSearchLessThan(void* reader, SIndexTerm* tem, SIdxTempResult* tr);
-static int32_t tfSearchLessEqual(void* reader, SIndexTerm* tem, SIdxTempResult* tr);
-static int32_t tfSearchGreaterThan(void* reader, SIndexTerm* tem, SIdxTempResult* tr);
-static int32_t tfSearchGreaterEqual(void* reader, SIndexTerm* tem, SIdxTempResult* tr);
-static int32_t tfSearchRange(void* reader, SIndexTerm* tem, SIdxTempResult* tr);
-
-static int32_t tfSearchCompareFunc(void* reader, SIndexTerm* tem, SIdxTempResult* tr, RangeType ctype);
-
-static int32_t tfSearchTerm_JSON(void* reader, SIndexTerm* tem, SIdxTempResult* tr);
-static int32_t tfSearchPrefix_JSON(void* reader, SIndexTerm* tem, SIdxTempResult* tr);
-static int32_t tfSearchSuffix_JSON(void* reader, SIndexTerm* tem, SIdxTempResult* tr);
-static int32_t tfSearchRegex_JSON(void* reader, SIndexTerm* tem, SIdxTempResult* tr);
-static int32_t tfSearchLessThan_JSON(void* reader, SIndexTerm* tem, SIdxTempResult* tr);
-static int32_t tfSearchLessEqual_JSON(void* reader, SIndexTerm* tem, SIdxTempResult* tr);
-static int32_t tfSearchGreaterThan_JSON(void* reader, SIndexTerm* tem, SIdxTempResult* tr);
-static int32_t tfSearchGreaterEqual_JSON(void* reader, SIndexTerm* tem, SIdxTempResult* tr);
-static int32_t tfSearchRange_JSON(void* reader, SIndexTerm* tem, SIdxTempResult* tr);
-
-static int32_t tfSearchCompareFunc_JSON(void* reader, SIndexTerm* tem, SIdxTempResult* tr, RangeType ctype);
-
-static int32_t (*tfSearch[][QUERY_MAX])(void* reader, SIndexTerm* tem, SIdxTempResult* tr) = {
+static int32_t tfSearchTerm(void* reader, SIndexTerm* tem, SIdxTRslt* tr);
+static int32_t tfSearchPrefix(void* reader, SIndexTerm* tem, SIdxTRslt* tr);
+static int32_t tfSearchSuffix(void* reader, SIndexTerm* tem, SIdxTRslt* tr);
+static int32_t tfSearchRegex(void* reader, SIndexTerm* tem, SIdxTRslt* tr);
+static int32_t tfSearchLessThan(void* reader, SIndexTerm* tem, SIdxTRslt* tr);
+static int32_t tfSearchLessEqual(void* reader, SIndexTerm* tem, SIdxTRslt* tr);
+static int32_t tfSearchGreaterThan(void* reader, SIndexTerm* tem, SIdxTRslt* tr);
+static int32_t tfSearchGreaterEqual(void* reader, SIndexTerm* tem, SIdxTRslt* tr);
+static int32_t tfSearchRange(void* reader, SIndexTerm* tem, SIdxTRslt* tr);
+
+static int32_t tfSearchCompareFunc(void* reader, SIndexTerm* tem, SIdxTRslt* tr, RangeType ctype);
+
+static int32_t tfSearchTerm_JSON(void* reader, SIndexTerm* tem, SIdxTRslt* tr);
+static int32_t tfSearchPrefix_JSON(void* reader, SIndexTerm* tem, SIdxTRslt* tr);
+static int32_t tfSearchSuffix_JSON(void* reader, SIndexTerm* tem, SIdxTRslt* tr);
+static int32_t tfSearchRegex_JSON(void* reader, SIndexTerm* tem, SIdxTRslt* tr);
+static int32_t tfSearchLessThan_JSON(void* reader, SIndexTerm* tem, SIdxTRslt* tr);
+static int32_t tfSearchLessEqual_JSON(void* reader, SIndexTerm* tem, SIdxTRslt* tr);
+static int32_t tfSearchGreaterThan_JSON(void* reader, SIndexTerm* tem, SIdxTRslt* tr);
+static int32_t tfSearchGreaterEqual_JSON(void* reader, SIndexTerm* tem, SIdxTRslt* tr);
+static int32_t tfSearchRange_JSON(void* reader, SIndexTerm* tem, SIdxTRslt* tr);
+
+static int32_t tfSearchCompareFunc_JSON(void* reader, SIndexTerm* tem, SIdxTRslt* tr, RangeType ctype);
+
+static int32_t (*tfSearch[][QUERY_MAX])(void* reader, SIndexTerm* tem, SIdxTRslt* tr) = {
{tfSearchTerm, tfSearchPrefix, tfSearchSuffix, tfSearchRegex, tfSearchLessThan, tfSearchLessEqual,
tfSearchGreaterThan, tfSearchGreaterEqual, tfSearchRange},
{tfSearchTerm_JSON, tfSearchPrefix_JSON, tfSearchSuffix_JSON, tfSearchRegex_JSON, tfSearchLessThan_JSON,
@@ -211,16 +211,16 @@ void tfileReaderDestroy(TFileReader* reader) {
}
// T_REF_INC(reader);
fstDestroy(reader->fst);
- writerCtxDestroy(reader->ctx, reader->remove);
if (reader->remove) {
indexInfo("%s is removed", reader->ctx->file.buf);
} else {
indexInfo("%s is not removed", reader->ctx->file.buf);
}
+ writerCtxDestroy(reader->ctx, reader->remove);
taosMemoryFree(reader);
}
-static int32_t tfSearchTerm(void* reader, SIndexTerm* tem, SIdxTempResult* tr) {
+static int32_t tfSearchTerm(void* reader, SIndexTerm* tem, SIdxTRslt* tr) {
int ret = 0;
char* p = tem->colVal;
uint64_t sz = tem->nColVal;
@@ -243,7 +243,7 @@ static int32_t tfSearchTerm(void* reader, SIndexTerm* tem, SIdxTempResult* tr) {
return 0;
}
-static int32_t tfSearchPrefix(void* reader, SIndexTerm* tem, SIdxTempResult* tr) {
+static int32_t tfSearchPrefix(void* reader, SIndexTerm* tem, SIdxTRslt* tr) {
bool hasJson = INDEX_TYPE_CONTAIN_EXTERN_TYPE(tem->colType, TSDB_DATA_TYPE_JSON);
char* p = tem->colVal;
uint64_t sz = tem->nColVal;
@@ -279,7 +279,7 @@ static int32_t tfSearchPrefix(void* reader, SIndexTerm* tem, SIdxTempResult* tr)
}
return 0;
}
-static int32_t tfSearchSuffix(void* reader, SIndexTerm* tem, SIdxTempResult* tr) {
+static int32_t tfSearchSuffix(void* reader, SIndexTerm* tem, SIdxTRslt* tr) {
bool hasJson = INDEX_TYPE_CONTAIN_EXTERN_TYPE(tem->colType, TSDB_DATA_TYPE_JSON);
int ret = 0;
@@ -298,7 +298,7 @@ static int32_t tfSearchSuffix(void* reader, SIndexTerm* tem, SIdxTempResult* tr)
fstSliceDestroy(&key);
return 0;
}
-static int32_t tfSearchRegex(void* reader, SIndexTerm* tem, SIdxTempResult* tr) {
+static int32_t tfSearchRegex(void* reader, SIndexTerm* tem, SIdxTRslt* tr) {
bool hasJson = INDEX_TYPE_CONTAIN_EXTERN_TYPE(tem->colType, TSDB_DATA_TYPE_JSON);
int ret = 0;
@@ -319,7 +319,7 @@ static int32_t tfSearchRegex(void* reader, SIndexTerm* tem, SIdxTempResult* tr)
return 0;
}
-static int32_t tfSearchCompareFunc(void* reader, SIndexTerm* tem, SIdxTempResult* tr, RangeType type) {
+static int32_t tfSearchCompareFunc(void* reader, SIndexTerm* tem, SIdxTRslt* tr, RangeType type) {
int ret = 0;
char* p = tem->colVal;
int skip = 0;
@@ -358,19 +358,19 @@ static int32_t tfSearchCompareFunc(void* reader, SIndexTerm* tem, SIdxTempResult
fstStreamBuilderDestroy(sb);
return TSDB_CODE_SUCCESS;
}
-static int32_t tfSearchLessThan(void* reader, SIndexTerm* tem, SIdxTempResult* tr) {
+static int32_t tfSearchLessThan(void* reader, SIndexTerm* tem, SIdxTRslt* tr) {
return tfSearchCompareFunc(reader, tem, tr, LT);
}
-static int32_t tfSearchLessEqual(void* reader, SIndexTerm* tem, SIdxTempResult* tr) {
+static int32_t tfSearchLessEqual(void* reader, SIndexTerm* tem, SIdxTRslt* tr) {
return tfSearchCompareFunc(reader, tem, tr, LE);
}
-static int32_t tfSearchGreaterThan(void* reader, SIndexTerm* tem, SIdxTempResult* tr) {
+static int32_t tfSearchGreaterThan(void* reader, SIndexTerm* tem, SIdxTRslt* tr) {
return tfSearchCompareFunc(reader, tem, tr, GT);
}
-static int32_t tfSearchGreaterEqual(void* reader, SIndexTerm* tem, SIdxTempResult* tr) {
+static int32_t tfSearchGreaterEqual(void* reader, SIndexTerm* tem, SIdxTRslt* tr) {
return tfSearchCompareFunc(reader, tem, tr, GE);
}
-static int32_t tfSearchRange(void* reader, SIndexTerm* tem, SIdxTempResult* tr) {
+static int32_t tfSearchRange(void* reader, SIndexTerm* tem, SIdxTRslt* tr) {
bool hasJson = INDEX_TYPE_CONTAIN_EXTERN_TYPE(tem->colType, TSDB_DATA_TYPE_JSON);
int ret = 0;
char* p = tem->colVal;
@@ -399,7 +399,7 @@ static int32_t tfSearchRange(void* reader, SIndexTerm* tem, SIdxTempResult* tr)
fstSliceDestroy(&key);
return 0;
}
-static int32_t tfSearchTerm_JSON(void* reader, SIndexTerm* tem, SIdxTempResult* tr) {
+static int32_t tfSearchTerm_JSON(void* reader, SIndexTerm* tem, SIdxTRslt* tr) {
int ret = 0;
char* p = indexPackJsonData(tem);
int sz = strlen(p);
@@ -424,36 +424,36 @@ static int32_t tfSearchTerm_JSON(void* reader, SIndexTerm* tem, SIdxTempResult*
// deprecate api
return TSDB_CODE_SUCCESS;
}
-static int32_t tfSearchPrefix_JSON(void* reader, SIndexTerm* tem, SIdxTempResult* tr) {
+static int32_t tfSearchPrefix_JSON(void* reader, SIndexTerm* tem, SIdxTRslt* tr) {
// impl later
return TSDB_CODE_SUCCESS;
}
-static int32_t tfSearchSuffix_JSON(void* reader, SIndexTerm* tem, SIdxTempResult* tr) {
+static int32_t tfSearchSuffix_JSON(void* reader, SIndexTerm* tem, SIdxTRslt* tr) {
// impl later
return TSDB_CODE_SUCCESS;
}
-static int32_t tfSearchRegex_JSON(void* reader, SIndexTerm* tem, SIdxTempResult* tr) {
+static int32_t tfSearchRegex_JSON(void* reader, SIndexTerm* tem, SIdxTRslt* tr) {
// impl later
return TSDB_CODE_SUCCESS;
}
-static int32_t tfSearchLessThan_JSON(void* reader, SIndexTerm* tem, SIdxTempResult* tr) {
+static int32_t tfSearchLessThan_JSON(void* reader, SIndexTerm* tem, SIdxTRslt* tr) {
return tfSearchCompareFunc_JSON(reader, tem, tr, LT);
}
-static int32_t tfSearchLessEqual_JSON(void* reader, SIndexTerm* tem, SIdxTempResult* tr) {
+static int32_t tfSearchLessEqual_JSON(void* reader, SIndexTerm* tem, SIdxTRslt* tr) {
return tfSearchCompareFunc_JSON(reader, tem, tr, LE);
}
-static int32_t tfSearchGreaterThan_JSON(void* reader, SIndexTerm* tem, SIdxTempResult* tr) {
+static int32_t tfSearchGreaterThan_JSON(void* reader, SIndexTerm* tem, SIdxTRslt* tr) {
return tfSearchCompareFunc_JSON(reader, tem, tr, GT);
}
-static int32_t tfSearchGreaterEqual_JSON(void* reader, SIndexTerm* tem, SIdxTempResult* tr) {
+static int32_t tfSearchGreaterEqual_JSON(void* reader, SIndexTerm* tem, SIdxTRslt* tr) {
return tfSearchCompareFunc_JSON(reader, tem, tr, GE);
}
-static int32_t tfSearchRange_JSON(void* reader, SIndexTerm* tem, SIdxTempResult* tr) {
+static int32_t tfSearchRange_JSON(void* reader, SIndexTerm* tem, SIdxTRslt* tr) {
// impl later
return TSDB_CODE_SUCCESS;
}
-static int32_t tfSearchCompareFunc_JSON(void* reader, SIndexTerm* tem, SIdxTempResult* tr, RangeType ctype) {
+static int32_t tfSearchCompareFunc_JSON(void* reader, SIndexTerm* tem, SIdxTRslt* tr, RangeType ctype) {
int ret = 0;
int skip = 0;
@@ -501,7 +501,7 @@ static int32_t tfSearchCompareFunc_JSON(void* reader, SIndexTerm* tem, SIdxTempR
fstStreamBuilderDestroy(sb);
return TSDB_CODE_SUCCESS;
}
-int tfileReaderSearch(TFileReader* reader, SIndexTermQuery* query, SIdxTempResult* tr) {
+int tfileReaderSearch(TFileReader* reader, SIndexTermQuery* query, SIdxTRslt* tr) {
SIndexTerm* term = query->term;
EIndexQueryType qtype = query->qType;
int ret = 0;
@@ -673,7 +673,7 @@ void indexTFileDestroy(IndexTFile* tfile) {
taosMemoryFree(tfile);
}
-int indexTFileSearch(void* tfile, SIndexTermQuery* query, SIdxTempResult* result) {
+int indexTFileSearch(void* tfile, SIndexTermQuery* query, SIdxTRslt* result) {
int ret = -1;
if (tfile == NULL) {
return ret;
diff --git a/source/libs/index/src/indexUtil.c b/source/libs/index/src/indexUtil.c
index a618787fd49c96b729e782b4a01a5374c76639be..1d2027889572fcd809e378dcae13560b0bae51c1 100644
--- a/source/libs/index/src/indexUtil.c
+++ b/source/libs/index/src/indexUtil.c
@@ -36,24 +36,24 @@ static int iBinarySearch(SArray *arr, int s, int e, uint64_t k) {
return s;
}
-void iIntersection(SArray *inters, SArray *final) {
- int32_t sz = (int32_t)taosArrayGetSize(inters);
+void iIntersection(SArray *in, SArray *out) {
+ int32_t sz = (int32_t)taosArrayGetSize(in);
if (sz <= 0) {
return;
}
MergeIndex *mi = taosMemoryCalloc(sz, sizeof(MergeIndex));
for (int i = 0; i < sz; i++) {
- SArray *t = taosArrayGetP(inters, i);
+ SArray *t = taosArrayGetP(in, i);
mi[i].len = (int32_t)taosArrayGetSize(t);
mi[i].idx = 0;
}
- SArray *base = taosArrayGetP(inters, 0);
+ SArray *base = taosArrayGetP(in, 0);
for (int i = 0; i < taosArrayGetSize(base); i++) {
uint64_t tgt = *(uint64_t *)taosArrayGet(base, i);
bool has = true;
- for (int j = 1; j < taosArrayGetSize(inters); j++) {
- SArray *oth = taosArrayGetP(inters, j);
+ for (int j = 1; j < taosArrayGetSize(in); j++) {
+ SArray *oth = taosArrayGetP(in, j);
int mid = iBinarySearch(oth, mi[j].idx, mi[j].len - 1, tgt);
if (mid >= 0 && mid < mi[j].len) {
uint64_t val = *(uint64_t *)taosArrayGet(oth, mid);
@@ -64,33 +64,33 @@ void iIntersection(SArray *inters, SArray *final) {
}
}
if (has == true) {
- taosArrayPush(final, &tgt);
+ taosArrayPush(out, &tgt);
}
}
taosMemoryFreeClear(mi);
}
-void iUnion(SArray *inters, SArray *final) {
- int32_t sz = (int32_t)taosArrayGetSize(inters);
+void iUnion(SArray *in, SArray *out) {
+ int32_t sz = (int32_t)taosArrayGetSize(in);
if (sz <= 0) {
return;
}
if (sz == 1) {
- taosArrayAddAll(final, taosArrayGetP(inters, 0));
+ taosArrayAddAll(out, taosArrayGetP(in, 0));
return;
}
MergeIndex *mi = taosMemoryCalloc(sz, sizeof(MergeIndex));
for (int i = 0; i < sz; i++) {
- SArray *t = taosArrayGetP(inters, i);
+ SArray *t = taosArrayGetP(in, i);
mi[i].len = (int32_t)taosArrayGetSize(t);
mi[i].idx = 0;
}
while (1) {
- uint64_t mVal = UINT_MAX;
+ uint64_t mVal = UINT64_MAX;
int mIdx = -1;
for (int j = 0; j < sz; j++) {
- SArray *t = taosArrayGetP(inters, j);
+ SArray *t = taosArrayGetP(in, j);
if (mi[j].idx >= mi[j].len) {
continue;
}
@@ -102,13 +102,13 @@ void iUnion(SArray *inters, SArray *final) {
}
if (mIdx != -1) {
mi[mIdx].idx++;
- if (taosArrayGetSize(final) > 0) {
- uint64_t lVal = *(uint64_t *)taosArrayGetLast(final);
+ if (taosArrayGetSize(out) > 0) {
+ uint64_t lVal = *(uint64_t *)taosArrayGetLast(out);
if (lVal == mVal) {
continue;
}
}
- taosArrayPush(final, &mVal);
+ taosArrayPush(out, &mVal);
} else {
break;
}
@@ -158,41 +158,44 @@ int verdataCompare(const void *a, const void *b) {
return cmp;
}
-SIdxTempResult *sIdxTempResultCreate() {
- SIdxTempResult *tr = taosMemoryCalloc(1, sizeof(SIdxTempResult));
+SIdxTRslt *idxTRsltCreate() {
+ SIdxTRslt *tr = taosMemoryCalloc(1, sizeof(SIdxTRslt));
tr->total = taosArrayInit(4, sizeof(uint64_t));
- tr->added = taosArrayInit(4, sizeof(uint64_t));
- tr->deled = taosArrayInit(4, sizeof(uint64_t));
+ tr->add = taosArrayInit(4, sizeof(uint64_t));
+ tr->del = taosArrayInit(4, sizeof(uint64_t));
return tr;
}
-void sIdxTempResultClear(SIdxTempResult *tr) {
+void idxTRsltClear(SIdxTRslt *tr) {
if (tr == NULL) {
return;
}
taosArrayClear(tr->total);
- taosArrayClear(tr->added);
- taosArrayClear(tr->deled);
+ taosArrayClear(tr->add);
+ taosArrayClear(tr->del);
}
-void sIdxTempResultDestroy(SIdxTempResult *tr) {
+void idxTRsltDestroy(SIdxTRslt *tr) {
if (tr == NULL) {
return;
}
taosArrayDestroy(tr->total);
- taosArrayDestroy(tr->added);
- taosArrayDestroy(tr->deled);
+ taosArrayDestroy(tr->add);
+ taosArrayDestroy(tr->del);
}
-void sIdxTempResultMergeTo(SArray *result, SIdxTempResult *tr) {
+void idxTRsltMergeTo(SIdxTRslt *tr, SArray *result) {
taosArraySort(tr->total, uidCompare);
- taosArraySort(tr->added, uidCompare);
- taosArraySort(tr->deled, uidCompare);
-
- SArray *arrs = taosArrayInit(2, sizeof(void *));
- taosArrayPush(arrs, &tr->total);
- taosArrayPush(arrs, &tr->added);
-
- iUnion(arrs, result);
- taosArrayDestroy(arrs);
-
- iExcept(result, tr->deled);
+ taosArraySort(tr->add, uidCompare);
+ taosArraySort(tr->del, uidCompare);
+
+ if (taosArrayGetSize(tr->total) == 0 || taosArrayGetSize(tr->add) == 0) {
+ SArray *t = taosArrayGetSize(tr->total) == 0 ? tr->add : tr->total;
+ taosArrayAddAll(result, t);
+ } else {
+ SArray *arrs = taosArrayInit(2, sizeof(void *));
+ taosArrayPush(arrs, &tr->total);
+ taosArrayPush(arrs, &tr->add);
+ iUnion(arrs, result);
+ taosArrayDestroy(arrs);
+ }
+ iExcept(result, tr->del);
}
diff --git a/source/libs/index/test/CMakeLists.txt b/source/libs/index/test/CMakeLists.txt
index c0b47e74c6b0561141806dae8ce14ab4d632ec8e..2835084a81b87e358916c20ce0e6c70cf6884021 100644
--- a/source/libs/index/test/CMakeLists.txt
+++ b/source/libs/index/test/CMakeLists.txt
@@ -1,74 +1,74 @@
-add_executable(indexTest "")
-add_executable(fstTest "")
-add_executable(fstUT "")
-add_executable(UtilUT "")
-add_executable(jsonUT "")
+add_executable(idxTest "")
+add_executable(idxFstTest "")
+add_executable(idxFstUT "")
+add_executable(idxUtilUT "")
+add_executable(idxJsonUT "")
-target_sources(indexTest
+target_sources(idxTest
PRIVATE
"indexTests.cc"
)
-target_sources(fstTest
+target_sources(idxFstTest
PRIVATE
"fstTest.cc"
)
-target_sources(fstUT
+target_sources(idxFstUT
PRIVATE
"fstUT.cc"
)
-target_sources(UtilUT
+target_sources(idxUtilUT
PRIVATE
"utilUT.cc"
)
-target_sources(jsonUT
+target_sources(idxJsonUT
PRIVATE
"jsonUT.cc"
)
-target_include_directories ( indexTest
+target_include_directories (idxTest
PUBLIC
"${TD_SOURCE_DIR}/include/libs/index"
"${CMAKE_CURRENT_SOURCE_DIR}/../inc"
)
-target_include_directories ( fstTest
+target_include_directories (idxFstTest
PUBLIC
"${TD_SOURCE_DIR}/include/libs/index"
"${CMAKE_CURRENT_SOURCE_DIR}/../inc"
)
-target_include_directories ( fstUT
+target_include_directories (idxFstUT
PUBLIC
"${TD_SOURCE_DIR}/include/libs/index"
"${CMAKE_CURRENT_SOURCE_DIR}/../inc"
)
-target_include_directories ( UtilUT
+target_include_directories (idxUtilUT
PUBLIC
"${TD_SOURCE_DIR}/include/libs/index"
"${CMAKE_CURRENT_SOURCE_DIR}/../inc"
)
-target_include_directories (jsonUT
+target_include_directories (idxJsonUT
PUBLIC
"${TD_SOURCE_DIR}/include/libs/index"
"${CMAKE_CURRENT_SOURCE_DIR}/../inc"
)
-target_link_libraries (indexTest
+target_link_libraries (idxTest
os
util
common
gtest_main
index
)
-target_link_libraries (fstTest
+target_link_libraries (idxFstTest
os
util
common
gtest_main
index
)
-target_link_libraries (fstUT
+target_link_libraries (idxFstUT
os
util
common
@@ -76,7 +76,7 @@ target_link_libraries (fstUT
index
)
-target_link_libraries (UtilUT
+target_link_libraries (idxUtilUT
os
util
common
@@ -84,7 +84,7 @@ target_link_libraries (UtilUT
index
)
-target_link_libraries (jsonUT
+target_link_libraries (idxJsonUT
os
util
common
@@ -92,19 +92,21 @@ target_link_libraries (jsonUT
index
)
-add_test(
- NAME idxtest
- COMMAND indexTest
-)
-add_test(
- NAME idxJsonUT
- COMMAND jsonUT
-)
+if(NOT TD_WINDOWS)
+ add_test(
+ NAME idxtest
+ COMMAND idxTest
+ )
+ add_test(
+ NAME idxJsonUT
+ COMMAND idxJsonUT
+ )
+endif(NOT TD_WINDOWS)
add_test(
NAME idxUtilUT
- COMMAND UtilUT
+ COMMAND idxUtilUT
)
add_test(
NAME idxFstUT
- COMMAND fstUT
+ COMMAND idxFstUT
)
diff --git a/source/libs/index/test/indexTests.cc b/source/libs/index/test/indexTests.cc
index 2d06002af854b1860faf7985fd23e68275207c46..74a30c3387ea3c3133e4e4f82ffd3dd8dc38f540 100644
--- a/source/libs/index/test/indexTests.cc
+++ b/source/libs/index/test/indexTests.cc
@@ -272,7 +272,7 @@ void validateFst() {
}
delete m;
}
-static std::string logDir = "/tmp/log";
+static std::string logDir = TD_TMP_DIR_PATH "log";
static void initLog() {
const char* defaultLogFileNamePrefix = "taoslog";
@@ -411,12 +411,12 @@ class TFileObj {
//
//
}
- SIdxTempResult* tr = sIdxTempResultCreate();
+ SIdxTRslt* tr = idxTRsltCreate();
int ret = tfileReaderSearch(reader_, query, tr);
- sIdxTempResultMergeTo(result, tr);
- sIdxTempResultDestroy(tr);
+ idxTRsltMergeTo(tr, result);
+ idxTRsltDestroy(tr);
return ret;
}
~TFileObj() {
@@ -531,11 +531,11 @@ class CacheObj {
indexCacheDebug(cache);
}
int Get(SIndexTermQuery* query, int16_t colId, int32_t version, SArray* result, STermValueType* s) {
- SIdxTempResult* tr = sIdxTempResultCreate();
+ SIdxTRslt* tr = idxTRsltCreate();
int ret = indexCacheSearch(cache, query, tr, s);
- sIdxTempResultMergeTo(result, tr);
- sIdxTempResultDestroy(tr);
+ idxTRsltMergeTo(tr, result);
+ idxTRsltDestroy(tr);
if (ret != 0) {
std::cout << "failed to get from cache:" << ret << std::endl;
@@ -916,7 +916,7 @@ TEST_F(IndexEnv2, testIndexOpen) {
}
}
TEST_F(IndexEnv2, testEmptyIndexOpen) {
- std::string path = "/tmp/test";
+ std::string path = TD_TMP_DIR_PATH "test";
if (index->Init(path) != 0) {
std::cout << "failed to init index" << std::endl;
exit(1);
diff --git a/source/libs/index/test/utilUT.cc b/source/libs/index/test/utilUT.cc
index 18a2b457c41c2cd66f20a01f3690d0af4fe69d3d..4a30160244d82b8c00b3e7b031d6fd492057ec21 100644
--- a/source/libs/index/test/utilUT.cc
+++ b/source/libs/index/test/utilUT.cc
@@ -226,6 +226,22 @@ TEST_F(UtilEnv, 04union) {
iUnion(src, rslt);
assert(taosArrayGetSize(rslt) == 12);
}
+TEST_F(UtilEnv, 05unionExcept) {
+ clearSourceArray(src);
+ clearFinalArray(rslt);
+
+ uint64_t arr2[] = {7};
+ SArray * f = (SArray *)taosArrayGetP(src, 1);
+ for (int i = 0; i < sizeof(arr2) / sizeof(arr2[0]); i++) {
+ taosArrayPush(f, &arr2[i]);
+ }
+
+ iUnion(src, rslt);
+
+ SArray *ept = taosArrayInit(0, sizeof(uint64_t));
+ iExcept(rslt, ept);
+ EXPECT_EQ(taosArrayGetSize(rslt), 1);
+}
TEST_F(UtilEnv, 01Except) {
SArray *total = taosArrayInit(4, sizeof(uint64_t));
{
@@ -308,16 +324,36 @@ TEST_F(UtilEnv, 01Except) {
ASSERT_EQ(*(uint64_t *)taosArrayGet(total, 1), 100);
}
TEST_F(UtilEnv, testFill) {
- for (int i = 0; i < 10000000; i++) {
+ for (int i = 0; i < 1000000; i++) {
int64_t val = i;
char buf[65] = {0};
indexInt2str(val, buf, 1);
EXPECT_EQ(val, taosStr2int64(buf));
}
- for (int i = 0; i < 10000000; i++) {
+ for (int i = 0; i < 1000000; i++) {
int64_t val = 0 - i;
char buf[65] = {0};
indexInt2str(val, buf, -1);
EXPECT_EQ(val, taosStr2int64(buf));
}
}
+TEST_F(UtilEnv, TempResult) {
+ SIdxTRslt *relt = idxTRsltCreate();
+
+ SArray *f = taosArrayInit(0, sizeof(uint64_t));
+
+ uint64_t val = UINT64_MAX - 1;
+ taosArrayPush(relt->add, &val);
+ idxTRsltMergeTo(relt, f);
+ EXPECT_EQ(taosArrayGetSize(f), 1);
+}
+TEST_F(UtilEnv, TempResultExcept) {
+ SIdxTRslt *relt = idxTRsltCreate();
+
+ SArray *f = taosArrayInit(0, sizeof(uint64_t));
+
+ uint64_t val = UINT64_MAX;
+ taosArrayPush(relt->add, &val);
+ idxTRsltMergeTo(relt, f);
+ EXPECT_EQ(taosArrayGetSize(f), 1);
+}
diff --git a/source/libs/nodes/src/nodesCodeFuncs.c b/source/libs/nodes/src/nodesCodeFuncs.c
index 8887b9841ac8dc907d3a9a71360db20674278cfd..78710569cbe6718c6fa899448a1cab11edebaab3 100644
--- a/source/libs/nodes/src/nodesCodeFuncs.c
+++ b/source/libs/nodes/src/nodesCodeFuncs.c
@@ -1130,6 +1130,9 @@ static const char* jkTableScanPhysiPlanOffset = "Offset";
static const char* jkTableScanPhysiPlanSliding = "Sliding";
static const char* jkTableScanPhysiPlanIntervalUnit = "intervalUnit";
static const char* jkTableScanPhysiPlanSlidingUnit = "slidingUnit";
+static const char* jkTableScanPhysiPlanTriggerType = "triggerType";
+static const char* jkTableScanPhysiPlanWatermark = "watermark";
+static const char* jkTableScanPhysiPlanTsColId = "tsColId";
static int32_t physiTableScanNodeToJson(const void* pObj, SJson* pJson) {
const STableScanPhysiNode* pNode = (const STableScanPhysiNode*)pObj;
@@ -1171,6 +1174,15 @@ static int32_t physiTableScanNodeToJson(const void* pObj, SJson* pJson) {
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddIntegerToObject(pJson, jkTableScanPhysiPlanSlidingUnit, pNode->slidingUnit);
}
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonAddIntegerToObject(pJson, jkTableScanPhysiPlanTriggerType, pNode->triggerType);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonAddIntegerToObject(pJson, jkTableScanPhysiPlanWatermark, pNode->watermark);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonAddIntegerToObject(pJson, jkTableScanPhysiPlanTsColId, pNode->tsColId);
+ }
return code;
}
@@ -1221,6 +1233,15 @@ static int32_t jsonToPhysiTableScanNode(const SJson* pJson, void* pObj) {
tjsonGetNumberValue(pJson, jkTableScanPhysiPlanSlidingUnit, pNode->slidingUnit, code);
;
}
+ if (TSDB_CODE_SUCCESS == code) {
+ tjsonGetNumberValue(pJson, jkTableScanPhysiPlanTriggerType, pNode->triggerType, code);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ tjsonGetNumberValue(pJson, jkTableScanPhysiPlanWatermark, pNode->watermark, code);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ tjsonGetNumberValue(pJson, jkTableScanPhysiPlanTsColId, pNode->tsColId, code);
+ }
return code;
}
@@ -2534,7 +2555,7 @@ static const char* jkSessionWindowTsPrimaryKey = "TsPrimaryKey";
static const char* jkSessionWindowGap = "Gap";
static int32_t sessionWindowNodeToJson(const void* pObj, SJson* pJson) {
- const SSessionWindowNode * pNode = (const SSessionWindowNode*)pObj;
+ const SSessionWindowNode* pNode = (const SSessionWindowNode*)pObj;
int32_t code = tjsonAddObject(pJson, jkSessionWindowTsPrimaryKey, nodeToJson, pNode->pCol);
if (TSDB_CODE_SUCCESS == code) {
@@ -2546,9 +2567,9 @@ static int32_t sessionWindowNodeToJson(const void* pObj, SJson* pJson) {
static int32_t jsonToSessionWindowNode(const SJson* pJson, void* pObj) {
SSessionWindowNode* pNode = (SSessionWindowNode*)pObj;
- int32_t code = jsonToNodeObject(pJson, jkSessionWindowTsPrimaryKey, (SNode **)&pNode->pCol);
+ int32_t code = jsonToNodeObject(pJson, jkSessionWindowTsPrimaryKey, (SNode**)&pNode->pCol);
if (TSDB_CODE_SUCCESS == code) {
- code = jsonToNodeObject(pJson, jkSessionWindowGap, (SNode **)&pNode->pGap);
+ code = jsonToNodeObject(pJson, jkSessionWindowGap, (SNode**)&pNode->pGap);
}
return code;
}
@@ -2775,6 +2796,150 @@ static int32_t jsonToDownstreamSourceNode(const SJson* pJson, void* pObj) {
return code;
}
+static const char* jkDatabaseOptionsBuffer = "Buffer";
+static const char* jkDatabaseOptionsCachelast = "Cachelast";
+static const char* jkDatabaseOptionsCompressionLevel = "CompressionLevel";
+static const char* jkDatabaseOptionsDaysPerFileNode = "DaysPerFileNode";
+static const char* jkDatabaseOptionsDaysPerFile = "DaysPerFile";
+static const char* jkDatabaseOptionsFsyncPeriod = "FsyncPeriod";
+static const char* jkDatabaseOptionsMaxRowsPerBlock = "MaxRowsPerBlock";
+static const char* jkDatabaseOptionsMinRowsPerBlock = "MinRowsPerBlock";
+static const char* jkDatabaseOptionsKeep = "Keep";
+static const char* jkDatabaseOptionsPages = "Pages";
+static const char* jkDatabaseOptionsPagesize = "Pagesize";
+static const char* jkDatabaseOptionsPrecision = "Precision";
+static const char* jkDatabaseOptionsReplica = "Replica";
+static const char* jkDatabaseOptionsStrict = "Strict";
+static const char* jkDatabaseOptionsWalLevel = "WalLevel";
+static const char* jkDatabaseOptionsNumOfVgroups = "NumOfVgroups";
+static const char* jkDatabaseOptionsSingleStable = "SingleStable";
+static const char* jkDatabaseOptionsRetentions = "Retentions";
+static const char* jkDatabaseOptionsSchemaless = "Schemaless";
+
+static int32_t databaseOptionsToJson(const void* pObj, SJson* pJson) {
+ const SDatabaseOptions* pNode = (const SDatabaseOptions*)pObj;
+
+ int32_t code = tjsonAddIntegerToObject(pJson, jkDatabaseOptionsBuffer, pNode->buffer);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonAddIntegerToObject(pJson, jkDatabaseOptionsCachelast, pNode->cachelast);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonAddIntegerToObject(pJson, jkDatabaseOptionsCompressionLevel, pNode->compressionLevel);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonAddObject(pJson, jkDatabaseOptionsDaysPerFileNode, nodeToJson, pNode->pDaysPerFile);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonAddIntegerToObject(pJson, jkDatabaseOptionsDaysPerFile, pNode->daysPerFile);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonAddIntegerToObject(pJson, jkDatabaseOptionsFsyncPeriod, pNode->fsyncPeriod);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonAddIntegerToObject(pJson, jkDatabaseOptionsMaxRowsPerBlock, pNode->maxRowsPerBlock);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonAddIntegerToObject(pJson, jkDatabaseOptionsMinRowsPerBlock, pNode->minRowsPerBlock);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = nodeListToJson(pJson, jkDatabaseOptionsKeep, pNode->pKeep);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonAddIntegerToObject(pJson, jkDatabaseOptionsPages, pNode->pages);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonAddIntegerToObject(pJson, jkDatabaseOptionsPagesize, pNode->pagesize);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonAddStringToObject(pJson, jkDatabaseOptionsPrecision, pNode->precisionStr);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonAddIntegerToObject(pJson, jkDatabaseOptionsReplica, pNode->replica);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonAddIntegerToObject(pJson, jkDatabaseOptionsStrict, pNode->strict);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonAddIntegerToObject(pJson, jkDatabaseOptionsWalLevel, pNode->walLevel);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonAddIntegerToObject(pJson, jkDatabaseOptionsNumOfVgroups, pNode->numOfVgroups);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonAddIntegerToObject(pJson, jkDatabaseOptionsSingleStable, pNode->singleStable);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = nodeListToJson(pJson, jkDatabaseOptionsRetentions, pNode->pRetentions);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonAddIntegerToObject(pJson, jkDatabaseOptionsSchemaless, pNode->schemaless);
+ }
+
+ return code;
+}
+
+static int32_t jsonToDatabaseOptions(const SJson* pJson, void* pObj) {
+ SDatabaseOptions* pNode = (SDatabaseOptions*)pObj;
+
+ int32_t code = tjsonGetIntValue(pJson, jkDatabaseOptionsBuffer, &pNode->buffer);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonGetTinyIntValue(pJson, jkDatabaseOptionsCachelast, &pNode->cachelast);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonGetTinyIntValue(pJson, jkDatabaseOptionsCompressionLevel, &pNode->compressionLevel);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = jsonToNodeObject(pJson, jkDatabaseOptionsDaysPerFileNode, (SNode**)&pNode->pDaysPerFile);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonGetIntValue(pJson, jkDatabaseOptionsDaysPerFile, &pNode->daysPerFile);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonGetIntValue(pJson, jkDatabaseOptionsFsyncPeriod, &pNode->fsyncPeriod);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonGetIntValue(pJson, jkDatabaseOptionsMaxRowsPerBlock, &pNode->maxRowsPerBlock);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonGetIntValue(pJson, jkDatabaseOptionsMinRowsPerBlock, &pNode->minRowsPerBlock);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = jsonToNodeList(pJson, jkDatabaseOptionsKeep, &pNode->pKeep);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonGetIntValue(pJson, jkDatabaseOptionsPages, &pNode->pages);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonGetIntValue(pJson, jkDatabaseOptionsPagesize, &pNode->pagesize);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonGetStringValue(pJson, jkDatabaseOptionsPrecision, pNode->precisionStr);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonGetTinyIntValue(pJson, jkDatabaseOptionsReplica, &pNode->replica);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonGetTinyIntValue(pJson, jkDatabaseOptionsStrict, &pNode->strict);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonGetTinyIntValue(pJson, jkDatabaseOptionsWalLevel, &pNode->walLevel);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonGetIntValue(pJson, jkDatabaseOptionsNumOfVgroups, &pNode->numOfVgroups);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonGetTinyIntValue(pJson, jkDatabaseOptionsSingleStable, &pNode->singleStable);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = jsonToNodeList(pJson, jkDatabaseOptionsRetentions, &pNode->pRetentions);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonGetTinyIntValue(pJson, jkDatabaseOptionsSchemaless, &pNode->schemaless);
+ }
+
+ return code;
+}
+
static const char* jkDataBlockDescDataBlockId = "DataBlockId";
static const char* jkDataBlockDescSlots = "Slots";
static const char* jkDataBlockTotalRowSize = "TotalRowSize";
@@ -2977,6 +3142,130 @@ static int32_t jsonToSelectStmt(const SJson* pJson, void* pObj) {
return code;
}
+static const char* jkAlterDatabaseStmtDbName = "DbName";
+static const char* jkAlterDatabaseStmtOptions = "Options";
+
+static int32_t alterDatabaseStmtToJson(const void* pObj, SJson* pJson) {
+ const SAlterDatabaseStmt* pNode = (const SAlterDatabaseStmt*)pObj;
+
+ int32_t code = tjsonAddStringToObject(pJson, jkAlterDatabaseStmtDbName, pNode->dbName);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonAddObject(pJson, jkAlterDatabaseStmtOptions, nodeToJson, pNode->pOptions);
+ }
+
+ return code;
+}
+
+static int32_t jsonToAlterDatabaseStmt(const SJson* pJson, void* pObj) {
+ SAlterDatabaseStmt* pNode = (SAlterDatabaseStmt*)pObj;
+
+ int32_t code = tjsonGetStringValue(pJson, jkAlterDatabaseStmtDbName, pNode->dbName);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = jsonToNodeObject(pJson, jkAlterDatabaseStmtOptions, (SNode**)&pNode->pOptions);
+ }
+
+ return code;
+}
+
+static const char* jkAlterTableStmtDbName = "DbName";
+static const char* jkAlterTableStmtTableName = "TableName";
+static const char* jkAlterTableStmtAlterType = "AlterType";
+static const char* jkAlterTableStmtColName = "ColName";
+static const char* jkAlterTableStmtNewColName = "NewColName";
+static const char* jkAlterTableStmtOptions = "Options";
+static const char* jkAlterTableStmtNewDataType = "NewDataType";
+static const char* jkAlterTableStmtNewTagVal = "NewTagVal";
+
+static int32_t alterTableStmtToJson(const void* pObj, SJson* pJson) {
+ const SAlterTableStmt* pNode = (const SAlterTableStmt*)pObj;
+
+ int32_t code = tjsonAddStringToObject(pJson, jkAlterTableStmtDbName, pNode->dbName);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonAddStringToObject(pJson, jkAlterTableStmtTableName, pNode->tableName);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonAddIntegerToObject(pJson, jkAlterTableStmtAlterType, pNode->alterType);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonAddStringToObject(pJson, jkAlterTableStmtColName, pNode->colName);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonAddStringToObject(pJson, jkAlterTableStmtNewColName, pNode->newColName);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonAddObject(pJson, jkAlterTableStmtOptions, nodeToJson, pNode->pOptions);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonAddObject(pJson, jkAlterTableStmtNewDataType, dataTypeToJson, &pNode->dataType);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonAddObject(pJson, jkAlterTableStmtOptions, nodeToJson, pNode->pVal);
+ }
+
+ return code;
+}
+
+static int32_t jsonToAlterTableStmt(const SJson* pJson, void* pObj) {
+ SAlterTableStmt* pNode = (SAlterTableStmt*)pObj;
+
+ int32_t code = tjsonGetStringValue(pJson, jkAlterTableStmtDbName, pNode->dbName);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonGetStringValue(pJson, jkAlterTableStmtTableName, pNode->tableName);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonGetTinyIntValue(pJson, jkAlterTableStmtAlterType, &pNode->alterType);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonGetStringValue(pJson, jkAlterTableStmtColName, pNode->colName);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonGetStringValue(pJson, jkAlterTableStmtNewColName, pNode->newColName);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = jsonToNodeObject(pJson, jkAlterTableStmtOptions, (SNode**)&pNode->pOptions);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonToObject(pJson, jkAlterTableStmtNewDataType, jsonToDataType, &pNode->dataType);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = jsonToNodeObject(pJson, jkAlterTableStmtOptions, (SNode**)&pNode->pVal);
+ }
+
+ return code;
+}
+
+static const char* jkAlterDnodeStmtDnodeId = "DnodeId";
+static const char* jkAlterDnodeStmtConfig = "Config";
+static const char* jkAlterDnodeStmtValue = "Value";
+
+static int32_t alterDnodeStmtToJson(const void* pObj, SJson* pJson) {
+ const SAlterDnodeStmt* pNode = (const SAlterDnodeStmt*)pObj;
+
+ int32_t code = tjsonAddIntegerToObject(pJson, jkAlterDnodeStmtDnodeId, pNode->dnodeId);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonAddStringToObject(pJson, jkAlterDnodeStmtConfig, pNode->config);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonAddStringToObject(pJson, jkAlterDnodeStmtValue, pNode->value);
+ }
+
+ return code;
+}
+
+static int32_t jsonToAlterDnodeStmt(const SJson* pJson, void* pObj) {
+ SAlterDnodeStmt* pNode = (SAlterDnodeStmt*)pObj;
+
+ int32_t code = tjsonGetIntValue(pJson, jkAlterDnodeStmtDnodeId, &pNode->dnodeId);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonGetStringValue(pJson, jkAlterDnodeStmtConfig, pNode->config);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonGetStringValue(pJson, jkAlterDnodeStmtValue, pNode->value);
+ }
+
+ return code;
+}
+
static const char* jkCreateTopicStmtTopicName = "TopicName";
static const char* jkCreateTopicStmtSubscribeDbName = "SubscribeDbName";
static const char* jkCreateTopicStmtIgnoreExists = "IgnoreExists";
@@ -3061,6 +3350,8 @@ static int32_t specificNodeToJson(const void* pObj, SJson* pJson) {
break;
case QUERY_NODE_DOWNSTREAM_SOURCE:
return downstreamSourceNodeToJson(pObj, pJson);
+ case QUERY_NODE_DATABASE_OPTIONS:
+ return databaseOptionsToJson(pObj, pJson);
case QUERY_NODE_LEFT_VALUE:
return TSDB_CODE_SUCCESS; // SLeftValueNode has no fields to serialize.
case QUERY_NODE_SET_OPERATOR:
@@ -3069,8 +3360,17 @@ static int32_t specificNodeToJson(const void* pObj, SJson* pJson) {
return selectStmtToJson(pObj, pJson);
case QUERY_NODE_VNODE_MODIF_STMT:
case QUERY_NODE_CREATE_DATABASE_STMT:
+ break;
+ case QUERY_NODE_ALTER_DATABASE_STMT:
+ return alterDatabaseStmtToJson(pObj, pJson);
case QUERY_NODE_CREATE_TABLE_STMT:
+ break;
+ case QUERY_NODE_ALTER_TABLE_STMT:
+ return alterTableStmtToJson(pObj, pJson);
case QUERY_NODE_USE_DATABASE_STMT:
+ break;
+ case QUERY_NODE_ALTER_DNODE_STMT:
+ return alterDnodeStmtToJson(pObj, pJson);
case QUERY_NODE_SHOW_DATABASES_STMT:
case QUERY_NODE_SHOW_TABLES_STMT:
break;
@@ -3177,12 +3477,20 @@ static int32_t jsonToSpecificNode(const SJson* pJson, void* pObj) {
return jsonToSlotDescNode(pJson, pObj);
case QUERY_NODE_DOWNSTREAM_SOURCE:
return jsonToDownstreamSourceNode(pJson, pObj);
+ case QUERY_NODE_DATABASE_OPTIONS:
+ return jsonToDatabaseOptions(pJson, pObj);
case QUERY_NODE_LEFT_VALUE:
return TSDB_CODE_SUCCESS; // SLeftValueNode has no fields to deserialize.
case QUERY_NODE_SET_OPERATOR:
return jsonToSetOperator(pJson, pObj);
case QUERY_NODE_SELECT_STMT:
return jsonToSelectStmt(pJson, pObj);
+ case QUERY_NODE_ALTER_DATABASE_STMT:
+ return jsonToAlterDatabaseStmt(pJson, pObj);
+ case QUERY_NODE_ALTER_TABLE_STMT:
+ return jsonToAlterTableStmt(pJson, pObj);
+ case QUERY_NODE_ALTER_DNODE_STMT:
+ return jsonToAlterDnodeStmt(pJson, pObj);
case QUERY_NODE_CREATE_TOPIC_STMT:
return jsonToCreateTopicStmt(pJson, pObj);
case QUERY_NODE_LOGIC_PLAN_SCAN:
diff --git a/source/libs/parser/inc/parAst.h b/source/libs/parser/inc/parAst.h
index 50c05170b3937d37862b3ea7e8a2f47046be5c80..a1c304118bfcdc5078bf0a19b73a8bde17e3c0cf 100644
--- a/source/libs/parser/inc/parAst.h
+++ b/source/libs/parser/inc/parAst.h
@@ -27,14 +27,13 @@ extern "C" {
#include "querynodes.h"
typedef struct SAstCreateContext {
- SParseContext* pQueryCxt;
- SMsgBuf msgBuf;
- bool notSupport;
- SNode* pRootNode;
- int16_t placeholderNo;
- SArray* pPlaceholderValues;
- int32_t errCode;
- SParseMetaCache* pMetaCache;
+ SParseContext* pQueryCxt;
+ SMsgBuf msgBuf;
+ bool notSupport;
+ SNode* pRootNode;
+ int16_t placeholderNo;
+ SArray* pPlaceholderValues;
+ int32_t errCode;
} SAstCreateContext;
typedef enum EDatabaseOptionType {
@@ -75,7 +74,7 @@ typedef struct SAlterOption {
extern SToken nil_token;
-int32_t initAstCreateContext(SParseContext* pParseCxt, SAstCreateContext* pCxt);
+void initAstCreateContext(SParseContext* pParseCxt, SAstCreateContext* pCxt);
SNode* createRawExprNode(SAstCreateContext* pCxt, const SToken* pToken, SNode* pNode);
SNode* createRawExprNodeExt(SAstCreateContext* pCxt, const SToken* pStart, const SToken* pEnd, SNode* pNode);
diff --git a/source/libs/parser/inc/parInt.h b/source/libs/parser/inc/parInt.h
index 2ad1ebc1121d96f243fff9d55980b26bffdf6c04..184ed7d8b243ed2ec97e4c38b1f1e31de9e3f2c2 100644
--- a/source/libs/parser/inc/parInt.h
+++ b/source/libs/parser/inc/parInt.h
@@ -26,6 +26,7 @@ extern "C" {
int32_t parseInsertSql(SParseContext* pContext, SQuery** pQuery);
int32_t parse(SParseContext* pParseCxt, SQuery** pQuery);
+int32_t collectMetaKey(SParseContext* pParseCxt, SQuery* pQuery);
int32_t authenticate(SParseContext* pParseCxt, SQuery* pQuery);
int32_t translate(SParseContext* pParseCxt, SQuery* pQuery);
int32_t extractResultSchema(const SNode* pRoot, int32_t* numOfCols, SSchema** pSchema);
diff --git a/source/libs/parser/inc/parUtil.h b/source/libs/parser/inc/parUtil.h
index 7ad5a7ecab20c99114cf8d05363037985c74d9bd..80288dbc448a0cd35212da5e672b6b59bc021313 100644
--- a/source/libs/parser/inc/parUtil.h
+++ b/source/libs/parser/inc/parUtil.h
@@ -24,12 +24,12 @@ extern "C" {
#include "os.h"
#include "query.h"
-#define parserFatal(param, ...) qFatal("PARSER: " param, __VA_ARGS__)
-#define parserError(param, ...) qError("PARSER: " param, __VA_ARGS__)
-#define parserWarn(param, ...) qWarn("PARSER: " param, __VA_ARGS__)
-#define parserInfo(param, ...) qInfo("PARSER: " param, __VA_ARGS__)
-#define parserDebug(param, ...) qDebug("PARSER: " param, __VA_ARGS__)
-#define parserTrace(param, ...) qTrace("PARSER: " param, __VA_ARGS__)
+#define parserFatal(param, ...) qFatal("PARSER: " param, ##__VA_ARGS__)
+#define parserError(param, ...) qError("PARSER: " param, ##__VA_ARGS__)
+#define parserWarn(param, ...) qWarn("PARSER: " param, ##__VA_ARGS__)
+#define parserInfo(param, ...) qInfo("PARSER: " param, ##__VA_ARGS__)
+#define parserDebug(param, ...) qDebug("PARSER: " param, ##__VA_ARGS__)
+#define parserTrace(param, ...) qTrace("PARSER: " param, ##__VA_ARGS__)
#define PK_TS_COL_INTERNAL_NAME "_rowts"
@@ -42,7 +42,10 @@ typedef struct SParseMetaCache {
SHashObj* pTableMeta; // key is tbFName, element is STableMeta*
SHashObj* pDbVgroup; // key is dbFName, element is SArray*
SHashObj* pTableVgroup; // key is tbFName, element is SVgroupInfo*
- SHashObj* pDbCfg; // key is tbFName, element is SDbCfgInfo
+ SHashObj* pDbCfg; // key is tbFName, element is SDbCfgInfo*
+ SHashObj* pDbInfo; // key is tbFName, element is SDbInfo*
+ SHashObj* pUserAuth; // key is SUserAuthInfo serialized string, element is bool indicating whether or not to pass
+ SHashObj* pUdf; // key is funcName, element is SFuncInfo*
} SParseMetaCache;
int32_t generateSyntaxErrMsg(SMsgBuf* pBuf, int32_t errCode, ...);
@@ -62,12 +65,22 @@ int32_t trimString(const char* src, int32_t len, char* dst, int32_t dlen);
int32_t buildCatalogReq(const SParseMetaCache* pMetaCache, SCatalogReq* pCatalogReq);
int32_t putMetaDataToCache(const SCatalogReq* pCatalogReq, const SMetaData* pMetaData, SParseMetaCache* pMetaCache);
int32_t reserveTableMetaInCache(int32_t acctId, const char* pDb, const char* pTable, SParseMetaCache* pMetaCache);
+int32_t reserveDbVgInfoInCache(int32_t acctId, const char* pDb, SParseMetaCache* pMetaCache);
+int32_t reserveTableVgroupInCache(int32_t acctId, const char* pDb, const char* pTable, SParseMetaCache* pMetaCache);
+int32_t reserveDbVgVersionInCache(int32_t acctId, const char* pDb, SParseMetaCache* pMetaCache);
+int32_t reserveDbCfgInCache(int32_t acctId, const char* pDb, SParseMetaCache* pMetaCache);
+int32_t reserveUserAuthInCache(int32_t acctId, const char* pUser, const char* pDb, AUTH_TYPE type,
+ SParseMetaCache* pMetaCache);
+int32_t reserveUdfInCache(const char* pFunc, SParseMetaCache* pMetaCache);
int32_t getTableMetaFromCache(SParseMetaCache* pMetaCache, const SName* pName, STableMeta** pMeta);
-int32_t getDBVgInfoFromCache(SParseMetaCache* pMetaCache, const char* pDbFName, SArray** pVgInfo);
-int32_t getTableHashVgroupFromCache(SParseMetaCache* pMetaCache, const SName* pName, SVgroupInfo* pVgroup);
-int32_t getDBVgVersionFromCache(SParseMetaCache* pMetaCache, const char* pDbFName, int32_t* pVersion, int64_t* pDbId,
+int32_t getDbVgInfoFromCache(SParseMetaCache* pMetaCache, const char* pDbFName, SArray** pVgInfo);
+int32_t getTableVgroupFromCache(SParseMetaCache* pMetaCache, const SName* pName, SVgroupInfo* pVgroup);
+int32_t getDbVgVersionFromCache(SParseMetaCache* pMetaCache, const char* pDbFName, int32_t* pVersion, int64_t* pDbId,
int32_t* pTableNum);
-int32_t getDBCfgFromCache(SParseMetaCache* pMetaCache, const char* pDbFName, SDbCfgInfo* pInfo);
+int32_t getDbCfgFromCache(SParseMetaCache* pMetaCache, const char* pDbFName, SDbCfgInfo* pInfo);
+int32_t getUserAuthFromCache(SParseMetaCache* pMetaCache, const char* pUser, const char* pDb, AUTH_TYPE type,
+ bool* pPass);
+int32_t getUdfInfoFromCache(SParseMetaCache* pMetaCache, const char* pFunc, SFuncInfo* pInfo);
#ifdef __cplusplus
}
diff --git a/source/libs/parser/src/parAstCreater.c b/source/libs/parser/src/parAstCreater.c
index 4aa28409b73e08e4ec445c22f55c02fbdd328b90..836a0cb520684e264cecb3cd6425ae3c7688de68 100644
--- a/source/libs/parser/src/parAstCreater.c
+++ b/source/libs/parser/src/parAstCreater.c
@@ -38,7 +38,7 @@
SToken nil_token = {.type = TK_NK_NIL, .n = 0, .z = NULL};
-int32_t initAstCreateContext(SParseContext* pParseCxt, SAstCreateContext* pCxt) {
+void initAstCreateContext(SParseContext* pParseCxt, SAstCreateContext* pCxt) {
memset(pCxt, 0, sizeof(SAstCreateContext));
pCxt->pQueryCxt = pParseCxt;
pCxt->msgBuf.buf = pParseCxt->pMsg;
@@ -48,13 +48,6 @@ int32_t initAstCreateContext(SParseContext* pParseCxt, SAstCreateContext* pCxt)
pCxt->placeholderNo = 0;
pCxt->pPlaceholderValues = NULL;
pCxt->errCode = TSDB_CODE_SUCCESS;
- if (pParseCxt->async) {
- pCxt->pMetaCache = taosMemoryCalloc(1, sizeof(SParseMetaCache));
- if (NULL == pCxt->pMetaCache) {
- return TSDB_CODE_OUT_OF_MEMORY;
- }
- }
- return TSDB_CODE_SUCCESS;
}
static void copyStringFormStringToken(SToken* pToken, char* pBuf, int32_t len) {
@@ -472,13 +465,6 @@ SNode* createRealTableNode(SAstCreateContext* pCxt, SToken* pDbName, SToken* pTa
strncpy(realTable->table.tableAlias, pTableName->z, pTableName->n);
}
strncpy(realTable->table.tableName, pTableName->z, pTableName->n);
- if (NULL != pCxt->pMetaCache) {
- if (TSDB_CODE_SUCCESS != reserveTableMetaInCache(pCxt->pQueryCxt->acctId, realTable->table.dbName,
- realTable->table.tableName, pCxt->pMetaCache)) {
- nodesDestroyNode(realTable);
- CHECK_OUT_OF_MEM(NULL);
- }
- }
return (SNode*)realTable;
}
diff --git a/source/libs/parser/src/parAstParser.c b/source/libs/parser/src/parAstParser.c
index 28c79a88f05f03885f0ace1dfe73d8e52dd498f9..5d65a0b80bebc98a02e21458ae558661c4e5439b 100644
--- a/source/libs/parser/src/parAstParser.c
+++ b/source/libs/parser/src/parAstParser.c
@@ -13,11 +13,12 @@
* along with this program. If not, see .
*/
+#include "functionMgt.h"
#include "os.h"
-#include "parInt.h"
-
#include "parAst.h"
+#include "parInt.h"
#include "parToken.h"
+#include "systable.h"
typedef void* (*FMalloc)(size_t);
typedef void (*FFree)(void*);
@@ -82,8 +83,386 @@ abort_parse:
(*pQuery)->pRoot = cxt.pRootNode;
(*pQuery)->placeholderNum = cxt.placeholderNo;
TSWAP((*pQuery)->pPlaceholderValues, cxt.pPlaceholderValues);
- TSWAP((*pQuery)->pMetaCache, cxt.pMetaCache);
}
taosArrayDestroy(cxt.pPlaceholderValues);
return cxt.errCode;
}
+
+typedef struct SCollectMetaKeyCxt {
+ SParseContext* pParseCxt;
+ SParseMetaCache* pMetaCache;
+} SCollectMetaKeyCxt;
+
+static void destroyCollectMetaKeyCxt(SCollectMetaKeyCxt* pCxt) {
+ if (NULL != pCxt->pMetaCache) {
+ // TODO: free pMetaCache's hash tables and the struct itself — as written, collectMetaKey() leaks the calloc'd SParseMetaCache on its error path (TSWAP only runs on success)
+ }
+}
+
+typedef struct SCollectMetaKeyFromExprCxt {
+ SCollectMetaKeyCxt* pComCxt;
+ int32_t errCode;
+} SCollectMetaKeyFromExprCxt;
+
+static int32_t collectMetaKeyFromQuery(SCollectMetaKeyCxt* pCxt, SNode* pStmt);
+
+static EDealRes collectMetaKeyFromFunction(SCollectMetaKeyFromExprCxt* pCxt, SFunctionNode* pFunc) {
+ if (fmIsBuiltinFunc(pFunc->functionName)) {
+ return DEAL_RES_CONTINUE;
+ }
+ pCxt->errCode = reserveUdfInCache(pFunc->functionName, pCxt->pComCxt->pMetaCache);
+ return TSDB_CODE_SUCCESS == pCxt->errCode ? DEAL_RES_CONTINUE : DEAL_RES_ERROR;
+}
+
+static EDealRes collectMetaKeyFromRealTable(SCollectMetaKeyFromExprCxt* pCxt, SRealTableNode* pRealTable) {
+ pCxt->errCode = reserveTableMetaInCache(pCxt->pComCxt->pParseCxt->acctId, pRealTable->table.dbName,
+ pRealTable->table.tableName, pCxt->pComCxt->pMetaCache);
+ if (TSDB_CODE_SUCCESS == pCxt->errCode) {
+ pCxt->errCode = reserveTableVgroupInCache(pCxt->pComCxt->pParseCxt->acctId, pRealTable->table.dbName,
+ pRealTable->table.tableName, pCxt->pComCxt->pMetaCache);
+ }
+ if (TSDB_CODE_SUCCESS == pCxt->errCode) {
+ pCxt->errCode = reserveUserAuthInCache(pCxt->pComCxt->pParseCxt->acctId, pCxt->pComCxt->pParseCxt->pUser,
+ pRealTable->table.dbName, AUTH_TYPE_READ, pCxt->pComCxt->pMetaCache);
+ }
+ return TSDB_CODE_SUCCESS == pCxt->errCode ? DEAL_RES_CONTINUE : DEAL_RES_ERROR;
+}
+
+static EDealRes collectMetaKeyFromTempTable(SCollectMetaKeyFromExprCxt* pCxt, STempTableNode* pTempTable) {
+ pCxt->errCode = collectMetaKeyFromQuery(pCxt->pComCxt, pTempTable->pSubquery);
+ return TSDB_CODE_SUCCESS == pCxt->errCode ? DEAL_RES_CONTINUE : DEAL_RES_ERROR;
+}
+
+static EDealRes collectMetaKeyFromExprImpl(SNode* pNode, void* pContext) {
+ SCollectMetaKeyFromExprCxt* pCxt = pContext;
+ switch (nodeType(pNode)) {
+ case QUERY_NODE_FUNCTION:
+ return collectMetaKeyFromFunction(pCxt, (SFunctionNode*)pNode);
+ case QUERY_NODE_REAL_TABLE:
+ return collectMetaKeyFromRealTable(pCxt, (SRealTableNode*)pNode);
+ case QUERY_NODE_TEMP_TABLE:
+ return collectMetaKeyFromTempTable(pCxt, (STempTableNode*)pNode);
+ default:
+ break;
+ }
+ return DEAL_RES_CONTINUE;
+}
+
+static int32_t collectMetaKeyFromExprs(SCollectMetaKeyCxt* pCxt, SNodeList* pList) {
+ SCollectMetaKeyFromExprCxt cxt = {.pComCxt = pCxt, .errCode = TSDB_CODE_SUCCESS};
+ nodesWalkExprs(pList, collectMetaKeyFromExprImpl, &cxt);
+ return cxt.errCode;
+}
+
+static int32_t collectMetaKeyFromSetOperator(SCollectMetaKeyCxt* pCxt, SSetOperator* pStmt) {
+ int32_t code = collectMetaKeyFromQuery(pCxt, pStmt->pLeft);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = collectMetaKeyFromQuery(pCxt, pStmt->pRight);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = collectMetaKeyFromExprs(pCxt, pStmt->pOrderByList);
+ }
+ return code;
+}
+
+static int32_t collectMetaKeyFromSelect(SCollectMetaKeyCxt* pCxt, SSelectStmt* pStmt) {
+ SCollectMetaKeyFromExprCxt cxt = {.pComCxt = pCxt, .errCode = TSDB_CODE_SUCCESS};
+ nodesWalkSelectStmt(pStmt, SQL_CLAUSE_FROM, collectMetaKeyFromExprImpl, &cxt);
+ return cxt.errCode;
+}
+
+static int32_t collectMetaKeyFromCreateTable(SCollectMetaKeyCxt* pCxt, SCreateTableStmt* pStmt) {
+ if (NULL == pStmt->pTags) {
+ return reserveTableVgroupInCache(pCxt->pParseCxt->acctId, pStmt->dbName, pStmt->tableName, pCxt->pMetaCache);
+ } else {
+ return reserveDbCfgInCache(pCxt->pParseCxt->acctId, pStmt->dbName, pCxt->pMetaCache);
+ }
+}
+
+static int32_t collectMetaKeyFromCreateMultiTable(SCollectMetaKeyCxt* pCxt, SCreateMultiTableStmt* pStmt) {
+ int32_t code = TSDB_CODE_SUCCESS;
+ SNode* pNode = NULL;
+ FOREACH(pNode, pStmt->pSubTables) {
+ SCreateSubTableClause* pClause = (SCreateSubTableClause*)pNode;
+ code =
+ reserveTableMetaInCache(pCxt->pParseCxt->acctId, pClause->useDbName, pClause->useTableName, pCxt->pMetaCache);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = reserveTableVgroupInCache(pCxt->pParseCxt->acctId, pClause->dbName, pClause->tableName, pCxt->pMetaCache);
+ }
+ if (TSDB_CODE_SUCCESS != code) {
+ break;
+ }
+ }
+ return code;
+}
+
+static int32_t collectMetaKeyFromAlterTable(SCollectMetaKeyCxt* pCxt, SAlterTableStmt* pStmt) {
+ int32_t code = reserveTableMetaInCache(pCxt->pParseCxt->acctId, pStmt->dbName, pStmt->tableName, pCxt->pMetaCache);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = reserveTableVgroupInCache(pCxt->pParseCxt->acctId, pStmt->dbName, pStmt->tableName, pCxt->pMetaCache);
+ }
+ return code;
+}
+
+static int32_t collectMetaKeyFromUseDatabase(SCollectMetaKeyCxt* pCxt, SUseDatabaseStmt* pStmt) {
+ return reserveDbVgVersionInCache(pCxt->pParseCxt->acctId, pStmt->dbName, pCxt->pMetaCache);
+}
+
+static int32_t collectMetaKeyFromCreateIndex(SCollectMetaKeyCxt* pCxt, SCreateIndexStmt* pStmt) {
+ int32_t code = TSDB_CODE_SUCCESS;
+ if (INDEX_TYPE_SMA == pStmt->indexType) {
+ code = reserveTableMetaInCache(pCxt->pParseCxt->acctId, pCxt->pParseCxt->db, pStmt->tableName, pCxt->pMetaCache);
+ if (TSDB_CODE_SUCCESS == code) {
+ code =
+ reserveTableVgroupInCache(pCxt->pParseCxt->acctId, pCxt->pParseCxt->db, pStmt->tableName, pCxt->pMetaCache);
+ }
+ }
+ return code;
+}
+
+static int32_t collectMetaKeyFromCreateTopic(SCollectMetaKeyCxt* pCxt, SCreateTopicStmt* pStmt) {
+ if (NULL != pStmt->pQuery) {
+ return collectMetaKeyFromQuery(pCxt, pStmt->pQuery);
+ }
+ return TSDB_CODE_SUCCESS;
+}
+
+static int32_t collectMetaKeyFromExplain(SCollectMetaKeyCxt* pCxt, SExplainStmt* pStmt) {
+ return collectMetaKeyFromQuery(pCxt, pStmt->pQuery);
+}
+
+static int32_t collectMetaKeyFromCreateStream(SCollectMetaKeyCxt* pCxt, SCreateStreamStmt* pStmt) {
+ return collectMetaKeyFromQuery(pCxt, pStmt->pQuery);
+}
+
+static int32_t collectMetaKeyFromShowDnodes(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) {
+ return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_DNODES,
+ pCxt->pMetaCache);
+}
+
+static int32_t collectMetaKeyFromShowMnodes(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) {
+ return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_MNODES,
+ pCxt->pMetaCache);
+}
+
+static int32_t collectMetaKeyFromShowModules(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) {
+ return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_MODULES,
+ pCxt->pMetaCache);
+}
+
+static int32_t collectMetaKeyFromShowQnodes(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) {
+ return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_QNODES,
+ pCxt->pMetaCache);
+}
+
+static int32_t collectMetaKeyFromShowSnodes(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) {
+ return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_SNODES,
+ pCxt->pMetaCache);
+}
+
+static int32_t collectMetaKeyFromShowBnodes(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) {
+ return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_BNODES,
+ pCxt->pMetaCache);
+}
+
+static int32_t collectMetaKeyFromShowDatabases(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) {
+ return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_USER_DATABASES,
+ pCxt->pMetaCache);
+}
+
+static int32_t collectMetaKeyFromShowFunctions(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) {
+ return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_USER_FUNCTIONS,
+ pCxt->pMetaCache);
+}
+
+static int32_t collectMetaKeyFromShowIndexes(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) {
+ return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_USER_INDEXES,
+ pCxt->pMetaCache);
+}
+
+static int32_t collectMetaKeyFromShowStables(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) {
+ return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_USER_STABLES,
+ pCxt->pMetaCache);
+}
+
+static int32_t collectMetaKeyFromShowStreams(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) {
+ return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_PERFORMANCE_SCHEMA_DB, TSDB_PERFS_TABLE_STREAMS,
+ pCxt->pMetaCache);
+}
+
+static int32_t collectMetaKeyFromShowTables(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) {
+ int32_t code = reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB,
+ TSDB_INS_TABLE_USER_TABLES, pCxt->pMetaCache);
+ if (TSDB_CODE_SUCCESS == code) {
+ if (NULL != pStmt->pDbName) {
+ code = reserveDbVgInfoInCache(pCxt->pParseCxt->acctId, ((SValueNode*)pStmt->pDbName)->literal, pCxt->pMetaCache);
+ } else {
+ code = reserveDbVgInfoInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, pCxt->pMetaCache);
+ }
+ }
+ return code;
+}
+
+static int32_t collectMetaKeyFromShowUsers(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) {
+ return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_USER_USERS,
+ pCxt->pMetaCache);
+}
+
+static int32_t collectMetaKeyFromShowLicence(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) {
+ return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_LICENCES,
+ pCxt->pMetaCache);
+}
+
+static int32_t collectMetaKeyFromShowVgroups(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) {
+ return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_VGROUPS,
+ pCxt->pMetaCache);
+}
+
+static int32_t collectMetaKeyFromShowTopics(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) {
+ return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_PERFORMANCE_SCHEMA_DB, TSDB_PERFS_TABLE_TOPICS,
+ pCxt->pMetaCache);
+}
+
+static int32_t collectMetaKeyFromShowTransactions(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) {
+ return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_PERFORMANCE_SCHEMA_DB, TSDB_PERFS_TABLE_TRANS,
+ pCxt->pMetaCache);
+}
+
+static int32_t collectMetaKeyFromQuery(SCollectMetaKeyCxt* pCxt, SNode* pStmt) {
+ switch (nodeType(pStmt)) {
+ case QUERY_NODE_SET_OPERATOR:
+ return collectMetaKeyFromSetOperator(pCxt, (SSetOperator*)pStmt);
+ case QUERY_NODE_SELECT_STMT:
+ return collectMetaKeyFromSelect(pCxt, (SSelectStmt*)pStmt);
+ case QUERY_NODE_VNODE_MODIF_STMT:
+ case QUERY_NODE_CREATE_DATABASE_STMT:
+ case QUERY_NODE_DROP_DATABASE_STMT:
+ case QUERY_NODE_ALTER_DATABASE_STMT:
+ break;
+ case QUERY_NODE_CREATE_TABLE_STMT:
+ return collectMetaKeyFromCreateTable(pCxt, (SCreateTableStmt*)pStmt);
+ case QUERY_NODE_CREATE_SUBTABLE_CLAUSE:
+ break;
+ case QUERY_NODE_CREATE_MULTI_TABLE_STMT:
+ return collectMetaKeyFromCreateMultiTable(pCxt, (SCreateMultiTableStmt*)pStmt);
+ case QUERY_NODE_DROP_TABLE_CLAUSE:
+ case QUERY_NODE_DROP_TABLE_STMT:
+ case QUERY_NODE_DROP_SUPER_TABLE_STMT:
+ break;
+ case QUERY_NODE_ALTER_TABLE_STMT:
+ return collectMetaKeyFromAlterTable(pCxt, (SAlterTableStmt*)pStmt);
+ case QUERY_NODE_CREATE_USER_STMT:
+ case QUERY_NODE_ALTER_USER_STMT:
+ case QUERY_NODE_DROP_USER_STMT:
+ break;
+ case QUERY_NODE_USE_DATABASE_STMT:
+ return collectMetaKeyFromUseDatabase(pCxt, (SUseDatabaseStmt*)pStmt);
+ case QUERY_NODE_CREATE_DNODE_STMT:
+ case QUERY_NODE_DROP_DNODE_STMT:
+ case QUERY_NODE_ALTER_DNODE_STMT:
+ break;
+ case QUERY_NODE_CREATE_INDEX_STMT:
+ return collectMetaKeyFromCreateIndex(pCxt, (SCreateIndexStmt*)pStmt);
+ case QUERY_NODE_DROP_INDEX_STMT:
+ case QUERY_NODE_CREATE_QNODE_STMT:
+ case QUERY_NODE_DROP_QNODE_STMT:
+ case QUERY_NODE_CREATE_BNODE_STMT:
+ case QUERY_NODE_DROP_BNODE_STMT:
+ case QUERY_NODE_CREATE_SNODE_STMT:
+ case QUERY_NODE_DROP_SNODE_STMT:
+ case QUERY_NODE_CREATE_MNODE_STMT:
+ case QUERY_NODE_DROP_MNODE_STMT:
+ break;
+ case QUERY_NODE_CREATE_TOPIC_STMT:
+ return collectMetaKeyFromCreateTopic(pCxt, (SCreateTopicStmt*)pStmt);
+ case QUERY_NODE_DROP_TOPIC_STMT:
+ case QUERY_NODE_DROP_CGROUP_STMT:
+ case QUERY_NODE_ALTER_LOCAL_STMT:
+ break;
+ case QUERY_NODE_EXPLAIN_STMT:
+ return collectMetaKeyFromExplain(pCxt, (SExplainStmt*)pStmt);
+ case QUERY_NODE_DESCRIBE_STMT:
+ case QUERY_NODE_RESET_QUERY_CACHE_STMT:
+ case QUERY_NODE_COMPACT_STMT:
+ case QUERY_NODE_CREATE_FUNCTION_STMT:
+ case QUERY_NODE_DROP_FUNCTION_STMT:
+ break;
+ case QUERY_NODE_CREATE_STREAM_STMT:
+ return collectMetaKeyFromCreateStream(pCxt, (SCreateStreamStmt*)pStmt);
+ case QUERY_NODE_DROP_STREAM_STMT:
+ case QUERY_NODE_MERGE_VGROUP_STMT:
+ case QUERY_NODE_REDISTRIBUTE_VGROUP_STMT:
+ case QUERY_NODE_SPLIT_VGROUP_STMT:
+ case QUERY_NODE_SYNCDB_STMT:
+ case QUERY_NODE_GRANT_STMT:
+ case QUERY_NODE_REVOKE_STMT:
+ case QUERY_NODE_SHOW_DNODES_STMT:
+ return collectMetaKeyFromShowDnodes(pCxt, (SShowStmt*)pStmt);
+ case QUERY_NODE_SHOW_MNODES_STMT:
+ return collectMetaKeyFromShowMnodes(pCxt, (SShowStmt*)pStmt);
+ case QUERY_NODE_SHOW_MODULES_STMT:
+ return collectMetaKeyFromShowModules(pCxt, (SShowStmt*)pStmt);
+ case QUERY_NODE_SHOW_QNODES_STMT:
+ return collectMetaKeyFromShowQnodes(pCxt, (SShowStmt*)pStmt);
+ case QUERY_NODE_SHOW_SNODES_STMT:
+ return collectMetaKeyFromShowSnodes(pCxt, (SShowStmt*)pStmt);
+ case QUERY_NODE_SHOW_BNODES_STMT:
+ return collectMetaKeyFromShowBnodes(pCxt, (SShowStmt*)pStmt);
+ case QUERY_NODE_SHOW_CLUSTER_STMT:
+ break;
+ case QUERY_NODE_SHOW_DATABASES_STMT:
+ return collectMetaKeyFromShowDatabases(pCxt, (SShowStmt*)pStmt);
+ case QUERY_NODE_SHOW_FUNCTIONS_STMT:
+ return collectMetaKeyFromShowFunctions(pCxt, (SShowStmt*)pStmt);
+ case QUERY_NODE_SHOW_INDEXES_STMT:
+ return collectMetaKeyFromShowIndexes(pCxt, (SShowStmt*)pStmt);
+ case QUERY_NODE_SHOW_STABLES_STMT:
+ return collectMetaKeyFromShowStables(pCxt, (SShowStmt*)pStmt);
+ case QUERY_NODE_SHOW_STREAMS_STMT:
+ return collectMetaKeyFromShowStreams(pCxt, (SShowStmt*)pStmt);
+ case QUERY_NODE_SHOW_TABLES_STMT:
+ return collectMetaKeyFromShowTables(pCxt, (SShowStmt*)pStmt);
+ case QUERY_NODE_SHOW_USERS_STMT:
+ return collectMetaKeyFromShowUsers(pCxt, (SShowStmt*)pStmt);
+ case QUERY_NODE_SHOW_LICENCE_STMT:
+ return collectMetaKeyFromShowLicence(pCxt, (SShowStmt*)pStmt);
+ case QUERY_NODE_SHOW_VGROUPS_STMT:
+ return collectMetaKeyFromShowVgroups(pCxt, (SShowStmt*)pStmt);
+ case QUERY_NODE_SHOW_TOPICS_STMT:
+ return collectMetaKeyFromShowTopics(pCxt, (SShowStmt*)pStmt);
+ case QUERY_NODE_SHOW_CONSUMERS_STMT:
+ case QUERY_NODE_SHOW_SUBSCRIBES_STMT:
+ case QUERY_NODE_SHOW_SMAS_STMT:
+ case QUERY_NODE_SHOW_CONFIGS_STMT:
+ case QUERY_NODE_SHOW_CONNECTIONS_STMT:
+ case QUERY_NODE_SHOW_QUERIES_STMT:
+ case QUERY_NODE_SHOW_VNODES_STMT:
+ case QUERY_NODE_SHOW_APPS_STMT:
+ case QUERY_NODE_SHOW_SCORES_STMT:
+ case QUERY_NODE_SHOW_VARIABLE_STMT:
+ case QUERY_NODE_SHOW_CREATE_DATABASE_STMT:
+ case QUERY_NODE_SHOW_CREATE_TABLE_STMT:
+ case QUERY_NODE_SHOW_CREATE_STABLE_STMT:
+ break;
+ case QUERY_NODE_SHOW_TRANSACTIONS_STMT:
+ return collectMetaKeyFromShowTransactions(pCxt, (SShowStmt*)pStmt);
+ case QUERY_NODE_KILL_CONNECTION_STMT:
+ case QUERY_NODE_KILL_QUERY_STMT:
+ case QUERY_NODE_KILL_TRANSACTION_STMT:
+ default:
+ break;
+ }
+ return TSDB_CODE_SUCCESS;
+}
+
+int32_t collectMetaKey(SParseContext* pParseCxt, SQuery* pQuery) {
+ SCollectMetaKeyCxt cxt = {.pParseCxt = pParseCxt, .pMetaCache = taosMemoryCalloc(1, sizeof(SParseMetaCache))};
+ if (NULL == cxt.pMetaCache) {
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
+ int32_t code = collectMetaKeyFromQuery(&cxt, pQuery->pRoot);
+ if (TSDB_CODE_SUCCESS == code) {
+ TSWAP(pQuery->pMetaCache, cxt.pMetaCache);
+ }
+ destroyCollectMetaKeyCxt(&cxt);
+ return code;
+}
diff --git a/source/libs/parser/src/parAuthenticator.c b/source/libs/parser/src/parAuthenticator.c
index 250e7910d69847a130fa4f0b2132b3dcb99da8e7..2670e5710b9f5418c401e9799678c68d82c8f29d 100644
--- a/source/libs/parser/src/parAuthenticator.c
+++ b/source/libs/parser/src/parAuthenticator.c
@@ -18,23 +18,30 @@
#include "parInt.h"
typedef struct SAuthCxt {
- SParseContext* pParseCxt;
- int32_t errCode;
+ SParseContext* pParseCxt;
+ SParseMetaCache* pMetaCache;
+ int32_t errCode;
} SAuthCxt;
static int32_t authQuery(SAuthCxt* pCxt, SNode* pStmt);
-static int32_t checkAuth(SParseContext* pCxt, const char* pDbName, AUTH_TYPE type) {
- if (pCxt->isSuperUser) {
+static int32_t checkAuth(SAuthCxt* pCxt, const char* pDbName, AUTH_TYPE type) {
+ SParseContext* pParseCxt = pCxt->pParseCxt;
+ if (pParseCxt->isSuperUser) {
return TSDB_CODE_SUCCESS;
}
SName name;
- tNameSetDbName(&name, pCxt->acctId, pDbName, strlen(pDbName));
+ tNameSetDbName(&name, pParseCxt->acctId, pDbName, strlen(pDbName));
char dbFname[TSDB_DB_FNAME_LEN] = {0};
tNameGetFullDbName(&name, dbFname);
+ int32_t code = TSDB_CODE_SUCCESS;
bool pass = false;
- int32_t code =
- catalogChkAuth(pCxt->pCatalog, pCxt->pTransporter, &pCxt->mgmtEpSet, pCxt->pUser, dbFname, type, &pass);
+ if (NULL != pCxt->pMetaCache) {
+ code = getUserAuthFromCache(pCxt->pMetaCache, pParseCxt->pUser, dbFname, type, &pass);
+ } else {
+ code = catalogChkAuth(pParseCxt->pCatalog, pParseCxt->pTransporter, &pParseCxt->mgmtEpSet, pParseCxt->pUser,
+ dbFname, type, &pass);
+ }
return TSDB_CODE_SUCCESS == code ? (pass ? TSDB_CODE_SUCCESS : TSDB_CODE_PAR_PERMISSION_DENIED) : code;
}
@@ -45,7 +52,7 @@ static EDealRes authSubquery(SAuthCxt* pCxt, SNode* pStmt) {
static EDealRes authSelectImpl(SNode* pNode, void* pContext) {
SAuthCxt* pCxt = pContext;
if (QUERY_NODE_REAL_TABLE == nodeType(pNode)) {
- pCxt->errCode = checkAuth(pCxt->pParseCxt, ((SRealTableNode*)pNode)->table.dbName, AUTH_TYPE_READ);
+ pCxt->errCode = checkAuth(pCxt, ((SRealTableNode*)pNode)->table.dbName, AUTH_TYPE_READ);
return TSDB_CODE_SUCCESS == pCxt->errCode ? DEAL_RES_CONTINUE : DEAL_RES_ERROR;
} else if (QUERY_NODE_TEMP_TABLE == nodeType(pNode)) {
return authSubquery(pCxt, ((STempTableNode*)pNode)->pSubquery);
@@ -79,87 +86,8 @@ static int32_t authQuery(SAuthCxt* pCxt, SNode* pStmt) {
return authSetOperator(pCxt, (SSetOperator*)pStmt);
case QUERY_NODE_SELECT_STMT:
return authSelect(pCxt, (SSelectStmt*)pStmt);
- case QUERY_NODE_CREATE_DATABASE_STMT:
- case QUERY_NODE_DROP_DATABASE_STMT:
- case QUERY_NODE_ALTER_DATABASE_STMT:
- case QUERY_NODE_CREATE_TABLE_STMT:
- case QUERY_NODE_CREATE_SUBTABLE_CLAUSE:
- case QUERY_NODE_CREATE_MULTI_TABLE_STMT:
- case QUERY_NODE_DROP_TABLE_CLAUSE:
- case QUERY_NODE_DROP_TABLE_STMT:
- case QUERY_NODE_DROP_SUPER_TABLE_STMT:
- case QUERY_NODE_ALTER_TABLE_STMT:
- case QUERY_NODE_CREATE_USER_STMT:
- case QUERY_NODE_ALTER_USER_STMT:
- break;
- case QUERY_NODE_DROP_USER_STMT: {
+ case QUERY_NODE_DROP_USER_STMT:
return authDropUser(pCxt, (SDropUserStmt*)pStmt);
- }
- case QUERY_NODE_USE_DATABASE_STMT:
- case QUERY_NODE_CREATE_DNODE_STMT:
- case QUERY_NODE_DROP_DNODE_STMT:
- case QUERY_NODE_ALTER_DNODE_STMT:
- case QUERY_NODE_CREATE_INDEX_STMT:
- case QUERY_NODE_DROP_INDEX_STMT:
- case QUERY_NODE_CREATE_QNODE_STMT:
- case QUERY_NODE_DROP_QNODE_STMT:
- case QUERY_NODE_CREATE_BNODE_STMT:
- case QUERY_NODE_DROP_BNODE_STMT:
- case QUERY_NODE_CREATE_SNODE_STMT:
- case QUERY_NODE_DROP_SNODE_STMT:
- case QUERY_NODE_CREATE_MNODE_STMT:
- case QUERY_NODE_DROP_MNODE_STMT:
- case QUERY_NODE_CREATE_TOPIC_STMT:
- case QUERY_NODE_DROP_TOPIC_STMT:
- case QUERY_NODE_ALTER_LOCAL_STMT:
- case QUERY_NODE_EXPLAIN_STMT:
- case QUERY_NODE_DESCRIBE_STMT:
- case QUERY_NODE_RESET_QUERY_CACHE_STMT:
- case QUERY_NODE_COMPACT_STMT:
- case QUERY_NODE_CREATE_FUNCTION_STMT:
- case QUERY_NODE_DROP_FUNCTION_STMT:
- case QUERY_NODE_CREATE_STREAM_STMT:
- case QUERY_NODE_DROP_STREAM_STMT:
- case QUERY_NODE_MERGE_VGROUP_STMT:
- case QUERY_NODE_REDISTRIBUTE_VGROUP_STMT:
- case QUERY_NODE_SPLIT_VGROUP_STMT:
- case QUERY_NODE_SYNCDB_STMT:
- case QUERY_NODE_GRANT_STMT:
- case QUERY_NODE_REVOKE_STMT:
- case QUERY_NODE_SHOW_DNODES_STMT:
- case QUERY_NODE_SHOW_MNODES_STMT:
- case QUERY_NODE_SHOW_MODULES_STMT:
- case QUERY_NODE_SHOW_QNODES_STMT:
- case QUERY_NODE_SHOW_SNODES_STMT:
- case QUERY_NODE_SHOW_BNODES_STMT:
- case QUERY_NODE_SHOW_CLUSTER_STMT:
- case QUERY_NODE_SHOW_DATABASES_STMT:
- case QUERY_NODE_SHOW_FUNCTIONS_STMT:
- case QUERY_NODE_SHOW_INDEXES_STMT:
- case QUERY_NODE_SHOW_STABLES_STMT:
- case QUERY_NODE_SHOW_STREAMS_STMT:
- case QUERY_NODE_SHOW_TABLES_STMT:
- case QUERY_NODE_SHOW_USERS_STMT:
- case QUERY_NODE_SHOW_LICENCE_STMT:
- case QUERY_NODE_SHOW_VGROUPS_STMT:
- case QUERY_NODE_SHOW_TOPICS_STMT:
- case QUERY_NODE_SHOW_CONSUMERS_STMT:
- case QUERY_NODE_SHOW_SUBSCRIBES_STMT:
- case QUERY_NODE_SHOW_SMAS_STMT:
- case QUERY_NODE_SHOW_CONFIGS_STMT:
- case QUERY_NODE_SHOW_CONNECTIONS_STMT:
- case QUERY_NODE_SHOW_QUERIES_STMT:
- case QUERY_NODE_SHOW_VNODES_STMT:
- case QUERY_NODE_SHOW_APPS_STMT:
- case QUERY_NODE_SHOW_SCORES_STMT:
- case QUERY_NODE_SHOW_VARIABLE_STMT:
- case QUERY_NODE_SHOW_CREATE_DATABASE_STMT:
- case QUERY_NODE_SHOW_CREATE_TABLE_STMT:
- case QUERY_NODE_SHOW_CREATE_STABLE_STMT:
- case QUERY_NODE_SHOW_TRANSACTIONS_STMT:
- case QUERY_NODE_KILL_CONNECTION_STMT:
- case QUERY_NODE_KILL_QUERY_STMT:
- case QUERY_NODE_KILL_TRANSACTION_STMT:
default:
break;
}
@@ -168,6 +96,6 @@ static int32_t authQuery(SAuthCxt* pCxt, SNode* pStmt) {
}
int32_t authenticate(SParseContext* pParseCxt, SQuery* pQuery) {
- SAuthCxt cxt = {.pParseCxt = pParseCxt, .errCode = TSDB_CODE_SUCCESS};
+ SAuthCxt cxt = {.pParseCxt = pParseCxt, .pMetaCache = pQuery->pMetaCache, .errCode = TSDB_CODE_SUCCESS};
return authQuery(&cxt, pQuery->pRoot);
}
diff --git a/source/libs/parser/src/parTranslater.c b/source/libs/parser/src/parTranslater.c
index d142d89373065df859259aaa7d3d565f1216c9f7..d84b005f7f0cd8bd91a3f9bbd17e9a8e7fa81a78 100644
--- a/source/libs/parser/src/parTranslater.c
+++ b/source/libs/parser/src/parTranslater.c
@@ -152,7 +152,7 @@ static int32_t getDBVgInfoImpl(STranslateContext* pCxt, const SName* pName, SArr
tNameGetFullDbName(pName, fullDbName);
int32_t code = TSDB_CODE_SUCCESS;
if (pParCxt->async) {
- code = getDBVgInfoFromCache(pCxt->pMetaCache, fullDbName, pVgInfo);
+ code = getDbVgInfoFromCache(pCxt->pMetaCache, fullDbName, pVgInfo);
} else {
code = collectUseDatabaseImpl(fullDbName, pCxt->pDbs);
if (TSDB_CODE_SUCCESS == code) {
@@ -177,7 +177,7 @@ static int32_t getTableHashVgroupImpl(STranslateContext* pCxt, const SName* pNam
SParseContext* pParCxt = pCxt->pParseCxt;
int32_t code = TSDB_CODE_SUCCESS;
if (pParCxt->async) {
- code = getTableHashVgroupFromCache(pCxt->pMetaCache, pName, pInfo);
+ code = getTableVgroupFromCache(pCxt->pMetaCache, pName, pInfo);
} else {
code = collectUseDatabase(pName, pCxt->pDbs);
if (TSDB_CODE_SUCCESS == code) {
@@ -205,7 +205,7 @@ static int32_t getDBVgVersion(STranslateContext* pCxt, const char* pDbFName, int
SParseContext* pParCxt = pCxt->pParseCxt;
int32_t code = TSDB_CODE_SUCCESS;
if (pParCxt->async) {
- code = getDBVgVersionFromCache(pCxt->pMetaCache, pDbFName, pVersion, pDbId, pTableNum);
+ code = getDbVgVersionFromCache(pCxt->pMetaCache, pDbFName, pVersion, pDbId, pTableNum);
} else {
code = collectUseDatabaseImpl(pDbFName, pCxt->pDbs);
if (TSDB_CODE_SUCCESS == code) {
@@ -226,7 +226,7 @@ static int32_t getDBCfg(STranslateContext* pCxt, const char* pDbName, SDbCfgInfo
tNameGetFullDbName(&name, dbFname);
int32_t code = TSDB_CODE_SUCCESS;
if (pParCxt->async) {
- code = getDBCfgFromCache(pCxt->pMetaCache, dbFname, pInfo);
+ code = getDbCfgFromCache(pCxt->pMetaCache, dbFname, pInfo);
} else {
code = collectUseDatabaseImpl(dbFname, pCxt->pDbs);
if (TSDB_CODE_SUCCESS == code) {
@@ -239,6 +239,27 @@ static int32_t getDBCfg(STranslateContext* pCxt, const char* pDbName, SDbCfgInfo
return code;
}
+static int32_t getUdfInfo(STranslateContext* pCxt, SFunctionNode* pFunc) {
+ SParseContext* pParCxt = pCxt->pParseCxt;
+ SFuncInfo funcInfo = {0};
+ int32_t code = TSDB_CODE_SUCCESS;
+ if (pParCxt->async) {
+ code = getUdfInfoFromCache(pCxt->pMetaCache, pFunc->functionName, &funcInfo);
+ } else {
+ code = catalogGetUdfInfo(pParCxt->pCatalog, pParCxt->pTransporter, &pParCxt->mgmtEpSet, pFunc->functionName,
+ &funcInfo);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ pFunc->funcType = FUNCTION_TYPE_UDF;
+ pFunc->funcId = TSDB_FUNC_TYPE_AGGREGATE == funcInfo.funcType ? FUNC_AGGREGATE_UDF_ID : FUNC_SCALAR_UDF_ID;
+ pFunc->node.resType.type = funcInfo.outputType;
+ pFunc->node.resType.bytes = funcInfo.outputLen;
+ pFunc->udfBufSize = funcInfo.bufSize;
+ tFreeSFuncInfo(&funcInfo);
+ }
+ return code;
+}
+
static int32_t initTranslateContext(SParseContext* pParseCxt, SParseMetaCache* pMetaCache, STranslateContext* pCxt) {
pCxt->pParseCxt = pParseCxt;
pCxt->errCode = TSDB_CODE_SUCCESS;
@@ -873,12 +894,11 @@ static bool hasInvalidFuncNesting(SNodeList* pParameterList) {
}
static int32_t getFuncInfo(STranslateContext* pCxt, SFunctionNode* pFunc) {
- SFmGetFuncInfoParam param = {.pCtg = pCxt->pParseCxt->pCatalog,
- .pRpc = pCxt->pParseCxt->pTransporter,
- .pMgmtEps = &pCxt->pParseCxt->mgmtEpSet,
- .pErrBuf = pCxt->msgBuf.buf,
- .errBufLen = pCxt->msgBuf.len};
- return fmGetFuncInfo(¶m, pFunc);
+ int32_t code = fmGetFuncInfo(pFunc, pCxt->msgBuf.buf, pCxt->msgBuf.len);
+ if (TSDB_CODE_FUNC_NOT_BUILTIN_FUNTION == code) {
+ code = getUdfInfo(pCxt, pFunc);
+ }
+ return code;
}
static int32_t translateAggFunc(STranslateContext* pCxt, SFunctionNode* pFunc) {
@@ -1212,7 +1232,6 @@ static int32_t setSysTableVgroupList(STranslateContext* pCxt, SName* pName, SRea
int32_t code = TSDB_CODE_SUCCESS;
SArray* vgroupList = NULL;
if ('\0' != pRealTable->qualDbName[0]) {
- // todo release after mnode can be processed
if (0 != strcmp(pRealTable->qualDbName, TSDB_INFORMATION_SCHEMA_DB)) {
code = getDBVgInfo(pCxt, pRealTable->qualDbName, &vgroupList);
}
@@ -1220,7 +1239,6 @@ static int32_t setSysTableVgroupList(STranslateContext* pCxt, SName* pName, SRea
code = getDBVgInfoImpl(pCxt, pName, &vgroupList);
}
- // todo release after mnode can be processed
if (TSDB_CODE_SUCCESS == code) {
code = addMnodeToVgroupList(&pCxt->pParseCxt->mgmtEpSet, &vgroupList);
}
diff --git a/source/libs/parser/src/parUtil.c b/source/libs/parser/src/parUtil.c
index 9de43b8cd38d612a6bfd96685edc3f92bca23060..34b01991545cdfdea46203b6edc73098e273fd39 100644
--- a/source/libs/parser/src/parUtil.c
+++ b/source/libs/parser/src/parUtil.c
@@ -15,6 +15,9 @@
#include "parUtil.h"
#include "cJSON.h"
+#include "querynodes.h"
+
+#define USER_AUTH_KEY_MAX_LEN TSDB_USER_LEN + TSDB_DB_FNAME_LEN + 2
static char* getSyntaxErrFormat(int32_t errCode) {
switch (errCode) {
@@ -255,17 +258,8 @@ STableComInfo getTableInfo(const STableMeta* pTableMeta) {
return pTableMeta->tableInfo;
}
-static uint32_t getTableMetaSize(const STableMeta* pTableMeta) {
- int32_t totalCols = 0;
- if (pTableMeta->tableInfo.numOfColumns >= 0) {
- totalCols = pTableMeta->tableInfo.numOfColumns + pTableMeta->tableInfo.numOfTags;
- }
-
- return sizeof(STableMeta) + totalCols * sizeof(SSchema);
-}
-
STableMeta* tableMetaDup(const STableMeta* pTableMeta) {
- size_t size = getTableMetaSize(pTableMeta);
+ size_t size = TABLE_META_SIZE(pTableMeta);
STableMeta* p = taosMemoryMalloc(size);
memcpy(p, pTableMeta, size);
@@ -449,6 +443,26 @@ end:
return retCode;
}
+static int32_t userAuthToString(int32_t acctId, const char* pUser, const char* pDb, AUTH_TYPE type, char* pStr) {
+ return sprintf(pStr, "%s*%d.%s*%d", pUser, acctId, pDb, type);
+}
+
+static int32_t userAuthToStringExt(const char* pUser, const char* pDbFName, AUTH_TYPE type, char* pStr) {
+ return sprintf(pStr, "%s*%s*%d", pUser, pDbFName, type);
+}
+
+static void stringToUserAuth(const char* pStr, int32_t len, SUserAuthInfo* pUserAuth) {
+ char* p1 = strchr(pStr, '*');
+ strncpy(pUserAuth->user, pStr, p1 - pStr);
+ ++p1;
+ char* p2 = strchr(p1, '*');
+ strncpy(pUserAuth->dbFName, p1, p2 - p1);
+ ++p2;
+ char buf[10] = {0};
+ strncpy(buf, p2, len - (p2 - pStr));
+ pUserAuth->type = taosStr2Int32(buf, NULL, 10);
+}
+
static int32_t buildTableReq(SHashObj* pTablesHash, SArray** pTables) {
if (NULL != pTablesHash) {
*pTables = taosArrayInit(taosHashGetSize(pTablesHash), sizeof(SName));
@@ -503,6 +517,44 @@ static int32_t buildTableVgroupReq(SHashObj* pTableVgroupHash, SArray** pTableVg
static int32_t buildDbCfgReq(SHashObj* pDbCfgHash, SArray** pDbCfg) { return buildDbReq(pDbCfgHash, pDbCfg); }
+static int32_t buildUserAuthReq(SHashObj* pUserAuthHash, SArray** pUserAuth) {
+ if (NULL != pUserAuthHash) {
+ *pUserAuth = taosArrayInit(taosHashGetSize(pUserAuthHash), sizeof(SUserAuthInfo));
+ if (NULL == *pUserAuth) {
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
+ void* p = taosHashIterate(pUserAuthHash, NULL);
+ while (NULL != p) {
+ size_t len = 0;
+ char* pKey = taosHashGetKey(p, &len);
+ SUserAuthInfo userAuth = {0};
+ stringToUserAuth(pKey, len, &userAuth);
+ taosArrayPush(*pUserAuth, &userAuth);
+ p = taosHashIterate(pUserAuthHash, p);
+ }
+ }
+ return TSDB_CODE_SUCCESS;
+}
+
+static int32_t buildUdfReq(SHashObj* pUdfHash, SArray** pUdf) {
+ if (NULL != pUdfHash) {
+ *pUdf = taosArrayInit(taosHashGetSize(pUdfHash), TSDB_FUNC_NAME_LEN);
+ if (NULL == *pUdf) {
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
+ void* p = taosHashIterate(pUdfHash, NULL);
+ while (NULL != p) {
+ size_t len = 0;
+ char* pFunc = taosHashGetKey(p, &len);
+ char func[TSDB_FUNC_NAME_LEN] = {0};
+ strncpy(func, pFunc, len);
+ taosArrayPush(*pUdf, func);
+ p = taosHashIterate(pUdfHash, p);
+ }
+ }
+ return TSDB_CODE_SUCCESS;
+}
+
int32_t buildCatalogReq(const SParseMetaCache* pMetaCache, SCatalogReq* pCatalogReq) {
int32_t code = buildTableMetaReq(pMetaCache->pTableMeta, &pCatalogReq->pTableMeta);
if (TSDB_CODE_SUCCESS == code) {
@@ -512,7 +564,13 @@ int32_t buildCatalogReq(const SParseMetaCache* pMetaCache, SCatalogReq* pCatalog
code = buildTableVgroupReq(pMetaCache->pTableVgroup, &pCatalogReq->pTableHash);
}
if (TSDB_CODE_SUCCESS == code) {
- code = buildDbCfgReq(pMetaCache->pDbVgroup, &pCatalogReq->pDbCfg);
+ code = buildDbCfgReq(pMetaCache->pDbCfg, &pCatalogReq->pDbCfg);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = buildUserAuthReq(pMetaCache->pUserAuth, &pCatalogReq->pUser);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = buildUdfReq(pMetaCache->pUdf, &pCatalogReq->pUdf);
}
return code;
}
@@ -568,6 +626,31 @@ static int32_t putDbCfgToCache(const SArray* pDbCfgReq, const SArray* pDbCfgData
return TSDB_CODE_SUCCESS;
}
+static int32_t putUserAuthToCache(const SArray* pUserAuthReq, const SArray* pUserAuthData, SHashObj* pUserAuth) {
+ int32_t nvgs = taosArrayGetSize(pUserAuthReq);
+ for (int32_t i = 0; i < nvgs; ++i) {
+ SUserAuthInfo* pUser = taosArrayGet(pUserAuthReq, i);
+ char key[USER_AUTH_KEY_MAX_LEN] = {0};
+ int32_t len = userAuthToStringExt(pUser->user, pUser->dbFName, pUser->type, key);
+ if (TSDB_CODE_SUCCESS != taosHashPut(pUserAuth, key, len, taosArrayGet(pUserAuthData, i), sizeof(bool))) {
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
+ }
+ return TSDB_CODE_SUCCESS;
+}
+
+static int32_t putUdfToCache(const SArray* pUdfReq, const SArray* pUdfData, SHashObj* pUdf) {
+ int32_t num = taosArrayGetSize(pUdfReq);
+ for (int32_t i = 0; i < num; ++i) {
+ char* pFunc = taosArrayGet(pUdfReq, i);
+ SFuncInfo* pInfo = taosArrayGet(pUdfData, i);
+ if (TSDB_CODE_SUCCESS != taosHashPut(pUdf, pFunc, strlen(pFunc), &pInfo, POINTER_BYTES)) {
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
+ }
+ return TSDB_CODE_SUCCESS;
+}
+
int32_t putMetaDataToCache(const SCatalogReq* pCatalogReq, const SMetaData* pMetaData, SParseMetaCache* pMetaCache) {
int32_t code = putTableMetaToCache(pCatalogReq->pTableMeta, pMetaData->pTableMeta, pMetaCache->pTableMeta);
if (TSDB_CODE_SUCCESS == code) {
@@ -579,54 +662,161 @@ int32_t putMetaDataToCache(const SCatalogReq* pCatalogReq, const SMetaData* pMet
if (TSDB_CODE_SUCCESS == code) {
code = putDbCfgToCache(pCatalogReq->pDbCfg, pMetaData->pDbCfg, pMetaCache->pDbCfg);
}
+ if (TSDB_CODE_SUCCESS == code) {
+ code = putUserAuthToCache(pCatalogReq->pUser, pMetaData->pUser, pMetaCache->pUserAuth);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = putUdfToCache(pCatalogReq->pUdf, pMetaData->pUdfList, pMetaCache->pUdf);
+ }
return code;
}
-int32_t reserveTableMetaInCache(int32_t acctId, const char* pDb, const char* pTable, SParseMetaCache* pMetaCache) {
- if (NULL == pMetaCache->pTableMeta) {
- pMetaCache->pTableMeta = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK);
- if (NULL == pMetaCache->pTableMeta) {
+static int32_t reserveTableReqInCache(int32_t acctId, const char* pDb, const char* pTable, SHashObj** pTables) {
+ if (NULL == *pTables) {
+ *pTables = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK);
+ if (NULL == *pTables) {
return TSDB_CODE_OUT_OF_MEMORY;
}
}
char fullName[TSDB_TABLE_FNAME_LEN];
int32_t len = snprintf(fullName, sizeof(fullName), "%d.%s.%s", acctId, pDb, pTable);
- return taosHashPut(pMetaCache->pTableMeta, fullName, len, &len, POINTER_BYTES);
+ return taosHashPut(*pTables, fullName, len, &pTables, POINTER_BYTES);
+}
+
+int32_t reserveTableMetaInCache(int32_t acctId, const char* pDb, const char* pTable, SParseMetaCache* pMetaCache) {
+ return reserveTableReqInCache(acctId, pDb, pTable, &pMetaCache->pTableMeta);
}
int32_t getTableMetaFromCache(SParseMetaCache* pMetaCache, const SName* pName, STableMeta** pMeta) {
char fullName[TSDB_TABLE_FNAME_LEN];
tNameExtractFullName(pName, fullName);
- *pMeta = taosHashGet(pMetaCache->pTableMeta, fullName, strlen(fullName));
- return NULL == *pMeta ? TSDB_CODE_PAR_INTERNAL_ERROR : TSDB_CODE_SUCCESS;
+ STableMeta** pRes = taosHashGet(pMetaCache->pTableMeta, fullName, strlen(fullName));
+ if (NULL == pRes || NULL == *pRes) {
+ return TSDB_CODE_PAR_INTERNAL_ERROR;
+ }
+ *pMeta = tableMetaDup(*pRes);
+ if (NULL == *pMeta) {
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
+ return TSDB_CODE_SUCCESS;
+}
+
+static int32_t reserveDbReqInCache(int32_t acctId, const char* pDb, SHashObj** pDbs) {
+ if (NULL == *pDbs) {
+ *pDbs = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK);
+ if (NULL == *pDbs) {
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
+ }
+ char fullName[TSDB_TABLE_FNAME_LEN];
+ int32_t len = snprintf(fullName, sizeof(fullName), "%d.%s", acctId, pDb);
+ return taosHashPut(*pDbs, fullName, len, &pDbs, POINTER_BYTES);
+}
+
+int32_t reserveDbVgInfoInCache(int32_t acctId, const char* pDb, SParseMetaCache* pMetaCache) {
+ return reserveDbReqInCache(acctId, pDb, &pMetaCache->pDbVgroup);
+}
+
+int32_t getDbVgInfoFromCache(SParseMetaCache* pMetaCache, const char* pDbFName, SArray** pVgInfo) {
+ SArray** pRes = taosHashGet(pMetaCache->pDbVgroup, pDbFName, strlen(pDbFName));
+ if (NULL == pRes) {
+ return TSDB_CODE_PAR_INTERNAL_ERROR;
+ }
+ // *pRes is null, which is a legal value, indicating that the user DB has not been created
+ if (NULL != *pRes) {
+ *pVgInfo = taosArrayDup(*pRes);
+ if (NULL == *pVgInfo) {
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
+ }
+ return TSDB_CODE_SUCCESS;
}
-int32_t getDBVgInfoFromCache(SParseMetaCache* pMetaCache, const char* pDbFName, SArray** pVgInfo) {
- *pVgInfo = taosHashGet(pMetaCache->pDbVgroup, pDbFName, strlen(pDbFName));
- return NULL == *pVgInfo ? TSDB_CODE_PAR_INTERNAL_ERROR : TSDB_CODE_SUCCESS;
+int32_t reserveTableVgroupInCache(int32_t acctId, const char* pDb, const char* pTable, SParseMetaCache* pMetaCache) {
+ return reserveTableReqInCache(acctId, pDb, pTable, &pMetaCache->pTableVgroup);
}
-int32_t getTableHashVgroupFromCache(SParseMetaCache* pMetaCache, const SName* pName, SVgroupInfo* pVgroup) {
+int32_t getTableVgroupFromCache(SParseMetaCache* pMetaCache, const SName* pName, SVgroupInfo* pVgroup) {
char fullName[TSDB_TABLE_FNAME_LEN];
tNameExtractFullName(pName, fullName);
- SVgroupInfo* pInfo = taosHashGet(pMetaCache->pTableVgroup, fullName, strlen(fullName));
- if (NULL == pInfo) {
+ SVgroupInfo** pRes = taosHashGet(pMetaCache->pTableVgroup, fullName, strlen(fullName));
+ if (NULL == pRes || NULL == *pRes) {
return TSDB_CODE_PAR_INTERNAL_ERROR;
}
- memcpy(pVgroup, pInfo, sizeof(SVgroupInfo));
+ memcpy(pVgroup, *pRes, sizeof(SVgroupInfo));
return TSDB_CODE_SUCCESS;
}
-int32_t getDBVgVersionFromCache(SParseMetaCache* pMetaCache, const char* pDbFName, int32_t* pVersion, int64_t* pDbId,
+int32_t reserveDbVgVersionInCache(int32_t acctId, const char* pDb, SParseMetaCache* pMetaCache) {
+ return reserveDbReqInCache(acctId, pDb, &pMetaCache->pDbCfg);
+}
+
+int32_t getDbVgVersionFromCache(SParseMetaCache* pMetaCache, const char* pDbFName, int32_t* pVersion, int64_t* pDbId,
int32_t* pTableNum) {
- return TSDB_CODE_PAR_INTERNAL_ERROR;
+ SDbInfo** pRes = taosHashGet(pMetaCache->pDbCfg, pDbFName, strlen(pDbFName));
+ if (NULL == pRes || NULL == *pRes) {
+ return TSDB_CODE_PAR_INTERNAL_ERROR;
+ }
+ *pVersion = (*pRes)->vgVer;
+ *pDbId = (*pRes)->dbId;
+ *pTableNum = (*pRes)->tbNum;
+ return TSDB_CODE_SUCCESS;
+}
+
+int32_t reserveDbCfgInCache(int32_t acctId, const char* pDb, SParseMetaCache* pMetaCache) {
+ return reserveDbReqInCache(acctId, pDb, &pMetaCache->pDbCfg);
+}
+
+int32_t getDbCfgFromCache(SParseMetaCache* pMetaCache, const char* pDbFName, SDbCfgInfo* pInfo) {
+ SDbCfgInfo** pRes = taosHashGet(pMetaCache->pDbCfg, pDbFName, strlen(pDbFName));
+ if (NULL == pRes || NULL == *pRes) {
+ return TSDB_CODE_PAR_INTERNAL_ERROR;
+ }
+ memcpy(pInfo, *pRes, sizeof(SDbCfgInfo));
+ return TSDB_CODE_SUCCESS;
+}
+
+int32_t reserveUserAuthInCache(int32_t acctId, const char* pUser, const char* pDb, AUTH_TYPE type,
+ SParseMetaCache* pMetaCache) {
+ if (NULL == pMetaCache->pUserAuth) {
+ pMetaCache->pUserAuth = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK);
+ if (NULL == pMetaCache->pUserAuth) {
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
+ }
+ char key[USER_AUTH_KEY_MAX_LEN] = {0};
+ int32_t len = userAuthToString(acctId, pUser, pDb, type, key);
+ bool pass = false;
+ return taosHashPut(pMetaCache->pUserAuth, key, len, &pass, sizeof(pass));
+}
+
+int32_t getUserAuthFromCache(SParseMetaCache* pMetaCache, const char* pUser, const char* pDbFName, AUTH_TYPE type,
+ bool* pPass) {
+ char key[USER_AUTH_KEY_MAX_LEN] = {0};
+ int32_t len = userAuthToStringExt(pUser, pDbFName, type, key);
+ bool* pRes = taosHashGet(pMetaCache->pUserAuth, key, len);
+ if (NULL == pRes) {
+ return TSDB_CODE_PAR_INTERNAL_ERROR;
+ }
+ *pPass = *pRes;
+ return TSDB_CODE_SUCCESS;
+}
+
+int32_t reserveUdfInCache(const char* pFunc, SParseMetaCache* pMetaCache) {
+ if (NULL == pMetaCache->pUdf) {
+ pMetaCache->pUdf = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK);
+ if (NULL == pMetaCache->pUdf) {
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
+ }
+ return taosHashPut(pMetaCache->pUdf, pFunc, strlen(pFunc), &pMetaCache, POINTER_BYTES);
}
-int32_t getDBCfgFromCache(SParseMetaCache* pMetaCache, const char* pDbFName, SDbCfgInfo* pInfo) {
- SDbCfgInfo* pDbCfg = taosHashGet(pMetaCache->pDbCfg, pDbFName, strlen(pDbFName));
- if (NULL == pDbCfg) {
+int32_t getUdfInfoFromCache(SParseMetaCache* pMetaCache, const char* pFunc, SFuncInfo* pInfo) {
+ SFuncInfo** pRes = taosHashGet(pMetaCache->pUdf, pFunc, strlen(pFunc));
+ if (NULL == pRes || NULL == *pRes) {
return TSDB_CODE_PAR_INTERNAL_ERROR;
}
- memcpy(pInfo, pDbCfg, sizeof(SDbCfgInfo));
+ memcpy(pInfo, *pRes, sizeof(SFuncInfo));
return TSDB_CODE_SUCCESS;
}
diff --git a/source/libs/parser/src/parser.c b/source/libs/parser/src/parser.c
index a9962b044491fc20dd55237f66026576b36ef49f..54aa9c642cfb9cf4c580addf374f3a91907dd56a 100644
--- a/source/libs/parser/src/parser.c
+++ b/source/libs/parser/src/parser.c
@@ -59,6 +59,14 @@ static int32_t parseSqlIntoAst(SParseContext* pCxt, SQuery** pQuery) {
return code;
}
+static int32_t syntaxParseSql(SParseContext* pCxt, SQuery** pQuery) {
+ int32_t code = parse(pCxt, pQuery);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = collectMetaKey(pCxt, *pQuery);
+ }
+ return code;
+}
+
static int32_t setValueByBindParam(SValueNode* pVal, TAOS_MULTI_BIND* pParam) {
if (pParam->is_null && 1 == *(pParam->is_null)) {
pVal->node.resType.type = TSDB_DATA_TYPE_NULL;
@@ -98,6 +106,7 @@ static int32_t setValueByBindParam(SValueNode* pVal, TAOS_MULTI_BIND* pParam) {
}
varDataSetLen(pVal->datum.p, pVal->node.resType.bytes);
strncpy(varDataVal(pVal->datum.p), (const char*)pParam->buffer, pVal->node.resType.bytes);
+ pVal->node.resType.bytes += VARSTR_HEADER_SIZE;
break;
case TSDB_DATA_TYPE_NCHAR: {
pVal->node.resType.bytes *= TSDB_NCHAR_SIZE;
@@ -112,7 +121,7 @@ static int32_t setValueByBindParam(SValueNode* pVal, TAOS_MULTI_BIND* pParam) {
return errno;
}
varDataSetLen(pVal->datum.p, output);
- pVal->node.resType.bytes = output;
+ pVal->node.resType.bytes = output + VARSTR_HEADER_SIZE;
break;
}
case TSDB_DATA_TYPE_TIMESTAMP:
@@ -188,7 +197,7 @@ int32_t qSyntaxParseSql(SParseContext* pCxt, SQuery** pQuery, struct SCatalogReq
if (qIsInsertSql(pCxt->pSql, pCxt->sqlLen)) {
// todo insert sql
} else {
- code = parse(pCxt, pQuery);
+ code = syntaxParseSql(pCxt, pQuery);
}
if (TSDB_CODE_SUCCESS == code) {
code = buildCatalogReq((*pQuery)->pMetaCache, pCatalogReq);
diff --git a/source/libs/parser/test/CMakeLists.txt b/source/libs/parser/test/CMakeLists.txt
index 0e8adb978dd0e9fca5a67e9999ce7c5faa877cc0..c252653e9ee743ca8c9e899f6851e1893fb766be 100644
--- a/source/libs/parser/test/CMakeLists.txt
+++ b/source/libs/parser/test/CMakeLists.txt
@@ -26,7 +26,9 @@ if(${BUILD_WINGETOPT})
target_link_libraries(parserTest PUBLIC wingetopt)
endif()
-add_test(
- NAME parserTest
- COMMAND parserTest
-)
+if(NOT TD_WINDOWS)
+ add_test(
+ NAME parserTest
+ COMMAND parserTest
+ )
+endif(NOT TD_WINDOWS)
diff --git a/source/libs/parser/test/mockCatalog.cpp b/source/libs/parser/test/mockCatalog.cpp
index 19460fb87a914519e8501c5f1381df16a419dade..154f13ea686aa172d9c2ad53bfadcae893305ed0 100644
--- a/source/libs/parser/test/mockCatalog.cpp
+++ b/source/libs/parser/test/mockCatalog.cpp
@@ -103,7 +103,7 @@ void generatePerformanceSchema(MockCatalogService* mcs) {
}
{
ITableBuilder& builder = mcs->createTableBuilder("performance_schema", "streams", TSDB_SYSTEM_TABLE, 1)
- .addColumn("stream_name", TSDB_DATA_TYPE_BINARY, TSDB_TABLE_NAME_LEN);
+ .addColumn("stream_name", TSDB_DATA_TYPE_BINARY, TSDB_TABLE_NAME_LEN);
builder.done();
}
}
@@ -157,6 +157,12 @@ void generateTestST1(MockCatalogService* mcs) {
mcs->createSubTable("test", "st1", "st1s3", 1);
}
+void generateFunctions(MockCatalogService* mcs) {
+ mcs->createFunction("udf1", TSDB_FUNC_TYPE_SCALAR, TSDB_DATA_TYPE_INT, tDataTypes[TSDB_DATA_TYPE_INT].bytes, 0);
+ mcs->createFunction("udf2", TSDB_FUNC_TYPE_AGGREGATE, TSDB_DATA_TYPE_DOUBLE, tDataTypes[TSDB_DATA_TYPE_DOUBLE].bytes,
+ 8);
+}
+
} // namespace
int32_t __catalogGetHandle(const char* clusterId, struct SCatalog** catalogHandle) { return 0; }
@@ -196,6 +202,11 @@ int32_t __catalogChkAuth(SCatalog* pCtg, void* pRpc, const SEpSet* pMgmtEps, con
return 0;
}
+int32_t __catalogGetUdfInfo(SCatalog* pCtg, void* pTrans, const SEpSet* pMgmtEps, const char* funcName,
+ SFuncInfo* pInfo) {
+ return g_mockCatalogService->catalogGetUdfInfo(funcName, pInfo);
+}
+
void initMetaDataEnv() {
g_mockCatalogService.reset(new MockCatalogService());
@@ -209,6 +220,7 @@ void initMetaDataEnv() {
stub.set(catalogGetDBVgInfo, __catalogGetDBVgInfo);
stub.set(catalogGetDBCfg, __catalogGetDBCfg);
stub.set(catalogChkAuth, __catalogChkAuth);
+ stub.set(catalogGetUdfInfo, __catalogGetUdfInfo);
// {
// AddrAny any("libcatalog.so");
// std::map result;
@@ -256,6 +268,7 @@ void generateMetaData() {
generatePerformanceSchema(g_mockCatalogService.get());
generateTestT1(g_mockCatalogService.get());
generateTestST1(g_mockCatalogService.get());
+ generateFunctions(g_mockCatalogService.get());
g_mockCatalogService->showTables();
}
diff --git a/source/libs/parser/test/mockCatalogService.cpp b/source/libs/parser/test/mockCatalogService.cpp
index 9e9e5cd2af16fa9569765cec395eb2d9f6e9b11c..aefbb3967ff74d9ee9eb7aa488f44a0d31dae25b 100644
--- a/source/libs/parser/test/mockCatalogService.cpp
+++ b/source/libs/parser/test/mockCatalogService.cpp
@@ -120,11 +120,35 @@ class MockCatalogServiceImpl {
return copyTableVgroup(db, tNameGetTableName(pTableName), vgList);
}
+ int32_t catalogGetUdfInfo(const std::string& funcName, SFuncInfo* pInfo) const {
+ auto it = udf_.find(funcName);
+ if (udf_.end() == it) {
+ return TSDB_CODE_FAILED;
+ }
+ memcpy(pInfo, it->second.get(), sizeof(SFuncInfo));
+ return TSDB_CODE_SUCCESS;
+ }
+
int32_t catalogGetAllMeta(const SCatalogReq* pCatalogReq, SMetaData* pMetaData) const {
int32_t code = getAllTableMeta(pCatalogReq->pTableMeta, &pMetaData->pTableMeta);
if (TSDB_CODE_SUCCESS == code) {
code = getAllTableVgroup(pCatalogReq->pTableHash, &pMetaData->pTableHash);
}
+ if (TSDB_CODE_SUCCESS == code) {
+ code = getAllDbVgroup(pCatalogReq->pDbVgroup, &pMetaData->pDbVgroup);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = getAllDbCfg(pCatalogReq->pDbCfg, &pMetaData->pDbCfg);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = getAllDbInfo(pCatalogReq->pDbInfo, &pMetaData->pDbInfo);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = getAllUserAuth(pCatalogReq->pUser, &pMetaData->pUser);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = getAllUdf(pCatalogReq->pUdf, &pMetaData->pUdfList);
+ }
return code;
}
@@ -163,9 +187,8 @@ class MockCatalogServiceImpl {
// number of backward fills
#define NOB(n) ((n) % 2 ? (n) / 2 + 1 : (n) / 2)
// center aligned
-#define CA(n, s) \
- std::setw(NOF((n) - (s).length())) << "" << (s) << std::setw(NOB((n) - (s).length())) << "" \
- << "|"
+#define CA(n, s) std::setw(NOF((n) - int((s).length()))) << "" << (s) \
+ << std::setw(NOB((n) - int((s).length()))) << "" << "|"
// string field length
#define SFL 20
// string field header
@@ -211,21 +234,21 @@ class MockCatalogServiceImpl {
}
}
- std::shared_ptr getTableMeta(const std::string& db, const std::string& tbname) const {
- DbMetaCache::const_iterator it = meta_.find(db);
- if (meta_.end() == it) {
- return std::shared_ptr();
- }
- TableMetaCache::const_iterator tit = it->second.find(tbname);
- if (it->second.end() == tit) {
- return std::shared_ptr();
- }
- return tit->second;
+ void createFunction(const std::string& func, int8_t funcType, int8_t outputType, int32_t outputLen, int32_t bufSize) {
+ std::shared_ptr info(new SFuncInfo);
+ strcpy(info->name, func.c_str());
+ info->funcType = funcType;
+ info->scriptType = TSDB_FUNC_SCRIPT_BIN_LIB;
+ info->outputType = outputType;
+ info->outputLen = outputLen;
+ info->bufSize = bufSize;
+ udf_.insert(std::make_pair(func, info));
}
private:
typedef std::map> TableMetaCache;
typedef std::map DbMetaCache;
+ typedef std::map> UdfMetaCache;
std::string toDbname(const std::string& dbFullName) const {
std::string::size_type n = dbFullName.find(".");
@@ -308,6 +331,18 @@ class MockCatalogServiceImpl {
return TSDB_CODE_SUCCESS;
}
+ std::shared_ptr getTableMeta(const std::string& db, const std::string& tbname) const {
+ DbMetaCache::const_iterator it = meta_.find(db);
+ if (meta_.end() == it) {
+ return std::shared_ptr();
+ }
+ TableMetaCache::const_iterator tit = it->second.find(tbname);
+ if (it->second.end() == tit) {
+ return std::shared_ptr();
+ }
+ return tit->second;
+ }
+
int32_t getAllTableMeta(SArray* pTableMetaReq, SArray** pTableMetaData) const {
int32_t code = TSDB_CODE_SUCCESS;
if (NULL != pTableMetaReq) {
@@ -330,12 +365,82 @@ class MockCatalogServiceImpl {
int32_t code = TSDB_CODE_SUCCESS;
if (NULL != pTableVgroupReq) {
int32_t ntables = taosArrayGetSize(pTableVgroupReq);
- *pTableVgroupData = taosArrayInit(ntables, POINTER_BYTES);
+ *pTableVgroupData = taosArrayInit(ntables, sizeof(SVgroupInfo));
for (int32_t i = 0; i < ntables; ++i) {
- SVgroupInfo* pVgInfo = (SVgroupInfo*)taosMemoryCalloc(1, sizeof(SVgroupInfo));
- code = catalogGetTableHashVgroup((const SName*)taosArrayGet(pTableVgroupReq, i), pVgInfo);
+ SVgroupInfo vgInfo = {0};
+ code = catalogGetTableHashVgroup((const SName*)taosArrayGet(pTableVgroupReq, i), &vgInfo);
if (TSDB_CODE_SUCCESS == code) {
- taosArrayPush(*pTableVgroupData, &pVgInfo);
+ taosArrayPush(*pTableVgroupData, &vgInfo);
+ } else {
+ break;
+ }
+ }
+ }
+ return code;
+ }
+
+ int32_t getAllDbVgroup(SArray* pDbVgroupReq, SArray** pDbVgroupData) const {
+ int32_t code = TSDB_CODE_SUCCESS;
+ if (NULL != pDbVgroupReq) {
+ int32_t ndbs = taosArrayGetSize(pDbVgroupReq);
+ *pDbVgroupData = taosArrayInit(ndbs, POINTER_BYTES);
+ for (int32_t i = 0; i < ndbs; ++i) {
+ int64_t zeroVg = 0;
+ taosArrayPush(*pDbVgroupData, &zeroVg);
+ }
+ }
+ return code;
+ }
+
+ int32_t getAllDbCfg(SArray* pDbCfgReq, SArray** pDbCfgData) const {
+ int32_t code = TSDB_CODE_SUCCESS;
+ if (NULL != pDbCfgReq) {
+ int32_t ndbs = taosArrayGetSize(pDbCfgReq);
+ *pDbCfgData = taosArrayInit(ndbs, sizeof(SDbCfgInfo));
+ for (int32_t i = 0; i < ndbs; ++i) {
+ SDbCfgInfo dbCfg = {0};
+ taosArrayPush(*pDbCfgData, &dbCfg);
+ }
+ }
+ return code;
+ }
+
+ int32_t getAllDbInfo(SArray* pDbInfoReq, SArray** pDbInfoData) const {
+ int32_t code = TSDB_CODE_SUCCESS;
+ if (NULL != pDbInfoReq) {
+ int32_t ndbs = taosArrayGetSize(pDbInfoReq);
+ *pDbInfoData = taosArrayInit(ndbs, sizeof(SDbCfgInfo));
+ for (int32_t i = 0; i < ndbs; ++i) {
+ SDbInfo dbInfo = {0};
+ taosArrayPush(*pDbInfoData, &dbInfo);
+ }
+ }
+ return code;
+ }
+
+ int32_t getAllUserAuth(SArray* pUserAuthReq, SArray** pUserAuthData) const {
+ int32_t code = TSDB_CODE_SUCCESS;
+ if (NULL != pUserAuthReq) {
+ int32_t num = taosArrayGetSize(pUserAuthReq);
+ *pUserAuthData = taosArrayInit(num, sizeof(bool));
+ for (int32_t i = 0; i < num; ++i) {
+ bool pass = true;
+ taosArrayPush(*pUserAuthData, &pass);
+ }
+ }
+ return code;
+ }
+
+ int32_t getAllUdf(SArray* pUdfReq, SArray** pUdfData) const {
+ int32_t code = TSDB_CODE_SUCCESS;
+ if (NULL != pUdfReq) {
+ int32_t num = taosArrayGetSize(pUdfReq);
+ *pUdfData = taosArrayInit(num, sizeof(SFuncInfo));
+ for (int32_t i = 0; i < num; ++i) {
+ SFuncInfo info = {0};
+ code = catalogGetUdfInfo((char*)taosArrayGet(pUdfReq, i), &info);
+ if (TSDB_CODE_SUCCESS == code) {
+ taosArrayPush(*pUdfData, &info);
} else {
break;
}
@@ -347,6 +452,7 @@ class MockCatalogServiceImpl {
uint64_t id_;
std::unique_ptr builder_;
DbMetaCache meta_;
+ UdfMetaCache udf_;
};
MockCatalogService::MockCatalogService() : impl_(new MockCatalogServiceImpl()) {}
@@ -365,9 +471,9 @@ void MockCatalogService::createSubTable(const std::string& db, const std::string
void MockCatalogService::showTables() const { impl_->showTables(); }
-std::shared_ptr MockCatalogService::getTableMeta(const std::string& db,
- const std::string& tbname) const {
- return impl_->getTableMeta(db, tbname);
+void MockCatalogService::createFunction(const std::string& func, int8_t funcType, int8_t outputType, int32_t outputLen,
+ int32_t bufSize) {
+ impl_->createFunction(func, funcType, outputType, outputLen, bufSize);
}
int32_t MockCatalogService::catalogGetTableMeta(const SName* pTableName, STableMeta** pTableMeta) const {
@@ -382,6 +488,10 @@ int32_t MockCatalogService::catalogGetTableDistVgInfo(const SName* pTableName, S
return impl_->catalogGetTableDistVgInfo(pTableName, pVgList);
}
+int32_t MockCatalogService::catalogGetUdfInfo(const std::string& funcName, SFuncInfo* pInfo) const {
+ return impl_->catalogGetUdfInfo(funcName, pInfo);
+}
+
int32_t MockCatalogService::catalogGetAllMeta(const SCatalogReq* pCatalogReq, SMetaData* pMetaData) const {
return impl_->catalogGetAllMeta(pCatalogReq, pMetaData);
}
diff --git a/source/libs/parser/test/mockCatalogService.h b/source/libs/parser/test/mockCatalogService.h
index bfc35247fec3335f7c6090ca811a4d13637d4cc7..cb0f10e95bfcb05ce46ea0eb423d9753477db422 100644
--- a/source/libs/parser/test/mockCatalogService.h
+++ b/source/libs/parser/test/mockCatalogService.h
@@ -56,11 +56,12 @@ class MockCatalogService {
int32_t numOfColumns, int32_t numOfTags = 0);
void createSubTable(const std::string& db, const std::string& stbname, const std::string& tbname, int16_t vgid);
void showTables() const;
- std::shared_ptr getTableMeta(const std::string& db, const std::string& tbname) const;
+ void createFunction(const std::string& func, int8_t funcType, int8_t outputType, int32_t outputLen, int32_t bufSize);
int32_t catalogGetTableMeta(const SName* pTableName, STableMeta** pTableMeta) const;
int32_t catalogGetTableHashVgroup(const SName* pTableName, SVgroupInfo* vgInfo) const;
int32_t catalogGetTableDistVgInfo(const SName* pTableName, SArray** pVgList) const;
+ int32_t catalogGetUdfInfo(const std::string& funcName, SFuncInfo* pInfo) const;
int32_t catalogGetAllMeta(const SCatalogReq* pCatalogReq, SMetaData* pMetaData) const;
private:
diff --git a/source/libs/parser/test/parInitialCTest.cpp b/source/libs/parser/test/parInitialCTest.cpp
index a5e7ef51a797a01ff404dc01275ded61534fde33..e55f36376cbce26f1954211fe7308070a0a192bd 100644
--- a/source/libs/parser/test/parInitialCTest.cpp
+++ b/source/libs/parser/test/parInitialCTest.cpp
@@ -228,7 +228,44 @@ TEST_F(ParserInitialCTest, createDnode) {
run("CREATE DNODE 1.1.1.1 PORT 9000");
}
-// todo CREATE FUNCTION
+// CREATE [AGGREGATE] FUNCTION [IF NOT EXISTS] func_name AS library_path OUTPUTTYPE type_name [BUFSIZE value]
+TEST_F(ParserInitialCTest, createFunction) {
+  useDb("root", "test");
+
+  // Expected wire-format request; refilled by the lambda below before each case.
+  SCreateFuncReq expect = {0};
+
+  // Populates `expect` for one CREATE FUNCTION case. Defaults mirror the SQL
+  // defaults: scalar function, no IF NOT EXISTS, BUFSIZE 0. outputBytes == 0
+  // means "use the output type's native width" (tDataTypes lookup below).
+  auto setCreateFuncReqFunc = [&](const char* pUdfName, int8_t outputType, int32_t outputBytes = 0,
+                                  int8_t funcType = TSDB_FUNC_TYPE_SCALAR, int8_t igExists = 0, int32_t bufSize = 0) {
+    memset(&expect, 0, sizeof(SCreateFuncReq));
+    strcpy(expect.name, pUdfName);
+    expect.igExists = igExists;
+    expect.funcType = funcType;
+    expect.scriptType = TSDB_FUNC_SCRIPT_BIN_LIB;
+    expect.outputType = outputType;
+    expect.outputLen = outputBytes > 0 ? outputBytes : tDataTypes[outputType].bytes;
+    expect.bufSize = bufSize;
+  };
+
+  // Deserializes the SCreateFuncReq the parser produced and compares every
+  // field against `expect`.
+  setCheckDdlFunc([&](const SQuery* pQuery, ParserStage stage) {
+    ASSERT_EQ(nodeType(pQuery->pRoot), QUERY_NODE_CREATE_FUNCTION_STMT);
+    SCreateFuncReq req = {0};
+    ASSERT_TRUE(TSDB_CODE_SUCCESS == tDeserializeSCreateFuncReq(pQuery->pCmdMsg->pMsg, pQuery->pCmdMsg->msgLen, &req));
+
+    ASSERT_EQ(std::string(req.name), std::string(expect.name));
+    ASSERT_EQ(req.igExists, expect.igExists);
+    ASSERT_EQ(req.funcType, expect.funcType);
+    ASSERT_EQ(req.scriptType, expect.scriptType);
+    ASSERT_EQ(req.outputType, expect.outputType);
+    ASSERT_EQ(req.outputLen, expect.outputLen);
+    ASSERT_EQ(req.bufSize, expect.bufSize);
+  });
+
+  setCreateFuncReqFunc("udf1", TSDB_DATA_TYPE_INT);
+  // NOTE(review): both run() calls are commented out, so the callback above
+  // never fires and this test currently exercises nothing — presumably
+  // disabled until CREATE FUNCTION support lands; confirm before counting on
+  // this coverage.
+  // run("CREATE FUNCTION udf1 AS './build/lib/libudf1.so' OUTPUTTYPE INT");
+
+  setCreateFuncReqFunc("udf2", TSDB_DATA_TYPE_DOUBLE, 0, TSDB_FUNC_TYPE_AGGREGATE, 1, 8);
+  // run("CREATE AGGREGATE FUNCTION IF NOT EXISTS udf2 AS './build/lib/libudf2.so' OUTPUTTYPE DOUBLE BUFSIZE 8");
+}
TEST_F(ParserInitialCTest, createIndexSma) {
useDb("root", "test");
diff --git a/source/libs/parser/test/parInitialDTest.cpp b/source/libs/parser/test/parInitialDTest.cpp
index 7cf3337fea3c29afcd0eaac8d6bd160c5ec9aacd..57d349e7eeecd33fd9855f5a0d8df22548c5ceee 100644
--- a/source/libs/parser/test/parInitialDTest.cpp
+++ b/source/libs/parser/test/parInitialDTest.cpp
@@ -103,6 +103,7 @@ TEST_F(ParserInitialDTest, dropTopic) {
}
TEST_F(ParserInitialDTest, dropUser) {
+ login("root");
useDb("root", "test");
run("drop user wxy");
diff --git a/source/libs/parser/test/parSelectTest.cpp b/source/libs/parser/test/parSelectTest.cpp
index f00500faa4963f4efef561bce103658585a029a6..2d4fe41d4fed38bb6f97fcb37c6972aa8c7d65fc 100644
--- a/source/libs/parser/test/parSelectTest.cpp
+++ b/source/libs/parser/test/parSelectTest.cpp
@@ -141,6 +141,14 @@ TEST_F(ParserSelectTest, IndefiniteRowsFuncSemanticCheck) {
// run("SELECT DIFF(c1) FROM t1 INTERVAL(10s)");
}
+// Parses SELECTs that reference user-defined functions registered in the mock
+// catalog (udf1 scalar, udf2 aggregate — see MockCatalogService::createFunction).
+TEST_F(ParserSelectTest, useDefinedFunc) {
+  useDb("root", "test");
+
+  run("SELECT udf1(c1) FROM t1");
+
+  run("SELECT udf2(c1) FROM t1 GROUP BY c2");
+}
+
TEST_F(ParserSelectTest, groupBy) {
useDb("root", "test");
diff --git a/source/libs/parser/test/parTestMain.cpp b/source/libs/parser/test/parTestMain.cpp
index aadf8e7c66325f20da60e0eb9d25fb8f23042293..820b8cca3cdc02633982a3ea797aa605db1e3fd3 100644
--- a/source/libs/parser/test/parTestMain.cpp
+++ b/source/libs/parser/test/parTestMain.cpp
@@ -37,6 +37,7 @@ class ParserEnv : public testing::Environment {
virtual void SetUp() {
initMetaDataEnv();
generateMetaData();
+ initLog(TD_TMP_DIR_PATH "td");
}
virtual void TearDown() {
@@ -47,20 +48,55 @@ class ParserEnv : public testing::Environment {
ParserEnv() {}
virtual ~ParserEnv() {}
+
+ private:
+  // One-time logging setup for the parser test environment: applies the
+  // command-line log level (getLogLevel()) to every module's debug flag,
+  // forces synchronous embedded-style logging, and re-creates `path` as a
+  // fresh log directory before opening the log file.
+  void initLog(const char* path) {
+    int32_t logLevel = getLogLevel();
+    dDebugFlag = logLevel;
+    vDebugFlag = logLevel;
+    mDebugFlag = logLevel;
+    cDebugFlag = logLevel;
+    jniDebugFlag = logLevel;
+    tmrDebugFlag = logLevel;
+    uDebugFlag = logLevel;
+    rpcDebugFlag = logLevel;
+    qDebugFlag = logLevel;
+    wDebugFlag = logLevel;
+    sDebugFlag = logLevel;
+    tsdbDebugFlag = logLevel;
+    tsLogEmbedded = 1;
+    // Synchronous logging so messages are flushed before any test crash.
+    tsAsyncLog = 0;
+
+    // Start from an empty directory on every run.
+    taosRemoveDir(path);
+    taosMkDir(path);
+    // assumes tsLogDir is at least PATH_MAX bytes — TODO confirm
+    tstrncpy(tsLogDir, path, PATH_MAX);
+    if (taosInitLog("taoslog", 1) != 0) {
+      // Non-fatal: tests still run, just without file logging.
+      std::cout << "failed to init log file" << std::endl;
+    }
+  }
};
static void parseArg(int argc, char* argv[]) {
- int opt = 0;
- const char* optstring = "";
+ int opt = 0;
+ const char* optstring = "";
+ // clang-format off
static struct option long_options[] = {
- {"dump", no_argument, NULL, 'd'}, {"async", no_argument, NULL, 'a'}, {0, 0, 0, 0}};
+ {"dump", no_argument, NULL, 'd'},
+ {"async", required_argument, NULL, 'a'},
+ {"skipSql", required_argument, NULL, 's'},
+ {0, 0, 0, 0}
+ };
+ // clang-format on
while ((opt = getopt_long(argc, argv, optstring, long_options, NULL)) != -1) {
switch (opt) {
case 'd':
g_dump = true;
break;
case 'a':
- g_testAsyncApis = true;
+ setAsyncFlag(optarg);
+ break;
+ case 's':
+ setSkipSqlNum(optarg);
break;
default:
break;
diff --git a/source/libs/parser/test/parTestUtil.cpp b/source/libs/parser/test/parTestUtil.cpp
index 8b15cc8a23af4eea91a1d3090442e621cc5f9a0b..fab7ed35b1cb408a5cdd6f455994da07a26596fd 100644
--- a/source/libs/parser/test/parTestUtil.cpp
+++ b/source/libs/parser/test/parTestUtil.cpp
@@ -44,23 +44,40 @@ namespace ParserTest {
} \
} while (0);
-bool g_dump = false;
-bool g_testAsyncApis = false;
+bool g_dump = false;
+bool g_testAsyncApis = true;
+int32_t g_logLevel = 131;
+int32_t g_skipSql = 0;
+
+// Parses the "--async <n>" argument: any value > 0 enables the async-API path.
+// NOTE(review): stoi throws std::invalid_argument on non-numeric input —
+// acceptable for a test binary, but confirm that is intended.
+void setAsyncFlag(const char* pFlag) { g_testAsyncApis = stoi(pFlag) > 0 ? true : false; }
+// Parses the "--skipSql <n>" argument: number of leading run() calls to skip per case.
+void setSkipSqlNum(const char* pNum) { g_skipSql = stoi(pNum); }
struct TerminateFlag : public exception {
const char* what() const throw() { return "success and terminate"; }
};
+void setLogLevel(const char* pLogLevel) { g_logLevel = stoi(pLogLevel); }
+
+int32_t getLogLevel() { return g_logLevel; }
+
class ParserTestBaseImpl {
public:
ParserTestBaseImpl(ParserTestBase* pBase) : pBase_(pBase) {}
+ void login(const std::string& user) { caseEnv_.user_ = user; }
+
void useDb(const string& acctId, const string& db) {
caseEnv_.acctId_ = acctId;
caseEnv_.db_ = db;
+ caseEnv_.nsql_ = g_skipSql;
}
void run(const string& sql, int32_t expect, ParserStage checkStage) {
+ if (caseEnv_.nsql_ > 0) {
+ --(caseEnv_.nsql_);
+ return;
+ }
+
reset(expect, checkStage);
try {
SParseContext cxt = {0};
@@ -69,6 +86,8 @@ class ParserTestBaseImpl {
SQuery* pQuery = nullptr;
doParse(&cxt, &pQuery);
+ doAuthenticate(&cxt, pQuery);
+
doTranslate(&cxt, pQuery);
doCalculateConstant(&cxt, pQuery);
@@ -89,59 +108,14 @@ class ParserTestBaseImpl {
}
}
- void runAsync(const string& sql, int32_t expect, ParserStage checkStage) {
- reset(expect, checkStage);
- try {
- SParseContext cxt = {0};
- setParseContext(sql, &cxt, true);
-
- SQuery* pQuery = nullptr;
- doParse(&cxt, &pQuery);
-
- SCatalogReq catalogReq = {0};
- doBuildCatalogReq(pQuery->pMetaCache, &catalogReq);
-
- string err;
- thread t1([&]() {
- try {
- SMetaData metaData = {0};
- doGetAllMeta(&catalogReq, &metaData);
-
- doPutMetaDataToCache(&catalogReq, &metaData, pQuery->pMetaCache);
-
- doTranslate(&cxt, pQuery);
-
- doCalculateConstant(&cxt, pQuery);
- } catch (const TerminateFlag& e) {
- // success and terminate
- } catch (const runtime_error& e) {
- err = e.what();
- } catch (...) {
- err = "unknown error";
- }
- });
-
- t1.join();
- if (!err.empty()) {
- throw runtime_error(err);
- }
-
- if (g_dump) {
- dump();
- }
- } catch (const TerminateFlag& e) {
- // success and terminate
- return;
- } catch (...) {
- dump();
- throw;
- }
- }
-
private:
struct caseEnv {
- string acctId_;
- string db_;
+ string acctId_;
+ string user_;
+ string db_;
+ int32_t nsql_;
+
+ caseEnv() : user_("wangxiaoyu"), nsql_(0) {}
};
struct stmtEnv {
@@ -207,6 +181,8 @@ class ParserTestBaseImpl {
pCxt->acctId = atoi(caseEnv_.acctId_.c_str());
pCxt->db = caseEnv_.db_.c_str();
+ pCxt->pUser = caseEnv_.user_.c_str();
+ pCxt->isSuperUser = caseEnv_.user_ == "root";
pCxt->pSql = stmtEnv_.sql_.c_str();
pCxt->sqlLen = stmtEnv_.sql_.length();
pCxt->pMsg = stmtEnv_.msgBuf_.data();
@@ -220,6 +196,11 @@ class ParserTestBaseImpl {
res_.parsedAst_ = toString((*pQuery)->pRoot);
}
+ void doCollectMetaKey(SParseContext* pCxt, SQuery* pQuery) {
+ DO_WITH_THROW(collectMetaKey, pCxt, pQuery);
+ ASSERT_NE(pQuery->pMetaCache, nullptr);
+ }
+
void doBuildCatalogReq(const SParseMetaCache* pMetaCache, SCatalogReq* pCatalogReq) {
DO_WITH_THROW(buildCatalogReq, pMetaCache, pCatalogReq);
}
@@ -232,6 +213,8 @@ class ParserTestBaseImpl {
DO_WITH_THROW(putMetaDataToCache, pCatalogReq, pMetaData, pMetaCache);
}
+ void doAuthenticate(SParseContext* pCxt, SQuery* pQuery) { DO_WITH_THROW(authenticate, pCxt, pQuery); }
+
void doTranslate(SParseContext* pCxt, SQuery* pQuery) {
DO_WITH_THROW(translate, pCxt, pQuery);
checkQuery(pQuery, PARSER_STAGE_TRANSLATE);
@@ -254,6 +237,59 @@ class ParserTestBaseImpl {
void checkQuery(const SQuery* pQuery, ParserStage stage) { pBase_->checkDdl(pQuery, stage); }
+ void runAsync(const string& sql, int32_t expect, ParserStage checkStage) {
+ reset(expect, checkStage);
+ try {
+ SParseContext cxt = {0};
+ setParseContext(sql, &cxt, true);
+
+ SQuery* pQuery = nullptr;
+ doParse(&cxt, &pQuery);
+
+ doCollectMetaKey(&cxt, pQuery);
+
+ SCatalogReq catalogReq = {0};
+ doBuildCatalogReq(pQuery->pMetaCache, &catalogReq);
+
+ string err;
+ thread t1([&]() {
+ try {
+ SMetaData metaData = {0};
+ doGetAllMeta(&catalogReq, &metaData);
+
+ doPutMetaDataToCache(&catalogReq, &metaData, pQuery->pMetaCache);
+
+ doAuthenticate(&cxt, pQuery);
+
+ doTranslate(&cxt, pQuery);
+
+ doCalculateConstant(&cxt, pQuery);
+ } catch (const TerminateFlag& e) {
+ // success and terminate
+ } catch (const runtime_error& e) {
+ err = e.what();
+ } catch (...) {
+ err = "unknown error";
+ }
+ });
+
+ t1.join();
+ if (!err.empty()) {
+ throw runtime_error(err);
+ }
+
+ if (g_dump) {
+ dump();
+ }
+ } catch (const TerminateFlag& e) {
+ // success and terminate
+ return;
+ } catch (...) {
+ dump();
+ throw;
+ }
+ }
+
caseEnv caseEnv_;
stmtEnv stmtEnv_;
stmtRes res_;
@@ -264,16 +300,14 @@ ParserTestBase::ParserTestBase() : impl_(new ParserTestBaseImpl(this)) {}
ParserTestBase::~ParserTestBase() {}
+// Sets the user the test cases authenticate as (overrides the caseEnv default).
+// Note: `return` on a void expression is legal but redundant here.
+void ParserTestBase::login(const std::string& user) { return impl_->login(user); }
+
void ParserTestBase::useDb(const std::string& acctId, const std::string& db) { impl_->useDb(acctId, db); }
void ParserTestBase::run(const std::string& sql, int32_t expect, ParserStage checkStage) {
return impl_->run(sql, expect, checkStage);
}
-void ParserTestBase::runAsync(const std::string& sql, int32_t expect, ParserStage checkStage) {
- return impl_->runAsync(sql, expect, checkStage);
-}
-
void ParserTestBase::checkDdl(const SQuery* pQuery, ParserStage stage) { return; }
} // namespace ParserTest
diff --git a/source/libs/parser/test/parTestUtil.h b/source/libs/parser/test/parTestUtil.h
index 43feb3d5f19a120120d8f194faede0a0d92e6822..44be7a24746ecde078f69555c88e4d85344b8313 100644
--- a/source/libs/parser/test/parTestUtil.h
+++ b/source/libs/parser/test/parTestUtil.h
@@ -34,9 +34,9 @@ class ParserTestBase : public testing::Test {
ParserTestBase();
virtual ~ParserTestBase();
+ void login(const std::string& user);
void useDb(const std::string& acctId, const std::string& db);
void run(const std::string& sql, int32_t expect = TSDB_CODE_SUCCESS, ParserStage checkStage = PARSER_STAGE_ALL);
- void runAsync(const std::string& sql, int32_t expect = TSDB_CODE_SUCCESS, ParserStage checkStage = PARSER_STAGE_ALL);
virtual void checkDdl(const SQuery* pQuery, ParserStage stage);
@@ -65,7 +65,11 @@ class ParserDdlTest : public ParserTestBase {
};
extern bool g_dump;
-extern bool g_testAsyncApis;
+
+extern void setAsyncFlag(const char* pFlag);
+extern void setLogLevel(const char* pLogLevel);
+extern int32_t getLogLevel();
+extern void setSkipSqlNum(const char* pNum);
} // namespace ParserTest
diff --git a/source/libs/planner/src/planOptimizer.c b/source/libs/planner/src/planOptimizer.c
index 4d489f68e7c4ff042e6f0d0c82bbd98a6dbbfb2b..adc07fcd0d6b6a0c6f98fdf5032151dab3ae71f3 100644
--- a/source/libs/planner/src/planOptimizer.c
+++ b/source/libs/planner/src/planOptimizer.c
@@ -223,6 +223,9 @@ static void setScanWindowInfo(SScanLogicNode* pScan) {
pScan->sliding = ((SWindowLogicNode*)pScan->node.pParent)->sliding;
pScan->intervalUnit = ((SWindowLogicNode*)pScan->node.pParent)->intervalUnit;
pScan->slidingUnit = ((SWindowLogicNode*)pScan->node.pParent)->slidingUnit;
+ pScan->triggerType = ((SWindowLogicNode*)pScan->node.pParent)->triggerType;
+ pScan->watermark = ((SWindowLogicNode*)pScan->node.pParent)->watermark;
+ pScan->tsColId = ((SColumnNode*)((SWindowLogicNode*)pScan->node.pParent)->pTspk)->colId;
}
}
diff --git a/source/libs/planner/src/planPhysiCreater.c b/source/libs/planner/src/planPhysiCreater.c
index 0f88a54e913c57c1fdc848317d7b8a85a4ac0e88..a45eabefb9f1f1f7fe9c97a3f8c7cf16385d2fc3 100644
--- a/source/libs/planner/src/planPhysiCreater.c
+++ b/source/libs/planner/src/planPhysiCreater.c
@@ -503,6 +503,9 @@ static int32_t createTableScanPhysiNode(SPhysiPlanContext* pCxt, SSubplan* pSubp
pTableScan->sliding = pScanLogicNode->sliding;
pTableScan->intervalUnit = pScanLogicNode->intervalUnit;
pTableScan->slidingUnit = pScanLogicNode->slidingUnit;
+ pTableScan->triggerType = pScanLogicNode->triggerType;
+ pTableScan->watermark = pScanLogicNode->watermark;
+ pTableScan->tsColId = pScanLogicNode->tsColId;
return createScanPhysiNodeFinalize(pCxt, pSubplan, pScanLogicNode, (SScanPhysiNode*)pTableScan, pPhyNode);
}
diff --git a/source/libs/planner/test/CMakeLists.txt b/source/libs/planner/test/CMakeLists.txt
index a21b36fef6b3eecc51bdbe4abbb7fff3dc065098..abea60b0c798a055617abf3693be25f365fbc867 100644
--- a/source/libs/planner/test/CMakeLists.txt
+++ b/source/libs/planner/test/CMakeLists.txt
@@ -32,7 +32,9 @@ if(${BUILD_WINGETOPT})
target_link_libraries(plannerTest PUBLIC wingetopt)
endif()
-add_test(
- NAME plannerTest
- COMMAND plannerTest
-)
+if(NOT TD_WINDOWS)
+ add_test(
+ NAME plannerTest
+ COMMAND plannerTest
+ )
+endif(NOT TD_WINDOWS)
diff --git a/source/libs/planner/test/planTestUtil.cpp b/source/libs/planner/test/planTestUtil.cpp
index 084762088823edee627b4ea3bad2286208d570ac..e2082d49364727719bc72f3445bcb038d5584976 100644
--- a/source/libs/planner/test/planTestUtil.cpp
+++ b/source/libs/planner/test/planTestUtil.cpp
@@ -73,7 +73,7 @@ void setDumpModule(const char* pModule) {
}
}
-void setSkipSqlNum(const char* pNum) { g_skipSql = stoi(optarg); }
+void setSkipSqlNum(const char* pNum) { g_skipSql = stoi(pNum); }
void setLogLevel(const char* pLogLevel) { g_logLevel = stoi(pLogLevel); }
diff --git a/source/libs/qworker/inc/qwInt.h b/source/libs/qworker/inc/qwInt.h
index f6d35ac4c1d57a2448b5e558b138f5b2e73597e3..b0a102069dc7d00e3002d14c76ec9c65f0854d92 100644
--- a/source/libs/qworker/inc/qwInt.h
+++ b/source/libs/qworker/inc/qwInt.h
@@ -145,6 +145,15 @@ typedef struct SQWSchStatus {
SHashObj *tasksHash; // key:queryId+taskId, value: SQWTaskStatus
} SQWSchStatus;
+typedef struct SQWWaitTimeStat {
+ uint64_t num;
+ uint64_t total;
+} SQWWaitTimeStat;
+
+typedef struct SQWStat {
+ SQWWaitTimeStat msgWait[2];
+} SQWStat;
+
// Qnode/Vnode level task management
typedef struct SQWorker {
int64_t refId;
@@ -155,9 +164,10 @@ typedef struct SQWorker {
tmr_h hbTimer;
SRWLatch schLock;
// SRWLatch ctxLock;
- SHashObj *schHash; // key: schedulerId, value: SQWSchStatus
- SHashObj *ctxHash; // key: queryId+taskId, value: SQWTaskCtx
- SMsgCb msgCb;
+ SHashObj *schHash; // key: schedulerId, value: SQWSchStatus
+ SHashObj *ctxHash; // key: queryId+taskId, value: SQWTaskCtx
+ SMsgCb msgCb;
+ SQWStat stat;
} SQWorker;
typedef struct SQWorkerMgmt {
@@ -322,6 +332,8 @@ int32_t qwDropTask(QW_FPARAMS_DEF);
void qwSaveTbVersionInfo(qTaskInfo_t pTaskInfo, SQWTaskCtx *ctx);
int32_t qwOpenRef(void);
void qwSetHbParam(int64_t refId, SQWHbParam **pParam);
+int32_t qwUpdateWaitTimeInQueue(SQWorker *mgmt, int64_t ts, EQueueType type);
+int64_t qwGetWaitTimeInQueue(SQWorker *mgmt, EQueueType type);
void qwDbgDumpMgmtInfo(SQWorker *mgmt);
int32_t qwDbgValidateStatus(QW_FPARAMS_DEF, int8_t oriStatus, int8_t newStatus, bool *ignore);
diff --git a/source/libs/qworker/src/qwMsg.c b/source/libs/qworker/src/qwMsg.c
index 46c2084494b2c64f6ffc81ca11205fba6adf890c..b9dc18cd2fd22ff196a300451d1d39b5bcd2353d 100644
--- a/source/libs/qworker/src/qwMsg.c
+++ b/source/libs/qworker/src/qwMsg.c
@@ -248,7 +248,7 @@ int32_t qwRegisterHbBrokenLinkArg(SQWorker *mgmt, uint64_t sId, SRpcHandleInfo *
return TSDB_CODE_SUCCESS;
}
-int32_t qWorkerProcessQueryMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) {
+int32_t qWorkerProcessQueryMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, int64_t ts) {
if (NULL == node || NULL == qWorkerMgmt || NULL == pMsg) {
QW_ERR_RET(TSDB_CODE_QRY_INVALID_INPUT);
}
@@ -257,6 +257,8 @@ int32_t qWorkerProcessQueryMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) {
SSubQueryMsg *msg = pMsg->pCont;
SQWorker * mgmt = (SQWorker *)qWorkerMgmt;
+ qwUpdateWaitTimeInQueue(mgmt, ts, QUERY_QUEUE);
+
if (NULL == msg || pMsg->contLen <= sizeof(*msg)) {
QW_ELOG("invalid query msg, msg:%p, msgLen:%d", msg, pMsg->contLen);
QW_ERR_RET(TSDB_CODE_QRY_INVALID_INPUT);
@@ -286,7 +288,7 @@ int32_t qWorkerProcessQueryMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) {
return TSDB_CODE_SUCCESS;
}
-int32_t qWorkerProcessCQueryMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) {
+int32_t qWorkerProcessCQueryMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, int64_t ts) {
int32_t code = 0;
int8_t status = 0;
bool queryDone = false;
@@ -295,6 +297,8 @@ int32_t qWorkerProcessCQueryMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) {
SQWTaskCtx * handles = NULL;
SQWorker * mgmt = (SQWorker *)qWorkerMgmt;
+ qwUpdateWaitTimeInQueue(mgmt, ts, QUERY_QUEUE);
+
if (NULL == msg || pMsg->contLen < sizeof(*msg)) {
QW_ELOG("invalid cquery msg, msg:%p, msgLen:%d", msg, pMsg->contLen);
QW_ERR_RET(TSDB_CODE_QRY_INVALID_INPUT);
@@ -316,7 +320,7 @@ int32_t qWorkerProcessCQueryMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) {
return TSDB_CODE_SUCCESS;
}
-int32_t qWorkerProcessFetchMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) {
+int32_t qWorkerProcessFetchMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, int64_t ts) {
if (NULL == node || NULL == qWorkerMgmt || NULL == pMsg) {
return TSDB_CODE_QRY_INVALID_INPUT;
}
@@ -324,6 +328,8 @@ int32_t qWorkerProcessFetchMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) {
SResFetchReq *msg = pMsg->pCont;
SQWorker * mgmt = (SQWorker *)qWorkerMgmt;
+ qwUpdateWaitTimeInQueue(mgmt, ts, FETCH_QUEUE);
+
if (NULL == msg || pMsg->contLen < sizeof(*msg)) {
QW_ELOG("invalid fetch msg, msg:%p, msgLen:%d", msg, pMsg->contLen);
QW_ERR_RET(TSDB_CODE_QRY_INVALID_INPUT);
@@ -349,13 +355,16 @@ int32_t qWorkerProcessFetchMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) {
return TSDB_CODE_SUCCESS;
}
-int32_t qWorkerProcessFetchRsp(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) {
+int32_t qWorkerProcessFetchRsp(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, int64_t ts) {
+ SQWorker * mgmt = (SQWorker *)qWorkerMgmt;
+ qwUpdateWaitTimeInQueue(mgmt, ts, FETCH_QUEUE);
+
qProcessFetchRsp(NULL, pMsg, NULL);
pMsg->pCont = NULL;
return TSDB_CODE_SUCCESS;
}
-int32_t qWorkerProcessCancelMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) {
+int32_t qWorkerProcessCancelMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, int64_t ts) {
if (NULL == node || NULL == qWorkerMgmt || NULL == pMsg) {
return TSDB_CODE_QRY_INVALID_INPUT;
}
@@ -363,6 +372,9 @@ int32_t qWorkerProcessCancelMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) {
SQWorker * mgmt = (SQWorker *)qWorkerMgmt;
int32_t code = 0;
STaskCancelReq *msg = pMsg->pCont;
+
+ qwUpdateWaitTimeInQueue(mgmt, ts, FETCH_QUEUE);
+
if (NULL == msg || pMsg->contLen < sizeof(*msg)) {
qError("invalid task cancel msg");
QW_ERR_RET(TSDB_CODE_QRY_INVALID_INPUT);
@@ -390,7 +402,7 @@ _return:
return TSDB_CODE_SUCCESS;
}
-int32_t qWorkerProcessDropMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) {
+int32_t qWorkerProcessDropMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, int64_t ts) {
if (NULL == node || NULL == qWorkerMgmt || NULL == pMsg) {
return TSDB_CODE_QRY_INVALID_INPUT;
}
@@ -399,6 +411,8 @@ int32_t qWorkerProcessDropMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) {
STaskDropReq *msg = pMsg->pCont;
SQWorker * mgmt = (SQWorker *)qWorkerMgmt;
+ qwUpdateWaitTimeInQueue(mgmt, ts, FETCH_QUEUE);
+
if (NULL == msg || pMsg->contLen < sizeof(*msg)) {
QW_ELOG("invalid task drop msg, msg:%p, msgLen:%d", msg, pMsg->contLen);
QW_ERR_RET(TSDB_CODE_QRY_INVALID_INPUT);
@@ -429,7 +443,7 @@ int32_t qWorkerProcessDropMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) {
return TSDB_CODE_SUCCESS;
}
-int32_t qWorkerProcessHbMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) {
+int32_t qWorkerProcessHbMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, int64_t ts) {
if (NULL == node || NULL == qWorkerMgmt || NULL == pMsg) {
return TSDB_CODE_QRY_INVALID_INPUT;
}
@@ -438,6 +452,8 @@ int32_t qWorkerProcessHbMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) {
SSchedulerHbReq req = {0};
SQWorker * mgmt = (SQWorker *)qWorkerMgmt;
+ qwUpdateWaitTimeInQueue(mgmt, ts, FETCH_QUEUE);
+
if (NULL == pMsg->pCont) {
QW_ELOG("invalid hb msg, msg:%p, msgLen:%d", pMsg->pCont, pMsg->contLen);
QW_ERR_RET(TSDB_CODE_QRY_INVALID_INPUT);
diff --git a/source/libs/qworker/src/qwUtil.c b/source/libs/qworker/src/qwUtil.c
index a96a3343e701d222e1cc6e0b27fa7ede7e581f02..a4bc22fc88121de7d51e3e67655468046e95c3bf 100644
--- a/source/libs/qworker/src/qwUtil.c
+++ b/source/libs/qworker/src/qwUtil.c
@@ -499,4 +499,43 @@ int32_t qwOpenRef(void) {
return TSDB_CODE_SUCCESS;
}
+// Accumulates how long a message waited in its dispatch queue before being
+// processed. `ts` is the enqueue timestamp; callers pass ts <= 0 to mean "no
+// timestamp available" (the unit tests pass 0), in which case nothing is
+// recorded. Given taosGetTimestampUs(), the duration is presumably in
+// microseconds — confirm against the enqueue side.
+// stat.msgWait[0] tracks QUERY_QUEUE; stat.msgWait[1] tracks FETCH_QUEUE
+// (same mapping as qwGetWaitTimeInQueue).
+// NOTE(review): ++num / total += are plain non-atomic updates; the qworker
+// tests drive these handlers from multiple threads, so the counters can race
+// — verify whether approximate stats are acceptable here.
+int32_t qwUpdateWaitTimeInQueue(SQWorker *mgmt, int64_t ts, EQueueType type) {
+  if (ts <= 0) {
+    return TSDB_CODE_SUCCESS;
+  }
+
+  int64_t duration = taosGetTimestampUs() - ts;
+  switch (type) {
+    case QUERY_QUEUE:
+      ++mgmt->stat.msgWait[0].num;
+      mgmt->stat.msgWait[0].total += duration;
+      break;
+    case FETCH_QUEUE:
+      ++mgmt->stat.msgWait[1].num;
+      mgmt->stat.msgWait[1].total += duration;
+      break;
+    default:
+      qError("unsupported queue type %d", type);
+      return TSDB_CODE_APP_ERROR;
+  }
+
+  return TSDB_CODE_SUCCESS;
+}
+
+// Returns the average queue-wait time recorded by qwUpdateWaitTimeInQueue for
+// the given queue (msgWait[0] = QUERY_QUEUE, msgWait[1] = FETCH_QUEUE), 0 when
+// no samples exist yet, or -1 for an unsupported queue type — callers must
+// treat negative values as errors.
+int64_t qwGetWaitTimeInQueue(SQWorker *mgmt, EQueueType type) {
+  SQWWaitTimeStat *pStat = NULL;
+  switch (type) {
+    case QUERY_QUEUE:
+      pStat = &mgmt->stat.msgWait[0];
+      return pStat->num ? (pStat->total/pStat->num) : 0;
+    case FETCH_QUEUE:
+      pStat = &mgmt->stat.msgWait[1];
+      return pStat->num ? (pStat->total/pStat->num) : 0;
+    default:
+      qError("unsupported queue type %d", type);
+      return -1;
+  }
+}
+
+
diff --git a/source/libs/qworker/src/qworker.c b/source/libs/qworker/src/qworker.c
index 3ee152606e78bd309e47c9dee1e1dd91643c0eb1..7201820854e6a87a1dffc12a47c37b8d6b692668 100644
--- a/source/libs/qworker/src/qworker.c
+++ b/source/libs/qworker/src/qworker.c
@@ -950,4 +950,9 @@ void qWorkerDestroy(void **qWorkerMgmt) {
}
}
+// Public qworker API: exposes the average per-queue message wait time from the
+// opaque management handle; delegates to qwGetWaitTimeInQueue.
+// NOTE(review): no NULL check on qWorkerMgmt before the cast/dereference —
+// confirm all callers guarantee a valid handle.
+int64_t qWorkerGetWaitTimeInQueue(void *qWorkerMgmt, EQueueType type) {
+  return qwGetWaitTimeInQueue((SQWorker *)qWorkerMgmt, type);
+}
+
+
diff --git a/source/libs/qworker/test/qworkerTests.cpp b/source/libs/qworker/test/qworkerTests.cpp
index 42596b1cd22f73dd822a1e9c85f04b6a60ecfb3f..1b959fbe633e0c50ddc7b80af321ee0420a9616d 100644
--- a/source/libs/qworker/test/qworkerTests.cpp
+++ b/source/libs/qworker/test/qworkerTests.cpp
@@ -635,7 +635,7 @@ void *queryThread(void *param) {
while (!qwtTestStop) {
qwtBuildQueryReqMsg(&queryRpc);
- qWorkerProcessQueryMsg(mockPointer, mgmt, &queryRpc);
+ qWorkerProcessQueryMsg(mockPointer, mgmt, &queryRpc, 0);
if (qwtTestEnableSleep) {
taosUsleep(taosRand()%5);
}
@@ -657,7 +657,7 @@ void *fetchThread(void *param) {
while (!qwtTestStop) {
qwtBuildFetchReqMsg(&fetchMsg, &fetchRpc);
- code = qWorkerProcessFetchMsg(mockPointer, mgmt, &fetchRpc);
+ code = qWorkerProcessFetchMsg(mockPointer, mgmt, &fetchRpc, 0);
if (qwtTestEnableSleep) {
taosUsleep(taosRand()%5);
}
@@ -679,7 +679,7 @@ void *dropThread(void *param) {
while (!qwtTestStop) {
qwtBuildDropReqMsg(&dropMsg, &dropRpc);
- code = qWorkerProcessDropMsg(mockPointer, mgmt, &dropRpc);
+ code = qWorkerProcessDropMsg(mockPointer, mgmt, &dropRpc, 0);
if (qwtTestEnableSleep) {
taosUsleep(taosRand()%5);
}
@@ -758,9 +758,9 @@ void *queryQueueThread(void *param) {
}
if (TDMT_VND_QUERY == queryRpc->msgType) {
- qWorkerProcessQueryMsg(mockPointer, mgmt, queryRpc);
+ qWorkerProcessQueryMsg(mockPointer, mgmt, queryRpc, 0);
} else if (TDMT_VND_QUERY_CONTINUE == queryRpc->msgType) {
- qWorkerProcessCQueryMsg(mockPointer, mgmt, queryRpc);
+ qWorkerProcessCQueryMsg(mockPointer, mgmt, queryRpc, 0);
} else {
printf("unknown msg in query queue, type:%d\n", queryRpc->msgType);
assert(0);
@@ -815,13 +815,13 @@ void *fetchQueueThread(void *param) {
switch (fetchRpc->msgType) {
case TDMT_VND_FETCH:
- qWorkerProcessFetchMsg(mockPointer, mgmt, fetchRpc);
+ qWorkerProcessFetchMsg(mockPointer, mgmt, fetchRpc, 0);
break;
case TDMT_VND_CANCEL_TASK:
- qWorkerProcessCancelMsg(mockPointer, mgmt, fetchRpc);
+ qWorkerProcessCancelMsg(mockPointer, mgmt, fetchRpc, 0);
break;
case TDMT_VND_DROP_TASK:
- qWorkerProcessDropMsg(mockPointer, mgmt, fetchRpc);
+ qWorkerProcessDropMsg(mockPointer, mgmt, fetchRpc, 0);
break;
default:
printf("unknown msg type:%d in fetch queue", fetchRpc->msgType);
@@ -878,16 +878,16 @@ TEST(seqTest, normalCase) {
code = qWorkerInit(NODE_TYPE_VNODE, 1, NULL, &mgmt, &msgCb);
ASSERT_EQ(code, 0);
- code = qWorkerProcessQueryMsg(mockPointer, mgmt, &queryRpc);
+ code = qWorkerProcessQueryMsg(mockPointer, mgmt, &queryRpc, 0);
ASSERT_EQ(code, 0);
//code = qWorkerProcessReadyMsg(mockPointer, mgmt, &readyRpc);
//ASSERT_EQ(code, 0);
- code = qWorkerProcessFetchMsg(mockPointer, mgmt, &fetchRpc);
+ code = qWorkerProcessFetchMsg(mockPointer, mgmt, &fetchRpc, 0);
ASSERT_EQ(code, 0);
- code = qWorkerProcessDropMsg(mockPointer, mgmt, &dropRpc);
+ code = qWorkerProcessDropMsg(mockPointer, mgmt, &dropRpc, 0);
ASSERT_EQ(code, 0);
qWorkerDestroy(&mgmt);
@@ -914,10 +914,10 @@ TEST(seqTest, cancelFirst) {
code = qWorkerInit(NODE_TYPE_VNODE, 1, NULL, &mgmt, &msgCb);
ASSERT_EQ(code, 0);
- code = qWorkerProcessDropMsg(mockPointer, mgmt, &dropRpc);
+ code = qWorkerProcessDropMsg(mockPointer, mgmt, &dropRpc, 0);
ASSERT_EQ(code, 0);
- code = qWorkerProcessQueryMsg(mockPointer, mgmt, &queryRpc);
+ code = qWorkerProcessQueryMsg(mockPointer, mgmt, &queryRpc, 0);
ASSERT_TRUE(0 != code);
qWorkerDestroy(&mgmt);
@@ -959,7 +959,7 @@ TEST(seqTest, randCase) {
if (r >= 0 && r < maxr/5) {
printf("Query,%d\n", t++);
qwtBuildQueryReqMsg(&queryRpc);
- code = qWorkerProcessQueryMsg(mockPointer, mgmt, &queryRpc);
+ code = qWorkerProcessQueryMsg(mockPointer, mgmt, &queryRpc, 0);
} else if (r >= maxr/5 && r < maxr * 2/5) {
//printf("Ready,%d\n", t++);
//qwtBuildReadyReqMsg(&readyMsg, &readyRpc);
@@ -970,14 +970,14 @@ TEST(seqTest, randCase) {
} else if (r >= maxr * 2/5 && r < maxr* 3/5) {
printf("Fetch,%d\n", t++);
qwtBuildFetchReqMsg(&fetchMsg, &fetchRpc);
- code = qWorkerProcessFetchMsg(mockPointer, mgmt, &fetchRpc);
+ code = qWorkerProcessFetchMsg(mockPointer, mgmt, &fetchRpc, 0);
if (qwtTestEnableSleep) {
taosUsleep(1);
}
} else if (r >= maxr * 3/5 && r < maxr * 4/5) {
printf("Drop,%d\n", t++);
qwtBuildDropReqMsg(&dropMsg, &dropRpc);
- code = qWorkerProcessDropMsg(mockPointer, mgmt, &dropRpc);
+ code = qWorkerProcessDropMsg(mockPointer, mgmt, &dropRpc, 0);
if (qwtTestEnableSleep) {
taosUsleep(1);
}
diff --git a/source/libs/scalar/inc/sclInt.h b/source/libs/scalar/inc/sclInt.h
index 9dbfeceb5940d4237ead01ff445529c2d7d447ac..1c2e4a358a2c256cf3ed577be568c2e93fe13cbe 100644
--- a/source/libs/scalar/inc/sclInt.h
+++ b/source/libs/scalar/inc/sclInt.h
@@ -51,7 +51,7 @@ typedef struct SScalarCtx {
int32_t doConvertDataType(SValueNode* pValueNode, SScalarParam* out);
SColumnInfoData* createColumnInfoData(SDataType* pType, int32_t numOfRows);
-void sclConvertToTsValueNode(int8_t precision, SValueNode* valueNode);
+int32_t sclConvertToTsValueNode(int8_t precision, SValueNode* valueNode);
#define GET_PARAM_TYPE(_c) ((_c)->columnData->info.type)
#define GET_PARAM_BYTES(_c) ((_c)->columnData->info.bytes)
diff --git a/source/libs/scalar/src/filter.c b/source/libs/scalar/src/filter.c
index 4317ad325e7e0d7b468dd7929c1f4a7c9ff7c169..195ec8a57791062cbca0e4c1a39ccce1866a5095 100644
--- a/source/libs/scalar/src/filter.c
+++ b/source/libs/scalar/src/filter.c
@@ -3553,7 +3553,11 @@ EDealRes fltReviseRewriter(SNode** pNode, void* pContext) {
return DEAL_RES_CONTINUE;
}
- sclConvertToTsValueNode(stat->precision, valueNode);
+ int32_t code = sclConvertToTsValueNode(stat->precision, valueNode);
+ if (code) {
+ stat->code = code;
+ return DEAL_RES_ERROR;
+ }
return DEAL_RES_CONTINUE;
}
@@ -3687,7 +3691,7 @@ int32_t fltReviseNodes(SFilterInfo *pInfo, SNode** pNode, SFltTreeStat *pStat) {
for (int32_t i = 0; i < nodeNum; ++i) {
SValueNode *valueNode = *(SValueNode **)taosArrayGet(pStat->nodeList, i);
- sclConvertToTsValueNode(pStat->precision, valueNode);
+ FLT_ERR_JRET(sclConvertToTsValueNode(pStat->precision, valueNode));
}
_return:
diff --git a/source/libs/scalar/src/scalar.c b/source/libs/scalar/src/scalar.c
index fb03eaefa4fe79034d731b74de6bd166fa0db83e..d2436b9948f2cf7bfa15d061cdc9bbfdfefd6f08 100644
--- a/source/libs/scalar/src/scalar.c
+++ b/source/libs/scalar/src/scalar.c
@@ -20,17 +20,19 @@ int32_t scalarGetOperatorParamNum(EOperatorType type) {
return 2;
}
-void sclConvertToTsValueNode(int8_t precision, SValueNode* valueNode) {
+int32_t sclConvertToTsValueNode(int8_t precision, SValueNode* valueNode) {
char *timeStr = valueNode->datum.p;
- if (convertStringToTimestamp(valueNode->node.resType.type, valueNode->datum.p, precision, &valueNode->datum.i) !=
- TSDB_CODE_SUCCESS) {
- valueNode->datum.i = 0;
+ int32_t code = convertStringToTimestamp(valueNode->node.resType.type, valueNode->datum.p, precision, &valueNode->datum.i);
+ if (code != TSDB_CODE_SUCCESS) {
+ return code;
}
taosMemoryFree(timeStr);
valueNode->typeData = valueNode->datum.i;
valueNode->node.resType.type = TSDB_DATA_TYPE_TIMESTAMP;
valueNode->node.resType.bytes = tDataTypes[TSDB_DATA_TYPE_TIMESTAMP].bytes;
+
+ return TSDB_CODE_SUCCESS;
}
@@ -546,6 +548,7 @@ EDealRes sclRewriteBasedOnOptr(SNode** pNode, SScalarCtx *ctx, EOperatorType opT
EDealRes sclRewriteNonConstOperator(SNode** pNode, SScalarCtx *ctx) {
SOperatorNode *node = (SOperatorNode *)*pNode;
+ int32_t code = 0;
if (node->pLeft && (QUERY_NODE_VALUE == nodeType(node->pLeft))) {
SValueNode *valueNode = (SValueNode *)node->pLeft;
@@ -555,7 +558,11 @@ EDealRes sclRewriteNonConstOperator(SNode** pNode, SScalarCtx *ctx) {
if (IS_STR_DATA_TYPE(valueNode->node.resType.type) && node->pRight && nodesIsExprNode(node->pRight)
&& ((SExprNode*)node->pRight)->resType.type == TSDB_DATA_TYPE_TIMESTAMP) {
- sclConvertToTsValueNode(((SExprNode*)node->pRight)->resType.precision, valueNode);
+ code = sclConvertToTsValueNode(((SExprNode*)node->pRight)->resType.precision, valueNode);
+ if (code) {
+ ctx->code = code;
+ return DEAL_RES_ERROR;
+ }
}
}
@@ -567,7 +574,11 @@ EDealRes sclRewriteNonConstOperator(SNode** pNode, SScalarCtx *ctx) {
if (IS_STR_DATA_TYPE(valueNode->node.resType.type) && node->pLeft && nodesIsExprNode(node->pLeft)
&& ((SExprNode*)node->pLeft)->resType.type == TSDB_DATA_TYPE_TIMESTAMP) {
- sclConvertToTsValueNode(((SExprNode*)node->pLeft)->resType.precision, valueNode);
+ code = sclConvertToTsValueNode(((SExprNode*)node->pLeft)->resType.precision, valueNode);
+ if (code) {
+ ctx->code = code;
+ return DEAL_RES_ERROR;
+ }
}
}
diff --git a/source/libs/scalar/src/sclfunc.c b/source/libs/scalar/src/sclfunc.c
index 18c1f08c6c7288e63619d272a1c71531a423af8f..6ee5f038d661d06090d74487531adabec4c9abf9 100644
--- a/source/libs/scalar/src/sclfunc.c
+++ b/source/libs/scalar/src/sclfunc.c
@@ -633,7 +633,7 @@ static int32_t doTrimFunction(SScalarParam *pInput, int32_t inputNum, SScalarPar
continue;
}
- char *input = colDataGetData(pInput[0].columnData, i);
+ char *input = colDataGetData(pInputData, i);
int32_t len = varDataLen(input);
int32_t charLen = (type == TSDB_DATA_TYPE_VARCHAR) ? len : len / TSDB_NCHAR_SIZE;
trimFn(input, output, type, charLen);
@@ -893,7 +893,7 @@ int32_t toISO8601Function(SScalarParam *pInput, int32_t inputNum, SScalarParam *
memmove(tzInfo + fracLen, tzInfo, strlen(tzInfo));
}
- char tmp[32];
+ char tmp[32] = {0};
sprintf(tmp, ".%s", fraction);
memcpy(tzInfo, tmp, fracLen);
len += fracLen;
diff --git a/source/libs/scalar/test/scalar/CMakeLists.txt b/source/libs/scalar/test/scalar/CMakeLists.txt
index 480c22321d73acb63ed350b5164a9d9af3e31685..672cb5a3de39bfed51c9d399ac3d0431614f50ab 100644
--- a/source/libs/scalar/test/scalar/CMakeLists.txt
+++ b/source/libs/scalar/test/scalar/CMakeLists.txt
@@ -17,7 +17,9 @@ TARGET_INCLUDE_DIRECTORIES(
PUBLIC "${TD_SOURCE_DIR}/source/libs/parser/inc"
PRIVATE "${TD_SOURCE_DIR}/source/libs/scalar/inc"
)
-add_test(
- NAME scalarTest
- COMMAND scalarTest
-)
+if(NOT TD_WINDOWS)
+ add_test(
+ NAME scalarTest
+ COMMAND scalarTest
+ )
+endif(NOT TD_WINDOWS)
diff --git a/source/libs/scheduler/src/schRemote.c b/source/libs/scheduler/src/schRemote.c
index dad4f7196ffaca8ad09da225dd37944210899435..312d587b6f0ee29a9f2da22afc23a2834747b063 100644
--- a/source/libs/scheduler/src/schRemote.c
+++ b/source/libs/scheduler/src/schRemote.c
@@ -94,6 +94,7 @@ int32_t schHandleResponseMsg(SSchJob *pJob, SSchTask *pTask, int32_t msgType, ch
if (schJobNeedToStop(pJob, &status)) {
SCH_TASK_ELOG("rsp not processed cause of job status, job status:%s, rspCode:0x%x", jobTaskStatusStr(status),
rspCode);
+ taosMemoryFreeClear(msg);
SCH_RET(atomic_load_32(&pJob->errCode));
}
@@ -121,6 +122,8 @@ int32_t schHandleResponseMsg(SSchJob *pJob, SSchTask *pTask, int32_t msgType, ch
}
SCH_ERR_JRET(rspCode);
+ taosMemoryFreeClear(msg);
+
SCH_ERR_RET(schProcessOnTaskSuccess(pJob, pTask));
break;
}
@@ -145,6 +148,8 @@ int32_t schHandleResponseMsg(SSchJob *pJob, SSchTask *pTask, int32_t msgType, ch
}
SCH_ERR_JRET(rspCode);
+ taosMemoryFreeClear(msg);
+
SCH_ERR_RET(schProcessOnTaskSuccess(pJob, pTask));
break;
}
@@ -164,6 +169,9 @@ int32_t schHandleResponseMsg(SSchJob *pJob, SSchTask *pTask, int32_t msgType, ch
if (NULL == msg) {
SCH_ERR_JRET(TSDB_CODE_QRY_INVALID_INPUT);
}
+
+ taosMemoryFreeClear(msg);
+
SCH_ERR_RET(schProcessOnTaskSuccess(pJob, pTask));
break;
}
@@ -210,6 +218,8 @@ int32_t schHandleResponseMsg(SSchJob *pJob, SSchTask *pTask, int32_t msgType, ch
SCH_UNLOCK(SCH_WRITE, &pJob->resLock);
}
+ taosMemoryFreeClear(msg);
+
SCH_ERR_RET(schProcessOnTaskSuccess(pJob, pTask));
break;
@@ -224,6 +234,8 @@ int32_t schHandleResponseMsg(SSchJob *pJob, SSchTask *pTask, int32_t msgType, ch
SCH_ERR_JRET(rsp->code);
SCH_ERR_JRET(schSaveJobQueryRes(pJob, rsp));
+
+ taosMemoryFreeClear(msg);
SCH_ERR_RET(schProcessOnTaskSuccess(pJob, pTask));
@@ -275,6 +287,8 @@ int32_t schHandleResponseMsg(SSchJob *pJob, SSchTask *pTask, int32_t msgType, ch
SCH_ERR_JRET(schProcessOnExplainDone(pJob, pTask, pRsp));
}
+ taosMemoryFreeClear(msg);
+
return TSDB_CODE_SUCCESS;
}
@@ -282,6 +296,8 @@ int32_t schHandleResponseMsg(SSchJob *pJob, SSchTask *pTask, int32_t msgType, ch
SCH_ERR_JRET(schFetchFromRemote(pJob));
+ taosMemoryFreeClear(msg);
+
return TSDB_CODE_SUCCESS;
}
@@ -300,6 +316,8 @@ int32_t schHandleResponseMsg(SSchJob *pJob, SSchTask *pTask, int32_t msgType, ch
SCH_TASK_DLOG("got fetch rsp, rows:%d, complete:%d", htonl(rsp->numOfRows), rsp->completed);
+ msg = NULL;
+
schProcessOnDataFetched(pJob);
break;
}
@@ -322,6 +340,8 @@ int32_t schHandleResponseMsg(SSchJob *pJob, SSchTask *pTask, int32_t msgType, ch
_return:
+ taosMemoryFreeClear(msg);
+
SCH_RET(schProcessOnTaskFailure(pJob, pTask, code));
}
diff --git a/source/libs/scheduler/src/schUtil.c b/source/libs/scheduler/src/schUtil.c
index cec754bdcdc76a83bf637448c4303037b8b74447..81c95ea976e0c685fa1585df6dbb42bed75fd0c8 100644
--- a/source/libs/scheduler/src/schUtil.c
+++ b/source/libs/scheduler/src/schUtil.c
@@ -41,7 +41,7 @@ uint64_t schGenUUID(void) {
static int32_t requestSerialId = 0;
if (hashId == 0) {
- char uid[64];
+ char uid[64] = {0};
int32_t code = taosGetSystemUUID(uid, tListLen(uid));
if (code != TSDB_CODE_SUCCESS) {
qError("Failed to get the system uid, reason:%s", tstrerror(TAOS_SYSTEM_ERROR(errno)));
diff --git a/source/libs/stream/src/tstream.c b/source/libs/stream/src/tstream.c
index 7e4f83a693cf9301da29493ea984828c2731552a..e033645667b2f1a3953feaaaca8daf4ed4331bf8 100644
--- a/source/libs/stream/src/tstream.c
+++ b/source/libs/stream/src/tstream.c
@@ -35,6 +35,14 @@ void* streamDataBlockDecode(const void* buf, SStreamDataBlock* pInput) {
return (void*)buf;
}
+static int32_t streamBuildDispatchMsg(SStreamTask* pTask, SArray* data, SRpcMsg* pMsg, SEpSet** ppEpSet) {
+ SStreamDispatchReq req = {
+ .streamId = pTask->streamId,
+ .data = data,
+ };
+ return 0;
+}
+
static int32_t streamBuildExecMsg(SStreamTask* pTask, SArray* data, SRpcMsg* pMsg, SEpSet** ppEpSet) {
SStreamTaskExecReq req = {
.streamId = pTask->streamId,
@@ -59,7 +67,7 @@ static int32_t streamBuildExecMsg(SStreamTask* pTask, SArray* data, SRpcMsg* pMs
} else if (pTask->dispatchType == TASK_DISPATCH__SHUFFLE) {
// TODO use general name rule of schemaless
- char ctbName[TSDB_TABLE_FNAME_LEN + 22];
+ char ctbName[TSDB_TABLE_FNAME_LEN + 22] = {0};
// all groupId must be the same in an array
SSDataBlock* pBlock = taosArrayGet(data, 0);
sprintf(ctbName, "%s:%ld", pTask->shuffleDispatcher.stbFullName, pBlock->info.groupId);
@@ -407,6 +415,26 @@ int32_t streamProcessRecoverRsp(SStreamTask* pTask, SStreamTaskRecoverRsp* pRsp)
return 0;
}
+int32_t tEncodeStreamDispatchReq(SEncoder* pEncoder, const SStreamDispatchReq* pReq) {
+ if (tStartEncode(pEncoder) < 0) return -1;
+ if (tEncodeI64(pEncoder, pReq->streamId) < 0) return -1;
+ if (tEncodeI32(pEncoder, pReq->taskId) < 0) return -1;
+ if (tEncodeI32(pEncoder, pReq->sourceTaskId) < 0) return -1;
+ if (tEncodeI32(pEncoder, pReq->sourceVg) < 0) return -1;
+ tEndEncode(pEncoder);
+ return 0;
+}
+
+int32_t tDecodeStreamDispatchReq(SDecoder* pDecoder, SStreamDispatchReq* pReq) {
+ if (tStartDecode(pDecoder) < 0) return -1;
+ if (tDecodeI64(pDecoder, &pReq->streamId) < 0) return -1;
+ if (tDecodeI32(pDecoder, &pReq->taskId) < 0) return -1;
+ if (tDecodeI32(pDecoder, &pReq->sourceTaskId) < 0) return -1;
+ if (tDecodeI32(pDecoder, &pReq->sourceVg) < 0) return -1;
+ tEndDecode(pDecoder);
+ return 0;
+}
+
int32_t tEncodeSStreamTaskExecReq(void** buf, const SStreamTaskExecReq* pReq) {
int32_t tlen = 0;
tlen += taosEncodeFixedI64(buf, pReq->streamId);
diff --git a/source/libs/stream/src/tstreamUpdate.c b/source/libs/stream/src/tstreamUpdate.c
index 75319a2354f638d6dab9d871bdd402cfb15ee2c4..7587fcecc99962b2cd0eda135a121acb281a1a48 100644
--- a/source/libs/stream/src/tstreamUpdate.c
+++ b/source/libs/stream/src/tstreamUpdate.c
@@ -72,12 +72,14 @@ static int64_t adjustInterval(int64_t interval, int32_t precision) {
return val;
}
-static int64_t adjustWatermark(int64_t interval, int64_t watermark) {
- if (watermark <= 0 || watermark > MAX_NUM_SCALABLE_BF * interval) {
- watermark = MAX_NUM_SCALABLE_BF * interval;
- } else if (watermark < MIN_NUM_SCALABLE_BF * interval) {
- watermark = MIN_NUM_SCALABLE_BF * interval;
- }
+static int64_t adjustWatermark(int64_t adjInterval, int64_t originInt, int64_t watermark) {
+ if (watermark <= 0) {
+ watermark = TMIN(originInt/adjInterval, 1) * adjInterval;
+ } else if (watermark > MAX_NUM_SCALABLE_BF * adjInterval) {
+ watermark = MAX_NUM_SCALABLE_BF * adjInterval;
+ }/* else if (watermark < MIN_NUM_SCALABLE_BF * adjInterval) {
+ watermark = MIN_NUM_SCALABLE_BF * adjInterval;
+ }*/ // Todo(liuyao) save window info to tdb
return watermark;
}
@@ -94,7 +96,7 @@ SUpdateInfo *updateInfoInit(int64_t interval, int32_t precision, int64_t waterma
pInfo->pTsSBFs = NULL;
pInfo->minTS = -1;
pInfo->interval = adjustInterval(interval, precision);
- pInfo->watermark = adjustWatermark(pInfo->interval, watermark);
+ pInfo->watermark = adjustWatermark(pInfo->interval, interval, watermark);
uint64_t bfSize = (uint64_t)(pInfo->watermark / pInfo->interval);
@@ -149,13 +151,18 @@ static SScalableBf *getSBf(SUpdateInfo *pInfo, TSKEY ts) {
bool updateInfoIsUpdated(SUpdateInfo *pInfo, tb_uid_t tableId, TSKEY ts) {
int32_t res = TSDB_CODE_FAILED;
uint64_t index = ((uint64_t)tableId) % pInfo->numBuckets;
+ TSKEY maxTs = *(TSKEY *)taosArrayGet(pInfo->pTsBuckets, index);
+ if (ts < maxTs - pInfo->watermark) {
+ // this window has been closed.
+ return true;
+ }
+
SScalableBf *pSBf = getSBf(pInfo, ts);
// pSBf may be a null pointer
if (pSBf) {
res = tScalableBfPut(pSBf, &ts, sizeof(TSKEY));
}
- TSKEY maxTs = *(TSKEY *)taosArrayGet(pInfo->pTsBuckets, index);
if (maxTs < ts) {
taosArraySet(pInfo->pTsBuckets, index, &ts);
return false;
diff --git a/source/libs/sync/inc/syncSnapshot.h b/source/libs/sync/inc/syncSnapshot.h
index b3174a4b36855b417ababbb0c4614867433a2e83..43d1c0c0c38bc9836fdb9e3210f141af44376700 100644
--- a/source/libs/sync/inc/syncSnapshot.h
+++ b/source/libs/sync/inc/syncSnapshot.h
@@ -28,10 +28,12 @@ extern "C" {
#include "taosdef.h"
typedef struct SSyncSnapshotSender {
- bool isStart;
- int32_t progressIndex;
+ int32_t sending;
+ int32_t received;
+ bool finish;
void * pCurrentBlock;
- int32_t len;
+ int32_t blockLen;
+ int64_t sendingMS;
SSnapshot *pSnapshot;
SSyncNode *pSyncNode;
} SSyncSnapshotSender;
@@ -43,7 +45,8 @@ cJSON * snapshotSender2Json(SSyncSnapshotSender *pSender);
char * snapshotSender2Str(SSyncSnapshotSender *pSender);
typedef struct SSyncSnapshotReceiver {
- bool isStart;
+ bool start;
+ int32_t received;
int32_t progressIndex;
void * pCurrentBlock;
int32_t len;
diff --git a/source/libs/sync/src/syncAppendEntries.c b/source/libs/sync/src/syncAppendEntries.c
index 46be7597c7f82afefbd83f6f6258c4ba811ae046..3afe7b15e213c0da3760c7a8ef1f313d145cd31f 100644
--- a/source/libs/sync/src/syncAppendEntries.c
+++ b/source/libs/sync/src/syncAppendEntries.c
@@ -89,7 +89,7 @@
int32_t syncNodeOnAppendEntriesCb(SSyncNode* ths, SyncAppendEntries* pMsg) {
int32_t ret = 0;
- char logBuf[128];
+ char logBuf[128] = {0};
snprintf(logBuf, sizeof(logBuf), "==syncNodeOnAppendEntriesCb== term:%lu", ths->pRaftStore->currentTerm);
syncAppendEntriesLog2(logBuf, pMsg);
diff --git a/source/libs/sync/src/syncAppendEntriesReply.c b/source/libs/sync/src/syncAppendEntriesReply.c
index 77d85e29151205edd31deed1c40f5dbffca90849..4e6d870e194a223bd35d5671dc17532bd5e8626e 100644
--- a/source/libs/sync/src/syncAppendEntriesReply.c
+++ b/source/libs/sync/src/syncAppendEntriesReply.c
@@ -38,7 +38,7 @@
int32_t syncNodeOnAppendEntriesReplyCb(SSyncNode* ths, SyncAppendEntriesReply* pMsg) {
int32_t ret = 0;
- char logBuf[128];
+ char logBuf[128] = {0};
snprintf(logBuf, sizeof(logBuf), "==syncNodeOnAppendEntriesReplyCb== term:%lu", ths->pRaftStore->currentTerm);
syncAppendEntriesReplyLog2(logBuf, pMsg);
@@ -57,7 +57,7 @@ int32_t syncNodeOnAppendEntriesReplyCb(SSyncNode* ths, SyncAppendEntriesReply* p
// }
if (pMsg->term > ths->pRaftStore->currentTerm) {
- char logBuf[128];
+ char logBuf[128] = {0};
snprintf(logBuf, sizeof(logBuf), "syncNodeOnAppendEntriesReplyCb error term, receive:%lu current:%lu", pMsg->term,
ths->pRaftStore->currentTerm);
syncNodeLog2(logBuf, ths);
diff --git a/source/libs/sync/src/syncIO.c b/source/libs/sync/src/syncIO.c
index 39760c32e83eddc060aeb9669fb252eaca816e54..e30a39e6342c4b7df77ee9cfdbe4f29333e36c16 100644
--- a/source/libs/sync/src/syncIO.c
+++ b/source/libs/sync/src/syncIO.c
@@ -74,7 +74,7 @@ int32_t syncIOSendMsg(const SEpSet *pEpSet, SRpcMsg *pMsg) {
{
syncUtilMsgNtoH(pMsg->pCont);
- char logBuf[256];
+ char logBuf[256] = {0};
snprintf(logBuf, sizeof(logBuf), "==syncIOSendMsg== %s:%d", pEpSet->eps[0].fqdn, pEpSet->eps[0].port);
syncRpcMsgLog2(logBuf, pMsg);
@@ -89,7 +89,7 @@ int32_t syncIOSendMsg(const SEpSet *pEpSet, SRpcMsg *pMsg) {
int32_t syncIOEqMsg(const SMsgCb *msgcb, SRpcMsg *pMsg) {
int32_t ret = 0;
- char logBuf[128];
+ char logBuf[128] = {0};
syncRpcMsgLog2((char *)"==syncIOEqMsg==", pMsg);
SRpcMsg *pTemp;
diff --git a/source/libs/sync/src/syncIndexMgr.c b/source/libs/sync/src/syncIndexMgr.c
index b44b0da750a3648e0a9dd834e13d752848cd572d..4d556d21dde7e56c2048cc314f86ad0a8949bc37 100644
--- a/source/libs/sync/src/syncIndexMgr.c
+++ b/source/libs/sync/src/syncIndexMgr.c
@@ -76,7 +76,7 @@ SyncIndex syncIndexMgrGetIndex(SSyncIndexMgr *pSyncIndexMgr, const SRaftId *pRaf
}
cJSON *syncIndexMgr2Json(SSyncIndexMgr *pSyncIndexMgr) {
- char u64buf[128];
+ char u64buf[128] = {0};
cJSON *pRoot = cJSON_CreateObject();
if (pSyncIndexMgr != NULL) {
diff --git a/source/libs/sync/src/syncMain.c b/source/libs/sync/src/syncMain.c
index 5ad8df11a922e14b470b5e09a916c8cb4c70a239..620fc514c6960754762187445b167098ce4382d3 100644
--- a/source/libs/sync/src/syncMain.c
+++ b/source/libs/sync/src/syncMain.c
@@ -815,7 +815,7 @@ int32_t syncNodeSendMsgByInfo(const SNodeInfo* nodeInfo, SSyncNode* pSyncNode, S
}
cJSON* syncNode2Json(const SSyncNode* pSyncNode) {
- char u64buf[128];
+ char u64buf[128] = {0};
cJSON* pRoot = cJSON_CreateObject();
if (pSyncNode != NULL) {
@@ -1338,7 +1338,7 @@ static int32_t syncNodeAppendNoop(SSyncNode* ths) {
// on message ----
int32_t syncNodeOnPingCb(SSyncNode* ths, SyncPing* pMsg) {
// log state
- char logBuf[1024];
+ char logBuf[1024] = {0};
snprintf(logBuf, sizeof(logBuf),
"==syncNodeOnPingCb== vgId:%d, state: %d, %s, term:%lu electTimerLogicClock:%lu, "
"electTimerLogicClockUser:%lu, electTimerMS:%d",
diff --git a/source/libs/sync/src/syncMessage.c b/source/libs/sync/src/syncMessage.c
index 57cbdaaf795b025af5f2aa36108b28845c91e1b7..fae069f2e6b13c0073c6309f889dc7f8f92c8c6e 100644
--- a/source/libs/sync/src/syncMessage.c
+++ b/source/libs/sync/src/syncMessage.c
@@ -215,7 +215,7 @@ SyncTimeout* syncTimeoutFromRpcMsg2(const SRpcMsg* pRpcMsg) {
}
cJSON* syncTimeout2Json(const SyncTimeout* pMsg) {
- char u64buf[128];
+ char u64buf[128] = {0};
cJSON* pRoot = cJSON_CreateObject();
if (pMsg != NULL) {
@@ -442,7 +442,7 @@ SyncPing* syncPingFromRpcMsg2(const SRpcMsg* pRpcMsg) {
}
cJSON* syncPing2Json(const SyncPing* pMsg) {
- char u64buf[128];
+ char u64buf[128] = {0};
cJSON* pRoot = cJSON_CreateObject();
if (pMsg != NULL) {
@@ -456,7 +456,7 @@ cJSON* syncPing2Json(const SyncPing* pMsg) {
{
uint64_t u64 = pMsg->srcId.addr;
cJSON* pTmp = pSrcId;
- char host[128];
+ char host[128] = {0};
uint16_t port;
syncUtilU642Addr(u64, host, sizeof(host), &port);
cJSON_AddStringToObject(pTmp, "addr_host", host);
@@ -471,7 +471,7 @@ cJSON* syncPing2Json(const SyncPing* pMsg) {
{
uint64_t u64 = pMsg->destId.addr;
cJSON* pTmp = pDestId;
- char host[128];
+ char host[128] = {0};
uint16_t port;
syncUtilU642Addr(u64, host, sizeof(host), &port);
cJSON_AddStringToObject(pTmp, "addr_host", host);
@@ -702,7 +702,7 @@ SyncPingReply* syncPingReplyFromRpcMsg2(const SRpcMsg* pRpcMsg) {
}
cJSON* syncPingReply2Json(const SyncPingReply* pMsg) {
- char u64buf[128];
+ char u64buf[128] = {0};
cJSON* pRoot = cJSON_CreateObject();
if (pMsg != NULL) {
@@ -716,7 +716,7 @@ cJSON* syncPingReply2Json(const SyncPingReply* pMsg) {
{
uint64_t u64 = pMsg->srcId.addr;
cJSON* pTmp = pSrcId;
- char host[128];
+ char host[128] = {0};
uint16_t port;
syncUtilU642Addr(u64, host, sizeof(host), &port);
cJSON_AddStringToObject(pTmp, "addr_host", host);
@@ -731,7 +731,7 @@ cJSON* syncPingReply2Json(const SyncPingReply* pMsg) {
{
uint64_t u64 = pMsg->destId.addr;
cJSON* pTmp = pDestId;
- char host[128];
+ char host[128] = {0};
uint16_t port;
syncUtilU642Addr(u64, host, sizeof(host), &port);
cJSON_AddStringToObject(pTmp, "addr_host", host);
@@ -869,7 +869,7 @@ SyncClientRequest* syncClientRequestFromRpcMsg2(const SRpcMsg* pRpcMsg) {
}
cJSON* syncClientRequest2Json(const SyncClientRequest* pMsg) {
- char u64buf[128];
+ char u64buf[128] = {0};
cJSON* pRoot = cJSON_CreateObject();
if (pMsg != NULL) {
@@ -995,7 +995,7 @@ SyncRequestVote* syncRequestVoteFromRpcMsg2(const SRpcMsg* pRpcMsg) {
}
cJSON* syncRequestVote2Json(const SyncRequestVote* pMsg) {
- char u64buf[128];
+ char u64buf[128] = {0};
cJSON* pRoot = cJSON_CreateObject();
if (pMsg != NULL) {
@@ -1009,7 +1009,7 @@ cJSON* syncRequestVote2Json(const SyncRequestVote* pMsg) {
{
uint64_t u64 = pMsg->srcId.addr;
cJSON* pTmp = pSrcId;
- char host[128];
+ char host[128] = {0};
uint16_t port;
syncUtilU642Addr(u64, host, sizeof(host), &port);
cJSON_AddStringToObject(pTmp, "addr_host", host);
@@ -1023,7 +1023,7 @@ cJSON* syncRequestVote2Json(const SyncRequestVote* pMsg) {
{
uint64_t u64 = pMsg->destId.addr;
cJSON* pTmp = pDestId;
- char host[128];
+ char host[128] = {0};
uint16_t port;
syncUtilU642Addr(u64, host, sizeof(host), &port);
cJSON_AddStringToObject(pTmp, "addr_host", host);
@@ -1144,7 +1144,7 @@ SyncRequestVoteReply* syncRequestVoteReplyFromRpcMsg2(const SRpcMsg* pRpcMsg) {
}
cJSON* syncRequestVoteReply2Json(const SyncRequestVoteReply* pMsg) {
- char u64buf[128];
+ char u64buf[128] = {0};
cJSON* pRoot = cJSON_CreateObject();
if (pMsg != NULL) {
@@ -1158,7 +1158,7 @@ cJSON* syncRequestVoteReply2Json(const SyncRequestVoteReply* pMsg) {
{
uint64_t u64 = pMsg->srcId.addr;
cJSON* pTmp = pSrcId;
- char host[128];
+ char host[128] = {0};
uint16_t port;
syncUtilU642Addr(u64, host, sizeof(host), &port);
cJSON_AddStringToObject(pTmp, "addr_host", host);
@@ -1172,7 +1172,7 @@ cJSON* syncRequestVoteReply2Json(const SyncRequestVoteReply* pMsg) {
{
uint64_t u64 = pMsg->destId.addr;
cJSON* pTmp = pDestId;
- char host[128];
+ char host[128] = {0};
uint16_t port;
syncUtilU642Addr(u64, host, sizeof(host), &port);
cJSON_AddStringToObject(pTmp, "addr_host", host);
@@ -1292,7 +1292,7 @@ SyncAppendEntries* syncAppendEntriesFromRpcMsg2(const SRpcMsg* pRpcMsg) {
}
cJSON* syncAppendEntries2Json(const SyncAppendEntries* pMsg) {
- char u64buf[128];
+ char u64buf[128] = {0};
cJSON* pRoot = cJSON_CreateObject();
if (pMsg != NULL) {
@@ -1306,7 +1306,7 @@ cJSON* syncAppendEntries2Json(const SyncAppendEntries* pMsg) {
{
uint64_t u64 = pMsg->srcId.addr;
cJSON* pTmp = pSrcId;
- char host[128];
+ char host[128] = {0};
uint16_t port;
syncUtilU642Addr(u64, host, sizeof(host), &port);
cJSON_AddStringToObject(pTmp, "addr_host", host);
@@ -1321,7 +1321,7 @@ cJSON* syncAppendEntries2Json(const SyncAppendEntries* pMsg) {
{
uint64_t u64 = pMsg->destId.addr;
cJSON* pTmp = pDestId;
- char host[128];
+ char host[128] = {0};
uint16_t port;
syncUtilU642Addr(u64, host, sizeof(host), &port);
cJSON_AddStringToObject(pTmp, "addr_host", host);
@@ -1456,7 +1456,7 @@ SyncAppendEntriesReply* syncAppendEntriesReplyFromRpcMsg2(const SRpcMsg* pRpcMsg
}
cJSON* syncAppendEntriesReply2Json(const SyncAppendEntriesReply* pMsg) {
- char u64buf[128];
+ char u64buf[128] = {0};
cJSON* pRoot = cJSON_CreateObject();
if (pMsg != NULL) {
@@ -1470,7 +1470,7 @@ cJSON* syncAppendEntriesReply2Json(const SyncAppendEntriesReply* pMsg) {
{
uint64_t u64 = pMsg->srcId.addr;
cJSON* pTmp = pSrcId;
- char host[128];
+ char host[128] = {0};
uint16_t port;
syncUtilU642Addr(u64, host, sizeof(host), &port);
cJSON_AddStringToObject(pTmp, "addr_host", host);
@@ -1485,7 +1485,7 @@ cJSON* syncAppendEntriesReply2Json(const SyncAppendEntriesReply* pMsg) {
{
uint64_t u64 = pMsg->destId.addr;
cJSON* pTmp = pDestId;
- char host[128];
+ char host[128] = {0};
uint16_t port;
syncUtilU642Addr(u64, host, sizeof(host), &port);
cJSON_AddStringToObject(pTmp, "addr_host", host);
@@ -1624,7 +1624,7 @@ void syncApplyMsg2OriginalRpcMsg(const SyncApplyMsg* pMsg, SRpcMsg* pOriginalRpc
}
cJSON* syncApplyMsg2Json(const SyncApplyMsg* pMsg) {
- char u64buf[128];
+ char u64buf[128] = {0};
cJSON* pRoot = cJSON_CreateObject();
if (pMsg != NULL) {
diff --git a/source/libs/sync/src/syncRaftCfg.c b/source/libs/sync/src/syncRaftCfg.c
index 70481b853ece4ba5cab45f303184042494b44609..3e1931e2c37e626b7ab049299a9b83b8a78a2cf1 100644
--- a/source/libs/sync/src/syncRaftCfg.c
+++ b/source/libs/sync/src/syncRaftCfg.c
@@ -28,7 +28,7 @@ SRaftCfg *raftCfgOpen(const char *path) {
taosLSeekFile(pCfg->pFile, 0, SEEK_SET);
- char buf[1024];
+ char buf[1024] = {0};
int len = taosReadFile(pCfg->pFile, buf, sizeof(buf));
assert(len > 0);
@@ -51,15 +51,15 @@ int32_t raftCfgPersist(SRaftCfg *pRaftCfg) {
char *s = raftCfg2Str(pRaftCfg);
taosLSeekFile(pRaftCfg->pFile, 0, SEEK_SET);
- char buf[CONFIG_FILE_LEN];
+ char buf[CONFIG_FILE_LEN] = {0};
memset(buf, 0, sizeof(buf));
ASSERT(strlen(s) + 1 <= CONFIG_FILE_LEN);
snprintf(buf, sizeof(buf), "%s", s);
int64_t ret = taosWriteFile(pRaftCfg->pFile, buf, sizeof(buf));
assert(ret == sizeof(buf));
- //int64_t ret = taosWriteFile(pRaftCfg->pFile, s, strlen(s) + 1);
- //assert(ret == strlen(s) + 1);
+ // int64_t ret = taosWriteFile(pRaftCfg->pFile, s, strlen(s) + 1);
+ // assert(ret == strlen(s) + 1);
taosMemoryFree(s);
taosFsyncFile(pRaftCfg->pFile);
@@ -67,7 +67,7 @@ int32_t raftCfgPersist(SRaftCfg *pRaftCfg) {
}
cJSON *syncCfg2Json(SSyncCfg *pSyncCfg) {
- char u64buf[128];
+ char u64buf[128] = {0};
cJSON *pRoot = cJSON_CreateObject();
if (pSyncCfg != NULL) {
@@ -170,17 +170,17 @@ int32_t raftCfgCreateFile(SSyncCfg *pCfg, int8_t isStandBy, const char *path) {
SRaftCfg raftCfg;
raftCfg.cfg = *pCfg;
raftCfg.isStandBy = isStandBy;
- char * s = raftCfg2Str(&raftCfg);
+ char *s = raftCfg2Str(&raftCfg);
- char buf[CONFIG_FILE_LEN];
+ char buf[CONFIG_FILE_LEN] = {0};
memset(buf, 0, sizeof(buf));
ASSERT(strlen(s) + 1 <= CONFIG_FILE_LEN);
snprintf(buf, sizeof(buf), "%s", s);
int64_t ret = taosWriteFile(pFile, buf, sizeof(buf));
assert(ret == sizeof(buf));
- //int64_t ret = taosWriteFile(pFile, s, strlen(s) + 1);
- //assert(ret == strlen(s) + 1);
+ // int64_t ret = taosWriteFile(pFile, s, strlen(s) + 1);
+ // assert(ret == strlen(s) + 1);
taosMemoryFree(s);
taosCloseFile(&pFile);
diff --git a/source/libs/sync/src/syncRaftEntry.c b/source/libs/sync/src/syncRaftEntry.c
index 21ee35eaf9c276636d754048095d6b2d44f18796..8755f71654382f3913a3c81b6ee1e9b6e91dbb69 100644
--- a/source/libs/sync/src/syncRaftEntry.c
+++ b/source/libs/sync/src/syncRaftEntry.c
@@ -107,7 +107,7 @@ SSyncRaftEntry* syncEntryDeserialize(const char* buf, uint32_t len) {
}
cJSON* syncEntry2Json(const SSyncRaftEntry* pEntry) {
- char u64buf[128];
+ char u64buf[128] = {0};
cJSON* pRoot = cJSON_CreateObject();
if (pEntry != NULL) {
diff --git a/source/libs/sync/src/syncRaftLog.c b/source/libs/sync/src/syncRaftLog.c
index 58fa8c2e8f1e0b4d89f003653efef4f0a3dd4b4b..a6397f8cba24694d6f36847af5e877c72bd1a920 100644
--- a/source/libs/sync/src/syncRaftLog.c
+++ b/source/libs/sync/src/syncRaftLog.c
@@ -190,7 +190,7 @@ SSyncRaftEntry* logStoreGetLastEntry(SSyncLogStore* pLogStore) {
}
cJSON* logStore2Json(SSyncLogStore* pLogStore) {
- char u64buf[128];
+ char u64buf[128] = {0};
SSyncLogStoreData* pData = (SSyncLogStoreData*)pLogStore->data;
cJSON* pRoot = cJSON_CreateObject();
@@ -227,7 +227,7 @@ char* logStore2Str(SSyncLogStore* pLogStore) {
}
cJSON* logStoreSimple2Json(SSyncLogStore* pLogStore) {
- char u64buf[128];
+ char u64buf[128] = {0};
SSyncLogStoreData* pData = (SSyncLogStoreData*)pLogStore->data;
cJSON* pRoot = cJSON_CreateObject();
diff --git a/source/libs/sync/src/syncRaftStore.c b/source/libs/sync/src/syncRaftStore.c
index d6f2e91de7739efd535a23427168180fe2aabc86..52e815292607d69e7d364f6a11c31c184f07914a 100644
--- a/source/libs/sync/src/syncRaftStore.c
+++ b/source/libs/sync/src/syncRaftStore.c
@@ -34,7 +34,7 @@ SRaftStore *raftStoreOpen(const char *path) {
memset(pRaftStore, 0, sizeof(*pRaftStore));
snprintf(pRaftStore->path, sizeof(pRaftStore->path), "%s", path);
- char storeBuf[RAFT_STORE_BLOCK_SIZE];
+ char storeBuf[RAFT_STORE_BLOCK_SIZE] = {0};
memset(storeBuf, 0, sizeof(storeBuf));
if (!raftStoreFileExist(pRaftStore->path)) {
@@ -84,7 +84,7 @@ int32_t raftStorePersist(SRaftStore *pRaftStore) {
assert(pRaftStore != NULL);
int32_t ret;
- char storeBuf[RAFT_STORE_BLOCK_SIZE];
+ char storeBuf[RAFT_STORE_BLOCK_SIZE] = {0};
ret = raftStoreSerialize(pRaftStore, storeBuf, sizeof(storeBuf));
assert(ret == 0);
@@ -107,7 +107,7 @@ int32_t raftStoreSerialize(SRaftStore *pRaftStore, char *buf, size_t len) {
cJSON *pRoot = cJSON_CreateObject();
- char u64Buf[128];
+ char u64Buf[128] = {0};
snprintf(u64Buf, sizeof(u64Buf), "%lu", pRaftStore->currentTerm);
cJSON_AddStringToObject(pRoot, "current_term", u64Buf);
@@ -117,7 +117,7 @@ int32_t raftStoreSerialize(SRaftStore *pRaftStore, char *buf, size_t len) {
cJSON_AddNumberToObject(pRoot, "vote_for_vgid", pRaftStore->voteFor.vgId);
uint64_t u64 = pRaftStore->voteFor.addr;
- char host[128];
+ char host[128] = {0};
uint16_t port;
syncUtilU642Addr(u64, host, sizeof(host), &port);
cJSON_AddStringToObject(pRoot, "addr_host", host);
@@ -184,7 +184,7 @@ void raftStoreSetTerm(SRaftStore *pRaftStore, SyncTerm term) {
int32_t raftStoreFromJson(SRaftStore *pRaftStore, cJSON *pJson) { return 0; }
cJSON *raftStore2Json(SRaftStore *pRaftStore) {
- char u64buf[128];
+ char u64buf[128] = {0};
cJSON *pRoot = cJSON_CreateObject();
if (pRaftStore != NULL) {
@@ -196,7 +196,7 @@ cJSON *raftStore2Json(SRaftStore *pRaftStore) {
cJSON_AddStringToObject(pVoteFor, "addr", u64buf);
{
uint64_t u64 = pRaftStore->voteFor.addr;
- char host[128];
+ char host[128] = {0};
uint16_t port;
syncUtilU642Addr(u64, host, sizeof(host), &port);
cJSON_AddStringToObject(pVoteFor, "addr_host", host);
diff --git a/source/libs/sync/src/syncRequestVote.c b/source/libs/sync/src/syncRequestVote.c
index 619a1546a96ad9642272b7227466d99be833be9f..265677129213c6887012ee72da9066aad25adc09 100644
--- a/source/libs/sync/src/syncRequestVote.c
+++ b/source/libs/sync/src/syncRequestVote.c
@@ -44,7 +44,7 @@
int32_t syncNodeOnRequestVoteCb(SSyncNode* ths, SyncRequestVote* pMsg) {
int32_t ret = 0;
- char logBuf[128];
+ char logBuf[128] = {0};
snprintf(logBuf, sizeof(logBuf), "==syncNodeOnRequestVoteCb== term:%lu", ths->pRaftStore->currentTerm);
syncRequestVoteLog2(logBuf, pMsg);
diff --git a/source/libs/sync/src/syncRequestVoteReply.c b/source/libs/sync/src/syncRequestVoteReply.c
index a6348dff50132f860ada45e9cc3bddfabd6d62d0..75236aee2bcec1ca9c7ae07165c427edbc1e0a04 100644
--- a/source/libs/sync/src/syncRequestVoteReply.c
+++ b/source/libs/sync/src/syncRequestVoteReply.c
@@ -39,7 +39,7 @@
int32_t syncNodeOnRequestVoteReplyCb(SSyncNode* ths, SyncRequestVoteReply* pMsg) {
int32_t ret = 0;
- char logBuf[128];
+ char logBuf[128] = {0};
snprintf(logBuf, sizeof(logBuf), "==syncNodeOnRequestVoteReplyCb== term:%lu", ths->pRaftStore->currentTerm);
syncRequestVoteReplyLog2(logBuf, pMsg);
@@ -56,7 +56,7 @@ int32_t syncNodeOnRequestVoteReplyCb(SSyncNode* ths, SyncRequestVoteReply* pMsg)
// }
if (pMsg->term > ths->pRaftStore->currentTerm) {
- char logBuf[128];
+ char logBuf[128] = {0};
snprintf(logBuf, sizeof(logBuf), "syncNodeOnRequestVoteReplyCb error term, receive:%lu current:%lu", pMsg->term,
ths->pRaftStore->currentTerm);
syncNodePrint2(logBuf, ths);
diff --git a/source/libs/sync/src/syncUtil.c b/source/libs/sync/src/syncUtil.c
index cf045a692611a64e75c2f4c595180f1e324e75f9..d754acd9f831ac18ce7e28b5ef2fda4b2d8650db 100644
--- a/source/libs/sync/src/syncUtil.c
+++ b/source/libs/sync/src/syncUtil.c
@@ -43,7 +43,7 @@ void syncUtilnodeInfo2EpSet(const SNodeInfo* pNodeInfo, SEpSet* pEpSet) {
}
void syncUtilraftId2EpSet(const SRaftId* raftId, SEpSet* pEpSet) {
- char host[TSDB_FQDN_LEN];
+ char host[TSDB_FQDN_LEN] = {0};
uint16_t port;
syncUtilU642Addr(raftId->addr, host, sizeof(host), &port);
@@ -62,7 +62,7 @@ void syncUtilraftId2EpSet(const SRaftId* raftId, SEpSet* pEpSet) {
void syncUtilnodeInfo2raftId(const SNodeInfo* pNodeInfo, SyncGroupId vgId, SRaftId* raftId) {
uint32_t ipv4 = taosGetIpv4FromFqdn(pNodeInfo->nodeFqdn);
assert(ipv4 != 0xFFFFFFFF);
- char ipbuf[128];
+ char ipbuf[128] = {0};
tinet_ntoa(ipbuf, ipv4);
raftId->addr = syncUtilAddr2U64(ipbuf, pNodeInfo->nodePort);
raftId->vgId = vgId;
@@ -106,7 +106,7 @@ int32_t syncUtilElectRandomMS(int32_t min, int32_t max) {
int32_t syncUtilQuorum(int32_t replicaNum) { return replicaNum / 2 + 1; }
cJSON* syncUtilNodeInfo2Json(const SNodeInfo* p) {
- char u64buf[128];
+ char u64buf[128] = {0};
cJSON* pRoot = cJSON_CreateObject();
cJSON_AddStringToObject(pRoot, "nodeFqdn", p->nodeFqdn);
@@ -118,12 +118,12 @@ cJSON* syncUtilNodeInfo2Json(const SNodeInfo* p) {
}
cJSON* syncUtilRaftId2Json(const SRaftId* p) {
- char u64buf[128];
+ char u64buf[128] = {0};
cJSON* pRoot = cJSON_CreateObject();
snprintf(u64buf, sizeof(u64buf), "%" PRIu64 "", p->addr);
cJSON_AddStringToObject(pRoot, "addr", u64buf);
- char host[128];
+ char host[128] = {0};
uint16_t port;
syncUtilU642Addr(p->addr, host, sizeof(host), &port);
cJSON_AddStringToObject(pRoot, "host", host);
diff --git a/source/libs/sync/src/syncVoteMgr.c b/source/libs/sync/src/syncVoteMgr.c
index 1c1f0809bd796f562e74cfd1d6b5e14015abd485..528c2f26c85c17f33f0a783def69ef9f26798b1b 100644
--- a/source/libs/sync/src/syncVoteMgr.c
+++ b/source/libs/sync/src/syncVoteMgr.c
@@ -90,7 +90,7 @@ void voteGrantedReset(SVotesGranted *pVotesGranted, SyncTerm term) {
}
cJSON *voteGranted2Json(SVotesGranted *pVotesGranted) {
- char u64buf[128];
+ char u64buf[128] = {0};
cJSON *pRoot = cJSON_CreateObject();
if (pVotesGranted != NULL) {
@@ -220,7 +220,7 @@ void votesRespondReset(SVotesRespond *pVotesRespond, SyncTerm term) {
}
cJSON *votesRespond2Json(SVotesRespond *pVotesRespond) {
- char u64buf[128];
+ char u64buf[128] = {0};
cJSON *pRoot = cJSON_CreateObject();
if (pVotesRespond != NULL) {
diff --git a/source/libs/sync/test/syncConfigChangeTest.cpp b/source/libs/sync/test/syncConfigChangeTest.cpp
index 1755b7a8fd967fa5db22b4fdc523cc3f771d3c4b..1ab3ce203ad4a3968bc45ab2382108fa7d97f40c 100644
--- a/source/libs/sync/test/syncConfigChangeTest.cpp
+++ b/source/libs/sync/test/syncConfigChangeTest.cpp
@@ -42,7 +42,7 @@ void CommitCb(struct SSyncFSM* pFsm, const SRpcMsg* pMsg, SFsmCbMeta cbMeta) {
}
if (cbMeta.index > beginIndex) {
- char logBuf[256];
+ char logBuf[256] = {0};
snprintf(logBuf, sizeof(logBuf),
"==callback== ==CommitCb== pFsm:%p, index:%ld, isWeak:%d, code:%d, state:%d %s flag:%lu\n", pFsm,
cbMeta.index, cbMeta.isWeak, cbMeta.code, cbMeta.state, syncUtilState2String(cbMeta.state), cbMeta.flag);
@@ -53,7 +53,7 @@ void CommitCb(struct SSyncFSM* pFsm, const SRpcMsg* pMsg, SFsmCbMeta cbMeta) {
}
void PreCommitCb(struct SSyncFSM* pFsm, const SRpcMsg* pMsg, SFsmCbMeta cbMeta) {
- char logBuf[256];
+ char logBuf[256] = {0};
snprintf(logBuf, sizeof(logBuf),
"==callback== ==PreCommitCb== pFsm:%p, index:%ld, isWeak:%d, code:%d, state:%d %s flag:%lu\n", pFsm,
cbMeta.index, cbMeta.isWeak, cbMeta.code, cbMeta.state, syncUtilState2String(cbMeta.state), cbMeta.flag);
@@ -84,14 +84,15 @@ void ReConfigCb(struct SSyncFSM* pFsm, SSyncCfg newCfg, SReConfigCbMeta cbMeta)
SSyncFSM* createFsm() {
SSyncFSM* pFsm = (SSyncFSM*)taosMemoryMalloc(sizeof(SSyncFSM));
+ memset(pFsm, 0, sizeof(*pFsm));
+
pFsm->FpCommitCb = CommitCb;
pFsm->FpPreCommitCb = PreCommitCb;
pFsm->FpRollBackCb = RollBackCb;
pFsm->FpGetSnapshot = GetSnapshotCb;
pFsm->FpRestoreFinishCb = RestoreFinishCb;
- pFsm->FpSnapshotApply = NULL;
- pFsm->FpSnapshotRead = NULL;
+
pFsm->FpReConfigCb = ReConfigCb;
diff --git a/source/libs/sync/test/syncSnapshotTest.cpp b/source/libs/sync/test/syncSnapshotTest.cpp
index 8ccd69890708781dbfb5b4a3ae835acc5c17d15c..820500e2d8f8b57427fec1f20741755a2ddc2d5c 100644
--- a/source/libs/sync/test/syncSnapshotTest.cpp
+++ b/source/libs/sync/test/syncSnapshotTest.cpp
@@ -75,6 +75,7 @@ int32_t GetSnapshotCb(struct SSyncFSM *pFsm, SSnapshot *pSnapshot) {
void initFsm() {
pFsm = (SSyncFSM *)taosMemoryMalloc(sizeof(SSyncFSM));
+ memset(pFsm, 0, sizeof(*pFsm));
pFsm->FpCommitCb = CommitCb;
pFsm->FpPreCommitCb = PreCommitCb;
pFsm->FpRollBackCb = RollBackCb;
diff --git a/source/libs/transport/src/transSrv.c b/source/libs/transport/src/transSvr.c
similarity index 91%
rename from source/libs/transport/src/transSrv.c
rename to source/libs/transport/src/transSvr.c
index 9018eaacf600a9f8ceedde86672b2362039fbd0e..52b36433bb45ace6b0fa4224fb80b65e0e5e2627 100644
--- a/source/libs/transport/src/transSrv.c
+++ b/source/libs/transport/src/transSvr.c
@@ -20,15 +20,15 @@
static TdThreadOnce transModuleInit = PTHREAD_ONCE_INIT;
static char* notify = "a";
-static int transSrvInst = 0;
+static int tranSSvrInst = 0;
typedef struct {
int notifyCount; //
int init; // init or not
STransMsg msg;
-} SSrvRegArg;
+} SSvrRegArg;
-typedef struct SSrvConn {
+typedef struct SSvrConn {
T_REF_DECLARE()
uv_tcp_t* pTcp;
uv_write_t pWriter;
@@ -42,7 +42,7 @@ typedef struct SSrvConn {
void* hostThrd;
STransQueue srvMsgs;
- SSrvRegArg regArg;
+ SSvrRegArg regArg;
bool broken; // conn broken;
ConnStatus status;
@@ -55,14 +55,14 @@ typedef struct SSrvConn {
char user[TSDB_UNI_LEN]; // user ID for the link
char secret[TSDB_PASSWORD_LEN];
char ckey[TSDB_PASSWORD_LEN]; // ciphering key
-} SSrvConn;
+} SSvrConn;
-typedef struct SSrvMsg {
- SSrvConn* pConn;
+typedef struct SSvrMsg {
+ SSvrConn* pConn;
STransMsg msg;
queue q;
STransMsgType type;
-} SSrvMsg;
+} SSvrMsg;
typedef struct SWorkThrdObj {
TdThread thread;
@@ -127,25 +127,25 @@ static void uvWorkAfterTask(uv_work_t* req, int status);
static void uvWalkCb(uv_handle_t* handle, void* arg);
static void uvFreeCb(uv_handle_t* handle);
-static void uvStartSendRespInternal(SSrvMsg* smsg);
-static void uvPrepareSendData(SSrvMsg* msg, uv_buf_t* wb);
-static void uvStartSendResp(SSrvMsg* msg);
+static void uvStartSendRespInternal(SSvrMsg* smsg);
+static void uvPrepareSendData(SSvrMsg* msg, uv_buf_t* wb);
+static void uvStartSendResp(SSvrMsg* msg);
-static void uvNotifyLinkBrokenToApp(SSrvConn* conn);
+static void uvNotifyLinkBrokenToApp(SSvrConn* conn);
-static void destroySmsg(SSrvMsg* smsg);
+static void destroySmsg(SSvrMsg* smsg);
// check whether already read complete packet
-static SSrvConn* createConn(void* hThrd);
-static void destroyConn(SSrvConn* conn, bool clear /*clear handle or not*/);
-static void destroyConnRegArg(SSrvConn* conn);
+static SSvrConn* createConn(void* hThrd);
+static void destroyConn(SSvrConn* conn, bool clear /*clear handle or not*/);
+static void destroyConnRegArg(SSvrConn* conn);
-static int reallocConnRefHandle(SSrvConn* conn);
+static int reallocConnRefHandle(SSvrConn* conn);
-static void uvHandleQuit(SSrvMsg* msg, SWorkThrdObj* thrd);
-static void uvHandleRelease(SSrvMsg* msg, SWorkThrdObj* thrd);
-static void uvHandleResp(SSrvMsg* msg, SWorkThrdObj* thrd);
-static void uvHandleRegister(SSrvMsg* msg, SWorkThrdObj* thrd);
-static void (*transAsyncHandle[])(SSrvMsg* msg, SWorkThrdObj* thrd) = {uvHandleResp, uvHandleQuit, uvHandleRelease,
+static void uvHandleQuit(SSvrMsg* msg, SWorkThrdObj* thrd);
+static void uvHandleRelease(SSvrMsg* msg, SWorkThrdObj* thrd);
+static void uvHandleResp(SSvrMsg* msg, SWorkThrdObj* thrd);
+static void uvHandleRegister(SSvrMsg* msg, SWorkThrdObj* thrd);
+static void (*transAsyncHandle[])(SSvrMsg* msg, SWorkThrdObj* thrd) = {uvHandleResp, uvHandleQuit, uvHandleRelease,
uvHandleRegister, NULL};
static int32_t exHandlesMgt;
@@ -178,7 +178,7 @@ static bool addHandleToAcceptloop(void* arg);
tTrace("server conn %p received release request", conn); \
\
STransMsg tmsg = {.code = 0, .info.handle = (void*)conn, .info.ahandle = NULL}; \
- SSrvMsg* srvMsg = taosMemoryCalloc(1, sizeof(SSrvMsg)); \
+ SSvrMsg* srvMsg = taosMemoryCalloc(1, sizeof(SSvrMsg)); \
srvMsg->msg = tmsg; \
srvMsg->type = Release; \
srvMsg->pConn = conn; \
@@ -233,18 +233,18 @@ static bool addHandleToAcceptloop(void* arg);
} while (0)
void uvAllocRecvBufferCb(uv_handle_t* handle, size_t suggested_size, uv_buf_t* buf) {
- SSrvConn* conn = handle->data;
+ SSvrConn* conn = handle->data;
SConnBuffer* pBuf = &conn->readBuf;
transAllocBuffer(pBuf, buf);
}
// refers specifically to query or insert timeout
static void uvHandleActivityTimeout(uv_timer_t* handle) {
- SSrvConn* conn = handle->data;
+ SSvrConn* conn = handle->data;
tDebug("%p timeout since no activity", conn);
}
-static void uvHandleReq(SSrvConn* pConn) {
+static void uvHandleReq(SSvrConn* pConn) {
SConnBuffer* pBuf = &pConn->readBuf;
char* msg = pBuf->buf;
uint32_t msgLen = pBuf->len;
@@ -316,7 +316,7 @@ static void uvHandleReq(SSrvConn* pConn) {
void uvOnRecvCb(uv_stream_t* cli, ssize_t nread, const uv_buf_t* buf) {
// opt
- SSrvConn* conn = cli->data;
+ SSvrConn* conn = cli->data;
SConnBuffer* pBuf = &conn->readBuf;
if (nread > 0) {
pBuf->len += nread;
@@ -354,17 +354,17 @@ void uvAllocConnBufferCb(uv_handle_t* handle, size_t suggested_size, uv_buf_t* b
void uvOnTimeoutCb(uv_timer_t* handle) {
// opt
- SSrvConn* pConn = handle->data;
+ SSvrConn* pConn = handle->data;
tError("server conn %p time out", pConn);
}
void uvOnSendCb(uv_write_t* req, int status) {
- SSrvConn* conn = req->data;
+ SSvrConn* conn = req->data;
// transClearBuffer(&conn->readBuf);
if (status == 0) {
tTrace("server conn %p data already was written on stream", conn);
if (!transQueueEmpty(&conn->srvMsgs)) {
- SSrvMsg* msg = transQueuePop(&conn->srvMsgs);
+ SSvrMsg* msg = transQueuePop(&conn->srvMsgs);
// if (msg->type == Release && conn->status != ConnNormal) {
// conn->status = ConnNormal;
// transUnrefSrvHandle(conn);
@@ -376,7 +376,7 @@ void uvOnSendCb(uv_write_t* req, int status) {
destroySmsg(msg);
// send second data, just use for push
if (!transQueueEmpty(&conn->srvMsgs)) {
- msg = (SSrvMsg*)transQueueGet(&conn->srvMsgs, 0);
+ msg = (SSvrMsg*)transQueueGet(&conn->srvMsgs, 0);
if (msg->type == Register && conn->status == ConnAcquire) {
conn->regArg.notifyCount = 0;
conn->regArg.init = 1;
@@ -389,7 +389,7 @@ void uvOnSendCb(uv_write_t* req, int status) {
transQueuePop(&conn->srvMsgs);
taosMemoryFree(msg);
- msg = (SSrvMsg*)transQueueGet(&conn->srvMsgs, 0);
+ msg = (SSvrMsg*)transQueueGet(&conn->srvMsgs, 0);
if (msg != NULL) {
uvStartSendRespInternal(msg);
}
@@ -415,10 +415,10 @@ static void uvOnPipeWriteCb(uv_write_t* req, int status) {
taosMemoryFree(req);
}
-static void uvPrepareSendData(SSrvMsg* smsg, uv_buf_t* wb) {
+static void uvPrepareSendData(SSvrMsg* smsg, uv_buf_t* wb) {
tTrace("server conn %p prepare to send resp", smsg->pConn);
- SSrvConn* pConn = smsg->pConn;
+ SSvrConn* pConn = smsg->pConn;
STransMsg* pMsg = &smsg->msg;
if (pMsg->pCont == 0) {
pMsg->pCont = (void*)rpcMallocCont(0);
@@ -455,17 +455,17 @@ static void uvPrepareSendData(SSrvMsg* smsg, uv_buf_t* wb) {
wb->len = len;
}
-static void uvStartSendRespInternal(SSrvMsg* smsg) {
+static void uvStartSendRespInternal(SSvrMsg* smsg) {
uv_buf_t wb;
uvPrepareSendData(smsg, &wb);
- SSrvConn* pConn = smsg->pConn;
+ SSvrConn* pConn = smsg->pConn;
// uv_timer_stop(&pConn->pTimer);
uv_write(&pConn->pWriter, (uv_stream_t*)pConn->pTcp, &wb, 1, uvOnSendCb);
}
-static void uvStartSendResp(SSrvMsg* smsg) {
+static void uvStartSendResp(SSvrMsg* smsg) {
// impl
- SSrvConn* pConn = smsg->pConn;
+ SSvrConn* pConn = smsg->pConn;
if (pConn->broken == true) {
// persist by
@@ -485,7 +485,7 @@ static void uvStartSendResp(SSrvMsg* smsg) {
return;
}
-static void destroySmsg(SSrvMsg* smsg) {
+static void destroySmsg(SSvrMsg* smsg) {
if (smsg == NULL) {
return;
}
@@ -499,7 +499,7 @@ static void destroyAllConn(SWorkThrdObj* pThrd) {
QUEUE_REMOVE(h);
QUEUE_INIT(h);
- SSrvConn* c = QUEUE_DATA(h, SSrvConn, queue);
+ SSvrConn* c = QUEUE_DATA(h, SSvrConn, queue);
while (T_REF_VAL_GET(c) >= 2) {
transUnrefSrvHandle(c);
}
@@ -509,7 +509,7 @@ static void destroyAllConn(SWorkThrdObj* pThrd) {
void uvWorkerAsyncCb(uv_async_t* handle) {
SAsyncItem* item = handle->data;
SWorkThrdObj* pThrd = item->pThrd;
- SSrvConn* conn = NULL;
+ SSvrConn* conn = NULL;
queue wq;
// batch process to avoid to lock/unlock frequently
@@ -521,7 +521,7 @@ void uvWorkerAsyncCb(uv_async_t* handle) {
queue* head = QUEUE_HEAD(&wq);
QUEUE_REMOVE(head);
- SSrvMsg* msg = QUEUE_DATA(head, SSrvMsg, q);
+ SSvrMsg* msg = QUEUE_DATA(head, SSvrMsg, q);
if (msg == NULL) {
tError("unexcept occurred, continue");
continue;
@@ -649,7 +649,7 @@ void uvOnConnectionCb(uv_stream_t* q, ssize_t nread, const uv_buf_t* buf) {
uv_handle_type pending = uv_pipe_pending_type(pipe);
assert(pending == UV_TCP);
- SSrvConn* pConn = createConn(pThrd);
+ SSvrConn* pConn = createConn(pThrd);
pConn->pTransInst = pThrd->pTransInst;
/* init conn timer*/
@@ -768,10 +768,10 @@ void* transWorkerThread(void* arg) {
return NULL;
}
-static SSrvConn* createConn(void* hThrd) {
+static SSvrConn* createConn(void* hThrd) {
SWorkThrdObj* pThrd = hThrd;
- SSrvConn* pConn = (SSrvConn*)taosMemoryCalloc(1, sizeof(SSrvConn));
+ SSvrConn* pConn = (SSvrConn*)taosMemoryCalloc(1, sizeof(SSvrConn));
QUEUE_INIT(&pConn->queue);
QUEUE_PUSH(&pThrd->conn, &pConn->queue);
@@ -794,7 +794,7 @@ static SSrvConn* createConn(void* hThrd) {
return pConn;
}
-static void destroyConn(SSrvConn* conn, bool clear) {
+static void destroyConn(SSvrConn* conn, bool clear) {
if (conn == NULL) {
return;
}
@@ -808,13 +808,13 @@ static void destroyConn(SSrvConn* conn, bool clear) {
// uv_shutdown(req, (uv_stream_t*)conn->pTcp, uvShutDownCb);
}
}
-static void destroyConnRegArg(SSrvConn* conn) {
+static void destroyConnRegArg(SSvrConn* conn) {
if (conn->regArg.init == 1) {
transFreeMsg(conn->regArg.msg.pCont);
conn->regArg.init = 0;
}
}
-static int reallocConnRefHandle(SSrvConn* conn) {
+static int reallocConnRefHandle(SSvrConn* conn) {
uvReleaseExHandle(conn->refId);
uvRemoveExHandle(conn->refId);
// avoid app continue to send msg on invalid handle
@@ -828,7 +828,7 @@ static int reallocConnRefHandle(SSrvConn* conn) {
return 0;
}
static void uvDestroyConn(uv_handle_t* handle) {
- SSrvConn* conn = handle->data;
+ SSvrConn* conn = handle->data;
if (conn == NULL) {
return;
}
@@ -884,7 +884,7 @@ void* transInitServer(uint32_t ip, uint32_t port, char* label, int numOfThreads,
uv_loop_init(srv->loop);
taosThreadOnce(&transModuleInit, uvInitEnv);
- transSrvInst++;
+ tranSSvrInst++;
assert(0 == uv_pipe_init(srv->loop, &srv->pipeListen, 0));
#ifdef WINDOWS
@@ -981,7 +981,7 @@ void uvDestoryExHandle(void* handle) {
taosMemoryFree(handle);
}
-void uvHandleQuit(SSrvMsg* msg, SWorkThrdObj* thrd) {
+void uvHandleQuit(SSvrMsg* msg, SWorkThrdObj* thrd) {
thrd->quit = true;
if (QUEUE_IS_EMPTY(&thrd->conn)) {
uv_walk(thrd->loop, uvWalkCb, NULL);
@@ -990,8 +990,8 @@ void uvHandleQuit(SSrvMsg* msg, SWorkThrdObj* thrd) {
}
taosMemoryFree(msg);
}
-void uvHandleRelease(SSrvMsg* msg, SWorkThrdObj* thrd) {
- SSrvConn* conn = msg->pConn;
+void uvHandleRelease(SSvrMsg* msg, SWorkThrdObj* thrd) {
+ SSvrConn* conn = msg->pConn;
if (conn->status == ConnAcquire) {
reallocConnRefHandle(conn);
if (!transQueuePush(&conn->srvMsgs, msg)) {
@@ -1004,13 +1004,13 @@ void uvHandleRelease(SSrvMsg* msg, SWorkThrdObj* thrd) {
}
destroySmsg(msg);
}
-void uvHandleResp(SSrvMsg* msg, SWorkThrdObj* thrd) {
+void uvHandleResp(SSvrMsg* msg, SWorkThrdObj* thrd) {
// send msg to client
tDebug("server conn %p start to send resp (2/2)", msg->pConn);
uvStartSendResp(msg);
}
-void uvHandleRegister(SSrvMsg* msg, SWorkThrdObj* thrd) {
- SSrvConn* conn = msg->pConn;
+void uvHandleRegister(SSvrMsg* msg, SWorkThrdObj* thrd) {
+ SSvrConn* conn = msg->pConn;
tDebug("server conn %p register brokenlink callback", conn);
if (conn->status == ConnAcquire) {
if (!transQueuePush(&conn->srvMsgs, msg)) {
@@ -1036,13 +1036,13 @@ void destroyWorkThrd(SWorkThrdObj* pThrd) {
}
taosThreadJoin(pThrd->thread, NULL);
SRV_RELEASE_UV(pThrd->loop);
- TRANS_DESTROY_ASYNC_POOL_MSG(pThrd->asyncPool, SSrvMsg, destroySmsg);
+ TRANS_DESTROY_ASYNC_POOL_MSG(pThrd->asyncPool, SSvrMsg, destroySmsg);
transDestroyAsyncPool(pThrd->asyncPool);
taosMemoryFree(pThrd->loop);
taosMemoryFree(pThrd);
}
void sendQuitToWorkThrd(SWorkThrdObj* pThrd) {
- SSrvMsg* msg = taosMemoryCalloc(1, sizeof(SSrvMsg));
+ SSvrMsg* msg = taosMemoryCalloc(1, sizeof(SSvrMsg));
msg->type = Quit;
tDebug("server send quit msg to work thread");
transSendAsync(pThrd->asyncPool, &msg->q);
@@ -1075,8 +1075,8 @@ void transCloseServer(void* arg) {
taosMemoryFree(srv);
- transSrvInst--;
- if (transSrvInst == 0) {
+ tranSSvrInst--;
+ if (tranSSvrInst == 0) {
TdThreadOnce tmpInit = PTHREAD_ONCE_INIT;
memcpy(&transModuleInit, &tmpInit, sizeof(TdThreadOnce));
uvCloseExHandleMgt();
@@ -1087,7 +1087,7 @@ void transRefSrvHandle(void* handle) {
if (handle == NULL) {
return;
}
- int ref = T_REF_INC((SSrvConn*)handle);
+ int ref = T_REF_INC((SSvrConn*)handle);
tDebug("server conn %p ref count: %d", handle, ref);
}
@@ -1095,10 +1095,10 @@ void transUnrefSrvHandle(void* handle) {
if (handle == NULL) {
return;
}
- int ref = T_REF_DEC((SSrvConn*)handle);
+ int ref = T_REF_DEC((SSvrConn*)handle);
tDebug("server conn %p ref count: %d", handle, ref);
if (ref == 0) {
- destroyConn((SSrvConn*)handle, true);
+ destroyConn((SSvrConn*)handle, true);
}
}
@@ -1113,12 +1113,12 @@ void transReleaseSrvHandle(void* handle) {
STransMsg tmsg = {.code = 0, .info.handle = exh, .info.ahandle = NULL, .info.refId = refId};
- SSrvMsg* srvMsg = taosMemoryCalloc(1, sizeof(SSrvMsg));
- srvMsg->msg = tmsg;
- srvMsg->type = Release;
+ SSvrMsg* m = taosMemoryCalloc(1, sizeof(SSvrMsg));
+ m->msg = tmsg;
+ m->type = Release;
tTrace("server conn %p start to release", exh->handle);
- transSendAsync(pThrd->asyncPool, &srvMsg->q);
+ transSendAsync(pThrd->asyncPool, &m->q);
uvReleaseExHandle(refId);
return;
_return1:
@@ -1141,11 +1141,11 @@ void transSendResponse(const STransMsg* msg) {
SWorkThrdObj* pThrd = exh->pThrd;
ASYNC_ERR_JRET(pThrd);
- SSrvMsg* srvMsg = taosMemoryCalloc(1, sizeof(SSrvMsg));
- srvMsg->msg = tmsg;
- srvMsg->type = Normal;
+ SSvrMsg* m = taosMemoryCalloc(1, sizeof(SSvrMsg));
+ m->msg = tmsg;
+ m->type = Normal;
tDebug("server conn %p start to send resp (1/2)", exh->handle);
- transSendAsync(pThrd->asyncPool, &srvMsg->q);
+ transSendAsync(pThrd->asyncPool, &m->q);
uvReleaseExHandle(refId);
return;
_return1:
@@ -1169,11 +1169,11 @@ void transRegisterMsg(const STransMsg* msg) {
SWorkThrdObj* pThrd = exh->pThrd;
ASYNC_ERR_JRET(pThrd);
- SSrvMsg* srvMsg = taosMemoryCalloc(1, sizeof(SSrvMsg));
- srvMsg->msg = tmsg;
- srvMsg->type = Register;
+ SSvrMsg* m = taosMemoryCalloc(1, sizeof(SSvrMsg));
+ m->msg = tmsg;
+ m->type = Register;
tTrace("server conn %p start to register brokenlink callback", exh->handle);
- transSendAsync(pThrd->asyncPool, &srvMsg->q);
+ transSendAsync(pThrd->asyncPool, &m->q);
uvReleaseExHandle(refId);
return;
@@ -1193,7 +1193,7 @@ int transGetConnInfo(void* thandle, STransHandleInfo* pInfo) {
return -1;
}
SExHandle* ex = thandle;
- SSrvConn* pConn = ex->handle;
+ SSvrConn* pConn = ex->handle;
struct sockaddr_in addr = pConn->addr;
pInfo->clientIp = (uint32_t)(addr.sin_addr.s_addr);
diff --git a/source/libs/transport/test/CMakeLists.txt b/source/libs/transport/test/CMakeLists.txt
index 98a252e008d85b27206fa58055f757dd02d64a78..468b70fb711a15a83c97a5a45adb68dee3d1c368 100644
--- a/source/libs/transport/test/CMakeLists.txt
+++ b/source/libs/transport/test/CMakeLists.txt
@@ -111,10 +111,12 @@ target_link_libraries (pushServer
)
-add_test(
- NAME transUT
- COMMAND transUT
-)
+if(NOT TD_WINDOWS)
+ add_test(
+ NAME transUT
+ COMMAND transUT
+ )
+endif(NOT TD_WINDOWS)
add_test(
NAME transUtilUt
COMMAND transportTest
diff --git a/source/util/src/terror.c b/source/util/src/terror.c
index 66d6ea3ef39c69cca349caf75c4983617e89630c..178d6e8d2b48a5adc62b6c5d83dd414050ffa9f1 100644
--- a/source/util/src/terror.c
+++ b/source/util/src/terror.c
@@ -74,6 +74,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_REPEAT_INIT, "Repeat initialization
TAOS_DEFINE_ERROR(TSDB_CODE_DUP_KEY, "Cannot add duplicate keys to hash")
TAOS_DEFINE_ERROR(TSDB_CODE_NEED_RETRY, "Retry needed")
TAOS_DEFINE_ERROR(TSDB_CODE_OUT_OF_RPC_MEMORY_QUEUE, "Out of memory in rpc queue")
+TAOS_DEFINE_ERROR(TSDB_CODE_INVALID_TIMESTAMP, "Invalid timestamp format")
TAOS_DEFINE_ERROR(TSDB_CODE_REF_NO_MEMORY, "Ref out of memory")
TAOS_DEFINE_ERROR(TSDB_CODE_REF_FULL, "too many Ref Objs")
diff --git a/source/util/src/tlog.c b/source/util/src/tlog.c
index 94b6d0a06cc381a8a09407445e7f54d8f2ce478a..353e94a49096822fe581d7faa0df8a29a6494c12 100644
--- a/source/util/src/tlog.c
+++ b/source/util/src/tlog.c
@@ -491,7 +491,7 @@ void taosDumpData(unsigned char *msg, int32_t len) {
if (!osLogSpaceAvailable()) return;
taosUpdateLogNums(DEBUG_DUMP);
- char temp[256];
+ char temp[256] = {0};
int32_t i, pos = 0, c = 0;
for (i = 0; i < len; ++i) {
diff --git a/source/util/src/tqueue.c b/source/util/src/tqueue.c
index 6a10794ea154306f3c26b9666482a7c3a5b61958..37935087fad693eed254549977182ccaca1085f2 100644
--- a/source/util/src/tqueue.c
+++ b/source/util/src/tqueue.c
@@ -26,6 +26,7 @@ typedef struct STaosQnode STaosQnode;
typedef struct STaosQnode {
STaosQnode *next;
STaosQueue *queue;
+ int64_t timestamp;
int32_t size;
int8_t itype;
int8_t reserved[3];
@@ -144,6 +145,7 @@ void *taosAllocateQitem(int32_t size, EQItype itype) {
STaosQnode *pNode = taosMemoryCalloc(1, sizeof(STaosQnode) + size);
pNode->size = size;
pNode->itype = itype;
+ pNode->timestamp = taosGetTimestampUs();
if (pNode == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
@@ -393,7 +395,7 @@ void taosRemoveFromQset(STaosQset *qset, STaosQueue *queue) {
int32_t taosGetQueueNumber(STaosQset *qset) { return qset->numOfQueues; }
-int32_t taosReadQitemFromQset(STaosQset *qset, void **ppItem, void **ahandle, FItem *itemFp) {
+int32_t taosReadQitemFromQset(STaosQset *qset, void **ppItem, int64_t *ts, void **ahandle, FItem *itemFp) {
STaosQnode *pNode = NULL;
int32_t code = 0;
@@ -415,6 +417,7 @@ int32_t taosReadQitemFromQset(STaosQset *qset, void **ppItem, void **ahandle, FI
*ppItem = pNode->item;
if (ahandle) *ahandle = queue->ahandle;
if (itemFp) *itemFp = queue->itemFp;
+ if (ts) *ts = pNode->timestamp;
queue->head = pNode->next;
if (queue->head == NULL) queue->tail = NULL;
diff --git a/source/util/src/tstrbuild.c b/source/util/src/tstrbuild.c
index 2aae588046402e37569f5a2bde5ed5f72fa24346..c87b889e82ece82c251ddabad1964bc1f0b3ab2f 100644
--- a/source/util/src/tstrbuild.c
+++ b/source/util/src/tstrbuild.c
@@ -69,13 +69,13 @@ void taosStringBuilderAppendString(SStringBuilder* sb, const char* str) {
void taosStringBuilderAppendNull(SStringBuilder* sb) { taosStringBuilderAppendStringLen(sb, "null", 4); }
void taosStringBuilderAppendInteger(SStringBuilder* sb, int64_t v) {
- char buf[64];
+ char buf[64] = {0};
size_t len = snprintf(buf, sizeof(buf), "%" PRId64, v);
taosStringBuilderAppendStringLen(sb, buf, TMIN(len, sizeof(buf)));
}
void taosStringBuilderAppendDouble(SStringBuilder* sb, double v) {
- char buf[512];
+ char buf[512] = {0};
size_t len = snprintf(buf, sizeof(buf), "%.9lf", v);
taosStringBuilderAppendStringLen(sb, buf, TMIN(len, sizeof(buf)));
}
diff --git a/source/util/src/tworker.c b/source/util/src/tworker.c
index dc48fc3f8d2b2e803e8f1593d5471184fa99e059..686e0696ec689b48ecff8f27c7db2eb86daa5eb2 100644
--- a/source/util/src/tworker.c
+++ b/source/util/src/tworker.c
@@ -75,19 +75,20 @@ static void *tQWorkerThreadFp(SQWorker *worker) {
void *msg = NULL;
void *ahandle = NULL;
int32_t code = 0;
+ int64_t ts = 0;
taosBlockSIGPIPE();
setThreadName(pool->name);
uDebug("worker:%s:%d is running", pool->name, worker->id);
while (1) {
- if (taosReadQitemFromQset(pool->qset, (void **)&msg, &ahandle, &fp) == 0) {
+ if (taosReadQitemFromQset(pool->qset, (void **)&msg, &ts, &ahandle, &fp) == 0) {
uDebug("worker:%s:%d qset:%p, got no message and exiting", pool->name, worker->id, pool->qset);
break;
}
if (fp != NULL) {
- SQueueInfo info = {.ahandle = ahandle, .workerId = worker->id, .threadNum = pool->num};
+ SQueueInfo info = {.ahandle = ahandle, .workerId = worker->id, .threadNum = pool->num, .timestamp = ts};
(*fp)(&info, msg);
}
}
diff --git a/tests/pytest/stream/test3.py b/tests/pytest/stream/test3.py
new file mode 100644
index 0000000000000000000000000000000000000000..b45521a9476961394c1cf4b2454d6fb9e2368c68
--- /dev/null
+++ b/tests/pytest/stream/test3.py
@@ -0,0 +1,34 @@
+# -*- coding: utf-8 -*-
+
+import sys
+from util.log import *
+from util.cases import *
+from util.sql import *
+from util.common import tdCom
+class TDTestCase:
+ def init(self, conn, logSql):
+ tdLog.debug("start to execute %s" % __file__)
+ tdSql.init(conn.cursor(), logSql)
+
+ def run(self):
+ #for i in range(100):
+ tdSql.prepare()
+ dbname = tdCom.getLongName(10, "letters")
+ tdSql.execute('create database if not exists djnhawvlgq vgroups 1')
+ tdSql.execute('use djnhawvlgq')
+ tdSql.execute('create table if not exists downsampling_stb (ts timestamp, c1 int, c2 double, c3 varchar(100), c4 bool) tags (t1 int, t2 double, t3 varchar(100), t4 bool);')
+ tdSql.execute('create table downsampling_ct1 using downsampling_stb tags(10, 10.1, "Beijing", True);')
+ tdSql.execute('create table if not exists scalar_stb (ts timestamp, c1 int, c2 double, c3 binary(20), c4 nchar(20), c5 nchar(20)) tags (t1 int);')
+ tdSql.execute('create table scalar_ct1 using scalar_stb tags(10);')
+ tdSql.execute('create table if not exists data_filter_stb (ts timestamp, c1 tinyint, c2 smallint, c3 int, c4 bigint, c5 float, c6 double, c7 binary(100), c8 nchar(200), c9 bool, c10 tinyint unsigned, c11 smallint unsigned, c12 int unsigned, c13 bigint unsigned) tags (t1 tinyint, t2 smallint, t3 int, t4 bigint, t5 float, t6 double, t7 binary(100), t8 nchar(200), t9 bool, t10 tinyint unsigned, t11 smallint unsigned, t12 int unsigned, t13 bigint unsigned)')
+ tdSql.execute('create table if not exists data_filter_ct1 using data_filter_stb tags (1, 2, 3, 4, 5.5, 6.6, "binary7", "nchar8", true, 11, 12, 13, 14)')
+ tdSql.execute('create stream data_filter_stream into output_data_filter_stb as select * from data_filter_stb where ts >= 1653648072973+1s and c1 = 1 or c2 > 1 and c3 != 4 or c4 <= 3 and c5 <> 0 or c6 is not Null or c7 is Null or c8 between "na" and "nchar4" and c8 not between "bi" and "binary" and c8 match "nchar[19]" and c8 nmatch "nchar[25]" or c9 in (1, 2, 3) or c10 not in (6, 7) and c8 like "nch%" and c7 not like "bina_" and c11 <= 10 or c12 is Null or c13 >= 4;')
+ tdSql.execute('insert into data_filter_ct1 values (1653648072973, 1, 1, 1, 3, 1.1, 1.1, "binary1", "nchar1", true, 1, 2, 3, 4);')
+ tdSql.execute('insert into data_filter_ct1 values (1653648072973+1s, 2, 2, 1, 3, 1.1, 1.1, "binary2", "nchar2", true, 2, 3, 4, 5);')
+ def stop(self):
+ tdSql.close()
+ tdLog.success("%s successfully executed" % __file__)
+
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
diff --git a/tests/pytest/util/common.py b/tests/pytest/util/common.py
index 35abc4802f9de2080a6b6a166daf833c9cf04578..8c791efbc644924cfe4c1d85d6422bb671fd1216 100644
--- a/tests/pytest/util/common.py
+++ b/tests/pytest/util/common.py
@@ -14,23 +14,93 @@
import random
import string
from util.sql import tdSql
-
+from util.dnodes import tdDnodes
+import requests
+import time
+import socket
class TDCom:
def init(self, conn, logSql):
tdSql.init(conn.cursor(), logSql)
- def cleanTb(self):
+ def preDefine(self):
+ header = {'Authorization': 'Basic cm9vdDp0YW9zZGF0YQ=='}
+ sql_url = "http://127.0.0.1:6041/rest/sql"
+ sqlt_url = "http://127.0.0.1:6041/rest/sqlt"
+ sqlutc_url = "http://127.0.0.1:6041/rest/sqlutc"
+ influx_url = "http://127.0.0.1:6041/influxdb/v1/write"
+ telnet_url = "http://127.0.0.1:6041/opentsdb/v1/put/telnet"
+ return header, sql_url, sqlt_url, sqlutc_url, influx_url, telnet_url
+
+ def genTcpParam(self):
+ MaxBytes = 1024*1024
+ host ='127.0.0.1'
+ port = 6046
+ return MaxBytes, host, port
+
+ def tcpClient(self, input):
+ MaxBytes = tdCom.genTcpParam()[0]
+ host = tdCom.genTcpParam()[1]
+ port = tdCom.genTcpParam()[2]
+ sock = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
+ sock.connect((host, port))
+ sock.send(input.encode())
+ sock.close()
+
+ def restApiPost(self, sql):
+ requests.post(self.preDefine()[1], sql.encode("utf-8"), headers = self.preDefine()[0])
+
+ def createDb(self, dbname="test", db_update_tag=0, api_type="taosc"):
+ if api_type == "taosc":
+ if db_update_tag == 0:
+ tdSql.execute(f"drop database if exists {dbname}")
+ tdSql.execute(f"create database if not exists {dbname} precision 'us'")
+ else:
+ tdSql.execute(f"drop database if exists {dbname}")
+ tdSql.execute(f"create database if not exists {dbname} precision 'us' update 1")
+ elif api_type == "restful":
+ if db_update_tag == 0:
+ self.restApiPost(f"drop database if exists {dbname}")
+ self.restApiPost(f"create database if not exists {dbname} precision 'us'")
+ else:
+ self.restApiPost(f"drop database if exists {dbname}")
+ self.restApiPost(f"create database if not exists {dbname} precision 'us' update 1")
+ tdSql.execute(f'use {dbname}')
+
+ def genUrl(self, url_type, dbname, precision):
+ if url_type == "influxdb":
+ if precision is None:
+ url = self.preDefine()[4] + "?" + "db=" + dbname
+ else:
+ url = self.preDefine()[4] + "?" + "db=" + dbname + "&precision=" + precision
+ elif url_type == "telnet":
+ url = self.preDefine()[5] + "/" + dbname
+ else:
+ url = self.preDefine()[1]
+ return url
+
+ def schemalessApiPost(self, sql, url_type="influxdb", dbname="test", precision=None):
+ if url_type == "influxdb":
+ url = self.genUrl(url_type, dbname, precision)
+ elif url_type == "telnet":
+ url = self.genUrl(url_type, dbname, precision)
+ res = requests.post(url, sql.encode("utf-8"), headers = self.preDefine()[0])
+ return res
+
+ def cleanTb(self, type="taosc"):
+ '''
+ type is taosc or restful
+ '''
query_sql = "show stables"
res_row_list = tdSql.query(query_sql, True)
stb_list = map(lambda x: x[0], res_row_list)
for stb in stb_list:
- tdSql.execute(f'drop table if exists {stb}')
+ if type == "taosc":
+ tdSql.execute(f'drop table if exists {stb}')
+ elif type == "restful":
+ self.restApiPost(f"drop table if exists {stb}")
- query_sql = "show tables"
- res_row_list = tdSql.query(query_sql, True)
- tb_list = map(lambda x: x[0], res_row_list)
- for tb in tb_list:
- tdSql.execute(f'drop table if exists {tb}')
+ def dateToTs(self, datetime_input):
+ return int(time.mktime(time.strptime(datetime_input, "%Y-%m-%d %H:%M:%S.%f")))
def getLongName(self, len, mode = "mixed"):
"""
@@ -47,6 +117,52 @@ class TDCom:
chars = ''.join(random.choice(string.ascii_letters.lower() + string.digits) for i in range(len))
return chars
+ def restartTaosd(self, index=1, db_name="db"):
+ tdDnodes.stop(index)
+ tdDnodes.startWithoutSleep(index)
+ tdSql.execute(f"use {db_name}")
+
+ def typeof(self, variate):
+ v_type=None
+ if type(variate) is int:
+ v_type = "int"
+ elif type(variate) is str:
+ v_type = "str"
+ elif type(variate) is float:
+ v_type = "float"
+ elif type(variate) is bool:
+ v_type = "bool"
+ elif type(variate) is list:
+ v_type = "list"
+ elif type(variate) is tuple:
+ v_type = "tuple"
+ elif type(variate) is dict:
+ v_type = "dict"
+ elif type(variate) is set:
+ v_type = "set"
+ return v_type
+
+ def splitNumLetter(self, input_mix_str):
+ nums, letters = "", ""
+ for i in input_mix_str:
+ if i.isdigit():
+ nums += i
+ elif i.isspace():
+ pass
+ else:
+ letters += i
+ return nums, letters
+
+ def smlPass(self, func):
+ smlChildTableName = "no"
+ def wrapper(*args):
+ # if tdSql.getVariable("smlChildTableName")[0].upper() == "ID":
+ if smlChildTableName.upper() == "ID":
+ return func(*args)
+ else:
+ pass
+ return wrapper
+
def close(self):
self.cursor.close()
diff --git a/tests/pytest/util/dnodes.py b/tests/pytest/util/dnodes.py
index 2869cd6fdfedb9b080f21c7a61f106aec8609f8b..e8d01de3e5a6a5943472778453d3be28f758f18c 100644
--- a/tests/pytest/util/dnodes.py
+++ b/tests/pytest/util/dnodes.py
@@ -17,6 +17,10 @@ import os.path
import platform
import subprocess
from time import sleep
+import base64
+import json
+import copy
+from fabric2 import Connection
from util.log import *
@@ -111,6 +115,7 @@ class TDDnode:
self.deployed = 0
self.testCluster = False
self.valgrind = 0
+ self.remoteIP = ""
self.cfgDict = {
"walLevel": "2",
"fsync": "1000",
@@ -137,8 +142,9 @@ class TDDnode:
"telemetryReporting": "0"
}
- def init(self, path):
+ def init(self, path, remoteIP = ""):
self.path = path
+ self.remoteIP = remoteIP
def setTestCluster(self, value):
self.testCluster = value
@@ -162,6 +168,24 @@ class TDDnode:
def addExtraCfg(self, option, value):
self.cfgDict.update({option: value})
+ def remoteExec(self, updateCfgDict, execCmd):
+ remote_conn = Connection(self.remoteIP, port=22, user='root', connect_kwargs={'password':'123456'})
+ remote_top_dir = '~/test'
+ valgrindStr = ''
+ if (self.valgrind==1):
+ valgrindStr = '-g'
+ remoteCfgDict = copy.deepcopy(updateCfgDict)
+ if ("logDir" in remoteCfgDict):
+ del remoteCfgDict["logDir"]
+ if ("dataDir" in remoteCfgDict):
+ del remoteCfgDict["dataDir"]
+ if ("cfgDir" in remoteCfgDict):
+ del remoteCfgDict["cfgDir"]
+ remoteCfgDictStr = base64.b64encode(json.dumps(remoteCfgDict).encode()).decode()
+ execCmdStr = base64.b64encode(execCmd.encode()).decode()
+ with remote_conn.cd((remote_top_dir+sys.path[0].replace(self.path, '')).replace('\\','/')):
+ remote_conn.run("python3 ./test.py %s -d %s -e %s"%(valgrindStr,remoteCfgDictStr,execCmdStr))
+
def deploy(self, *updatecfgDict):
self.logDir = "%s/sim/dnode%d/log" % (self.path, self.index)
self.dataDir = "%s/sim/dnode%d/data" % (self.path, self.index)
@@ -229,8 +253,11 @@ class TDDnode:
self.cfg(value, key)
else:
self.addExtraCfg(key, value)
- for key, value in self.cfgDict.items():
- self.cfg(key, value)
+ if (self.remoteIP == ""):
+ for key, value in self.cfgDict.items():
+ self.cfg(key, value)
+ else:
+ self.remoteExec(self.cfgDict, "tdDnodes.deploy(%d,updateCfgDict)"%self.index)
self.deployed = 1
tdLog.debug(
@@ -268,117 +295,68 @@ class TDDnode:
tdLog.exit("dnode:%d is not deployed" % (self.index))
if self.valgrind == 0:
- cmd = "nohup %s -c %s > /dev/null 2>&1 & " % (
- binPath, self.cfgDir)
+ if platform.system().lower() == 'windows':
+ cmd = "mintty -h never -w hide %s -c %s" % (
+ binPath, self.cfgDir)
+ else:
+ cmd = "nohup %s -c %s > /dev/null 2>&1 & " % (
+ binPath, self.cfgDir)
else:
valgrindCmdline = "valgrind --log-file=\"%s/../log/valgrind.log\" --tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all -v --workaround-gcc296-bugs=yes"%self.cfgDir
- cmd = "nohup %s %s -c %s 2>&1 & " % (
- valgrindCmdline, binPath, self.cfgDir)
+ if platform.system().lower() == 'windows':
+ cmd = "mintty -h never -w hide %s %s -c %s" % (
+ valgrindCmdline, binPath, self.cfgDir)
+ else:
+ cmd = "nohup %s %s -c %s 2>&1 & " % (
+ valgrindCmdline, binPath, self.cfgDir)
print(cmd)
- if os.system(cmd) != 0:
- tdLog.exit(cmd)
- self.running = 1
- tdLog.debug("dnode:%d is running with %s " % (self.index, cmd))
- if self.valgrind == 0:
- time.sleep(0.1)
- key = 'from offline to online'
- bkey = bytes(key, encoding="utf8")
- logFile = self.logDir + "/taosdlog.0"
- i = 0
- while not os.path.exists(logFile):
- sleep(0.1)
- i += 1
- if i > 50:
- break
- popen = subprocess.Popen(
- 'tail -f ' + logFile,
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE,
- shell=True)
- pid = popen.pid
- # print('Popen.pid:' + str(pid))
- timeout = time.time() + 60 * 2
- while True:
- line = popen.stdout.readline().strip()
- if bkey in line:
- popen.kill()
- break
- if time.time() > timeout:
- tdLog.exit('wait too long for taosd start')
- tdLog.debug("the dnode:%d has been started." % (self.index))
- else:
- tdLog.debug(
- "wait 10 seconds for the dnode:%d to start." %
- (self.index))
- time.sleep(10)
-
- # time.sleep(5)
- def startWin(self):
- binPath = self.getPath("taosd.exe")
-
- if (binPath == ""):
- tdLog.exit("taosd.exe not found!")
+ if (not self.remoteIP == ""):
+ self.remoteExec(self.cfgDict, "tdDnodes.deploy(%d,updateCfgDict)\ntdDnodes.start(%d)"%(self.index, self.index))
+ self.running = 1
else:
- tdLog.info("taosd.exe found: %s" % binPath)
-
- taosadapterBinPath = self.getPath("taosadapter.exe")
- if (taosadapterBinPath == ""):
- tdLog.info("taosAdapter.exe not found!")
- else:
- tdLog.info("taosAdapter.exe found in %s" % taosadapterBuildPath)
-
- if self.deployed == 0:
- tdLog.exit("dnode:%d is not deployed" % (self.index))
-
- cmd = "mintty -h never -w hide %s -c %s" % (
- binPath, self.cfgDir)
-
- if (taosadapterBinPath != ""):
- taosadapterCmd = "mintty -h never -w hide %s --monitor.writeToTD=false " % (
- taosadapterBinPath)
- if os.system(taosadapterCmd) != 0:
- tdLog.exit(taosadapterCmd)
-
- if os.system(cmd) != 0:
- tdLog.exit(cmd)
-
- self.running = 1
- tdLog.debug("dnode:%d is running with %s " % (self.index, cmd))
- if self.valgrind == 0:
- time.sleep(0.1)
- key = 'from offline to online'
- bkey = bytes(key, encoding="utf8")
- logFile = self.logDir + "/taosdlog.0"
- i = 0
- while not os.path.exists(logFile):
- sleep(0.1)
- i += 1
- if i > 50:
- break
- popen = subprocess.Popen(
- 'tail -n +0 -f ' + logFile,
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE,
- shell=True)
- pid = popen.pid
- # print('Popen.pid:' + str(pid))
- timeout = time.time() + 60 * 2
- while True:
- line = popen.stdout.readline().strip()
- if bkey in line:
- popen.kill()
- break
- if time.time() > timeout:
- tdLog.exit('wait too long for taosd start')
- tdLog.debug("the dnode:%d has been started." % (self.index))
- else:
- tdLog.debug(
- "wait 10 seconds for the dnode:%d to start." %
- (self.index))
- time.sleep(10)
+ if os.system(cmd) != 0:
+ tdLog.exit(cmd)
+ self.running = 1
+ print("dnode:%d is running with %s " % (self.index, cmd))
+ tdLog.debug("dnode:%d is running with %s " % (self.index, cmd))
+ if self.valgrind == 0:
+ time.sleep(0.1)
+ key = 'from offline to online'
+ bkey = bytes(key, encoding="utf8")
+ logFile = self.logDir + "/taosdlog.0"
+ i = 0
+ while not os.path.exists(logFile):
+ sleep(0.1)
+ i += 1
+ if i > 50:
+ break
+ tailCmdStr = 'tail -f '
+ if platform.system().lower() == 'windows':
+ tailCmdStr = 'tail -n +0 -f '
+ popen = subprocess.Popen(
+ tailCmdStr + logFile,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ shell=True)
+ pid = popen.pid
+ # print('Popen.pid:' + str(pid))
+ timeout = time.time() + 60 * 2
+ while True:
+ line = popen.stdout.readline().strip()
+ if bkey in line:
+ popen.kill()
+ break
+ if time.time() > timeout:
+ tdLog.exit('wait too long for taosd start')
+ tdLog.debug("the dnode:%d has been started." % (self.index))
+ else:
+ tdLog.debug(
+ "wait 10 seconds for the dnode:%d to start." %
+ (self.index))
+ time.sleep(10)
def startWithoutSleep(self):
binPath = self.getPath()
@@ -402,12 +380,19 @@ class TDDnode:
print(cmd)
- if os.system(cmd) != 0:
- tdLog.exit(cmd)
+ if (self.remoteIP == ""):
+ if os.system(cmd) != 0:
+ tdLog.exit(cmd)
+ else:
+ self.remoteExec(self.cfgDict, "tdDnodes.deploy(%d,updateCfgDict)\ntdDnodes.startWithoutSleep(%d)"%(self.index, self.index))
+
self.running = 1
tdLog.debug("dnode:%d is running with %s " % (self.index, cmd))
def stop(self):
+ if (not self.remoteIP == ""):
+ self.remoteExec(self.cfgDict, "tdDnodes.stop(%d)"%self.index)
+ return
if self.valgrind == 0:
toBeKilled = "taosd"
else:
@@ -435,6 +420,9 @@ class TDDnode:
tdLog.debug("dnode:%d is stopped by kill -INT" % (self.index))
def forcestop(self):
+ if (not self.remoteIP == ""):
+ self.remoteExec(self.cfgDict, "tdDnodes.forcestop(%d)"%self.index)
+ return
if self.valgrind == 0:
toBeKilled = "taosd"
else:
@@ -499,8 +487,10 @@ class TDDnodes:
self.dnodes.append(TDDnode(9))
self.dnodes.append(TDDnode(10))
self.simDeployed = False
+ self.testCluster = False
+ self.valgrind = 0
- def init(self, path):
+ def init(self, path, remoteIP = ""):
psCmd = "ps -ef|grep -w taosd| grep -v grep| grep -v defunct | awk '{print $2}'"
processID = subprocess.check_output(psCmd, shell=True).decode("utf-8")
while(processID):
@@ -520,9 +510,9 @@ class TDDnodes:
psCmd, shell=True).decode("utf-8")
binPath = self.dnodes[0].getPath() + "/../../../"
- tdLog.debug("binPath %s" % (binPath))
+ # tdLog.debug("binPath %s" % (binPath))
binPath = os.path.realpath(binPath)
- tdLog.debug("binPath real path %s" % (binPath))
+ # tdLog.debug("binPath real path %s" % (binPath))
# cmd = "sudo cp %s/build/lib/libtaos.so /usr/local/lib/taos/" % (binPath)
# tdLog.debug(cmd)
@@ -545,7 +535,7 @@ class TDDnodes:
self.path = os.path.realpath(path)
for i in range(len(self.dnodes)):
- self.dnodes[i].init(self.path)
+ self.dnodes[i].init(self.path, remoteIP)
self.sim = TDSimClient(self.path)
def setTestCluster(self, value):
@@ -572,10 +562,7 @@ class TDDnodes:
def start(self, index):
self.check(index)
- if platform.system().lower() == 'windows':
- self.dnodes[index - 1].startWin()
- else:
- self.dnodes[index - 1].start()
+ self.dnodes[index - 1].start()
def startWithoutSleep(self, index):
self.check(index)
diff --git a/tests/pytest/util/types.py b/tests/pytest/util/types.py
new file mode 100644
index 0000000000000000000000000000000000000000..218a4770269328a5ef7161cc56c0e0dc0c420f73
--- /dev/null
+++ b/tests/pytest/util/types.py
@@ -0,0 +1,38 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+from enum import Enum
+
+class TDSmlProtocolType(Enum):
+ '''
+ Schemaless Protocol types
+ 0 - unknown
+ 1 - InfluxDB Line Protocol
+ 2 - OpenTSDB Telnet Protocl
+ 3 - OpenTSDB JSON Protocol
+ '''
+ UNKNOWN = 0
+ LINE = 1
+ TELNET = 2
+ JSON = 3
+
+class TDSmlTimestampType(Enum):
+ NOT_CONFIGURED = 0
+ HOUR = 1
+ MINUTE = 2
+ SECOND = 3
+ MILLI_SECOND = 4
+ MICRO_SECOND = 5
+ NANO_SECOND = 6
+
+
diff --git a/tests/script/tsim/stream/triggerInterval0.sim b/tests/script/tsim/stream/triggerInterval0.sim
new file mode 100644
index 0000000000000000000000000000000000000000..6f1d8f4b7bf88913239ccf1cc3a89fb1dbdf6bc9
--- /dev/null
+++ b/tests/script/tsim/stream/triggerInterval0.sim
@@ -0,0 +1,185 @@
+system sh/stop_dnodes.sh
+system sh/deploy.sh -n dnode1 -i 1
+system sh/exec.sh -n dnode1 -s start
+sleep 50
+sql connect
+
+print =============== create database
+sql create database test vgroups 1
+sql show databases
+if $rows != 3 then
+ return -1
+endi
+
+print $data00 $data01 $data02
+
+sql use test
+sql create table t1(ts timestamp, a int, b int , c int, d double);
+sql create stream streams1 trigger window_close into streamt as select _wstartts, count(*) c1, count(d) c2 , sum(a) c3 , max(b) c4, min(c) c5 from t1 interval(10s);
+
+sql insert into t1 values(1648791213001,1,2,3,1.0);
+sleep 300
+sql select * from streamt;
+if $rows != 0 then
+ print ======$rows
+ return -1
+endi
+
+sql insert into t1 values(1648791223001,2,2,3,1.1);
+sql insert into t1 values(1648791223002,2,2,3,1.1);
+sql insert into t1 values(1648791223003,2,2,3,1.1);
+sql insert into t1 values(1648791223001,2,2,3,1.1);
+sleep 300
+sql select * from streamt;
+if $rows != 1 then
+ print ======$rows
+ return -1
+endi
+
+if $data01 != 1 then
+ print ======$data01
+ return -1
+endi
+
+sql insert into t1 values(1648791233001,2,2,3,1.1);
+sleep 300
+sql select * from streamt;
+if $rows != 2 then
+ print ======$rows
+ return -1
+endi
+if $data01 != 1 then
+ print ======$data01
+ return -1
+endi
+if $data11 != 3 then
+ print ======$data11
+ return -1
+endi
+
+sql insert into t1 values(1648791223004,2,2,3,1.1);
+sql insert into t1 values(1648791223004,2,2,3,1.1);
+sql insert into t1 values(1648791223005,2,2,3,1.1);
+sleep 300
+sql select * from streamt;
+if $rows != 2 then
+ print ======$rows
+ return -1
+endi
+if $data01 != 1 then
+ print ======$data01
+ return -1
+endi
+if $data11 != 5 then
+ print ======$data11
+ return -1
+endi
+
+
+sql insert into t1 values(1648791233002,3,2,3,2.1);
+sql insert into t1 values(1648791213002,4,2,3,3.1)
+sql insert into t1 values(1648791213002,4,2,3,4.1);
+sleep 300
+sql select * from streamt;
+if $rows != 2 then
+ print ======$rows
+ return -1
+endi
+if $data01 != 2 then
+ print ======$data01
+ return -1
+endi
+if $data11 != 5 then
+ print ======$data11
+ return -1
+endi
+
+sql create table t2(ts timestamp, a int, b int , c int, d double);
+sql create stream streams2 trigger window_close watermark 20s into streamt2 as select _wstartts, count(*) c1, count(d) c2 , sum(a) c3 , max(b) c4, min(c) c5 from t2 interval(10s);
+sql insert into t2 values(1648791213000,1,2,3,1.0);
+sql insert into t2 values(1648791239999,1,2,3,1.0);
+sleep 300
+sql select * from streamt2;
+if $rows != 0 then
+ print ======$rows
+ return -1
+endi
+
+sql insert into t2 values(1648791240000,1,2,3,1.0);
+sleep 300
+sql select * from streamt2;
+if $rows != 1 then
+ print ======$rows
+ return -1
+endi
+if $data01 != 1 then
+ print ======$data01
+ return -1
+endi
+
+sql insert into t2 values(1648791250001,1,2,3,1.0) (1648791250002,1,2,3,1.0) (1648791250003,1,2,3,1.0) (1648791240000,1,2,3,1.0);
+sleep 300
+sql select * from streamt2;
+if $rows != 1 then
+ print ======$rows
+ return -1
+endi
+if $data01 != 1 then
+ print ======$data01
+ return -1
+endi
+
+sql insert into t2 values(1648791280000,1,2,3,1.0);
+sleep 300
+sql select * from streamt2;
+if $rows != 4 then
+ print ======$rows
+ return -1
+endi
+if $data01 != 1 then
+ print ======$data01
+ return -1
+endi
+if $data11 != 1 then
+ print ======$data11
+ return -1
+endi
+if $data21 != 1 then
+ print ======$data21
+ return -1
+endi
+if $data31 != 3 then
+ print ======$data31
+ return -1
+endi
+
+sql insert into t2 values(1648791250001,1,2,3,1.0) (1648791250002,1,2,3,1.0) (1648791250003,1,2,3,1.0) (1648791280000,1,2,3,1.0) (1648791280001,1,2,3,1.0) (1648791280002,1,2,3,1.0) (1648791310000,1,2,3,1.0) (1648791280001,1,2,3,1.0);
+sleep 300
+sql select * from streamt2;
+
+if $rows != 5 then
+ print ======$rows
+ return -1
+endi
+if $data01 != 1 then
+ print ======$data01
+ return -1
+endi
+if $data11 != 1 then
+ print ======$data11
+ return -1
+endi
+if $data21 != 1 then
+ print ======$data21
+ return -1
+endi
+if $data31 != 3 then
+ print ======$data31
+ return -1
+endi
+if $data41 != 3 then
+ print ======$data31
+ return -1
+endi
+
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
diff --git a/tests/script/tsim/stream/triggerSession0.sim b/tests/script/tsim/stream/triggerSession0.sim
new file mode 100644
index 0000000000000000000000000000000000000000..fb0666fdcfe847dd25a3e4eb3b66acd16ed09f63
--- /dev/null
+++ b/tests/script/tsim/stream/triggerSession0.sim
@@ -0,0 +1,105 @@
+system sh/stop_dnodes.sh
+system sh/deploy.sh -n dnode1 -i 1
+system sh/exec.sh -n dnode1 -s start
+sleep 50
+sql connect
+
+print =============== create database
+sql create database test vgroups 1
+sql show databases
+if $rows != 3 then
+ return -1
+endi
+
+print $data00 $data01 $data02
+
+sql use test
+sql create table t2(ts timestamp, a int, b int , c int, d double);
+sql create stream streams2 trigger window_close into streamt2 as select _wstartts, count(*) c1, count(d) c2 , sum(a) c3 , max(b) c4, min(c) c5 from t2 session(ts, 10s);
+
+sql insert into t2 values(1648791213000,1,2,3,1.0);
+sql insert into t2 values(1648791222999,1,2,3,1.0);
+sql insert into t2 values(1648791223000,1,2,3,1.0);
+sql insert into t2 values(1648791223001,1,2,3,1.0);
+sql insert into t2 values(1648791233001,1,2,3,1.0);
+sleep 300
+sql select * from streamt2;
+if $rows != 0 then
+ print ======$rows
+ return -1
+endi
+
+sql insert into t2 values(1648791243002,1,2,3,1.0);
+sleep 300
+sql select * from streamt2;
+if $rows != 1 then
+ print ======$rows
+ return -1
+endi
+
+if $data01 != 5 then
+ print ======$data01
+ return -1
+endi
+
+sql insert into t2 values(1648791223001,1,2,3,1.0) (1648791223002,1,2,3,1.0) (1648791222999,1,2,3,1.0);
+sleep 300
+sql select * from streamt2;
+if $rows != 1 then
+ print ======$rows
+ return -1
+endi
+
+if $data01 != 6 then
+ print ======$data01
+ return -1
+endi
+
+sql insert into t2 values(1648791233002,1,2,3,1.0);
+sleep 300
+sql select * from streamt2;
+if $rows != 1 then
+ print ======$rows
+ return -1
+endi
+
+if $data01 != 6 then
+ print ======$data01
+ return -1
+endi
+
+sql insert into t2 values(1648791253003,1,2,3,1.0);
+sleep 300
+sql select * from streamt2;
+if $rows != 1 then
+ print ======$rows
+ return -1
+endi
+
+if $data01 != 8 then
+ print ======$data01
+ return -1
+endi
+
+sql insert into t2 values(1648791243003,1,2,3,1.0) (1648791243002,1,2,3,1.0) (1648791270004,1,2,3,1.0) (1648791280005,1,2,3,1.0) (1648791290006,1,2,3,1.0);
+sleep 500
+sql select * from streamt2;
+if $rows != 3 then
+ print ======$rows
+ return -1
+endi
+
+if $data01 != 10 then
+ print ======$data01
+ return -1
+endi
+if $data11 != 1 then
+ print ======$data11
+ return -1
+endi
+if $data21 != 1 then
+ print ======$data21
+ return -1
+endi
+
+#system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
diff --git a/tests/script/unique/account/account_create.sim b/tests/script/unique/account/account_create.sim
deleted file mode 100644
index e36de29e7c5835ddc78a9f3eab4b2b4d34634c42..0000000000000000000000000000000000000000
--- a/tests/script/unique/account/account_create.sim
+++ /dev/null
@@ -1,80 +0,0 @@
-system sh/stop_dnodes.sh
-system sh/deploy.sh -n dnode1 -i 1
-system sh/cfg.sh -n dnode1 -c wallevel -v 0
-system sh/exec.sh -n dnode1 -s start
-
-sleep 2000
-sql connect
-
-print ============================ dnode1 start
-
-$i = 0
-$dbPrefix = acdb
-$tbPrefix = actb
-$db = $dbPrefix . $i
-$tb = $tbPrefix . $i
-$accountPrefix = acac
-
-print =============== step1-4
-sql show accounts
-if $rows != 1 then
- return -1
-endi
-sql show users
-if $rows != 3 then
- return -1
-endi
-
-$i = 0
-$acc = $accountPrefix . $i
-sql_error create account $acc PASS pass123
-sql create account $acc PASS 'pass123'
-#sql create account $acc PASS 'pass123' -x step1
-# return -1
-#step1:
-sql create user $acc PASS 'pass123' -x step2
- return -1
-step2:
-
-sql show accounts
-if $rows != 2 then
- return -1
-endi
-
-sql show users
-if $rows != 3 then
- return -1
-endi
-
-print =============== step5-6
-sql drop account $acc
-sql drop account $acc -x step5
- return -1
-step5:
-sql show accounts
-if $rows != 1 then
- return -1
-endi
-sql show users
-if $rows != 3 then
- return -1
-endi
-
-print =============== step7
-sql create account $acc PASS 'pass123'
-#sql create account $acc PASS 'pass123' -x step7
-# return -1
-#step7:
-
-sql show accounts
-if $rows != 2 then
- return -1
-endi
-
-sql drop account $acc
-sql show accounts
-if $rows != 1 then
- return -1
-endi
-
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
diff --git a/tests/script/unique/account/account_delete.sim b/tests/script/unique/account/account_delete.sim
deleted file mode 100644
index d99a8b559dc6e04e4d6996e042d915671781d699..0000000000000000000000000000000000000000
--- a/tests/script/unique/account/account_delete.sim
+++ /dev/null
@@ -1,99 +0,0 @@
-system sh/stop_dnodes.sh
-system sh/deploy.sh -n dnode1 -i 1
-system sh/cfg.sh -n dnode1 -c wallevel -v 0
-system sh/exec.sh -n dnode1 -s start
-
-sleep 2000
-sql connect
-
-print ============= step1
-sql create account oroot pass 'taosdata'
-sql close
-sql connect oroot
-sleep 2000
-
-print ============= step2
-sql create user read pass 'taosdata'
-sql create user write pass 'taosdata'
-
-sql create database d1
-sql create database d2
-sql create table d1.t1 (ts timestamp, i int)
-sql create table d2.t2 (ts timestamp, i int)
-sql insert into d1.t1 values(now, 1)
-sql insert into d2.t2 values(now, 1)
-sql insert into d2.t2 values(now+1s, 2)
-
-sql show databases
-if $rows != 2 then
- return -1
-endi
-sql show users
-if $rows != 4 then
- return -1
-endi
-sql select * from d1.t1
-if $rows != 1 then
- return -1
-endi
-sql select * from d2.t2
-if $rows != 2 then
- return -1
-endi
-
-print ============= step3
-sql close
-sql connect
-sleep 2000
-
-sql show databases
-if $rows != 0 then
- return -1
-endi
-sql show dnodes
-print $data00 $data01 $data02 $data03
-if $data02 != 2 then
- return -1
-endi
-sql drop account oroot
-
-print ============= step4
-$x = 0
-show4:
- $x = $x + 1
- sleep 2000
- if $x == 10 then
- return -1
- endi
-
-sql show dnodes
-if $data02 != 0 then
- goto show4
-endi
-
-print ============= step5
-sql create account oroot pass 'taosdata'
-
-sql close
-sql connect oroot
-sleep 2000
-
-sql show databases
-if $rows != 0 then
- return -1
-endi
-sql show users
-if $rows != 2 then
- return -1
-endi
-
-sql close
-sql connect
-sleep 2000
-sql drop account oroot
-sql show accounts
-if $rows != 1 then
- return -1
-endi
-
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
diff --git a/tests/script/unique/account/account_len.sim b/tests/script/unique/account/account_len.sim
deleted file mode 100644
index f8379bdf954bdde122e68585b973f4957ef15739..0000000000000000000000000000000000000000
--- a/tests/script/unique/account/account_len.sim
+++ /dev/null
@@ -1,92 +0,0 @@
-system sh/stop_dnodes.sh
-system sh/deploy.sh -n dnode1 -i 1
-system sh/cfg.sh -n dnode1 -c wallevel -v 0
-system sh/exec.sh -n dnode1 -s start
-
-sleep 2000
-sql connect
-
-$i = 0
-$dbPrefix = aldb
-$tbPrefix = altb
-$db = $dbPrefix . $i
-$tb = $tbPrefix . $i
-
-print =============== step1
-sql drop account ac -x step0
- return -1
-step0:
-
-sql create account PASS 123 -x step1
- return -1
-step1:
-
-sql show accounts
-if $rows != 1 then
- return -1
-endi
-
-print =============== step2
-sql drop account a -x step2
-step2:
-sql create account a PASS '123'
-sql show accounts
-if $rows != 2 then
- return -1
-endi
-
-sql drop account a
-sql show accounts
-if $rows != 1 then
- return -1
-endi
-
-print =============== step3
-sql drop account abc01234567890123456789 -x step3
-step3:
-sql create account abc01234567890123456789 PASS '123'
-sql show accounts
-if $rows != 2 then
- return -1
-endi
-
-sql drop account abc01234567890123456789
-sql show accounts
-if $rows != 1 then
- return -1
-endi
-
-print =============== step4
-sql create account abcd01234567890123456789012345689012345 PASS '123' -x step4
- return -1
-step4:
-sql show accounts
-if $rows != 1 then
- return -1
-endi
-
-print =============== step5
-sql drop account 123 -x step5
-step5:
-sql create account 123 pass '123' -x step51
- return -1
-step51:
-
-sql create account a123 PASS '123'
-sql show accounts
-if $rows != 2 then
- return -1
-endi
-
-sql drop account a123
-sql show accounts
-if $rows != 1 then
- return -1
-endi
-
-sql show users
-if $rows != 3 then
- return -1
-endi
-
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
diff --git a/tests/script/unique/account/authority.sim b/tests/script/unique/account/authority.sim
deleted file mode 100644
index 8f2408de1429a8ea34add79e335f6bf7f42ca2b0..0000000000000000000000000000000000000000
--- a/tests/script/unique/account/authority.sim
+++ /dev/null
@@ -1,346 +0,0 @@
-system sh/stop_dnodes.sh
-system sh/deploy.sh -n dnode1 -i 1
-system sh/cfg.sh -n dnode1 -c wallevel -v 0
-system sh/exec.sh -n dnode1 -s start
-
-sleep 2000
-sql connect
-
-print ============= step1
-
-sql create user read pass 'taosdata'
-sql create user write pass 'taosdata'
-sql create user manage pass 'taosdata'
-
-sql create user a PASS 'ade' privilege -x step11
- return -1
-step11:
-
-sql create user a PASS 'ade' privilege a -x step12
- return -1
-step12:
-
-sql create user a PASS 'ade' privilege read -x step13
- return -1
-step13:
-
-sql show accounts
-if $rows != 1 then
- return -1
-endi
-sql show users
-if $rows != 6 then
- return -1
-endi
-
-sql alter user read privilege read
-sql alter user write privilege write
-sql_error alter user manage privilege super
-
-print ============= step2
-sql close
-sql connect write
-sleep 2000
-
-sql create database d1
-sql create database d2
-sql create table d1.t1 (ts timestamp, i int)
-sql create table d2.t2 (ts timestamp, i int)
-sql insert into d1.t1 values(now, 1)
-sql insert into d2.t2 values(now, 1)
-sql insert into d2.t2 values(now+1s, 2)
-
-sql show accounts
-if $rows != 1 then
- return -1
-endi
-sql show users
-if $rows != 6 then
- return -1
-endi
-sql show databases
-if $rows != 2 then
- return -1
-endi
-sql select * from d1.t1
-if $rows != 1 then
- return -1
-endi
-sql select * from d2.t2
-if $rows != 2 then
- return -1
-endi
-
-sql create account t1 pass 'taosdata' -x step21
- return -1
-step21:
-
-sql create user t1 pass 'taosdata' -x step22
- return -1
-step22:
-
-sql alter user read pass 'taosdata' -x step23
- return -1
-step23:
-
-sql create dnode $hostname2 -x step24
- return -1
-step24:
-
-sql drop dnode $hostname2 -x step25
- return -1
-step25:
-
-sql create mnode 192.168.0.2 -x step26
- return -1
-step26:
-
-sql drop mnode 192.168.0.2 -x step27
- return -1
-step27:
-
-sql drop account root -x step28
- return -1
-step28:
-
-sql alter user write pass 'taosdata'
-
-print ============= step3
-sql close
-sql connect read
-sleep 2000
-
-sql create database d3 -x step31
- return -1
-step31:
-
-sql create table d1.t3 (ts timestamp, i int) -x step32
- return -1
-step32:
-
-#sql insert into d1.t1 values(now, 2) -x step33
-# return -1
-#step33:
-
-sql show accounts
-if $rows != 1 then
- return -1
-endi
-sql show users
-if $rows != 6 then
- return -1
-endi
-sql show databases
-if $rows != 2 then
- return -1
-endi
-sql select * from d1.t1
-if $rows != 1 then
- return -1
-endi
-
-sql select * from d2.t2
-if $rows != 2 then
- return -1
-endi
-
-sql sql create account t1 pass 'taosdata' -x step34
- return -1
-step34:
-
-sql sql create user t1 pass 'taosdata' -x step35
- return -1
-step35:
-
-print ============= step4
-sql close
-sql connect manage
-sleep 2000
-
-sql create database d3
-sql create database d4
-sql create table d3.t3 (ts timestamp, i int)
-sql create table d4.t4 (ts timestamp, i int)
-
-sql show accounts
-if $rows != 1 then
- return -1
-endi
-sql show users
-if $rows != 6 then
- return -1
-endi
-sql show databases
-if $rows != 4 then
- return -1
-endi
-sql select * from d1.t1
-if $rows != 1 then
- return -1
-endi
-sql select * from d2.t2
-if $rows != 2 then
- return -1
-endi
-
-sql create account other pass 'taosdata' -x step41
- return -1
-step41:
-
-sql close
-sql connect
-sleep 2000
-sql create account other pass 'taosdata'
-
-print ============= step5
-sql close
-sql connect other
-sleep 2000
-sql create user read pass 'taosdata' -x step51
- return -1
-step51:
-sql create other write pass 'taosdata' -x step52
- return -1
-step52:
-
-sql create user oread pass 'taosdata'
-sql create user owrite pass 'taosdata'
-sql create user omanage pass 'taosdata'
-
-sql show users
-print show users $rows
-if $rows != 5 then
- return -1
-endi
-
-sql alter user oread privilege read
-sql alter user owrite privilege write
-sql alter user oroot privilege super -x step53
- return -1
-step53:
-sql alter user read privilege read -x step54
- return -1
-step54:
-
-print ============= step6
-sql close
-sql connect owrite
-sleep 2000
-sql reset query cache
-sleep 1000
-sql create database d1
-sql create database d3
-sql create table d1.t1 (ts timestamp, i int)
-sql create table d3.t3 (ts timestamp, i int)
-sql insert into d1.t1 values(now, 11)
-sql insert into d3.t3 values(now, 11)
-sql insert into d3.t3 values(now+1s, 12)
-
-sql show databases
-if $rows != 2 then
- return -1
-endi
-sql select * from d1.t1
-if $rows != 1 then
- return -1
-endi
-sql select * from d2.t2 -x step6
- return -1
-step6:
-sql select * from d3.t3
-if $rows != 2 then
- return -1
-endi
-
-sql sql create account t1 pass 'taosdata' -x step61
- return -1
-step61:
-
-sql sql create user t1 pass 'taosdata' -x step62
- return -1
-step62:
-
-print ============= step7
-sql close
-sql connect oread
-sleep 2000
-
-sql create database d7 -x step71
- return -1
-step71:
-
-sql show databases
-if $rows != 2 then
- return -1
-endi
-sql select * from d1.t1
-if $rows != 1 then
- return -1
-endi
-sql select * from d2.t2 -x step72
- return -1
-step72:
-sql select * from d3.t3
-if $rows != 2 then
- return -1
-endi
-
-sql sql create account t1 pass 'taosdata' -x step73
- return -1
-step73:
-
-sql sql create user t1 pass 'taosdata' -x step74
- return -1
-step74:
-
-print ============= step8
-sql close
-sql connect omanage
-sleep 2000
-
-sql create account t1 pass 'taosdata' -x step81
- return -1
-step81:
-
-sql create database d4
-sql create table d4.t4 (ts timestamp, i int)
-
-sql show databases
-if $rows != 3 then
- return -1
-endi
-sql select * from d1.t1
-if $rows != 1 then
- return -1
-endi
-sql select * from d2.t2 -x step82
- return -1
-step82:
-sql select * from d3.t3
-if $rows != 2 then
- return -1
-endi
-
-print ============= step9
-sql close
-sql connect
-sleep 2000
-sql show databases
-if $rows != 4 then
- return -1
-endi
-
-sql drop account other
-sql drop user read
-sql drop user manage
-sql drop user write
-
-sql close
-sql connect
-sleep 2000
-sql drop database d1
-sql drop database d2
-sql drop database d3
-sql drop database d4
-
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
diff --git a/tests/script/unique/account/basic.sim b/tests/script/unique/account/basic.sim
deleted file mode 100644
index 00e706a4482d9fa57ed2f97a9995ce84d3667fa1..0000000000000000000000000000000000000000
--- a/tests/script/unique/account/basic.sim
+++ /dev/null
@@ -1,46 +0,0 @@
-system sh/stop_dnodes.sh
-system sh/deploy.sh -n dnode1 -i 1
-system sh/exec.sh -n dnode1 -s start
-sleep 2000
-sql connect
-
-print =============== show accounts
-sql show accounts
-if $rows != 1 then
- return -1
-endi
-
-print $data00 $data01 $data02
-
-print =============== create account1
-sql create account account1 PASS 'account1'
-sql show accounts
-if $rows != 2 then
- return -1
-endi
-
-print $data00 $data01 $data02
-print $data10 $data11 $data22
-
-print =============== create account2
-sql create account account2 PASS 'account2'
-sql show accounts
-if $rows != 3 then
- return -1
-endi
-
-print $data00 $data01 $data02
-print $data10 $data11 $data22
-print $data20 $data11 $data22
-
-print =============== drop account1
-sql drop account account1
-sql show accounts
-if $rows != 2 then
- return -1
-endi
-
-print $data00 $data01 $data02
-print $data10 $data11 $data22
-
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
diff --git a/tests/script/unique/account/paras.sim b/tests/script/unique/account/paras.sim
deleted file mode 100644
index 102f5b6a381e5100b35a4f0125b1318bcb8b1d76..0000000000000000000000000000000000000000
--- a/tests/script/unique/account/paras.sim
+++ /dev/null
@@ -1,114 +0,0 @@
-system sh/stop_dnodes.sh
-system sh/deploy.sh -n dnode1 -i 1
-system sh/exec.sh -n dnode1 -s start
-sleep 2000
-sql connect
-
-print =============== show accounts
-sql show accounts
-if $rows != 1 then
- return -1
-endi
-
-print $data00 $data01 $data02 $data03 $data04 $data05 $data06
-if $data00 != root then
- return -1
-endi
-if $data02 != 3/128 then
- return -1
-endi
-if $data03 != 0/128 then
- return -1
-endi
-if $data04 != 0/2147483647 then
- return -1
-endi
-if $data05 != 0/1000 then
- return -1
-endi
-if $data06 != 0.000/unlimited then
- return -1
-endi
-
-print =============== create account
-sql create account hou pass "hou" tseries 80000 storage 10737418240 streams 10 qtime 3600 dbs 3 users 3 conns 10
-sql show accounts
-if $rows != 2 then
- return -1
-endi
-
-print $data10 $data11 $data12 $data13 $data14 $data15 $data16
-if $data10 != hou then
- return -1
-endi
-if $data12 != 2/3 then
- return -1
-endi
-if $data13 != 0/3 then
- return -1
-endi
-if $data14 != 0/80000 then
- return -1
-endi
-if $data15 != 0/10 then
- return -1
-endi
-if $data16 != 0.000/10.000 then
- return -1
-endi
-
-print =============== alter account
-sql alter account hou pass "hou" tseries 8000 streams 10 dbs 5 users 5
-sql show accounts
-if $rows != 2 then
- return -1
-endi
-
-print $data10 $data11 $data12 $data13 $data14 $data15 $data16
-if $data10 != hou then
- return -1
-endi
-if $data12 != 2/5 then
- return -1
-endi
-if $data13 != 0/5 then
- return -1
-endi
-if $data14 != 0/8000 then
- return -1
-endi
-if $data15 != 0/10 then
- return -1
-endi
-if $data16 != 0.000/10.000 then
- return -1
-endi
-
-print =============== alter account
-sql create account hou pass "hou" tseries 8000 streams 10 dbs 5 users 6
-sql show accounts
-if $rows != 2 then
- return -1
-endi
-
-print $data10 $data11 $data12 $data13 $data14 $data15 $data16
-if $data10 != hou then
- return -1
-endi
-if $data12 != 2/6 then
- return -1
-endi
-if $data13 != 0/5 then
- return -1
-endi
-if $data14 != 0/8000 then
- return -1
-endi
-if $data15 != 0/10 then
- return -1
-endi
-if $data16 != 0.000/10.000 then
- return -1
-endi
-
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
diff --git a/tests/script/unique/account/pass_alter.sim b/tests/script/unique/account/pass_alter.sim
deleted file mode 100644
index 8b857b014a292d53536c5acf2a00daa15be11239..0000000000000000000000000000000000000000
--- a/tests/script/unique/account/pass_alter.sim
+++ /dev/null
@@ -1,116 +0,0 @@
-system sh/stop_dnodes.sh
-system sh/deploy.sh -n dnode1 -i 1
-system sh/cfg.sh -n dnode1 -c wallevel -v 0
-system sh/exec.sh -n dnode1 -s start
-
-sleep 2000
-sql connect
-
-print ============= step1
-sql create user read pass 'taosdata1'
-sql create user write pass 'taosdata1'
-
-sql alter user read pass 'taosdata'
-sql alter user write pass 'taosdata'
-
-sql show accounts
-if $rows != 1 then
- return -1
-endi
-sql show users
-if $rows != 5 then
- return -1
-endi
-
-print ============= step2
-sql close
-sql connect read
-sleep 2000
-sql alter user read pass 'taosdata'
-sql alter user write pass 'taosdata1' -x step2
- return -1
-step2:
-
-
-print ============= step3
-sql close
-sql connect write
-sleep 2000
-sql alter user write pass 'taosdata'
-sql alter user read pass 'taosdata' -x step3
- return -1
-step3:
-
-print ============= step4
-sql close
-sleep 1000
-sql connect
-sleep 2000
-sql create account oroot pass 'taosdata'
-sql show accounts
-if $rows != 2 then
- return -1
-endi
-sql show users
-if $rows != 5 then
- return -1
-endi
-
-print ============= step5
-sql close
-sql connect oroot
-sleep 2000
-
-sql create user oread pass 'taosdata1'
-sql create user owrite pass 'taosdata1'
-sql alter user oread pass 'taosdata'
-sql alter user owrite pass 'taosdata'
-
-sql create user read pass 'taosdata1' -x step51
- return -1
-step51:
-sql alter user read pass 'taosdata1' -x step52
- return -1
-step52:
-
-sql show accounts -x step53
- return -1
-step53:
-sql show users
-print show users $rows
-if $rows != 4 then
- return -1
-endi
-
-print ============= step6
-sql close
-sql connect oread
-sleep 2000
-sql alter user oread pass 'taosdata'
-sql alter user owrite pass 'taosdata1' -x step6
- return -1
-step6:
-
-
-print ============= step7
-sql close
-sql connect owrite
-sleep 2000
-sql alter user owrite pass 'taosdata'
-sql alter user oread pass 'taosdata' -x step7
- return -1
-step7:
-
-print ============= step8
-sql close
-sql connect
-sleep 2000
-sql alter user oread pass 'taosdata'
-sql alter user owrite pass 'taosdata'
-sql alter user oroot pass 'taosdata'
-
-sql drop account oroot
-sql drop user read
-sql drop user write
-
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
diff --git a/tests/script/unique/account/pass_len.sim b/tests/script/unique/account/pass_len.sim
deleted file mode 100644
index f4ceb76f7b8b41873217bd11ae2c3d385386b0e9..0000000000000000000000000000000000000000
--- a/tests/script/unique/account/pass_len.sim
+++ /dev/null
@@ -1,81 +0,0 @@
-system sh/stop_dnodes.sh
-system sh/deploy.sh -n dnode1 -i 1
-system sh/cfg.sh -n dnode1 -c wallevel -v 0
-system sh/exec.sh -n dnode1 -s start
-
-sleep 2000
-sql connect
-
-$i = 0
-$dbPrefix = apdb
-$tbPrefix = aptb
-$db = $dbPrefix . $i
-$tb = $tbPrefix . $i
-$userPrefix = apusr
-
-print =============== step1
-$i = 0
-$user = $userPrefix . $i
-
-sql drop user $user -x step11
- return -1
-step11:
-
-sql create user $user PASS -x step12
- return -1
-step12:
-
-sql create user $user PASS 'taosdata'
-
-sql show users
-if $rows != 4 then
- return -1
-endi
-
-print =============== step2
-$i = 1
-$user = $userPrefix . $i
-sql drop user $user -x step2
-step2:
-sql create user $user PASS '1'
-sql show users
-if $rows != 5 then
- return -1
-endi
-
-print =============== step3
-$i = 2
-$user = $userPrefix . $i
-sql drop user $user -x step3
-step3:
-
-sql create user $user PASS 'abc0123456789'
-sql show users
-if $rows != 6 then
- return -1
-endi
-
-print =============== step4
-$i = 3
-$user = $userPrefix . $i
-sql create user $user PASS 'abcd012345678901234567891234567890' -x step4
- return -1
-step4:
-sql show users
-if $rows != 6 then
- return -1
-endi
-
-$i = 0
-while $i < 3
- $user = $userPrefix . $i
- sql drop user $user
- $i = $i + 1
-endw
-
-sql show users
-if $rows != 3 then
- return -1
-endi
-
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
diff --git a/tests/script/unique/account/testSuite.sim b/tests/script/unique/account/testSuite.sim
deleted file mode 100644
index 9d4141cfe0c086f9a8863fffb00a9cb0f410e265..0000000000000000000000000000000000000000
--- a/tests/script/unique/account/testSuite.sim
+++ /dev/null
@@ -1,11 +0,0 @@
-run unique/account/account_create.sim
-run unique/account/account_delete.sim
-run unique/account/account_len.sim
-run unique/account/authority.sim
-run unique/account/basic.sim
-run unique/account/paras.sim
-run unique/account/pass_alter.sim
-run unique/account/pass_len.sim
-run unique/account/usage.sim
-run unique/account/user_create.sim
-run unique/account/user_len.sim
diff --git a/tests/script/unique/account/usage.sim b/tests/script/unique/account/usage.sim
deleted file mode 100644
index 3b9c20b159a6237f469fc1e48b5b3a3f4ca5f7b8..0000000000000000000000000000000000000000
--- a/tests/script/unique/account/usage.sim
+++ /dev/null
@@ -1,154 +0,0 @@
-system sh/stop_dnodes.sh
-system sh/deploy.sh -n dnode1 -i 1
-system sh/exec.sh -n dnode1 -s start
-#system sh/exec.sh -n monitor -s 1
-system sh/exec.sh -n monitorInterval -s 1
-sleep 2000
-sql connect
-
-print =============== show accounts
-
-print =============== create account
-sql alter account root pass "taosdata" tseries 8000 streams 10 dbs 5 users 5
-sql show accounts
-print $data00 $data01 $data02 $data03 $data04 $data05 $data06
-if $data00 != root then
- return -1
-endi
-if $data02 != 3/5 then
- return -1
-endi
-if $data03 != 0/5 then
- return -1
-endi
-if $data04 != 0/8000 then
- return -1
-endi
-if $data05 != 0/10 then
- return -1
-endi
-if $data06 != 0.000/unlimited then
- return -1
-endi
-
-print =============== check usage account
-sql create database d1 wal 2
-sql create database d2 wal 2
-sql create database d3 wal 2
-sql create database d4 wal 2
-sql create database d5 wal 2
-
-sql create table d1.t1 (ts timestamp, i int);
-sql create user u1 pass "u1"
-
-sql show accounts
-print $data10 $data11 $data12 $data13 $data14 $data15 $data16
-if $data00 != root then
- return -1
-endi
-if $data02 != 4/5 then
- return -1
-endi
-if $data03 != 5/5 then
- return -1
-endi
-if $data04 != 1/8000 then
- return -1
-endi
-if $data05 != 0/10 then
- return -1
-endi
-if $data06 != 0.000/unlimited then
- return -1
-endi
-
-print =============== step2
-sql alter account root pass "taosdata" tseries 10 storage 1073741824 streams 10 dbs 5 users 5
-sql show accounts
-print $data00 $data01 $data02 $data03 $data04 $data05 $data06
-if $data00 != root then
- return -1
-endi
-if $data02 != 4/5 then
- return -1
-endi
-if $data03 != 5/5 then
- return -1
-endi
-if $data04 != 1/10 then
- return -1
-endi
-if $data05 != 0/10 then
- return -1
-endi
-if $data06 != 0.000/1.000 then
- return -1
-endi
-
-print =============== step3
-sql alter account root pass "taosdata" tseries 10 storage 16 streams 10 dbs 5 users 5
-sql show accounts
-print $data00 $data01 $data02 $data03 $data04 $data05 $data06
-if $data00 != root then
- return -1
-endi
-if $data02 != 4/5 then
- return -1
-endi
-if $data03 != 5/5 then
- return -1
-endi
-if $data04 != 1/10 then
- return -1
-endi
-if $data05 != 0/10 then
- return -1
-endi
-if $data06 != 0.000/0.000 then
- return -1
-endi
-
-print =============== step4
-sql insert into d1.t1 values(now + 1s, 1)
-sql insert into d1.t1 values(now + 2s, 2)
-
-sleep 10000
-print no write auth
-sql_error insert into d1.t1 values(now + 3s, 2)
-sql_error insert into d1.t1 values(now + 4s, 2)
-
-sql alter account root pass "taosdata" tseries 10 storage 36 streams 10 dbs 5 users 5
-sleep 10000
-print has write auth
-sql insert into d1.t1 values(now + 5s, 1)
-sql insert into d1.t1 values(now + 6s, 2)
-
-# no write auth
-sleep 10000
-print no write auth
-sql_error insert into d1.t1 values(now + 7s, 2)
-sql_error insert into d1.t1 values(now + 8s, 2)
-
-print =============== step5
-sql alter account root pass "taosdata" tseries 10 storage 3600 streams 10 dbs 5 users 5 state all
-sleep 10000
-
-sql insert into d1.t1 values(now + 11s, 1)
-sql insert into d1.t1 values(now + 12s, 2)
-
-sql alter account root pass "taosdata" tseries 10 storage 3600 streams 10 dbs 5 users 5 state no
-sleep 10000
-print no write auth
-sql_error insert into d1.t1 values(now + 13s, 2)
-sql_error insert into d1.t1 values(now + 14s, 2)
-
-sql alter account root pass "taosdata" tseries 10 storage 3600 streams 10 dbs 5 users 5 state all
-sleep 10000
-print has write auth
-sql insert into d1.t1 values(now + 15s, 1)
-sql insert into d1.t1 values(now + 16s, 2)
-
-print =============== check grant
-sql_error create database d6
-
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
diff --git a/tests/script/unique/account/user_create.sim b/tests/script/unique/account/user_create.sim
deleted file mode 100644
index e54a380f0dbef8107de452354ea01bc58262d548..0000000000000000000000000000000000000000
--- a/tests/script/unique/account/user_create.sim
+++ /dev/null
@@ -1,84 +0,0 @@
-system sh/stop_dnodes.sh
-system sh/deploy.sh -n dnode1 -i 1
-system sh/cfg.sh -n dnode1 -c wallevel -v 0
-system sh/exec.sh -n dnode1 -s start
-
-sleep 2000
-sql connect
-
-print =============== step1
-sql show users
-if $rows != 3 then
- return -1
-endi
-
-sql create user read PASS 'pass123'
-sql create user read PASS 'pass123' -x step1
- return -1
-step1:
-
-sql show users
-if $rows != 4 then
- return -1
-endi
-
-sql alter user read PASS 'taosdata'
-
-print =============== step2
-sql close
-sql connect read
-sleep 2000
-
-sql alter user read PASS 'taosdata'
-
-print =============== step3
-sql drop user read -x step31
- return -1
-step31:
-sql drop user _root -x step32
- return -1
-step32:
-sql drop user monitor -x step33
- return -1
-step33:
-
-print =============== step4
-sql close
-sql connect
-sleep 2000
-
-sql alter user read privilege read
-sql show users
-print $data1_read
-if $data1_read != readable then
- return -1
-endi
-
-sql_error alter user read privilege super
-sql show users
-print $data1_read
-if $data1_read != readable then
- return -1
-endi
-
-sql alter user read privilege write
-sql show users
-if $data1_read != writable then
- return -1
-endi
-
-sql alter user read privilege 1 -x step43
- return -1
-step43:
-
-sql drop user _root -x step41
- return -1
-step41:
-
-sql drop user monitor -x step42
- return -1
-step42:
-
-sql drop user read
-
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
diff --git a/tests/script/unique/account/user_len.sim b/tests/script/unique/account/user_len.sim
deleted file mode 100644
index b8d448f0ffc9e43cbc0f0a5a849bda215e72e790..0000000000000000000000000000000000000000
--- a/tests/script/unique/account/user_len.sim
+++ /dev/null
@@ -1,94 +0,0 @@
-system sh/stop_dnodes.sh
-system sh/deploy.sh -n dnode1 -i 1
-system sh/cfg.sh -n dnode1 -c wallevel -v 0
-system sh/exec.sh -n dnode1 -s start
-
-sleep 2000
-sql connect
-
-$i = 0
-$dbPrefix = lm_us_db
-$tbPrefix = lm_us_tb
-$db = $dbPrefix . $i
-$tb = $tbPrefix . $i
-
-print =============== step1
-sql drop user ac -x step0
- return -1
-step0:
-
-sql create user PASS '123' -x step1
- return -1
-step1:
-
-sql show users
-if $rows != 3 then
- return -1
-endi
-
-print =============== step2
-sql drop user a -x step2
-step2:
-sleep 1000
-sql create user a PASS '123'
-sql show users
-if $rows != 4 then
- return -1
-endi
-
-sql drop user a
-sql show users
-if $rows != 3 then
- return -1
-endi
-
-print =============== step3
-sql drop user abc01234567890123456789 -x step3
-step3:
-
-sql create user abc01234567890123456789 PASS '123'
-sql show users
-if $rows != 4 then
- return -1
-endi
-
-sql drop user abc01234567890123456789
-sql show users
-if $rows != 3 then
- return -1
-endi
-
-print =============== step4
-sql create user abcd0123456789012345678901234567890111 PASS '123' -x step4
- return -1
-step4:
-sql show users
-if $rows != 3 then
- return -1
-endi
-
-print =============== step5
-sql drop user 123 -x step5
-step5:
-sql create user 123 PASS '123' -x step61
- return -1
-step61:
-
-sql create user a123 PASS '123'
-sql show users
-if $rows != 4 then
- return -1
-endi
-
-sql drop user a123
-sql show users
-if $rows != 3 then
- return -1
-endi
-
-sql show accounts
-if $rows != 1 then
- return -1
-endi
-
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
diff --git a/tests/script/unique/http/admin.sim b/tests/script/unique/http/admin.sim
deleted file mode 100644
index ae206744c4e93ab7cebd5f4db7d8d4b84ad5ebbb..0000000000000000000000000000000000000000
--- a/tests/script/unique/http/admin.sim
+++ /dev/null
@@ -1,192 +0,0 @@
-system sh/stop_dnodes.sh
-
-system sh/deploy.sh -n dnode1 -i 1
-system sh/cfg.sh -n dnode1 -c http -v 1
-system sh/cfg.sh -n dnode1 -c wallevel -v 0
-#system sh/cfg.sh -n dnode1 -c adminRowLimit -v 10
-system sh/cfg.sh -n dnode1 -c httpDebugFlag -v 135
-system sh/exec.sh -n dnode1 -s start
-
-sql connect
-sleep 2000
-
-print ============================ dnode1 start
-
-print =============== step0 - prepare data
-sql create database d1
-sql use d1
-
-sql create table table_admin (ts timestamp, i int)
-
-sql insert into table_admin values('2017-12-25 21:28:41.022', 1)
-sql insert into table_admin values('2017-12-25 21:28:42.022', 2)
-sql insert into table_admin values('2017-12-25 21:28:43.022', 3)
-sql insert into table_admin values('2017-12-25 21:28:44.022', 4)
-sql insert into table_admin values('2017-12-25 21:28:45.022', 5)
-sql insert into table_admin values('2017-12-25 21:28:46.022', 6)
-sql insert into table_admin values('2017-12-25 21:28:47.022', 7)
-sql insert into table_admin values('2017-12-25 21:28:48.022', 8)
-sql insert into table_admin values('2017-12-25 21:28:49.022', 9)
-sql insert into table_admin values('2017-12-25 21:28:50.022', 10)
-
-print =============== step1 - login
-
-system_content curl 127.0.0.1:7111/admin/
-print 1-> $system_content
-if $system_content != @{"status":"error","code":4357,"desc":"no auth info input"}@ then
- print actual: $system_content
- return -1
-endi
-
-system_content curl 127.0.0.1:7111/admin/xx
-print 2-> $system_content
-if $system_content != @{"status":"error","code":4357,"desc":"no auth info input"}@ then
- return -1
-endi
-
-system_content curl 127.0.0.1:7111/admin/login
-print 3-> $system_content
-if $system_content != @{"status":"error","code":4357,"desc":"no auth info input"}@ then
- return -1
-endi
-
-system_content curl 127.0.0.1:7111/admin/login/root
-print 4-> $system_content
-if $system_content != @{"status":"error","code":4357,"desc":"no auth info input"}@ then
- return -1
-endi
-
-system_content curl 127.0.0.1:7111/admin/login/root/123
-print 5-> $system_content
-if $system_content != @{"status":"error","code":3,"desc":"Authentication failure"}@ then
- return -1
-endi
-
-system_content curl 127.0.0.1:7111/admin/login/root/123/1/1/3
-print 6-> $system_content
-if $system_content != @{"status":"error","code":3,"desc":"Authentication failure"}@ then
- return -1
-endi
-
-system_content curl -H 'Authorization: Bearer eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.' -d 'show databases' 127.0.0.1:7111/admin/login/root/1
-print 7-> $system_content
-if $system_content != @{"status":"error","code":4387,"desc":"invalid format of Authorization"}@ then
- return -1
-endi
-
-system_content curl -H 'Authorization: Taosd eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJpc3MiOiJ3d3cudGFvc2RhdGEuY29tIiwicGFzcyI6InRhb3NkYXRhIiwic3ViIjoicm9vdCJ9.xPv3b5odlR7YF8G_QWASjIRbMtA5v4ItToJ35fFgi' 127.0.0.1:7111/admin/login/root/1
-print 8-> $system_content
-if $system_content != @{"status":"error","code":4387,"desc":"invalid format of Authorization"}@ then
- return -1
-endi
-
-sleep 2000
-system_content curl 127.0.0.1:7111/admin/login/root/taosdata
-print 9 -----> $system_content
-
-if $system_content != {"status":"succ","code":0,"desc":"/KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04"} then
- return -1
-endi
-
-#system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'show databases' 127.0.0.1:7111/admin/login/root/1
-#print 10-> $system_content
-#if $system_content != @{"status":"error","code":29,"desc":"failed to connect to server"}@ then
-# return -1
-#endi
-
-print =============== step2 - logout
-
-system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' 127.0.0.1:7111/admin/logout
-print 10 -----> $system_content
-
-if $system_content != @{"status":"succ","code":0,"desc":"logout success"}@ then
- return -1
-endi
-
-system_content curl 127.0.0.1:7111/admin/logout
-print 11 -----> $system_content
-
-if $system_content != @{"status":"error","code":4357,"desc":"no auth info input"}@ then
- return -1
-endi
-
-print =============== step3 - info
-
-system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' 127.0.0.1:7111/admin/info
-print curl 127.0.0.1:7111/admin/info -----> $system_content
-if $system_content != {"status":"succ","data":[{"dbs":1,"tables":1,"users":3,"mnodes":1,"dnodes":1}]} then
- return -1
-endi
-
-print =============== step4 - meta
-
-system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'show mnodes' 127.0.0.1:7111/admin/meta
-print curl 127.0.0.1:7111/admin/meta -----> $system_content
-#if $system_content != @{"status":"succ","head":["column type","column name","column bytes"],"data":[["binary","IP",16],["timestamp","created time",8],["binary","status",10],["binary","role",10],["binary","public ip",16]],"rows":5}@ then
-# return -1
-#endi
-
-print =============== step5 - query data
-
-system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'select * from d1.table_admin' 127.0.0.1:7111/admin/sql
-print curl 127.0.0.1:7111/admin/all -----> $system_content
-if $system_content != @{"status":"succ","head":["ts","i"],"data":[["2017-12-25 21:28:41.022",1],["2017-12-25 21:28:42.022",2],["2017-12-25 21:28:43.022",3],["2017-12-25 21:28:44.022",4],["2017-12-25 21:28:45.022",5],["2017-12-25 21:28:46.022",6],["2017-12-25 21:28:47.022",7],["2017-12-25 21:28:48.022",8],["2017-12-25 21:28:49.022",9],["2017-12-25 21:28:50.022",10]],"rows":10}@ then
- return -1
-endi
-
-system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'select * from d1.table_admin' 127.0.0.1:7111/admin/sql
-print curl 127.0.0.1:7111/admin/sql -----> $system_content
-if $system_content != @{"status":"succ","head":["ts","i"],"data":[["2017-12-25 21:28:41.022",1],["2017-12-25 21:28:42.022",2],["2017-12-25 21:28:43.022",3],["2017-12-25 21:28:44.022",4],["2017-12-25 21:28:45.022",5],["2017-12-25 21:28:46.022",6],["2017-12-25 21:28:47.022",7],["2017-12-25 21:28:48.022",8],["2017-12-25 21:28:49.022",9],["2017-12-25 21:28:50.022",10]],"rows":10}@ then
- return -1
-endi
-
-print =============== step6 - insert data
-system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d "insert into d1.table_admin values('2017-12-25 21:28:51.022', 11)" 127.0.0.1:7111/admin/sql
-print curl 127.0.0.1:7111/admin/sql -----> $system_content
-if $system_content != @{"status":"succ","head":["affect_rows"],"data":[[1]],"rows":1}@ then
- return -1
-endi
-
-system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'select * from d1.table_admin' 127.0.0.1:7111/admin/all
-print curl 127.0.0.1:7111/admin/all -----> $system_content
-if $system_content != @{"status":"succ","head":["ts","i"],"data":[["2017-12-25 21:28:41.022",1],["2017-12-25 21:28:42.022",2],["2017-12-25 21:28:43.022",3],["2017-12-25 21:28:44.022",4],["2017-12-25 21:28:45.022",5],["2017-12-25 21:28:46.022",6],["2017-12-25 21:28:47.022",7],["2017-12-25 21:28:48.022",8],["2017-12-25 21:28:49.022",9],["2017-12-25 21:28:50.022",10],["2017-12-25 21:28:51.022",11]],"rows":11}@ then
- print actual: $system_content
- print expect =======> {"status":"succ","head":["ts","i"],"data":[["2017-12-25 21:28:41.022",1],["2017-12-25 21:28:42.022",2],["2017-12-25 21:28:43.022",3],["2017-12-25 21:28:44.022",4],["2017-12-25 21:28:45.022",5],["2017-12-25 21:28:46.022",6],["2017-12-25 21:28:47.022",7],["2017-12-25 21:28:48.022",8],["2017-12-25 21:28:49.022",9],["2017-12-25 21:28:50.022",10],["2017-12-25 21:28:51.022",11]],"rows":11}
- return -1
-endi
-
-#system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'select * from d1.table_admin' 127.0.0.1:7111/admin/sql
-#print curl 127.0.0.1:7111/admin/sql -----> $system_content
-#if $system_content != @{"status":"succ","head":["ts","i"],"data":[["2017-12-25 21:28:51.022",11],["2017-12-25 21:28:50.022",10],["2017-12-25 21:28:49.022",9],["2017-12-25 21:28:48.022",8],["2017-12-25 21:28:47.022",7],["2017-12-25 21:28:46.022",6],["2017-12-25 21:28:45.022",5],["2017-12-25 21:28:44.022",4],["2017-12-25 21:28:43.022",3],["2017-12-25 21:28:42.022",2]],"rows":10}@ then
-# return -1
-#endi
-
-system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' 127.0.0.1:7111/admin/info
-print curl 127.0.0.1:7111/admin/info -----> $system_content
-if $system_content != {"status":"succ","data":[{"dbs":1,"tables":1,"users":3,"mnodes":1,"dnodes":1}]} then
- return -1
-endi
-
-print =============== step7 - use dbs
-
-system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'use d1;' 127.0.0.1:7111/admin/all
-print 23-> $system_content
-if $system_content != @{"status":"error","code":4360,"desc":"no need to execute use db cmd"}@ then
- return -1
-endi
-
-print =============== step8 - monitor dbs
-#system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'show dnodes;show mnodes;' 127.0.0.1:7111/admin/sqls
-#print 24-> $system_content
-#if $system_content != @[{"status":"succ","head":["IP","created time","open vnodes","free vnodes","status","balance state"],"data":[["127.0.0.1","2018-09-04 #11:16:13.985",1,3,"ready","balanced"]],"rows":1},{"status":"succ","head":["IP","created time","status","role"],"data":[["127.0.0.1","2018-09-04 11:16:13.371","serving","master"]],"rows":1}]@ then
-# return -1
-# endi
-
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
-system sh/exec.sh -n dnode2 -s stop -x SIGINT
-system sh/exec.sh -n dnode3 -s stop -x SIGINT
-system sh/exec.sh -n dnode4 -s stop -x SIGINT
-system sh/exec.sh -n dnode5 -s stop -x SIGINT
-system sh/exec.sh -n dnode6 -s stop -x SIGINT
-system sh/exec.sh -n dnode7 -s stop -x SIGINT
-system sh/exec.sh -n dnode8 -s stop -x SIGINT
\ No newline at end of file
diff --git a/tests/script/unique/http/opentsdb.sim b/tests/script/unique/http/opentsdb.sim
deleted file mode 100644
index 7d1e6b03d4547a6b0b2a6a7857000a8a6518a002..0000000000000000000000000000000000000000
--- a/tests/script/unique/http/opentsdb.sim
+++ /dev/null
@@ -1,247 +0,0 @@
-system sh/stop_dnodes.sh
-
-system sh/deploy.sh -n dnode1 -i 1
-system sh/cfg.sh -n dnode1 -c http -v 1
-system sh/cfg.sh -n dnode1 -c wallevel -v 0
-system sh/exec.sh -n dnode1 -s start
-
-sleep 2000
-sql connect
-
-print ============================ dnode1 start
-
-print =============== step1 - parse
-system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400,"value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/
-print $system_content
-if $system_content != @{"status":"error","code":4496,"desc":"database name can not be null"}@ then
- return -1
-endi
-
-system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400,"value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db123456789012345678901234567890db
-print $system_content
-if $system_content != @{"status":"error","code":4497,"desc":"database name too long"}@ then
- return -1
-endi
-
-system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d '[{"metric": "sys_cpu","timestamp": 1346846400,"value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/
-print $system_content
-if $system_content != @{"status":"error","code":4496,"desc":"database name can not be null"}@ then
- return -1
-endi
-
-system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d '[{"metric": "sys_cpu","timestamp": 1346846400,"value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put2
-print $system_content
-if $system_content != @{"status":"error","code":4354,"desc":"invalid url format"}@ then
- return -1
-endi
-
-system_content curl -u root:taosdata -d '[]' 127.0.0.1:7111/opentsdb/db/put
-print $system_content
-if $system_content != @{"status":"error","code":4499,"desc":"metrics size is 0"}@ then
- return -1
-endi
-
-system_content curl -u root:taosdata -d '[' 127.0.0.1:7111/opentsdb/db/put
-print $system_content
-if $system_content != @{"status":"error","code":4498,"desc":"invalid opentsdb json fromat"}@ then
- return -1
-endi
-
-system_content curl -u root:taosdata -d '{}' 127.0.0.1:7111/opentsdb/db/put
-print $system_content
-if $system_content != @{"status":"error","code":4499,"desc":"metrics size is 0"}@ then
- return -1
-endi
-
-system_content curl -u root:taosdata -d '[{}]' 127.0.0.1:7111/opentsdb/db/put
-print $system_content
-
-if $system_content != @{"status":"error","code":4501,"desc":"metric name not find"}@ then
- return -1
-endi
-
-system_content curl -u root:taosdata -d '[{"metric": 1,"timestamp": 1346846400,"value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put
-print $system_content
-if $system_content != @{"status":"error","code":4502,"desc":"metric name type should be string"}@ then
- return -1
-endi
-
-system_content curl -u root:taosdata -d '[{"metric": "","timestamp": 1346846400,"value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put
-print $system_content
-if $system_content != @{"status":"error","code":4503,"desc":"metric name length is 0"}@ then
- return -1
-endi
-
-system_content curl -u root:taosdata -d '[{"metric": "ab1234567890123456789012345678ab1234567890123456789012345678","timestamp": 1346846400,"value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put
-print $system_content
-if $system_content != @{"errors":[{"datapoint":{"metric":"ab1234567890123456789012345678ab1234567890123456789012345678","stable":"ab1234567890123456789012345678ab1234567890123456789012345678_d_bbb","table":"ab1234567890123456789012345678ab1234567890123456789012345678_d_bbb_lga_1_web01","timestamp":1346846400,"value":18.000000,"tags":{"dc":"lga","group1":"1","host":"web01"},"status":"error","code":1547,"desc":"Timestamp data out of range"}}],"failed":1,"success":0,"affected_rows":0}@ then
- return -1
-endi
-
-system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put
-print $system_content
-if $system_content != @{"status":"error","code":4505,"desc":"timestamp not find"}@ then
- return -1
-endi
-
-system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": "2","value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put
-print $system_content
-if $system_content != @{"status":"error","code":4506,"desc":"timestamp type should be integer"}@ then
- return -1
-endi
-
-system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": -1,"value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put
-print $system_content
-if $system_content != @{"status":"error","code":4507,"desc":"timestamp value smaller than 0"}@ then
- return -1
-endi
-
-system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put
-print $system_content
-if $system_content != @{"status":"error","code":4517,"desc":"value not find"}@ then
- return -1
-endi
-
-#######
-
-system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400,"value": 18}]' 127.0.0.1:7111/opentsdb/db/put
-print $system_content
-if $system_content != @{"status":"error","code":4508,"desc":"tags not find"}@ then
- return -1
-endi
-
-system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400,"value": 18,"tags": {}}]' 127.0.0.1:7111/opentsdb/db/put
-print $system_content
-if $system_content != @{"status":"error","code":4509,"desc":"tags size is 0"}@ then
- return -1
-endi
-
-system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400,"value": 18,"tags": 0}]' 127.0.0.1:7111/opentsdb/db/put
-print $system_content
-if $system_content != @{"status":"error","code":4509,"desc":"tags size is 0"}@ then
- return -1
-endi
-
-system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400,"value": 18,"tags": {"host": "web01","group1": "1","group1": "1","group1": "1","group1": "1","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put
-print $system_content
-if $system_content != @{"errors":[{"datapoint":{"metric":"sys_cpu","stable":"sys_cpu_d_bbbbbbb","table":"sys_cpu_d_bbbbbbb_lga_1_1_1_1_1_web01","timestamp":1346846400,"value":18.000000,"tags":{"dc":"lga","group1":"1","group1":"1","group1":"1","group1":"1","group1":"1","host":"web01"},"status":"error","code":866,"desc":"failed to create table"}}],"failed":1,"success":0,"affected_rows":0}@ then
- return -1
-endi
-
-system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400,"value": 18,"tags": {"": "web01"}}]' 127.0.0.1:7111/opentsdb/db/put
-print $system_content
-if $system_content != @{"status":"error","code":4512,"desc":"tag name is null"}@ then
- return -1
-endi
-
-system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400,"value": 18,"tags": {"host01123456789001123456789001123456789001123456789001123456789001123456789": "01"}}]' 127.0.0.1:7111/opentsdb/db/put
-print $system_content
-if $system_content != @{"status":"error","code":4513,"desc":"tag name length too long"}@ then
- return -1
-endi
-
-system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400,"value": 18,"tags": {"host": "web011234567890011234567890011234567890011234567890011234567890011234567890011234567890011234567890"}}]' 127.0.0.1:7111/opentsdb/db/put
-print $system_content
-if $system_content != @{"status":"error","code":4516,"desc":"tag value can not more than 64"}@ then
- return -1
-endi
-
-system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400,"value": 18,"tags": {"host": ""}}]' 127.0.0.1:7111/opentsdb/db/put
-print $system_content
-if $system_content != @{"status":"error","code":4515,"desc":"tag value is null"}@ then
- return -1
-endi
-
-sleep 2000
-
-print =============== step2 - insert single data
-system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400000,"value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put
-print $system_content
-if $system_content != @{"errors":[{"datapoint":{"metric":"sys_cpu","stable":"sys_cpu_d_bbb","table":"sys_cpu_d_bbb_lga_1_web01","timestamp":1346846400000,"value":18.000000,"tags":{"dc":"lga","group1":"1","host":"web01"},"affected_rows":1,"status":"succ"}}],"failed":0,"success":1,"affected_rows":1}@ then
- return -1
-endi
-
-system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400000,"value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put
-print $system_content
-if $system_content != @{"errors":[{"datapoint":{"metric":"sys_cpu","stable":"sys_cpu_d_bbb","table":"sys_cpu_d_bbb_lga_1_web01","timestamp":1346846400000,"value":18.000000,"tags":{"dc":"lga","group1":"1","host":"web01"},"affected_rows":1,"status":"succ"}}],"failed":0,"success":1,"affected_rows":1}@ then
- return -1
-endi
-
-system_content curl -u root:taosdata -d 'select * from db.sys_cpu_d_bbb_lga_1_web01' 127.0.0.1:7111/rest/sql/
-print $system_content
-if $system_content != @{"status":"succ","head":["ts","value"],"column_meta":[["ts",9,8],["value",7,8]],"data":[["2012-09-05 20:00:00.000",18.000000000]],"rows":1}@ then
- return -1
-endi
-
-print =============== step3 - multi-query data
-system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846405000,"value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}},{"metric": "sys_cpu","timestamp": 1346846402000,"value": 18,"tags": {"host": "web02","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put
-
-print $system_content
-
-if $system_content != @{"errors":[{"datapoint":{"metric":"sys_cpu","stable":"sys_cpu_d_bbb","table":"sys_cpu_d_bbb_lga_1_web01","timestamp":1346846405000,"value":18.000000,"tags":{"dc":"lga","group1":"1","host":"web01"},"affected_rows":1,"status":"succ"}},{"datapoint":{"metric":"sys_cpu","stable":"sys_cpu_d_bbb","table":"sys_cpu_d_bbb_lga_1_web02","timestamp":1346846402000,"value":18.000000,"tags":{"dc":"lga","group1":"1","host":"web02"},"affected_rows":1,"status":"succ"}}],"failed":0,"success":2,"affected_rows":2}@ then
- return -1
-endi
-
-system_content curl -u root:taosdata -d 'select * from db.sys_cpu_d_bbb_lga_1_web01' 127.0.0.1:7111/rest/sql/
-
-print $system_content
-
-if $system_content != @{"status":"succ","head":["ts","value"],"column_meta":[["ts",9,8],["value",7,8]],"data":[["2012-09-05 20:00:00.000",18.000000000],["2012-09-05 20:00:05.000",18.000000000]],"rows":2}@ then
- return -1
-endi
-
-system_content curl -u root:taosdata -d 'select count(*) from db.sys_cpu_d_bbb' 127.0.0.1:7111/rest/sql/
-
-print $system_content
-
-if $system_content != @{"status":"succ","head":["count(*)"],"column_meta":[["count(*)",5,8]],"data":[[3]],"rows":1}@ then
- return -1
-endi
-
-print =============== step4 - summary-put data
-system_content curl -u root:taosdata -d '[{"metric": "sys_mem","timestamp": 1346846400000,"value": 8,"tags": {"host": "web01","group1": "1","dc": "lga"}},{"metric": "sys_mem","timestamp": 1346846405000,"value": 9,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put?details=false
-
-print $system_content
-
-if $system_content != @{"failed":0,"success":2}@ then
- return -1
-endi
-
-system_content curl -u root:taosdata -d 'select * from db.sys_mem_d_bbb_lga_1_web01' 127.0.0.1:7111/rest/sql/
-
-print $system_content
-
-if $system_content != @{"status":"succ","head":["ts","value"],"column_meta":[["ts",9,8],["value",7,8]],"data":[["2012-09-05 20:00:00.000",8.000000000],["2012-09-05 20:00:05.000",9.000000000]],"rows":2}@ then
- return -1
-endi
-
-system_content curl -u root:taosdata -d 'select count(*) from db.sys_mem_d_bbb' 127.0.0.1:7111/rest/sql/
-
-print $system_content
-
-if $system_content != @{"status":"succ","head":["count(*)"],"column_meta":[["count(*)",5,8]],"data":[[2]],"rows":1}@ then
- return -1
-endi
-
-print =============== step5 - prepare data
-
-system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846402000,"value": 19,"tags": {"host": "web01","group1": "1","dc": "lga"}},{"metric": "sys_cpu","timestamp": 1346846402,"value": 19,"tags": {"host": "web02","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put
-system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846403000,"value": 20,"tags": {"host": "web01","group1": "1","dc": "lga"}},{"metric": "sys_cpu","timestamp": 1346846403,"value": 20,"tags": {"host": "web02","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put
-system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846404000,"value": 21,"tags": {"host": "web01","group1": "1","dc": "lga"}},{"metric": "sys_cpu","timestamp": 1346846404,"value": 21,"tags": {"host": "web02","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put
-system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846405000,"value": 22,"tags": {"host": "web01","group1": "1","dc": "lga"}},{"metric": "sys_cpu","timestamp": 1346846405,"value": 22,"tags": {"host": "web02","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put
-system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846406000,"value": 23,"tags": {"host": "web01","group1": "1","dc": "lga"}},{"metric": "sys_cpu","timestamp": 1346846406,"value": 23,"tags": {"host": "web02","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put
-
-system_content curl -u root:taosdata -d 'select count(*) from db.sys_cpu_d_bbb' 127.0.0.1:7111/rest/sql/
-print $system_content
-if $system_content != @{"status":"succ","head":["count(*)"],"column_meta":[["count(*)",5,8]],"data":[[7]],"rows":1}@ then
- return -1
-endi
-
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
-system sh/exec.sh -n dnode2 -s stop -x SIGINT
-system sh/exec.sh -n dnode3 -s stop -x SIGINT
-system sh/exec.sh -n dnode4 -s stop -x SIGINT
-system sh/exec.sh -n dnode5 -s stop -x SIGINT
-system sh/exec.sh -n dnode6 -s stop -x SIGINT
-system sh/exec.sh -n dnode7 -s stop -x SIGINT
-system sh/exec.sh -n dnode8 -s stop -x SIGINT
diff --git a/tests/script/unique/http/testSuite.sim b/tests/script/unique/http/testSuite.sim
deleted file mode 100644
index 3a9753e744b84bfea28e40e8b3554cb82d2ebb40..0000000000000000000000000000000000000000
--- a/tests/script/unique/http/testSuite.sim
+++ /dev/null
@@ -1,2 +0,0 @@
-run unique/http/admin.sim
-run general/http/opentsdb.sim
\ No newline at end of file
diff --git a/tests/script/unique/mnode/mgmt20.sim b/tests/script/unique/mnode/mgmt20.sim
deleted file mode 100644
index 8945cffab226ab5dc379057d55e562f5c3ed9cfa..0000000000000000000000000000000000000000
--- a/tests/script/unique/mnode/mgmt20.sim
+++ /dev/null
@@ -1,88 +0,0 @@
-system sh/stop_dnodes.sh
-system sh/deploy.sh -n dnode1 -i 1
-system sh/deploy.sh -n dnode2 -i 2
-
-system sh/cfg.sh -n dnode1 -c numOfMnodes -v 2
-system sh/cfg.sh -n dnode2 -c numOfMnodes -v 2
-
-system sh/cfg.sh -n dnode1 -c monitor -v 1
-system sh/cfg.sh -n dnode2 -c monitor -v 1
-
-print ============== step1
-system sh/exec.sh -n dnode1 -s start
-system sh/exec.sh -n dnode2 -s start
-sql connect
-
-print ============== step2
-sql create dnode $hostname2
-
-$x = 0
-show2:
- $x = $x + 1
- sleep 1000
- if $x == 20 then
- return -1
- endi
-
-sql show mnodes
-print dnode1 ==> $data2_1
-print dnode2 ==> $data2_2
-if $data2_1 != master then
- goto show2
-endi
-if $data2_2 != slave then
- goto show2
-endi
-
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
-system sh/exec.sh -n dnode2 -s stop -x SIGINT
-
-print ============== step3
-system sh/exec.sh -n dnode2 -s start
-sleep 10000
-
-system sh/exec.sh -n dnode1 -s start
-sql connect
-
-print =============== step4
-sql select * from log.dn1
-$d1_first = $rows
-sql select * from log.dn2
-$d2_first = $rows
-
-$x = 0
-show4:
- $x = $x + 1
- sleep 1000
- if $x == 20 then
- return -1
- endi
-
-sql show mnodes
-print dnode1 ==> $data2_1
-print dnode2 ==> $data2_2
-if $data2_1 != master then
- goto show4
-endi
-if $data2_2 != slave then
- goto show4
-endi
-
-sleep 2000
-sql select * from log.dn1
-$d1_second = $rows
-sql select * from log.dn2
-$d2_second = $rows
-
-print dnode1 $d1_first $d1_second
-print dnode2 $d2_first $d2_second
-if $d1_first >= $d1_second then
- return -1
-endi
-
-if $d2_first >= $d2_second then
- return -1
-endi
-
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
-system sh/exec.sh -n dnode2 -s stop -x SIGINT
\ No newline at end of file
diff --git a/tests/script/unique/mnode/mgmt21.sim b/tests/script/unique/mnode/mgmt21.sim
deleted file mode 100644
index 8409383309dbde5500b9719cd64fd74ca5e384b2..0000000000000000000000000000000000000000
--- a/tests/script/unique/mnode/mgmt21.sim
+++ /dev/null
@@ -1,44 +0,0 @@
-system sh/stop_dnodes.sh
-system sh/deploy.sh -n dnode1 -i 1
-system sh/deploy.sh -n dnode2 -i 2
-
-system sh/cfg.sh -n dnode1 -c numOfMnodes -v 2
-system sh/cfg.sh -n dnode2 -c numOfMnodes -v 2
-
-print ============== step1
-system sh/exec.sh -n dnode2 -s start
-sleep 10000
-
-system sh/exec.sh -n dnode1 -s start
-sql connect
-
-sql show mnodes
-print dnode1 ==> $data2_1
-print dnode2 ==> $data2_2
-if $data2_1 != master then
- return -1
-endi
-
-print ============== step2
-sql create dnode $hostname2
-
-$x = 0
-show2:
- $x = $x + 1
- sleep 2000
- if $x == 5 then
- return -1
- endi
-
-sql show mnodes -x show2
-print dnode1 ==> $data2_1
-print dnode2 ==> $data2_2
-if $data2_1 != master then
- goto show2
-endi
-if $data2_2 != slave then
- goto show2
-endi
-
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
-system sh/exec.sh -n dnode2 -s stop -x SIGINT
\ No newline at end of file
diff --git a/tests/script/unique/mnode/mgmt22.sim b/tests/script/unique/mnode/mgmt22.sim
deleted file mode 100644
index 399805312ba905d55bceffe011cfe074c831684e..0000000000000000000000000000000000000000
--- a/tests/script/unique/mnode/mgmt22.sim
+++ /dev/null
@@ -1,114 +0,0 @@
-system sh/stop_dnodes.sh
-system sh/deploy.sh -n dnode1 -i 1
-system sh/deploy.sh -n dnode2 -i 2
-system sh/deploy.sh -n dnode3 -i 3
-
-system sh/cfg.sh -n dnode1 -c numOfMnodes -v 2
-system sh/cfg.sh -n dnode2 -c numOfMnodes -v 2
-system sh/cfg.sh -n dnode3 -c numOfMnodes -v 2
-
-print ============== step1
-system sh/exec.sh -n dnode1 -s start
-sql connect
-
-sql show mnodes
-print dnode1 ==> $data2_1
-print dnode2 ==> $data2_2
-if $data2_1 != master then
- return -1
-endi
-
-print ============== step2
-system sh/exec.sh -n dnode2 -s start
-sql create dnode $hostname2
-
-$x = 0
-show2:
- $x = $x + 1
- sleep 1000
- if $x == 20 then
- return -1
- endi
-
-sql show mnodes
-print dnode1 ==> $data2_1
-print dnode2 ==> $data2_2
-if $data2_1 != master then
- goto show2
-endi
-if $data2_2 != slave then
- goto show2
-endi
-
-print ============== step3
-sql_error drop dnode $hostname1 -x error1
-print should not drop master
-
-print ============== step4
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
-sleep 3000
-sql_error show mnodes
-print error of no master
-
-print ============== step5
-sql_error drop dnode $hostname1
-print error of no master
-
-print ============== step6
-system sh/exec.sh -n dnode1 -s start
-sleep 2000
-sql close
-sql connect
-
-$x = 0
-show6:
- $x = $x + 1
- sleep 1000
- if $x == 20 then
- return -1
- endi
-
-sql show mnodes -x show6
-print dnode1 ==> $data2_1
-print dnode2 ==> $data2_2
-if $data2_1 != master then
- goto show6
-endi
-if $data2_2 != slave then
- goto show6
-endi
-
-print ============== step7
-system sh/exec.sh -n dnode3 -s start
-sql create dnode $hostname3
-
-$x = 0
-show7:
- $x = $x + 1
- sleep 1000
- if $x == 20 then
- return -1
- endi
-
-sql show mnodes
-print dnode1 ==> $data2_1
-print dnode2 ==> $data2_2
-print dnode3 ==> $data2_3
-if $data2_1 != master then
- goto show7
-endi
-if $data2_2 != slave then
- goto show7
-endi
-if $data3_3 != null then
- goto show7
-endi
-
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
-system sh/exec.sh -n dnode2 -s stop -x SIGINT
-system sh/exec.sh -n dnode3 -s stop -x SIGINT
-system sh/exec.sh -n dnode4 -s stop -x SIGINT
-system sh/exec.sh -n dnode5 -s stop -x SIGINT
-system sh/exec.sh -n dnode6 -s stop -x SIGINT
-system sh/exec.sh -n dnode7 -s stop -x SIGINT
-system sh/exec.sh -n dnode8 -s stop -x SIGINT
\ No newline at end of file
diff --git a/tests/script/unique/mnode/mgmt23.sim b/tests/script/unique/mnode/mgmt23.sim
deleted file mode 100644
index 19c7b4ba762d4bf5a73c10c1afa39e927c7a1c91..0000000000000000000000000000000000000000
--- a/tests/script/unique/mnode/mgmt23.sim
+++ /dev/null
@@ -1,141 +0,0 @@
-system sh/stop_dnodes.sh
-system sh/deploy.sh -n dnode1 -i 1
-system sh/deploy.sh -n dnode2 -i 2
-system sh/deploy.sh -n dnode3 -i 3
-
-system sh/cfg.sh -n dnode1 -c numOfMnodes -v 2
-system sh/cfg.sh -n dnode2 -c numOfMnodes -v 2
-system sh/cfg.sh -n dnode3 -c numOfMnodes -v 2
-
-print ============== step1
-system sh/exec.sh -n dnode1 -s start
-sql connect
-
-sql show mnodes
-print dnode1 ==> $data2_1
-print dnode2 ==> $data2_2
-if $data2_1 != master then
- return -1
-endi
-
-print ============== step2
-system sh/exec.sh -n dnode2 -s start
-sql create dnode $hostname2
-
-$x = 0
-show2:
- $x = $x + 1
- sleep 1000
- if $x == 20 then
- return -1
- endi
-
-sql show mnodes
-print dnode1 ==> $data2_1
-print dnode2 ==> $data2_2
-if $data2_1 != master then
- goto show2
-endi
-if $data2_2 != slave then
- goto show2
-endi
-
-print ============== step3
-system sh/exec.sh -n dnode3 -s start
-sql create dnode $hostname3
-sleep 8000
-
-sql show mnodes
-$dnode1Role = $data2_1
-$dnode2Role = $data2_2
-$dnode3Role = $data2_3
-print dnode1 ==> $dnode1Role
-print dnode2 ==> $dnode2Role
-print dnode3 ==> $dnode3Role
-
-if $dnode1Role != master then
- return -1
-endi
-if $dnode2Role != slave then
- return -1
-endi
-if $dnode3Role != null then
- return -1
-endi
-
-print ============== step4
-sql drop dnode $hostname2
-
-$x = 0
-step4:
- $x = $x + 1
- sleep 1000
- if $x == 20 then
- return -1
- endi
-
-sql show mnodes
-$dnode1Role = $data2_1
-$dnode2Role = $data2_2
-$dnode3Role = $data2_3
-print dnode1 ==> $dnode1Role
-print dnode2 ==> $dnode2Role
-print dnode3 ==> $dnode3Role
-
-if $dnode1Role != master then
- goto step4
-endi
-if $dnode2Role != null then
- goto step4
-endi
-if $dnode3Role != slave then
- goto step4
-endi
-
-system sh/exec.sh -n dnode2 -s stop
-
-print ============== step5
-sleep 2000
-sql create dnode $hostname2
-system sh/deploy.sh -n dnode2 -i 2
-system sh/cfg.sh -n dnode2 -c numOfMnodes -v 2
-system sh/exec.sh -n dnode2 -s start
-
-$x = 0
-step5:
- $x = $x + 1
- sleep 1000
- if $x == 20 then
- return -1
- endi
-
-sql show mnodes
-$dnode1Role = $data2_1
-$dnode2Role = $data2_2
-$dnode3Role = $data2_3
-print dnode1 ==> $dnode1Role
-print dnode2 ==> $dnode2Role
-print dnode3 ==> $dnode3Role
-
-if $dnode1Role != master then
- goto step5
-endi
-if $dnode2Role != null then
- goto step5
-endi
-if $dnode3Role != slave then
- goto step5
-endi
-
-print ============== step6
-system sh/exec.sh -n dnode1 -s stop
-sql_error show mnodes
-
-print ============== step7
-sql_error drop dnode $hostname1
-
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
-system sh/exec.sh -n dnode2 -s stop -x SIGINT
-system sh/exec.sh -n dnode3 -s stop -x SIGINT
-system sh/exec.sh -n dnode4 -s stop -x SIGINT
-system sh/exec.sh -n dnode5 -s stop -x SIGINT
\ No newline at end of file
diff --git a/tests/script/unique/mnode/mgmt24.sim b/tests/script/unique/mnode/mgmt24.sim
deleted file mode 100644
index a7bcc59ac0bfa6163d1e2fddfd3a817b102bfa3c..0000000000000000000000000000000000000000
--- a/tests/script/unique/mnode/mgmt24.sim
+++ /dev/null
@@ -1,84 +0,0 @@
-system sh/stop_dnodes.sh
-system sh/deploy.sh -n dnode1 -i 1
-system sh/deploy.sh -n dnode2 -i 2
-system sh/deploy.sh -n dnode3 -i 3
-
-system sh/cfg.sh -n dnode1 -c numOfMnodes -v 2
-system sh/cfg.sh -n dnode2 -c numOfMnodes -v 2
-system sh/cfg.sh -n dnode3 -c numOfMnodes -v 2
-
-print ============== step1
-system sh/exec.sh -n dnode1 -s start
-sql connect
-
-sql show mnodes
-print dnode1 ==> $data2_1
-print dnode2 ==> $data2_2
-if $data2_1 != master then
- return -1
-endi
-
-print ============== step2
-system sh/exec.sh -n dnode2 -s start
-sql create dnode $hostname2
-
-$x = 0
-show2:
- $x = $x + 1
- sleep 2000
- if $x == 10 then
- return -1
- endi
-
-sql show mnodes
-print dnode1 ==> $data2_1
-print dnode2 ==> $data2_2
-if $data2_1 != master then
- goto show2
-endi
-if $data2_2 != slave then
- goto show2
-endi
-
-print ============== step3
-system sh/exec.sh -n dnode1 -s stop
-sleep 2000
-sql_error show mnodes
-
-print ============== step4
-sql_error drop dnode $hostname1
-
-print ============== step5
-system sh/exec.sh -n dnode1 -s start
-sql_error create dnode $hostname1
-
-sql close
-sql connect
-
-$x = 0
-step5:
- $x = $x + 1
- sleep 2000
- if $x == 10 then
- return -1
- endi
-
-sql show mnodes -x step5
-
-print dnode1 ==> $data2_1
-print dnode2 ==> $data2_2
-if $data2_1 != master then
- goto step5
-endi
-if $data2_2 != slave then
- goto step5
-endi
-
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
-system sh/exec.sh -n dnode2 -s stop -x SIGINT
-system sh/exec.sh -n dnode3 -s stop -x SIGINT
-system sh/exec.sh -n dnode4 -s stop -x SIGINT
-system sh/exec.sh -n dnode5 -s stop -x SIGINT
-system sh/exec.sh -n dnode6 -s stop -x SIGINT
-system sh/exec.sh -n dnode7 -s stop -x SIGINT
-system sh/exec.sh -n dnode8 -s stop -x SIGINT
\ No newline at end of file
diff --git a/tests/script/unique/mnode/mgmt25.sim b/tests/script/unique/mnode/mgmt25.sim
deleted file mode 100644
index 9cca9c844806b138faf52186ffc3184d4876a1d6..0000000000000000000000000000000000000000
--- a/tests/script/unique/mnode/mgmt25.sim
+++ /dev/null
@@ -1,95 +0,0 @@
-system sh/stop_dnodes.sh
-system sh/deploy.sh -n dnode1 -i 1
-system sh/deploy.sh -n dnode2 -i 2
-system sh/deploy.sh -n dnode3 -i 3
-
-system sh/cfg.sh -n dnode1 -c numOfMnodes -v 2
-system sh/cfg.sh -n dnode2 -c numOfMnodes -v 2
-system sh/cfg.sh -n dnode3 -c numOfMnodes -v 2
-
-print ============== step1
-system sh/exec.sh -n dnode1 -s start
-sql connect
-
-sql show mnodes
-print dnode1 ==> $data2_1
-print dnode2 ==> $data2_2
-if $data2_1 != master then
- return -1
-endi
-
-print ============== step2
-system sh/exec.sh -n dnode2 -s start
-sql create dnode $hostname2
-
-$x = 0
-show2:
- $x = $x + 1
- sleep 2000
- if $x == 10 then
- return -1
- endi
-
-sql show mnodes
-print dnode1 ==> $data2_1
-print dnode2 ==> $data2_2
-if $data2_1 != master then
- goto show2
-endi
-if $data2_2 != slave then
- goto show2
-endi
-
-print ============== step3
-system sh/exec.sh -n dnode3 -s start
-sql create dnode $hostname3
-sleep 6000
-
-sql show mnodes
-$dnode1Role = $data2_1
-$dnode2Role = $data2_2
-$dnode3Role = $data2_3
-print dnode1 ==> $dnode1Role
-print dnode2 ==> $dnode2Role
-print dnode3 ==> $dnode3Role
-
-if $dnode1Role != master then
- return -1
-endi
-if $dnode2Role != slave then
- return -1
-endi
-if $dnode3Role != null then
- return -1
-endi
-
-print ============== step4
-sql drop dnode $hostname2
-sleep 6000
-
-sql show mnodes
-$dnode1Role = $data2_1
-$dnode2Role = $data2_2
-$dnode3Role = $data2_3
-print dnode1 ==> $dnode1Role
-print dnode2 ==> $dnode2Role
-print dnode3 ==> $dnode3Role
-
-if $dnode1Role != master then
- return -1
-endi
-if $dnode2Role != null then
- return -1
-endi
-if $dnode3Role != slave then
- return -1
-endi
-
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
-system sh/exec.sh -n dnode2 -s stop -x SIGINT
-system sh/exec.sh -n dnode3 -s stop -x SIGINT
-system sh/exec.sh -n dnode4 -s stop -x SIGINT
-system sh/exec.sh -n dnode5 -s stop -x SIGINT
-system sh/exec.sh -n dnode6 -s stop -x SIGINT
-system sh/exec.sh -n dnode7 -s stop -x SIGINT
-system sh/exec.sh -n dnode8 -s stop -x SIGINT
\ No newline at end of file
diff --git a/tests/script/unique/mnode/mgmt26.sim b/tests/script/unique/mnode/mgmt26.sim
deleted file mode 100644
index 2816845052e835cf11e0ec7d4ddc71cbdee0ada1..0000000000000000000000000000000000000000
--- a/tests/script/unique/mnode/mgmt26.sim
+++ /dev/null
@@ -1,123 +0,0 @@
-system sh/stop_dnodes.sh
-system sh/deploy.sh -n dnode1 -i 1
-system sh/deploy.sh -n dnode2 -i 2
-system sh/deploy.sh -n dnode3 -i 3
-
-system sh/cfg.sh -n dnode1 -c numOfMnodes -v 2
-system sh/cfg.sh -n dnode2 -c numOfMnodes -v 2
-system sh/cfg.sh -n dnode3 -c numOfMnodes -v 2
-
-print ============== step1
-system sh/exec.sh -n dnode1 -s start
-sql connect
-
-sql show mnodes
-print dnode1 ==> $data2_1
-print dnode2 ==> $data2_2
-if $data2_1 != master then
- return -1
-endi
-
-print ============== step2
-system sh/exec.sh -n dnode2 -s start
-sql create dnode $hostname2
-
-$x = 0
-show2:
- $x = $x + 1
- sleep 2000
- if $x == 10 then
- return -1
- endi
-
-sql show mnodes
-print dnode1 ==> $data2_1
-print dnode2 ==> $data2_2
-if $data2_1 != master then
- goto show2
-endi
-if $data2_2 != slave then
- goto show2
-endi
-
-print ============== step3
-system sh/exec.sh -n dnode3 -s start
-sql create dnode $hostname3
-sleep 6000
-
-sql show mnodes
-$dnode1Role = $data2_1
-$dnode2Role = $data2_2
-$dnode3Role = $data2_3
-print dnode1 ==> $dnode1Role
-print dnode2 ==> $dnode2Role
-print dnode3 ==> $dnode3Role
-
-if $dnode1Role != master then
- return -1
-endi
-if $dnode2Role != slave then
- return -1
-endi
-if $dnode3Role != null then
- return -1
-endi
-
-
-print ============== step4
-sql drop dnode $hostname2
-sleep 6000
-
-sql show mnodes
-$dnode1Role = $data2_1
-$dnode2Role = $data2_2
-$dnode3Role = $data2_3
-print dnode1 ==> $dnode1Role
-print dnode2 ==> $dnode2Role
-print dnode3 ==> $dnode3Role
-
-if $dnode1Role != master then
- return -1
-endi
-if $dnode2Role != null then
- return -1
-endi
-if $dnode3Role != slave then
- return -1
-endi
-
-print ============== step5
-system sh/exec.sh -n dnode2 -s stop
-system sh/deploy.sh -n dnode2 -i 2
-system sh/cfg.sh -n dnode2 -c numOfMnodes -v 2
-sleep 3000
-system sh/exec.sh -n dnode2 -s start
-sql create dnode $hostname2
-sleep 6000
-
-sql show mnodes
-$dnode1Role = $data2_1
-$dnode2Role = $data2_2
-$dnode3Role = $data2_3
-print dnode1 ==> $dnode1Role
-print dnode2 ==> $dnode2Role
-print dnode3 ==> $dnode3Role
-
-if $dnode1Role != master then
- return -1
-endi
-if $dnode2Role != null then
- return -1
-endi
-if $dnode3Role != slave then
- return -1
-endi
-
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
-system sh/exec.sh -n dnode2 -s stop -x SIGINT
-system sh/exec.sh -n dnode3 -s stop -x SIGINT
-system sh/exec.sh -n dnode4 -s stop -x SIGINT
-system sh/exec.sh -n dnode5 -s stop -x SIGINT
-system sh/exec.sh -n dnode6 -s stop -x SIGINT
-system sh/exec.sh -n dnode7 -s stop -x SIGINT
-system sh/exec.sh -n dnode8 -s stop -x SIGINT
\ No newline at end of file
diff --git a/tests/script/unique/mnode/mgmt30.sim b/tests/script/unique/mnode/mgmt30.sim
deleted file mode 100644
index d0858c0d6cdffa1cb1cd7f2ba570ae0521f412d5..0000000000000000000000000000000000000000
--- a/tests/script/unique/mnode/mgmt30.sim
+++ /dev/null
@@ -1,68 +0,0 @@
-system sh/stop_dnodes.sh
-system sh/deploy.sh -n dnode1 -i 1
-system sh/deploy.sh -n dnode2 -i 2
-system sh/deploy.sh -n dnode3 -i 3
-
-system sh/cfg.sh -n dnode1 -c numOfMnodes -v 3
-system sh/cfg.sh -n dnode2 -c numOfMnodes -v 3
-system sh/cfg.sh -n dnode3 -c numOfMnodes -v 3
-
-system sh/cfg.sh -n dnode1 -c balanceInterval -v 3000
-system sh/cfg.sh -n dnode2 -c balanceInterval -v 3000
-system sh/cfg.sh -n dnode3 -c balanceInterval -v 3000
-
-print ============== step1
-system sh/exec.sh -n dnode1 -s start
-sql connect
-
-sql show mnodes
-print dnode1 ==> $data2_1
-print dnode2 ==> $data2_2
-print dnode3 ==> $data3_3
-if $data2_1 != master then
- return -1
-endi
-if $data3_2 != null then
- return -1
-endi
-if $data3_3 != null then
- return -1
-endi
-
-print ============== step2
-system sh/exec.sh -n dnode2 -s start
-system sh/exec.sh -n dnode3 -s start
-sleep 3000
-
-sql create dnode $hostname2
-sql create dnode $hostname3
-
-$x = 0
-step2:
- $x = $x + 1
- sleep 1000
- if $x == 10 then
- return -1
- endi
-
-sql show mnodes
-$dnode1Role = $data2_1
-$dnode2Role = $data2_2
-$dnode3Role = $data2_3
-print dnode1 ==> $dnode1Role
-print dnode2 ==> $dnode2Role
-print dnode3 ==> $dnode3Role
-
-if $dnode1Role != master then
- goto step2
-endi
-if $dnode2Role != slave then
- goto step2
-endi
-if $dnode3Role != slave then
- goto step2
-endi
-
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
-system sh/exec.sh -n dnode2 -s stop -x SIGINT
-system sh/exec.sh -n dnode3 -s stop -x SIGINT
\ No newline at end of file
diff --git a/tests/script/unique/mnode/mgmt33.sim b/tests/script/unique/mnode/mgmt33.sim
deleted file mode 100644
index ce7cdce35d8c0463564f46d26a0711d39340c8bf..0000000000000000000000000000000000000000
--- a/tests/script/unique/mnode/mgmt33.sim
+++ /dev/null
@@ -1,214 +0,0 @@
-system sh/stop_dnodes.sh
-system sh/deploy.sh -n dnode1 -i 1
-system sh/deploy.sh -n dnode2 -i 2
-system sh/deploy.sh -n dnode3 -i 3
-
-system sh/cfg.sh -n dnode1 -c numOfMnodes -v 3
-system sh/cfg.sh -n dnode2 -c numOfMnodes -v 3
-system sh/cfg.sh -n dnode3 -c numOfMnodes -v 3
-
-print ============== step1
-system sh/exec.sh -n dnode1 -s start
-sql connect
-
-sql show mnodes
-print dnode1 ==> $data2_1
-print dnode2 ==> $data2_2
-print dnode3 ==> $data3_3
-if $data2_1 != master then
- return -1
-endi
-if $data3_2 != null then
- return -1
-endi
-if $data3_3 != null then
- return -1
-endi
-
-print ============== step2
-system sh/exec.sh -n dnode2 -s start
-sql create dnode $hostname2
-
-$x = 0
-step2:
- $x = $x + 1
- sleep 1000
- if $x == 10 then
- return -1
- endi
-
-sql show mnodes
-$dnode1Role = $data2_1
-$dnode2Role = $data2_2
-$dnode3Role = $data2_3
-print dnode1 ==> $dnode1Role
-print dnode2 ==> $dnode2Role
-print dnode3 ==> $dnode3Role
-
-if $dnode1Role != master then
- goto step2
-endi
-if $dnode2Role != slave then
- goto step2
-endi
-if $dnode3Role != null then
- goto step2
-endi
-
-print ============== step3
-system sh/exec.sh -n dnode3 -s start
-sql create dnode $hostname3
-
-$x = 0
-step3:
- $x = $x + 1
- sleep 1000
- if $x == 10 then
- return -1
- endi
-
-sql show mnodes
-$dnode1Role = $data2_1
-$dnode2Role = $data2_2
-$dnode3Role = $data2_3
-print dnode1 ==> $dnode1Role
-print dnode2 ==> $dnode2Role
-print dnode3 ==> $dnode3Role
-
-if $dnode1Role != master then
- goto step3
-endi
-if $dnode2Role != slave then
- goto step3
-endi
-if $dnode3Role != slave then
- goto step3
-endi
-
-print ============== step4
-sql drop dnode $hostname2
-
-$x = 0
-step4:
- $x = $x + 1
- sleep 1000
- if $x == 10 then
- return -1
- endi
-
-sql show mnodes
-$dnode1Role = $data2_1
-$dnode2Role = $data2_2
-$dnode3Role = $data2_3
-print dnode1 ==> $dnode1Role
-print dnode2 ==> $dnode2Role
-print dnode3 ==> $dnode3Role
-
-if $dnode1Role != master then
- goto step4
-endi
-if $dnode2Role != null then
- goto step4
-endi
-if $dnode3Role != slave then
- goto step4
-endi
-
-system sh/exec.sh -n dnode2 -s stop
-system sh/deploy.sh -n dnode2 -i 2
-system sh/cfg.sh -n dnode2 -c numOfMnodes -v 3
-system sh/exec.sh -n dnode2 -s start
-
-print ============== step5
-sql create dnode $hostname2
-
-$x = 0
-step5:
- $x = $x + 1
- sleep 1000
- if $x == 10 then
- return -1
- endi
-
-sql show mnodes
-$dnode1Role = $data2_1
-$dnode2Role = $data2_4
-$dnode3Role = $data2_3
-print dnode1 ==> $dnode1Role
-print dnode2 ==> $dnode2Role
-print dnode3 ==> $dnode3Role
-
-if $dnode1Role != master then
- goto step5
-endi
-if $dnode2Role != slave then
- goto step5
-endi
-if $dnode3Role != slave then
- goto step5
-endi
-
-print ============== step6
-system sh/exec.sh -n dnode1 -s stop
-$x = 0
-step6:
- $x = $x + 1
- sleep 1000
- if $x == 10 then
- return -1
- endi
-
-sql show mnodes -x step6
-$dnode1Role = $data2_1
-$dnode2Role = $data2_4
-$dnode3Role = $data2_3
-print dnode1 ==> $dnode1Role
-print dnode2 ==> $dnode2Role
-print dnode3 ==> $dnode3Role
-
-if $dnode1Role != offline then
- goto step6
-endi
-#if $dnode2Role != master then
-# return -1
-#endi
-#if $dnode3Role != slave then
-# return -1
-#endi
-
-print ============== step7
-sql drop dnode $hostname1
-$x = 0
-step7:
- $x = $x + 1
- sleep 1000
- if $x == 10 then
- return -1
- endi
-
-sql show mnodes -x step7
-$dnode1Role = $data2_1
-$dnode2Role = $data2_2
-$dnode3Role = $data2_3
-print dnode1 ==> $dnode1Role
-print dnode2 ==> $dnode2Role
-print dnode3 ==> $dnode3Role
-
-if $dnode1Role != null then
- goto step7
-endi
-#if $dnode2Role != master then
-# return -1
-#endi
-#if $dnode3Role != slave then
-# return -1
-#endi
-
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
-system sh/exec.sh -n dnode2 -s stop -x SIGINT
-system sh/exec.sh -n dnode3 -s stop -x SIGINT
-system sh/exec.sh -n dnode4 -s stop -x SIGINT
-system sh/exec.sh -n dnode5 -s stop -x SIGINT
-system sh/exec.sh -n dnode6 -s stop -x SIGINT
-system sh/exec.sh -n dnode7 -s stop -x SIGINT
-system sh/exec.sh -n dnode8 -s stop -x SIGINT
\ No newline at end of file
diff --git a/tests/script/unique/mnode/mgmt34.sim b/tests/script/unique/mnode/mgmt34.sim
deleted file mode 100644
index d8a46b0955f59273279bbbc5c89c07c05db672d7..0000000000000000000000000000000000000000
--- a/tests/script/unique/mnode/mgmt34.sim
+++ /dev/null
@@ -1,269 +0,0 @@
-system sh/stop_dnodes.sh
-
-system sh/deploy.sh -n dnode1 -i 1
-system sh/deploy.sh -n dnode2 -i 2
-system sh/deploy.sh -n dnode3 -i 3
-system sh/deploy.sh -n dnode4 -i 4
-
-system sh/cfg.sh -n dnode1 -c numOfMnodes -v 3
-system sh/cfg.sh -n dnode2 -c numOfMnodes -v 3
-system sh/cfg.sh -n dnode3 -c numOfMnodes -v 3
-system sh/cfg.sh -n dnode4 -c numOfMnodes -v 3
-
-print ============== step1
-system sh/exec.sh -n dnode1 -s start
-sql connect
-
-sql show mnodes
-print dnode1 ==> $data2_1
-print dnode2 ==> $data2_2
-print dnode3 ==> $data3_3
-if $data2_1 != master then
- return -1
-endi
-if $data3_2 != null then
- return -1
-endi
-if $data3_3 != null then
- return -1
-endi
-
-print ============== step2
-system sh/exec.sh -n dnode2 -s start
-sql create dnode $hostname2
-$x = 0
-step2:
- $x = $x + 1
- sleep 1000
- if $x == 10 then
- return -1
- endi
-
-sql show mnodes
-$dnode1Role = $data2_1
-$dnode2Role = $data2_2
-$dnode3Role = $data2_3
-$dnode4Role = $data2_4
-print dnode1 ==> $dnode1Role
-print dnode2 ==> $dnode2Role
-print dnode3 ==> $dnode3Role
-print dnode4 ==> $dnode4Role
-
-if $dnode1Role != master then
- goto step2
-endi
-if $dnode2Role != slave then
- goto step2
-endi
-if $dnode3Role != null then
- goto step2
-endi
-if $dnode4Role != null then
- goto step2
-endi
-
-print ============== step3
-system sh/exec.sh -n dnode3 -s start
-sql create dnode $hostname3
-
-$x = 0
-step3:
- $x = $x + 1
- sleep 1000
- if $x == 10 then
- return -1
- endi
-
-sql show mnodes
-$dnode1Role = $data2_1
-$dnode2Role = $data2_2
-$dnode3Role = $data2_3
-$dnode4Role = $data2_4
-print dnode1 ==> $dnode1Role
-print dnode2 ==> $dnode2Role
-print dnode3 ==> $dnode3Role
-print dnode4 ==> $dnode4Role
-
-if $dnode1Role != master then
- goto step3
-endi
-if $dnode2Role != slave then
- goto step3
-endi
-if $dnode3Role != slave then
- goto step3
-endi
-if $dnode4Role != null then
- goto step3
-endi
-
-
-print ============== step4
-system sh/exec.sh -n dnode4 -s start
-sql create dnode $hostname4
-$x = 0
-step4:
- $x = $x + 1
- sleep 1000
- if $x == 10 then
- return -1
- endi
-
-sql show mnodes
-$dnode1Role = $data2_1
-$dnode2Role = $data2_2
-$dnode3Role = $data2_3
-$dnode4Role = $data2_4
-print dnode1 ==> $dnode1Role
-print dnode2 ==> $dnode2Role
-print dnode3 ==> $dnode3Role
-print dnode4 ==> $dnode4Role
-
-if $dnode1Role != master then
- goto step4
-endi
-if $dnode2Role != slave then
- goto step4
-endi
-if $dnode3Role != slave then
- goto step4
-endi
-if $dnode4Role != null then
- goto step4
-endi
-
-print ============== step5
-sql drop dnode $hostname2
-$x = 0
-step5:
- $x = $x + 1
- sleep 1000
- if $x == 10 then
- return -1
- endi
-
-sql show mnodes
-$dnode1Role = $data2_1
-$dnode2Role = $data2_2
-$dnode3Role = $data2_3
-$dnode4Role = $data2_4
-print dnode1 ==> $dnode1Role
-print dnode2 ==> $dnode2Role
-print dnode3 ==> $dnode3Role
-print dnode4 ==> $dnode4Role
-
-if $dnode1Role != master then
- goto step5
-endi
-if $dnode2Role != null then
- goto step5
-endi
-if $dnode3Role != slave then
- goto step5
-endi
-if $dnode4Role != slave then
- goto step5
-endi
-
-system sh/exec.sh -n dnode2 -s stop
-system sh/deploy.sh -n dnode2 -i 2
-system sh/cfg.sh -n dnode2 -c numOfMnodes -v 3
-system sh/exec.sh -n dnode2 -s start
-
-print ============== step6
-sql create dnode $hostname2
-$x = 0
-step6:
- $x = $x + 1
- sleep 1000
- if $x == 10 then
- return -1
- endi
-
-sql show mnodes
-$dnode1Role = $data2_1
-$dnode2Role = $data2_2
-$dnode3Role = $data2_3
-$dnode4Role = $data2_4
-print dnode1 ==> $dnode1Role
-print dnode2 ==> $dnode2Role
-print dnode3 ==> $dnode3Role
-print dnode4 ==> $dnode4Role
-
-if $dnode1Role != master then
- goto step6
-endi
-if $dnode2Role != null then
- goto step6
-endi
-if $dnode3Role != slave then
- goto step6
-endi
-if $dnode4Role != slave then
- goto step6
-endi
-
-print ============== step7
-system sh/exec.sh -n dnode1 -s stop
-$x = 0
-step7:
- $x = $x + 1
- sleep 1000
- if $x == 10 then
- return -1
- endi
-
-sql show mnodes -x step7
-$dnode1Role = $data2_1
-$dnode2Role = $data2_2
-$dnode3Role = $data2_3
-$dnode4Role = $data2_4
-print dnode1 ==> $dnode1Role
-print dnode2 ==> $dnode2Role
-print dnode3 ==> $dnode3Role
-print dnode4 ==> $dnode4Role
-
-if $dnode1Role != offline then
- goto step7
-endi
-
-print ============== step8
-sql drop dnode $hostname1
-step8:
- $x = $x + 1
- sleep 1000
- if $x == 10 then
- return -1
- endi
-
-sql show mnodes -x step8
-$dnode1Role = $data2_1
-$dnode2Role = $data2_5
-$dnode3Role = $data2_3
-$dnode4Role = $data2_4
-print dnode1 ==> $dnode1Role
-print dnode2 ==> $dnode2Role
-print dnode3 ==> $dnode3Role
-print dnode4 ==> $dnode4Role
-
-if $dnode1Role != null then
- goto step8
-endi
-if $dnode2Role != slave then
- goto step8
-endi
-#if $dnode3Role != master then
-# return -1
-#endi
-#if $dnode4Role != slave then
-# return -1
-#endi
-
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
-system sh/exec.sh -n dnode2 -s stop -x SIGINT
-system sh/exec.sh -n dnode3 -s stop -x SIGINT
-system sh/exec.sh -n dnode4 -s stop -x SIGINT
-system sh/exec.sh -n dnode5 -s stop -x SIGINT
-system sh/exec.sh -n dnode6 -s stop -x SIGINT
-system sh/exec.sh -n dnode7 -s stop -x SIGINT
-system sh/exec.sh -n dnode8 -s stop -x SIGINT
\ No newline at end of file
diff --git a/tests/script/unique/mnode/mgmtr2.sim b/tests/script/unique/mnode/mgmtr2.sim
deleted file mode 100644
index 5afb41905846bff3ce9894e928245a7d34078354..0000000000000000000000000000000000000000
--- a/tests/script/unique/mnode/mgmtr2.sim
+++ /dev/null
@@ -1,87 +0,0 @@
-system sh/stop_dnodes.sh
-system sh/deploy.sh -n dnode1 -i 1
-system sh/deploy.sh -n dnode2 -i 2
-system sh/deploy.sh -n dnode3 -i 3
-
-system sh/cfg.sh -n dnode1 -c numOfMnodes -v 2
-system sh/cfg.sh -n dnode2 -c numOfMnodes -v 2
-system sh/cfg.sh -n dnode3 -c numOfMnodes -v 2
-
-print ============== step1
-system sh/exec.sh -n dnode1 -s start
-sleep 2000
-sql connect
-
-sql show mnodes
-$dnode1Role = $data2_1
-$dnode2Role = $data2_2
-$dnode3Role = $data2_3
-print dnode1 ==> $dnode1Role
-print dnode2 ==> $dnode2Role
-print dnode3 ==> $dnode3Role
-
-if $dnode1Role != master then
- return -1
-endi
-if $dnode2Role != null then
- return -1
-endi
-if $dnode3Role != null then
- return -1
-endi
-
-print ============== step2
-sql create dnode $hostname2
-sql create dnode $hostname3
-
-print ============== step3
-print ========= start dnode2 and dnode3
-
-system sh/exec.sh -n dnode2 -s start
-sleep 1000
-system sh/exec.sh -n dnode3 -s start
-
-sleep 8000
-system sh/exec.sh -n dnode2 -s stop
-system sh/exec.sh -n dnode3 -s stop
-sleep 4000
-system sh/exec.sh -n dnode2 -s start
-system sh/exec.sh -n dnode3 -s start
-sleep 4000
-system sh/exec.sh -n dnode2 -s stop
-system sh/exec.sh -n dnode3 -s stop
-sleep 4000
-system sh/exec.sh -n dnode2 -s start
-system sh/exec.sh -n dnode3 -s start
-
-print ============== step4
-$x = 0
-step4:
- $x = $x + 1
- sleep 1000
- if $x == 20 then
- return -1
- endi
-
-sql show mnodes
-
-$dnode1Role = $data2_1
-$dnode2Role = $data2_2
-$dnode3Role = $data2_3
-print dnode1 ==> $dnode1Role
-print dnode2 ==> $dnode2Role
-print dnode3 ==> $dnode3Role
-
-if $dnode1Role != master then
- goto step4
-endi
-if $dnode2Role != slave then
- goto step4
-endi
-if $dnode3Role != null then
- goto step4
-endi
-
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
-system sh/exec.sh -n dnode2 -s stop -x SIGINT
-system sh/exec.sh -n dnode3 -s stop -x SIGINT
\ No newline at end of file
diff --git a/tests/script/unique/mnode/testSuite.sim b/tests/script/unique/mnode/testSuite.sim
deleted file mode 100644
index b9adbe06a282548d56d7e7feb8a36f99198d8c0d..0000000000000000000000000000000000000000
--- a/tests/script/unique/mnode/testSuite.sim
+++ /dev/null
@@ -1,9 +0,0 @@
-run unique/mnode/mgmt21.sim
-run unique/mnode/mgmt22.sim
-run unique/mnode/mgmt23.sim
-run unique/mnode/mgmt24.sim
-run unique/mnode/mgmt25.sim
-run unique/mnode/mgmt26.sim
-run unique/mnode/mgmt33.sim
-run unique/mnode/mgmt34.sim
-run unique/mnode/mgmtr2.sim
diff --git a/tests/script/unique/stream/metrics_balance.sim b/tests/script/unique/stream/metrics_balance.sim
deleted file mode 100644
index ff48c2236709635c8d1a790104b0185144a96866..0000000000000000000000000000000000000000
--- a/tests/script/unique/stream/metrics_balance.sim
+++ /dev/null
@@ -1,312 +0,0 @@
-system sh/stop_dnodes.sh
-
-
-
-system sh/deploy.sh -n dnode1 -i 1
-system sh/deploy.sh -n dnode2 -i 2
-system sh/cfg.sh -n dnode1 -c statusInterval -v 1
-system sh/cfg.sh -n dnode2 -c statusInterval -v 1
-system sh/cfg.sh -n dnode1 -c balanceInterval -v 10
-system sh/cfg.sh -n dnode2 -c balanceInterval -v 10
-system sh/cfg.sh -n dnode1 -c walLevel -v 1
-system sh/cfg.sh -n dnode2 -c walLevel -v 1
-system sh/cfg.sh -n dnode1 -c mnodeEqualVnodeNum -v 0
-system sh/cfg.sh -n dnode2 -c mnodeEqualVnodeNum -v 0
-system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 4
-system sh/cfg.sh -n dnode2 -c maxTablesPerVnode -v 4
-system sh/cfg.sh -n dnode1 -c numOfMnodes -v 2
-system sh/cfg.sh -n dnode2 -c numOfMnodes -v 2
-
-$dbPrefix = db
-$tbPrefix = tb
-$mtPrefix = mt
-$stPrefix = st
-$tbNum = 3
-$rowNum = 200
-
-print ========= start dnode1
-system sh/exec.sh -n dnode1 -s start
-sql connect
-
-print ============== step1
-$db = $dbPrefix
-sql create database $db
-sql use $db
-
-$i = 0
-$st = $stPrefix . $i
-$mt = $mtPrefix . $i
-$tbNum = 3
-sql create table $mt (ts timestamp, tbcol int, tbcol2 float) TAGS(tgcol int)
-while $i < $tbNum
- $tb = $tbPrefix . $i
- sql create table $tb using $mt tags( $i )
- if $i == 0 then
- sleep 2000
- endi
-
- $x = 0
- $y = 0
- while $y < $rowNum
- $ms = $x . s
- sql insert into $tb values (now + $ms , $y , $y )
- $x = $x + 1
- $y = $y + 1
- endw
-
- $i = $i + 1
-endw
-sql create table $st as select count(*), count(tbcol), count(tbcol2) from $tb interval(10s)
-
-$st = $stPrefix . $i
-$mt = $mtPrefix . $i
-$tbNum = 6
-sql create table $mt (ts timestamp, tbcol int, tbcol2 float) TAGS(tgcol int)
-while $i < $tbNum
- $tb = $tbPrefix . $i
- sql create table $tb using $mt tags( $i )
- if $i == 0 then
- sleep 2000
- endi
-
- $x = 0
- $y = 0
- while $y < $rowNum
- $ms = $x . s
- sql insert into $tb values (now + $ms , $y , $y )
- $x = $x + 1
- $y = $y + 1
- endw
-
- $i = $i + 1
-endw
-sql create table $st as select count(*), count(tbcol), count(tbcol2) from $tb interval(10s)
-
-$st = $stPrefix . $i
-$mt = $mtPrefix . $i
-$tbNum = 9
-sql create table $mt (ts timestamp, tbcol int, tbcol2 float) TAGS(tgcol int)
-while $i < $tbNum
- $tb = $tbPrefix . $i
- sql create table $tb using $mt tags( $i )
- if $i == 0 then
- sleep 2000
- endi
-
- $x = 0
- $y = 0
- while $y < $rowNum
- $ms = $x . s
- sql insert into $tb values (now + $ms , $y , $y )
- $x = $x + 1
- $y = $y + 1
- endw
-
- $i = $i + 1
-endw
-sql create table $st as select count(*), count(tbcol), count(tbcol2) from $tb interval(10s)
-
-$st = $stPrefix . $i
-$mt = $mtPrefix . $i
-$tbNum = 12
-sql create table $mt (ts timestamp, tbcol int, tbcol2 float) TAGS(tgcol int)
-while $i < $tbNum
- $tb = $tbPrefix . $i
- sql create table $tb using $mt tags( $i )
- if $i == 0 then
- sleep 2000
- endi
-
- $x = 0
- $y = 0
- while $y < $rowNum
- $ms = $x . s
- sql insert into $tb values (now + $ms , $y , $y )
- $x = $x + 1
- $y = $y + 1
- endw
-
- $i = $i + 1
-endw
-sql create table $st as select count(*), count(tbcol), count(tbcol2) from $tb interval(10s)
-
-
-print =============== step2
-
-sql show tables
-if $rows != 16 then
- return -1
-endi
-
-print =============== step3
-print sleep 22 seconds
-sleep 22000
-
-$i = 0
-$mt = $mtPrefix . $i
-$st = $stPrefix . $i
-sql select * from $st
-$r0 = $rows
-print $st ==> $r0 $data00 $data01 $data10 $data11
-
-$i = 3
-$mt = $mtPrefix . $i
-$st = $stPrefix . $i
-sql select * from $st
-$r3 = $rows
-print $st ==> $r3 $data00 $data01 $data10 $data11
-
-$i = 6
-$mt = $mtPrefix . $i
-$st = $stPrefix . $i
-sql select * from $st
-$r6 = $rows
-print $st ==> $r6 $data00 $data01 $data10 $data11
-
-$i = 9
-$mt = $mtPrefix . $i
-$st = $stPrefix . $i
-sql select * from $st
-$r9 = $rows
-print $st ==> $r9 $data00 $data01 $data10 $data11
-
-print rows0=>$r0 rows3=>$r3 rows6=>$r6 rows9=>$r9
-
-$x = 0
-show1:
- $x = $x + 1
- sleep 2000
- if $x == 20 then
- return -1
- endi
-sql show dnodes -x show1
-$dnode1Vnodes = $data3_192.168.0.1
-print dnode1 $dnode1Vnodes
-$dnode2Vnodes = $data3_192.168.0.2
-print dnode2 $dnode2Vnodes
-
-if $dnode1Vnodes != 0 then
- goto show1
-endi
-if $dnode2Vnodes != NULL then
- goto show1
-endi
-
-print =============== step4 start dnode2
-sql create dnode $hostname2
-system sh/exec.sh -n dnode2 -s start
-sleep 8000
-
-$x = 0
-show2:
- $x = $x + 1
- sleep 2000
- if $x == 20 then
- return -1
- endi
-sql show dnodes -x show2
-$dnode1Vnodes = $data3_192.168.0.1
-print dnode1 $dnode1Vnodes
-$dnode2Vnodes = $data3_192.168.0.2
-print dnode2 $dnode2Vnodes
-
-if $dnode1Vnodes != 2 then
- goto show2
-endi
-if $dnode2Vnodes != 2 then
- goto show2
-endi
-
-print rows0=>$r0 rows3=>$r3 rows6=>$r6 rows9=>$r9
-print =============== step5
-print sleep 22 seconds
-sleep 22000
-
-print =============== step6
-$i = 0
-$tb = $tbPrefix . $i
-$st = $stPrefix . $i
-sql select * from $tb
-if $rows != $rowNum then
- return -1
-endi
-
-$i = 3
-$tb = $tbPrefix . $i
-$st = $stPrefix . $i
-sql select * from $tb
-if $rows != $rowNum then
- return -1
-endi
-
-$i = 6
-$tb = $tbPrefix . $i
-$st = $stPrefix . $i
-sql select * from $tb
-if $rows != $rowNum then
- return -1
-endi
-
-$i = 9
-$tb = $tbPrefix . $i
-$st = $stPrefix . $i
-sql select * from $tb
-if $rows != $rowNum then
- return -1
-endi
-
-print rows0=>$r0 rows3=>$r3 rows6=>$r6 rows9=>$r9
-print =============== step7
-$i = 0
-$tb = $tbPrefix . $i
-$st = $stPrefix . $i
-sql select * from $st
-print $st ==> $r0 $rows , $data00 $data01 $data10 $data11
-if $rows == 0 then
- return -1
-endi
-if $rows <= $r0 then
- return -1
-endi
-
-$i = 3
-$tb = $tbPrefix . $i
-$st = $stPrefix . $i
-sql select * from $st
-print $st ==> $r3 $rows , $data00 $data01 $data10 $data11
-if $rows == 0 then
- return -1
-endi
-if $rows <= $r3 then
- return -1
-endi
-
-
-$i = 6
-$tb = $tbPrefix . $i
-$st = $stPrefix . $i
-sql select * from $st
-print $st ==> $r6 $rows , $data00 $data01 $data10 $data11
-if $rows == 0 then
- return -1
-endi
-if $rows <= $r6 then
- return -1
-endi
-
-$i = 9
-$tb = $tbPrefix . $i
-$st = $stPrefix . $i
-sql select * from $st
-print $st ==> $r0 $rows , $data00 $data01 $data10 $data11
-if $rows == 0 then
- return -1
-endi
-if $rows <= $r9 then
- return -1
-endi
-
-print =============== clear
-system sh/exec.sh -n dnode1 -s stop
-system sh/exec.sh -n dnode2 -s stop
-
diff --git a/tests/script/unique/stream/metrics_replica1_dnode2.sim b/tests/script/unique/stream/metrics_replica1_dnode2.sim
deleted file mode 100644
index 20c37cefc39f8fa6393d49934adb046f409fca25..0000000000000000000000000000000000000000
--- a/tests/script/unique/stream/metrics_replica1_dnode2.sim
+++ /dev/null
@@ -1,260 +0,0 @@
-system sh/stop_dnodes.sh
-
-
-
-system sh/deploy.sh -n dnode1 -i 1
-system sh/deploy.sh -n dnode2 -i 2
-system sh/cfg.sh -n dnode1 -c walLevel -v 1
-system sh/cfg.sh -n dnode2 -c walLevel -v 1
-system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4
-system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4
-system sh/exec.sh -n dnode1 -s start
-
-sql connect
-
-sql create dnode $hostname2
-system sh/exec.sh -n dnode2 -s start
-$x = 0
-createDnode:
- $x = $x + 1
- sleep 1000
- if $x == 20 then
- return -1
- endi
-sql show dnodes;
-if $data4_192.168.0.2 == offline then
- goto createDnode
-endi
-print ======================== dnode1 start
-
-$dbPrefix = m1d_db
-$tbPrefix = m1d_tb
-$mtPrefix = m1d_mt
-$stPrefix = m1d_st
-$tbNum = 10
-$rowNum = 20
-$totalNum = 200
-
-print =============== step1
-$i = 0
-$db = $dbPrefix . $i
-$mt = $mtPrefix . $i
-$st = $stPrefix . $i
-
-sql drop databae $db -x step1
-step1:
-sql create database $db
-sql use $db
-sql create table $mt (ts timestamp, tbcol int, tbcol2 float) TAGS(tgcol int)
-
-$i = 0
-while $i < $tbNum
- $tb = $tbPrefix . $i
- sql create table $tb using $mt tags( $i )
-
- $x = -1440
- $y = 0
- while $y < $rowNum
- $ms = $x . m
- sql insert into $tb values (now $ms , $y , $y )
- $x = $x + 1
- $y = $y + 1
- endw
-
- $i = $i + 1
-endw
-
-sleep 100
-
-print =============== step2 c1
-
-sql select count(*) from $mt interval(1d)
-print select count(*) from $mt interval(1d) ===> $data00 $data01
-if $data01 != 200 then
- return -1
-endi
-
-$st = $stPrefix . c1
-sql create table $st as select count(*) from $mt interval(1d)
-
-print =============== step3 c2
-sql select count(tbcol) from $mt interval(1d)
-print select count(tbcol) from $mt interval(1d) ===> $data00 $data01
-if $data01 != 200 then
- return -1
-endi
-
-$st = $stPrefix . c2
-sql create table $st as select count(tbcol) from $mt interval(1d)
-
-print =============== step4 c3
-sql select count(tbcol2) from $mt interval(1d)
-print select count(tbcol2) from $mt interval(1d) ===> $data00 $data01
-if $data01 != 200 then
- return -1
-endi
-
-$st = $stPrefix . c3
-sql create table $st as select count(tbcol2) from $mt interval(1d)
-
-print =============== step5 avg
-sql select avg(tbcol) from $mt interval(1d)
-print select avg(tbcol) from $mt interval(1d) ===> $data00 $data01
-if $data01 != 9.500000000 then
- return -1
-endi
-
-$st = $stPrefix . av
-sql create table $st as select avg(tbcol) from $mt interval(1d)
-
-print =============== step6 su
-sql select sum(tbcol) from $mt interval(1d)
-print select sum(tbcol) from $mt interval(1d) ===> $data00 $data01
-if $data01 != 1900 then
- return -1
-endi
-
-$st = $stPrefix . su
-sql create table $st as select sum(tbcol) from $mt interval(1d)
-
-print =============== step7 mi
-sql select min(tbcol) from $mt interval(1d)
-print select min(tbcol) from $mt interval(1d) ===> $data00 $data01
-if $data01 != 0 then
- return -1
-endi
-
-$st = $stPrefix . mi
-sql create table $st as select min(tbcol) from $mt interval(1d)
-
-print =============== step8 ma
-sql select max(tbcol) from $mt interval(1d)
-print select max(tbcol) from $mt interval(1d) ===> $data00 $data01
-if $data01 != 19 then
- return -1
-endi
-
-$st = $stPrefix . ma
-sql create table $st as select max(tbcol) from $mt interval(1d)
-
-print =============== step9 fi
-sql select first(tbcol) from $mt interval(1d)
-print select first(tbcol) from $mt interval(1d) ===> $data00 $data01
-if $data01 != 0 then
- return -1
-endi
-
-$st = $stPrefix . fi
-sql create table $st as select first(tbcol) from $mt interval(1d)
-
-print =============== step10 la
-sql select last(tbcol) from $mt interval(1d)
-print select last(tbcol) from $mt interval(1d) ===> $data00 $data01
-if $data01 != 19 then
- return -1
-endi
-
-$st = $stPrefix . la
-sql create table $st as select last(tbcol) from $mt interval(1d)
-
-print =============== step11 wh
-sql select count(tbcol) from $mt where ts < now + 4m interval(1d)
-print select count(tbcol) from $mt where ts < now + 4m interval(1d) ===> $data00 $data01
-if $data01 != 200 then
- return -1
-endi
-
-$st = $stPrefix . wh
-#sql create table $st as select count(tbcol) from $mt where ts < now + 4m interval(1d)
-
-print =============== step12 as
-sql select count(tbcol) from $mt interval(1d)
-print select count(tbcol) from $mt interval(1d) ===> $data00 $data01
-if $data01 != 200 then
- return -1
-endi
-
-$st = $stPrefix . as
-sql create table $st as select count(tbcol) as c from $mt interval(1d)
-
-print =============== step13
-print sleep 22 seconds
-sleep 32000
-
-print =============== step14
-$st = $stPrefix . c1
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 200 then
- return -1
-endi
-
-$st = $stPrefix . c2
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 200 then
- return -1
-endi
-
-$st = $stPrefix . c3
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 200 then
- return -1
-endi
-
-$st = $stPrefix . av
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 9.500000000 then
- return -1
-endi
-
-$st = $stPrefix . su
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 1900 then
- return -1
-endi
-
-$st = $stPrefix . mi
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 0 then
- return -1
-endi
-
-$st = $stPrefix . ma
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 19 then
- return -1
-endi
-
-$st = $stPrefix . fi
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 0 then
- return -1
-endi
-
-$st = $stPrefix . la
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 19 then
- return -1
-endi
-
-$st = $stPrefix . wh
-#sql select * from $st
-#print ===> select * from $st ===> $data00 $data01
-#if $data01 != 200 then
-# return -1
-#endi
-
-$st = $stPrefix . as
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 200 then
- return -1
-endi
diff --git a/tests/script/unique/stream/metrics_replica2_dnode2.sim b/tests/script/unique/stream/metrics_replica2_dnode2.sim
deleted file mode 100644
index aa8c1871017982cecc695abc8f64d732a8a7fc4e..0000000000000000000000000000000000000000
--- a/tests/script/unique/stream/metrics_replica2_dnode2.sim
+++ /dev/null
@@ -1,260 +0,0 @@
-system sh/stop_dnodes.sh
-
-
-
-system sh/deploy.sh -n dnode1 -i 1
-system sh/deploy.sh -n dnode2 -i 2
-system sh/cfg.sh -n dnode1 -c walLevel -v 1
-system sh/cfg.sh -n dnode2 -c walLevel -v 1
-system sh/exec.sh -n dnode1 -s start
-
-sql connect
-
-sql create dnode $hostname2
-system sh/exec.sh -n dnode2 -s start
-$x = 0
-createDnode:
- $x = $x + 1
- sleep 1000
- if $x == 20 then
- return -1
- endi
-sql show dnodes;
-if $data4_192.168.0.2 == offline then
- goto createDnode
-endi
-
-
-print ======================== dnode1 start
-
-$dbPrefix = m2d_db
-$tbPrefix = m2d_tb
-$mtPrefix = m2d_mt
-$stPrefix = m2d_st
-$tbNum = 10
-$rowNum = 20
-$totalNum = 200
-
-print =============== step1
-$i = 0
-$db = $dbPrefix . $i
-$mt = $mtPrefix . $i
-$st = $stPrefix . $i
-
-sql drop databae $db -x step1
-step1:
-sql create database $db replica 2
-sql use $db
-sql create table $mt (ts timestamp, tbcol int, tbcol2 float) TAGS(tgcol int)
-
-$i = 0
-while $i < $tbNum
- $tb = $tbPrefix . $i
- sql create table $tb using $mt tags( $i )
-
- $x = -1440
- $y = 0
- while $y < $rowNum
- $ms = $x . m
- sql insert into $tb values (now $ms , $y , $y )
- $x = $x + 1
- $y = $y + 1
- endw
-
- $i = $i + 1
-endw
-
-sleep 100
-
-print =============== step2 c1
-
-sql select count(*) from $mt interval(1d)
-print select count(*) from $mt interval(1d) ===> $data00 $data01
-if $data01 != 200 then
- return -1
-endi
-
-$st = $stPrefix . c1
-sql create table $st as select count(*) from $mt interval(1d)
-
-print =============== step3 c2
-sql select count(tbcol) from $mt interval(1d)
-print select count(tbcol) from $mt interval(1d) ===> $data00 $data01
-if $data01 != 200 then
- return -1
-endi
-
-$st = $stPrefix . c2
-sql create table $st as select count(tbcol) from $mt interval(1d)
-
-print =============== step4 c3
-sql select count(tbcol2) from $mt interval(1d)
-print select count(tbcol2) from $mt interval(1d) ===> $data00 $data01
-if $data01 != 200 then
- return -1
-endi
-
-$st = $stPrefix . c3
-sql create table $st as select count(tbcol2) from $mt interval(1d)
-
-print =============== step5 avg
-sql select avg(tbcol) from $mt interval(1d)
-print select avg(tbcol) from $mt interval(1d) ===> $data00 $data01
-if $data01 != 9.500000000 then
- return -1
-endi
-
-$st = $stPrefix . av
-sql create table $st as select avg(tbcol) from $mt interval(1d)
-
-print =============== step6 su
-sql select sum(tbcol) from $mt interval(1d)
-print select sum(tbcol) from $mt interval(1d) ===> $data00 $data01
-if $data01 != 1900 then
- return -1
-endi
-
-$st = $stPrefix . su
-sql create table $st as select sum(tbcol) from $mt interval(1d)
-
-print =============== step7 mi
-sql select min(tbcol) from $mt interval(1d)
-print select min(tbcol) from $mt interval(1d) ===> $data00 $data01
-if $data01 != 0 then
- return -1
-endi
-
-$st = $stPrefix . mi
-sql create table $st as select min(tbcol) from $mt interval(1d)
-
-print =============== step8 ma
-sql select max(tbcol) from $mt interval(1d)
-print select max(tbcol) from $mt interval(1d) ===> $data00 $data01
-if $data01 != 19 then
- return -1
-endi
-
-$st = $stPrefix . ma
-sql create table $st as select max(tbcol) from $mt interval(1d)
-
-print =============== step9 fi
-sql select first(tbcol) from $mt interval(1d)
-print select first(tbcol) from $mt interval(1d) ===> $data00 $data01
-if $data01 != 0 then
- return -1
-endi
-
-$st = $stPrefix . fi
-sql create table $st as select first(tbcol) from $mt interval(1d)
-
-print =============== step10 la
-sql select last(tbcol) from $mt interval(1d)
-print select last(tbcol) from $mt interval(1d) ===> $data00 $data01
-if $data01 != 19 then
- return -1
-endi
-
-$st = $stPrefix . la
-sql create table $st as select last(tbcol) from $mt interval(1d)
-
-print =============== step11 wh
-sql select count(tbcol) from $mt where ts < now + 4m interval(1d)
-print select count(tbcol) from $mt where ts < now + 4m interval(1d) ===> $data00 $data01
-if $data01 != 200 then
- return -1
-endi
-
-$st = $stPrefix . wh
-#sql create table $st as select count(tbcol) from $mt where ts < now + 4m interval(1d)
-
-print =============== step12 as
-sql select count(tbcol) from $mt interval(1d)
-print select count(tbcol) from $mt interval(1d) ===> $data00 $data01
-if $data01 != 200 then
- return -1
-endi
-
-$st = $stPrefix . as
-sql create table $st as select count(tbcol) as c from $mt interval(1d)
-
-print =============== step13
-print sleep 22 seconds
-sleep 22000
-
-print =============== step14
-$st = $stPrefix . c1
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 200 then
- return -1
-endi
-
-$st = $stPrefix . c2
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 200 then
- return -1
-endi
-
-$st = $stPrefix . c3
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 200 then
- return -1
-endi
-
-$st = $stPrefix . av
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 9.500000000 then
- return -1
-endi
-
-$st = $stPrefix . su
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 1900 then
- return -1
-endi
-
-$st = $stPrefix . mi
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 0 then
- return -1
-endi
-
-$st = $stPrefix . ma
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 19 then
- return -1
-endi
-
-$st = $stPrefix . fi
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 0 then
- return -1
-endi
-
-$st = $stPrefix . la
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 19 then
- return -1
-endi
-
-$st = $stPrefix . wh
-#sql select * from $st
-#print ===> select * from $st ===> $data00 $data01
-#if $data01 != 200 then
-# return -1
-#endi
-
-$st = $stPrefix . as
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 200 then
- return -1
-endi
diff --git a/tests/script/unique/stream/metrics_replica2_dnode2_vnoden.sim b/tests/script/unique/stream/metrics_replica2_dnode2_vnoden.sim
deleted file mode 100644
index be2fcefe66ed6ca2e24a44cd22fa072201137b89..0000000000000000000000000000000000000000
--- a/tests/script/unique/stream/metrics_replica2_dnode2_vnoden.sim
+++ /dev/null
@@ -1,261 +0,0 @@
-system sh/stop_dnodes.sh
-
-
-
-system sh/deploy.sh -n dnode1 -i 1
-system sh/deploy.sh -n dnode2 -i 2
-system sh/cfg.sh -n dnode1 -c walLevel -v 1
-system sh/cfg.sh -n dnode2 -c walLevel -v 1
-system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4
-system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4
-system sh/exec.sh -n dnode1 -s start
-
-sql connect
-
-sql create dnode $hostname2
-system sh/exec.sh -n dnode2 -s start
-$x = 0
-createDnode:
- $x = $x + 1
- sleep 1000
- if $x == 20 then
- return -1
- endi
-sql show dnodes;
-if $data4_192.168.0.2 == offline then
- goto createDnode
-endi
-
-print ======================== dnode1 start
-
-$dbPrefix = m2dv_db
-$tbPrefix = m2dv_tb
-$mtPrefix = m2dv_mt
-$stPrefix = m2dv_st
-$tbNum = 10
-$rowNum = 20
-$totalNum = 200
-
-print =============== step1
-$i = 0
-$db = $dbPrefix . $i
-$mt = $mtPrefix . $i
-$st = $stPrefix . $i
-
-sql drop databae $db -x step1
-step1:
-sql create database $db replica 2
-sql use $db
-sql create table $mt (ts timestamp, tbcol int, tbcol2 float) TAGS(tgcol int)
-
-$i = 0
-while $i < $tbNum
- $tb = $tbPrefix . $i
- sql create table $tb using $mt tags( $i )
-
- $x = -1440
- $y = 0
- while $y < $rowNum
- $ms = $x . m
- sql insert into $tb values (now $ms , $y , $y )
- $x = $x + 1
- $y = $y + 1
- endw
-
- $i = $i + 1
-endw
-
-sleep 100
-
-print =============== step2 c1
-
-sql select count(*) from $mt interval(1d)
-print select count(*) from $mt interval(1d) ===> $data00 $data01
-if $data01 != 200 then
- return -1
-endi
-
-$st = $stPrefix . c1
-sql create table $st as select count(*) from $mt interval(1d)
-
-print =============== step3 c2
-sql select count(tbcol) from $mt interval(1d)
-print select count(tbcol) from $mt interval(1d) ===> $data00 $data01
-if $data01 != 200 then
- return -1
-endi
-
-$st = $stPrefix . c2
-sql create table $st as select count(tbcol) from $mt interval(1d)
-
-print =============== step4 c3
-sql select count(tbcol2) from $mt interval(1d)
-print select count(tbcol2) from $mt interval(1d) ===> $data00 $data01
-if $data01 != 200 then
- return -1
-endi
-
-$st = $stPrefix . c3
-sql create table $st as select count(tbcol2) from $mt interval(1d)
-
-print =============== step5 avg
-sql select avg(tbcol) from $mt interval(1d)
-print select avg(tbcol) from $mt interval(1d) ===> $data00 $data01
-if $data01 != 9.500000000 then
- return -1
-endi
-
-$st = $stPrefix . av
-sql create table $st as select avg(tbcol) from $mt interval(1d)
-
-print =============== step6 su
-sql select sum(tbcol) from $mt interval(1d)
-print select sum(tbcol) from $mt interval(1d) ===> $data00 $data01
-if $data01 != 1900 then
- return -1
-endi
-
-$st = $stPrefix . su
-sql create table $st as select sum(tbcol) from $mt interval(1d)
-
-print =============== step7 mi
-sql select min(tbcol) from $mt interval(1d)
-print select min(tbcol) from $mt interval(1d) ===> $data00 $data01
-if $data01 != 0 then
- return -1
-endi
-
-$st = $stPrefix . mi
-sql create table $st as select min(tbcol) from $mt interval(1d)
-
-print =============== step8 ma
-sql select max(tbcol) from $mt interval(1d)
-print select max(tbcol) from $mt interval(1d) ===> $data00 $data01
-if $data01 != 19 then
- return -1
-endi
-
-$st = $stPrefix . ma
-sql create table $st as select max(tbcol) from $mt interval(1d)
-
-print =============== step9 fi
-sql select first(tbcol) from $mt interval(1d)
-print select first(tbcol) from $mt interval(1d) ===> $data00 $data01
-if $data01 != 0 then
- return -1
-endi
-
-$st = $stPrefix . fi
-sql create table $st as select first(tbcol) from $mt interval(1d)
-
-print =============== step10 la
-sql select last(tbcol) from $mt interval(1d)
-print select last(tbcol) from $mt interval(1d) ===> $data00 $data01
-if $data01 != 19 then
- return -1
-endi
-
-$st = $stPrefix . la
-sql create table $st as select last(tbcol) from $mt interval(1d)
-
-print =============== step11 wh
-sql select count(tbcol) from $mt where ts < now + 4m interval(1d)
-print select count(tbcol) from $mt where ts < now + 4m interval(1d) ===> $data00 $data01
-if $data01 != 200 then
- return -1
-endi
-
-$st = $stPrefix . wh
-#sql create table $st as select count(tbcol) from $mt where ts < now + 4m interval(1d)
-
-print =============== step12 as
-sql select count(tbcol) from $mt interval(1d)
-print select count(tbcol) from $mt interval(1d) ===> $data00 $data01
-if $data01 != 200 then
- return -1
-endi
-
-$st = $stPrefix . as
-sql create table $st as select count(tbcol) as c from $mt interval(1d)
-
-print =============== step13
-print sleep 22 seconds
-sleep 22000
-
-print =============== step14
-$st = $stPrefix . c1
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 200 then
- return -1
-endi
-
-$st = $stPrefix . c2
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 200 then
- return -1
-endi
-
-$st = $stPrefix . c3
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 200 then
- return -1
-endi
-
-$st = $stPrefix . av
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 9.500000000 then
- return -1
-endi
-
-$st = $stPrefix . su
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 1900 then
- return -1
-endi
-
-$st = $stPrefix . mi
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 0 then
- return -1
-endi
-
-$st = $stPrefix . ma
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 19 then
- return -1
-endi
-
-$st = $stPrefix . fi
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 0 then
- return -1
-endi
-
-$st = $stPrefix . la
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 19 then
- return -1
-endi
-
-$st = $stPrefix . wh
-#sql select * from $st
-#print ===> select * from $st ===> $data00 $data01
-#if $data01 != 200 then
-# return -1
-#endi
-
-$st = $stPrefix . as
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 200 then
- return -1
-endi
diff --git a/tests/script/unique/stream/metrics_replica2_dnode3.sim b/tests/script/unique/stream/metrics_replica2_dnode3.sim
deleted file mode 100644
index f7b17610c380d9f90a2cefd4af86ea766facdffa..0000000000000000000000000000000000000000
--- a/tests/script/unique/stream/metrics_replica2_dnode3.sim
+++ /dev/null
@@ -1,270 +0,0 @@
-system sh/stop_dnodes.sh
-
-
-
-
-system sh/deploy.sh -n dnode1 -i 1
-system sh/deploy.sh -n dnode2 -i 2
-system sh/deploy.sh -n dnode3 -i 3
-system sh/cfg.sh -n dnode1 -c walLevel -v 1
-system sh/cfg.sh -n dnode2 -c walLevel -v 1
-system sh/cfg.sh -n dnode3 -c walLevel -v 1
-system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4
-system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4
-system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v 4
-system sh/exec.sh -n dnode1 -s start
-
-sql connect
-
-sql create dnode $hostname2
-sql create dnode $hostname3
-system sh/exec.sh -n dnode2 -s start
-system sh/exec.sh -n dnode3 -s start
-$x = 0
-createDnode:
- $x = $x + 1
- sleep 1000
- if $x == 20 then
- return -1
- endi
-sql show dnodes;
-if $data4_192.168.0.2 == offline then
- goto createDnode
-endi
-if $data4_192.168.0.3 == offline then
- goto createDnode
-endi
-
-print ======================== dnode1 start
-
-$dbPrefix = m2d3_db
-$tbPrefix = m2d3_tb
-$mtPrefix = m2d3_mt
-$stPrefix = m2d3_st
-$tbNum = 10
-$rowNum = 20
-$totalNum = 200
-
-print =============== step1
-$i = 0
-$db = $dbPrefix . $i
-$mt = $mtPrefix . $i
-$st = $stPrefix . $i
-
-sql drop databae $db -x step1
-step1:
-sql create database $db replica 2
-sql use $db
-sql create table $mt (ts timestamp, tbcol int, tbcol2 float) TAGS(tgcol int)
-
-$i = 0
-while $i < $tbNum
- $tb = $tbPrefix . $i
- sql create table $tb using $mt tags( $i )
-
- $x = -1440
- $y = 0
- while $y < $rowNum
- $ms = $x . m
- sql insert into $tb values (now $ms , $y , $y )
- $x = $x + 1
- $y = $y + 1
- endw
-
- $i = $i + 1
-endw
-
-sleep 100
-
-print =============== step2 c1
-
-sql select count(*) from $mt interval(1d)
-print select count(*) from $mt interval(1d) ===> $data00 $data01
-if $data01 != 200 then
- return -1
-endi
-
-$st = $stPrefix . c1
-sql create table $st as select count(*) from $mt interval(1d)
-
-print =============== step3 c2
-sql select count(tbcol) from $mt interval(1d)
-print select count(tbcol) from $mt interval(1d) ===> $data00 $data01
-if $data01 != 200 then
- return -1
-endi
-
-$st = $stPrefix . c2
-sql create table $st as select count(tbcol) from $mt interval(1d)
-
-print =============== step4 c3
-sql select count(tbcol2) from $mt interval(1d)
-print select count(tbcol2) from $mt interval(1d) ===> $data00 $data01
-if $data01 != 200 then
- return -1
-endi
-
-$st = $stPrefix . c3
-sql create table $st as select count(tbcol2) from $mt interval(1d)
-
-print =============== step5 avg
-sql select avg(tbcol) from $mt interval(1d)
-print select avg(tbcol) from $mt interval(1d) ===> $data00 $data01
-if $data01 != 9.500000000 then
- return -1
-endi
-
-$st = $stPrefix . av
-sql create table $st as select avg(tbcol) from $mt interval(1d)
-
-print =============== step6 su
-sql select sum(tbcol) from $mt interval(1d)
-print select sum(tbcol) from $mt interval(1d) ===> $data00 $data01
-if $data01 != 1900 then
- return -1
-endi
-
-$st = $stPrefix . su
-sql create table $st as select sum(tbcol) from $mt interval(1d)
-
-print =============== step7 mi
-sql select min(tbcol) from $mt interval(1d)
-print select min(tbcol) from $mt interval(1d) ===> $data00 $data01
-if $data01 != 0 then
- return -1
-endi
-
-$st = $stPrefix . mi
-sql create table $st as select min(tbcol) from $mt interval(1d)
-
-print =============== step8 ma
-sql select max(tbcol) from $mt interval(1d)
-print select max(tbcol) from $mt interval(1d) ===> $data00 $data01
-if $data01 != 19 then
- return -1
-endi
-
-$st = $stPrefix . ma
-sql create table $st as select max(tbcol) from $mt interval(1d)
-
-print =============== step9 fi
-sql select first(tbcol) from $mt interval(1d)
-print select first(tbcol) from $mt interval(1d) ===> $data00 $data01
-if $data01 != 0 then
- return -1
-endi
-
-$st = $stPrefix . fi
-sql create table $st as select first(tbcol) from $mt interval(1d)
-
-print =============== step10 la
-sql select last(tbcol) from $mt interval(1d)
-print select last(tbcol) from $mt interval(1d) ===> $data00 $data01
-if $data01 != 19 then
- return -1
-endi
-
-$st = $stPrefix . la
-sql create table $st as select last(tbcol) from $mt interval(1d)
-
-print =============== step11 wh
-sql select count(tbcol) from $mt where ts < now + 4m interval(1d)
-print select count(tbcol) from $mt where ts < now + 4m interval(1d) ===> $data00 $data01
-if $data01 != 200 then
- return -1
-endi
-
-$st = $stPrefix . wh
-#sql create table $st as select count(tbcol) from $mt where ts < now + 4m interval(1d)
-
-print =============== step12 as
-sql select count(tbcol) from $mt interval(1d)
-print select count(tbcol) from $mt interval(1d) ===> $data00 $data01
-if $data01 != 200 then
- return -1
-endi
-
-$st = $stPrefix . as
-sql create table $st as select count(tbcol) as c from $mt interval(1d)
-
-print =============== step13
-print sleep 22 seconds
-sleep 22000
-
-print =============== step14
-$st = $stPrefix . c1
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 200 then
- return -1
-endi
-
-$st = $stPrefix . c2
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 200 then
- return -1
-endi
-
-$st = $stPrefix . c3
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 200 then
- return -1
-endi
-
-$st = $stPrefix . av
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 9.500000000 then
- return -1
-endi
-
-$st = $stPrefix . su
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 1900 then
- return -1
-endi
-
-$st = $stPrefix . mi
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 0 then
- return -1
-endi
-
-$st = $stPrefix . ma
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 19 then
- return -1
-endi
-
-$st = $stPrefix . fi
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 0 then
- return -1
-endi
-
-$st = $stPrefix . la
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 19 then
- return -1
-endi
-
-$st = $stPrefix . wh
-#sql select * from $st
-#print ===> select * from $st ===> $data00 $data01
-#if $data01 != 200 then
-# return -1
-#endi
-
-$st = $stPrefix . as
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 200 then
- return -1
-endi
diff --git a/tests/script/unique/stream/metrics_replica3_dnode4.sim b/tests/script/unique/stream/metrics_replica3_dnode4.sim
deleted file mode 100644
index 402712800313ff5b96f970d12ffe007f77bc26f7..0000000000000000000000000000000000000000
--- a/tests/script/unique/stream/metrics_replica3_dnode4.sim
+++ /dev/null
@@ -1,280 +0,0 @@
-system sh/stop_dnodes.sh
-
-
-
-
-
-system sh/deploy.sh -n dnode1 -i 1
-system sh/deploy.sh -n dnode2 -i 2
-system sh/deploy.sh -n dnode3 -i 3
-system sh/deploy.sh -n dnode4 -i 4
-system sh/cfg.sh -n dnode1 -c walLevel -v 1
-system sh/cfg.sh -n dnode2 -c walLevel -v 1
-system sh/cfg.sh -n dnode3 -c walLevel -v 1
-system sh/cfg.sh -n dnode4 -c walLevel -v 1
-system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4
-system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4
-system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v 4
-system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v 4
-system sh/exec.sh -n dnode1 -s start
-
-sql connect
-
-sql create dnode $hostname2
-sql create dnode $hostname3
-sql create dnode $hostname4
-system sh/exec.sh -n dnode2 -s start
-system sh/exec.sh -n dnode3 -s start
-system sh/exec.sh -n dnode4 -s start
-
-$x = 0
-createDnode:
- $x = $x + 1
- sleep 1000
- if $x == 20 then
- return -1
- endi
-sql show dnodes;
-if $data4_192.168.0.2 == offline then
- goto createDnode
-endi
-if $data4_192.168.0.3 == offline then
- goto createDnode
-endi
-if $data4_192.168.0.4 == offline then
- goto createDnode
-endi
-
-print ======================== dnode1 start
-
-$dbPrefix = m2d3_db
-$tbPrefix = m2d3_tb
-$mtPrefix = m2d3_mt
-$stPrefix = m2d3_st
-$tbNum = 10
-$rowNum = 20
-$totalNum = 200
-
-print =============== step1
-$i = 0
-$db = $dbPrefix . $i
-$mt = $mtPrefix . $i
-$st = $stPrefix . $i
-
-sql drop databae $db -x step1
-step1:
-sql create database $db replica 2
-sql use $db
-sql create table $mt (ts timestamp, tbcol int, tbcol2 float) TAGS(tgcol int)
-
-$i = 0
-while $i < $tbNum
- $tb = $tbPrefix . $i
- sql create table $tb using $mt tags( $i )
-
- $x = -1440
- $y = 0
- while $y < $rowNum
- $ms = $x . m
- sql insert into $tb values (now $ms , $y , $y )
- $x = $x + 1
- $y = $y + 1
- endw
-
- $i = $i + 1
-endw
-
-sleep 100
-
-print =============== step2 c1
-
-sql select count(*) from $mt interval(1d)
-print select count(*) from $mt interval(1d) ===> $data00 $data01
-if $data01 != 200 then
- return -1
-endi
-
-$st = $stPrefix . c1
-sql create table $st as select count(*) from $mt interval(1d)
-
-print =============== step3 c2
-sql select count(tbcol) from $mt interval(1d)
-print select count(tbcol) from $mt interval(1d) ===> $data00 $data01
-if $data01 != 200 then
- return -1
-endi
-
-$st = $stPrefix . c2
-sql create table $st as select count(tbcol) from $mt interval(1d)
-
-print =============== step4 c3
-sql select count(tbcol2) from $mt interval(1d)
-print select count(tbcol2) from $mt interval(1d) ===> $data00 $data01
-if $data01 != 200 then
- return -1
-endi
-
-$st = $stPrefix . c3
-sql create table $st as select count(tbcol2) from $mt interval(1d)
-
-print =============== step5 avg
-sql select avg(tbcol) from $mt interval(1d)
-print select avg(tbcol) from $mt interval(1d) ===> $data00 $data01
-if $data01 != 9.500000000 then
- return -1
-endi
-
-$st = $stPrefix . av
-sql create table $st as select avg(tbcol) from $mt interval(1d)
-
-print =============== step6 su
-sql select sum(tbcol) from $mt interval(1d)
-print select sum(tbcol) from $mt interval(1d) ===> $data00 $data01
-if $data01 != 1900 then
- return -1
-endi
-
-$st = $stPrefix . su
-sql create table $st as select sum(tbcol) from $mt interval(1d)
-
-print =============== step7 mi
-sql select min(tbcol) from $mt interval(1d)
-print select min(tbcol) from $mt interval(1d) ===> $data00 $data01
-if $data01 != 0 then
- return -1
-endi
-
-$st = $stPrefix . mi
-sql create table $st as select min(tbcol) from $mt interval(1d)
-
-print =============== step8 ma
-sql select max(tbcol) from $mt interval(1d)
-print select max(tbcol) from $mt interval(1d) ===> $data00 $data01
-if $data01 != 19 then
- return -1
-endi
-
-$st = $stPrefix . ma
-sql create table $st as select max(tbcol) from $mt interval(1d)
-
-print =============== step9 fi
-sql select first(tbcol) from $mt interval(1d)
-print select first(tbcol) from $mt interval(1d) ===> $data00 $data01
-if $data01 != 0 then
- return -1
-endi
-
-$st = $stPrefix . fi
-sql create table $st as select first(tbcol) from $mt interval(1d)
-
-print =============== step10 la
-sql select last(tbcol) from $mt interval(1d)
-print select last(tbcol) from $mt interval(1d) ===> $data00 $data01
-if $data01 != 19 then
- return -1
-endi
-
-$st = $stPrefix . la
-sql create table $st as select last(tbcol) from $mt interval(1d)
-
-print =============== step11 wh
-sql select count(tbcol) from $mt where ts < now + 4m interval(1d)
-print select count(tbcol) from $mt where ts < now + 4m interval(1d) ===> $data00 $data01
-if $data01 != 200 then
- return -1
-endi
-
-$st = $stPrefix . wh
-#sql create table $st as select count(tbcol) from $mt where ts < now + 4m interval(1d)
-
-print =============== step12 as
-sql select count(tbcol) from $mt interval(1d)
-print select count(tbcol) from $mt interval(1d) ===> $data00 $data01
-if $data01 != 200 then
- return -1
-endi
-
-$st = $stPrefix . as
-sql create table $st as select count(tbcol) as c from $mt interval(1d)
-
-print =============== step13
-print sleep 22 seconds
-sleep 22000
-
-print =============== step14
-$st = $stPrefix . c1
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 200 then
- return -1
-endi
-
-$st = $stPrefix . c2
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 200 then
- return -1
-endi
-
-$st = $stPrefix . c3
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 200 then
- return -1
-endi
-
-$st = $stPrefix . av
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 9.500000000 then
- return -1
-endi
-
-$st = $stPrefix . su
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 1900 then
- return -1
-endi
-
-$st = $stPrefix . mi
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 0 then
- return -1
-endi
-
-$st = $stPrefix . ma
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 19 then
- return -1
-endi
-
-$st = $stPrefix . fi
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 0 then
- return -1
-endi
-
-$st = $stPrefix . la
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 19 then
- return -1
-endi
-
-$st = $stPrefix . wh
-#sql select * from $st
-#print ===> select * from $st ===> $data00 $data01
-#if $data01 != 200 then
-# return -1
-#endi
-
-$st = $stPrefix . as
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 200 then
- return -1
-endi
diff --git a/tests/script/unique/stream/metrics_vnode_stop.sim b/tests/script/unique/stream/metrics_vnode_stop.sim
deleted file mode 100644
index cd84cb3cdf5f8096f4986a222cc371db3900f765..0000000000000000000000000000000000000000
--- a/tests/script/unique/stream/metrics_vnode_stop.sim
+++ /dev/null
@@ -1,188 +0,0 @@
-system sh/stop_dnodes.sh
-
-
-
-system sh/deploy.sh -n dnode1 -i 1
-system sh/deploy.sh -n dnode2 -i 2
-system sh/cfg.sh -n dnode1 -c walLevel -v 1
-system sh/cfg.sh -n dnode2 -c walLevel -v 1
-system sh/cfg.sh -n dnode1 -c numOfMnodes -v 2
-system sh/cfg.sh -n dnode2 -c numOfMnodes -v 2
-system sh/exec.sh -n dnode1 -s start
-
-sql connect
-
-sql create dnode $hostname2
-system sh/exec.sh -n dnode2 -s start
-$x = 0
-createDnode:
- $x = $x + 1
- sleep 1000
- if $x == 20 then
- return -1
- endi
-sql show dnodes;
-if $data4_192.168.0.2 == offline then
- goto createDnode
-endi
-print ======================== dnode start
-
-$dbPrefix = db
-$tbPrefix = tb
-$mtPrefix = mt
-$stPrefix = st
-$tbNum = 10
-$rowNum = 20
-$totalNum = 200
-
-print =============== step1
-$i = 0
-$db = $dbPrefix . $i
-$mt = $mtPrefix . $i
-$st = $stPrefix . $i
-
-sql create database $db replica 2
-sql use $db
-sql create table $mt (ts timestamp, tbcol int, tbcol2 float) TAGS(tgcol int)
-
-$i = 0
-while $i < $tbNum
- $tb = $tbPrefix . $i
- sql create table $tb using $mt tags( $i )
-
- $x = -1440
- $y = 0
- while $y < $rowNum
- $ms = $x . m
- sql insert into $tb values (now $ms , $y , $y )
- $x = $x + 1
- $y = $y + 1
- endw
-
- $i = $i + 1
-endw
-
-sleep 100
-
-print =============== step2 c1
-$i = 1
-$tb = $tbPrefix . $i
-
-sql select count(*) from $mt interval(1d)
-print select count(*) from $mt interval(1d) ===> $data00 $data01
-if $data01 != 200 then
- return -1
-endi
-
-$st = $stPrefix . c1
-sql create table $st as select count(*) from $mt interval(1d)
-
-print =============== step3
-system sh/exec.sh -n dnode2 -s stop
-
-print =============== step4
-print sleep 22 seconds
-sleep 22000
-
-print =============== step5
-$st = $stPrefix . c1
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 200 then
- return -1
-endi
-
-print ============= step6
-
-sql close
-system sh/exec.sh -n dnode1 -s stop
-system sh/exec.sh -n dnode2 -s stop
-system sh/deploy.sh -n dnode1 -i 1
-system sh/deploy.sh -n dnode2 -i 2
-system sh/cfg.sh -n dnode1 -c walLevel -v 1
-system sh/cfg.sh -n dnode2 -c walLevel -v 1
-system sh/exec.sh -n dnode2 -s start
-sleep 2000
-
-$x = 0
-connectTbase2:
- $x = $x + 1
- sleep 1000
- if $x == 20 then
- return -1
- endi
-sql connect -x connectTbase2
-sleep 2000
-
-sql create dnode $hostname1
-system sh/exec.sh -n dnode1 -s start
-sleep 2000
-print ======================== dnode start
-
-$dbPrefix = db
-$tbPrefix = tb
-$mtPrefix = mt
-$stPrefix = st
-$tbNum = 10
-$rowNum = 20
-$totalNum = 200
-
-print =============== step7
-$i = 0
-$db = $dbPrefix . $i
-$mt = $mtPrefix . $i
-$st = $stPrefix . $i
-
-sql create database $db replica 2
-sql use $db
-sql create table $mt (ts timestamp, tbcol int, tbcol2 float) TAGS(tgcol int)
-
-$i = 0
-while $i < $tbNum
- $tb = $tbPrefix . $i
- sql create table $tb using $mt tags( $i )
-
- $x = -1440
- $y = 0
- while $y < $rowNum
- $ms = $x . m
- sql insert into $tb values (now $ms , $y , $y )
- $x = $x + 1
- $y = $y + 1
- endw
-
- $i = $i + 1
-endw
-
-sleep 100
-
-print =============== step8 c1
-$i = 1
-$tb = $tbPrefix . $i
-
-sql select count(*) from $mt interval(1d)
-print select count(*) from $mt interval(1d) ===> $data00 $data01
-if $data01 != 200 then
- return -1
-endi
-
-$st = $stPrefix . c1
-sql create table $st as select count(*) from $mt interval(1d)
-
-print =============== step9
-system sh/exec.sh -n dnode1 -s stop
-
-print =============== step10
-print sleep 22 seconds
-sleep 22000
-
-print =============== step11
-$st = $stPrefix . c1
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 200 then
- return -1
-endi
-
-
-
diff --git a/tests/script/unique/stream/table_balance.sim b/tests/script/unique/stream/table_balance.sim
deleted file mode 100644
index 45e054e2efdfbd7f3d01e3a860c5ac227f3327fc..0000000000000000000000000000000000000000
--- a/tests/script/unique/stream/table_balance.sim
+++ /dev/null
@@ -1,238 +0,0 @@
-system sh/stop_dnodes.sh
-
-system sh/deploy.sh -n dnode1 -i 1
-system sh/deploy.sh -n dnode2 -i 2
-system sh/cfg.sh -n dnode1 -c statusInterval -v 1
-system sh/cfg.sh -n dnode2 -c statusInterval -v 1
-system sh/cfg.sh -n dnode1 -c balanceInterval -v 10
-system sh/cfg.sh -n dnode2 -c balanceInterval -v 10
-system sh/cfg.sh -n dnode1 -c walLevel -v 1
-system sh/cfg.sh -n dnode2 -c walLevel -v 1
-system sh/cfg.sh -n dnode1 -c mnodeEqualVnodeNum -v 0
-system sh/cfg.sh -n dnode2 -c mnodeEqualVnodeNum -v 0
-system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4
-system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4
-system sh/cfg.sh -n dnode1 -c numOfMnodes -v 2
-system sh/cfg.sh -n dnode2 -c numOfMnodes -v 2
-
-$dbPrefix = tb_db
-$tbPrefix = tb_tb
-$mtPrefix = tb_mt
-$stPrefix = tb_st
-$tbNum = 10
-$rowNum = 200
-$totalNum = 200
-
-print ========= start dnode1
-system sh/exec.sh -n dnode1 -s start
-
-sql connect
-
-print ============== step1
-$i = 0
-$db = $dbPrefix
-$mt = $mtPrefix
-$st = $stPrefix . $i
-
-sql create database $db
-sql use $db
-sql create table $mt (ts timestamp, tbcol int, tbcol2 float) TAGS(tgcol int)
-
-$i = 0
-while $i < $tbNum
- $tb = $tbPrefix . $i
- sql create table $tb using $mt tags( $i )
- if $i == 0 then
- sleep 2000
- endi
-
- $x = 0
- $y = 0
- while $y < $rowNum
- $ms = $x . s
- sql insert into $tb values (now + $ms , $y , $y )
- $x = $x + 1
- $y = $y + 1
- endw
-
- $i = $i + 1
-endw
-
-sleep 100
-
-print =============== step2
-
-$i = 1
-$tb = $tbPrefix . $i
-$st = $stPrefix . $i
-sql create table $st as select count(*), count(tbcol), count(tbcol2) from $tb interval(10s)
-
-$i = 5
-$tb = $tbPrefix . $i
-$st = $stPrefix . $i
-sql create table $st as select count(*), count(tbcol), count(tbcol2) from $tb interval(10s)
-
-$i = 8
-$tb = $tbPrefix . $i
-$st = $stPrefix . $i
-sql create table $st as select count(*), count(tbcol), count(tbcol2) from $tb interval(10s)
-
-sql show tables
-if $rows != 13 then
- return -1
-endi
-
-print =============== step3
-print sleep 22 seconds
-sleep 22000
-
-$i = 1
-$tb = $tbPrefix . $i
-$st = $stPrefix . $i
-sql select * from $st
-$r1 = $rows
-print $st ==> $r1 $data00 $data01 $data10 $data11
-
-$i = 5
-$tb = $tbPrefix . $i
-$st = $stPrefix . $i
-sql select * from $st
-$r5 = $rows
-print $st ==> $r5 $data00 $data01 $data10 $data11
-
-$i = 8
-$tb = $tbPrefix . $i
-$st = $stPrefix . $i
-sql select * from $st
-$r8 = $rows
-print $st ==> $r8 $data00 $data01 $data10 $data11
-
-print rows1=>$r1 rows5=>$r5 rows8=>$r8
-
-$x = 0
-show1:
- $x = $x + 1
- sleep 2000
- if $x == 20 then
- return -1
- endi
-sql show dnodes -x show1
-$dnode1Vnodes = $data3_192.168.0.1
-print dnode1 $dnode1Vnodes
-$dnode2Vnodes = $data3_192.168.0.2
-print dnode2 $dnode2Vnodes
-
-if $dnode1Vnodes != 0 then
- goto show1
-endi
-if $dnode2Vnodes != NULL then
- goto show1
-endi
-
-print =============== step4 start dnode2
-sql create dnode $hostname2
-system sh/exec.sh -n dnode2 -s start
-sleep 8000
-
-$x = 0
-show2:
- $x = $x + 1
- sleep 2000
- if $x == 20 then
- return -1
- endi
-sql show dnodes -x show2
-$dnode1Vnodes = $data3_192.168.0.1
-print dnode1 $dnode1Vnodes
-$dnode2Vnodes = $data3_192.168.0.2
-print dnode2 $dnode2Vnodes
-
-if $dnode1Vnodes != 2 then
- goto show2
-endi
-if $dnode2Vnodes != 2 then
- goto show2
-endi
-
-print rows1=>$r1 rows5=>$r5 rows8=>$r8
-print =============== step5
-print sleep 22 seconds
-sleep 22000
-
-print =============== step6
-$i = 1
-$tb = $tbPrefix . $i
-$st = $stPrefix . $i
-sql select * from $tb
-if $rows != $rowNum then
- return -1
-endi
-
-$i = 5
-$tb = $tbPrefix . $i
-$st = $stPrefix . $i
-sql select * from $tb
-if $rows != $rowNum then
- return -1
-endi
-
-$i = 8
-$tb = $tbPrefix . $i
-$st = $stPrefix . $i
-sql select * from $tb
-if $rows != $rowNum then
- return -1
-endi
-
-print rows1=>$r1 rows5=>$r5 rows8=>$r8
-print =============== step7
-$i = 1
-$tb = $tbPrefix . $i
-$st = $stPrefix . $i
-sql select * from $st
-print $st ==> $r1 $rows , $data00 $data01 $data10 $data11
-if $rows == 0 then
- return -1
-endi
-if $rows <= $r1 then
- return -1
-endi
-
-$i = 5
-$tb = $tbPrefix . $i
-$st = $stPrefix . $i
-sql select * from $st
-print $st ==> $r5 $rows , $data00 $data01 $data10 $data11
-if $rows == 0 then
- return -1
-endi
-if $rows <= $r5 then
- return -1
-endi
-
-
-$i = 8
-$tb = $tbPrefix . $i
-$st = $stPrefix . $i
-sql select * from $st
-print $st ==> $r8 $rows , $data00 $data01 $data10 $data11
-if $rows == 0 then
- return -1
-endi
-if $rows <= $r8 then
- return -1
-endi
-
-
-if $r1 != $r5 then
- return -1
-endi
-
-if $r8 != $r5 then
- return -1
-endi
-
-print =============== clear
-system sh/exec.sh -n dnode1 -s stop
-system sh/exec.sh -n dnode2 -s stop
-
diff --git a/tests/script/unique/stream/table_move.sim b/tests/script/unique/stream/table_move.sim
deleted file mode 100644
index 964a0c025363fd650e8051312a812fffbddaea7d..0000000000000000000000000000000000000000
--- a/tests/script/unique/stream/table_move.sim
+++ /dev/null
@@ -1,269 +0,0 @@
-system sh/stop_dnodes.sh
-
-system sh/deploy.sh -n dnode1 -i 1
-system sh/deploy.sh -n dnode2 -i 2
-system sh/deploy.sh -n dnode3 -i 3
-system sh/deploy.sh -n dnode4 -i 4
-
-system sh/cfg.sh -n dnode1 -c statusInterval -v 1
-system sh/cfg.sh -n dnode2 -c statusInterval -v 1
-system sh/cfg.sh -n dnode3 -c statusInterval -v 1
-system sh/cfg.sh -n dnode4 -c statusInterval -v 1
-
-system sh/cfg.sh -n dnode1 -c balanceInterval -v 10
-system sh/cfg.sh -n dnode2 -c balanceInterval -v 10
-system sh/cfg.sh -n dnode3 -c balanceInterval -v 10
-system sh/cfg.sh -n dnode4 -c balanceInterval -v 10
-
-system sh/cfg.sh -n dnode1 -c wallevel -v 1
-system sh/cfg.sh -n dnode2 -c wallevel -v 1
-system sh/cfg.sh -n dnode3 -c wallevel -v 1
-system sh/cfg.sh -n dnode4 -c wallevel -v 1
-
-system sh/cfg.sh -n dnode1 -c mnodeEqualVnodeNum -v 0
-system sh/cfg.sh -n dnode2 -c mnodeEqualVnodeNum -v 0
-system sh/cfg.sh -n dnode3 -c mnodeEqualVnodeNum -v 0
-system sh/cfg.sh -n dnode4 -c mnodeEqualVnodeNum -v 0
-
-system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4
-system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4
-system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v 4
-system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v 4
-
-system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 4
-system sh/cfg.sh -n dnode2 -c maxTablesPerVnode -v 4
-system sh/cfg.sh -n dnode3 -c maxTablesPerVnode -v 4
-system sh/cfg.sh -n dnode4 -c maxTablesPerVnode -v 4
-
-system sh/cfg.sh -n dnode1 -c numOfMnodes -v 3
-system sh/cfg.sh -n dnode2 -c numOfMnodes -v 3
-system sh/cfg.sh -n dnode3 -c numOfMnodes -v 3
-system sh/cfg.sh -n dnode4 -c numOfMnodes -v 3
-
-system sh/cfg.sh -n dnode1 -c maxVnodeConnections -v 1000
-system sh/cfg.sh -n dnode1 -c maxMeterConnections -v 1000
-system sh/cfg.sh -n dnode1 -c maxShellConns -v 1000
-system sh/cfg.sh -n dnode1 -c maxMgmtConnections -v 1000
-
-system sh/cfg.sh -n dnode2 -c maxVnodeConnections -v 1000
-system sh/cfg.sh -n dnode2 -c maxMeterConnections -v 1000
-system sh/cfg.sh -n dnode2 -c maxShellConns -v 1000
-system sh/cfg.sh -n dnode2 -c maxMgmtConnections -v 1000
-
-$dbPrefix = db
-$tbPrefix = tb
-$mtPrefix = mt
-$stPrefix = st
-$tbNum = 5
-$rowNum = 20
-$totalNum = 200
-
-print ============== step1
-print ========= start dnode1
-system sh/exec.sh -n dnode1 -s start
-
-sql connect
-sleep 2000
-
-$i = 0
-$db = $dbPrefix . $i
-$mt = $mtPrefix . $i
-$st = $stPrefix . $i
-
-sql create database $db
-sql use $db
-sql create table $mt (ts timestamp, tbcol int, tbcol2 float) TAGS(tgcol int)
-
-$i = 0
-while $i < $tbNum
- $tb = $tbPrefix . $i
- sql create table $tb using $mt tags( $i )
-
- $x = -20
- $y = 0
- while $y < $rowNum
- $ms = $x . s
- sql insert into $tb values (now $ms , $x , $x )
- $x = $x + 1
- $y = $y + 1
- endw
-
- $i = $i + 1
-endw
-
-sleep 100
-
-print =============== step2
-$i = 0
-$tb = $tbPrefix . $i
-$st = $stPrefix . $i
-
-sql select count(*), count(tbcol), count(tbcol2) from $tb interval(1d)
-print select count(*), count(tbcol), count(tbcol2) from $tb interval(1d) ===> $data00 $data01 $data02, $data03
-if $data01 != $rowNum then
- return -1
-endi
-if $data02 != $rowNum then
- return -1
-endi
-if $data03 != $rowNum then
- return -1
-endi
-
-sql show tables
-if $rows != 5 then
- return -1
-endi
-
-sql create table $st as select count(*), count(tbcol), count(tbcol2) from $tb interval(10s)
-
-sql show tables
-if $rows != 6 then
- return -1
-endi
-
-print =============== step3
-print sleep 22 seconds
-sleep 22000
-
-sql select * from $tb
-if $rows != 20 then
- return -1
-endi
-
-sql select * from $mt
-if $rows != 100 then
- return -1
-endi
-
-sql select * from $st
-print select * from $st => $data01
-if $rows == 0 then
- return -1
-endi
-
-$x = 0
-show1:
- $x = $x + 1
- sleep 1000
- if $x == 20 then
- return -1
- endi
-sql show dnodes -x show1
-$dnode1Vnodes = $data3_192.168.0.1
-print dnode1 $dnode1Vnodes
-$dnode2Vnodes = $data3_192.168.0.2
-print dnode2 $dnode2Vnodes
-
-if $dnode1Vnodes != 6 then
- goto show1
-endi
-if $dnode2Vnodes != NULL then
- goto show1
-endi
-
-print =============== step4 start dnode2
-sql create dnode $hostname2
-system sh/exec.sh -n dnode2 -s start
-sleep 8000
-
-$x = 0
-show2:
- $x = $x + 1
- sleep 2000
- if $x == 20 then
- return -1
- endi
-sql show dnodes -x show2
-$dnode1Vnodes = $data3_192.168.0.1
-print dnode1 $dnode1Vnodes
-$dnode2Vnodes = $data3_192.168.0.2
-print dnode2 $dnode2Vnodes
-
-if $dnode1Vnodes != 7 then
- goto show2
-endi
-if $dnode2Vnodes != 7 then
- goto show2
-endi
-
-print =============== step5 drop dnode1
-system sh/exec.sh -n dnode1 -s stop
-print stop dnode1 and sleep 10000
-sleep 10000
-
-sql drop dnode $hostname1
-print drop dnode1 and sleep 9000
-sleep 9000
-
-$x = 0
-show6:
- $x = $x + 1
- sleep 2000
- if $x == 20 then
- return -1
- endi
-sql show dnodes -x show6
-$dnode1Vnodes = $data3_192.168.0.1
-print dnode1 $dnode1Vnodes
-$dnode2Vnodes = $data3_192.168.0.2
-print dnode2 $dnode2Vnodes
-
-if $dnode1Vnodes != NULL then
- goto show6
-endi
-if $dnode2Vnodes != 6 then
- goto show6
-endi
-
-print =============== step6
-
-print select * from $tb
-sql select * from $tb
-if $rows != 20 then
- return -1
-endi
-
-print select * from $mt
-sql select * from $mt
-if $rows != 80 then
- return -1
-endi
-
-
-print =============== step7
-$i = 0
-while $i < $tbNum
- $tb = $tbPrefix . $i
-
- $x = 0
- $y = 0
- while $y < $rowNum
- $ms = $x . s
- sql insert into $tb values (now + $ms , $y , $y )
- $x = $x + 1
- $y = $y + 1
- endw
-
- $i = $i + 1
-endw
-
-$i = 0
-$tb = $tbPrefix . $i
-$st = $stPrefix . $i
-
-print =============== step8
-print sleep 22 seconds
-sleep 22000
-
-print select * from $st
-sql select * from $st
-if $rows == 0 then
- return -1
-endi
-
-
-print =============== clear
-system sh/exec.sh -n dnode1 -s stop
-system sh/exec.sh -n dnode2 -s stop
-
diff --git a/tests/script/unique/stream/table_replica1_dnode2.sim b/tests/script/unique/stream/table_replica1_dnode2.sim
deleted file mode 100644
index ccc6026e9c92975ccdd4fd12366a11f50a818d3f..0000000000000000000000000000000000000000
--- a/tests/script/unique/stream/table_replica1_dnode2.sim
+++ /dev/null
@@ -1,137 +0,0 @@
-system sh/stop_dnodes.sh
-
-system sh/deploy.sh -n dnode1 -i 1
-system sh/deploy.sh -n dnode2 -i 2
-system sh/cfg.sh -n dnode1 -c walLevel -v 1
-system sh/cfg.sh -n dnode2 -c walLevel -v 1
-system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4
-system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4
-system sh/exec.sh -n dnode1 -s start
-
-sql connect
-
-sql create dnode $hostname2
-system sh/exec.sh -n dnode2 -s start
-$x = 0
-createDnode:
- $x = $x + 1
- sleep 1000
- if $x == 20 then
- return -1
- endi
-sql show dnodes;
-if $data4_192.168.0.2 == offline then
- goto createDnode
-endi
-print ======================== dnode1 start
-
-$dbPrefix = t1d_db
-$tbPrefix = t1d_tb
-$mtPrefix = t1d_mt
-$stPrefix = t1d_st
-$tbNum = 10
-$rowNum = 20
-$totalNum = 200
-
-print =============== step1
-$i = 0
-$db = $dbPrefix . $i
-$mt = $mtPrefix . $i
-$st = $stPrefix . $i
-
-sql drop databae $db -x step1
-step1:
-sql create database $db
-sql use $db
-sql create table $mt (ts timestamp, tbcol int, tbcol2 float) TAGS(tgcol int)
-
-$i = 0
-while $i < $tbNum
- $tb = $tbPrefix . $i
- sql create table $tb using $mt tags( $i )
-
- $x = -1440
- $y = 0
- while $y < $rowNum
- $ms = $x . m
- sql insert into $tb values (now $ms , $y , $y )
- $x = $x + 1
- $y = $y + 1
- endw
-
- $i = $i + 1
-endw
-
-sleep 100
-
-print =============== step2
-
-$i = 1
-$tb = $tbPrefix . $i
-$st = $stPrefix . $i
-sql select count(*), count(tbcol), count(tbcol2) from $tb interval(1d)
-print select count(*), count(tbcol), count(tbcol2) from $tb interval(1d) --> $data00 $data01 $data02 $data03
-sql create table $st as select count(*), count(tbcol), count(tbcol2) from $tb interval(1d)
-
-$i = 5
-$tb = $tbPrefix . $i
-$st = $stPrefix . $i
-sql select count(*), count(tbcol), count(tbcol2) from $tb interval(1d)
-print select count(*), count(tbcol), count(tbcol2) from $tb interval(1d) --> $data00 $data01 $data02 $data03
-sql create table $st as select count(*), count(tbcol), count(tbcol2) from $tb interval(1d)
-
-$i = 8
-$tb = $tbPrefix . $i
-$st = $stPrefix . $i
-sql select count(*), count(tbcol), count(tbcol2) from $tb interval(1d)
-print select count(*), count(tbcol), count(tbcol2) from $tb interval(1d) --> $data00 $data01 $data02 $data03
-sql create table $st as select count(*), count(tbcol), count(tbcol2) from $tb interval(1d)
-
-sql show tables
-if $rows != 13 then
- return -1
-endi
-
-print =============== step3
-print sleep 22 seconds
-sleep 22000
-
-
-print =============== step4
-$i = 1
-$tb = $tbPrefix . $i
-$st = $stPrefix . $i
-sql select * from $st
-print $st ==> $rows $data00 $data01 $data10 $data11
-$rows1 = $rows
-if $data01 != 20 then
- return -1
-endi
-
-$i = 5
-$tb = $tbPrefix . $i
-$st = $stPrefix . $i
-sql select * from $st
-print $st => $rows $data00 $data01 $data10 $data11
-$rows5 = $rows
-if $data01 != 20 then
- return -1
-endi
-
-$i = 8
-$tb = $tbPrefix . $i
-$st = $stPrefix . $i
-sql select * from $st
-print $st ==> $rows $data00 $data01 $data10 $data11
-$rows8 = $rows
-if $data01 != 20 then
- return -1
-endi
-
-if $rows8 != $rows5 then
- return -1
-endi
-
-if $rows8 != $rows1 then
- return -1
-endi
\ No newline at end of file
diff --git a/tests/script/unique/stream/table_replica2_dnode2.sim b/tests/script/unique/stream/table_replica2_dnode2.sim
deleted file mode 100644
index 947fa0d2f9093c802a9c99c74edddeffca102d38..0000000000000000000000000000000000000000
--- a/tests/script/unique/stream/table_replica2_dnode2.sim
+++ /dev/null
@@ -1,312 +0,0 @@
-system sh/stop_dnodes.sh
-
-
-
-system sh/deploy.sh -n dnode1 -i 1
-system sh/deploy.sh -n dnode2 -i 2
-system sh/cfg.sh -n dnode1 -c walLevel -v 1
-system sh/cfg.sh -n dnode2 -c walLevel -v 1
-system sh/exec.sh -n dnode1 -s start
-
-sql connect
-
-sql create dnode $hostname2
-system sh/exec.sh -n dnode2 -s start
-$x = 0
-createDnode:
- $x = $x + 1
- sleep 1000
- if $x == 20 then
- return -1
- endi
-sql show dnodes;
-if $data4_192.168.0.2 == offline then
- goto createDnode
-endi
-print ======================== dnode1 start
-
-$dbPrefix = t2d_db
-$tbPrefix = t2d_tb
-$mtPrefix = t2d_mt
-$stPrefix = t2d_st
-$tbNum = 10
-$rowNum = 20
-$totalNum = 200
-
-print =============== step1
-$i = 0
-$db = $dbPrefix . $i
-$mt = $mtPrefix . $i
-$st = $stPrefix . $i
-
-sql drop database $db -x step1
-step1:
-sql create database $db replica 2
-sql use $db
-sql create table $mt (ts timestamp, tbcol int, tbcol2 float) TAGS(tgcol int)
-
-$i = 0
-while $i < $tbNum
- $tb = $tbPrefix . $i
- sql create table $tb using $mt tags( $i )
-
- $x = -1440
- $y = 0
- while $y < $rowNum
- $ms = $x . m
- sql insert into $tb values (now $ms , $y , $y )
- $x = $x + 1
- $y = $y + 1
- endw
-
- $i = $i + 1
-endw
-
-sleep 100
-
-print =============== step2 c1
-$i = 1
-$tb = $tbPrefix . $i
-
-sql select count(*) from $tb interval(1d)
-print select count(*) from $tb interval(1d) ===> $data00 $data01
-if $data01 != $rowNum then
- return -1
-endi
-
-$st = $stPrefix . c1
-sql create table $st as select count(*) from $tb interval(1d)
-
-print =============== step3 c2
-sql select count(tbcol) from $tb interval(1d)
-print select count(tbcol) from $tb interval(1d) ===> $data00 $data01
-if $data01 != $rowNum then
- return -1
-endi
-
-$st = $stPrefix . c2
-sql create table $st as select count(tbcol) from $tb interval(1d)
-
-print =============== step4 c3
-sql select count(tbcol2) from $tb interval(1d)
-print select count(tbcol2) from $tb interval(1d) ===> $data00 $data01
-if $data01 != $rowNum then
- return -1
-endi
-
-$st = $stPrefix . c3
-sql create table $st as select count(tbcol2) from $tb interval(1d)
-
-print =============== step5 avg
-sql select avg(tbcol) from $tb interval(1d)
-print select avg(tbcol) from $tb interval(1d) ===> $data00 $data01
-if $data01 != 9.500000000 then
- return -1
-endi
-
-$st = $stPrefix . av
-sql create table $st as select avg(tbcol) from $tb interval(1d)
-
-print =============== step6 su
-sql select sum(tbcol) from $tb interval(1d)
-print select sum(tbcol) from $tb interval(1d) ===> $data00 $data01
-if $data01 != 190 then
- return -1
-endi
-
-$st = $stPrefix . su
-sql create table $st as select sum(tbcol) from $tb interval(1d)
-
-print =============== step7 mi
-sql select min(tbcol) from $tb interval(1d)
-print select min(tbcol) from $tb interval(1d) ===> $data00 $data01
-if $data01 != 0 then
- return -1
-endi
-
-$st = $stPrefix . mi
-sql create table $st as select min(tbcol) from $tb interval(1d)
-
-print =============== step8 ma
-sql select max(tbcol) from $tb interval(1d)
-print select max(tbcol) from $tb interval(1d) ===> $data00 $data01
-if $data01 != 19 then
- return -1
-endi
-
-$st = $stPrefix . ma
-sql create table $st as select max(tbcol) from $tb interval(1d)
-
-print =============== step9 fi
-sql select first(tbcol) from $tb interval(1d)
-print select first(tbcol) from $tb interval(1d) ===> $data00 $data01
-if $data01 != 0 then
- return -1
-endi
-
-$st = $stPrefix . fi
-sql create table $st as select first(tbcol) from $tb interval(1d)
-
-print =============== step10 la
-sql select last(tbcol) from $tb interval(1d)
-print select last(tbcol) from $tb interval(1d) ===> $data00 $data01
-if $data01 != 19 then
- return -1
-endi
-
-$st = $stPrefix . la
-sql create table $st as select last(tbcol) from $tb interval(1d)
-
-print =============== step11 st
-sql select stddev(tbcol) from $tb interval(1d)
-print select stddev(tbcol) from $tb interval(1d) ===> $data00 $data01
-if $data01 != 5.766281297 then
- return -1
-endi
-
-$st = $stPrefix . std
-sql create table $st as select stddev(tbcol) from $tb interval(1d)
-
-print =============== step12 le
-sql select leastsquares(tbcol, 1, 1) from $tb interval(1d)
-print select leastsquares(tbcol, 1, 1) from $tb interval(1d) ===> $data00 $data01
-#if $data01 != @(0.000017, -25362055.126740)@ then
-# return -1
-#endi
-
-$st = $stPrefix . le
-sql create table $st as select leastsquares(tbcol, 1, 1) from $tb interval(1d)
-
-print =============== step13 pe
-
-sql select percentile(tbcol, 1) from $tb interval(1d)
-print select percentile(tbcol, 1) from $tb interval(1d) ===> $data00 $data01
-if $data01 != 0.190000000 then
- return -1
-endi
-
-$st = $stPrefix . pe
-sql create table $st as select percentile(tbcol, 1) from $tb interval(1d)
-
-print =============== step14 wh
-sql select count(tbcol) from $tb where ts < now + 4m interval(1d)
-print select count(tbcol) from $tb where ts < now + 4m interval(1d) ===> $data00 $data01
-if $data01 != $rowNum then
- return -1
-endi
-
-$st = $stPrefix . wh
-#sql create table $st as select count(tbcol) from $tb where ts < now + 4m interval(1d)
-
-print =============== step15 as
-sql select count(tbcol) from $tb interval(1d)
-print select count(tbcol) from $tb interval(1d) ===> $data00 $data01
-if $data01 != $rowNum then
- return -1
-endi
-
-$st = $stPrefix . as
-sql create table $st as select count(tbcol) as c from $tb interval(1d)
-
-print =============== step16
-print sleep 22 seconds
-sleep 22000
-
-print =============== step17
-$st = $stPrefix . c1
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != $rowNum then
- return -1
-endi
-
-$st = $stPrefix . c2
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != $rowNum then
- return -1
-endi
-
-$st = $stPrefix . c3
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != $rowNum then
- return -1
-endi
-
-$st = $stPrefix . av
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 9.500000000 then
- return -1
-endi
-
-$st = $stPrefix . su
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 190 then
- return -1
-endi
-
-$st = $stPrefix . mi
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 0 then
- return -1
-endi
-
-$st = $stPrefix . ma
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 19 then
- return -1
-endi
-
-$st = $stPrefix . fi
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 0 then
- return -1
-endi
-
-$st = $stPrefix . la
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 19 then
- return -1
-endi
-
-$st = $stPrefix . std
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 5.766281297 then
- return -1
-endi
-
-$st = $stPrefix . le
-sql select * from $st
-#print ===> select * from $st ===> $data00 $data01
-#if $data01 != @(0.000017, -25270086.331047)@ then
-# return -1
-#endi
-
-$st = $stPrefix . pe
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 0.190000000 then
- return -1
-endi
-
-$st = $stPrefix . wh
-#sql select * from $st
-#print ===> select * from $st ===> $data00 $data01
-#if $data01 != $rowNum then
-# return -1
-#endi
-
-$st = $stPrefix . as
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != $rowNum then
- return -1
-endi
diff --git a/tests/script/unique/stream/table_replica2_dnode2_vnoden.sim b/tests/script/unique/stream/table_replica2_dnode2_vnoden.sim
deleted file mode 100644
index 75300362393eaa543740307d4d11f9a4eabbbc50..0000000000000000000000000000000000000000
--- a/tests/script/unique/stream/table_replica2_dnode2_vnoden.sim
+++ /dev/null
@@ -1,314 +0,0 @@
-system sh/stop_dnodes.sh
-
-
-
-system sh/deploy.sh -n dnode1 -i 1
-system sh/deploy.sh -n dnode2 -i 2
-system sh/cfg.sh -n dnode1 -c walLevel -v 1
-system sh/cfg.sh -n dnode2 -c walLevel -v 1
-system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4
-system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4
-system sh/exec.sh -n dnode1 -s start
-
-sql connect
-
-sql create dnode $hostname2
-system sh/exec.sh -n dnode2 -s start
-$x = 0
-createDnode:
- $x = $x + 1
- sleep 1000
- if $x == 20 then
- return -1
- endi
-sql show dnodes;
-if $data4_192.168.0.2 == offline then
- goto createDnode
-endi
-print ======================== dnode1 start
-
-$dbPrefix = t2dv_db
-$tbPrefix = t2dv_tb
-$mtPrefix = t2dv_mt
-$stPrefix = t2dv_st
-$tbNum = 10
-$rowNum = 20
-$totalNum = 200
-
-print =============== step1
-$i = 0
-$db = $dbPrefix . $i
-$mt = $mtPrefix . $i
-$st = $stPrefix . $i
-
-sql drop databae $db -x step1
-step1:
-sql create database $db replica 2
-sql use $db
-sql create table $mt (ts timestamp, tbcol int, tbcol2 float) TAGS(tgcol int)
-
-$i = 0
-while $i < $tbNum
- $tb = $tbPrefix . $i
- sql create table $tb using $mt tags( $i )
-
- $x = -1440
- $y = 0
- while $y < $rowNum
- $ms = $x . m
- sql insert into $tb values (now $ms , $y , $y )
- $x = $x + 1
- $y = $y + 1
- endw
-
- $i = $i + 1
-endw
-
-sleep 100
-
-print =============== step2 c1
-$i = 1
-$tb = $tbPrefix . $i
-
-sql select count(*) from $tb interval(1d)
-print select count(*) from $tb interval(1d) ===> $data00 $data01
-if $data01 != $rowNum then
- return -1
-endi
-
-$st = $stPrefix . c1
-sql create table $st as select count(*) from $tb interval(1d)
-
-print =============== step3 c2
-sql select count(tbcol) from $tb interval(1d)
-print select count(tbcol) from $tb interval(1d) ===> $data00 $data01
-if $data01 != $rowNum then
- return -1
-endi
-
-$st = $stPrefix . c2
-sql create table $st as select count(tbcol) from $tb interval(1d)
-
-print =============== step4 c3
-sql select count(tbcol2) from $tb interval(1d)
-print select count(tbcol2) from $tb interval(1d) ===> $data00 $data01
-if $data01 != $rowNum then
- return -1
-endi
-
-$st = $stPrefix . c3
-sql create table $st as select count(tbcol2) from $tb interval(1d)
-
-print =============== step5 avg
-sql select avg(tbcol) from $tb interval(1d)
-print select avg(tbcol) from $tb interval(1d) ===> $data00 $data01
-if $data01 != 9.500000000 then
- return -1
-endi
-
-$st = $stPrefix . av
-sql create table $st as select avg(tbcol) from $tb interval(1d)
-
-print =============== step6 su
-sql select sum(tbcol) from $tb interval(1d)
-print select sum(tbcol) from $tb interval(1d) ===> $data00 $data01
-if $data01 != 190 then
- return -1
-endi
-
-$st = $stPrefix . su
-sql create table $st as select sum(tbcol) from $tb interval(1d)
-
-print =============== step7 mi
-sql select min(tbcol) from $tb interval(1d)
-print select min(tbcol) from $tb interval(1d) ===> $data00 $data01
-if $data01 != 0 then
- return -1
-endi
-
-$st = $stPrefix . mi
-sql create table $st as select min(tbcol) from $tb interval(1d)
-
-print =============== step8 ma
-sql select max(tbcol) from $tb interval(1d)
-print select max(tbcol) from $tb interval(1d) ===> $data00 $data01
-if $data01 != 19 then
- return -1
-endi
-
-$st = $stPrefix . ma
-sql create table $st as select max(tbcol) from $tb interval(1d)
-
-print =============== step9 fi
-sql select first(tbcol) from $tb interval(1d)
-print select first(tbcol) from $tb interval(1d) ===> $data00 $data01
-if $data01 != 0 then
- return -1
-endi
-
-$st = $stPrefix . fi
-sql create table $st as select first(tbcol) from $tb interval(1d)
-
-print =============== step10 la
-sql select last(tbcol) from $tb interval(1d)
-print select last(tbcol) from $tb interval(1d) ===> $data00 $data01
-if $data01 != 19 then
- return -1
-endi
-
-$st = $stPrefix . la
-sql create table $st as select last(tbcol) from $tb interval(1d)
-
-print =============== step11 st
-sql select stddev(tbcol) from $tb interval(1d)
-print select stddev(tbcol) from $tb interval(1d) ===> $data00 $data01
-if $data01 != 5.766281297 then
- return -1
-endi
-
-$st = $stPrefix . std
-sql create table $st as select stddev(tbcol) from $tb interval(1d)
-
-print =============== step12 le
-sql select leastsquares(tbcol, 1, 1) from $tb interval(1d)
-print select leastsquares(tbcol, 1, 1) from $tb interval(1d) ===> $data00 $data01
-#if $data01 != @(0.000017, -25362055.126740)@ then
-# return -1
-#endi
-
-$st = $stPrefix . le
-sql create table $st as select leastsquares(tbcol, 1, 1) from $tb interval(1d)
-
-print =============== step13 pe
-
-sql select percentile(tbcol, 1) from $tb interval(1d)
-print select percentile(tbcol, 1) from $tb interval(1d) ===> $data00 $data01
-if $data01 != 0.190000000 then
- return -1
-endi
-
-$st = $stPrefix . pe
-sql create table $st as select percentile(tbcol, 1) from $tb interval(1d)
-
-print =============== step14 wh
-sql select count(tbcol) from $tb where ts < now + 4m interval(1d)
-print select count(tbcol) from $tb where ts < now + 4m interval(1d) ===> $data00 $data01
-if $data01 != $rowNum then
- return -1
-endi
-
-$st = $stPrefix . wh
-#sql create table $st as select count(tbcol) from $tb where ts < now + 4m interval(1d)
-
-print =============== step15 as
-sql select count(tbcol) from $tb interval(1d)
-print select count(tbcol) from $tb interval(1d) ===> $data00 $data01
-if $data01 != $rowNum then
- return -1
-endi
-
-$st = $stPrefix . as
-sql create table $st as select count(tbcol) as c from $tb interval(1d)
-
-print =============== step16
-print sleep 22 seconds
-sleep 22000
-
-print =============== step17
-$st = $stPrefix . c1
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != $rowNum then
- return -1
-endi
-
-$st = $stPrefix . c2
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != $rowNum then
- return -1
-endi
-
-$st = $stPrefix . c3
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != $rowNum then
- return -1
-endi
-
-$st = $stPrefix . av
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 9.500000000 then
- return -1
-endi
-
-$st = $stPrefix . su
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 190 then
- return -1
-endi
-
-$st = $stPrefix . mi
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 0 then
- return -1
-endi
-
-$st = $stPrefix . ma
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 19 then
- return -1
-endi
-
-$st = $stPrefix . fi
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 0 then
- return -1
-endi
-
-$st = $stPrefix . la
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 19 then
- return -1
-endi
-
-$st = $stPrefix . std
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 5.766281297 then
- return -1
-endi
-
-$st = $stPrefix . le
-sql select * from $st
-#print ===> select * from $st ===> $data00 $data01
-#if $data01 != @(0.000017, -25270086.331047)@ then
-# return -1
-#endi
-
-$st = $stPrefix . pe
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 0.190000000 then
- return -1
-endi
-
-$st = $stPrefix . wh
-#sql select * from $st
-#print ===> select * from $st ===> $data00 $data01
-#if $data01 != $rowNum then
-# return -1
-#endi
-
-$st = $stPrefix . as
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != $rowNum then
- return -1
-endi
diff --git a/tests/script/unique/stream/table_replica2_dnode3.sim b/tests/script/unique/stream/table_replica2_dnode3.sim
deleted file mode 100644
index 49eb3563b3964f05f31d72a8fd1ff12f2b5b3a03..0000000000000000000000000000000000000000
--- a/tests/script/unique/stream/table_replica2_dnode3.sim
+++ /dev/null
@@ -1,325 +0,0 @@
-system sh/stop_dnodes.sh
-
-
-
-
-system sh/deploy.sh -n dnode1 -i 1
-system sh/deploy.sh -n dnode2 -i 2
-system sh/deploy.sh -n dnode3 -i 3
-system sh/cfg.sh -n dnode1 -c walLevel -v 1
-system sh/cfg.sh -n dnode2 -c walLevel -v 1
-system sh/cfg.sh -n dnode3 -c walLevel -v 1
-system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4
-system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4
-system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v 4
-system sh/exec.sh -n dnode1 -s start
-
-
-sql connect
-
-sql create dnode $hostname2
-sql create dnode $hostname3
-system sh/exec.sh -n dnode2 -s start
-system sh/exec.sh -n dnode3 -s start
-$x = 0
-createDnode:
- $x = $x + 1
- sleep 1000
- if $x == 20 then
- return -1
- endi
-sql show dnodes;
-if $data4_192.168.0.2 == offline then
- goto createDnode
-endi
-if $data4_192.168.0.3 == offline then
- goto createDnode
-endi
-
-print ======================== dnode1 start
-
-$dbPrefix = t2d3_db
-$tbPrefix = t2d3_tb
-$mtPrefix = t2d3_mt
-$stPrefix = t2d3_st
-$tbNum = 10
-$rowNum = 20
-$totalNum = 200
-
-print =============== step1
-$i = 0
-$db = $dbPrefix . $i
-$mt = $mtPrefix . $i
-$st = $stPrefix . $i
-
-sql drop databae $db -x step1
-step1:
-sql create database $db replica 2
-sql use $db
-sql create table $mt (ts timestamp, tbcol int, tbcol2 float) TAGS(tgcol int)
-
-$i = 0
-while $i < $tbNum
- $tb = $tbPrefix . $i
- sql create table $tb using $mt tags( $i )
-
- $x = -1440
- $y = 0
- while $y < $rowNum
- $ms = $x . m
- sql insert into $tb values (now $ms , $y , $y )
- $x = $x + 1
- $y = $y + 1
- endw
-
- $i = $i + 1
-endw
-
-sleep 100
-
-print =============== step2 c1
-$i = 1
-$tb = $tbPrefix . $i
-
-sql select count(*) from $tb interval(1d)
-print select count(*) from $tb interval(1d) ===> $data00 $data01
-if $data01 != $rowNum then
- return -1
-endi
-
-$st = $stPrefix . c1
-sql create table $st as select count(*) from $tb interval(1d)
-
-print =============== step3 c2
-sql select count(tbcol) from $tb interval(1d)
-print select count(tbcol) from $tb interval(1d) ===> $data00 $data01
-if $data01 != $rowNum then
- return -1
-endi
-
-$st = $stPrefix . c2
-sql create table $st as select count(tbcol) from $tb interval(1d)
-
-print =============== step4 c3
-sql select count(tbcol2) from $tb interval(1d)
-print select count(tbcol2) from $tb interval(1d) ===> $data00 $data01
-if $data01 != $rowNum then
- return -1
-endi
-
-$st = $stPrefix . c3
-sql create table $st as select count(tbcol2) from $tb interval(1d)
-
-print =============== step5 avg
-sql select avg(tbcol) from $tb interval(1d)
-print select avg(tbcol) from $tb interval(1d) ===> $data00 $data01
-if $data01 != 9.500000000 then
- return -1
-endi
-
-$st = $stPrefix . av
-sql create table $st as select avg(tbcol) from $tb interval(1d)
-
-print =============== step6 su
-sql select sum(tbcol) from $tb interval(1d)
-print select sum(tbcol) from $tb interval(1d) ===> $data00 $data01
-if $data01 != 190 then
- return -1
-endi
-
-$st = $stPrefix . su
-sql create table $st as select sum(tbcol) from $tb interval(1d)
-
-print =============== step7 mi
-sql select min(tbcol) from $tb interval(1d)
-print select min(tbcol) from $tb interval(1d) ===> $data00 $data01
-if $data01 != 0 then
- return -1
-endi
-
-$st = $stPrefix . mi
-sql create table $st as select min(tbcol) from $tb interval(1d)
-
-print =============== step8 ma
-sql select max(tbcol) from $tb interval(1d)
-print select max(tbcol) from $tb interval(1d) ===> $data00 $data01
-if $data01 != 19 then
- return -1
-endi
-
-$st = $stPrefix . ma
-sql create table $st as select max(tbcol) from $tb interval(1d)
-
-print =============== step9 fi
-sql select first(tbcol) from $tb interval(1d)
-print select first(tbcol) from $tb interval(1d) ===> $data00 $data01
-if $data01 != 0 then
- return -1
-endi
-
-$st = $stPrefix . fi
-sql create table $st as select first(tbcol) from $tb interval(1d)
-
-print =============== step10 la
-sql select last(tbcol) from $tb interval(1d)
-print select last(tbcol) from $tb interval(1d) ===> $data00 $data01
-if $data01 != 19 then
- return -1
-endi
-
-$st = $stPrefix . la
-sql create table $st as select last(tbcol) from $tb interval(1d)
-
-print =============== step11 st
-sql select stddev(tbcol) from $tb interval(1d)
-print select stddev(tbcol) from $tb interval(1d) ===> $data00 $data01
-if $data01 != 5.766281297 then
- return -1
-endi
-
-$st = $stPrefix . std
-sql create table $st as select stddev(tbcol) from $tb interval(1d)
-
-print =============== step12 le
-sql select leastsquares(tbcol, 1, 1) from $tb interval(1d)
-print select leastsquares(tbcol, 1, 1) from $tb interval(1d) ===> $data00 $data01
-#if $data01 != @(0.000017, -25362055.126740)@ then
-# return -1
-#endi
-
-$st = $stPrefix . le
-sql create table $st as select leastsquares(tbcol, 1, 1) from $tb interval(1d)
-
-print =============== step13 pe
-
-sql select percentile(tbcol, 1) from $tb interval(1d)
-print select percentile(tbcol, 1) from $tb interval(1d) ===> $data00 $data01
-if $data01 != 0.190000000 then
- return -1
-endi
-
-$st = $stPrefix . pe
-sql create table $st as select percentile(tbcol, 1) from $tb interval(1d)
-
-print =============== step14 wh
-sql select count(tbcol) from $tb where ts < now + 4m interval(1d)
-print select count(tbcol) from $tb where ts < now + 4m interval(1d) ===> $data00 $data01
-if $data01 != $rowNum then
- return -1
-endi
-
-$st = $stPrefix . wh
-#sql create table $st as select count(tbcol) from $tb where ts < now + 4m interval(1d)
-
-print =============== step15 as
-sql select count(tbcol) from $tb interval(1d)
-print select count(tbcol) from $tb interval(1d) ===> $data00 $data01
-if $data01 != $rowNum then
- return -1
-endi
-
-$st = $stPrefix . as
-sql create table $st as select count(tbcol) as c from $tb interval(1d)
-
-print =============== step16
-print sleep 22 seconds
-sleep 22000
-
-print =============== step17
-$st = $stPrefix . c1
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != $rowNum then
- return -1
-endi
-
-$st = $stPrefix . c2
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != $rowNum then
- return -1
-endi
-
-$st = $stPrefix . c3
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != $rowNum then
- return -1
-endi
-
-$st = $stPrefix . av
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 9.500000000 then
- return -1
-endi
-
-$st = $stPrefix . su
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 190 then
- return -1
-endi
-
-$st = $stPrefix . mi
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 0 then
- return -1
-endi
-
-$st = $stPrefix . ma
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 19 then
- return -1
-endi
-
-$st = $stPrefix . fi
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 0 then
- return -1
-endi
-
-$st = $stPrefix . la
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 19 then
- return -1
-endi
-
-$st = $stPrefix . std
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 5.766281297 then
- return -1
-endi
-
-$st = $stPrefix . le
-sql select * from $st
-#print ===> select * from $st ===> $data00 $data01
-#if $data01 != @(0.000017, -25270086.331047)@ then
-# return -1
-#endi
-
-$st = $stPrefix . pe
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 0.190000000 then
- return -1
-endi
-
-$st = $stPrefix . wh
-#sql select * from $st
-#print ===> select * from $st ===> $data00 $data01
-#if $data01 != $rowNum then
-# return -1
-#endi
-
-$st = $stPrefix . as
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != $rowNum then
- return -1
-endi
diff --git a/tests/script/unique/stream/table_replica3_dnode4.sim b/tests/script/unique/stream/table_replica3_dnode4.sim
deleted file mode 100644
index 2cc443c72fc656b87ca8c1d330381ed5078cd755..0000000000000000000000000000000000000000
--- a/tests/script/unique/stream/table_replica3_dnode4.sim
+++ /dev/null
@@ -1,333 +0,0 @@
-system sh/stop_dnodes.sh
-
-
-
-
-
-system sh/deploy.sh -n dnode1 -i 1
-system sh/deploy.sh -n dnode2 -i 2
-system sh/deploy.sh -n dnode3 -i 3
-system sh/deploy.sh -n dnode4 -i 4
-system sh/cfg.sh -n dnode1 -c walLevel -v 1
-system sh/cfg.sh -n dnode2 -c walLevel -v 1
-system sh/cfg.sh -n dnode3 -c walLevel -v 1
-system sh/cfg.sh -n dnode4 -c walLevel -v 1
-system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4
-system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4
-system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v 4
-system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v 4
-system sh/exec.sh -n dnode1 -s start
-
-sql connect
-
-sql create dnode $hostname2
-sql create dnode $hostname3
-sql create dnode $hostname4
-system sh/exec.sh -n dnode2 -s start
-system sh/exec.sh -n dnode3 -s start
-system sh/exec.sh -n dnode4 -s start
-$x = 0
-createDnode:
- $x = $x + 1
- sleep 1000
- if $x == 20 then
- return -1
- endi
-sql show dnodes;
-if $data4_192.168.0.2 == offline then
- goto createDnode
-endi
-if $data4_192.168.0.3 == offline then
- goto createDnode
-endi
-if $data4_192.168.0.4 == offline then
- goto createDnode
-endi
-
-print ======================== dnode1 start
-
-$dbPrefix = t3d_db
-$tbPrefix = t3d_tb
-$mtPrefix = t3d_mt
-$stPrefix = t3d_st
-$tbNum = 10
-$rowNum = 20
-$totalNum = 200
-
-print =============== step1
-$i = 0
-$db = $dbPrefix . $i
-$mt = $mtPrefix . $i
-$st = $stPrefix . $i
-
-sql drop databae $db -x step1
-step1:
-sql create database $db replica 3
-sql use $db
-sql create table $mt (ts timestamp, tbcol int, tbcol2 float) TAGS(tgcol int)
-
-$i = 0
-while $i < $tbNum
- $tb = $tbPrefix . $i
- sql create table $tb using $mt tags( $i )
-
- $x = -1440
- $y = 0
- while $y < $rowNum
- $ms = $x . m
- sql insert into $tb values (now $ms , $y , $y )
- $x = $x + 1
- $y = $y + 1
- endw
-
- $i = $i + 1
-endw
-
-sleep 100
-
-print =============== step2 c1
-$i = 1
-$tb = $tbPrefix . $i
-
-sql select count(*) from $tb interval(1d)
-print select count(*) from $tb interval(1d) ===> $data00 $data01
-if $data01 != $rowNum then
- return -1
-endi
-
-$st = $stPrefix . c1
-sql create table $st as select count(*) from $tb interval(1d)
-
-print =============== step3 c2
-sql select count(tbcol) from $tb interval(1d)
-print select count(tbcol) from $tb interval(1d) ===> $data00 $data01
-if $data01 != $rowNum then
- return -1
-endi
-
-$st = $stPrefix . c2
-sql create table $st as select count(tbcol) from $tb interval(1d)
-
-print =============== step4 c3
-sql select count(tbcol2) from $tb interval(1d)
-print select count(tbcol2) from $tb interval(1d) ===> $data00 $data01
-if $data01 != $rowNum then
- return -1
-endi
-
-$st = $stPrefix . c3
-sql create table $st as select count(tbcol2) from $tb interval(1d)
-
-print =============== step5 avg
-sql select avg(tbcol) from $tb interval(1d)
-print select avg(tbcol) from $tb interval(1d) ===> $data00 $data01
-if $data01 != 9.500000000 then
- return -1
-endi
-
-$st = $stPrefix . av
-sql create table $st as select avg(tbcol) from $tb interval(1d)
-
-print =============== step6 su
-sql select sum(tbcol) from $tb interval(1d)
-print select sum(tbcol) from $tb interval(1d) ===> $data00 $data01
-if $data01 != 190 then
- return -1
-endi
-
-$st = $stPrefix . su
-sql create table $st as select sum(tbcol) from $tb interval(1d)
-
-print =============== step7 mi
-sql select min(tbcol) from $tb interval(1d)
-print select min(tbcol) from $tb interval(1d) ===> $data00 $data01
-if $data01 != 0 then
- return -1
-endi
-
-$st = $stPrefix . mi
-sql create table $st as select min(tbcol) from $tb interval(1d)
-
-print =============== step8 ma
-sql select max(tbcol) from $tb interval(1d)
-print select max(tbcol) from $tb interval(1d) ===> $data00 $data01
-if $data01 != 19 then
- return -1
-endi
-
-$st = $stPrefix . ma
-sql create table $st as select max(tbcol) from $tb interval(1d)
-
-print =============== step9 fi
-sql select first(tbcol) from $tb interval(1d)
-print select first(tbcol) from $tb interval(1d) ===> $data00 $data01
-if $data01 != 0 then
- return -1
-endi
-
-$st = $stPrefix . fi
-sql create table $st as select first(tbcol) from $tb interval(1d)
-
-print =============== step10 la
-sql select last(tbcol) from $tb interval(1d)
-print select last(tbcol) from $tb interval(1d) ===> $data00 $data01
-if $data01 != 19 then
- return -1
-endi
-
-$st = $stPrefix . la
-sql create table $st as select last(tbcol) from $tb interval(1d)
-
-print =============== step11 st
-sql select stddev(tbcol) from $tb interval(1d)
-print select stddev(tbcol) from $tb interval(1d) ===> $data00 $data01
-if $data01 != 5.766281297 then
- return -1
-endi
-
-$st = $stPrefix . std
-sql create table $st as select stddev(tbcol) from $tb interval(1d)
-
-print =============== step12 le
-sql select leastsquares(tbcol, 1, 1) from $tb interval(1d)
-print select leastsquares(tbcol, 1, 1) from $tb interval(1d) ===> $data00 $data01
-#if $data01 != @(0.000017, -25362055.126740)@ then
-# return -1
-#endi
-
-$st = $stPrefix . le
-sql create table $st as select leastsquares(tbcol, 1, 1) from $tb interval(1d)
-
-print =============== step13 pe
-
-sql select percentile(tbcol, 1) from $tb interval(1d)
-print select percentile(tbcol, 1) from $tb interval(1d) ===> $data00 $data01
-if $data01 != 0.190000000 then
- return -1
-endi
-
-$st = $stPrefix . pe
-sql create table $st as select percentile(tbcol, 1) from $tb interval(1d)
-
-print =============== step14 wh
-sql select count(tbcol) from $tb where ts < now + 4m interval(1d)
-print select count(tbcol) from $tb where ts < now + 4m interval(1d) ===> $data00 $data01
-if $data01 != $rowNum then
- return -1
-endi
-
-$st = $stPrefix . wh
-#sql create table $st as select count(tbcol) from $tb where ts < now + 4m interval(1d)
-
-print =============== step15 as
-sql select count(tbcol) from $tb interval(1d)
-print select count(tbcol) from $tb interval(1d) ===> $data00 $data01
-if $data01 != $rowNum then
- return -1
-endi
-
-$st = $stPrefix . as
-sql create table $st as select count(tbcol) as c from $tb interval(1d)
-
-print =============== step16
-print sleep 22 seconds
-sleep 22000
-
-print =============== step17
-$st = $stPrefix . c1
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != $rowNum then
- return -1
-endi
-
-$st = $stPrefix . c2
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != $rowNum then
- return -1
-endi
-
-$st = $stPrefix . c3
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != $rowNum then
- return -1
-endi
-
-$st = $stPrefix . av
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 9.500000000 then
- return -1
-endi
-
-$st = $stPrefix . su
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 190 then
- return -1
-endi
-
-$st = $stPrefix . mi
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 0 then
- return -1
-endi
-
-$st = $stPrefix . ma
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 19 then
- return -1
-endi
-
-$st = $stPrefix . fi
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 0 then
- return -1
-endi
-
-$st = $stPrefix . la
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 19 then
- return -1
-endi
-
-$st = $stPrefix . std
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 5.766281297 then
- return -1
-endi
-
-$st = $stPrefix . le
-sql select * from $st
-#print ===> select * from $st ===> $data00 $data01
-#if $data01 != @(0.000017, -25270086.331047)@ then
-# return -1
-#endi
-
-$st = $stPrefix . pe
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != 0.190000000 then
- return -1
-endi
-
-$st = $stPrefix . wh
-#sql select * from $st
-#print ===> select * from $st ===> $data00 $data01
-#if $data01 != $rowNum then
-# return -1
-#endi
-
-$st = $stPrefix . as
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != $rowNum then
- return -1
-endi
diff --git a/tests/script/unique/stream/table_vnode_stop.sim b/tests/script/unique/stream/table_vnode_stop.sim
deleted file mode 100644
index 625de32a8d7a1e5336dd10f313565bdbc0daf0fc..0000000000000000000000000000000000000000
--- a/tests/script/unique/stream/table_vnode_stop.sim
+++ /dev/null
@@ -1,189 +0,0 @@
-system sh/stop_dnodes.sh
-
-
-
-system sh/deploy.sh -n dnode1 -i 1
-system sh/deploy.sh -n dnode2 -i 2
-system sh/cfg.sh -n dnode1 -c walLevel -v 1
-system sh/cfg.sh -n dnode2 -c walLevel -v 1
-system sh/cfg.sh -n dnode1 -c numOfMnodes -v 2
-system sh/cfg.sh -n dnode2 -c numOfMnodes -v 2
-system sh/exec.sh -n dnode1 -s start
-
-sql connect
-
-sql create dnode $hostname2
-system sh/exec.sh -n dnode2 -s start
-$x = 0
-createDnode:
- $x = $x + 1
- sleep 1000
- if $x == 20 then
- return -1
- endi
-sql show dnodes;
-if $data4_192.168.0.2 == offline then
- goto createDnode
-endi
-
-print ======================== dnode start
-
-$dbPrefix = db
-$tbPrefix = tb
-$mtPrefix = mt
-$stPrefix = st
-$tbNum = 10
-$rowNum = 20
-$totalNum = 200
-
-print =============== step1
-$i = 0
-$db = $dbPrefix . $i
-$mt = $mtPrefix . $i
-$st = $stPrefix . $i
-
-sql create database $db replica 2
-sql use $db
-sql create table $mt (ts timestamp, tbcol int, tbcol2 float) TAGS(tgcol int)
-
-$i = 0
-while $i < $tbNum
- $tb = $tbPrefix . $i
- sql create table $tb using $mt tags( $i )
-
- $x = -1440
- $y = 0
- while $y < $rowNum
- $ms = $x . m
- sql insert into $tb values (now $ms , $y , $y )
- $x = $x + 1
- $y = $y + 1
- endw
-
- $i = $i + 1
-endw
-
-sleep 100
-
-print =============== step2 c1
-$i = 1
-$tb = $tbPrefix . $i
-
-sql select count(*) from $tb interval(1d)
-print select count(*) from $tb interval(1d) ===> $data00 $data01
-if $data01 != $rowNum then
- return -1
-endi
-
-$st = $stPrefix . c1
-sql create table $st as select count(*) from $tb interval(1d)
-
-print =============== step3
-system sh/exec.sh -n dnode2 -s stop
-
-print =============== step4
-print sleep 22 seconds
-sleep 22000
-
-print =============== step5
-$st = $stPrefix . c1
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != $rowNum then
- return -1
-endi
-
-print ============= step6
-
-sql close
-system sh/exec.sh -n dnode1 -s stop
-system sh/exec.sh -n dnode2 -s stop
-system sh/deploy.sh -n dnode1 -i 1
-system sh/deploy.sh -n dnode2 -i 2
-system sh/cfg.sh -n dnode1 -c walLevel -v 1
-system sh/cfg.sh -n dnode2 -c walLevel -v 1
-sleep 2000
-system sh/exec.sh -n dnode2 -s start
-
-$x = 0
-connectTbase2:
- $x = $x + 1
- sleep 1000
- if $x == 20 then
- return -1
- endi
-sql connect -x connectTbase2
-sleep 2000
-
-sql create dnode $hostname1
-system sh/exec.sh -n dnode1 -s start
-sleep 2000
-print ======================== dnode start
-
-$dbPrefix = db
-$tbPrefix = tb
-$mtPrefix = mt
-$stPrefix = st
-$tbNum = 10
-$rowNum = 20
-$totalNum = 200
-
-print =============== step7
-$i = 0
-$db = $dbPrefix . $i
-$mt = $mtPrefix . $i
-$st = $stPrefix . $i
-
-sql create database $db replica 2
-sql use $db
-sql create table $mt (ts timestamp, tbcol int, tbcol2 float) TAGS(tgcol int)
-
-$i = 0
-while $i < $tbNum
- $tb = $tbPrefix . $i
- sql create table $tb using $mt tags( $i )
-
- $x = -1440
- $y = 0
- while $y < $rowNum
- $ms = $x . m
- sql insert into $tb values (now $ms , $y , $y )
- $x = $x + 1
- $y = $y + 1
- endw
-
- $i = $i + 1
-endw
-
-sleep 100
-
-print =============== step8 c1
-$i = 1
-$tb = $tbPrefix . $i
-
-sql select count(*) from $tb interval(1d)
-print select count(*) from $tb interval(1d) ===> $data00 $data01
-if $data01 != $rowNum then
- return -1
-endi
-
-$st = $stPrefix . c1
-sql create table $st as select count(*) from $tb interval(1d)
-
-print =============== step9
-system sh/exec.sh -n dnode1 -s stop
-
-print =============== step10
-print sleep 22 seconds
-sleep 22000
-
-print =============== step11
-$st = $stPrefix . c1
-sql select * from $st
-print ===> select * from $st ===> $data00 $data01
-if $data01 != $rowNum then
- return -1
-endi
-
-
-
diff --git a/tests/script/unique/stream/testSuite.sim b/tests/script/unique/stream/testSuite.sim
deleted file mode 100644
index bbf5da3d376d9eccc02aa61b1122cadb5fc04813..0000000000000000000000000000000000000000
--- a/tests/script/unique/stream/testSuite.sim
+++ /dev/null
@@ -1,15 +0,0 @@
-#run unique/stream/table_replica1_dnode2.sim
-#run unique/stream/metrics_replica1_dnode2.sim
-#run unique/stream/table_replica2_dnode2.sim
-#run unique/stream/metrics_replica2_dnode2.sim
-#run unique/stream/table_replica2_dnode2_vnoden.sim
-#run unique/stream/metrics_replica2_dnode2_vnoden.sim
-#run unique/stream/table_replica2_dnode3.sim
-#run unique/stream/metrics_replica2_dnode3.sim
-#run unique/stream/table_replica3_dnode4.sim
-#run unique/stream/metrics_replica3_dnode4.sim
-#run unique/stream/table_vnode_stop.sim
-#run unique/stream/metrics_vnode_stop.sim
-##run unique/stream/table_balance.sim
-##run unique/stream/metrics_balance.sim
-##run unique/stream/table_move.sim
\ No newline at end of file
diff --git a/tests/system-test/1-insert/opentsdb_telnet_line_taosc_insert.py b/tests/system-test/1-insert/opentsdb_telnet_line_taosc_insert.py
new file mode 100644
index 0000000000000000000000000000000000000000..4c56511d2717167d243e162776d4ffe75fb056f5
--- /dev/null
+++ b/tests/system-test/1-insert/opentsdb_telnet_line_taosc_insert.py
@@ -0,0 +1,1489 @@
+###################################################################
+# Copyright (c) 2021 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import traceback
+import random
+from taos.error import SchemalessError
+import time
+import numpy as np
+from util.log import *
+from util.cases import *
+from util.sql import *
+from util.common import tdCom
+from util.types import TDSmlProtocolType, TDSmlTimestampType
+import threading
+
+class TDTestCase:
+ def init(self, conn, logSql):
+ tdLog.debug("start to execute %s" % __file__)
+ tdSql.init(conn.cursor(), logSql)
+ self._conn = conn
+ self.smlChildTableName_value = "id"
+
+ def createDb(self, name="test", db_update_tag=0, protocol=None):
+ if protocol == "telnet-tcp":
+ name = "opentsdb_telnet"
+
+ if db_update_tag == 0:
+ tdSql.execute(f"drop database if exists {name}")
+ tdSql.execute(f"create database if not exists {name} precision 'ms'")
+ else:
+ tdSql.execute(f"drop database if exists {name}")
+ tdSql.execute(f"create database if not exists {name} precision 'ms' update 1")
+ tdSql.execute(f'use {name}')
+
+ def timeTrans(self, time_value, ts_type):
+ if int(time_value) == 0:
+ ts = time.time()
+ else:
+ if ts_type == TDSmlTimestampType.MILLI_SECOND.value or ts_type == None:
+ ts = int(''.join(list(filter(str.isdigit, time_value))))/1000
+ elif ts_type == TDSmlTimestampType.SECOND.value:
+ ts = int(''.join(list(filter(str.isdigit, time_value))))/1
+ ulsec = repr(ts).split('.')[1][:6]
+ if len(ulsec) < 6 and int(ulsec) != 0:
+ ulsec = int(ulsec) * (10 ** (6 - len(ulsec)))
+ elif int(ulsec) == 0:
+ ulsec *= 6
+            # * the following two rows were added for tsCheckCase
+ td_ts = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(ts))
+ return td_ts
+ #td_ts = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(ts))
+ td_ts = time.strftime("%Y-%m-%d %H:%M:%S.{}".format(ulsec), time.localtime(ts))
+ return td_ts
+ #return repr(datetime.datetime.strptime(td_ts, "%Y-%m-%d %H:%M:%S.%f"))
+
+ def dateToTs(self, datetime_input):
+ return int(time.mktime(time.strptime(datetime_input, "%Y-%m-%d %H:%M:%S.%f")))
+
+ def getTdTypeValue(self, value, vtype="col"):
+ if vtype == "col":
+ if value.lower().endswith("i8"):
+ td_type = "TINYINT"
+ td_tag_value = ''.join(list(value)[:-2])
+ elif value.lower().endswith("i16"):
+ td_type = "SMALLINT"
+ td_tag_value = ''.join(list(value)[:-3])
+ elif value.lower().endswith("i32"):
+ td_type = "INT"
+ td_tag_value = ''.join(list(value)[:-3])
+ elif value.lower().endswith("i64"):
+ td_type = "BIGINT"
+ td_tag_value = ''.join(list(value)[:-3])
+ elif value.lower().endswith("u64"):
+ td_type = "BIGINT UNSIGNED"
+ td_tag_value = ''.join(list(value)[:-3])
+ elif value.lower().endswith("f32"):
+ td_type = "FLOAT"
+ td_tag_value = ''.join(list(value)[:-3])
+ td_tag_value = '{}'.format(np.float32(td_tag_value))
+ elif value.lower().endswith("f64"):
+ td_type = "DOUBLE"
+ td_tag_value = ''.join(list(value)[:-3])
+ if "e" in value.lower():
+ td_tag_value = str(float(td_tag_value))
+ elif value.lower().startswith('l"'):
+ td_type = "NCHAR"
+ td_tag_value = ''.join(list(value)[2:-1])
+ elif value.startswith('"') and value.endswith('"'):
+ td_type = "BINARY"
+ td_tag_value = ''.join(list(value)[1:-1])
+ elif value.lower() == "t" or value.lower() == "true":
+ td_type = "BOOL"
+ td_tag_value = "True"
+ elif value.lower() == "f" or value.lower() == "false":
+ td_type = "BOOL"
+ td_tag_value = "False"
+ elif value.isdigit():
+ td_type = "DOUBLE"
+ td_tag_value = str(float(value))
+ else:
+ td_type = "DOUBLE"
+ if "e" in value.lower():
+ td_tag_value = str(float(value))
+ else:
+ td_tag_value = value
+ elif vtype == "tag":
+ td_type = "NCHAR"
+ td_tag_value = str(value)
+ return td_type, td_tag_value
+
+ def typeTrans(self, type_list):
+ type_num_list = []
+ for tp in type_list:
+ if tp.upper() == "TIMESTAMP":
+ type_num_list.append(9)
+ elif tp.upper() == "BOOL":
+ type_num_list.append(1)
+ elif tp.upper() == "TINYINT":
+ type_num_list.append(2)
+ elif tp.upper() == "SMALLINT":
+ type_num_list.append(3)
+ elif tp.upper() == "INT":
+ type_num_list.append(4)
+ elif tp.upper() == "BIGINT":
+ type_num_list.append(5)
+ elif tp.upper() == "FLOAT":
+ type_num_list.append(6)
+ elif tp.upper() == "DOUBLE":
+ type_num_list.append(7)
+ elif tp.upper() == "BINARY":
+ type_num_list.append(8)
+ elif tp.upper() == "NCHAR":
+ type_num_list.append(10)
+ elif tp.upper() == "BIGINT UNSIGNED":
+ type_num_list.append(14)
+ return type_num_list
+
+ def inputHandle(self, input_sql, ts_type, protocol=None):
+ input_sql_split_list = input_sql.split(" ")
+ if protocol == "telnet-tcp":
+ input_sql_split_list.pop(0)
+ stb_name = input_sql_split_list[0]
+ stb_tag_list = input_sql_split_list[3:]
+ stb_tag_list[-1] = stb_tag_list[-1].strip()
+ stb_col_value = input_sql_split_list[2]
+ ts_value = self.timeTrans(input_sql_split_list[1], ts_type)
+
+ tag_name_list = []
+ tag_value_list = []
+ td_tag_value_list = []
+ td_tag_type_list = []
+
+ col_name_list = []
+ col_value_list = []
+ td_col_value_list = []
+ td_col_type_list = []
+
+ for elm in stb_tag_list:
+ if self.smlChildTableName_value == "ID":
+ if "id=" in elm.lower():
+ tb_name = elm.split('=')[1]
+ else:
+ tag_name_list.append(elm.split("=")[0].lower())
+ tag_value_list.append(elm.split("=")[1])
+ tb_name = ""
+ td_tag_value_list.append(self.getTdTypeValue(elm.split("=")[1], "tag")[1])
+ td_tag_type_list.append(self.getTdTypeValue(elm.split("=")[1], "tag")[0])
+ else:
+ if "id" == elm.split("=")[0].lower():
+ tag_name_list.insert(0, elm.split("=")[0])
+ tag_value_list.insert(0, elm.split("=")[1])
+ td_tag_value_list.insert(0, self.getTdTypeValue(elm.split("=")[1], "tag")[1])
+ td_tag_type_list.insert(0, self.getTdTypeValue(elm.split("=")[1], "tag")[0])
+ else:
+ tag_name_list.append(elm.split("=")[0])
+ tag_value_list.append(elm.split("=")[1])
+ tb_name = ""
+ td_tag_value_list.append(self.getTdTypeValue(elm.split("=")[1], "tag")[1])
+ td_tag_type_list.append(self.getTdTypeValue(elm.split("=")[1], "tag")[0])
+
+ col_name_list.append('_value')
+ col_value_list.append(stb_col_value)
+
+ td_col_value_list.append(self.getTdTypeValue(stb_col_value)[1])
+ td_col_type_list.append(self.getTdTypeValue(stb_col_value)[0])
+
+ final_field_list = []
+ final_field_list.extend(col_name_list)
+ final_field_list.extend(tag_name_list)
+
+ final_type_list = []
+ final_type_list.append("TIMESTAMP")
+ final_type_list.extend(td_col_type_list)
+ final_type_list.extend(td_tag_type_list)
+ final_type_list = self.typeTrans(final_type_list)
+
+ final_value_list = []
+ final_value_list.append(ts_value)
+ final_value_list.extend(td_col_value_list)
+ final_value_list.extend(td_tag_value_list)
+ return final_value_list, final_field_list, final_type_list, stb_name, tb_name
+
+ def genFullTypeSql(self, stb_name="", tb_name="", value="", t0="", t1="127i8", t2="32767i16", t3="2147483647i32",
+ t4="9223372036854775807i64", t5="11.12345f32", t6="22.123456789f64", t7="\"binaryTagValue\"",
+ t8="L\"ncharTagValue\"", ts="1626006833641",
+ id_noexist_tag=None, id_change_tag=None, id_upper_tag=None, id_mixul_tag=None, id_double_tag=None,
+ t_add_tag=None, t_mul_tag=None, c_multi_tag=None, c_blank_tag=None, t_blank_tag=None,
+ chinese_tag=None, multi_field_tag=None, point_trans_tag=None, protocol=None, tcp_keyword_tag=None):
+ if stb_name == "":
+ stb_name = tdCom.getLongName(len=6, mode="letters")
+ if tb_name == "":
+ tb_name = f'{stb_name}_{random.randint(0, 65535)}_{random.randint(0, 65535)}'
+ if t0 == "":
+ t0 = "t"
+ if value == "":
+ value = random.choice(["f", "F", "false", "False", "t", "T", "true", "True", "TRUE", "FALSE"])
+ if id_upper_tag is not None:
+ id = "ID"
+ else:
+ id = "id"
+ if id_mixul_tag is not None:
+ id = random.choice(["iD", "Id"])
+ else:
+ id = "id"
+ sql_seq = f'{stb_name} {ts} {value} {id}={tb_name} t0={t0} t1={t1} t2={t2} t3={t3} t4={t4} t5={t5} t6={t6} t7={t7} t8={t8}'
+ if id_noexist_tag is not None:
+ sql_seq = f'{stb_name} {ts} {value} t0={t0} t1={t1} t2={t2} t3={t3} t4={t4} t5={t5} t6={t6} t7={t7} t8={t8}'
+ if t_add_tag is not None:
+ sql_seq = f'{stb_name} {ts} {value} t0={t0} t1={t1} t2={t2} t3={t3} t4={t4} t5={t5} t6={t6} t7={t7} t8={t8} t9={t8}'
+ if id_change_tag is not None:
+ sql_seq = f'{stb_name} {ts} {value} t0={t0} {id}={tb_name} t1={t1} t2={t2} t3={t3} t4={t4} t5={t5} t6={t6} t7={t7} t8={t8}'
+ if id_double_tag is not None:
+ sql_seq = f'{stb_name} {ts} {value} {id}=\"{tb_name}_1\" t0={t0} t1={t1} {id}=\"{tb_name}_2\" t2={t2} t3={t3} t4={t4} t5={t5} t6={t6} t7={t7} t8={t8}'
+ if t_add_tag is not None:
+ sql_seq = f'{stb_name} {ts} {value} {id}={tb_name} t0={t0} t1={t1} t2={t2} t3={t3} t4={t4} t5={t5} t6={t6} t7={t7} t8={t8} t11={t1} t10={t8}'
+ if t_mul_tag is not None:
+ sql_seq = f'{stb_name} {ts} {value} {id}={tb_name} t0={t0} t1={t1} t2={t2} t3={t3} t4={t4} t5={t5} t6={t6}'
+ if id_noexist_tag is not None:
+ sql_seq = f'{stb_name} {ts} {value} t0={t0} t1={t1} t2={t2} t3={t3} t4={t4} t5={t5} t6={t6}'
+ if c_multi_tag is not None:
+ sql_seq = f'{stb_name} {ts} {value} {value} {id}={tb_name} t0={t0} t1={t1} t2={t2} t3={t3} t4={t4} t5={t5} t6={t6}'
+ if c_blank_tag is not None:
+ sql_seq = f'{stb_name} {ts} {id}={tb_name} t0={t0} t1={t1} t2={t2} t3={t3} t4={t4} t5={t5} t6={t6} t7={t7} t8={t8}'
+ if t_blank_tag is not None:
+ sql_seq = f'{stb_name} {ts} {value}'
+ if chinese_tag is not None:
+ sql_seq = f'{stb_name} {ts} L"涛思数据" t0={t0} t1=L"涛思数据"'
+ if multi_field_tag is not None:
+ sql_seq = f'{stb_name} {ts} {value} {id}={tb_name} t0={t0} {value}'
+ if point_trans_tag is not None:
+ sql_seq = f'.point.trans.test {ts} {value} t0={t0}'
+ if tcp_keyword_tag is not None:
+ sql_seq = f'put {ts} {value} t0={t0}'
+ if protocol == "telnet-tcp":
+ sql_seq = 'put ' + sql_seq + '\n'
+ return sql_seq, stb_name
+
+ def genMulTagColStr(self, genType, count=1):
+ """
+ genType must be tag/col
+ """
+ tag_str = ""
+ col_str = ""
+ if genType == "tag":
+ for i in range(0, count):
+ if i < (count-1):
+ tag_str += f't{i}=f '
+ else:
+ tag_str += f't{i}=f'
+ return tag_str
+ if genType == "col":
+ col_str = "t"
+ return col_str
+
+ def genLongSql(self, tag_count):
+ stb_name = tdCom.getLongName(7, mode="letters")
+ tag_str = self.genMulTagColStr("tag", tag_count)
+ col_str = self.genMulTagColStr("col")
+ ts = "1626006833641"
+ long_sql = stb_name + ' ' + ts + ' ' + col_str + ' ' + ' ' + tag_str
+ return long_sql, stb_name
+
+ def getNoIdTbName(self, stb_name, protocol=None):
+ query_sql = f"select tbname from {stb_name}"
+ tb_name = self.resHandle(query_sql, True, protocol)[0][0]
+ return tb_name
+
+ def resHandle(self, query_sql, query_tag, protocol=None):
+ tdSql.execute('reset query cache')
+ if protocol == "telnet-tcp":
+ time.sleep(0.5)
+ row_info = tdSql.query(query_sql, query_tag)
+ col_info = tdSql.getColNameList(query_sql, query_tag)
+ res_row_list = []
+ sub_list = []
+ for row_mem in row_info:
+ for i in row_mem:
+ sub_list.append(str(i))
+ res_row_list.append(sub_list)
+ res_field_list_without_ts = col_info[0][1:]
+ res_type_list = col_info[1]
+ return res_row_list, res_field_list_without_ts, res_type_list
+
+ def resCmp(self, input_sql, stb_name, query_sql="select * from", condition="", ts=None, ts_type=None, id=True, none_check_tag=None, precision=None, protocol=None):
+ expect_list = self.inputHandle(input_sql, ts_type, protocol)
+ if protocol == "telnet-tcp":
+ tdCom.tcpClient(input_sql)
+ else:
+ if precision == None:
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, ts_type)
+ else:
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, precision)
+ query_sql = f"{query_sql} {stb_name} {condition}"
+ res_row_list, res_field_list_without_ts, res_type_list = self.resHandle(query_sql, True, protocol)
+ if ts == 0:
+ res_ts = self.dateToTs(res_row_list[0][0])
+ current_time = time.time()
+ if current_time - res_ts < 60:
+ tdSql.checkEqual(res_row_list[0][1:], expect_list[0][1:])
+ else:
+ print("timeout")
+ tdSql.checkEqual(res_row_list[0], expect_list[0])
+ else:
+ if none_check_tag is not None:
+ none_index_list = [i for i,x in enumerate(res_row_list[0]) if x=="None"]
+ none_index_list.reverse()
+ for j in none_index_list:
+ res_row_list[0].pop(j)
+ expect_list[0].pop(j)
+ tdSql.checkEqual(res_row_list[0], expect_list[0])
+ tdSql.checkEqual(res_field_list_without_ts, expect_list[1])
+ for i in range(len(res_type_list)):
+ tdSql.checkEqual(res_type_list[i], expect_list[2][i])
+
+ def initCheckCase(self, protocol=None):
+ """
+ normal tags and cols, one for every elm
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ input_sql, stb_name = self.genFullTypeSql(protocol=protocol)
+ self.resCmp(input_sql, stb_name, protocol=protocol)
+
+ def boolTypeCheckCase(self, protocol=None):
+ """
+ check all normal type
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ full_type_list = ["f", "F", "false", "False", "t", "T", "true", "True"]
+ for t_type in full_type_list:
+ input_sql, stb_name = self.genFullTypeSql(t0=t_type, protocol=protocol)
+ self.resCmp(input_sql, stb_name, protocol=protocol)
+
+ def symbolsCheckCase(self, protocol=None):
+ """
+ check symbols = `~!@#$%^&*()_-+={[}]\|:;'\",<.>/?
+ """
+ '''
+ please test :
+ binary_symbols = '\"abcd`~!@#$%^&*()_-{[}]|:;<.>?lfjal"\'\'"\"'
+ '''
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ binary_symbols = '"abcd`~!@#$%^&*()_-{[}]|:;<.>?lfjal"'
+ nchar_symbols = f'L{binary_symbols}'
+ input_sql1, stb_name1 = self.genFullTypeSql(value=binary_symbols, t7=binary_symbols, t8=nchar_symbols, protocol=protocol)
+ input_sql2, stb_name2 = self.genFullTypeSql(value=nchar_symbols, t7=binary_symbols, t8=nchar_symbols, protocol=protocol)
+ self.resCmp(input_sql1, stb_name1, protocol=protocol)
+ self.resCmp(input_sql2, stb_name2, protocol=protocol)
+
+ def tsCheckCase(self):
+ """
+ test ts list --> ["1626006833640ms", "1626006834s", "1626006822639022"]
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ input_sql, stb_name = self.genFullTypeSql(ts=1626006833640)
+ self.resCmp(input_sql, stb_name, ts_type=TDSmlTimestampType.MILLI_SECOND.value)
+ input_sql, stb_name = self.genFullTypeSql(ts=1626006833640)
+ self.resCmp(input_sql, stb_name, ts_type=None)
+ input_sql, stb_name = self.genFullTypeSql(ts=1626006834)
+ self.resCmp(input_sql, stb_name, ts_type=TDSmlTimestampType.SECOND.value)
+
+ tdSql.execute(f"drop database if exists test_ts")
+ tdSql.execute(f"create database if not exists test_ts precision 'ms'")
+ tdSql.execute("use test_ts")
+ input_sql = ['test_ms 1626006833640 t t0=t', 'test_ms 1626006833641 f t0=t']
+ self._conn.schemaless_insert(input_sql, TDSmlProtocolType.TELNET.value, None)
+ res = tdSql.query('select * from test_ms', True)
+ tdSql.checkEqual(str(res[0][0]), "2021-07-11 20:33:53.640000")
+ tdSql.checkEqual(str(res[1][0]), "2021-07-11 20:33:53.641000")
+
+ def openTstbTelnetTsCheckCase(self):
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ input_sql = f'{tdCom.getLongName(len=10, mode="letters")} 0 127 t0=127 t1=32767I16 t2=2147483647I32 t3=9223372036854775807 t4=11.12345027923584F32 t5=22.123456789F64'
+ stb_name = input_sql.split(" ")[0]
+ self.resCmp(input_sql, stb_name, ts=0)
+ input_sql = f'{tdCom.getLongName(len=10, mode="letters")} 1626006833640 127 t0=127 t1=32767I16 t2=2147483647I32 t3=9223372036854775807 t4=11.12345027923584F32 t5=22.123456789F64'
+ stb_name = input_sql.split(" ")[0]
+ self.resCmp(input_sql, stb_name, ts_type=TDSmlTimestampType.MILLI_SECOND.value)
+ input_sql = f'{tdCom.getLongName(len=10, mode="letters")} 1626006834 127 t0=127 t1=32767I16 t2=2147483647I32 t3=9223372036854775807 t4=11.12345027923584F32 t5=22.123456789F64'
+ stb_name = input_sql.split(" ")[0]
+ self.resCmp(input_sql, stb_name, ts_type=TDSmlTimestampType.SECOND.value)
+ for ts in [1, 12, 123, 1234, 12345, 123456, 1234567, 12345678, 162600683, 16260068341, 162600683412, 16260068336401]:
+ try:
+ input_sql = f'{tdCom.getLongName(len=10, mode="letters")} {ts} 127 t0=127 t1=32767I16 t2=2147483647I32 t3=9223372036854775807 t4=11.12345027923584F32 t5=22.123456789F64'
+ self._conn.schemaless_insert(input_sql, TDSmlProtocolType.TELNET.value, None)
+ raise Exception("should not reach here")
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+
+ def idSeqCheckCase(self, protocol=None):
+ """
+ check id.index in tags
+ eg: t0=**,id=**,t1=**
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ input_sql, stb_name = self.genFullTypeSql(id_change_tag=True, protocol=protocol)
+ self.resCmp(input_sql, stb_name, protocol=protocol)
+
+ def idLetterCheckCase(self, protocol=None):
+ """
+ check id param
+ eg: id and ID
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ input_sql, stb_name = self.genFullTypeSql(id_upper_tag=True, protocol=protocol)
+ self.resCmp(input_sql, stb_name, protocol=protocol)
+ input_sql, stb_name = self.genFullTypeSql(id_mixul_tag=True, protocol=protocol)
+ self.resCmp(input_sql, stb_name, protocol=protocol)
+ input_sql, stb_name = self.genFullTypeSql(id_change_tag=True, id_upper_tag=True, protocol=protocol)
+ self.resCmp(input_sql, stb_name, protocol=protocol)
+
+ def noIdCheckCase(self, protocol=None):
+ """
+ id not exist
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ input_sql, stb_name = self.genFullTypeSql(id_noexist_tag=True, protocol=protocol)
+ self.resCmp(input_sql, stb_name, protocol=protocol)
+ query_sql = f"select tbname from {stb_name}"
+ res_row_list = self.resHandle(query_sql, True)[0]
+ if len(res_row_list[0][0]) > 0:
+ tdSql.checkColNameList(res_row_list, res_row_list)
+ else:
+ tdSql.checkColNameList(res_row_list, "please check noIdCheckCase")
+
+ def maxColTagCheckCase(self):
+ """
+ max tag count is 128
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ for input_sql in [self.genLongSql(128)[0]]:
+ tdCom.cleanTb()
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
+ for input_sql in [self.genLongSql(129)[0]]:
+ tdCom.cleanTb()
+ try:
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
+ raise Exception("should not reach here")
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+
+ def stbTbNameCheckCase(self, protocol=None):
+ """
+ test illegal id name
+ mix "`~!@#$¥%^&*()-+{}|[]、「」【】:;《》<>?"
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ rstr = list("~!@#$¥%^&*()-+{}|[]、「」【】:;《》<>?")
+ for i in rstr:
+ input_sql, stb_name = self.genFullTypeSql(tb_name=f"\"aaa{i}bbb\"", protocol=protocol)
+ self.resCmp(input_sql, f'`{stb_name}`', protocol=protocol)
+ tdSql.execute(f'drop table if exists `{stb_name}`')
+
+ def idStartWithNumCheckCase(self, protocol=None):
+ """
+        id starts with a number
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ input_sql, stb_name = self.genFullTypeSql(tb_name="1aaabbb", protocol=protocol)
+ self.resCmp(input_sql, stb_name, protocol=protocol)
+
+ def nowTsCheckCase(self):
+ """
+ check now unsupported
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ input_sql = self.genFullTypeSql(ts="now")[0]
+ try:
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
+ raise Exception("should not reach here")
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+
+ def dateFormatTsCheckCase(self):
+ """
+ check date format ts unsupported
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ input_sql = self.genFullTypeSql(ts="2021-07-21\ 19:01:46.920")[0]
+ try:
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
+ raise Exception("should not reach here")
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+
+ def illegalTsCheckCase(self):
+ """
+ check ts format like 16260068336390us19
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ input_sql = self.genFullTypeSql(ts="16260068336390us19")[0]
+ try:
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
+ raise Exception("should not reach here")
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+
+ def tbnameCheckCase(self):
+ """
+ check length 192
+ check upper tbname
+        check upper tag
+ length of stb_name tb_name <= 192
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ stb_name_192 = tdCom.getLongName(len=192, mode="letters")
+ tb_name_192 = tdCom.getLongName(len=192, mode="letters")
+ tdCom.cleanTb()
+ input_sql, stb_name = self.genFullTypeSql(stb_name=stb_name_192, tb_name=tb_name_192)
+ self.resCmp(input_sql, stb_name)
+ tdSql.query(f'select * from {stb_name}')
+ tdSql.checkRows(1)
+ if self.smlChildTableName_value == "ID":
+ for input_sql in [self.genFullTypeSql(stb_name=tdCom.getLongName(len=193, mode="letters"), tb_name=tdCom.getLongName(len=5, mode="letters"))[0], self.genFullTypeSql(tb_name=tdCom.getLongName(len=193, mode="letters"))[0]]:
+ try:
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
+ raise Exception("should not reach here")
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+ input_sql = 'Abcdffgg 1626006833640 False T1=127i8 id=Abcddd'
+ else:
+ input_sql = self.genFullTypeSql(stb_name=tdCom.getLongName(len=193, mode="letters"), tb_name=tdCom.getLongName(len=5, mode="letters"))[0]
+ try:
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
+ raise Exception("should not reach here")
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+ input_sql = 'Abcdffgg 1626006833640 False T1=127i8'
+ stb_name = f'`{input_sql.split(" ")[0]}`'
+ self.resCmp(input_sql, stb_name)
+ tdSql.execute('drop table `Abcdffgg`')
+
+ def tagNameLengthCheckCase(self):
+ """
+ check tag name limit <= 62
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ tag_name = tdCom.getLongName(61, "letters")
+ tag_name = f'T{tag_name}'
+ stb_name = tdCom.getLongName(7, "letters")
+ input_sql = f'{stb_name} 1626006833640 L"bcdaaa" {tag_name}=f'
+ self.resCmp(input_sql, stb_name)
+ input_sql = f'{stb_name} 1626006833640 L"gggcdaaa" {tdCom.getLongName(65, "letters")}=f'
+ try:
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
+ raise Exception("should not reach here")
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+
+ def tagValueLengthCheckCase(self):
+ """
+ check full type tag value limit
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ # nchar
+ # * legal nchar could not be larger than 16374/4
+ stb_name = tdCom.getLongName(7, "letters")
+ input_sql = f'{stb_name} 1626006833640 t t0=t t1={tdCom.getLongName(4093, "letters")}'
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
+
+ input_sql = f'{stb_name} 1626006833640 t t0=t t1={tdCom.getLongName(4094, "letters")}'
+ try:
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
+ raise Exception("should not reach here")
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+
+ def colValueLengthCheckCase(self):
+ """
+ check full type col value limit
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ # i8
+ for value in ["-128i8", "127i8"]:
+ input_sql, stb_name = self.genFullTypeSql(value=value)
+ self.resCmp(input_sql, stb_name)
+ tdCom.cleanTb()
+ for value in ["-129i8", "128i8"]:
+ input_sql = self.genFullTypeSql(value=value)[0]
+ try:
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
+ raise Exception("should not reach here")
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+ # i16
+ tdCom.cleanTb()
+ for value in ["-32768i16"]:
+ input_sql, stb_name = self.genFullTypeSql(value=value)
+ self.resCmp(input_sql, stb_name)
+ tdCom.cleanTb()
+ for value in ["-32769i16", "32768i16"]:
+ input_sql = self.genFullTypeSql(value=value)[0]
+ try:
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
+ raise Exception("should not reach here")
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+
+ # i32
+ tdCom.cleanTb()
+ for value in ["-2147483648i32"]:
+ input_sql, stb_name = self.genFullTypeSql(value=value)
+ self.resCmp(input_sql, stb_name)
+ tdCom.cleanTb()
+ for value in ["-2147483649i32", "2147483648i32"]:
+ input_sql = self.genFullTypeSql(value=value)[0]
+ try:
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
+ raise Exception("should not reach here")
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+
+ # i64
+ tdCom.cleanTb()
+ for value in ["-9223372036854775808i64"]:
+ input_sql, stb_name = self.genFullTypeSql(value=value)
+ self.resCmp(input_sql, stb_name)
+ tdCom.cleanTb()
+ for value in ["-9223372036854775809i64", "9223372036854775808i64"]:
+ input_sql = self.genFullTypeSql(value=value)[0]
+ try:
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
+ raise Exception("should not reach here")
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+
+ # f32
+ tdCom.cleanTb()
+ for value in [f"{-3.4028234663852885981170418348451692544*(10**38)}f32", f"{3.4028234663852885981170418348451692544*(10**38)}f32"]:
+ input_sql, stb_name = self.genFullTypeSql(value=value)
+ self.resCmp(input_sql, stb_name)
+ # * limit set to 4028234664*(10**38)
+ tdCom.cleanTb()
+ for value in [f"{-3.4028234664*(10**38)}f32", f"{3.4028234664*(10**38)}f32"]:
+ input_sql = self.genFullTypeSql(value=value)[0]
+ try:
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
+ raise Exception("should not reach here")
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+
+ # f64
+ tdCom.cleanTb()
+ for value in [f'{-1.79769313486231570814527423731704356798070567525844996598917476803157260780*(10**308)}f64', f'{-1.79769313486231570814527423731704356798070567525844996598917476803157260780*(10**308)}f64']:
+ input_sql, stb_name = self.genFullTypeSql(value=value)
+ self.resCmp(input_sql, stb_name)
+ # # * limit set to 1.797693134862316*(10**308)
+ # tdCom.cleanTb()
+ # for value in [f'{-1.797693134862316*(10**308)}f64', f'{-1.797693134862316*(10**308)}f64']:
+ # input_sql = self.genFullTypeSql(value=value)[0]
+ # try:
+ # self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
+ # raise Exception("should not reach here")
+ # except SchemalessError as err:
+ # tdSql.checkNotEqual(err.errno, 0)
+
+ # # # binary
+ # tdCom.cleanTb()
+ # stb_name = tdCom.getLongName(7, "letters")
+ # input_sql = f'{stb_name} 1626006833640 "{tdCom.getLongName(16374, "letters")}" t0=t'
+ # self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
+
+ # tdCom.cleanTb()
+ # input_sql = f'{stb_name} 1626006833640 "{tdCom.getLongName(16375, "letters")}" t0=t'
+ # try:
+ # self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
+ # raise Exception("should not reach here")
+ # except SchemalessError as err:
+ # tdSql.checkNotEqual(err.errno, 0)
+
+ # # nchar
+ # # * legal nchar could not be larger than 16374/4
+ # tdCom.cleanTb()
+ # stb_name = tdCom.getLongName(7, "letters")
+ # input_sql = f'{stb_name} 1626006833640 L"{tdCom.getLongName(4093, "letters")}" t0=t'
+ # self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
+
+ # tdCom.cleanTb()
+ # input_sql = f'{stb_name} 1626006833640 L"{tdCom.getLongName(4094, "letters")}" t0=t'
+ # try:
+ # self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
+ # raise Exception("should not reach here")
+ # except SchemalessError as err:
+ # tdSql.checkNotEqual(err.errno, 0)
+
+ def tagColIllegalValueCheckCase(self):
+
+ """
+ test illegal tag col value
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ # bool
+ for i in ["TrUe", "tRue", "trUe", "truE", "FalsE", "fAlse", "faLse", "falSe", "falsE"]:
+ input_sql1, stb_name = self.genFullTypeSql(t0=i)
+ self.resCmp(input_sql1, stb_name)
+ input_sql2, stb_name = self.genFullTypeSql(value=i)
+ self.resCmp(input_sql2, stb_name)
+
+ # i8 i16 i32 i64 f32 f64
+ for input_sql in [
+ self.genFullTypeSql(value="1s2i8")[0],
+ self.genFullTypeSql(value="1s2i16")[0],
+ self.genFullTypeSql(value="1s2i32")[0],
+ self.genFullTypeSql(value="1s2i64")[0],
+ self.genFullTypeSql(value="11.1s45f32")[0],
+ self.genFullTypeSql(value="11.1s45f64")[0],
+ ]:
+ try:
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
+ raise Exception("should not reach here")
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+
+ # check accepted binary and nchar symbols
+ # # * ~!@#$¥%^&*()-+={}|[]、「」:;
+ for symbol in list('~!@#$¥%^&*()-+={}|[]、「」:;'):
+ input_sql1 = f'{tdCom.getLongName(7, "letters")} 1626006833640 "abc{symbol}aaa" t0=t'
+ input_sql2 = f'{tdCom.getLongName(7, "letters")} 1626006833640 t t0=t t1="abc{symbol}aaa"'
+ self._conn.schemaless_insert([input_sql1], TDSmlProtocolType.TELNET.value, None)
+ # self._conn.schemaless_insert([input_sql2], TDSmlProtocolType.TELNET.value, None)
+
+ def blankCheckCase(self):
+ '''
+ check blank case
+ '''
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ # input_sql_list = [f'{tdCom.getLongName(7, "letters")} 1626006833640 "abc aaa" t0=t',
+ # f'{tdCom.getLongName(7, "letters")} 1626006833640 t t0="abaaa"',
+ # f'{tdCom.getLongName(7, "letters")} 1626006833640 t t0=L"abaaa"',
+ # f'{tdCom.getLongName(7, "letters")} 1626006833640 L"aba aa" t0=L"abcaaa3" ']
+ input_sql_list = [f'{tdCom.getLongName(7, "letters")} 1626006833640 t t0="abaaa"',
+ f'{tdCom.getLongName(7, "letters")} 1626006833640 t t0=L"abaaa"']
+ for input_sql in input_sql_list:
+ stb_name = input_sql.split(" ")[0]
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
+ tdSql.query(f'select * from {stb_name}')
+ tdSql.checkRows(1)
+
+ def duplicateIdTagColInsertCheckCase(self):
+ """
+ check duplicate Id Tag Col
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ input_sql_id = self.genFullTypeSql(id_double_tag=True)[0]
+ try:
+ self._conn.schemaless_insert([input_sql_id], TDSmlProtocolType.TELNET.value, None)
+ raise Exception("should not reach here")
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+
+ input_sql = self.genFullTypeSql()[0]
+ input_sql_tag = input_sql.replace("t5", "t6")
+ try:
+ self._conn.schemaless_insert([input_sql_tag], TDSmlProtocolType.TELNET.value, None)
+ raise Exception("should not reach here")
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+
+ ##### stb exist #####
+ @tdCom.smlPass
+ def noIdStbExistCheckCase(self):
+ """
+ case no id when stb exist
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ input_sql, stb_name = self.genFullTypeSql(tb_name="sub_table_0123456", t0="f", value="f")
+ self.resCmp(input_sql, stb_name)
+ input_sql, stb_name = self.genFullTypeSql(stb_name=stb_name, id_noexist_tag=True, t0="f", value="f")
+ self.resCmp(input_sql, stb_name, condition='where tbname like "t_%"')
+ tdSql.query(f"select * from {stb_name}")
+ tdSql.checkRows(2)
+
+ def duplicateInsertExistCheckCase(self):
+ """
+ check duplicate insert when stb exist
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ input_sql, stb_name = self.genFullTypeSql()
+ self.resCmp(input_sql, stb_name)
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
+ self.resCmp(input_sql, stb_name)
+
+ @tdCom.smlPass
+ def tagColBinaryNcharLengthCheckCase(self):
+ """
+ check length increase
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ input_sql, stb_name = self.genFullTypeSql()
+ self.resCmp(input_sql, stb_name)
+ tb_name = tdCom.getLongName(5, "letters")
+ input_sql, stb_name = self.genFullTypeSql(stb_name=stb_name, tb_name=tb_name,t7="\"binaryTagValuebinaryTagValue\"", t8="L\"ncharTagValuencharTagValue\"")
+ self.resCmp(input_sql, stb_name, condition=f'where tbname like "{tb_name}"')
+
+ @tdCom.smlPass
+ def tagColAddDupIDCheckCase(self):
+ """
+ check tag count add, stb and tb duplicate
+ * tag: alter table ...
+        * col: when update==0 and ts is the same, the col value is unchanged
+        * so in this case the tag && value will be added,
+ * col is added without value when update==0
+ * col is added with value when update==1
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ tb_name = tdCom.getLongName(7, "letters")
+ for db_update_tag in [0, 1]:
+ if db_update_tag == 1 :
+ self.createDb("test_update", db_update_tag=db_update_tag)
+ input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name, t0="t", value="t")
+ self.resCmp(input_sql, stb_name)
+ input_sql, stb_name = self.genFullTypeSql(stb_name=stb_name, tb_name=tb_name, t0="t", value="f", t_add_tag=True)
+ if db_update_tag == 1 :
+ self.resCmp(input_sql, stb_name, condition=f'where tbname like "{tb_name}"', none_check_tag=True)
+ tdSql.query(f'select * from {stb_name} where tbname like "{tb_name}"')
+ tdSql.checkData(0, 11, None)
+ tdSql.checkData(0, 12, None)
+ else:
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
+ tdSql.query(f'select * from {stb_name} where tbname like "{tb_name}"')
+ tdSql.checkData(0, 1, True)
+ tdSql.checkData(0, 11, None)
+ tdSql.checkData(0, 12, None)
+ self.createDb()
+
+ @tdCom.smlPass
+ def tagColAddCheckCase(self):
+ """
+ check tag count add
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ tb_name = tdCom.getLongName(7, "letters")
+ input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name, t0="f", value="f")
+ self.resCmp(input_sql, stb_name)
+ tb_name_1 = tdCom.getLongName(7, "letters")
+ input_sql, stb_name = self.genFullTypeSql(stb_name=stb_name, tb_name=tb_name_1, t0="f", value="f", t_add_tag=True)
+ self.resCmp(input_sql, stb_name, condition=f'where tbname like "{tb_name_1}"')
+ res_row_list = self.resHandle(f"select t10,t11 from {tb_name}", True)[0]
+ tdSql.checkEqual(res_row_list[0], ['None', 'None'])
+ self.resCmp(input_sql, stb_name, condition=f'where tbname like "{tb_name}"', none_check_tag=True)
+
+ def tagMd5Check(self):
+ """
+ condition: stb not change
+        insert two tables, keep tags unchanged, change cols
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ input_sql, stb_name = self.genFullTypeSql(t0="f", value="f", id_noexist_tag=True)
+ self.resCmp(input_sql, stb_name)
+ tb_name1 = self.getNoIdTbName(stb_name)
+ input_sql, stb_name = self.genFullTypeSql(stb_name=stb_name, t0="f", value="f", id_noexist_tag=True)
+ self.resCmp(input_sql, stb_name)
+ tb_name2 = self.getNoIdTbName(stb_name)
+ tdSql.query(f"select * from {stb_name}")
+ tdSql.checkRows(1)
+ tdSql.checkEqual(tb_name1, tb_name2)
+ input_sql, stb_name = self.genFullTypeSql(stb_name=stb_name, t0="f", value="f", id_noexist_tag=True, t_add_tag=True)
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
+ tb_name3 = self.getNoIdTbName(stb_name)
+ tdSql.query(f"select * from {stb_name}")
+ tdSql.checkRows(2)
+ tdSql.checkNotEqual(tb_name1, tb_name3)
+
+ # * tag nchar max is 16374/4, col+ts nchar max 49151
+ def tagColNcharMaxLengthCheckCase(self):
+ """
+ check nchar length limit
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ stb_name = tdCom.getLongName(7, "letters")
+ input_sql = f'{stb_name} 1626006833640 f t2={tdCom.getLongName(1, "letters")}'
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
+
+ # * legal nchar could not be larger than 16374/4
+ input_sql = f'{stb_name} 1626006833640 f t1={tdCom.getLongName(4093, "letters")} t2={tdCom.getLongName(1, "letters")}'
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
+ tdSql.query(f"select * from {stb_name}")
+ tdSql.checkRows(2)
+ input_sql = f'{stb_name} 1626006833640 f t1={tdCom.getLongName(4093, "letters")} t2={tdCom.getLongName(2, "letters")}'
+ try:
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
+ raise Exception("should not reach here")
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+ tdSql.query(f"select * from {stb_name}")
+ tdSql.checkRows(2)
+
+ def batchInsertCheckCase(self):
+ """
+ test batch insert
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ stb_name = tdCom.getLongName(8, "letters")
+ tdSql.execute(f'create stable {stb_name}(ts timestamp, f int) tags(t1 bigint)')
+
+ lines = ["st123456 1626006833640 1i64 t1=3i64 t2=4f64 t3=\"t3\"",
+ "st123456 1626006833641 2i64 t1=4i64 t3=\"t4\" t2=5f64 t4=5f64",
+ f'{stb_name} 1626006833642 3i64 t2=5f64 t3=L\"ste\"',
+ "stf567890 1626006833643 4i64 t1=4i64 t3=\"t4\" t2=5f64 t4=5f64",
+ "st123456 1626006833644 5i64 t1=4i64 t2=5f64 t3=\"t4\"",
+ f'{stb_name} 1626006833645 6i64 t2=5f64 t3=L\"ste2\"',
+ f'{stb_name} 1626006833646 7i64 t2=5f64 t3=L\"ste2\"',
+ "st123456 1626006833647 8i64 t1=4i64 t3=\"t4\" t2=5f64 t4=5f64",
+ "st123456 1626006833648 9i64 t1=4i64 t3=\"t4\" t2=5f64 t4=5f64"
+ ]
+ self._conn.schemaless_insert(lines, TDSmlProtocolType.TELNET.value, TDSmlTimestampType.MILLI_SECOND.value)
+ tdSql.query('show stables')
+ tdSql.checkRows(3)
+ tdSql.query('show tables')
+ tdSql.checkRows(6)
+ tdSql.query('select * from st123456')
+ tdSql.checkRows(5)
+
+ def multiInsertCheckCase(self, count):
+ """
+ test multi insert
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ sql_list = []
+ stb_name = tdCom.getLongName(8, "letters")
+ tdSql.execute(f'create stable {stb_name}(ts timestamp, f int) tags(t1 nchar(10))')
+ for i in range(count):
+ input_sql = self.genFullTypeSql(stb_name=stb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', value=f'"{tdCom.getLongName(8, "letters")}"', id_noexist_tag=True)[0]
+ sql_list.append(input_sql)
+ self._conn.schemaless_insert(sql_list, TDSmlProtocolType.TELNET.value, TDSmlTimestampType.MILLI_SECOND.value)
+ tdSql.query('show tables')
+ tdSql.checkRows(count)
+
+ def batchErrorInsertCheckCase(self):
+ """
+ test batch error insert
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ stb_name = tdCom.getLongName(8, "letters")
+ lines = ["st123456 1626006833640 3i 64 t1=3i64 t2=4f64 t3=\"t3\"",
+ f"{stb_name} 1626056811823316532ns tRue t2=5f64 t3=L\"ste\""]
+ try:
+ self._conn.schemaless_insert(lines, TDSmlProtocolType.TELNET.value, None)
+ raise Exception("should not reach here")
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+
+ def multiColsInsertCheckCase(self):
+ """
+ test multi cols insert
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ input_sql = self.genFullTypeSql(c_multi_tag=True)[0]
+ try:
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
+ raise Exception("should not reach here")
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+
+ def blankColInsertCheckCase(self):
+ """
+ test blank col insert
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ input_sql = self.genFullTypeSql(c_blank_tag=True)[0]
+ try:
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
+ raise Exception("should not reach here")
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+
+ def blankTagInsertCheckCase(self):
+ """
+ test blank tag insert
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ input_sql = self.genFullTypeSql(t_blank_tag=True)[0]
+ try:
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
+ raise Exception("should not reach here")
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+
+ def chineseCheckCase(self):
+ """
+ check nchar ---> chinese
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ input_sql, stb_name = self.genFullTypeSql(chinese_tag=True)
+ self.resCmp(input_sql, stb_name)
+
+ def multiFieldCheckCase(self):
+ '''
+ multi_field
+ '''
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ input_sql = self.genFullTypeSql(multi_field_tag=True)[0]
+ try:
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
+ raise Exception("should not reach here")
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+
+ def spellCheckCase(self):
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ stb_name = tdCom.getLongName(8, "letters")
+ input_sql_list = [f'{stb_name}_1 1626006833640 127I8 t0=127I8 t1=32767I16 t2=2147483647I32 t3=9223372036854775807I64 t4=11.12345027923584F32 t5=22.123456789F64',
+ f'{stb_name}_2 1626006833640 32767I16 t0=127I8 t1=32767I16 t2=2147483647I32 t3=9223372036854775807I64 t4=11.12345027923584F32 t5=22.123456789F64',
+ f'{stb_name}_3 1626006833640 2147483647I32 t0=127I8 t1=32767I16 t2=2147483647I32 t3=9223372036854775807I64 t4=11.12345027923584F32 t5=22.123456789F64',
+ f'{stb_name}_4 1626006833640 9223372036854775807I64 t0=127I8 t1=32767I16 t2=2147483647I32 t3=9223372036854775807I64 t4=11.12345027923584F32 t5=22.123456789F64',
+ f'{stb_name}_5 1626006833640 11.12345027923584F32 t0=127I8 t1=32767I16 t2=2147483647I32 t3=9223372036854775807I64 t4=11.12345027923584F32 t5=22.123456789F64',
+ f'{stb_name}_6 1626006833640 22.123456789F64 t0=127I8 t1=32767I16 t2=2147483647I32 t3=9223372036854775807I64 t4=11.12345027923584F32 t5=22.123456789F64',
+ f'{stb_name}_7 1626006833640 22.123456789F64 t0=127I8 t1=32767I16 t2=2147483647I32 t3=9223372036854775807I64 t4=11.12345027923584F32 t5=22.123456789F64',
+ f'{stb_name}_8 1626006833640 22.123456789F64 t0=127I8 t1=32767I16 t2=2147483647I32 t3=9223372036854775807I64 t4=11.12345027923584F32 t5=22.123456789F64',
+ f'{stb_name}_9 1626006833640 22.123456789F64 t0=127I8 t1=32767I16 t2=2147483647I32 t3=9223372036854775807I64 t4=11.12345027923584F32 t5=22.123456789F64',
+ f'{stb_name}_10 1626006833640 22.123456789F64 t0=127I8 t1=32767I16 t2=2147483647I32 t3=9223372036854775807I64 t4=11.12345027923584F32 t5=22.123456789F64']
+ for input_sql in input_sql_list:
+ stb_name = input_sql.split(' ')[0]
+ self.resCmp(input_sql, stb_name)
+
+ def pointTransCheckCase(self, protocol=None):
+ """
+ metric value "." trans to "_"
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ input_sql = self.genFullTypeSql(point_trans_tag=True, protocol=protocol)[0]
+ if protocol == 'telnet-tcp':
+ stb_name = f'`{input_sql.split(" ")[1]}`'
+ else:
+ stb_name = f'`{input_sql.split(" ")[0]}`'
+ self.resCmp(input_sql, stb_name, protocol=protocol)
+ tdSql.execute("drop table `.point.trans.test`")
+
+ def defaultTypeCheckCase(self):
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ stb_name = tdCom.getLongName(8, "letters")
+ input_sql_list = [f'{stb_name}_1 1626006833640 9223372036854775807 t0=f t1=127 t2=32767i16 t3=2147483647i32 t4=9223372036854775807 t5=11.12345f32 t6=22.123456789f64 t7="vozamcts" t8=L"ncharTagValue"', \
+ f'{stb_name}_2 1626006833641 22.123456789 t0=f t1=127i8 t2=32767I16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789 t7="vozamcts" t8=L"ncharTagValue"', \
+ f'{stb_name}_3 1626006833642 10e5F32 t0=f t1=127i8 t2=32767I16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=10e5F64 t7="vozamcts" t8=L"ncharTagValue"', \
+ f'{stb_name}_4 1626006833643 10.0e5F64 t0=f t1=127i8 t2=32767I16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=10.0e5F32 t7="vozamcts" t8=L"ncharTagValue"', \
+ f'{stb_name}_5 1626006833644 -10.0e5 t0=f t1=127i8 t2=32767I16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=-10.0e5 t7="vozamcts" t8=L"ncharTagValue"']
+ for input_sql in input_sql_list:
+ stb_name = input_sql.split(" ")[0]
+ self.resCmp(input_sql, stb_name)
+
+ def tbnameTagsColsNameCheckCase(self):
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ if self.smlChildTableName_value == "ID":
+ input_sql = 'rFa$sta 1626006834 9223372036854775807 id=rFas$ta_1 Tt!0=true tT@1=127Ii8 t#2=32767i16 "t$3"=2147483647i32 t%4=9223372036854775807i64 t^5=11.12345f32 t&6=22.123456789f64 t*7=\"ddzhiksj\" t!@#$%^&*()_+[];:<>?,9=L\"ncharTagValue\"'
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
+ query_sql = 'select * from `rFa$sta`'
+ query_res = tdSql.query(query_sql, True)
+ tdSql.checkEqual(query_res, [(datetime.datetime(2021, 7, 11, 20, 33, 54), 9.223372036854776e+18, 'true', '127Ii8', '32767i16', '2147483647i32', '9223372036854775807i64', '11.12345f32', '22.123456789f64', '"ddzhiksj"', 'L"ncharTagValue"')])
+ col_tag_res = tdSql.getColNameList(query_sql)
+ tdSql.checkEqual(col_tag_res, ['ts', '_value', 'tt!0', 'tt@1', 't#2', '"t$3"', 't%4', 't^5', 't&6', 't*7', 't!@#$%^&*()_+[];:<>?,9'])
+ tdSql.execute('drop table `rFa$sta`')
+ else:
+ input_sql = 'rFa$sta 1626006834 9223372036854775807 Tt!0=true tT@1=127Ii8 t#2=32767i16 "t$3"=2147483647i32 t%4=9223372036854775807i64 t^5=11.12345f32 t&6=22.123456789f64 t*7=\"ddzhiksj\" t!@#$%^&*()_+[];:<>?,9=L\"ncharTagValue\"'
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
+ query_sql = 'select * from `rFa$sta`'
+ query_res = tdSql.query(query_sql, True)
+ tdSql.checkEqual(query_res, [(datetime.datetime(2021, 7, 11, 20, 33, 54), 9.223372036854776e+18, '2147483647i32', 'L"ncharTagValue"', '32767i16', '9223372036854775807i64', '22.123456789f64', '"ddzhiksj"', '11.12345f32', 'true', '127Ii8')])
+ col_tag_res = tdSql.getColNameList(query_sql)
+ tdSql.checkEqual(col_tag_res, ['_ts', '_value', '"t$3"', 't!@#$%^&*()_+[];:<>?,9', 't#2', 't%4', 't&6', 't*7', 't^5', 'Tt!0', 'tT@1'])
+ tdSql.execute('drop table `rFa$sta`')
+
+ def tcpKeywordsCheckCase(self, protocol="telnet-tcp"):
+ """
+ stb = "put"
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ input_sql = self.genFullTypeSql(tcp_keyword_tag=True, protocol=protocol)[0]
+ stb_name = f'`{input_sql.split(" ")[1]}`'
+ self.resCmp(input_sql, stb_name, protocol=protocol)
+
+ def genSqlList(self, count=5, stb_name="", tb_name=""):
+ """
+ stb --> supertable
+ tb --> table
+ ts --> timestamp, same default
+ col --> column, same default
+ tag --> tag, same default
+ d --> different
+ s --> same
+ a --> add
+ m --> minus
+ """
+ d_stb_d_tb_list = list()
+ s_stb_s_tb_list = list()
+ s_stb_s_tb_a_tag_list = list()
+ s_stb_s_tb_m_tag_list = list()
+ s_stb_d_tb_list = list()
+ s_stb_d_tb_m_tag_list = list()
+ s_stb_d_tb_a_tag_list = list()
+ s_stb_s_tb_d_ts_list = list()
+ s_stb_s_tb_d_ts_m_tag_list = list()
+ s_stb_s_tb_d_ts_a_tag_list = list()
+ s_stb_d_tb_d_ts_list = list()
+ s_stb_d_tb_d_ts_m_tag_list = list()
+ s_stb_d_tb_d_ts_a_tag_list = list()
+ for i in range(count):
+ d_stb_d_tb_list.append(self.genFullTypeSql(t0="f", value="f"))
+ s_stb_s_tb_list.append(self.genFullTypeSql(stb_name=stb_name, tb_name=tb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', value=f'"{tdCom.getLongName(8, "letters")}"'))
+ s_stb_s_tb_a_tag_list.append(self.genFullTypeSql(stb_name=stb_name, tb_name=tb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', value=f'"{tdCom.getLongName(8, "letters")}"', t_add_tag=True))
+ s_stb_s_tb_m_tag_list.append(self.genFullTypeSql(stb_name=stb_name, tb_name=tb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', value=f'"{tdCom.getLongName(8, "letters")}"', t_mul_tag=True))
+ s_stb_d_tb_list.append(self.genFullTypeSql(stb_name=stb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', value=f'"{tdCom.getLongName(8, "letters")}"', id_noexist_tag=True))
+ s_stb_d_tb_m_tag_list.append(self.genFullTypeSql(stb_name=stb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', value=f'"{tdCom.getLongName(8, "letters")}"', id_noexist_tag=True, t_mul_tag=True))
+ s_stb_d_tb_a_tag_list.append(self.genFullTypeSql(stb_name=stb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', value=f'"{tdCom.getLongName(8, "letters")}"', id_noexist_tag=True, t_add_tag=True))
+ s_stb_s_tb_d_ts_list.append(self.genFullTypeSql(stb_name=stb_name, tb_name=tb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', value=f'"{tdCom.getLongName(8, "letters")}"', ts=0))
+ s_stb_s_tb_d_ts_m_tag_list.append(self.genFullTypeSql(stb_name=stb_name, tb_name=tb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', value=f'"{tdCom.getLongName(8, "letters")}"', ts=0, t_mul_tag=True))
+ s_stb_s_tb_d_ts_a_tag_list.append(self.genFullTypeSql(stb_name=stb_name, tb_name=tb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', value=f'"{tdCom.getLongName(8, "letters")}"', ts=0, t_add_tag=True))
+ s_stb_d_tb_d_ts_list.append(self.genFullTypeSql(stb_name=stb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', value=f'"{tdCom.getLongName(8, "letters")}"', id_noexist_tag=True, ts=0))
+ s_stb_d_tb_d_ts_m_tag_list.append(self.genFullTypeSql(stb_name=stb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', value=f'"{tdCom.getLongName(8, "letters")}"', id_noexist_tag=True, ts=0, t_mul_tag=True))
+ s_stb_d_tb_d_ts_a_tag_list.append(self.genFullTypeSql(stb_name=stb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', value=f'"{tdCom.getLongName(8, "letters")}"', id_noexist_tag=True, ts=0, t_add_tag=True))
+
+ return d_stb_d_tb_list, s_stb_s_tb_list, s_stb_s_tb_a_tag_list, s_stb_s_tb_m_tag_list, \
+ s_stb_d_tb_list, s_stb_d_tb_m_tag_list, s_stb_d_tb_a_tag_list, s_stb_s_tb_d_ts_list, \
+ s_stb_s_tb_d_ts_m_tag_list, s_stb_s_tb_d_ts_a_tag_list, s_stb_d_tb_d_ts_list, \
+ s_stb_d_tb_d_ts_m_tag_list, s_stb_d_tb_d_ts_a_tag_list
+
+
+ def genMultiThreadSeq(self, sql_list):
+ tlist = list()
+ for insert_sql in sql_list:
+ t = threading.Thread(target=self._conn.schemaless_insert,args=([insert_sql[0]], TDSmlProtocolType.TELNET.value, None))
+ tlist.append(t)
+ return tlist
+
+ def multiThreadRun(self, tlist):
+ for t in tlist:
+ t.start()
+ for t in tlist:
+ t.join()
+
+ def stbInsertMultiThreadCheckCase(self):
+ """
+ thread input different stb
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ input_sql = self.genSqlList()[0]
+ print(input_sql)
+ self.multiThreadRun(self.genMultiThreadSeq(input_sql))
+ tdSql.query(f"show tables;")
+ tdSql.checkRows(5)
+
+ def sStbStbDdataInsertMultiThreadCheckCase(self):
+ """
+        thread input same stb tb, different data, result keeps first data
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ tb_name = tdCom.getLongName(7, "letters")
+ input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name, value="\"binaryTagValue\"")
+ self.resCmp(input_sql, stb_name)
+ s_stb_s_tb_list = self.genSqlList(stb_name=stb_name, tb_name=tb_name)[1]
+ self.multiThreadRun(self.genMultiThreadSeq(s_stb_s_tb_list))
+ tdSql.query(f"show tables;")
+ tdSql.checkRows(1) if self.smlChildTableName_value == "ID" else tdSql.checkRows(6)
+ if self.smlChildTableName_value == "ID":
+ expected_tb_name = self.getNoIdTbName(stb_name)[0]
+ tdSql.checkEqual(tb_name, expected_tb_name)
+ tdSql.query(f"select * from {stb_name};")
+ tdSql.checkRows(1) if self.smlChildTableName_value == "ID" else tdSql.checkRows(6)
+
+ def sStbStbDdataAtInsertMultiThreadCheckCase(self):
+ """
+        thread input same stb tb, different data, add columns and tags, result keeps first data
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ tb_name = tdCom.getLongName(7, "letters")
+ input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name, value="\"binaryTagValue\"")
+ self.resCmp(input_sql, stb_name)
+ s_stb_s_tb_a_tag_list = self.genSqlList(stb_name=stb_name, tb_name=tb_name)[2]
+ self.multiThreadRun(self.genMultiThreadSeq(s_stb_s_tb_a_tag_list))
+ tdSql.query(f"show tables;")
+ tdSql.checkRows(1) if self.smlChildTableName_value == "ID" else tdSql.checkRows(6)
+ if self.smlChildTableName_value == "ID":
+ expected_tb_name = self.getNoIdTbName(stb_name)[0]
+ tdSql.checkEqual(tb_name, expected_tb_name)
+ tdSql.query(f"select * from {stb_name};")
+ tdSql.checkRows(1) if self.smlChildTableName_value == "ID" else tdSql.checkRows(6)
+
+ def sStbStbDdataMtInsertMultiThreadCheckCase(self):
+ """
+        thread input same stb tb, different data, minus columns and tags, result keeps first data
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ tb_name = tdCom.getLongName(7, "letters")
+ input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name, value="\"binaryTagValue\"")
+ self.resCmp(input_sql, stb_name)
+ s_stb_s_tb_m_tag_list = self.genSqlList(stb_name=stb_name, tb_name=tb_name)[3]
+ self.multiThreadRun(self.genMultiThreadSeq(s_stb_s_tb_m_tag_list))
+ tdSql.query(f"show tables;")
+ tdSql.checkRows(1) if self.smlChildTableName_value == "ID" else tdSql.checkRows(2)
+ if self.smlChildTableName_value == "ID":
+ expected_tb_name = self.getNoIdTbName(stb_name)[0]
+ tdSql.checkEqual(tb_name, expected_tb_name)
+ tdSql.query(f"select * from {stb_name};")
+ tdSql.checkRows(1) if self.smlChildTableName_value == "ID" else tdSql.checkRows(2)
+
+ def sStbDtbDdataInsertMultiThreadCheckCase(self):
+ """
+ thread input same stb, different tb, different data
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ input_sql, stb_name = self.genFullTypeSql(value="\"binaryTagValue\"")
+ self.resCmp(input_sql, stb_name)
+ s_stb_d_tb_list = self.genSqlList(stb_name=stb_name)[4]
+ self.multiThreadRun(self.genMultiThreadSeq(s_stb_d_tb_list))
+ tdSql.query(f"show tables;")
+ tdSql.checkRows(6)
+
+ def sStbDtbDdataMtInsertMultiThreadCheckCase(self):
+ """
+ thread input same stb, different tb, different data, add col, mul tag
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ input_sql, stb_name = self.genFullTypeSql(value="\"binaryTagValue\"")
+ self.resCmp(input_sql, stb_name)
+ s_stb_d_tb_m_tag_list = [(f'{stb_name} 1626006833640 "omfdhyom" t0=F t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64', 'yzwswz'), \
+ (f'{stb_name} 1626006833640 "vqowydbc" t0=F t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64', 'yzwswz'), \
+ (f'{stb_name} 1626006833640 "plgkckpv" t0=F t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64', 'yzwswz'), \
+ (f'{stb_name} 1626006833640 "cujyqvlj" t0=F t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64', 'yzwswz'), \
+ (f'{stb_name} 1626006833640 "twjxisat" t0=T t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64', 'yzwswz')]
+ self.multiThreadRun(self.genMultiThreadSeq(s_stb_d_tb_m_tag_list))
+ tdSql.query(f"show tables;")
+ tdSql.checkRows(3)
+
+ def sStbDtbDdataAtInsertMultiThreadCheckCase(self):
+ """
+ thread input same stb, different tb, different data, add tag, mul col
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ input_sql, stb_name = self.genFullTypeSql(value="\"binaryTagValue\"")
+ self.resCmp(input_sql, stb_name)
+ s_stb_d_tb_a_tag_list = self.genSqlList(stb_name=stb_name)[6]
+ self.multiThreadRun(self.genMultiThreadSeq(s_stb_d_tb_a_tag_list))
+ tdSql.query(f"show tables;")
+ tdSql.checkRows(6)
+
+ def sStbStbDdataDtsInsertMultiThreadCheckCase(self):
+ """
+ thread input same stb tb, different ts
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ tb_name = tdCom.getLongName(7, "letters")
+ input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name, value="\"binaryTagValue\"")
+ self.resCmp(input_sql, stb_name)
+ s_stb_s_tb_d_ts_list = [(f'{stb_name} 0 "hkgjiwdj" id={tb_name} t0=f t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64 t7="vozamcts" t8=L"ncharTagValue"', 'dwpthv'), \
+ (f'{stb_name} 0 "rljjrrul" id={tb_name} t0=False t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64 t7="bmcanhbs" t8=L"ncharTagValue"', 'dwpthv'), \
+ (f'{stb_name} 0 "basanglx" id={tb_name} t0=False t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64 t7="enqkyvmb" t8=L"ncharTagValue"', 'dwpthv'), \
+ (f'{stb_name} 0 "clsajzpp" id={tb_name} t0=F t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64 t7="eivaegjk" t8=L"ncharTagValue"', 'dwpthv'), \
+ (f'{stb_name} 0 "jitwseso" id={tb_name} t0=T t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64 t7="yhlwkddq" t8=L"ncharTagValue"', 'dwpthv')]
+ self.multiThreadRun(self.genMultiThreadSeq(s_stb_s_tb_d_ts_list))
+ tdSql.query(f"show tables;")
+ tdSql.checkRows(1) if self.smlChildTableName_value == "ID" else tdSql.checkRows(6)
+ tdSql.query(f"select * from {stb_name}")
+ tdSql.checkRows(6)
+
+ def sStbStbDdataDtsMtInsertMultiThreadCheckCase(self):
+ """
+ thread input same stb tb, different ts, add col, mul tag
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ tb_name = tdCom.getLongName(7, "letters")
+ input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name, value="\"binaryTagValue\"")
+ self.resCmp(input_sql, stb_name)
+ s_stb_s_tb_d_ts_m_tag_list = self.genSqlList(stb_name=stb_name, tb_name=tb_name)[8]
+ self.multiThreadRun(self.genMultiThreadSeq(s_stb_s_tb_d_ts_m_tag_list))
+ tdSql.query(f"show tables;")
+ tdSql.checkRows(1) if self.smlChildTableName_value == "ID" else tdSql.checkRows(2)
+ tdSql.query(f"select * from {stb_name}")
+ tdSql.checkRows(6)
+ tdSql.query(f"select * from {stb_name} where t8 is not NULL")
+ tdSql.checkRows(6) if self.smlChildTableName_value == "ID" else tdSql.checkRows(1)
+
+ def sStbStbDdataDtsAtInsertMultiThreadCheckCase(self):
+ """
+ thread input same stb tb, different ts, add tag, mul col
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ tb_name = tdCom.getLongName(7, "letters")
+ input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name, value="\"binaryTagValue\"")
+ self.resCmp(input_sql, stb_name)
+ s_stb_s_tb_d_ts_a_tag_list = [(f'{stb_name} 0 "clummqfy" id={tb_name} t0=False t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64 t7="hpxzrdiw" t8=L"ncharTagValue" t11=127i8 t10=L"ncharTagValue"', 'bokaxl'), \
+ (f'{stb_name} 0 "yqeztggb" id={tb_name} t0=F t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64 t7="gdtblmrc" t8=L"ncharTagValue" t11=127i8 t10=L"ncharTagValue"', 'bokaxl'), \
+ (f'{stb_name} 0 "gbkinqdk" id={tb_name} t0=f t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64 t7="iqniuvco" t8=L"ncharTagValue" t11=127i8 t10=L"ncharTagValue"', 'bokaxl'), \
+ (f'{stb_name} 0 "ldxxejbd" id={tb_name} t0=f t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64 t7="vxkipags" t8=L"ncharTagValue" t11=127i8 t10=L"ncharTagValue"', 'bokaxl'), \
+ (f'{stb_name} 0 "tlvzwjes" id={tb_name} t0=true t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64 t7="enwrlrtj" t8=L"ncharTagValue" t11=127i8 t10=L"ncharTagValue"', 'bokaxl')]
+ self.multiThreadRun(self.genMultiThreadSeq(s_stb_s_tb_d_ts_a_tag_list))
+ tdSql.query(f"show tables;")
+ tdSql.checkRows(1) if self.smlChildTableName_value == "ID" else tdSql.checkRows(6)
+ tdSql.query(f"select * from {stb_name}")
+ tdSql.checkRows(6)
+ for t in ["t10", "t11"]:
+ tdSql.query(f"select * from {stb_name} where {t} is not NULL;")
+ tdSql.checkRows(0) if self.smlChildTableName_value == "ID" else tdSql.checkRows(5)
+
+ def sStbDtbDdataDtsInsertMultiThreadCheckCase(self):
+ """
+ thread input same stb, different tb, data, ts
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ input_sql, stb_name = self.genFullTypeSql(value="\"binaryTagValue\"")
+ self.resCmp(input_sql, stb_name)
+ s_stb_d_tb_d_ts_list = self.genSqlList(stb_name=stb_name)[10]
+ self.multiThreadRun(self.genMultiThreadSeq(s_stb_d_tb_d_ts_list))
+ tdSql.query(f"show tables;")
+ tdSql.checkRows(6)
+
+ def sStbDtbDdataDtsMtInsertMultiThreadCheckCase(self):
+ """
+ thread input same stb, different tb, data, ts, add col, mul tag
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ input_sql, stb_name = self.genFullTypeSql(value="\"binaryTagValue\"")
+ self.resCmp(input_sql, stb_name)
+ s_stb_d_tb_d_ts_m_tag_list = [(f'{stb_name} 0 "mnpmtzul" t0=False t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64', 'pcppkg'), \
+ (f'{stb_name} 0 "zbvwckcd" t0=True t1=126i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64', 'pcppkg'), \
+ (f'{stb_name} 0 "vymcjfwc" t0=False t1=125i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64', 'pcppkg'), \
+ (f'{stb_name} 0 "laumkwfn" t0=False t1=124i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64', 'pcppkg'), \
+ (f'{stb_name} 0 "nyultzxr" t0=false t1=123i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64', 'pcppkg')]
+ self.multiThreadRun(self.genMultiThreadSeq(s_stb_d_tb_d_ts_m_tag_list))
+ tdSql.query(f"show tables;")
+ tdSql.checkRows(6)
+
+ def test(self):
+ try:
+ input_sql = f'test_nchar 0 L"涛思数据" t0=f t1=L"涛思数据" t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64'
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
+ except SchemalessError as err:
+ print(err.errno)
+
+ def runAll(self):
+ self.initCheckCase()
+ self.boolTypeCheckCase()
+ self.symbolsCheckCase()
+ self.tsCheckCase()
+ self.openTstbTelnetTsCheckCase()
+ self.idSeqCheckCase()
+ self.idLetterCheckCase()
+ self.noIdCheckCase()
+ self.maxColTagCheckCase()
+ self.stbTbNameCheckCase()
+ self.idStartWithNumCheckCase()
+ self.nowTsCheckCase()
+ self.dateFormatTsCheckCase()
+ self.illegalTsCheckCase()
+ self.tbnameCheckCase()
+ self.tagNameLengthCheckCase()
+ # self.tagValueLengthCheckCase()
+ self.colValueLengthCheckCase()
+ self.tagColIllegalValueCheckCase()
+ self.blankCheckCase()
+ self.duplicateIdTagColInsertCheckCase()
+ self.noIdStbExistCheckCase()
+ self.duplicateInsertExistCheckCase()
+ self.tagColBinaryNcharLengthCheckCase()
+ self.tagColAddDupIDCheckCase()
+ self.tagColAddCheckCase()
+ self.tagMd5Check()
+ # self.tagColNcharMaxLengthCheckCase()
+ # self.batchInsertCheckCase()
+ # self.multiInsertCheckCase(10)
+ self.batchErrorInsertCheckCase()
+ self.multiColsInsertCheckCase()
+ self.blankColInsertCheckCase()
+ self.blankTagInsertCheckCase()
+ self.chineseCheckCase()
+ self.multiFieldCheckCase()
+ self.spellCheckCase()
+ self.pointTransCheckCase()
+ self.defaultTypeCheckCase()
+ self.tbnameTagsColsNameCheckCase()
+ # # # MultiThreads
+ # self.stbInsertMultiThreadCheckCase()
+ # self.sStbStbDdataInsertMultiThreadCheckCase()
+ # self.sStbStbDdataAtInsertMultiThreadCheckCase()
+ # self.sStbStbDdataMtInsertMultiThreadCheckCase()
+ # self.sStbDtbDdataInsertMultiThreadCheckCase()
+ # self.sStbDtbDdataMtInsertMultiThreadCheckCase()
+ # self.sStbDtbDdataAtInsertMultiThreadCheckCase()
+ # self.sStbStbDdataDtsInsertMultiThreadCheckCase()
+ # # self.sStbStbDdataDtsMtInsertMultiThreadCheckCase()
+ # self.sStbStbDdataDtsAtInsertMultiThreadCheckCase()
+ # self.sStbDtbDdataDtsInsertMultiThreadCheckCase()
+ # self.sStbDtbDdataDtsMtInsertMultiThreadCheckCase()
+
+ def run(self):
+ print("running {}".format(__file__))
+
+ try:
+ self.createDb()
+ self.runAll()
+ # self.createDb(protocol="telnet-tcp")
+ # self.initCheckCase('telnet-tcp')
+ # self.boolTypeCheckCase('telnet-tcp')
+ # self.symbolsCheckCase('telnet-tcp')
+ # self.idSeqCheckCase('telnet-tcp')
+ # self.idLetterCheckCase('telnet-tcp')
+ # self.noIdCheckCase('telnet-tcp')
+ # self.stbTbNameCheckCase('telnet-tcp')
+ # self.idStartWithNumCheckCase('telnet-tcp')
+ # self.pointTransCheckCase('telnet-tcp')
+ # self.tcpKeywordsCheckCase()
+ except Exception as err:
+ print(''.join(traceback.format_exception(None, err, err.__traceback__)))
+ raise err
+
+ def stop(self):
+ tdSql.close()
+ tdLog.success("%s successfully executed" % __file__)
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
diff --git a/tests/system-test/2-query/nestedQuery_str.py b/tests/system-test/2-query/nestedQuery_str.py
new file mode 100755
index 0000000000000000000000000000000000000000..8214c98c5cc8526874db5f40df22f8e587ea36f4
--- /dev/null
+++ b/tests/system-test/2-query/nestedQuery_str.py
@@ -0,0 +1,5753 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import random
+import os
+import time
+import taos
+import subprocess
+from faker import Faker
+from util.log import tdLog
+from util.cases import tdCases
+from util.sql import tdSql
+from util.dnodes import tdDnodes
+from util.dnodes import *
+
+class TDTestCase:
+ updatecfgDict = {'maxSQLLength':1048576,'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 ,
+ "jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, "dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143,
+ "wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"fnDebugFlag":143}
+
+ def init(self, conn, logSql):
+ tdLog.debug("start to execute %s" % __file__)
+ tdSql.init(conn.cursor(), logSql)
+
+ self.testcasePath = os.path.split(__file__)[0]
+ self.testcaseFilename = os.path.split(__file__)[-1]
+ os.system("rm -rf %s/%s.sql" % (self.testcasePath,self.testcaseFilename))
+
+ self.num = 10
+ self.fornum = 5
+
+ self.db_nest = "nest"
+ self.dropandcreateDB_random("%s" %self.db_nest, 1)
+
+ # regular column select
+ #q_select= ['ts' , '*' , 'q_int', 'q_bigint' , 'q_bigint' , 'q_smallint' , 'q_tinyint' , 'q_bool' , 'q_binary' , 'q_nchar' ,'q_float' , 'q_double' ,'q_ts ']
+ self.q_select= ['ts' , 'q_int', 'q_bigint' , 'q_bigint' , 'q_smallint' , 'q_tinyint' , 'q_bool' , 'q_binary' , 'q_nchar' ,'q_float' , 'q_double' ,'q_ts ', 'q_int_null ', 'q_bigint_null ' , 'q_bigint_null ' , 'q_smallint_null ' , 'q_tinyint_null ' , 'q_bool_null ' , 'q_binary_null ' , 'q_nchar_null ' ,'q_float_null ' , 'q_double_null ' ,'q_ts_null ']
+
+ # tag column select
+ #t_select= ['*' , 'loc' ,'t_int', 't_bigint' , 't_bigint' , 't_smallint' , 't_tinyint' , 't_bool' , 't_binary' , 't_nchar' ,'t_float' , 't_double' ,'t_ts ']
+ self.t_select= ['loc','t_int', 't_bigint' , 't_bigint' , 't_smallint' , 't_tinyint' , 't_bool' , 't_binary' , 't_nchar' ,'t_float' , 't_double' ,'t_ts ']
+
+ # regular and tag column select
+ self.qt_select= self.q_select + self.t_select
+
+ # distinct regular column select
+ self.dq_select= ['distinct q_int', 'distinct q_bigint' , 'distinct q_smallint' , 'distinct q_tinyint' ,
+ 'distinct q_bool' , 'distinct q_binary' , 'distinct q_nchar' ,'distinct q_float' , 'distinct q_double' ,'distinct q_ts ']
+
+ # distinct tag column select
+ self.dt_select= ['distinct loc', 'distinct t_int', 'distinct t_bigint' , 'distinct t_smallint' , 'distinct t_tinyint' ,
+ 'distinct t_bool' , 'distinct t_binary' , 'distinct t_nchar' ,'distinct t_float' , 'distinct t_double' ,'distinct t_ts ']
+
+ # distinct regular and tag column select
+ self.dqt_select= self.dq_select + self.dt_select
+
+ # special column select
+ self.s_r_select= ['_c0', '_rowts' , '_C0' ]
+ self.s_s_select= ['tbname' , '_rowts' , '_c0', '_C0' ]
+ self.unionall_or_union= [ ' union ' , ' union all ' ]
+
+ # regular column where
+ self.q_where = ['ts < now +1s','q_bigint >= -9223372036854775807 and q_bigint <= 9223372036854775807', 'q_int <= 2147483647 and q_int >= -2147483647',
+ 'q_smallint >= -32767 and q_smallint <= 32767','q_tinyint >= -127 and q_tinyint <= 127','q_float >= -1.7E308 and q_float <= 1.7E308',
+ 'q_double >= -1.7E308 and q_double <= 1.7E308', 'q_binary like \'binary%\' or q_binary = \'0\' ' , 'q_nchar like \'nchar%\' or q_nchar = \'0\' ' ,
+ 'q_bool = true or q_bool = false' , 'q_bool in (0 , 1)' , 'q_bool in ( true , false)' , 'q_bool = 0 or q_bool = 1',
+ 'q_bigint between -9223372036854775807 and 9223372036854775807',' q_int between -2147483647 and 2147483647','q_smallint between -32767 and 32767',
+ 'q_bigint not between 9223372036854775807 and -9223372036854775807','q_int not between 2147483647 and -2147483647','q_smallint not between 32767 and -32767',
+ 'q_tinyint between -127 and 127 ','q_float >= -3.4E38 ','q_float <= 3.4E38 ','q_double >= -1.7E308 ',
+ 'q_double <= 1.7E308 ','q_float between -3.4E38 and 3.4E38 ','q_double between -1.7E308 and 1.7E308 ' ,'q_float not between 3.4E38 and -3.4E38 ','q_double not between 1.7E308 and -1.7E308 ',
+ 'q_float is not null ' ,'q_double is not null ' ,'q_binary match \'binary\' ','q_binary nmatch \'binarynchar\' ','q_nchar match \'nchar\' ','q_nchar nmatch \'binarynchar\' ',
+ 'q_binary like \'binary%\' ','(q_binary like \'binary%\' or q_nchar = \'0\' or q_binary = \'binary_\' ) ','q_nchar like \'nchar%\' ','(q_nchar like \'nchar%\' or q_binary = \'0\' or q_nchar = \'nchar_\' ) ',]
+ #TD-6201 ,'q_bool between 0 and 1'
+
+ # regular column where for test union,join
+ self.q_u_where = ['t1.ts < now +1s' , 't2.ts < now +1s','t1.q_bigint >= -9223372036854775807 and t1.q_bigint <= 9223372036854775807 and t2.q_bigint >= -9223372036854775807 and t2.q_bigint <= 9223372036854775807',
+ 't1.q_int <= 2147483647 and t1.q_int >= -2147483647 and t2.q_int <= 2147483647 and t2.q_int >= -2147483647',
+ 't1.q_smallint >= -32767 and t1.q_smallint <= 32767 and t2.q_smallint >= -32767 and t2.q_smallint <= 32767',
+ 't1.q_tinyint >= -127 and t1.q_tinyint <= 127 and t2.q_tinyint >= -127 and t2.q_tinyint <= 127',
+ 't1.q_float >= - 1.7E308 and t1.q_float <= 1.7E308 and t2.q_float >= - 1.7E308 and t2.q_float <= 1.7E308',
+ 't1.q_double >= - 1.7E308 and t1.q_double <= 1.7E308 and t2.q_double >= - 1.7E308 and t2.q_double <= 1.7E308',
+ 't1.q_binary like \'binary%\' and t2.q_binary like \'binary%\' ' ,
+ 't1.q_nchar like \'nchar%\' and t2.q_nchar like \'nchar%\' ' ,
+ 't1.q_bool in (0 , 1) and t2.q_bool in (0 , 1)' , 't1.q_bool in ( true , false) and t2.q_bool in ( true , false)' ,
+ 't1.q_bigint between -9223372036854775807 and 9223372036854775807 and t2.q_bigint between -9223372036854775807 and 9223372036854775807',
+ 't1.q_int between -2147483647 and 2147483647 and t2.q_int between -2147483647 and 2147483647',
+ 't1.q_smallint between -32767 and 32767 and t2.q_smallint between -32767 and 32767',
+ 't1.q_tinyint between -127 and 127 and t2.q_tinyint between -127 and 127 ','t1.q_float between -1.7E308 and 1.7E308 and t2.q_float between -1.7E308 and 1.7E308',
+ 't1.q_double between -1.7E308 and 1.7E308 and t2.q_double between -1.7E308 and 1.7E308',
+ 't1.q_bigint not between 9223372036854775807 and -9223372036854775807 and t2.q_bigint not between 9223372036854775807 and -9223372036854775807',
+ 't1.q_int not between 2147483647 and -2147483647 and t2.q_int not between 2147483647 and -2147483647',
+ 't1.q_smallint not between 32767 and -32767 and t2.q_smallint not between 32767 and -32767',
+ 't1.q_tinyint not between 127 and -127 and t2.q_tinyint not between 127 and -127 ','t1.q_float not between -1.7E308 and -1.7E308 and t2.q_float not between 1.7E308 and -1.7E308',
+ 't1.q_double not between 1.7E308 and -1.7E308 and t2.q_double not between 1.7E308 and -1.7E308']
+ #TD-6201 ,'t1.q_bool between 0 and 1 or t2.q_bool between 0 and 1']
+ #'t1.q_bool = true and t1.q_bool = false and t2.q_bool = true and t2.q_bool = false' , 't1.q_bool = 0 and t1.q_bool = 1 and t2.q_bool = 0 and t2.q_bool = 1' ,
+
+ self.q_u_or_where = ['(t1.q_binary like \'binary%\' or t1.q_binary = \'0\' or t2.q_binary like \'binary%\' or t2.q_binary = \'0\' )' ,
+ '(t1.q_nchar like \'nchar%\' or t1.q_nchar = \'0\' or t2.q_nchar like \'nchar%\' or t2.q_nchar = \'0\' )' , '(t1.q_bool = true or t1.q_bool = false or t2.q_bool = true or t2.q_bool = false)' ,
+ '(t1.q_bool in (0 , 1) or t2.q_bool in (0 , 1))' , '(t1.q_bool in ( true , false) or t2.q_bool in ( true , false))' , '(t1.q_bool = 0 or t1.q_bool = 1 or t2.q_bool = 0 or t2.q_bool = 1)' ,
+ '(t1.q_bigint between -9223372036854775807 and 9223372036854775807 or t2.q_bigint between -9223372036854775807 and 9223372036854775807)',
+ '(t1.q_int between -2147483647 and 2147483647 or t2.q_int between -2147483647 and 2147483647)',
+ '(t1.q_smallint between -32767 and 32767 or t2.q_smallint between -32767 and 32767)',
+ '(t1.q_tinyint between -127 and 127 or t2.q_tinyint between -127 and 127 )','(t1.q_float between -1.7E308 and 1.7E308 or t2.q_float between -1.7E308 and 1.7E308)',
+ '(t1.q_double between -1.7E308 and 1.7E308 or t2.q_double between -1.7E308 and 1.7E308)']
+
+ # tag column where
+ self.t_where = ['ts < now +1s','t_bigint >= -9223372036854775807 and t_bigint <= 9223372036854775807','t_int <= 2147483647 and t_int >= -2147483647',
+ 't_smallint >= -32767 and t_smallint <= 32767','q_tinyint >= -127 and t_tinyint <= 127','t_float >= -1.7E308 and t_float <= 1.7E308',
+ 't_double >= -1.7E308 and t_double <= 1.7E308', 't_binary like \'binary%\' or t_binary = \'0\' ' , 't_nchar like \'nchar%\' or t_nchar = \'0\'' ,
+ 't_bool = true or t_bool = false' , 't_bool in (0 , 1)' , 't_bool in ( true , false)' , 't_bool = 0 or t_bool = 1',
+ 't_bigint between -9223372036854775807 and 9223372036854775807',' t_int between -2147483647 and 2147483647','t_smallint between -32767 and 32767',
+ 't_tinyint between -127 and 127 ','t_float between -1.7E308 and 1.7E308','t_double between -1.7E308 and 1.7E308',
+ 't_binary match \'binary\' ','t_binary nmatch \'binarynchar\' ','t_nchar match \'nchar\' ','t_nchar nmatch \'binarynchar\' ',
+ 't_binary like \'binary%\' ','t_nchar like \'nchar%\' ','(t_binary like \'binary%\' or t_nchar = \'0\' ) ','(t_nchar like \'nchar%\' or t_binary = \'0\' ) ',]
+ #TD-6201,'t_bool between 0 and 1'
+
+ # tag column WHERE clauses for testing union/join | this is not supported
+ self.t_u_where = ['t1.ts < now +1s' , 't2.ts < now +1s','t1.t_bigint >= -9223372036854775807 and t1.t_bigint <= 9223372036854775807 and t2.t_bigint >= -9223372036854775807 and t2.t_bigint <= 9223372036854775807',
+ 't1.t_int <= 2147483647 and t1.t_int >= -2147483647 and t2.t_int <= 2147483647 and t2.t_int >= -2147483647',
+ 't1.t_smallint >= -32767 and t1.t_smallint <= 32767 and t2.t_smallint >= -32767 and t2.t_smallint <= 32767',
+ 't1.t_tinyint >= -127 and t1.t_tinyint <= 127 and t2.t_tinyint >= -127 and t2.t_tinyint <= 127',
+ 't1.t_float >= -1.7E308 and t1.t_float <= 1.7E308 and t2.t_float >= -1.7E308 and t2.t_float <= 1.7E308',
+ 't1.t_double >= -1.7E308 and t1.t_double <= 1.7E308 and t2.t_double >= -1.7E308 and t2.t_double <= 1.7E308',
+ '(t1.t_binary like \'binary%\' or t1.t_binary = \'0\' or t2.t_binary like \'binary%\' or t2.t_binary = \'0\') ' ,
+ '(t1.t_nchar like \'nchar%\' or t1.t_nchar = \'0\' or t2.t_nchar like \'nchar%\' or t2.t_nchar = \'0\' )' , '(t1.t_bool = true or t1.t_bool = false or t2.t_bool = true or t2.t_bool = false)' ,
+ 't1.t_bool in (0 , 1) and t2.t_bool in (0 , 1)' , 't1.t_bool in ( true , false) and t2.t_bool in ( true , false)' , '(t1.t_bool = 0 or t1.t_bool = 1 or t2.t_bool = 0 or t2.t_bool = 1)',
+ 't1.t_bigint between -9223372036854775807 and 9223372036854775807 and t2.t_bigint between -9223372036854775807 and 9223372036854775807',
+ 't1.t_int between -2147483647 and 2147483647 and t2.t_int between -2147483647 and 2147483647',
+ 't1.t_smallint between -32767 and 32767 and t2.t_smallint between -32767 and 32767',
+ '(t1.t_tinyint between -127 and 127 and t2.t_tinyint between -127 and 127) ','t1.t_float between -1.7E308 and 1.7E308 and t2.t_float between -1.7E308 and 1.7E308',
+ '(t1.t_double between -1.7E308 and 1.7E308 and t2.t_double between -1.7E308 and 1.7E308)']
+ #TD-6201,'t1.t_bool between 0 and 1 or t2.q_bool between 0 and 1']
+
+ self.t_u_or_where = ['(t1.t_binary like \'binary%\' or t1.t_binary = \'0\' or t2.t_binary like \'binary%\' or t2.t_binary = \'0\' )' ,
+ '(t1.t_nchar like \'nchar%\' or t1.t_nchar = \'0\' or t2.t_nchar like \'nchar%\' or t2.t_nchar = \'0\' )' , '(t1.t_bool = true or t1.t_bool = false or t2.t_bool = true or t2.t_bool = false)' ,
+ '(t1.t_bool in (0 , 1) or t2.t_bool in (0 , 1))' , '(t1.t_bool in ( true , false) or t2.t_bool in ( true , false))' , '(t1.t_bool = 0 or t1.t_bool = 1 or t2.t_bool = 0 or t2.t_bool = 1)',
+ '(t1.t_bigint between -9223372036854775807 and 9223372036854775807 or t2.t_bigint between -9223372036854775807 and 9223372036854775807)',
+ '(t1.t_int between -2147483647 and 2147483647 or t2.t_int between -2147483647 and 2147483647)',
+ '(t1.t_smallint between -32767 and 32767 or t2.t_smallint between -32767 and 32767)',
+ '(t1.t_tinyint between -127 and 127 or t2.t_tinyint between -127 and 127 )','(t1.t_float between -1.7E308 and 1.7E308 or t2.t_float between -1.7E308 and 1.7E308)',
+ '(t1.t_double between -1.7E308 and 1.7E308 or t2.t_double between -1.7E308 and 1.7E308)']
+
+ # regular and tag column where
+ self.qt_where = self.q_where + self.t_where
+ self.qt_u_where = self.q_u_where + self.t_u_where
+ # currently, qt_u_or_where is not supported
+ self.qt_u_or_where = self.q_u_or_where + self.t_u_or_where
+
+ # tag column WHERE clauses for testing super-table join | this is supported; TODO: confirm whether 't1.t_bool = t2.t_bool' is also supported
+ self.t_join_where = ['t1.t_bigint = t2.t_bigint ', 't1.t_int = t2.t_int ', 't1.t_smallint = t2.t_smallint ', 't1.t_tinyint = t2.t_tinyint ',
+ 't1.t_float = t2.t_float ', 't1.t_double = t2.t_double ', 't1.t_binary = t2.t_binary ' , 't1.t_nchar = t2.t_nchar ' ]
+
+ # session && fill
+ self.session_where = ['session(ts,10a)' , 'session(ts,10s)', 'session(ts,10m)' , 'session(ts,10h)','session(ts,10d)' , 'session(ts,10w)']
+ self.session_u_where = ['session(t1.ts,10a)' , 'session(t1.ts,10s)', 'session(t1.ts,10m)' , 'session(t1.ts,10h)','session(t1.ts,10d)' , 'session(t1.ts,10w)',
+ 'session(t2.ts,10a)' , 'session(t2.ts,10s)', 'session(t2.ts,10m)' , 'session(t2.ts,10h)','session(t2.ts,10d)' , 'session(t2.ts,10w)']
+
+ self.fill_where = ['FILL(NONE)','FILL(PREV)','FILL(NULL)','FILL(LINEAR)','FILL(NEXT)','FILL(VALUE, 1.23)']
+
+ self.state_window = ['STATE_WINDOW(q_tinyint)','STATE_WINDOW(q_bigint)','STATE_WINDOW(q_int)','STATE_WINDOW(q_bool)','STATE_WINDOW(q_smallint)']
+ self.state_u_window = ['STATE_WINDOW(t1.q_tinyint)','STATE_WINDOW(t1.q_bigint)','STATE_WINDOW(t1.q_int)','STATE_WINDOW(t1.q_bool)','STATE_WINDOW(t1.q_smallint)',
+ 'STATE_WINDOW(t2.q_tinyint)','STATE_WINDOW(t2.q_bigint)','STATE_WINDOW(t2.q_int)','STATE_WINDOW(t2.q_bool)','STATE_WINDOW(t2.q_smallint)']
+
+ # order by where
+ self.order_where = ['order by ts' , 'order by ts asc']
+ self.order_u_where = ['order by t1.ts' , 'order by t1.ts asc' , 'order by t2.ts' , 'order by t2.ts asc']
+ self.order_desc_where = ['order by ts' , 'order by ts asc' , 'order by ts desc' ]
+ self.orders_desc_where = ['order by ts' , 'order by ts asc' , 'order by ts desc' , 'order by loc' , 'order by loc asc' , 'order by loc desc']
+
+ self.group_where = ['group by tbname , loc' , 'group by tbname', 'group by tbname, t_bigint', 'group by tbname,t_int', 'group by tbname, t_smallint', 'group by tbname,t_tinyint',
+ 'group by tbname,t_float', 'group by tbname,t_double' , 'group by tbname,t_binary', 'group by tbname,t_nchar', 'group by tbname,t_bool' ,'group by tbname ,loc ,t_bigint',
+ 'group by tbname,t_binary ,t_nchar ,t_bool' , 'group by tbname,t_int ,t_smallint ,t_tinyint' , 'group by tbname,t_float ,t_double ' ,
+ 'PARTITION BY tbname , loc' , 'PARTITION BY tbname', 'PARTITION BY tbname, t_bigint', 'PARTITION BY tbname,t_int', 'PARTITION BY tbname, t_smallint', 'PARTITION BY tbname,t_tinyint',
+ 'PARTITION BY tbname,t_float', 'PARTITION BY tbname,t_double' , 'PARTITION BY tbname,t_binary', 'PARTITION BY tbname,t_nchar', 'PARTITION BY tbname,t_bool' ,'PARTITION BY tbname ,loc ,t_bigint',
+ 'PARTITION BY tbname,t_binary ,t_nchar ,t_bool' , 'PARTITION BY tbname,t_int ,t_smallint ,t_tinyint' , 'PARTITION BY tbname,t_float ,t_double ']
+ self.group_where_j = ['group by t1.loc' , 'group by t1.t_bigint', 'group by t1.t_int', 'group by t1.t_smallint', 'group by t1.t_tinyint',
+ 'group by t1.t_float', 'group by t1.t_double' , 'group by t1.t_binary', 'group by t1.t_nchar', 'group by t1.t_bool' ,'group by t1.loc ,t1.t_bigint',
+ 'group by t1.t_binary ,t1.t_nchar ,t1.t_bool' , 'group by t1.t_int ,t1.t_smallint ,t1.t_tinyint' , 'group by t1.t_float ,t1.t_double ' ,
+ 'PARTITION BY t1.loc' , 'PARTITION by t1.t_bigint', 'PARTITION by t1.t_int', 'PARTITION by t1.t_smallint', 'PARTITION by t1.t_tinyint',
+ 'PARTITION by t1.t_float', 'PARTITION by t1.t_double' , 'PARTITION by t1.t_binary', 'PARTITION by t1.t_nchar', 'PARTITION by t1.t_bool' ,'PARTITION BY t1.loc ,t1.t_bigint',
+ 'PARTITION by t1.t_binary ,t1.t_nchar ,t1.t_bool' , 'PARTITION by t1.t_int ,t1.t_smallint ,t1.t_tinyint' , 'PARTITION by t1.t_float ,t1.t_double ',
+ 'group by t2.loc' , 'group by t2.t_bigint', 'group by t2.t_int', 'group by t2.t_smallint', 'group by t2.t_tinyint',
+ 'group by t2.t_float', 'group by t2.t_double' , 'group by t2.t_binary', 'group by t2.t_nchar', 'group by t2.t_bool' ,'group by t2.loc ,t2.t_bigint',
+ 'group by t2.t_binary ,t2.t_nchar ,t2.t_bool' , 'group by t2.t_int ,t2.t_smallint ,t2.t_tinyint' , 'group by t2.t_float ,t2.t_double ' ,
+ 'PARTITION BY t2.loc' , 'PARTITION by t2.t_bigint', 'PARTITION by t2.t_int', 'PARTITION by t2.t_smallint', 'PARTITION by t2.t_tinyint',
+ 'PARTITION by t2.t_float', 'PARTITION by t2.t_double' , 'PARTITION by t2.t_binary', 'PARTITION by t2.t_nchar', 'PARTITION by t2.t_bool' ,'PARTITION BY t2.loc ,t2.t_bigint',
+ 'PARTITION by t2.t_binary ,t2.t_nchar ,t2.t_bool' , 'PARTITION by t2.t_int ,t2.t_smallint ,t2.t_tinyint' , 'PARTITION by t2.t_float ,t2.t_double ']
+
+ self.partiton_where = ['PARTITION BY tbname , loc' , 'PARTITION BY tbname', 'PARTITION BY tbname, t_bigint', 'PARTITION BY tbname,t_int', 'PARTITION BY tbname, t_smallint', 'PARTITION BY tbname,t_tinyint',
+ 'PARTITION BY tbname,t_float', 'PARTITION BY tbname,t_double' , 'PARTITION BY tbname,t_binary', 'PARTITION BY tbname,t_nchar', 'PARTITION BY tbname,t_bool' ,'PARTITION BY tbname ,loc ,t_bigint',
+ 'PARTITION BY tbname,t_binary ,t_nchar ,t_bool' , 'PARTITION BY tbname,t_int ,t_smallint ,t_tinyint' , 'PARTITION BY tbname,t_float ,t_double ']
+ self.partiton_where_j = ['PARTITION BY t1.loc' , 'PARTITION by t1.t_bigint', 'PARTITION by t1.t_int', 'PARTITION by t1.t_smallint', 'PARTITION by t1.t_tinyint',
+ 'PARTITION by t1.t_float', 'PARTITION by t1.t_double' , 'PARTITION by t1.t_binary', 'PARTITION by t1.t_nchar', 'PARTITION by t1.t_bool' ,'PARTITION BY t1.loc ,t1.t_bigint',
+ 'PARTITION by t1.t_binary ,t1.t_nchar ,t1.t_bool' , 'PARTITION by t1.t_int ,t1.t_smallint ,t1.t_tinyint' , 'PARTITION by t1.t_float ,t1.t_double ',
+ 'PARTITION BY t2.loc' , 'PARTITION by t2.t_bigint', 'PARTITION by t2.t_int', 'PARTITION by t2.t_smallint', 'PARTITION by t2.t_tinyint',
+ 'PARTITION by t2.t_float', 'PARTITION by t2.t_double' , 'PARTITION by t2.t_binary', 'PARTITION by t2.t_nchar', 'PARTITION by t2.t_bool' ,'PARTITION BY t2.loc ,t2.t_bigint',
+ 'PARTITION by t2.t_binary ,t2.t_nchar ,t2.t_bool' , 'PARTITION by t2.t_int ,t2.t_smallint ,t2.t_tinyint' , 'PARTITION by t2.t_float ,t2.t_double ']
+
+
+ self.group_where_regular = ['group by tbname ' , 'group by tbname', 'group by tbname, q_bigint', 'group by tbname,q_int', 'group by tbname, q_smallint', 'group by tbname,q_tinyint',
+ 'group by tbname,q_float', 'group by tbname,q_double' , 'group by tbname,q_binary', 'group by tbname,q_nchar', 'group by tbname,q_bool' ,'group by tbname ,q_bigint',
+ 'group by tbname,q_binary ,q_nchar ,q_bool' , 'group by tbname,q_int ,q_smallint ,q_tinyint' , 'group by tbname,q_float ,q_double ' ,
+ 'PARTITION BY tbname ' , 'PARTITION BY tbname', 'PARTITION BY tbname, q_bigint', 'PARTITION BY tbname,q_int', 'PARTITION BY tbname, q_smallint', 'PARTITION BY tbname,q_tinyint',
+ 'PARTITION BY tbname,q_float', 'PARTITION BY tbname,q_double' , 'PARTITION BY tbname,q_binary', 'PARTITION BY tbname,q_nchar', 'PARTITION BY tbname,q_bool' ,'PARTITION BY tbname ,q_bigint',
+ 'PARTITION BY tbname,q_binary ,q_nchar ,q_bool' , 'PARTITION BY tbname,q_int ,q_smallint ,q_tinyint' , 'PARTITION BY tbname,q_float ,q_double ']
+ self.group_where_regular_j = ['group by t1.q_bigint', 'group by t1.q_int', 'group by t1.q_smallint', 'group by t1.q_tinyint',
+ 'group by t1.q_float', 'group by t1.q_double' , 'group by t1.q_binary', 'group by t1.q_nchar', 'group by t1.q_bool' ,'group by t1.q_bigint',
+ 'group by t1.q_binary ,t1.q_nchar ,t1.q_bool' , 'group by t1.q_int ,t1.q_smallint ,t1.q_tinyint' , 'group by t1.q_float ,t1.q_double ' ,
+ 'PARTITION by t1.q_bigint', 'PARTITION by t1.q_int', 'PARTITION by t1.q_smallint', 'PARTITION by t1.q_tinyint',
+ 'PARTITION by t1.q_float', 'PARTITION by t1.q_double' , 'PARTITION by t1.q_binary', 'PARTITION by t1.q_nchar', 'PARTITION by t1.q_bool' ,'PARTITION BY t1.q_bigint',
+ 'PARTITION by t1.q_binary ,t1.q_nchar ,t1.q_bool' , 'PARTITION by t1.q_int ,t1.q_smallint ,t1.q_tinyint' , 'PARTITION by t1.q_float ,t1.q_double ',
+ 'group by t2.q_bigint', 'group by t2.q_int', 'group by t2.q_smallint', 'group by t2.q_tinyint',
+ 'group by t2.q_float', 'group by t2.q_double' , 'group by t2.q_binary', 'group by t2.q_nchar', 'group by t2.q_bool' ,'group by t2.q_bigint',
+ 'group by t2.q_binary ,t2.q_nchar ,t2.q_bool' , 'group by t2.q_int ,t2.q_smallint ,t2.q_tinyint' , 'group by t2.q_float ,t2.q_double ' ,
+ 'PARTITION by t2.q_bigint', 'PARTITION by t2.q_int', 'PARTITION by t2.q_smallint', 'PARTITION by t2.q_tinyint',
+ 'PARTITION by t2.q_float', 'PARTITION by t2.q_double' , 'PARTITION by t2.q_binary', 'PARTITION by t2.q_nchar', 'PARTITION by t2.q_bool' ,'PARTITION BY t2.q_bigint',
+ 'PARTITION by t2.q_binary ,t2.q_nchar ,t2.q_bool' , 'PARTITION by t2.q_int ,t2.q_smallint ,t2.q_tinyint' , 'PARTITION by t2.q_float ,t2.q_double ']
+
+ self.partiton_where_regular = ['PARTITION BY tbname ' , 'PARTITION BY tbname', 'PARTITION BY tbname, q_bigint', 'PARTITION BY tbname,q_int', 'PARTITION BY tbname, q_smallint', 'PARTITION BY tbname,q_tinyint',
+ 'PARTITION BY tbname,q_float', 'PARTITION BY tbname,q_double' , 'PARTITION BY tbname,q_binary', 'PARTITION BY tbname,q_nchar', 'PARTITION BY tbname,q_bool' ,'PARTITION BY tbname ,q_bigint',
+ 'PARTITION BY tbname,q_binary ,q_nchar ,q_bool' , 'PARTITION BY tbname,q_int ,q_smallint ,q_tinyint' , 'PARTITION BY tbname,q_float ,q_double ']
+ self.partiton_where_regular_j = ['PARTITION by t1.q_bigint', 'PARTITION by t1.q_int', 'PARTITION by t1.q_smallint', 'PARTITION by t1.q_tinyint',
+ 'PARTITION by t1.q_float', 'PARTITION by t1.q_double' , 'PARTITION by t1.q_binary', 'PARTITION by t1.q_nchar', 'PARTITION by t1.q_bool' ,'PARTITION BY t1.q_bigint',
+ 'PARTITION by t1.q_binary ,t1.q_nchar ,t1.q_bool' , 'PARTITION by t1.q_int ,t1.q_smallint ,t1.q_tinyint' , 'PARTITION by t1.q_float ,t1.q_double ',
+ 'PARTITION by t2.q_bigint', 'PARTITION by t2.q_int', 'PARTITION by t2.q_smallint', 'PARTITION by t2.q_tinyint',
+ 'PARTITION by t2.q_float', 'PARTITION by t2.q_double' , 'PARTITION by t2.q_binary', 'PARTITION by t2.q_nchar', 'PARTITION by t2.q_bool' ,'PARTITION BY t2.q_bigint',
+ 'PARTITION by t2.q_binary ,t2.q_nchar ,t2.q_bool' , 'PARTITION by t2.q_int ,t2.q_smallint ,t2.q_tinyint' , 'PARTITION by t2.q_float ,t2.q_double ']
+
+ self.having_support = ['having count(q_int) > 0','having count(q_bigint) > 0','having count(q_smallint) > 0','having count(q_tinyint) > 0','having count(q_float) > 0','having count(q_double) > 0','having count(q_bool) > 0',
+ 'having avg(q_int) > 0','having avg(q_bigint) > 0','having avg(q_smallint) > 0','having avg(q_tinyint) > 0','having avg(q_float) > 0','having avg(q_double) > 0',
+ 'having sum(q_int) > 0','having sum(q_bigint) > 0','having sum(q_smallint) > 0','having sum(q_tinyint) > 0','having sum(q_float) > 0','having sum(q_double) > 0',
+ 'having STDDEV(q_int) > 0','having STDDEV(q_bigint) > 0','having STDDEV(q_smallint) > 0','having STDDEV(q_tinyint) > 0','having STDDEV(q_float) > 0','having STDDEV(q_double) > 0',
+ 'having TWA(q_int) > 0','having TWA(q_bigint) > 0','having TWA(q_smallint) > 0','having TWA(q_tinyint) > 0','having TWA(q_float) > 0','having TWA(q_double) > 0',
+ 'having IRATE(q_int) > 0','having IRATE(q_bigint) > 0','having IRATE(q_smallint) > 0','having IRATE(q_tinyint) > 0','having IRATE(q_float) > 0','having IRATE(q_double) > 0',
+ 'having MIN(q_int) > 0','having MIN(q_bigint) > 0','having MIN(q_smallint) > 0','having MIN(q_tinyint) > 0','having MIN(q_float) > 0','having MIN(q_double) > 0',
+ 'having MAX(q_int) > 0','having MAX(q_bigint) > 0','having MAX(q_smallint) > 0','having MAX(q_tinyint) > 0','having MAX(q_float) > 0','having MAX(q_double) > 0',
+ 'having FIRST(q_int) > 0','having FIRST(q_bigint) > 0','having FIRST(q_smallint) > 0','having FIRST(q_tinyint) > 0','having FIRST(q_float) > 0','having FIRST(q_double) > 0',
+ 'having LAST(q_int) > 0','having LAST(q_bigint) > 0','having LAST(q_smallint) > 0','having LAST(q_tinyint) > 0','having LAST(q_float) > 0','having LAST(q_double) > 0',
+ 'having APERCENTILE(q_int,10) > 0','having APERCENTILE(q_bigint,10) > 0','having APERCENTILE(q_smallint,10) > 0','having APERCENTILE(q_tinyint,10) > 0','having APERCENTILE(q_float,10) > 0','having APERCENTILE(q_double,10) > 0']
+ self.having_not_support = ['having TOP(q_int,10) > 0','having TOP(q_bigint,10) > 0','having TOP(q_smallint,10) > 0','having TOP(q_tinyint,10) > 0','having TOP(q_float,10) > 0','having TOP(q_double,10) > 0','having TOP(q_bool,10) > 0',
+ 'having BOTTOM(q_int,10) > 0','having BOTTOM(q_bigint,10) > 0','having BOTTOM(q_smallint,10) > 0','having BOTTOM(q_tinyint,10) > 0','having BOTTOM(q_float,10) > 0','having BOTTOM(q_double,10) > 0','having BOTTOM(q_bool,10) > 0',
+ 'having LEASTSQUARES(q_int) > 0','having LEASTSQUARES(q_bigint) > 0','having LEASTSQUARES(q_smallint) > 0','having LEASTSQUARES(q_tinyint) > 0','having LEASTSQUARES(q_float) > 0','having LEASTSQUARES(q_double) > 0','having LEASTSQUARES(q_bool) > 0',
+ 'having FIRST(q_bool) > 0','having IRATE(q_bool) > 0','having PERCENTILE(q_bool,10) > 0','having avg(q_bool) > 0','having LAST_ROW(q_bool) > 0','having sum(q_bool) > 0','having STDDEV(q_bool) > 0','having APERCENTILE(q_bool,10) > 0','having TWA(q_bool) > 0','having LAST(q_bool) > 0',
+ 'having PERCENTILE(q_int,10) > 0','having PERCENTILE(q_bigint,10) > 0','having PERCENTILE(q_smallint,10) > 0','having PERCENTILE(q_tinyint,10) > 0','having PERCENTILE(q_float,10) > 0','having PERCENTILE(q_double,10) > 0']
+ self.having_tagnot_support = ['having LAST_ROW(q_int) > 0','having LAST_ROW(q_bigint) > 0','having LAST_ROW(q_smallint) > 0','having LAST_ROW(q_tinyint) > 0','having LAST_ROW(q_float) > 0','having LAST_ROW(q_double) > 0']
+
+ self.having_support_j = ['having count(t1.q_int) > 0','having count(t1.q_bigint) > 0','having count(t1.q_smallint) > 0','having count(t1.q_tinyint) > 0','having count(t1.q_float) > 0','having count(t1.q_double) > 0','having count(t1.q_bool) > 0',
+ 'having avg(t1.q_int) > 0','having avg(t1.q_bigint) > 0','having avg(t1.q_smallint) > 0','having avg(t1.q_tinyint) > 0','having avg(t1.q_float) > 0','having avg(t1.q_double) > 0',
+ 'having sum(t1.q_int) > 0','having sum(t1.q_bigint) > 0','having sum(t1.q_smallint) > 0','having sum(t1.q_tinyint) > 0','having sum(t1.q_float) > 0','having sum(t1.q_double) > 0',
+ 'having STDDEV(t1.q_int) > 0','having STDDEV(t1.q_bigint) > 0','having STDDEV(t1.q_smallint) > 0','having STDDEV(t1.q_tinyint) > 0','having STDDEV(t1.q_float) > 0','having STDDEV(t1.q_double) > 0',
+ 'having TWA(t1.q_int) > 0','having TWA(t1.q_bigint) > 0','having TWA(t1.q_smallint) > 0','having TWA(t1.q_tinyint) > 0','having TWA(t1.q_float) > 0','having TWA(t1.q_double) > 0',
+ 'having IRATE(t1.q_int) > 0','having IRATE(t1.q_bigint) > 0','having IRATE(t1.q_smallint) > 0','having IRATE(t1.q_tinyint) > 0','having IRATE(t1.q_float) > 0','having IRATE(t1.q_double) > 0',
+ 'having MIN(t1.q_int) > 0','having MIN(t1.q_bigint) > 0','having MIN(t1.q_smallint) > 0','having MIN(t1.q_tinyint) > 0','having MIN(t1.q_float) > 0','having MIN(t1.q_double) > 0',
+ 'having MAX(t1.q_int) > 0','having MAX(t1.q_bigint) > 0','having MAX(t1.q_smallint) > 0','having MAX(t1.q_tinyint) > 0','having MAX(t1.q_float) > 0','having MAX(t1.q_double) > 0',
+ 'having FIRST(t1.q_int) > 0','having FIRST(t1.q_bigint) > 0','having FIRST(t1.q_smallint) > 0','having FIRST(t1.q_tinyint) > 0','having FIRST(t1.q_float) > 0','having FIRST(t1.q_double) > 0',
+ 'having LAST(t1.q_int) > 0','having LAST(t1.q_bigint) > 0','having LAST(t1.q_smallint) > 0','having LAST(t1.q_tinyint) > 0','having LAST(t1.q_float) > 0','having LAST(t1.q_double) > 0',
+ 'having APERCENTILE(t1.q_int,10) > 0','having APERCENTILE(t1.q_bigint,10) > 0','having APERCENTILE(t1.q_smallint,10) > 0','having APERCENTILE(t1.q_tinyint,10) > 0','having APERCENTILE(t1.q_float,10) > 0','having APERCENTILE(t1.q_double,10) > 0']
+
+ # limit offset where
+ self.limit_where = ['limit 1 offset 1' , 'limit 1' , 'limit 2 offset 1' , 'limit 2', 'limit 12 offset 1' , 'limit 20', 'limit 20 offset 10' , 'limit 200']
+ self.limit1_where = ['limit 1 offset 1' , 'limit 1' ]
+ self.limit_u_where = ['limit 100 offset 10' , 'limit 50' , 'limit 100' , 'limit 10' ]
+
+ # slimit soffset where
+ self.slimit_where = ['slimit 1 soffset 1' , 'slimit 1' , 'slimit 2 soffset 1' , 'slimit 2']
+ self.slimit1_where = ['slimit 2 soffset 1' , 'slimit 1' ]
+
+ # aggregate function include [all:count(*)\avg\sum\stddev ||regular:twa\irate\leastsquares ||group by tbname:twa\irate\]
+ # select function include [all: min\max\first(*)\last(*)\top\bottom\apercentile\last_row(*)(not with interval)\interp(*)(FILL) ||regular: percentile]
+ # calculation function include [all:spread\+-*/ ||regular:diff\derivative ||group by tbname:diff\derivative\]
+ # **_ns_** expressions are not supported on super (stable) tables, therefore they are kept separate from the regular-table cases
+ # calc_select_all calc_select_regular calc_select_in_ts calc_select_fill calc_select_not_interval
+ # calc_aggregate_all calc_aggregate_regular calc_aggregate_groupbytbname
+ # calc_calculate_all calc_calculate_regular calc_calculate_groupbytbname
+
+ # calc_select_all calc_select_regular calc_select_in_ts calc_select_fill calc_select_not_interval
+ # select function include [all: min\max\first(*)\last(*)\top\bottom\apercentile\last_row(*)(not with interval)\interp(*)(FILL) ||regular: percentile]
+
+ self.calc_select_all = ['bottom(q_int,20)' , 'bottom(q_bigint,20)' , 'bottom(q_smallint,20)' , 'bottom(q_tinyint,20)' ,'bottom(q_float,20)' , 'bottom(q_double,20)' ,
+ 'top(q_int,20)' , 'top(q_bigint,20)' , 'top(q_smallint,20)' ,'top(q_tinyint,20)' ,'top(q_float,20)' ,'top(q_double,20)' ,
+ 'first(q_int)' , 'first(q_bigint)' , 'first(q_smallint)' , 'first(q_tinyint)' , 'first(q_float)' ,'first(q_double)' ,'first(q_binary)' ,'first(q_nchar)' ,'first(q_bool)' ,'first(q_ts)' ,
+ 'last(q_int)' , 'last(q_bigint)' , 'last(q_smallint)' , 'last(q_tinyint)' , 'last(q_float)' ,'last(q_double)' , 'last(q_binary)' ,'last(q_nchar)' ,'last(q_bool)' ,'last(q_ts)' ,
+ 'min(q_int)' , 'min(q_bigint)' , 'min(q_smallint)' , 'min(q_tinyint)' , 'min(q_float)' ,'min(q_double)' ,
+ 'max(q_int)' , 'max(q_bigint)' , 'max(q_smallint)' , 'max(q_tinyint)' ,'max(q_float)' ,'max(q_double)' ,
+ 'apercentile(q_int,20)' , 'apercentile(q_bigint,20)' ,'apercentile(q_smallint,20)' ,'apercentile(q_tinyint,20)' ,'apercentile(q_float,20)' ,'apercentile(q_double,20)' ,
+ 'last_row(q_int)' , 'last_row(q_bigint)' , 'last_row(q_smallint)' , 'last_row(q_tinyint)' , 'last_row(q_float)' ,
+ 'last_row(q_double)' , 'last_row(q_bool)' ,'last_row(q_binary)' ,'last_row(q_nchar)' ,'last_row(q_ts)']
+
+ self.calc_select_in_ts = ['bottom(q_int,20)' , 'bottom(q_bigint,20)' , 'bottom(q_smallint,20)' , 'bottom(q_tinyint,20)' ,'bottom(q_float,20)' , 'bottom(q_double,20)' ,
+ 'top(q_int,20)' , 'top(q_bigint,20)' , 'top(q_smallint,20)' ,'top(q_tinyint,20)' ,'top(q_float,20)' ,'top(q_double,20)' ,
+ 'first(q_int)' , 'first(q_bigint)' , 'first(q_smallint)' , 'first(q_tinyint)' , 'first(q_float)' ,'first(q_double)' ,'first(q_binary)' ,'first(q_nchar)' ,'first(q_bool)' ,'first(q_ts)' ,
+ 'last(q_int)' , 'last(q_bigint)' , 'last(q_smallint)' , 'last(q_tinyint)' , 'last(q_float)' ,'last(q_double)' , 'last(q_binary)' ,'last(q_nchar)' ,'last(q_bool)' ,'last(q_ts)' ]
+
+ self.calc_select_in = ['min(q_int)' , 'min(q_bigint)' , 'min(q_smallint)' , 'min(q_tinyint)' , 'min(q_float)' ,'min(q_double)' ,
+ 'max(q_int)' , 'max(q_bigint)' , 'max(q_smallint)' , 'max(q_tinyint)' ,'max(q_float)' ,'max(q_double)' ,
+ 'apercentile(q_int,20)' , 'apercentile(q_bigint,20)' ,'apercentile(q_smallint,20)' ,'apercentile(q_tinyint,20)' ,'apercentile(q_float,20)' ,'apercentile(q_double,20)' ,
+ 'last_row(q_int)' , 'last_row(q_bigint)' , 'last_row(q_smallint)' , 'last_row(q_tinyint)' , 'last_row(q_float)' ,
+ 'last_row(q_double)' , 'last_row(q_bool)' ,'last_row(q_binary)' ,'last_row(q_nchar)' ,'last_row(q_ts)']
+
+ self.calc_select_not_support_ts = ['first(q_int)' , 'first(q_bigint)' , 'first(q_smallint)' , 'first(q_tinyint)' , 'first(q_float)' ,'first(q_double)' ,'first(q_binary)' ,'first(q_nchar)' ,'first(q_bool)' ,'first(q_ts)' ,
+ 'last(q_int)' , 'last(q_bigint)' , 'last(q_smallint)' , 'last(q_tinyint)' , 'last(q_float)' ,'last(q_double)' , 'last(q_binary)' ,'last(q_nchar)' ,'last(q_bool)' ,'last(q_ts)' ,
+ 'last_row(q_int)' , 'last_row(q_bigint)' , 'last_row(q_smallint)' , 'last_row(q_tinyint)' , 'last_row(q_float)' ,
+ 'last_row(q_double)' , 'last_row(q_bool)' ,'last_row(q_binary)' ,'last_row(q_nchar)' ,'last_row(q_ts)',
+ 'apercentile(q_int,20)' , 'apercentile(q_bigint,20)' ,'apercentile(q_smallint,20)' ,'apercentile(q_tinyint,20)' ,'apercentile(q_float,20)' ,'apercentile(q_double,20)']
+
+ self.calc_select_support_ts = ['bottom(q_int,20)' , 'bottom(q_bigint,20)' , 'bottom(q_smallint,20)' , 'bottom(q_tinyint,20)' ,'bottom(q_float,20)' , 'bottom(q_double,20)' ,
+ 'top(q_int,20)' , 'top(q_bigint,20)' , 'top(q_smallint,20)' ,'top(q_tinyint,20)' ,'top(q_float,20)' ,'top(q_double,20)' ,
+ 'min(q_int)' , 'min(q_bigint)' , 'min(q_smallint)' , 'min(q_tinyint)' , 'min(q_float)' ,'min(q_double)' ,
+ 'max(q_int)' , 'max(q_bigint)' , 'max(q_smallint)' , 'max(q_tinyint)' ,'max(q_float)' ,'max(q_double)' ]
+
+ self.calc_select_regular = [ 'PERCENTILE(q_int,10)' ,'PERCENTILE(q_bigint,20)' , 'PERCENTILE(q_smallint,30)' ,'PERCENTILE(q_tinyint,40)' ,'PERCENTILE(q_float,50)' ,'PERCENTILE(q_double,60)']
+
+
+ self.calc_select_fill = ['INTERP(q_int)' ,'INTERP(q_bigint)' ,'INTERP(q_smallint)' ,'INTERP(q_tinyint)', 'INTERP(q_float)' ,'INTERP(q_double)']
+ self.interp_where = ['ts = now' , 'ts = \'2020-09-13 20:26:40.000\'' , 'ts = \'2020-09-13 20:26:40.009\'' ,'tbname in (\'table_1\') and ts = now' ,'tbname in (\'table_0\' ,\'table_1\',\'table_2\',\'table_3\',\'table_4\',\'table_5\') and ts = \'2020-09-13 20:26:40.000\'','tbname like \'table%\' and ts = \'2020-09-13 20:26:40.002\'']
+
+ #two table join
+ self.calc_select_in_ts_j = ['bottom(t1.q_int,20)' , 'bottom(t1.q_bigint,20)' , 'bottom(t1.q_smallint,20)' , 'bottom(t1.q_tinyint,20)' ,'bottom(t1.q_float,20)' , 'bottom(t1.q_double,20)' ,
+ 'top(t1.q_int,20)' , 'top(t1.q_bigint,20)' , 'top(t1.q_smallint,20)' ,'top(t1.q_tinyint,20)' ,'top(t1.q_float,20)' ,'top(t1.q_double,20)' ,
+ 'first(t1.q_int)' , 'first(t1.q_bigint)' , 'first(t1.q_smallint)' , 'first(t1.q_tinyint)' , 'first(t1.q_float)' ,'first(t1.q_double)' ,'first(t1.q_binary)' ,'first(t1.q_nchar)' ,'first(t1.q_bool)' ,'first(t1.q_ts)' ,
+ 'last(t1.q_int)' , 'last(t1.q_bigint)' , 'last(t1.q_smallint)' , 'last(t1.q_tinyint)' , 'last(t1.q_float)' ,'last(t1.q_double)' , 'last(t1.q_binary)' ,'last(t1.q_nchar)' ,'last(t1.q_bool)' ,'last(t1.q_ts)' ,
+ 'bottom(t2.q_int,20)' , 'bottom(t2.q_bigint,20)' , 'bottom(t2.q_smallint,20)' , 'bottom(t2.q_tinyint,20)' ,'bottom(t2.q_float,20)' , 'bottom(t2.q_double,20)' ,
+ 'top(t2.q_int,20)' , 'top(t2.q_bigint,20)' , 'top(t2.q_smallint,20)' ,'top(t2.q_tinyint,20)' ,'top(t2.q_float,20)' ,'top(t2.q_double,20)' ,
+ 'first(t2.q_int)' , 'first(t2.q_bigint)' , 'first(t2.q_smallint)' , 'first(t2.q_tinyint)' , 'first(t2.q_float)' ,'first(t2.q_double)' ,'first(t2.q_binary)' ,'first(t2.q_nchar)' ,'first(t2.q_bool)' ,'first(t2.q_ts)' ,
+ 'last(t2.q_int)' , 'last(t2.q_bigint)' , 'last(t2.q_smallint)' , 'last(t2.q_tinyint)' , 'last(t2.q_float)' ,'last(t2.q_double)' , 'last(t2.q_binary)' ,'last(t2.q_nchar)' ,'last(t2.q_bool)' ,'last(t2.q_ts)']
+
+ self.calc_select_in_support_ts_j = ['bottom(t1.q_int,20)' , 'bottom(t1.q_bigint,20)' , 'bottom(t1.q_smallint,20)' , 'bottom(t1.q_tinyint,20)' ,'bottom(t1.q_float,20)' , 'bottom(t1.q_double,20)' ,
+ 'top(t1.q_int,20)' , 'top(t1.q_bigint,20)' , 'top(t1.q_smallint,20)' ,'top(t1.q_tinyint,20)' ,'top(t1.q_float,20)' ,'top(t1.q_double,20)' ,
+ 'min(t1.q_int)' , 'min(t1.q_bigint)' , 'min(t1.q_smallint)' , 'min(t1.q_tinyint)' , 'min(t1.q_float)' ,'min(t1.q_double)' ,
+ 'max(t1.q_int)' , 'max(t1.q_bigint)' , 'max(t1.q_smallint)' , 'max(t1.q_tinyint)' ,'max(t1.q_float)' ,'max(t1.q_double)' ,
+ 'bottom(t2.q_int,20)' , 'bottom(t2.q_bigint,20)' , 'bottom(t2.q_smallint,20)' , 'bottom(t2.q_tinyint,20)' ,'bottom(t2.q_float,20)' , 'bottom(t2.q_double,20)' ,
+ 'top(t2.q_int,20)' , 'top(t2.q_bigint,20)' , 'top(t2.q_smallint,20)' ,'top(t2.q_tinyint,20)' ,'top(t2.q_float,20)' ,'top(t2.q_double,20)' ,
+ 'min(t2.q_int)' , 'min(t2.q_bigint)' , 'min(t2.q_smallint)' , 'min(t2.q_tinyint)' , 'min(t2.q_float)' ,'min(t2.q_double)' ,
+ 'max(t2.q_int)' , 'max(t2.q_bigint)' , 'max(t2.q_smallint)' , 'max(t2.q_tinyint)' ,'max(t2.q_float)' ,'max(t2.q_double)' ,
+ ]
+
+ self.calc_select_in_not_support_ts_j = ['apercentile(t1.q_int,20)' , 'apercentile(t1.q_bigint,20)' ,'apercentile(t1.q_smallint,20)' ,'apercentile(t1.q_tinyint,20)' ,'apercentile(t1.q_float,20)' ,'apercentile(t1.q_double,20)' ,
+ 'last_row(t1.q_int)' , 'last_row(t1.q_bigint)' , 'last_row(t1.q_smallint)' , 'last_row(t1.q_tinyint)' , 'last_row(t1.q_float)' ,
+ 'last_row(t1.q_double)' , 'last_row(t1.q_bool)' ,'last_row(t1.q_binary)' ,'last_row(t1.q_nchar)' ,'last_row(t1.q_ts)' ,
+ 'apercentile(t2.q_int,20)' , 'apercentile(t2.q_bigint,20)' ,'apercentile(t2.q_smallint,20)' ,'apercentile(t2.q_tinyint,20)' ,'apercentile(t2.q_float,20)' ,'apercentile(t2.q_double,20)' ,
+ 'last_row(t2.q_int)' , 'last_row(t2.q_bigint)' , 'last_row(t2.q_smallint)' , 'last_row(t2.q_tinyint)' , 'last_row(t2.q_float)' ,
+ 'last_row(t2.q_double)' , 'last_row(t2.q_bool)' ,'last_row(t2.q_binary)' ,'last_row(t2.q_nchar)' ,'last_row(t2.q_ts)']
+
+ self.calc_select_in_j = ['min(t1.q_int)' , 'min(t1.q_bigint)' , 'min(t1.q_smallint)' , 'min(t1.q_tinyint)' , 'min(t1.q_float)' ,'min(t1.q_double)' ,
+ 'max(t1.q_int)' , 'max(t1.q_bigint)' , 'max(t1.q_smallint)' , 'max(t1.q_tinyint)' ,'max(t1.q_float)' ,'max(t1.q_double)' ,
+ 'apercentile(t1.q_int,20)' , 'apercentile(t1.q_bigint,20)' ,'apercentile(t1.q_smallint,20)' ,'apercentile(t1.q_tinyint,20)' ,'apercentile(t1.q_float,20)' ,'apercentile(t1.q_double,20)' ,
+ 'last_row(t1.q_int)' , 'last_row(t1.q_bigint)' , 'last_row(t1.q_smallint)' , 'last_row(t1.q_tinyint)' , 'last_row(t1.q_float)' ,
+ 'last_row(t1.q_double)' , 'last_row(t1.q_bool)' ,'last_row(t1.q_binary)' ,'last_row(t1.q_nchar)' ,'last_row(t1.q_ts)' ,
+ 'min(t2.q_int)' , 'min(t2.q_bigint)' , 'min(t2.q_smallint)' , 'min(t2.q_tinyint)' , 'min(t2.q_float)' ,'min(t2.q_double)' ,
+ 'max(t2.q_int)' , 'max(t2.q_bigint)' , 'max(t2.q_smallint)' , 'max(t2.q_tinyint)' ,'max(t2.q_float)' ,'max(t2.q_double)' ,
+ 'apercentile(t2.q_int,20)' , 'apercentile(t2.q_bigint,20)' ,'apercentile(t2.q_smallint,20)' ,'apercentile(t2.q_tinyint,20)' ,'apercentile(t2.q_float,20)' ,'apercentile(t2.q_double,20)' ,
+ 'last_row(t2.q_int)' , 'last_row(t2.q_bigint)' , 'last_row(t2.q_smallint)' , 'last_row(t2.q_tinyint)' , 'last_row(t2.q_float)' ,
+ 'last_row(t2.q_double)' , 'last_row(t2.q_bool)' ,'last_row(t2.q_binary)' ,'last_row(t2.q_nchar)' ,'last_row(t2.q_ts)']
+ self.calc_select_all_j = self.calc_select_in_ts_j + self.calc_select_in_j
+
+ self.calc_select_regular_j = [ 'PERCENTILE(t1.q_int,10)' ,'PERCENTILE(t1.q_bigint,20)' , 'PERCENTILE(t1.q_smallint,30)' ,'PERCENTILE(t1.q_tinyint,40)' ,'PERCENTILE(t1.q_float,50)' ,'PERCENTILE(t1.q_double,60)' ,
+ 'PERCENTILE(t2.q_int,10)' ,'PERCENTILE(t2.q_bigint,20)' , 'PERCENTILE(t2.q_smallint,30)' ,'PERCENTILE(t2.q_tinyint,40)' ,'PERCENTILE(t2.q_float,50)' ,'PERCENTILE(t2.q_double,60)']
+
+
+ self.calc_select_fill_j = ['INTERP(t1.q_int)' ,'INTERP(t1.q_bigint)' ,'INTERP(t1.q_smallint)' ,'INTERP(t1.q_tinyint)', 'INTERP(t1.q_float)' ,'INTERP(t1.q_double)' ,
+ 'INTERP(t2.q_int)' ,'INTERP(t2.q_bigint)' ,'INTERP(t2.q_smallint)' ,'INTERP(t2.q_tinyint)', 'INTERP(t2.q_float)' ,'INTERP(t2.q_double)']
+ self.interp_where_j = ['t1.ts = now' , 't1.ts = \'2020-09-13 20:26:40.000\'' , 't1.ts = \'2020-09-13 20:26:40.009\'' ,'t2.ts = now' , 't2.ts = \'2020-09-13 20:26:40.000\'' , 't2.ts = \'2020-09-13 20:26:40.009\'' ,
+ 't1.tbname in (\'table_1\') and t1.ts = now' ,'t1.tbname in (\'table_0\' ,\'table_1\',\'table_2\',\'table_3\',\'table_4\',\'table_5\') and t1.ts = \'2020-09-13 20:26:40.000\'','t1.tbname like \'table%\' and t1.ts = \'2020-09-13 20:26:40.002\'',
+ 't2.tbname in (\'table_1\') and t2.ts = now' ,'t2.tbname in (\'table_0\' ,\'table_1\',\'table_2\',\'table_3\',\'table_4\',\'table_5\') and t2.ts = \'2020-09-13 20:26:40.000\'','t2.tbname like \'table%\' and t2.ts = \'2020-09-13 20:26:40.002\'']
+
+ # calc_aggregate_all calc_aggregate_regular calc_aggregate_groupbytbname APERCENTILE\PERCENTILE
+ # aggregate function include [all:count(*)\avg\sum\stddev ||regular:twa\irate\leastsquares ||group by tbname:twa\irate\]
+ self.calc_aggregate_all = ['count(*)' , 'count(q_int)' ,'count(q_bigint)' , 'count(q_smallint)' ,'count(q_tinyint)' ,'count(q_float)' ,
+ 'count(q_double)' ,'count(q_binary)' ,'count(q_nchar)' ,'count(q_bool)' ,'count(q_ts)' ,
+ 'avg(q_int)' ,'avg(q_bigint)' , 'avg(q_smallint)' ,'avg(q_tinyint)' ,'avg(q_float)' ,'avg(q_double)' ,
+ 'sum(q_int)' ,'sum(q_bigint)' , 'sum(q_smallint)' ,'sum(q_tinyint)' ,'sum(q_float)' ,'sum(q_double)' ,
+ 'STDDEV(q_int)' ,'STDDEV(q_bigint)' , 'STDDEV(q_smallint)' ,'STDDEV(q_tinyint)' ,'STDDEV(q_float)' ,'STDDEV(q_double)',
+ 'APERCENTILE(q_int,10)' ,'APERCENTILE(q_bigint,20)' , 'APERCENTILE(q_smallint,30)' ,'APERCENTILE(q_tinyint,40)' ,'APERCENTILE(q_float,50)' ,'APERCENTILE(q_double,60)']
+
+ self.calc_aggregate_regular = ['twa(q_int)' ,'twa(q_bigint)' , 'twa(q_smallint)' ,'twa(q_tinyint)' ,'twa (q_float)' ,'twa(q_double)' ,
+ 'IRATE(q_int)' ,'IRATE(q_bigint)' , 'IRATE(q_smallint)' ,'IRATE(q_tinyint)' ,'IRATE (q_float)' ,'IRATE(q_double)' ,
+ 'LEASTSQUARES(q_int,15,3)' , 'LEASTSQUARES(q_bigint,10,1)' , 'LEASTSQUARES(q_smallint,20,3)' ,'LEASTSQUARES(q_tinyint,10,4)' ,'LEASTSQUARES(q_float,6,4)' ,'LEASTSQUARES(q_double,3,1)' ,
+ 'PERCENTILE(q_int,10)' ,'PERCENTILE(q_bigint,20)' , 'PERCENTILE(q_smallint,30)' ,'PERCENTILE(q_tinyint,40)' ,'PERCENTILE(q_float,50)' ,'PERCENTILE(q_double,60)']
+
+ self.calc_aggregate_groupbytbname = ['twa(q_int)' ,'twa(q_bigint)' , 'twa(q_smallint)' ,'twa(q_tinyint)' ,'twa (q_float)' ,'twa(q_double)' ,
+ 'IRATE(q_int)' ,'IRATE(q_bigint)' , 'IRATE(q_smallint)' ,'IRATE(q_tinyint)' ,'IRATE (q_float)' ,'IRATE(q_double)' ]
+
+ #two table join
+ self.calc_aggregate_all_j = ['count(t1.*)' , 'count(t1.q_int)' ,'count(t1.q_bigint)' , 'count(t1.q_smallint)' ,'count(t1.q_tinyint)' ,'count(t1.q_float)' ,
+ 'count(t1.q_double)' ,'count(t1.q_binary)' ,'count(t1.q_nchar)' ,'count(t1.q_bool)' ,'count(t1.q_ts)' ,
+ 'avg(t1.q_int)' ,'avg(t1.q_bigint)' , 'avg(t1.q_smallint)' ,'avg(t1.q_tinyint)' ,'avg(t1.q_float)' ,'avg(t1.q_double)' ,
+ 'sum(t1.q_int)' ,'sum(t1.q_bigint)' , 'sum(t1.q_smallint)' ,'sum(t1.q_tinyint)' ,'sum(t1.q_float)' ,'sum(t1.q_double)' ,
+ 'STDDEV(t1.q_int)' ,'STDDEV(t1.q_bigint)' , 'STDDEV(t1.q_smallint)' ,'STDDEV(t1.q_tinyint)' ,'STDDEV(t1.q_float)' ,'STDDEV(t1.q_double)',
+ 'APERCENTILE(t1.q_int,10)' ,'APERCENTILE(t1.q_bigint,20)' , 'APERCENTILE(t1.q_smallint,30)' ,'APERCENTILE(t1.q_tinyint,40)' ,'APERCENTILE(t1.q_float,50)' ,'APERCENTILE(t1.q_double,60)' ,
+ 'count(t2.*)' , 'count(t2.q_int)' ,'count(t2.q_bigint)' , 'count(t2.q_smallint)' ,'count(t2.q_tinyint)' ,'count(t2.q_float)' ,
+ 'count(t2.q_double)' ,'count(t2.q_binary)' ,'count(t2.q_nchar)' ,'count(t2.q_bool)' ,'count(t2.q_ts)' ,
+ 'avg(t2.q_int)' ,'avg(t2.q_bigint)' , 'avg(t2.q_smallint)' ,'avg(t2.q_tinyint)' ,'avg(t2.q_float)' ,'avg(t2.q_double)' ,
+ 'sum(t2.q_int)' ,'sum(t2.q_bigint)' , 'sum(t2.q_smallint)' ,'sum(t2.q_tinyint)' ,'sum(t2.q_float)' ,'sum(t2.q_double)' ,
+ 'STDDEV(t2.q_int)' ,'STDDEV(t2.q_bigint)' , 'STDDEV(t2.q_smallint)' ,'STDDEV(t2.q_tinyint)' ,'STDDEV(t2.q_float)' ,'STDDEV(t2.q_double)',
+ 'APERCENTILE(t2.q_int,10)' ,'APERCENTILE(t2.q_bigint,20)' , 'APERCENTILE(t2.q_smallint,30)' ,'APERCENTILE(t2.q_tinyint,40)' ,'APERCENTILE(t2.q_float,50)' ,'APERCENTILE(t2.q_double,60)']
+
+ self.calc_aggregate_regular_j = ['twa(t1.q_int)' ,'twa(t1.q_bigint)' , 'twa(t1.q_smallint)' ,'twa(t1.q_tinyint)' ,'twa (t1.q_float)' ,'twa(t1.q_double)' ,
+ 'IRATE(t1.q_int)' ,'IRATE(t1.q_bigint)' , 'IRATE(t1.q_smallint)' ,'IRATE(t1.q_tinyint)' ,'IRATE (t1.q_float)' ,'IRATE(t1.q_double)' ,
+ 'LEASTSQUARES(t1.q_int,15,3)' , 'LEASTSQUARES(t1.q_bigint,10,1)' , 'LEASTSQUARES(t1.q_smallint,20,3)' ,'LEASTSQUARES(t1.q_tinyint,10,4)' ,'LEASTSQUARES(t1.q_float,6,4)' ,'LEASTSQUARES(t1.q_double,3,1)' ,
+ 'PERCENTILE(t1.q_int,10)' ,'PERCENTILE(t1.q_bigint,20)' , 'PERCENTILE(t1.q_smallint,30)' ,'PERCENTILE(t1.q_tinyint,40)' ,'PERCENTILE(t1.q_float,50)' ,'PERCENTILE(t1.q_double,60)' ,
+ 'twa(t2.q_int)' ,'twa(t2.q_bigint)' , 'twa(t2.q_smallint)' ,'twa(t2.q_tinyint)' ,'twa (t2.q_float)' ,'twa(t2.q_double)' ,
+ 'IRATE(t2.q_int)' ,'IRATE(t2.q_bigint)' , 'IRATE(t2.q_smallint)' ,'IRATE(t2.q_tinyint)' ,'IRATE (t2.q_float)' ,'IRATE(t2.q_double)',
+ 'LEASTSQUARES(t2.q_int,15,3)' , 'LEASTSQUARES(t2.q_bigint,10,1)' , 'LEASTSQUARES(t2.q_smallint,20,3)' ,'LEASTSQUARES(t2.q_tinyint,10,4)' ,'LEASTSQUARES(t2.q_float,6,4)' ,'LEASTSQUARES(t2.q_double,3,1)' ,
+ 'PERCENTILE(t2.q_int,10)' ,'PERCENTILE(t2.q_bigint,20)' , 'PERCENTILE(t2.q_smallint,30)' ,'PERCENTILE(t2.q_tinyint,40)' ,'PERCENTILE(t2.q_float,50)' ,'PERCENTILE(t2.q_double,60)']
+
+ self.calc_aggregate_groupbytbname_j = ['twa(t1.q_int)' ,'twa(t1.q_bigint)' , 'twa(t1.q_smallint)' ,'twa(t1.q_tinyint)' ,'twa (t1.q_float)' ,'twa(t1.q_double)' ,
+ 'IRATE(t1.q_int)' ,'IRATE(t1.q_bigint)' , 'IRATE(t1.q_smallint)' ,'IRATE(t1.q_tinyint)' ,'IRATE (t1.q_float)' ,'IRATE(t1.q_double)' ,
+ 'twa(t2.q_int)' ,'twa(t2.q_bigint)' , 'twa(t2.q_smallint)' ,'twa(t2.q_tinyint)' ,'twa (t2.q_float)' ,'twa(t2.q_double)' ,
+ 'IRATE(t2.q_int)' ,'IRATE(t2.q_bigint)' , 'IRATE(t2.q_smallint)' ,'IRATE(t2.q_tinyint)' ,'IRATE (t2.q_float)' ,'IRATE(t2.q_double)' ]
+
+ # calc_calculate_all calc_calculate_regular calc_calculate_groupbytbname
+ # calculation function include [all:spread\+-*/ ||regular:diff\derivative ||group by tbname:diff\derivative\]
+ self.calc_calculate_all = ['SPREAD(ts)' , 'SPREAD(q_ts)' , 'SPREAD(q_int)' ,'SPREAD(q_bigint)' , 'SPREAD(q_smallint)' ,'SPREAD(q_tinyint)' ,'SPREAD(q_float)' ,'SPREAD(q_double)' ,
+ '(SPREAD(q_int) + SPREAD(q_bigint))' , '(SPREAD(q_smallint) - SPREAD(q_float))', '(SPREAD(q_double) * SPREAD(q_tinyint))' , '(SPREAD(q_double) / SPREAD(q_float))']
+ self.calc_calculate_regular = ['DIFF(q_int)' ,'DIFF(q_bigint)' , 'DIFF(q_smallint)' ,'DIFF(q_tinyint)' ,'DIFF(q_float)' ,'DIFF(q_double)' ,
+ 'DIFF(q_int,0)' ,'DIFF(q_bigint,0)' , 'DIFF(q_smallint,0)' ,'DIFF(q_tinyint,0)' ,'DIFF(q_float,0)' ,'DIFF(q_double,0)' ,
+ 'DIFF(q_int,1)' ,'DIFF(q_bigint,1)' , 'DIFF(q_smallint,1)' ,'DIFF(q_tinyint,1)' ,'DIFF(q_float,1)' ,'DIFF(q_double,1)' ,
+ 'DERIVATIVE(q_int,15s,0)' , 'DERIVATIVE(q_bigint,10s,1)' , 'DERIVATIVE(q_smallint,20s,0)' ,'DERIVATIVE(q_tinyint,10s,1)' ,'DERIVATIVE(q_float,6s,0)' ,'DERIVATIVE(q_double,3s,1)' ]
+ self.calc_calculate_groupbytbname = self.calc_calculate_regular
+
+ #two table join
+ self.calc_calculate_all_j = ['SPREAD(t1.ts)' , 'SPREAD(t1.q_ts)' , 'SPREAD(t1.q_int)' ,'SPREAD(t1.q_bigint)' , 'SPREAD(t1.q_smallint)' ,'SPREAD(t1.q_tinyint)' ,'SPREAD(t1.q_float)' ,'SPREAD(t1.q_double)' ,
+ 'SPREAD(t2.ts)' , 'SPREAD(t2.q_ts)' , 'SPREAD(t2.q_int)' ,'SPREAD(t2.q_bigint)' , 'SPREAD(t2.q_smallint)' ,'SPREAD(t2.q_tinyint)' ,'SPREAD(t2.q_float)' ,'SPREAD(t2.q_double)' ,
+ '(SPREAD(t1.q_int) + SPREAD(t1.q_bigint))' , '(SPREAD(t1.q_tinyint) - SPREAD(t1.q_float))', '(SPREAD(t1.q_double) * SPREAD(t1.q_tinyint))' , '(SPREAD(t1.q_double) / SPREAD(t1.q_tinyint))',
+ '(SPREAD(t2.q_int) + SPREAD(t2.q_bigint))' , '(SPREAD(t2.q_smallint) - SPREAD(t2.q_float))', '(SPREAD(t2.q_double) * SPREAD(t2.q_tinyint))' , '(SPREAD(t2.q_double) / SPREAD(t2.q_tinyint))',
+ '(SPREAD(t1.q_int) + SPREAD(t1.q_smallint))' , '(SPREAD(t2.q_smallint) - SPREAD(t2.q_float))', '(SPREAD(t1.q_double) * SPREAD(t1.q_tinyint))' , '(SPREAD(t1.q_double) / SPREAD(t1.q_float))']
+ self.calc_calculate_regular_j = ['DIFF(t1.q_int)' ,'DIFF(t1.q_bigint)' , 'DIFF(t1.q_smallint)' ,'DIFF(t1.q_tinyint)' ,'DIFF(t1.q_float)' ,'DIFF(t1.q_double)' ,
+ 'DIFF(t1.q_int,0)' ,'DIFF(t1.q_bigint,0)' , 'DIFF(t1.q_smallint,0)' ,'DIFF(t1.q_tinyint,0)' ,'DIFF(t1.q_float,0)' ,'DIFF(t1.q_double,0)' ,
+ 'DIFF(t1.q_int,1)' ,'DIFF(t1.q_bigint,1)' , 'DIFF(t1.q_smallint,1)' ,'DIFF(t1.q_tinyint,1)' ,'DIFF(t1.q_float,1)' ,'DIFF(t1.q_double,1)' ,
+ 'DERIVATIVE(t1.q_int,15s,0)' , 'DERIVATIVE(t1.q_bigint,10s,1)' , 'DERIVATIVE(t1.q_smallint,20s,0)' ,'DERIVATIVE(t1.q_tinyint,10s,1)' ,'DERIVATIVE(t1.q_float,6s,0)' ,'DERIVATIVE(t1.q_double,3s,1)' ,
+ 'DIFF(t2.q_int)' ,'DIFF(t2.q_bigint)' , 'DIFF(t2.q_smallint)' ,'DIFF(t2.q_tinyint)' ,'DIFF(t2.q_float)' ,'DIFF(t2.q_double)' ,
+ 'DIFF(t2.q_int,0)' ,'DIFF(t2.q_bigint,0)' , 'DIFF(t2.q_smallint,0)' ,'DIFF(t2.q_tinyint,0)' ,'DIFF(t2.q_float,0)' ,'DIFF(t2.q_double,0)' ,
+ 'DIFF(t2.q_int,1)' ,'DIFF(t2.q_bigint,1)' , 'DIFF(t2.q_smallint,1)' ,'DIFF(t2.q_tinyint,1)' ,'DIFF(t2.q_float,1)' ,'DIFF(t2.q_double,1)' ,
+ 'DERIVATIVE(t2.q_int,15s,0)' , 'DERIVATIVE(t2.q_bigint,10s,1)' , 'DERIVATIVE(t2.q_smallint,20s,0)' ,'DERIVATIVE(t2.q_tinyint,10s,1)' ,'DERIVATIVE(t2.q_float,6s,0)' ,'DERIVATIVE(t2.q_double,3s,1)' ]
+ self.calc_calculate_groupbytbname_j = self.calc_calculate_regular_j
+
+ #inter && calc_aggregate_all\calc_aggregate_regular\calc_select_all
+ self.interval_sliding = ['interval(4w) sliding(1w) ','interval(1w) sliding(1d) ','interval(1d) sliding(1h) ' ,
+ 'interval(1h) sliding(1m) ','interval(1m) sliding(1s) ','interval(1s) sliding(10a) ',
+ 'interval(1y) ','interval(1n) ','interval(1w) ','interval(1d) ','interval(1h) ','interval(1m) ','interval(1s) ' ,'interval(10a)',
+ 'interval(1y,1n) ','interval(1n,1w) ','interval(1w,1d) ','interval(1d,1h) ','interval(1h,1m) ','interval(1m,1s) ','interval(1s,10a) ' ,'interval(100a,30a)']
+
+ self.conn1 = taos.connect(host="127.0.0.1", user="root", password="taosdata", config="/etc/taos/")
+ self.cur1 = self.conn1.cursor()
+ print(self.cur1)
+ self.cur1.execute("use %s ;" %self.db_nest)
+ sql = 'select * from stable_1 limit 5;'
+ self.cur1.execute(sql)
+
+
+ def data_matrix_equal(self, sql1,row1_s,row1_e,col1_s,col1_e, sql2,row2_s,row2_e,col2_s,col2_e):
+ # ----row1_start----col1_start----
+ # - - - - data within this matrix should be equal - - -
+ # - - - - - - - - - - - - - - - -
+ # ----row1_end------col1_end------
+ self.sql1 = sql1
+ list1 =[]
+ tdSql.query(sql1)
+ for i1 in range(row1_s-1,row1_e):
+ #print("iiii=%d"%i1)
+ for j1 in range(col1_s-1,col1_e):
+ #print("jjjj=%d"%j1)
+ #print("data=%s" %(tdSql.getData(i1,j1)))
+ list1.append(tdSql.getData(i1,j1))
+ print("=====list1-------list1---=%s" %set(list1))
+
+ tdSql.execute("reset query cache;")
+ self.sql2 = sql2
+ list2 =[]
+ tdSql.query(sql2)
+ for i2 in range(row2_s-1,row2_e):
+ #print("iiii222=%d"%i2)
+ for j2 in range(col2_s-1,col2_e):
+ #print("jjjj222=%d"%j2)
+ #print("data=%s" %(tdSql.getData(i2,j2)))
+ list2.append(tdSql.getData(i2,j2))
+ print("=====list2-------list2---=%s" %set(list2))
+
+ if (list1 == list2) and len(list2)>0:
+ # print(("=====matrix===sql1.list1:'%s',sql2.list2:'%s'") %(list1,list2))
+ tdLog.info(("===matrix===sql1:'%s' matrix_result = sql2:'%s' matrix_result") %(sql1,sql2))
+ elif (set(list2)).issubset(set(list1)):
+ # handle out-of-order results across different child tables
+ # print(("=====list_issubset==matrix2in1-true===sql1.list1:'%s',sql2.list2:'%s'") %(list1,list2))
+ tdLog.info(("===matrix_issubset===sql1:'%s' matrix_set_result = sql2:'%s' matrix_set_result") %(sql1,sql2))
+ #elif abs(float(str(list1).replace("]","").replace("[","").replace("e+","")) - float(str(list2).replace("]","").replace("[","").replace("e+",""))) <= 0.0001:
+ elif abs(float(str(list1).replace("datetime.datetime","").replace("]","").replace("[","").replace("e+","").replace(", ","").replace("(","").replace(")","").replace("-","")) - float(str(list2).replace("datetime.datetime","").replace("]","").replace("[","").replace("e+","").replace(", ","").replace("(","").replace(")","").replace("-",""))) <= 0.0001:
+ print(("=====matrix_abs+e+===sql1.list1:'%s',sql2.list2:'%s'") %(list1,list2))
+ print(("=====matrix_abs+e+replace_after===sql1.list1:'%s',sql2.list2:'%s'") %(float(str(list1).replace("datetime.datetime","").replace("]","").replace("[","").replace("e+","").replace(", ","").replace("(","").replace(")","").replace("-","")),float(str(list2).replace("datetime.datetime","").replace("]","").replace("[","").replace("e+","").replace(", ","").replace("(","").replace(")","").replace("-",""))))
+ tdLog.info(("===matrix_abs+e+===sql1:'%s' matrix_result = sql2:'%s' matrix_result") %(sql1,sql2))
+ elif abs(float(str(list1).replace("datetime.datetime","").replace("]","").replace("[","").replace(", ","").replace("(","").replace(")","").replace("-","")) - float(str(list2).replace("datetime.datetime","").replace("]","").replace("[","").replace(", ","").replace("(","").replace(")","").replace("-",""))) <= 0.1:
+ #{datetime.datetime(2021, 8, 27, 1, 46, 40), -441.46841430664057}replace
+ print(("=====matrix_abs+replace===sql1.list1:'%s',sql2.list2:'%s'") %(list1,list2))
+ print(("=====matrix_abs+replace_after===sql1.list1:'%s',sql2.list2:'%s'") %(float(str(list1).replace("datetime.datetime","").replace("]","").replace("[","").replace(", ","").replace("(","").replace(")","").replace("-","")),float(str(list1).replace("datetime.datetime","").replace("]","").replace("[","").replace(", ","").replace("(","").replace(")","").replace("-",""))))
+ tdLog.info(("===matrix_abs+replace===sql1:'%s' matrix_result = sql2:'%s' matrix_result") %(sql1,sql2))
+ elif abs(float(str(list1).replace("datetime.datetime","").replace("]","").replace("[","").replace(", ","").replace("(","").replace(")","").replace("-","")) - float(str(list2).replace("datetime.datetime","").replace("]","").replace("[","").replace(", ","").replace("(","").replace(")","").replace("-",""))) <= 0.5:
+ print(("=====matrix_abs===sql1.list1:'%s',sql2.list2:'%s'") %(list1,list2))
+ print(("=====matrix_abs===sql1.list1:'%s',sql2.list2:'%s'") %(float(str(list1).replace("datetime.datetime","").replace("]","").replace("[","").replace(", ","").replace("(","").replace(")","").replace("-","")),float(str(list2).replace("datetime.datetime","").replace("]","").replace("[","").replace(", ","").replace("(","").replace(")","").replace("-",""))))
+ tdLog.info(("===matrix_abs======sql1:'%s' matrix_result = sql2:'%s' matrix_result") %(sql1,sql2))
+ else:
+ print(("=====matrix_error===sql1.list1:'%s',sql2.list2:'%s'") %(list1,list2))
+ tdLog.info(("sql1:'%s' matrix_result != sql2:'%s' matrix_result") %(sql1,sql2))
+ return tdSql.checkEqual(list1,list2)
+
+ def restartDnodes(self):
+ pass
+ # tdDnodes.stop(1)
+ # tdDnodes.start(1)
+
+ def dropandcreateDB_random(self,database,n):
+ ts = 1630000000000
+ num_random = 100
+ fake = Faker('zh_CN')
+ tdSql.execute('''drop database if exists %s ;''' %database)
+ tdSql.execute('''create database %s keep 36500;'''%database)
+ tdSql.execute('''use %s;'''%database)
+
+ tdSql.execute('''create stable stable_1 (ts timestamp , q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint , q_float float , q_double double , q_bool bool , q_binary binary(100) , q_nchar nchar(100) , q_ts timestamp , \
+ q_binary1 binary(100) , q_nchar1 nchar(100) ,q_binary2 binary(100) , q_nchar2 nchar(100) ,q_binary3 binary(100) , q_nchar3 nchar(100) ,q_binary4 binary(100) , q_nchar4 nchar(100) ,\
+ q_binary5 binary(100) , q_nchar5 nchar(100) ,q_binary6 binary(100) , q_nchar6 nchar(100) ,q_binary7 binary(100) , q_nchar7 nchar(100) ,q_binary8 binary(100) , q_nchar8 nchar(100) ,\
+ q_int_null int , q_bigint_null bigint , q_smallint_null smallint , q_tinyint_null tinyint, q_float_null float , q_double_null double , q_bool_null bool , q_binary_null binary(20) , q_nchar_null nchar(20) , q_ts_null timestamp) \
+ tags(loc nchar(100) , t_int int , t_bigint bigint , t_smallint smallint , t_tinyint tinyint, t_bool bool , t_binary binary(100) , t_nchar nchar(100) ,t_float float , t_double double , t_ts timestamp);''')
+ tdSql.execute('''create stable stable_2 (ts timestamp , q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint , q_float float , q_double double , q_bool bool , q_binary binary(100) , q_nchar nchar(100) , q_ts timestamp , \
+ q_binary1 binary(100) , q_nchar1 nchar(100) ,q_binary2 binary(100) , q_nchar2 nchar(100) ,q_binary3 binary(100) , q_nchar3 nchar(100) ,q_binary4 binary(100) , q_nchar4 nchar(100) ,\
+ q_binary5 binary(100) , q_nchar5 nchar(100) ,q_binary6 binary(100) , q_nchar6 nchar(100) ,q_binary7 binary(100) , q_nchar7 nchar(100) ,q_binary8 binary(100) , q_nchar8 nchar(100) ,\
+ q_int_null int , q_bigint_null bigint , q_smallint_null smallint , q_tinyint_null tinyint, q_float_null float , q_double_null double , q_bool_null bool , q_binary_null binary(20) , q_nchar_null nchar(20) , q_ts_null timestamp) \
+ tags(loc nchar(100) , t_int int , t_bigint bigint , t_smallint smallint , t_tinyint tinyint, t_bool bool , t_binary binary(100) , t_nchar nchar(100) ,t_float float , t_double double , t_ts timestamp);''')
+
+ tdSql.execute('''create stable stable_null_data (ts timestamp , q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint , q_float float , q_double double , q_bool bool , q_binary binary(100) , q_nchar nchar(100) , q_ts timestamp , \
+ q_binary1 binary(100) , q_nchar1 nchar(100) ,q_binary2 binary(100) , q_nchar2 nchar(100) ,q_binary3 binary(100) , q_nchar3 nchar(100) ,q_binary4 binary(100) , q_nchar4 nchar(100) ,\
+ q_binary5 binary(100) , q_nchar5 nchar(100) ,q_binary6 binary(100) , q_nchar6 nchar(100) ,q_binary7 binary(100) , q_nchar7 nchar(100) ,q_binary8 binary(100) , q_nchar8 nchar(100) ,\
+ q_int_null int , q_bigint_null bigint , q_smallint_null smallint , q_tinyint_null tinyint, q_float_null float , q_double_null double , q_bool_null bool , q_binary_null binary(20) , q_nchar_null nchar(20) , q_ts_null timestamp) \
+ tags(loc nchar(100) , t_int int , t_bigint bigint , t_smallint smallint , t_tinyint tinyint, t_bool bool , t_binary binary(100) , t_nchar nchar(100) ,t_float float , t_double double , t_ts timestamp);''')
+
+ tdSql.execute('''create stable stable_null_childtable (ts timestamp , q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint , q_float float , q_double double , q_bool bool , q_binary binary(100) , q_nchar nchar(100) , q_ts timestamp , \
+ q_binary1 binary(100) , q_nchar1 nchar(100) ,q_binary2 binary(100) , q_nchar2 nchar(100) ,q_binary3 binary(100) , q_nchar3 nchar(100) ,q_binary4 binary(100) , q_nchar4 nchar(100) ,\
+ q_binary5 binary(100) , q_nchar5 nchar(100) ,q_binary6 binary(100) , q_nchar6 nchar(100) ,q_binary7 binary(100) , q_nchar7 nchar(100) ,q_binary8 binary(100) , q_nchar8 nchar(100) ,\
+ q_int_null int , q_bigint_null bigint , q_smallint_null smallint , q_tinyint_null tinyint, q_float_null float , q_double_null double , q_bool_null bool , q_binary_null binary(20) , q_nchar_null nchar(20) , q_ts_null timestamp) \
+ tags(loc nchar(100) , t_int int , t_bigint bigint , t_smallint smallint , t_tinyint tinyint, t_bool bool , t_binary binary(100) , t_nchar nchar(100) ,t_float float , t_double double , t_ts timestamp);''')
+
+ #tdSql.execute('''create table stable_1_1 using stable_1 tags('stable_1_1', '0' , '0' , '0' , '0' , 0 , 'binary1' , 'nchar1' , '0' , '0' ,'0') ;''')
+ tdSql.execute('''create table stable_1_1 using stable_1 tags('stable_1_1', '%d' , '%d', '%d' , '%d' , 0 , 'binary1.%s' , 'nchar1.%s' , '%f', '%f' ,'%d') ;'''
+ %(fake.random_int(min=-2147483647, max=2147483647, step=1), fake.random_int(min=-9223372036854775807, max=9223372036854775807, step=1),
+ fake.random_int(min=-32767, max=32767, step=1) , fake.random_int(min=-127, max=127, step=1) ,
+ fake.pystr() ,fake.pystr() ,fake.pyfloat(),fake.pyfloat(),fake.random_int(min=-2147483647, max=2147483647, step=1)))
+ tdSql.execute('''create table stable_1_2 using stable_1 tags('stable_1_2', '2147483647' , '9223372036854775807' , '32767' , '127' , 1 , 'binary2' , 'nchar2' , '2' , '22' , \'1999-09-09 09:09:09.090\') ;''')
+ tdSql.execute('''create table stable_1_3 using stable_1 tags('stable_1_3', '-2147483647' , '-9223372036854775807' , '-32767' , '-127' , false , 'binary3' , 'nchar3nchar3' , '-3.3' , '-33.33' , \'2099-09-09 09:09:09.090\') ;''')
+ #tdSql.execute('''create table stable_1_4 using stable_1 tags('stable_1_4', '0' , '0' , '0' , '0' , 0 , '0' , '0' , '0' , '0' ,'0') ;''')
+ tdSql.execute('''create table stable_1_4 using stable_1 tags('stable_1_4', '%d' , '%d', '%d' , '%d' , 1 , 'binary1.%s' , 'nchar1.%s' , '%f', '%f' ,'%d') ;'''
+ %(fake.random_int(min=-2147483647, max=2147483647, step=1), fake.random_int(min=-9223372036854775807, max=9223372036854775807, step=1),
+ fake.random_int(min=-32767, max=32767, step=1) , fake.random_int(min=-127, max=127, step=1) ,
+ fake.pystr() ,fake.pystr() ,fake.pyfloat(),fake.pyfloat(),fake.random_int(min=-2147483647, max=2147483647, step=1)))
+
+ # tdSql.execute('''create table stable_2_1 using stable_2 tags('stable_2_1' , '0' , '0' , '0' , '0' , 0 , 'binary21' , 'nchar21' , '0' , '0' ,'0') ;''')
+ # tdSql.execute('''create table stable_2_2 using stable_2 tags('stable_2_2' , '0' , '0' , '0' , '0' , 0 , '0' , '0' , '0' , '0' ,'0') ;''')
+
+ # tdSql.execute('''create table stable_null_data_1 using stable_null_data tags('stable_null_data_1', '0' , '0' , '0' , '0' , 0 , '0' , '0' , '0' , '0' ,'0') ;''')
+
+ tdSql.execute('''create table stable_2_1 using stable_2 tags('stable_2_1' , '0' , '0' , '0' , '0' , 0 , 'binary21' , 'nchar21' , '0' , '0' ,\'2099-09-09 09:09:09.090\') ;''')
+ tdSql.execute('''create table stable_2_2 using stable_2 tags('stable_2_2' , '%d' , '%d', '%d' , '%d' , 0 , 'binary2.%s' , 'nchar2.%s' , '%f', '%f' ,'%d') ;'''
+ %(fake.random_int(min=-2147483647, max=2147483647, step=1), fake.random_int(min=-9223372036854775807, max=9223372036854775807, step=1),
+ fake.random_int(min=-32767, max=32767, step=1) , fake.random_int(min=-127, max=127, step=1) ,
+ fake.pystr() ,fake.pystr() ,fake.pyfloat(),fake.pyfloat(),fake.random_int(min=-2147483647, max=2147483647, step=1)))
+
+ tdSql.execute('''create table stable_null_data_1 using stable_null_data tags('stable_null_data_1', '%d' , '%d', '%d' , '%d' , 1 , 'binary1.%s' , 'nchar1.%s' , '%f', '%f' ,'%d') ;'''
+ %(fake.random_int(min=-2147483647, max=2147483647, step=1), fake.random_int(min=-9223372036854775807, max=9223372036854775807, step=1),
+ fake.random_int(min=-32767, max=32767, step=1) , fake.random_int(min=-127, max=127, step=1) ,
+ fake.pystr() ,fake.pystr() ,fake.pyfloat(),fake.pyfloat(),fake.random_int(min=-2147483647, max=2147483647, step=1)))
+
+ #regular table
+ tdSql.execute('''create table regular_table_1 \
+ (ts timestamp , q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint , q_float float , q_double double , q_bool bool , q_binary binary(100) , q_nchar nchar(100) , q_ts timestamp , \
+ q_binary1 binary(100) , q_nchar1 nchar(100) ,q_binary2 binary(100) , q_nchar2 nchar(100) ,q_binary3 binary(100) , q_nchar3 nchar(100) ,q_binary4 binary(100) , q_nchar4 nchar(100) ,\
+ q_binary5 binary(100) , q_nchar5 nchar(100) ,q_binary6 binary(100) , q_nchar6 nchar(100) ,q_binary7 binary(100) , q_nchar7 nchar(100) ,q_binary8 binary(100) , q_nchar8 nchar(100) ,\
+ q_int_null int , q_bigint_null bigint , q_smallint_null smallint , q_tinyint_null tinyint, q_float_null float , q_double_null double , q_bool_null bool , q_binary_null binary(20) , q_nchar_null nchar(20) , q_ts_null timestamp) ;''')
+ tdSql.execute('''create table regular_table_2 \
+ (ts timestamp , q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint , q_float float , q_double double , q_bool bool , q_binary binary(100) , q_nchar nchar(100) , q_ts timestamp , \
+ q_binary1 binary(100) , q_nchar1 nchar(100) ,q_binary2 binary(100) , q_nchar2 nchar(100) ,q_binary3 binary(100) , q_nchar3 nchar(100) ,q_binary4 binary(100) , q_nchar4 nchar(100) ,\
+ q_binary5 binary(100) , q_nchar5 nchar(100) ,q_binary6 binary(100) , q_nchar6 nchar(100) ,q_binary7 binary(100) , q_nchar7 nchar(100) ,q_binary8 binary(100) , q_nchar8 nchar(100) ,\
+ q_int_null int , q_bigint_null bigint , q_smallint_null smallint , q_tinyint_null tinyint, q_float_null float , q_double_null double , q_bool_null bool , q_binary_null binary(20) , q_nchar_null nchar(20) , q_ts_null timestamp) ;''')
+ tdSql.execute('''create table regular_table_3 \
+ (ts timestamp , q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint , q_float float , q_double double , q_bool bool , q_binary binary(100) , q_nchar nchar(100) , q_ts timestamp , \
+ q_binary1 binary(100) , q_nchar1 nchar(100) ,q_binary2 binary(100) , q_nchar2 nchar(100) ,q_binary3 binary(100) , q_nchar3 nchar(100) ,q_binary4 binary(100) , q_nchar4 nchar(100) ,\
+ q_binary5 binary(100) , q_nchar5 nchar(100) ,q_binary6 binary(100) , q_nchar6 nchar(100) ,q_binary7 binary(100) , q_nchar7 nchar(100) ,q_binary8 binary(100) , q_nchar8 nchar(100) ,\
+ q_int_null int , q_bigint_null bigint , q_smallint_null smallint , q_tinyint_null tinyint, q_float_null float , q_double_null double , q_bool_null bool , q_binary_null binary(20) , q_nchar_null nchar(20) , q_ts_null timestamp) ;''')
+
+ tdSql.execute('''create table regular_table_null \
+ (ts timestamp , q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint , q_float float , q_double double , q_bool bool , q_binary binary(100) , q_nchar nchar(100) , q_ts timestamp , \
+ q_binary1 binary(100) , q_nchar1 nchar(100) ,q_binary2 binary(100) , q_nchar2 nchar(100) ,q_binary3 binary(100) , q_nchar3 nchar(100) ,q_binary4 binary(100) , q_nchar4 nchar(100) ,\
+ q_binary5 binary(100) , q_nchar5 nchar(100) ,q_binary6 binary(100) , q_nchar6 nchar(100) ,q_binary7 binary(100) , q_nchar7 nchar(100) ,q_binary8 binary(100) , q_nchar8 nchar(100) ,\
+ q_int_null int , q_bigint_null bigint , q_smallint_null smallint , q_tinyint_null tinyint, q_float_null float , q_double_null double , q_bool_null bool , q_binary_null binary(20) , q_nchar_null nchar(20) , q_ts_null timestamp) ;''')
+
+
+ for i in range(num_random*n):
+ tdSql.execute('''insert into stable_1_1 (ts , q_int , q_bigint , q_smallint , q_tinyint , q_float , q_double , q_bool , q_binary , q_nchar, q_ts,\
+ q_binary1 , q_nchar1 , q_binary2 , q_nchar2 , q_binary3 , q_nchar3 , q_binary4 , q_nchar4 , q_binary5 , q_nchar5 , q_binary6 , q_nchar6 , q_binary7 , q_nchar7, q_binary8 , q_nchar8) \
+ values(%d, %d, %d, %d, %d, %f, %f, 0, 'binary.%s', 'nchar.%s', %d, 'binary1.%s', 'nchar1.%s', 'binary2.%s', 'nchar2.%s', 'binary3.%s', 'nchar3.%s', \
+ 'binary4.%s', 'nchar4.%s', 'binary5.%s', 'nchar5.%s', 'binary6.%s', 'nchar6.%s', 'binary7.%s', 'nchar7.%s', 'binary8.%s', 'nchar8.%s') ;'''
+ % (ts + i*1000, fake.random_int(min=-2147483647, max=2147483647, step=1),
+ fake.random_int(min=-9223372036854775807, max=9223372036854775807, step=1),
+ fake.random_int(min=-32767, max=32767, step=1) , fake.random_int(min=-127, max=127, step=1) ,
+ fake.pyfloat() , fake.pyfloat() , fake.pystr() , fake.address() , ts + i , fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() ,
+ fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address()))
+ tdSql.execute('''insert into regular_table_1 (ts , q_int , q_bigint , q_smallint , q_tinyint , q_float , q_double, q_bool , q_binary , q_nchar, q_ts,\
+ q_binary1 , q_nchar1 , q_binary2 , q_nchar2 , q_binary3 , q_nchar3 , q_binary4 , q_nchar4 , q_binary5 , q_nchar5 , q_binary6 , q_nchar6 , q_binary7 , q_nchar7, q_binary8 , q_nchar8) \
+ values(%d, %d, %d, %d, %d, %f, %f, 0, 'binary.%s', 'nchar.%s', %d, 'binary1.%s', 'nchar1.%s', 'binary2.%s', 'nchar2.%s', 'binary3.%s', 'nchar3.%s', \
+ 'binary4.%s', 'nchar4.%s', 'binary5.%s', 'nchar5.%s', 'binary6.%s', 'nchar6.%s', 'binary7.%s', 'nchar7.%s', 'binary8.%s', 'nchar8.%s') ;'''
+ % (ts + i*1000, fake.random_int(min=-2147483647, max=2147483647, step=1) ,
+ fake.random_int(min=-9223372036854775807, max=9223372036854775807, step=1) ,
+ fake.random_int(min=-32767, max=32767, step=1) , fake.random_int(min=-127, max=127, step=1) ,
+ fake.pyfloat() , fake.pyfloat() , fake.pystr() , fake.address() , ts + i, fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() ,
+ fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address()))
+
+ tdSql.execute('''insert into stable_1_2 (ts , q_int , q_bigint , q_smallint , q_tinyint , q_float , q_double, q_bool , q_binary , q_nchar, q_ts,\
+ q_binary1 , q_nchar1 , q_binary2 , q_nchar2 , q_binary3 , q_nchar3 , q_binary4 , q_nchar4 , q_binary5 , q_nchar5 , q_binary6 , q_nchar6 , q_binary7 , q_nchar7, q_binary8 , q_nchar8)\
+ values(%d, %d, %d, %d, %d, %f, %f, 1, 'binary.%s', 'nchar.%s', %d, 'binary1.%s', 'nchar1.%s', 'binary2.%s', 'nchar2.%s', 'binary3.%s', 'nchar3.%s', \
+ 'binary4.%s', 'nchar4.%s', 'binary5.%s', 'nchar5.%s', 'binary6.%s', 'nchar6.%s', 'binary7.%s', 'nchar7.%s', 'binary8.%s', 'nchar8.%s') ;'''
+ % (ts + i*1000, fake.random_int(min=0, max=2147483647, step=1),
+ fake.random_int(min=0, max=9223372036854775807, step=1),
+ fake.random_int(min=0, max=32767, step=1) , fake.random_int(min=0, max=127, step=1) ,
+ fake.pyfloat() , fake.pyfloat() , fake.pystr() , fake.address() , ts + i, fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() ,
+ fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address()))
+ tdSql.execute('''insert into regular_table_2 (ts , q_int , q_bigint , q_smallint , q_tinyint , q_float , q_double, q_bool , q_binary , q_nchar, q_ts,\
+ q_binary1 , q_nchar1 , q_binary2 , q_nchar2 , q_binary3 , q_nchar3 , q_binary4 , q_nchar4 , q_binary5 , q_nchar5 , q_binary6 , q_nchar6 , q_binary7 , q_nchar7, q_binary8 , q_nchar8) \
+ values(%d, %d, %d, %d, %d, %f, %f, 1, 'binary.%s', 'nchar.%s', %d, 'binary1.%s', 'nchar1.%s', 'binary2.%s', 'nchar2.%s', 'binary3.%s', 'nchar3.%s', \
+ 'binary4.%s', 'nchar4.%s', 'binary5.%s', 'nchar5.%s', 'binary6.%s', 'nchar6.%s', 'binary7.%s', 'nchar7.%s', 'binary8.%s', 'nchar8.%s') ;'''
+ % (ts + i*1000, fake.random_int(min=0, max=2147483647, step=1),
+ fake.random_int(min=0, max=9223372036854775807, step=1),
+ fake.random_int(min=0, max=32767, step=1) , fake.random_int(min=0, max=127, step=1) ,
+ fake.pyfloat() , fake.pyfloat() , fake.pystr() , fake.address() , ts + i, fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() ,
+ fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address()))
+
+ tdSql.execute('''insert into stable_1_2 (ts , q_int , q_bigint , q_smallint , q_tinyint , q_float , q_double, q_bool , q_binary , q_nchar, q_ts,\
+ q_binary1 , q_nchar1 , q_binary2 , q_nchar2 , q_binary3 , q_nchar3 , q_binary4 , q_nchar4 , q_binary5 , q_nchar5 , q_binary6 , q_nchar6 , q_binary7 , q_nchar7, q_binary8 , q_nchar8) \
+ values(%d, %d, %d, %d, %d, %f, %f, 1, 'binary.%s', 'nchar.%s', %d, 'binary1.%s', 'nchar1.%s', 'binary2.%s', 'nchar2.%s', 'binary3.%s', 'nchar3.%s', \
+ 'binary4.%s', 'nchar4.%s', 'binary5.%s', 'nchar5.%s', 'binary6.%s', 'nchar6.%s', 'binary7.%s', 'nchar7.%s', 'binary8.%s', 'nchar8.%s') ;'''
+ % (ts + i*1000 +1, fake.random_int(min=-2147483647, max=0, step=1),
+ fake.random_int(min=-9223372036854775807, max=0, step=1),
+ fake.random_int(min=-32767, max=0, step=1) , fake.random_int(min=-127, max=0, step=1) ,
+ fake.pyfloat() , fake.pyfloat() , fake.pystr() , fake.address() , ts + i +1, fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() ,
+ fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address()))
+ tdSql.execute('''insert into regular_table_2 (ts , q_int , q_bigint , q_smallint , q_tinyint , q_float , q_double, q_bool , q_binary , q_nchar, q_ts,\
+ q_binary1 , q_nchar1 , q_binary2 , q_nchar2 , q_binary3 , q_nchar3 , q_binary4 , q_nchar4 , q_binary5 , q_nchar5 , q_binary6 , q_nchar6 , q_binary7 , q_nchar7, q_binary8 , q_nchar8) \
+ values(%d, %d, %d, %d, %d, %f, %f, 1, 'binary.%s', 'nchar.%s', %d, 'binary1.%s', 'nchar1.%s', 'binary2.%s', 'nchar2.%s', 'binary3.%s', 'nchar3.%s', \
+ 'binary4.%s', 'nchar4.%s', 'binary5.%s', 'nchar5.%s', 'binary6.%s', 'nchar6.%s', 'binary7.%s', 'nchar7.%s', 'binary8.%s', 'nchar8.%s') ;'''
+ % (ts + i*1000 +1, fake.random_int(min=-2147483647, max=0, step=1),
+ fake.random_int(min=-9223372036854775807, max=0, step=1),
+ fake.random_int(min=-32767, max=0, step=1) , fake.random_int(min=-127, max=0, step=1) ,
+ fake.pyfloat() , fake.pyfloat() , fake.pystr() , fake.address() , ts + i +1, fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() ,
+ fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address()))
+
+ tdSql.execute('''insert into stable_2_1 (ts , q_int , q_bigint , q_smallint , q_tinyint , q_float , q_double, q_bool , q_binary , q_nchar, q_ts,\
+ q_binary1 , q_nchar1 , q_binary2 , q_nchar2 , q_binary3 , q_nchar3 , q_binary4 , q_nchar4 , q_binary5 , q_nchar5 , q_binary6 , q_nchar6 , q_binary7 , q_nchar7, q_binary8 , q_nchar8) \
+ values(%d, %d, %d, %d, %d, %f, %f, 0, 'binary.%s', 'nchar.%s', %d, 'binary1.%s', 'nchar1.%s', 'binary2.%s', 'nchar2.%s', 'binary3.%s', 'nchar3.%s', \
+ 'binary4.%s', 'nchar4.%s', 'binary5.%s', 'nchar5.%s', 'binary6.%s', 'nchar6.%s', 'binary7.%s', 'nchar7.%s', 'binary8.%s', 'nchar8.%s') ;'''
+ % (ts + i*1000, fake.random_int(min=-0, max=2147483647, step=1),
+ fake.random_int(min=-0, max=9223372036854775807, step=1),
+ fake.random_int(min=-0, max=32767, step=1) , fake.random_int(min=-0, max=127, step=1) ,
+ fake.pyfloat() , fake.pyfloat() , fake.pystr() , fake.address() , ts + i, fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() ,
+ fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address()))
+
+ tdSql.execute('''insert into stable_2_1 (ts , q_int , q_bigint , q_smallint , q_tinyint , q_float , q_double, q_bool , q_binary , q_nchar, q_ts,\
+ q_binary1 , q_nchar1 , q_binary2 , q_nchar2 , q_binary3 , q_nchar3 , q_binary4 , q_nchar4 , q_binary5 , q_nchar5 , q_binary6 , q_nchar6 , q_binary7 , q_nchar7, q_binary8 , q_nchar8) \
+ values(%d, %d, %d, %d, %d, %f, %f, 0, 'binary.%s', 'nchar.%s', %d, 'binary1.%s', 'nchar1.%s', 'binary2.%s', 'nchar2.%s', 'binary3.%s', 'nchar3.%s', \
+ 'binary4.%s', 'nchar4.%s', 'binary5.%s', 'nchar5.%s', 'binary6.%s', 'nchar6.%s', 'binary7.%s', 'nchar7.%s', 'binary8.%s', 'nchar8.%s') ;'''
+ % (ts + i*1000 +1, fake.random_int(min=-0, max=2147483647, step=1),
+ fake.random_int(min=-0, max=9223372036854775807, step=1),
+ fake.random_int(min=-0, max=32767, step=1) , fake.random_int(min=-0, max=127, step=1) ,
+ fake.pyfloat() , fake.pyfloat() , fake.pystr() , fake.address() , ts + i, fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() ,
+ fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address()))
+
+ tdSql.execute('''insert into stable_2_1 (ts , q_int , q_bigint , q_smallint , q_tinyint , q_float , q_double, q_bool , q_binary , q_nchar, q_ts,\
+ q_binary1 , q_nchar1 , q_binary2 , q_nchar2 , q_binary3 , q_nchar3 , q_binary4 , q_nchar4 , q_binary5 , q_nchar5 , q_binary6 , q_nchar6 , q_binary7 , q_nchar7, q_binary8 , q_nchar8) \
+ values(%d, %d, %d, %d, %d, %f, %f, 0, 'binary.%s', 'nchar.%s', %d, 'binary1.%s', 'nchar1.%s', 'binary2.%s', 'nchar2.%s', 'binary3.%s', 'nchar3.%s', \
+ 'binary4.%s', 'nchar4.%s', 'binary5.%s', 'nchar5.%s', 'binary6.%s', 'nchar6.%s', 'binary7.%s', 'nchar7.%s', 'binary8.%s', 'nchar8.%s') ;'''
+ % (ts + i*1000 +10, fake.random_int(min=-0, max=2147483647, step=1),
+ fake.random_int(min=-0, max=9223372036854775807, step=1),
+ fake.random_int(min=-0, max=32767, step=1) , fake.random_int(min=-0, max=127, step=1) ,
+ fake.pyfloat() , fake.pyfloat() , fake.pystr() , fake.address() , ts + i, fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() ,
+ fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address() , fake.pystr() , fake.address()))
+
+ tdSql.query("select count(*) from stable_1;")
+ tdSql.checkData(0,0,3*num_random*n)
+ tdSql.query("select count(*) from regular_table_1;")
+ tdSql.checkData(0,0,num_random*n)
+
+ def math_nest(self,mathlist):
+
+ print("==========%s===start=============" %mathlist)
+ os.system("rm -rf %s/%s.sql" % (self.testcasePath,self.testcaseFilename))
+
+ self.dropandcreateDB_random("%s" %self.db_nest, 1)
+
+ if (mathlist == ['ABS','SQRT']) or (mathlist == ['SIN','COS','TAN','ASIN','ACOS','ATAN']) or (mathlist == ['FLOOR','CEIL','ROUND']) \
+ or (mathlist == ['CSUM']) or (mathlist == ['']):
+ math_functions = mathlist
+ fun_fix_column = ['(q_bigint)','(q_smallint)','(q_tinyint)','(q_int)','(q_float)','(q_double)','(q_bigint_null)','(q_smallint_null)','(q_tinyint_null)','(q_int_null)','(q_float_null)','(q_double_null)']
+ fun_column_1 = random.sample(math_functions,1)+random.sample(fun_fix_column,1)
+ math_fun_1 = str(fun_column_1).replace("[","").replace("]","").replace("'","").replace(", ","")
+ fun_column_2 = random.sample(math_functions,1)+random.sample(fun_fix_column,1)
+ math_fun_2 = str(fun_column_2).replace("[","").replace("]","").replace("'","").replace(", ","")
+
+ fun_fix_column_j = ['(t1.q_bigint)','(t1.q_smallint)','(t1.q_tinyint)','(t1.q_int)','(t1.q_float)','(t1.q_double)','(t1.q_bigint_null)','(t1.q_smallint_null)','(t1.q_tinyint_null)','(t1.q_int_null)','(t1.q_float_null)','(t1.q_double_null)',
+ '(t2.q_bigint)','(t2.q_smallint)','(t2.q_tinyint)','(t2.q_int)','(t2.q_float)','(t2.q_double)','(t2.q_bigint_null)','(t2.q_smallint_null)','(t2.q_tinyint_null)','(t2.q_int_null)','(t2.q_float_null)','(t2.q_double_null)']
+ fun_column_join_1 = random.sample(math_functions,1)+random.sample(fun_fix_column_j,1)
+ math_fun_join_1 = str(fun_column_join_1).replace("[","").replace("]","").replace("'","").replace(", ","")
+ fun_column_join_2 = random.sample(math_functions,1)+random.sample(fun_fix_column_j,1)
+ math_fun_join_2 = str(fun_column_join_2).replace("[","").replace("]","").replace("'","").replace(", ","")
+
+ elif (mathlist == ['UNIQUE']) or (mathlist == ['HYPERLOGLOG']):
+ math_functions = mathlist
+ fun_fix_column = ['(q_bigint)','(q_smallint)','(q_tinyint)','(q_int)','(q_float)','(q_double)','(q_binary)','(q_nchar)','(q_bool)','(q_ts)',
+ '(q_bigint_null)','(q_smallint_null)','(q_tinyint_null)','(q_int_null)','(q_float_null)','(q_double_null)','(q_binary_null)','(q_nchar_null)','(q_bool_null)','(q_ts_null)']
+ fun_column_1 = random.sample(math_functions,1)+random.sample(fun_fix_column,1)
+ math_fun_1 = str(fun_column_1).replace("[","").replace("]","").replace("'","").replace(", ","")
+ fun_column_2 = random.sample(math_functions,1)+random.sample(fun_fix_column,1)
+ math_fun_2 = str(fun_column_2).replace("[","").replace("]","").replace("'","").replace(", ","")
+
+ fun_fix_column_j = ['(t1.q_bigint)','(t1.q_smallint)','(t1.q_tinyint)','(t1.q_int)','(t1.q_float)','(t1.q_double)','(t1.q_bigint_null)','(t1.q_smallint_null)','(t1.q_tinyint_null)','(t1.q_int_null)','(t1.q_float_null)','(t1.q_double_null)',
+ '(t2.q_bigint)','(t2.q_smallint)','(t2.q_tinyint)','(t2.q_int)','(t2.q_float)','(t2.q_double)','(t2.q_bigint_null)','(t2.q_smallint_null)','(t2.q_tinyint_null)','(t2.q_int_null)','(t2.q_float_null)','(t2.q_double_null)']
+ fun_column_join_1 = random.sample(math_functions,1)+random.sample(fun_fix_column_j,1)
+ math_fun_join_1 = str(fun_column_join_1).replace("[","").replace("]","").replace("'","").replace(", ","")
+ fun_column_join_2 = random.sample(math_functions,1)+random.sample(fun_fix_column_j,1)
+ math_fun_join_2 = str(fun_column_join_2).replace("[","").replace("]","").replace("'","").replace(", ","")
+
+ elif (mathlist == ['POW','LOG']) or (mathlist == ['MAVG']) or (mathlist == ['SAMPLE']) or (mathlist == ['TAIL']):
+ math_functions = mathlist
+ num = random.randint(0, 1000)
+ fun_fix_column = ['(q_bigint,num)','(q_smallint,num)','(q_tinyint,num)','(q_int,num)','(q_float,num)','(q_double,num)',
+ '(q_bigint_null,num)','(q_smallint_null,num)','(q_tinyint_null,num)','(q_int_null,num)','(q_float_null,num)','(q_double_null,num)']
+ fun_column_1 = random.sample(math_functions,1)+random.sample(fun_fix_column,1)
+ math_fun_1 = str(fun_column_1).replace("[","").replace("]","").replace("'","").replace(", ","").replace("num",str(num))
+ fun_column_2 = random.sample(math_functions,1)+random.sample(fun_fix_column,1)
+ math_fun_2 = str(fun_column_2).replace("[","").replace("]","").replace("'","").replace(", ","").replace("num",str(num))
+
+ fun_fix_column_j = ['(t1.q_bigint,num)','(t1.q_smallint,num)','(t1.q_tinyint,num)','(t1.q_int,num)','(t1.q_float,num)','(t1.q_double,num)',
+ '(t1.q_bigint_null,num)','(t1.q_smallint_null,num)','(t1.q_tinyint_null,num)','(t1.q_int_null,num)','(t1.q_float_null,num)','(t1.q_double_null,num)',
+ '(t2.q_bigint,num)','(t2.q_smallint,num)','(t2.q_tinyint,num)','(t2.q_int,num)','(t2.q_float,num)','(t2.q_double,num)',
+ '(t2.q_bigint_null,num)','(t2.q_smallint_null,num)','(t2.q_tinyint_null,num)','(t2.q_int_null,num)','(t2.q_float_null,num)','(t2.q_double_null,num)']
+ fun_column_join_1 = random.sample(math_functions,1)+random.sample(fun_fix_column_j,1)
+ math_fun_join_1 = str(fun_column_join_1).replace("[","").replace("]","").replace("'","").replace(", ","").replace("num",str(num))
+ fun_column_join_2 = random.sample(math_functions,1)+random.sample(fun_fix_column_j,1)
+ math_fun_join_2 = str(fun_column_join_2).replace("[","").replace("]","").replace("'","").replace(", ","").replace("num",str(num))
+
+ tdSql.query("select 1-1 as math_nest from stable_1 limit 1;")
+ for i in range(self.fornum):
+ if (mathlist == ['SIN','COS','TAN','ASIN','ACOS','ATAN']) or (mathlist == ['ABS','SQRT']) \
+ or (mathlist == ['POW','LOG']) or (mathlist == ['FLOOR','CEIL','ROUND']) :
+ sql = "select ts , floor(asct1) from ( select "
+ sql += "%s as asct1, " % math_fun_1
+ sql += "%s as asct2, " % math_fun_2
+ sql += "%s, " % random.choice(self.s_s_select)
+ sql += "%s, " % random.choice(self.q_select)
+ sql += "ts from regular_table_1 where "
+ sql += "%s " % random.choice(self.q_where)
+ sql += "%s " % random.choice(self.order_where)
+ sql += ");"
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ tdSql.query(sql)
+ tdSql.checkRows(100)
+ self.cur1.execute(sql)
+ elif (mathlist == ['MAVG']) or (mathlist == ['SAMPLE'])or (mathlist == ['TAIL']) or (mathlist == ['CSUM']) \
+ or (mathlist == ['HYPERLOGLOG']) or (mathlist == ['UNIQUE']):
+ sql = "select floor(asct1) from ( select "
+ sql += "%s as asct1 " % math_fun_1
+ # sql += "%s as asct2, " % math_fun_2
+ # sql += "%s, " % random.choice(self.s_s_select)
+ # sql += "%s, " % random.choice(self.q_select)
+ sql += " from regular_table_1 where "
+ sql += "%s " % random.choice(self.q_where)
+ #sql += "%s " % random.choice(self.order_where)
+ sql += ");"
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ tdSql.query(sql)
+ #tdSql.checkRows(100)
+ self.cur1.execute(sql)
+
+ tdSql.query("select 1-2 as math_nest from stable_1 limit 1;")
+ for i in range(self.fornum):
+ if (mathlist == ['SIN','COS','TAN','ASIN','ACOS','ATAN']) or (mathlist == ['ABS','SQRT']) \
+ or (mathlist == ['POW','LOG']) or (mathlist == ['FLOOR','CEIL','ROUND']) :
+ sql = "select ts , abs(asct1) from ( select "
+ sql += "%s as asct1, " % math_fun_1
+ sql += "%s, " % random.choice(self.s_s_select)
+ sql += "%s, " % random.choice(self.q_select)
+ sql += "ts ts from regular_table_1 where "
+ sql += "%s " % random.choice(self.q_where)
+ sql += "%s )" % random.choice(self.order_where)
+ sql += "%s " % random.choice(self.unionall_or_union)
+ sql += "select ts , asct2 from ( select "
+ sql += "%s as asct2, " % math_fun_2
+ sql += "%s, " % random.choice(self.s_s_select)
+ sql += "%s, " % random.choice(self.q_select)
+ sql += "ts ts from regular_table_1 where "
+ sql += "%s " % random.choice(self.q_where)
+ #sql += "%s " % random.choice(having_support)
+ sql += "%s " % random.choice(self.order_where)
+ sql += "%s " % random.choice(self.limit1_where)
+ sql += ")"
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ #TD-15437 tdSql.query(sql)
+ #TD-15437 self.cur1.execute(sql)
+ elif (mathlist == ['MAVG']) or (mathlist == ['SAMPLE'])or (mathlist == ['TAIL']) or (mathlist == ['CSUM']) \
+ or (mathlist == ['HYPERLOGLOG']) or (mathlist == ['UNIQUE']):
+ sql = "select abs(asct1) from ( select "
+ sql += "%s as asct1 " % math_fun_1
+ # sql += "%s, " % random.choice(self.s_s_select)
+ # sql += "%s, " % random.choice(self.q_select)
+ sql += "from regular_table_1 where "
+ sql += "%s )" % random.choice(self.q_where)
+ #sql += "%s )" % random.choice(self.order_where)
+ sql += "%s " % random.choice(self.unionall_or_union)
+ sql += "select floor(asct2) from ( select "
+ sql += "%s as asct2 " % math_fun_2
+ # sql += "%s, " % random.choice(self.s_s_select)
+ # sql += "%s, " % random.choice(self.q_select)
+ sql += " from regular_table_1 where "
+ sql += "%s " % random.choice(self.q_where)
+ #sql += "%s " % random.choice(having_support)
+ #sql += "%s " % random.choice(self.order_where)
+ sql += "%s " % random.choice(self.limit1_where)
+ sql += ")"
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ #TD-15473 tdSql.query(sql)
+ #TD-15473 self.cur1.execute(sql)
+
+ tdSql.query("select 1-3 as math_nest from stable_1 limit 1;")
+ for i in range(self.fornum):
+ if (mathlist == ['SIN','COS','TAN','ASIN','ACOS','ATAN']) or (mathlist == ['ABS','SQRT']) \
+ or (mathlist == ['POW','LOG']) or (mathlist == ['FLOOR','CEIL','ROUND']) :
+ sql = "select ts , min(asct1) from ( select "
+ sql += "%s as asct1, ts ," % math_fun_1
+ sql += "%s as asct2, " % math_fun_2
+ sql += "%s, " % random.choice(self.s_s_select)
+ sql += "%s, " % random.choice(self.q_select)
+ sql += "ts from regular_table_1 where "
+ sql += "%s " % random.choice(self.q_where)
+ sql += "%s select " % random.choice(self.unionall_or_union)
+ sql += "%s as asct2, ts ," % math_fun_2
+ sql += "%s as asct1, " % math_fun_1
+ sql += "%s, " % random.choice(self.s_s_select)
+ sql += "%s, " % random.choice(self.q_select)
+ sql += "ts from regular_table_2 where "
+ sql += "%s " % random.choice(self.q_where)
+ sql += "%s " % random.choice(self.order_where)
+ sql += ")"
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ #TD-15473 tdSql.query(sql)
+ #self.cur1.execute(sql)
+ elif (mathlist == ['MAVG']) or (mathlist == ['SAMPLE'])or (mathlist == ['TAIL']) or (mathlist == ['CSUM']) \
+ or (mathlist == ['HYPERLOGLOG']) or (mathlist == ['UNIQUE']):
+ sql = "select ts , min(asct1) from ( select "
+ sql += "%s as asct1, ts ," % math_fun_1
+ sql += "%s as asct2, " % math_fun_2
+ sql += "%s, " % random.choice(self.s_s_select)
+ sql += "%s, " % random.choice(self.q_select)
+ sql += "ts from regular_table_1 where "
+ sql += "%s " % random.choice(self.q_where)
+ sql += "%s select " % random.choice(self.unionall_or_union)
+ sql += "%s as asct2, ts ," % math_fun_2
+ sql += "%s as asct1, " % math_fun_1
+ sql += "%s, " % random.choice(self.s_s_select)
+ sql += "%s, " % random.choice(self.q_select)
+ sql += "ts from regular_table_2 where "
+ sql += "%s " % random.choice(self.q_where)
+ sql += "%s " % random.choice(self.order_where)
+ sql += ")"
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ #TD-15473 tdSql.query(sql)
+ #self.cur1.execute(sql)
+
+ tdSql.query("select 1-4 as math_nest from stable_1 limit 1;")
+ for i in range(self.fornum):
+ if (mathlist == ['SIN','COS','TAN','ASIN','ACOS','ATAN']) or (mathlist == ['ABS','SQRT']) \
+ or (mathlist == ['POW','LOG']) or (mathlist == ['FLOOR','CEIL','ROUND']) :
+ sql = "select ts , asct1 from ( select t1.ts as ts,"
+ sql += "%s, " % math_fun_join_1
+ sql += "%s as asct1, " % math_fun_join_2
+ sql += "%s, " % math_fun_join_1
+ sql += "t1.%s, " % random.choice(self.q_select)
+ sql += "t2.%s, " % random.choice(self.q_select)
+ sql += "t2.ts from regular_table_1 t1 , regular_table_2 t2 where t1.ts = t2.ts and "
+ sql += "%s " % random.choice(self.q_u_where)
+ sql += "and %s " % random.choice(self.q_u_or_where)
+ sql += "%s " % random.choice(self.order_u_where)
+ sql += ");"
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ tdSql.query(sql)
+ tdSql.checkRows(100)
+ self.cur1.execute(sql)
+ elif (mathlist == ['MAVG']) or (mathlist == ['SAMPLE'])or (mathlist == ['TAIL']) or (mathlist == ['CSUM'])\
+ or (mathlist == ['HYPERLOGLOG']) or (mathlist == ['UNIQUE']):
+ sql = "select count(asct1) from ( select "
+ sql += "%s as asct1 " % math_fun_join_2
+ sql += "from regular_table_1 t1 , regular_table_2 t2 where t1.ts = t2.ts and "
+ sql += "%s " % random.choice(self.q_u_where)
+ sql += "and %s " % random.choice(self.q_u_or_where)
+ sql += ");"
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ tdSql.query(sql)
+ self.cur1.execute(sql)
+
+ tdSql.query("select 1-5 as math_nest from stable_1 limit 1;")
+ for i in range(self.fornum):
+ if (mathlist == ['SIN','COS','TAN','ASIN','ACOS','ATAN']) or (mathlist == ['ABS','SQRT']) \
+ or (mathlist == ['POW','LOG']) or (mathlist == ['FLOOR','CEIL','ROUND']) :
+ sql = "select ts ,"
+ sql += "%s, " % math_fun_1
+ sql += "%s, " % random.choice(self.q_select)
+ sql += "%s, " % random.choice(self.q_select)
+ sql += "%s " % math_fun_2
+ sql += " from ( select * from regular_table_1 ) where "
+ sql += "%s " % random.choice(self.q_where)
+ sql += "%s " % random.choice(self.order_where)
+ sql += " ;"
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ tdSql.query(sql)
+ tdSql.checkRows(100)
+ self.cur1.execute(sql)
+ elif (mathlist == ['MAVG']) or (mathlist == ['SAMPLE'])or (mathlist == ['TAIL']) or (mathlist == ['CSUM'])\
+ or (mathlist == ['HYPERLOGLOG']) or (mathlist == ['UNIQUE']):
+ sql = "select "
+ # sql += "%s, " % math_fun_1
+ # sql += "%s, " % random.choice(self.q_select)
+ # sql += "%s, " % random.choice(self.q_select)
+ sql += "%s " % math_fun_2
+ sql += " from ( select * from regular_table_1 ) where "
+ sql += "%s " % random.choice(self.q_where)
+ sql += " ;"
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ #TD-15973 tdSql.query(sql)
+ #TD-15973 self.cur1.execute(sql)
+
+ tdSql.query("select 1-6 as math_nest from stable_1 limit 1;")
+ for i in range(self.fornum):
+ if (mathlist == ['SIN','COS','TAN','ASIN','ACOS','ATAN']) or (mathlist == ['ABS','SQRT']) \
+ or (mathlist == ['POW','LOG']) or (mathlist == ['FLOOR','CEIL','ROUND']) :
+ sql = "select ts , max(asct1) from ( select t1.ts as ts,"
+ sql += "%s, " % math_fun_join_1
+ sql += "%s as asct1, " % math_fun_join_2
+ sql += "t1.%s, " % random.choice(self.q_select)
+ sql += "t2.%s, " % random.choice(self.q_select)
+ sql += "%s, " % math_fun_join_1
+ sql += "t2.ts from regular_table_1 t1 , regular_table_2 t2 where t1.ts = t2.ts and "
+ sql += "%s " % random.choice(self.q_u_where)
+ sql += "and %s )" % random.choice(self.q_u_or_where)
+ sql += "%s " % random.choice(self.order_where)
+ sql += "%s " % random.choice(self.limit1_where)
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ tdSql.query(sql)
+ self.cur1.execute(sql)
+ elif (mathlist == ['MAVG']) or (mathlist == ['SAMPLE'])or (mathlist == ['TAIL']) or (mathlist == ['CSUM']) \
+ or (mathlist == ['HYPERLOGLOG']) or (mathlist == ['UNIQUE']):
+ sql = "select max(asct1) from ( select "
+ #sql += "%s, " % math_fun_join_1
+ sql += "%s as asct1 " % math_fun_join_2
+ # sql += "t1.%s, " % random.choice(self.q_select)
+ # sql += "t2.%s, " % random.choice(self.q_select)
+ # sql += "%s, " % math_fun_join_1
+ sql += "from regular_table_1 t1 , regular_table_2 t2 where t1.ts = t2.ts and "
+ sql += "%s " % random.choice(self.q_u_where)
+ sql += "and %s )" % random.choice(self.q_u_or_where)
+ sql += "%s " % random.choice(self.limit1_where)
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ tdSql.query(sql)
+ self.cur1.execute(sql)
+
+ tdSql.query("select 1-7 as math_nest from stable_1 limit 1;")
+ for i in range(self.fornum):
+ if (mathlist == ['SIN','COS','TAN','ASIN','ACOS','ATAN']) or (mathlist == ['ABS','SQRT']) \
+ or (mathlist == ['POW','LOG']) or (mathlist == ['FLOOR','CEIL','ROUND']) :
+ sql = "select ts , abs(asct1) from ( select "
+ sql += "%s as asct1, ts ," % math_fun_1
+ sql += "%s as asct2, " % math_fun_2
+ sql += "%s, " % random.choice(self.q_select)
+ sql += "%s, " % random.choice(self.t_select)
+ sql += "ts from stable_1 where "
+ sql += "%s " % random.choice(self.qt_where)
+ sql += "%s " % random.choice(self.order_where)
+ sql += ") ;"
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ # tdSql.query(sql)
+ # tdSql.checkRows(300)
+ # self.cur1.execute(sql)# TD-16039
+ elif (mathlist == ['MAVG']) or (mathlist == ['SAMPLE']) or (mathlist == ['TAIL']) or (mathlist == ['CSUM'])\
+ or (mathlist == ['HYPERLOGLOG']) or (mathlist == ['UNIQUE']):
+ sql = "select abs(asct1) from ( select "
+ sql += "%s as asct1 " % math_fun_1
+ sql += "from stable_1 where "
+ sql += "%s " % random.choice(self.qt_where)
+ sql += ") ;"
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ # tdSql.query(sql)
+ # self.cur1.execute(sql)# TD-16039
+
+ tdSql.query("select 1-8 as math_nest from stable_1 limit 1;")
+ for i in range(self.fornum):
+ if (mathlist == ['SIN','COS','TAN','ASIN','ACOS','ATAN']) or (mathlist == ['ABS','SQRT']) \
+ or (mathlist == ['POW','LOG']) or (mathlist == ['FLOOR','CEIL','ROUND']) :
+ sql = "select ts,floor(asct1) "
+ sql += "from ( select "
+ sql += "%s, " % random.choice(self.s_s_select)
+ sql += "%s as asct1, ts ," % math_fun_1
+ sql += "%s as asct2, " % math_fun_2
+ sql += "%s, " % random.choice(self.q_select)
+ sql += "%s, " % random.choice(self.t_select)
+ sql += "ts from stable_1 where "
+ sql += "%s " % random.choice(self.qt_where)
+ sql += "%s " % random.choice(self.order_where)
+ sql += ") ;"
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ #tdSql.query(sql)
+ # tdSql.checkRows(300)
+ # self.cur1.execute(sql)# TD-16039
+ elif (mathlist == ['MAVG']) or (mathlist == ['SAMPLE'])or (mathlist == ['TAIL']) or (mathlist == ['CSUM'])\
+ or (mathlist == ['HYPERLOGLOG']) or (mathlist == ['UNIQUE']):
+ sql = "select floor(asct1) "
+ sql += "from ( select "
+ sql += "%s as asct1 " % math_fun_1
+ sql += " from stable_1 where "
+ sql += "%s " % random.choice(self.qt_where)
+ sql += ") ;"
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ # tdSql.query(sql)
+ # self.cur1.execute(sql)# TD-16039
+
+ tdSql.query("select 1-9 as math_nest from stable_1 limit 1;")
+ for i in range(self.fornum):
+ if (mathlist == ['SIN','COS','TAN','ASIN','ACOS','ATAN']) or (mathlist == ['ABS','SQRT']) \
+ or (mathlist == ['POW','LOG']) or (mathlist == ['FLOOR','CEIL','ROUND']) :
+ sql = "select ts , max(asct1) from ( select t1.ts as ts,"
+ sql += "%s, " % math_fun_join_1
+ sql += "%s as asct1, " % math_fun_join_2
+ sql += "t1.%s, " % random.choice(self.q_select)
+ sql += "t1.%s, " % random.choice(self.q_select)
+ sql += "t2.%s, " % random.choice(self.q_select)
+ sql += "t2.%s, " % random.choice(self.q_select)
+ sql += "t2.ts from stable_1 t1 , stable_2 t2 where t1.ts = t2.ts and "
+ sql += "%s " % random.choice(self.t_join_where)
+ sql += "and %s " % random.choice(self.t_u_where)
+ sql += "and %s " % random.choice(self.t_u_or_where)
+ sql += "%s " % random.choice(self.order_u_where)
+ sql += "%s " % random.choice(self.limit1_where)
+ sql += ") ;"
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ # tdSql.query(sql)
+ # self.cur1.execute(sql)# TD-16039
+ elif (mathlist == ['MAVG']) or (mathlist == ['SAMPLE'])or (mathlist == ['TAIL']) or (mathlist == ['CSUM'])\
+ or (mathlist == ['HYPERLOGLOG']) or (mathlist == ['UNIQUE']):
+ sql = "select max(asct1) from ( select "
+ sql += "%s as asct1 " % math_fun_join_2
+ # sql += "t1.%s, " % random.choice(self.q_select)
+ # sql += "t1.%s, " % random.choice(self.q_select)
+ # sql += "t2.%s, " % random.choice(self.q_select)
+ # sql += "t2.%s, " % random.choice(self.q_select)
+ sql += "from stable_1 t1 , stable_2 t2 where t1.ts = t2.ts and "
+ sql += "%s " % random.choice(self.t_join_where)
+ sql += "and %s " % random.choice(self.t_u_where)
+ sql += "and %s " % random.choice(self.t_u_or_where)
+ sql += "%s " % random.choice(self.limit1_where)
+ sql += ") ;"
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ # tdSql.query(sql)# TD-16039
+ # self.cur1.execute(sql)
+
+ self.restartDnodes()
+ tdSql.query("select 1-10 as math_nest from stable_1 limit 1;")
+ for i in range(self.fornum):
+ if (mathlist == ['SIN','COS','TAN','ASIN','ACOS','ATAN']) or (mathlist == ['ABS','SQRT']) \
+ or (mathlist == ['POW','LOG']) or (mathlist == ['FLOOR','CEIL','ROUND']) :
+ sql = "select ts , min(asct1) from ( select "
+ sql += "%s as asct1, ts ," % math_fun_1
+ sql += "%s as asct2, " % math_fun_2
+ sql += "%s, " % random.choice(self.s_r_select)
+ sql += "%s, " % random.choice(self.q_select)
+ sql += "ts from stable_1 where "
+ sql += "%s " % random.choice(self.q_where)
+ sql += "%s " % random.choice(self.order_where)
+ sql += ") %s " % random.choice(self.unionall_or_union)
+ sql += "select ts , max(asct2) from ( select "
+ sql += "%s as asct1, ts ," % math_fun_1
+ sql += "%s as asct2, " % math_fun_2
+ sql += "%s, " % random.choice(self.s_r_select)
+ sql += "%s, " % random.choice(self.q_select)
+ sql += "ts from stable_2 where "
+ sql += "%s " % random.choice(self.q_where)
+ sql += "%s " % random.choice(self.order_where)
+ sql += "%s " % random.choice(self.limit1_where)
+ sql += ")"
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ #TD-15437 tdSql.query(sql)
+ #TD-15437 self.cur1.execute(sql)
+ elif (mathlist == ['MAVG']) or (mathlist == ['SAMPLE'])or (mathlist == ['TAIL']) or (mathlist == ['CSUM']) \
+ or (mathlist == ['HYPERLOGLOG']) or (mathlist == ['UNIQUE']):
+ sql = "select min(asct1) from ( select "
+ sql += "%s as asct1 " % math_fun_1
+ sql += " from stable_1 where "
+ sql += "%s " % random.choice(self.q_where)
+ sql += ") %s " % random.choice(self.unionall_or_union)
+ sql += "select max(asct2) from ( select "
+ sql += "%s as asct2 " % math_fun_2
+ sql += "from stable_2 where "
+ sql += "%s " % random.choice(self.q_where)
+ sql += "%s " % random.choice(self.limit1_where)
+ sql += ")"
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ #TD-15437 tdSql.query(sql)
+ #TD-15437 self.cur1.execute(sql)
+
+ #3 inter union not support
+ tdSql.query("select 1-11 as math_nest from stable_1 limit 1;")
+ for i in range(self.fornum):
+ if (mathlist == ['SIN','COS','TAN','ASIN','ACOS','ATAN']) or (mathlist == ['ABS','SQRT']) \
+ or (mathlist == ['POW','LOG']) or (mathlist == ['FLOOR','CEIL','ROUND']) :
+ sql = "select ts , min(asct1), max(asct2) from ( select "
+ sql += "%s as asct1, ts ," % math_fun_1
+ sql += "%s as asct2, " % math_fun_2
+ sql += "%s, " % random.choice(self.s_r_select)
+ sql += "%s, " % random.choice(self.q_select)
+ sql += "ts from stable_1 where "
+ sql += "%s " % random.choice(self.q_where)
+ sql += "%s " % random.choice(self.order_where)
+ #sql += "%s " % random.choice(limit1_where)
+ sql += " %s " % random.choice(self.unionall_or_union)
+ sql += " select "
+ sql += "%s as asct1, ts ," % math_fun_1
+ sql += "%s as asct2, " % math_fun_2
+ sql += "%s, " % random.choice(self.s_r_select)
+ sql += "%s, " % random.choice(self.q_select)
+ sql += "ts from stable_2 where "
+ sql += "%s " % random.choice(self.q_where)
+ sql += "%s " % random.choice(self.order_where)
+ sql += "%s " % random.choice(self.limit1_where)
+ sql += ")"
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ #TD-15837 tdSql.query(sql)
+ # self.cur1.execute(sql)
+ elif (mathlist == ['MAVG']) or (mathlist == ['SAMPLE'])or (mathlist == ['TAIL']) or (mathlist == ['CSUM']) \
+ or (mathlist == ['HYPERLOGLOG']) or (mathlist == ['UNIQUE']):
+ sql = "select min(asct1) from ( select "
+ sql += "%s as asct1 " % math_fun_1
+ sql += " from stable_1 where "
+ sql += "%s " % random.choice(self.q_where)
+ sql += " %s " % random.choice(self.unionall_or_union)
+ sql += " select "
+ sql += "%s as asct2 " % math_fun_2
+ sql += " from stable_2 where "
+ sql += "%s " % random.choice(self.q_where)
+ sql += "%s " % random.choice(self.limit1_where)
+ sql += ")"
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ #TD-15837 tdSql.query(sql)
+ #self.cur1.execute(sql)
+
+ tdSql.query("select 1-12 as math_nest from stable_1 limit 1;")
+ for i in range(self.fornum):
+ if (mathlist == ['SIN','COS','TAN','ASIN','ACOS','ATAN']) or (mathlist == ['ABS','SQRT']) \
+ or (mathlist == ['POW','LOG']) or (mathlist == ['FLOOR','CEIL','ROUND']) :
+ sql = "select ts , max(asct1) from ( select t1.ts as ts,"
+ sql += "%s, " % math_fun_join_1
+ sql += "%s as asct1, " % math_fun_join_2
+ sql += "t1.%s, " % random.choice(self.q_select)
+ sql += "t1.%s, " % random.choice(self.q_select)
+ sql += "t2.%s, " % random.choice(self.q_select)
+ sql += "t2.%s, " % random.choice(self.q_select)
+ sql += "t2.ts from stable_1 t1 , stable_2 t2 where t1.ts = t2.ts and "
+ sql += "%s " % random.choice(self.t_u_where)
+ sql += "and %s " % random.choice(self.t_u_or_where)
+ sql += "%s " % random.choice(self.order_u_where)
+ sql += "%s " % random.choice(self.limit1_where)
+ sql += ");"
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ # tdSql.query(sql)
+ # self.cur1.execute(sql)# TD-16039
+ elif (mathlist == ['MAVG']) or (mathlist == ['SAMPLE'])or (mathlist == ['TAIL']) or (mathlist == ['CSUM'])\
+ or (mathlist == ['HYPERLOGLOG']) or (mathlist == ['UNIQUE']):
+ sql = "select max(asct1) from ( select "
+ sql += "%s as asct1 " % math_fun_join_2
+ sql += " from stable_1 t1 , stable_2 t2 where t1.ts = t2.ts and "
+ sql += "%s " % random.choice(self.t_u_where)
+ sql += "and %s " % random.choice(self.t_u_or_where)
+ sql += "%s " % random.choice(self.limit1_where)
+ sql += ");"
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ # tdSql.query(sql)
+ # self.cur1.execute(sql)# TD-16039
+
+ tdSql.query("select 1-13 as math_nest from stable_1 limit 1;")
+ for i in range(self.fornum):
+ if (mathlist == ['SIN','COS','TAN','ASIN','ACOS','ATAN']) or (mathlist == ['ABS','SQRT']) \
+ or (mathlist == ['POW','LOG']) or (mathlist == ['FLOOR','CEIL','ROUND']) :
+ sql = "select ts ,"
+ sql += "%s, " % math_fun_1
+ sql += "%s, " % random.choice(self.q_select)
+ sql += "%s, " % random.choice(self.q_select)
+ sql += "%s " % math_fun_2
+ sql += "%s " % random.choice(self.t_select)
+ sql += " from ( select * from stable_1 where "
+ sql += "%s " % random.choice(self.qt_where)
+ sql += "%s " % random.choice(self.order_where)
+ sql += ") ;"
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ # tdSql.query(sql)
+ # tdSql.checkRows(300)
+ # self.cur1.execute(sql) # TD-16039
+ elif (mathlist == ['MAVG']) or (mathlist == ['SAMPLE'])or (mathlist == ['TAIL']) or (mathlist == ['CSUM']) \
+ or (mathlist == ['HYPERLOGLOG']) or (mathlist == ['UNIQUE']):
+ sql = "select "
+ sql += "%s " % math_fun_2
+ sql += "%s " % random.choice(self.t_select)
+ sql += " from ( select * from stable_1 where "
+ sql += "%s " % random.choice(self.qt_where)
+ sql += ") ;"
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ #TD15973 tdSql.query(sql)
+ #TD15973 self.cur1.execute(sql)
+
+ tdSql.query("select 1-14 as math_nest from stable_1 limit 1;")
+ for i in range(self.fornum):
+ if (mathlist == ['SIN','COS','TAN','ASIN','ACOS','ATAN']) or (mathlist == ['ABS','SQRT']) \
+ or (mathlist == ['POW','LOG']) or (mathlist == ['FLOOR','CEIL','ROUND']) :
+ sql = "select avg(asct1),count(asct2) from ( select "
+ sql += "%s as asct1, " % math_fun_1
+ sql += "%s as asct2" % math_fun_2
+ sql += " from stable_1 where "
+ sql += "%s " % random.choice(self.q_where)
+ sql += "%s " % random.choice(self.partiton_where)
+ sql += "%s " % random.choice(self.order_desc_where)
+ sql += "%s " % random.choice([self.limit_where[2] , self.limit_where[3]] )
+ sql += " ) ;"
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ # tdSql.query(sql)
+ # self.cur1.execute(sql) # TD-16039
+ elif (mathlist == ['MAVG']) or (mathlist == ['SAMPLE'])or (mathlist == ['TAIL']) or (mathlist == ['CSUM'])\
+ or (mathlist == ['HYPERLOGLOG']) or (mathlist == ['UNIQUE']):
+ sql = "select avg(asct1) from ( select "
+ sql += "%s as asct1 " % math_fun_1
+ sql += " from stable_1 where "
+ sql += "%s " % random.choice(self.q_where)
+ sql += "%s " % random.choice(self.partiton_where)
+ sql += "%s " % random.choice([self.limit_where[2] , self.limit_where[3]] )
+ sql += " ) ;"
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ # tdSql.query(sql)
+ # self.cur1.execute(sql) # TD-16039
+
+ tdSql.query("select 1-15 as math_nest from stable_1 limit 1;")
+ for i in range(self.fornum):
+ if (mathlist == ['SIN','COS','TAN','ASIN','ACOS','ATAN']) or (mathlist == ['ABS','SQRT']) \
+ or (mathlist == ['POW','LOG']) or (mathlist == ['FLOOR','CEIL','ROUND']) :
+ sql = "select ts , max(asct1) from ( select t1.ts as ts,"
+ sql += "%s, " % math_fun_join_1
+ sql += "%s as asct1, " % math_fun_join_2
+ sql += "t1.%s, " % random.choice(self.q_select)
+ sql += "t1.%s, " % random.choice(self.q_select)
+ sql += "t2.%s, " % random.choice(self.q_select)
+ sql += "t2.%s " % random.choice(self.q_select)
+ sql += "from stable_1 t1, stable_2 t2 where t1.ts = t2.ts and "
+ sql += "%s " % random.choice(self.t_join_where)
+ sql += " and %s " % random.choice(self.qt_u_or_where)
+ sql += "%s " % random.choice(self.partiton_where_j)
+ sql += "%s " % random.choice(self.slimit1_where)
+ sql += ") "
+ sql += "%s " % random.choice(self.order_desc_where)
+ sql += "%s ;" % random.choice(self.limit_u_where)
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ # tdSql.query(sql)
+ # self.cur1.execute(sql) # TD-16039
+ elif (mathlist == ['MAVG']) or (mathlist == ['SAMPLE']) or (mathlist == ['TAIL']) or (mathlist == ['CSUM'])\
+ or (mathlist == ['HYPERLOGLOG']) or (mathlist == ['UNIQUE']):
+ sql = "select max(asct1) from ( select "
+ sql += "%s as asct1 " % math_fun_join_2
+ sql += "from stable_1 t1, stable_2 t2 where t1.ts = t2.ts and "
+ sql += "%s " % random.choice(self.t_join_where)
+ sql += " and %s " % random.choice(self.qt_u_or_where)
+ sql += "%s " % random.choice(self.partiton_where_j)
+ sql += "%s " % random.choice(self.slimit1_where)
+ sql += ") "
+ sql += "%s ;" % random.choice(self.limit_u_where)
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ # tdSql.query(sql)
+ # self.cur1.execute(sql) # TD-16039
+
+ #taos -f sql
+ startTime_taos_f = time.time()
+ print("taos -f %s sql start!" %mathlist)
+ taos_cmd1 = "taos -f %s/%s.sql" % (self.testcasePath,self.testcaseFilename)
+ _ = subprocess.check_output(taos_cmd1, shell=True).decode("utf-8")
+ print("taos -f %s sql over!" %mathlist)
+ endTime_taos_f = time.time()
+ print("taos_f total time %ds" % (endTime_taos_f - startTime_taos_f))
+
+ print("=========%s====over=============" %mathlist)
+
+
+ def str_nest(self,strlist):
+
+ print("==========%s===start=============" %strlist)
+ os.system("rm -rf %s/%s.sql" % (self.testcasePath,self.testcaseFilename))
+
+ self.dropandcreateDB_random("%s" %self.db_nest, 1)
+
+ if (strlist == ['LTRIM','RTRIM','LOWER','UPPER']) or (strlist == ['LENGTH','CHAR_LENGTH']) \
+ or (strlist == ['']):
+ str_functions = strlist
+ fun_fix_column = ['(q_nchar)','(q_binary)','(q_nchar_null)','(q_binary_null)']
+ fun_column_1 = random.sample(str_functions,1)+random.sample(fun_fix_column,1)
+ str_fun_1 = str(fun_column_1).replace("[","").replace("]","").replace("'","").replace(", ","")
+ fun_column_2 = random.sample(str_functions,1)+random.sample(fun_fix_column,1)
+ str_fun_2 = str(fun_column_2).replace("[","").replace("]","").replace("'","").replace(", ","")
+
+ fun_fix_column_j = ['(t1.q_nchar)','(t1.q_binary)','(t1.q_nchar_null)','(t1.q_binary_null)',
+ '(t2.q_nchar)','(t2.q_binary)','(t2.q_nchar_null)','(t2.q_binary_null)']
+ fun_column_join_1 = random.sample(str_functions,1)+random.sample(fun_fix_column_j,1)
+ str_fun_join_1 = str(fun_column_join_1).replace("[","").replace("]","").replace("'","").replace(", ","")
+ fun_column_join_2 = random.sample(str_functions,1)+random.sample(fun_fix_column_j,1)
+ str_fun_join_2 = str(fun_column_join_2).replace("[","").replace("]","").replace("'","").replace(", ","")
+
+ fun_fix_column_s = ['(q_nchar)','(q_binary)','(q_nchar_null)','(q_binary_null)','(loc)','(tbname)']
+ fun_column_s_1 = random.sample(str_functions,1)+random.sample(fun_fix_column_s,1)
+ str_fun_s_1 = str(fun_column_s_1).replace("[","").replace("]","").replace("'","").replace(", ","")
+ fun_column_s_2 = random.sample(str_functions,1)+random.sample(fun_fix_column_s,1)
+ str_fun_s_2 = str(fun_column_s_2).replace("[","").replace("]","").replace("'","").replace(", ","")
+
+ fun_fix_column_s_j = ['(t1.q_nchar)','(t1.q_binary)','(t1.q_nchar_null)','(t1.q_binary_null)','(t1.loc)','(t1.tbname)',
+ '(t2.q_nchar)','(t2.q_binary)','(t2.q_nchar_null)','(t2.q_binary_null)','(t2.loc)','(t2.tbname)']
+ fun_column_join_s_1 = random.sample(str_functions,1)+random.sample(fun_fix_column_s_j,1)
+ str_fun_join_s_1 = str(fun_column_join_s_1).replace("[","").replace("]","").replace("'","").replace(", ","")
+ fun_column_join_s_2 = random.sample(str_functions,1)+random.sample(fun_fix_column_s_j,1)
+ str_fun_join_s_2 = str(fun_column_join_s_2).replace("[","").replace("]","").replace("'","").replace(", ","")
+
+ elif (strlist == ['SUBSTR']) :
+ str_functions = strlist
+ pos = random.randint(1, 20)
+ sub_len = random.randint(1, 10)
+ fun_fix_column = ['(q_nchar,pos)','(q_binary,pos)','(q_nchar_null,pos)','(q_binary_null,pos)',
+ '(q_nchar,pos,sub_len)','(q_binary,pos,sub_len)','(q_nchar_null,pos,sub_len)','(q_binary_null,pos,sub_len)',]
+ fun_column_1 = random.sample(str_functions,1)+random.sample(fun_fix_column,1)
+ str_fun_1 = str(fun_column_1).replace("[","").replace("]","").replace("'","").replace(", ","").replace("pos",str(pos)).replace("sub_len",str(sub_len))
+ fun_column_2 = random.sample(str_functions,1)+random.sample(fun_fix_column,1)
+ str_fun_2 = str(fun_column_2).replace("[","").replace("]","").replace("'","").replace(", ","").replace("pos",str(pos)).replace("sub_len",str(sub_len))
+
+ fun_fix_column_j = ['(t1.q_nchar,pos)','(t1.q_binary,pos)','(t1.q_nchar_null,pos)','(t1.q_binary_null,pos)',
+ '(t1.q_nchar,pos,sub_len)','(t1.q_binary,pos,sub_len)','(t1.q_nchar_null,pos,sub_len)','(t1.q_binary_null,pos,sub_len)',
+ '(t2.q_nchar,pos)','(t2.q_binary,pos)','(t2.q_nchar_null,pos)','(t2.q_binary_null,pos)',
+ '(t2.q_nchar,pos,sub_len)','(t2.q_binary,pos,sub_len)','(t2.q_nchar_null,pos,sub_len)','(t2.q_binary_null,pos,sub_len)']
+ fun_column_join_1 = random.sample(str_functions,1)+random.sample(fun_fix_column_j,1)
+ str_fun_join_1 = str(fun_column_join_1).replace("[","").replace("]","").replace("'","").replace(", ","").replace("pos",str(pos)).replace("sub_len",str(sub_len))
+ fun_column_join_2 = random.sample(str_functions,1)+random.sample(fun_fix_column_j,1)
+ str_fun_join_2 = str(fun_column_join_2).replace("[","").replace("]","").replace("'","").replace(", ","").replace("pos",str(pos)).replace("sub_len",str(sub_len))
+
+ fun_fix_column_s = ['(q_nchar,pos)','(q_binary,pos)','(q_nchar_null,pos)','(q_binary_null,pos)','(loc,pos)',
+ '(q_nchar,pos,sub_len)','(q_binary,pos,sub_len)','(q_nchar_null,pos,sub_len)','(q_binary_null,pos,sub_len)','(loc,pos,sub_len)',]
+ fun_column_s_1 = random.sample(str_functions,1)+random.sample(fun_fix_column_s,1)
+ str_fun_s_1 = str(fun_column_s_1).replace("[","").replace("]","").replace("'","").replace(", ","").replace("pos",str(pos)).replace("sub_len",str(sub_len))
+ fun_column_s_2 = random.sample(str_functions,1)+random.sample(fun_fix_column_s,1)
+ str_fun_s_2 = str(fun_column_s_2).replace("[","").replace("]","").replace("'","").replace(", ","").replace("pos",str(pos)).replace("sub_len",str(sub_len))
+
+ fun_fix_column_s_j = ['(t1.q_nchar,pos)','(t1.q_binary,pos)','(t1.q_nchar_null,pos)','(t1.q_binary_null,pos)','(t1.loc,pos)',
+ '(t1.q_nchar,pos,sub_len)','(t1.q_binary,pos,sub_len)','(t1.q_nchar_null,pos,sub_len)','(t1.q_binary_null,pos,sub_len)','(t1.loc,pos,sub_len)',
+ '(t2.q_nchar,pos)','(t2.q_binary,pos)','(t2.q_nchar_null,pos)','(t2.q_binary_null,pos)','(t2.loc,pos)',
+ '(t2.q_nchar,pos,sub_len)','(t2.q_binary,pos,sub_len)','(t2.q_nchar_null,pos,sub_len)','(t2.q_binary_null,pos,sub_len)','(t2.loc,pos,sub_len)']
+ fun_column_join_s_1 = random.sample(str_functions,1)+random.sample(fun_fix_column_s_j,1)
+ str_fun_join_s_1 = str(fun_column_join_s_1).replace("[","").replace("]","").replace("'","").replace(", ","").replace("pos",str(pos)).replace("sub_len",str(sub_len))
+ fun_column_join_s_2 = random.sample(str_functions,1)+random.sample(fun_fix_column_s_j,1)
+ str_fun_join_s_2 = str(fun_column_join_s_2).replace("[","").replace("]","").replace("'","").replace(", ","").replace("pos",str(pos)).replace("sub_len",str(sub_len))
+
+ elif (strlist == ['CONCAT']) :
+ str_functions = strlist
+ i = random.randint(2,8)
+ fun_fix_column = ['q_nchar','q_nchar1','q_nchar2','q_nchar3','q_nchar4','q_nchar5','q_nchar6','q_nchar7','q_nchar8','q_nchar_null',
+ 'q_binary','q_binary1','q_binary2','q_binary3','q_binary4','q_binary5','q_binary6','q_binary7','q_binary8','q_binary_null']
+
+ column1 = str(random.sample(fun_fix_column,i)).replace("[","").replace("]","").replace("'","")
+ fun_column_1 = str(random.sample(str_functions,1))+'('+column1+')'
+ str_fun_1 = str(fun_column_1).replace("[","").replace("]","").replace("'","")
+
+ column2 = str(random.sample(fun_fix_column,i)).replace("[","").replace("]","").replace("'","")
+ fun_column_2 = str(random.sample(str_functions,1))+'('+column2+')'
+ str_fun_2 = str(fun_column_2).replace("[","").replace("]","").replace("'","")
+
+ fun_fix_column_j = ['(t1.q_nchar)','(t1.q_nchar1)','(t1.q_nchar2)','(t1.q_nchar3)','(t1.q_nchar4)','(t1.q_nchar5)','(t1.q_nchar6)','(t1.q_nchar7)','(t1.q_nchar8)','(t1.q_nchar_null)',
+ '(t2.q_nchar)','(t2.q_nchar1)','(t2.q_nchar2)','(t2.q_nchar3)','(t2.q_nchar4)','(t2.q_nchar5)','(t2.q_nchar6)','(t2.q_nchar7)','(t2.q_nchar8)','(t2.q_nchar_null)',
+ '(t1.q_binary)','(t1.q_binary1)','(t1.q_binary2)','(t1.q_binary3)','(t1.q_binary4)','(t1.q_binary5)','(t1.q_binary6)','(t1.q_binary7)','(t1.q_binary8)','(t1.q_binary_null)',
+ '(t2.q_binary)','(t2.q_binary1)','(t2.q_binary2)','(t2.q_binary3)','(t2.q_binary4)','(t2.q_binary5)','(t2.q_binary6)','(t2.q_binary7)','(t2.q_binary8)','(t2.q_binary_null)']
+
+ column_j1 = str(random.sample(fun_fix_column_j,i)).replace("[","").replace("]","").replace("'","")
+ fun_column_join_1 = str(random.sample(str_functions,1))+'('+column_j1+')'
+ str_fun_join_1 = str(fun_column_join_1).replace("[","").replace("]","").replace("'","")
+
+ column_j2 = str(random.sample(fun_fix_column_j,i)).replace("[","").replace("]","").replace("'","")
+ fun_column_join_2 = str(random.sample(str_functions,1))+'('+column_j2+')'
+ str_fun_join_2 = str(fun_column_join_2).replace("[","").replace("]","").replace("'","")
+
+ fun_fix_column_s = ['q_nchar','q_nchar1','q_nchar2','q_nchar3','q_nchar4','q_nchar5','q_nchar6','q_nchar7','q_nchar8','loc','q_nchar_null',
+ 'q_binary','q_binary1','q_binary2','q_binary3','q_binary4','q_binary5','q_binary6','q_binary7','q_binary8','q_binary_null']
+
+ column_s1 = str(random.sample(fun_fix_column_s,i)).replace("[","").replace("]","").replace("'","")
+ fun_column_s_1 = str(random.sample(str_functions,1))+'('+column_s1+')'
+ str_fun_s_1 = str(fun_column_s_1).replace("[","").replace("]","").replace("'","")
+
+ column_s2 = str(random.sample(fun_fix_column_s,i)).replace("[","").replace("]","").replace("'","")
+ fun_column_s_2 = str(random.sample(str_functions,1))+'('+column_s2+')'
+ str_fun_s_2 = str(fun_column_s_2).replace("[","").replace("]","").replace("'","")
+
+ fun_fix_column_s_j = ['(t1.q_nchar)','(t1.q_nchar1)','(t1.q_nchar2)','(t1.q_nchar3)','(t1.q_nchar4)','(t1.q_nchar5)','(t1.q_nchar6)','(t1.q_nchar7)','(t1.q_nchar8)','(t1.q_nchar_null)','(t1.loc)',
+ '(t2.q_nchar)','(t2.q_nchar1)','(t2.q_nchar2)','(t2.q_nchar3)','(t2.q_nchar4)','(t2.q_nchar5)','(t2.q_nchar6)','(t2.q_nchar7)','(t2.q_nchar8)','(t2.q_nchar_null)','(t2.loc)',
+ '(t1.q_binary)','(t1.q_binary1)','(t1.q_binary2)','(t1.q_binary3)','(t1.q_binary4)','(t1.q_binary5)','(t1.q_binary6)','(t1.q_binary7)','(t1.q_binary8)','(t1.q_binary_null)',
+ '(t2.q_binary)','(t2.q_binary1)','(t2.q_binary2)','(t2.q_binary3)','(t2.q_binary4)','(t2.q_binary5)','(t2.q_binary6)','(t2.q_binary7)','(t2.q_binary8)','(t2.q_binary_null)']
+
+ column_j_s1 = str(random.sample(fun_fix_column_s_j,i)).replace("[","").replace("]","").replace("'","")
+ fun_column_join_s_1 = str(random.sample(str_functions,1))+'('+column_j_s1+')'
+ str_fun_join_s_1 = str(fun_column_join_s_1).replace("[","").replace("]","").replace("'","")
+
+ column_j_s2 = str(random.sample(fun_fix_column_s_j,i)).replace("[","").replace("]","").replace("'","")
+ fun_column_join_s_2 = str(random.sample(str_functions,1))+'('+column_j_s2+')'
+ str_fun_join_s_2 = str(fun_column_join_s_2).replace("[","").replace("]","").replace("'","")
+
+ elif (strlist == ['CONCAT_WS']):
+ str_functions = strlist
+ i = random.randint(2,8)
+ fun_fix_column = ['q_nchar','q_nchar1','q_nchar2','q_nchar3','q_nchar4','q_nchar5','q_nchar6','q_nchar7','q_nchar8','q_nchar_null',
+ 'q_binary','q_binary1','q_binary2','q_binary3','q_binary4','q_binary5','q_binary6','q_binary7','q_binary8','q_binary_null']
+
+ separators = ['',' ','abc','123','!','@','#','$','%','^','&','*','(',')','-','_','+','=','{',
+ '[','}',']','|',';',':',',','.','<','>','?','/','~','`','taos','涛思']
+ separator = str(random.sample(separators,i)).replace("[","").replace("]","")
+
+ column1 = str(random.sample(fun_fix_column,i)).replace("[","").replace("]","").replace("'","")
+ fun_column_1 = str(random.sample(str_functions,1))+'('+'\"'+separator+'\",'+column1+')'
+ str_fun_1 = str(fun_column_1).replace("[","").replace("]","").replace("'","")
+
+ column2 = str(random.sample(fun_fix_column,i)).replace("[","").replace("]","").replace("'","")
+ fun_column_2 = str(random.sample(str_functions,1))+'('+'\"'+separator+'\",'+column2+')'
+ str_fun_2 = str(fun_column_2).replace("[","").replace("]","").replace("'","")
+
+ fun_fix_column_j = ['(t1.q_nchar)','(t1.q_nchar1)','(t1.q_nchar2)','(t1.q_nchar3)','(t1.q_nchar4)','(t1.q_nchar5)','(t1.q_nchar6)','(t1.q_nchar7)','(t1.q_nchar8)','(t1.q_nchar_null)',
+ '(t2.q_nchar)','(t2.q_nchar1)','(t2.q_nchar2)','(t2.q_nchar3)','(t2.q_nchar4)','(t2.q_nchar5)','(t2.q_nchar6)','(t2.q_nchar7)','(t2.q_nchar8)','(t2.q_nchar_null)',
+ '(t1.q_binary)','(t1.q_binary1)','(t1.q_binary2)','(t1.q_binary3)','(t1.q_binary4)','(t1.q_binary5)','(t1.q_binary6)','(t1.q_binary7)','(t1.q_binary8)','(t1.q_binary_null)',
+ '(t2.q_binary)','(t2.q_binary1)','(t2.q_binary2)','(t2.q_binary3)','(t2.q_binary4)','(t2.q_binary5)','(t2.q_binary6)','(t2.q_binary7)','(t2.q_binary8)','(t2.q_binary_null)']
+
+ column_j1 = str(random.sample(fun_fix_column_j,i)).replace("[","").replace("]","").replace("'","")
+ fun_column_join_1 = str(random.sample(str_functions,1))+'('+'\"'+separator+'\",'+column_j1+')'
+ str_fun_join_1 = str(fun_column_join_1).replace("[","").replace("]","").replace("'","")
+
+ column_j2 = str(random.sample(fun_fix_column_j,i)).replace("[","").replace("]","").replace("'","")
+ fun_column_join_2 = str(random.sample(str_functions,1))+'('+'\"'+separator+'\",'+column_j2+')'
+ str_fun_join_2 = str(fun_column_join_2).replace("[","").replace("]","").replace("'","")
+
+ fun_fix_column_s = ['q_nchar','q_nchar1','q_nchar2','q_nchar3','q_nchar4','q_nchar5','q_nchar6','q_nchar7','q_nchar8','loc','q_nchar_null',
+ 'q_binary','q_binary1','q_binary2','q_binary3','q_binary4','q_binary5','q_binary6','q_binary7','q_binary8','q_binary_null']
+
+ column_s1 = str(random.sample(fun_fix_column_s,i)).replace("[","").replace("]","").replace("'","")
+ fun_column_s_1 = str(random.sample(str_functions,1))+'('+'\"'+separator+'\",'+column_s1+')'
+ str_fun_s_1 = str(fun_column_s_1).replace("[","").replace("]","").replace("'","")
+
+ column_s2 = str(random.sample(fun_fix_column_s,i)).replace("[","").replace("]","").replace("'","")
+ fun_column_s_2 = str(random.sample(str_functions,1))+'('+'\"'+separator+'\",'+column_s2+')'
+ str_fun_s_2 = str(fun_column_s_2).replace("[","").replace("]","").replace("'","")
+
+ fun_fix_column_s_j = ['(t1.q_nchar)','(t1.q_nchar1)','(t1.q_nchar2)','(t1.q_nchar3)','(t1.q_nchar4)','(t1.q_nchar5)','(t1.q_nchar6)','(t1.q_nchar7)','(t1.q_nchar8)','(t1.q_nchar_null)','(t1.loc)',
+ '(t2.q_nchar)','(t2.q_nchar1)','(t2.q_nchar2)','(t2.q_nchar3)','(t2.q_nchar4)','(t2.q_nchar5)','(t2.q_nchar6)','(t2.q_nchar7)','(t2.q_nchar8)','(t2.q_nchar_null)','(t2.loc)',
+ '(t1.q_binary)','(t1.q_binary1)','(t1.q_binary2)','(t1.q_binary3)','(t1.q_binary4)','(t1.q_binary5)','(t1.q_binary6)','(t1.q_binary7)','(t1.q_binary8)','(t1.q_binary_null)',
+ '(t2.q_binary)','(t2.q_binary1)','(t2.q_binary2)','(t2.q_binary3)','(t2.q_binary4)','(t2.q_binary5)','(t2.q_binary6)','(t2.q_binary7)','(t2.q_binary8)','(t2.q_binary_null)']
+
+ column_j_s1 = str(random.sample(fun_fix_column_s_j,i)).replace("[","").replace("]","").replace("'","")
+ fun_column_join_s_1 = str(random.sample(str_functions,1))+'('+'\"'+separator+'\",'+column_j_s1+')'
+ str_fun_join_s_1 = str(fun_column_join_s_1).replace("[","").replace("]","").replace("'","")
+
+ column_j_s2 = str(random.sample(fun_fix_column_s_j,i)).replace("[","").replace("]","").replace("'","")
+ fun_column_join_s_2 = str(random.sample(str_functions,1))+'('+'\"'+separator+'\",'+column_j_s2+')'
+ str_fun_join_s_2 = str(fun_column_join_s_2).replace("[","").replace("]","").replace("'","")
+
+
+ tdSql.query("select 1-1 as str_nest from stable_1 limit 1;")
+ for i in range(self.fornum):
+ if (strlist == ['LTRIM','RTRIM','LOWER','UPPER']) or (strlist == ['SUBSTR']) or (strlist == ['CONCAT']) or (strlist == ['CONCAT_WS']) :
+ sql = "select ts , LTRIM(asct1), LOWER(asct1), RTRIM(asct2), UPPER(asct2) from ( select "
+ sql += "%s as asct1, " % str_fun_1
+ sql += "%s as asct2, " % str_fun_2
+ sql += "%s, " % random.choice(self.s_s_select)
+ sql += "%s, " % random.choice(self.q_select)
+ sql += "ts from regular_table_1 where "
+ sql += "%s " % random.choice(self.q_where)
+ sql += "%s " % random.choice(self.order_where)
+ sql += ");"
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ tdSql.query(sql)
+ tdSql.checkRows(100)
+ self.cur1.execute(sql)
+ elif (strlist == ['LENGTH','CHAR_LENGTH']):
+ sql = "select sum(asct1), min(asct1), max(asct2), avg(asct2) from ( select "
+ sql += "%s as asct1, " % str_fun_1
+ sql += "%s as asct2, " % str_fun_2
+ sql += "%s, " % random.choice(self.s_s_select)
+ sql += "%s, " % random.choice(self.q_select)
+ sql += "ts from regular_table_1 where "
+ sql += "%s " % random.choice(self.q_where)
+ sql += "%s " % random.choice(self.order_where)
+ sql += ");"
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ tdSql.query(sql)
+ self.cur1.execute(sql)
+
+ tdSql.query("select 1-2 as str_nest from stable_1 limit 1;")
+ for i in range(self.fornum):
+ if (strlist == ['LTRIM','RTRIM','LOWER','UPPER']) or (strlist == ['SUBSTR']) or (strlist == ['CONCAT']) or (strlist == ['CONCAT_WS']) :
+ sql = "select ts , LTRIM(asct1), LOWER(asct1), RTRIM(asct2), UPPER(asct2) from ( select "
+ sql += "%s as asct1, " % str_fun_1
+ sql += "%s, " % random.choice(self.s_s_select)
+ sql += "%s, " % random.choice(self.q_select)
+ sql += "ts ts from regular_table_1 where "
+ sql += "%s " % random.choice(self.q_where)
+ sql += "%s )" % random.choice(self.order_where)
+ sql += "%s " % random.choice(self.unionall_or_union)
+ sql += "select ts , asct2 from ( select "
+ sql += "%s as asct2, " % str_fun_2
+ sql += "%s, " % random.choice(self.s_s_select)
+ sql += "%s, " % random.choice(self.q_select)
+ sql += "ts ts from regular_table_1 where "
+ sql += "%s " % random.choice(self.q_where)
+ #sql += "%s " % random.choice(having_support)
+ sql += "%s " % random.choice(self.order_where)
+ sql += "%s " % random.choice(self.limit1_where)
+ sql += ")"
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ #TD-15437 tdSql.query(sql)
+ #TD-15437 self.cur1.execute(sql)
+ elif (strlist == ['LENGTH','CHAR_LENGTH']):
+ sql = "select sum(asct1), min(asct1), max(asct2), avg(asct2) from ( select "
+ sql += "%s as asct1, " % str_fun_1
+ sql += "%s, " % random.choice(self.s_s_select)
+ sql += "%s, " % random.choice(self.q_select)
+ sql += "ts ts from regular_table_1 where "
+ sql += "%s " % random.choice(self.q_where)
+ sql += "%s )" % random.choice(self.order_where)
+ sql += "%s " % random.choice(self.unionall_or_union)
+ sql += "select ts , asct2 from ( select "
+ sql += "%s as asct2, " % str_fun_2
+ sql += "%s, " % random.choice(self.s_s_select)
+ sql += "%s, " % random.choice(self.q_select)
+ sql += "ts ts from regular_table_1 where "
+ sql += "%s " % random.choice(self.q_where)
+ #sql += "%s " % random.choice(having_support)
+ sql += "%s " % random.choice(self.order_where)
+ sql += "%s " % random.choice(self.limit1_where)
+ sql += ")"
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ #TD-15437 tdSql.query(sql)
+ #TD-15437 self.cur1.execute(sql)
+
+ tdSql.query("select 1-3 as str_nest from stable_1 limit 1;")
+ for i in range(self.fornum):
+ if (strlist == ['LTRIM','RTRIM','LOWER','UPPER']) or (strlist == ['SUBSTR']) or (strlist == ['CONCAT']) or (strlist == ['CONCAT_WS']):
+ sql = "select ts , LTRIM(asct1), LOWER(asct1), RTRIM(asct2), UPPER(asct2) from ( select "
+ sql += "%s as asct1, ts ," % str_fun_1
+ sql += "%s as asct2, " % str_fun_2
+ sql += "%s, " % random.choice(self.s_s_select)
+ sql += "%s, " % random.choice(self.q_select)
+ sql += "ts from regular_table_1 where "
+ sql += "%s " % random.choice(self.q_where)
+ sql += "%s select " % random.choice(self.unionall_or_union)
+ sql += "%s as asct2, ts ," % str_fun_2
+ sql += "%s as asct1, " % str_fun_1
+ sql += "%s, " % random.choice(self.s_s_select)
+ sql += "%s, " % random.choice(self.q_select)
+ sql += "ts from regular_table_2 where "
+ sql += "%s " % random.choice(self.q_where)
+ sql += "%s " % random.choice(self.order_where)
+ sql += ")"
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ #TD-15473 tdSql.query(sql)
+ #self.cur1.execute(sql)
+ elif (strlist == ['LENGTH','CHAR_LENGTH']):
+ sql = "select sum(asct1), min(asct1), max(asct2), avg(asct2) from ( select "
+ sql += "%s as asct1, ts ," % str_fun_1
+ sql += "%s as asct2, " % str_fun_2
+ sql += "%s, " % random.choice(self.s_s_select)
+ sql += "%s, " % random.choice(self.q_select)
+ sql += "ts from regular_table_1 where "
+ sql += "%s " % random.choice(self.q_where)
+ sql += "%s select " % random.choice(self.unionall_or_union)
+ sql += "%s as asct2, ts ," % str_fun_2
+ sql += "%s as asct1, " % str_fun_1
+ sql += "%s, " % random.choice(self.s_s_select)
+ sql += "%s, " % random.choice(self.q_select)
+ sql += "ts from regular_table_2 where "
+ sql += "%s " % random.choice(self.q_where)
+ sql += "%s " % random.choice(self.order_where)
+ sql += ")"
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ #TD-15473 tdSql.query(sql)
+ #self.cur1.execute(sql)
+
+ tdSql.query("select 1-4 as str_nest from stable_1 limit 1;")
+ for i in range(self.fornum):
+ if (strlist == ['LTRIM','RTRIM','LOWER','UPPER']) or (strlist == ['SUBSTR']) or (strlist == ['CONCAT']) or (strlist == ['CONCAT_WS']):
+ sql = "select ts , LTRIM(asct1), LOWER(asct1), RTRIM(asct2), UPPER(asct2) from ( select t1.ts as ts,"
+ sql += "%s as asct2, " % str_fun_join_1
+ sql += "%s as asct1, " % str_fun_join_2
+ sql += "%s, " % str_fun_join_1
+ sql += "t1.%s, " % random.choice(self.q_select)
+ sql += "t2.%s, " % random.choice(self.q_select)
+ sql += "t2.ts from regular_table_1 t1 , regular_table_2 t2 where t1.ts = t2.ts and "
+ sql += "%s " % random.choice(self.q_u_where)
+ sql += "and %s " % random.choice(self.q_u_or_where)
+ sql += "%s " % random.choice(self.order_u_where)
+ sql += ");"
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ tdSql.query(sql)
+ tdSql.checkRows(100)
+ self.cur1.execute(sql)
+ elif (strlist == ['LENGTH','CHAR_LENGTH']):
+ sql = "select sum(asct1), min(asct1), max(asct2), avg(asct2) from ( select t1.ts as ts,"
+ sql += "%s as asct2, " % str_fun_join_1
+ sql += "%s as asct1, " % str_fun_join_2
+ sql += "%s, " % str_fun_join_1
+ sql += "t1.%s, " % random.choice(self.q_select)
+ sql += "t2.%s, " % random.choice(self.q_select)
+ sql += "t2.ts from regular_table_1 t1 , regular_table_2 t2 where t1.ts = t2.ts and "
+ sql += "%s " % random.choice(self.q_u_where)
+ sql += "and %s " % random.choice(self.q_u_or_where)
+ sql += "%s " % random.choice(self.order_u_where)
+ sql += ");"
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ tdSql.query(sql)
+ self.cur1.execute(sql)
+
+ tdSql.query("select 1-5 as str_nest from stable_1 limit 1;")
+ for i in range(self.fornum):
+ if (strlist == ['LTRIM','RTRIM','LOWER','UPPER']) or (strlist == ['SUBSTR']) or (strlist == ['CONCAT']) or (strlist == ['CONCAT_WS']):
+ sql = "select ts ,"
+ sql += "%s, " % str_fun_1
+ sql += "%s, " % random.choice(self.q_select)
+ sql += "%s, " % random.choice(self.q_select)
+ sql += "%s " % str_fun_2
+ sql += " from ( select * from regular_table_1 ) where "
+ sql += "%s " % random.choice(self.q_where)
+ sql += "%s " % random.choice(self.order_where)
+ sql += " ;"
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ tdSql.query(sql)
+ tdSql.checkRows(100)
+ self.cur1.execute(sql)
+ elif (strlist == ['LENGTH','CHAR_LENGTH']):
+ sql = "select ts ,"
+ sql += "%s, " % str_fun_1
+ sql += "%s, " % random.choice(self.q_select)
+ sql += "%s, " % random.choice(self.q_select)
+ sql += "%s " % str_fun_2
+ sql += " from ( select * from regular_table_1 ) where "
+ sql += "%s " % random.choice(self.q_where)
+ sql += "%s " % random.choice(self.order_where)
+ sql += " ;"
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ tdSql.query(sql)
+ tdSql.checkRows(100)
+ self.cur1.execute(sql)
+
+ tdSql.query("select 1-6 as str_nest from stable_1 limit 1;")
+ for i in range(self.fornum):
+ if (strlist == ['LTRIM','RTRIM','LOWER','UPPER']) or (strlist == ['SUBSTR']) or (strlist == ['CONCAT']) or (strlist == ['CONCAT_WS']):
+ sql = "select ts , LTRIM(asct1), LOWER(asct1), RTRIM(asct2), UPPER(asct2) from ( select t1.ts as ts,"
+ sql += "%s as asct2, " % str_fun_join_1
+ sql += "%s as asct1, " % str_fun_join_2
+ sql += "t1.%s, " % random.choice(self.q_select)
+ sql += "t2.%s, " % random.choice(self.q_select)
+ sql += "%s, " % str_fun_join_1
+ sql += "t2.ts from regular_table_1 t1 , regular_table_2 t2 where t1.ts = t2.ts and "
+ sql += "%s " % random.choice(self.q_u_where)
+ sql += "and %s )" % random.choice(self.q_u_or_where)
+ sql += "%s " % random.choice(self.order_where)
+ sql += "%s " % random.choice(self.limit1_where)
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ tdSql.query(sql)
+ self.cur1.execute(sql)
+ elif (strlist == ['LENGTH','CHAR_LENGTH']):
+ sql = "select sum(asct1), min(asct1), max(asct2), avg(asct2) from ( select t1.ts as ts,"
+ sql += "%s as asct2, " % str_fun_join_1
+ sql += "%s as asct1, " % str_fun_join_2
+ sql += "t1.%s, " % random.choice(self.q_select)
+ sql += "t2.%s, " % random.choice(self.q_select)
+ sql += "%s, " % str_fun_join_1
+ sql += "t2.ts from regular_table_1 t1 , regular_table_2 t2 where t1.ts = t2.ts and "
+ sql += "%s " % random.choice(self.q_u_where)
+ sql += "and %s )" % random.choice(self.q_u_or_where)
+ sql += "%s " % random.choice(self.limit1_where)
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ tdSql.query(sql)
+ self.cur1.execute(sql)
+
+ tdSql.query("select 1-7 as str_nest from stable_1 limit 1;")
+ for i in range(self.fornum):
+ if (strlist == ['LTRIM','RTRIM','LOWER','UPPER']) or (strlist == ['SUBSTR']) or (strlist == ['CONCAT']) or (strlist == ['CONCAT_WS']):
+ sql = "select ts , LTRIM(asct1), LOWER(asct1), RTRIM(asct2), UPPER(asct2) from ( select "
+ sql += "%s as asct1, ts ," % str_fun_s_1
+ sql += "%s as asct2, " % str_fun_s_2
+ sql += "%s, " % random.choice(self.q_select)
+ sql += "%s, " % random.choice(self.t_select)
+ sql += "ts from stable_1 where "
+ sql += "%s " % random.choice(self.qt_where)
+ sql += "%s " % random.choice(self.order_where)
+ sql += ") ;"
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ # tdSql.query(sql)
+ # tdSql.checkRows(300)
+ # self.cur1.execute(sql)# TD-16039
+ elif (strlist == ['LENGTH','CHAR_LENGTH']):
+ sql = "select sum(asct1), min(asct1), max(asct2), avg(asct2) from ( select "
+ sql += "%s as asct1, ts ," % str_fun_s_1
+ sql += "%s as asct2, " % str_fun_s_2
+ sql += "%s, " % random.choice(self.q_select)
+ sql += "%s, " % random.choice(self.t_select)
+ sql += "ts from stable_1 where "
+ sql += "%s " % random.choice(self.qt_where)
+ sql += "%s " % random.choice(self.order_where)
+ sql += ") ;"
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ # tdSql.query(sql)
+ # self.cur1.execute(sql)# TD-16039
+
+ tdSql.query("select 1-8 as str_nest from stable_1 limit 1;")
+ for i in range(self.fornum):
+ if (strlist == ['LTRIM','RTRIM','LOWER','UPPER']) or (strlist == ['SUBSTR']) or (strlist == ['CONCAT']) or (strlist == ['CONCAT_WS']):
+ sql = "select ts, LTRIM(asct1), LOWER(asct1), RTRIM(asct2), UPPER(asct2) "
+ sql += "from ( select "
+ sql += "%s, " % random.choice(self.s_s_select)
+ sql += "%s as asct1, ts ," % str_fun_s_1
+ sql += "%s as asct2, " % str_fun_s_2
+ sql += "%s, " % random.choice(self.q_select)
+ sql += "%s, " % random.choice(self.t_select)
+ sql += "ts from stable_1 where "
+ sql += "%s " % random.choice(self.qt_where)
+ sql += "%s " % random.choice(self.order_where)
+ sql += ") ;"
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ # tdSql.query(sql)
+ # tdSql.checkRows(300)
+ # self.cur1.execute(sql)# TD-16039
+ elif (strlist == ['LENGTH','CHAR_LENGTH']):
+ sql = "select sum(asct1), min(asct1), max(asct2), avg(asct2) "
+ sql += "from ( select "
+ sql += "%s, " % random.choice(self.s_s_select)
+ sql += "%s as asct1, ts ," % str_fun_s_1
+ sql += "%s as asct2, " % str_fun_s_2
+ sql += "%s, " % random.choice(self.q_select)
+ sql += "%s, " % random.choice(self.t_select)
+ sql += "ts from stable_1 where "
+ sql += "%s " % random.choice(self.qt_where)
+ sql += "%s " % random.choice(self.order_where)
+ sql += ") ;"
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ # tdSql.query(sql)
+ # self.cur1.execute(sql)# TD-16039
+
+ tdSql.query("select 1-9 as str_nest from stable_1 limit 1;")
+ for i in range(self.fornum):
+ if (strlist == ['LTRIM','RTRIM','LOWER','UPPER']) or (strlist == ['SUBSTR']) or (strlist == ['CONCAT']) or (strlist == ['CONCAT_WS']):
+ sql = "select ts , LTRIM(asct1), LOWER(asct1), RTRIM(asct2), UPPER(asct2) from ( select t1.ts as ts,"
+ sql += "%s as asct2, " % str_fun_join_s_1
+ sql += "%s as asct1, " % str_fun_join_s_2
+ sql += "t1.%s, " % random.choice(self.q_select)
+ sql += "t1.%s, " % random.choice(self.q_select)
+ sql += "t2.%s, " % random.choice(self.q_select)
+ sql += "t2.%s, " % random.choice(self.q_select)
+ sql += "t2.ts from stable_1 t1 , stable_2 t2 where t1.ts = t2.ts and "
+ sql += "%s " % random.choice(self.t_join_where)
+ sql += "and %s " % random.choice(self.t_u_where)
+ sql += "and %s " % random.choice(self.t_u_or_where)
+ sql += "%s " % random.choice(self.order_u_where)
+ sql += "%s " % random.choice(self.limit1_where)
+ sql += ") ;"
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ # tdSql.query(sql)
+ # self.cur1.execute(sql) # TD-16039
+ elif (strlist == ['LENGTH','CHAR_LENGTH']):
+ sql = "select sum(asct1), min(asct1), max(asct2), avg(asct2) from ( select t1.ts as ts,"
+ sql += "%s as asct2, " % str_fun_join_s_1
+ sql += "%s as asct1, " % str_fun_join_s_2
+ sql += "t1.%s, " % random.choice(self.q_select)
+ sql += "t1.%s, " % random.choice(self.q_select)
+ sql += "t2.%s, " % random.choice(self.q_select)
+ sql += "t2.%s, " % random.choice(self.q_select)
+ sql += "t2.ts from stable_1 t1 , stable_2 t2 where t1.ts = t2.ts and "
+ sql += "%s " % random.choice(self.t_join_where)
+ sql += "and %s " % random.choice(self.t_u_where)
+ sql += "and %s " % random.choice(self.t_u_or_where)
+ sql += "%s " % random.choice(self.order_u_where)
+ sql += "%s " % random.choice(self.limit1_where)
+ sql += ") ;"
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ # tdSql.query(sql)
+ # self.cur1.execute(sql)# TD-16039
+
+ self.restartDnodes()
+ tdSql.query("select 1-10 as str_nest from stable_1 limit 1;")
+ for i in range(self.fornum):
+ if (strlist == ['LTRIM','RTRIM','LOWER','UPPER']) or (strlist == ['SUBSTR']) or (strlist == ['CONCAT']) or (strlist == ['CONCAT_WS']):
+ sql = "select ts , LTRIM(asct1), LOWER(asct1), RTRIM(asct2), UPPER(asct2) from ( select "
+ sql += "%s as asct1, ts ," % str_fun_s_1
+ sql += "%s as asct2, " % str_fun_s_2
+ sql += "%s, " % random.choice(self.s_r_select)
+ sql += "%s, " % random.choice(self.q_select)
+ sql += "ts from stable_1 where "
+ sql += "%s " % random.choice(self.q_where)
+ sql += "%s " % random.choice(self.order_where)
+ sql += ") %s " % random.choice(self.unionall_or_union)
+ sql += "select ts , max(asct2) from ( select "
+ sql += "%s as asct1, ts ," % str_fun_1
+ sql += "%s as asct2, " % str_fun_2
+ sql += "%s, " % random.choice(self.s_r_select)
+ sql += "%s, " % random.choice(self.q_select)
+ sql += "ts from stable_2 where "
+ sql += "%s " % random.choice(self.q_where)
+ sql += "%s " % random.choice(self.order_where)
+ sql += "%s " % random.choice(self.limit1_where)
+ sql += ")"
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ #TD-15437 tdSql.query(sql)
+ #TD-15437 self.cur1.execute(sql)
+ elif (strlist == ['LENGTH','CHAR_LENGTH']):
+ sql = "select sum(asct1), min(asct1), max(asct2), avg(asct2) from ( select "
+ sql += "%s as asct1, ts ," % str_fun_s_1
+ sql += "%s as asct2, " % str_fun_s_2
+ sql += "%s, " % random.choice(self.s_r_select)
+ sql += "%s, " % random.choice(self.q_select)
+ sql += "ts from stable_1 where "
+ sql += "%s " % random.choice(self.q_where)
+ sql += "%s " % random.choice(self.order_where)
+ sql += ") %s " % random.choice(self.unionall_or_union)
+ sql += "select ts , max(asct2) from ( select "
+ sql += "%s as asct1, ts ," % str_fun_1
+ sql += "%s as asct2, " % str_fun_2
+ sql += "%s, " % random.choice(self.s_r_select)
+ sql += "%s, " % random.choice(self.q_select)
+ sql += "ts from stable_2 where "
+ sql += "%s " % random.choice(self.q_where)
+ sql += "%s " % random.choice(self.order_where)
+ sql += "%s " % random.choice(self.limit1_where)
+ sql += ")"
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ #TD-15437 tdSql.query(sql)
+ #TD-15437 self.cur1.execute(sql)
+
+ #3 inter union not support
+ tdSql.query("select 1-11 as str_nest from stable_1 limit 1;")
+ for i in range(self.fornum):
+ if (strlist == ['LTRIM','RTRIM','LOWER','UPPER']) or (strlist == ['SUBSTR']) or (strlist == ['CONCAT']) or (strlist == ['CONCAT_WS']):
+ sql = "select ts , LTRIM(asct1), LOWER(asct1), RTRIM(asct2), UPPER(asct2) from ( select "
+ sql += "%s as asct1, ts ," % str_fun_s_1
+ sql += "%s as asct2, " % str_fun_s_2
+ sql += "%s, " % random.choice(self.s_r_select)
+ sql += "%s, " % random.choice(self.q_select)
+ sql += "ts from stable_1 where "
+ sql += "%s " % random.choice(self.q_where)
+ sql += "%s " % random.choice(self.order_where)
+ #sql += "%s " % random.choice(limit1_where)
+ sql += " %s " % random.choice(self.unionall_or_union)
+ sql += " select "
+ sql += "%s as asct1, ts ," % str_fun_1
+ sql += "%s as asct2, " % str_fun_2
+ sql += "%s, " % random.choice(self.s_r_select)
+ sql += "%s, " % random.choice(self.q_select)
+ sql += "ts from stable_2 where "
+ sql += "%s " % random.choice(self.q_where)
+ sql += "%s " % random.choice(self.order_where)
+ sql += "%s " % random.choice(self.limit1_where)
+ sql += ")"
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ #TD-15837 tdSql.query(sql)
+ # self.cur1.execute(sql)
+ elif (strlist == ['LENGTH','CHAR_LENGTH']):
+ sql = "select sum(asct1), min(asct1), max(asct2), avg(asct2) from ( select "
+ sql += "%s as asct1, ts ," % str_fun_s_1
+ sql += "%s as asct2, " % str_fun_s_2
+ sql += "%s, " % random.choice(self.s_r_select)
+ sql += "%s, " % random.choice(self.q_select)
+ sql += "ts from stable_1 where "
+ sql += "%s " % random.choice(self.q_where)
+ sql += "%s " % random.choice(self.order_where)
+ #sql += "%s " % random.choice(limit1_where)
+ sql += " %s " % random.choice(self.unionall_or_union)
+ sql += " select "
+ sql += "%s as asct1, ts ," % str_fun_1
+ sql += "%s as asct2, " % str_fun_2
+ sql += "%s, " % random.choice(self.s_r_select)
+ sql += "%s, " % random.choice(self.q_select)
+ sql += "ts from stable_2 where "
+ sql += "%s " % random.choice(self.q_where)
+ sql += "%s " % random.choice(self.order_where)
+ sql += "%s " % random.choice(self.limit1_where)
+ sql += ")"
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ #TD-15837 tdSql.query(sql)
+ # self.cur1.execute(sql)
+
+ tdSql.query("select 1-12 as str_nest from stable_1 limit 1;")
+ for i in range(self.fornum):
+ if (strlist == ['LTRIM','RTRIM','LOWER','UPPER']) or (strlist == ['SUBSTR']) or (strlist == ['CONCAT']) or (strlist == ['CONCAT_WS']):
+ sql = "select ts , LTRIM(asct1), LOWER(asct1), RTRIM(asct2), UPPER(asct2) from ( select t1.ts as ts,"
+ sql += "%s as asct2, " % str_fun_join_s_1
+ sql += "%s as asct1, " % str_fun_join_s_2
+ sql += "t1.%s, " % random.choice(self.q_select)
+ sql += "t1.%s, " % random.choice(self.q_select)
+ sql += "t2.%s, " % random.choice(self.q_select)
+ sql += "t2.%s, " % random.choice(self.q_select)
+ sql += "t2.ts from stable_1 t1 , stable_2 t2 where t1.ts = t2.ts and "
+ sql += "%s " % random.choice(self.t_u_where)
+ sql += "and %s " % random.choice(self.t_u_or_where)
+ sql += "%s " % random.choice(self.order_u_where)
+ sql += "%s " % random.choice(self.limit1_where)
+ sql += ");"
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ # tdSql.query(sql)
+ # self.cur1.execute(sql)# TD-16039
+ elif (strlist == ['LENGTH','CHAR_LENGTH']):
+ sql = "select sum(asct1), min(asct1), max(asct2), avg(asct2) from ( select t1.ts as ts,"
+ sql += "%s as asct2, " % str_fun_join_s_1
+ sql += "%s as asct1, " % str_fun_join_s_2
+ sql += "t1.%s, " % random.choice(self.q_select)
+ sql += "t1.%s, " % random.choice(self.q_select)
+ sql += "t2.%s, " % random.choice(self.q_select)
+ sql += "t2.%s, " % random.choice(self.q_select)
+ sql += "t2.ts from stable_1 t1 , stable_2 t2 where t1.ts = t2.ts and "
+ sql += "%s " % random.choice(self.t_u_where)
+ sql += "and %s " % random.choice(self.t_u_or_where)
+ sql += "%s " % random.choice(self.order_u_where)
+ sql += "%s " % random.choice(self.limit1_where)
+ sql += ");"
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ # tdSql.query(sql)
+ # self.cur1.execute(sql)# TD-16039
+
+ tdSql.query("select 1-13 as str_nest from stable_1 limit 1;")
+ for i in range(self.fornum):
+ if (strlist == ['LTRIM','RTRIM','LOWER','UPPER']) or (strlist == ['SUBSTR']) or (strlist == ['CONCAT']) or (strlist == ['CONCAT_WS']):
+ sql = "select ts ,"
+ sql += "%s, " % str_fun_1
+ sql += "%s, " % random.choice(self.q_select)
+ sql += "%s, " % random.choice(self.q_select)
+ sql += "%s, " % str_fun_2
+ sql += "%s " % random.choice(self.t_select)
+ sql += " from ( select * from stable_1 where "
+ sql += "%s " % random.choice(self.qt_where)
+ sql += "%s " % random.choice(self.order_where)
+ sql += ") ;"
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ # tdSql.query(sql)
+ # tdSql.checkRows(300)
+ # self.cur1.execute(sql) # TD-16039
+ elif (strlist == ['LENGTH','CHAR_LENGTH']):
+ sql = "select ts ,"
+ sql += "%s, " % str_fun_1
+ sql += "%s, " % random.choice(self.q_select)
+ sql += "%s, " % random.choice(self.q_select)
+ sql += "%s, " % str_fun_2
+ sql += "%s " % random.choice(self.t_select)
+ sql += " from ( select * from stable_1 where "
+ sql += "%s " % random.choice(self.qt_where)
+ sql += "%s " % random.choice(self.order_where)
+ sql += ") ;"
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ # tdSql.query(sql)
+ # tdSql.checkRows(300)
+ # self.cur1.execute(sql)# TD-16039
+
+ tdSql.query("select 1-14 as str_nest from stable_1 limit 1;")
+ for i in range(self.fornum):
+ if (strlist == ['LTRIM','RTRIM','LOWER','UPPER']) or (strlist == ['SUBSTR']) or (strlist == ['CONCAT']) or (strlist == ['CONCAT_WS']):
+ sql = "select LTRIM(asct1), LOWER(asct1), RTRIM(asct2), UPPER(asct2) from ( select "
+ sql += "%s as asct1, " % str_fun_s_1
+ sql += "%s as asct2" % str_fun_s_2
+ sql += " from stable_1 where "
+ sql += "%s " % random.choice(self.q_where)
+ sql += "%s " % random.choice(self.partiton_where)
+ sql += "%s " % random.choice(self.order_desc_where)
+ sql += "%s " % random.choice([self.limit_where[2] , self.limit_where[3]] )
+ sql += " ) ;"
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ # tdSql.query(sql)
+ # self.cur1.execute(sql) # TD-16039
+ elif (strlist == ['LENGTH','CHAR_LENGTH']):
+ sql = "select sum(asct1), min(asct1), max(asct2), avg(asct2) from ( select "
+ sql += "%s as asct1, " % str_fun_s_1
+ sql += "%s as asct2" % str_fun_s_2
+ sql += " from stable_1 where "
+ sql += "%s " % random.choice(self.q_where)
+ sql += "%s " % random.choice(self.partiton_where)
+ sql += "%s " % random.choice(self.order_desc_where)
+ sql += "%s " % random.choice([self.limit_where[2] , self.limit_where[3]] )
+ sql += " ) ;"
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ # tdSql.query(sql)
+ # self.cur1.execute(sql) # TD-16039
+
+ tdSql.query("select 1-15 as str_nest from stable_1 limit 1;")
+ for i in range(self.fornum):
+ if (strlist == ['LTRIM','RTRIM','LOWER','UPPER']) or (strlist == ['SUBSTR']) or (strlist == ['CONCAT']) or (strlist == ['CONCAT_WS']):
+ sql = "select ts , LTRIM(asct1), LOWER(asct1), RTRIM(asct2), UPPER(asct2) from ( select t1.ts as ts,"
+ sql += "%s as asct2, " % str_fun_join_s_1
+ sql += "%s as asct1, " % str_fun_join_s_2
+ sql += "t1.%s, " % random.choice(self.q_select)
+ sql += "t1.%s, " % random.choice(self.q_select)
+ sql += "t2.%s, " % random.choice(self.q_select)
+ sql += "t2.%s " % random.choice(self.q_select)
+ sql += "from stable_1 t1, stable_2 t2 where t1.ts = t2.ts and "
+ sql += "%s " % random.choice(self.t_join_where)
+ sql += " and %s " % random.choice(self.qt_u_or_where)
+ sql += "%s " % random.choice(self.partiton_where_j)
+ sql += "%s " % random.choice(self.slimit1_where)
+ sql += ") "
+ sql += "%s " % random.choice(self.order_desc_where)
+ sql += "%s ;" % random.choice(self.limit_u_where)
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ #TD-15955 tdSql.query(sql)
+ #TD-15955 self.cur1.execute(sql)
+ elif (strlist == ['LENGTH','CHAR_LENGTH']):
+ sql = "select sum(asct1), min(asct1), max(asct2), avg(asct2) from ( select t1.ts as ts,"
+ sql += "%s as asct2, " % str_fun_join_s_1
+ sql += "%s as asct1, " % str_fun_join_s_2
+ sql += "t1.%s, " % random.choice(self.q_select)
+ sql += "t1.%s, " % random.choice(self.q_select)
+ sql += "t2.%s, " % random.choice(self.q_select)
+ sql += "t2.%s " % random.choice(self.q_select)
+ sql += "from stable_1 t1, stable_2 t2 where t1.ts = t2.ts and "
+ sql += "%s " % random.choice(self.t_join_where)
+ sql += " and %s " % random.choice(self.qt_u_or_where)
+ sql += "%s " % random.choice(self.partiton_where_j)
+ sql += "%s " % random.choice(self.slimit1_where)
+ sql += ") "
+ sql += "%s " % random.choice(self.order_desc_where)
+ sql += "%s ;" % random.choice(self.limit_u_where)
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ #TD-15955 tdSql.query(sql)
+ #TD-15955 self.cur1.execute(sql)
+
+ #taos -f sql
+ startTime_taos_f = time.time()
+ print("taos -f %s sql start!" %strlist)
+ taos_cmd1 = "taos -f %s/%s.sql" % (self.testcasePath,self.testcaseFilename)
+ _ = subprocess.check_output(taos_cmd1, shell=True).decode("utf-8")
+ print("taos -f %s sql over!" %strlist)
+ endTime_taos_f = time.time()
+ print("taos_f total time %ds" % (endTime_taos_f - startTime_taos_f))
+
+ print("=========%s====over=============" %strlist)
+
+ def time_nest(self,timelist):
+
+ print("==========%s===start=============" %timelist)
+ os.system("rm -rf %s/%s.sql" % (self.testcasePath,self.testcaseFilename))
+
+ self.dropandcreateDB_random("%s" %self.db_nest, 1)
+
+ if (timelist == ['NOW','TODAY']) or (timelist == ['TIMEZONE']):
+ time_functions = timelist
+ fun_fix_column = ['()']
+ fun_column_1 = random.sample(time_functions,1)+random.sample(fun_fix_column,1)
+ time_fun_1 = str(fun_column_1).replace("[","").replace("]","").replace("'","").replace(", ","")
+ fun_column_2 = random.sample(time_functions,1)+random.sample(fun_fix_column,1)
+ time_fun_2 = str(fun_column_2).replace("[","").replace("]","").replace("'","").replace(", ","")
+
+ fun_fix_column_j = ['()']
+ fun_column_join_1 = random.sample(time_functions,1)+random.sample(fun_fix_column_j,1)
+ time_fun_join_1 = str(fun_column_join_1).replace("[","").replace("]","").replace("'","").replace(", ","")
+ fun_column_join_2 = random.sample(time_functions,1)+random.sample(fun_fix_column_j,1)
+ time_fun_join_2 = str(fun_column_join_2).replace("[","").replace("]","").replace("'","").replace(", ","")
+
+ elif (timelist == ['TIMETRUNCATE']):
+ time_functions = timelist
+
+ t = time.time()
+ t_to_s = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(t))
+ fun_fix_column = ['q_ts','ts','_c0','_C0','_rowts','1600000000000','1600000000000000','1600000000000000000',
+ '%d' %t, '%d000' %t, '%d000000' %t,'t_to_s']
+
+ timeunits = ['1u' , '1a' ,'1s', '1m' ,'1h', '1d']
+ timeunit = str(random.sample(timeunits,1)).replace("[","").replace("]","").replace("'","")
+
+ column_1 = ['(%s,timeutil)'%(random.sample(fun_fix_column,1))]
+ fun_column_1 = random.sample(time_functions,1)+random.sample(column_1,1)
+ time_fun_1 = str(fun_column_1).replace("[","").replace("]","").replace("'","").replace(", ","").replace("\"","").replace("t_to_s","'t_to_s'")
+ time_fun_1 = str(time_fun_1).replace("timeutil","%s" %timeunit).replace("t_to_s","%s" %t_to_s)
+
+ column_2 = ['(%s,timeutil)'%(random.sample(fun_fix_column,1))]
+ fun_column_2 = random.sample(time_functions,1)+random.sample(column_2,1)
+ time_fun_2 = str(fun_column_2).replace("[","").replace("]","").replace("'","").replace(", ","").replace("\"","").replace("t_to_s","'t_to_s'")
+ time_fun_2 = str(time_fun_2).replace("timeutil","%s" %timeunit).replace("t_to_s","%s" %t_to_s)
+
+
+ fun_fix_column_j = ['(t1.q_ts)','(t1.ts)', '(t2.q_ts)','(t2.ts)','(1600000000000)','(1600000000000000)','(1600000000000000000)',
+ '(%d)' %t, '(%d000)' %t, '(%d000000)' %t,'t_to_s']
+
+ column_j1 = ['(%s,timeutil)'%(random.sample(fun_fix_column_j,1))]
+ fun_column_join_1 = random.sample(time_functions,1)+random.sample(column_j1,1)
+ time_fun_join_1 = str(fun_column_join_1).replace("[","").replace("]","").replace("'","").replace(", ","").replace("\"","").replace("t_to_s","'t_to_s'")
+ time_fun_join_1 = str(time_fun_join_1).replace("timeutil","%s" %timeunit).replace("t_to_s","%s" %t_to_s)
+
+ column_j2 = ['(%s,timeutil)'%(random.sample(fun_fix_column_j,1))]
+ fun_column_join_2 = random.sample(time_functions,1)+random.sample(column_j2,1)
+ time_fun_join_2 = str(fun_column_join_2).replace("[","").replace("]","").replace("'","").replace(", ","").replace("\"","").replace("t_to_s","'t_to_s'")
+ time_fun_join_2 = str(time_fun_join_2).replace("timeutil","%s" %timeunit).replace("t_to_s","%s" %t_to_s)
+
+ elif (timelist == ['TO_ISO8601']):
+ time_functions = timelist
+
+ t = time.time()
+ fun_fix_column = ['(now())','(ts)','(q_ts)','(_rowts)','(_c0)','(_C0)',
+ '(1600000000000)','(1600000000000000)','(1600000000000000000)',
+ '(%d)' %t, '(%d000)' %t, '(%d000000)' %t]
+
+ fun_column_1 = random.sample(time_functions,1)+random.sample(fun_fix_column,1)
+ time_fun_1 = str(fun_column_1).replace("[","").replace("]","").replace("'","").replace(", ","")
+
+ fun_column_2 = random.sample(time_functions,1)+random.sample(fun_fix_column,1)
+ time_fun_2 = str(fun_column_2).replace("[","").replace("]","").replace("'","").replace(", ","")
+
+ fun_fix_column_j = ['(t1.q_ts)','(t1.ts)', '(t2.q_ts)','(t2.ts)','(1600000000000)','(1600000000000000)','(1600000000000000000)','(now())',
+ '(%d)' %t, '(%d000)' %t, '(%d000000)' %t]
+
+ fun_column_join_1 = random.sample(time_functions,1)+random.sample(fun_fix_column_j,1)
+ time_fun_join_1 = str(fun_column_join_1).replace("[","").replace("]","").replace("'","").replace(", ","")
+
+ fun_column_join_2 = random.sample(time_functions,1)+random.sample(fun_fix_column_j,1)
+ time_fun_join_2 = str(fun_column_join_2).replace("[","").replace("]","").replace("'","").replace(", ","")
+
+ elif (timelist == ['TO_UNIXTIMESTAMP']):
+ time_functions = timelist
+
+ t = time.time()
+ t_to_s = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(t))
+ fun_fix_column = ['(q_nchar)','(q_nchar1)','(q_nchar2)','(q_nchar3)','(q_nchar4)','(q_nchar_null)','(q_binary)','(q_binary5)','(q_binary6)','(q_binary7)','(q_binary8)','(q_binary_null)','(t_to_s)']
+
+ fun_column_1 = random.sample(time_functions,1)+random.sample(fun_fix_column,1)
+ time_fun_1 = str(fun_column_1).replace("[","").replace("]","").replace("'","").replace(", ","").replace("t_to_s","'t_to_s'")
+ time_fun_1 = str(time_fun_1).replace("t_to_s","%s" %t_to_s)
+
+ fun_column_2 = random.sample(time_functions,1)+random.sample(fun_fix_column,1)
+ time_fun_2 = str(fun_column_2).replace("[","").replace("]","").replace("'","").replace(", ","").replace("t_to_s","'t_to_s'")
+ time_fun_2 = str(time_fun_2).replace("t_to_s","%s" %t_to_s)
+
+ fun_fix_column_j = ['(t1.q_nchar)','(t1.q_binary)', '(t2.q_nchar)','(t2.q_binary)','(t_to_s)']
+
+ fun_column_join_1 = random.sample(time_functions,1)+random.sample(fun_fix_column_j,1)
+ time_fun_join_1 = str(fun_column_join_1).replace("[","").replace("]","").replace("'","").replace(", ","").replace("t_to_s","'t_to_s'")
+ time_fun_join_1 = str(time_fun_join_1).replace("t_to_s","%s" %t_to_s)
+
+ fun_column_join_2 = random.sample(time_functions,1)+random.sample(fun_fix_column_j,1)
+ time_fun_join_2 = str(fun_column_join_2).replace("[","").replace("]","").replace("'","").replace(", ","").replace("t_to_s","'t_to_s'")
+ time_fun_join_2 = str(time_fun_join_2).replace("t_to_s","%s" %t_to_s)
+
+ elif (timelist == ['TIMEDIFF']):
+ time_functions = timelist
+
+ t = time.time()
+ t_to_s = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(t))
+ fun_fix_column = ['(q_nchar)','(q_nchar1)','(q_nchar2)','(q_nchar3)','(q_nchar4)','(q_nchar_null)','(q_binary)','(q_binary5)','(q_binary6)','(q_binary7)','(q_binary8)','(q_binary_null)','(t_to_s)']
+
+ fun_column_1 = random.sample(time_functions,1)+random.sample(fun_fix_column,1)
+ time_fun_1 = str(fun_column_1).replace("[","").replace("]","").replace("'","").replace(", ","").replace("t_to_s","'t_to_s'")
+ time_fun_1 = str(time_fun_1).replace("t_to_s","%s" %t_to_s)
+
+ fun_column_2 = random.sample(time_functions,1)+random.sample(fun_fix_column,1)
+ time_fun_2 = str(fun_column_2).replace("[","").replace("]","").replace("'","").replace(", ","").replace("t_to_s","'t_to_s'")
+ time_fun_2 = str(time_fun_2).replace("t_to_s","%s" %t_to_s)
+
+ fun_fix_column_j = ['(t1.q_nchar)','(t1.q_binary)', '(t2.q_nchar)','(t2.q_binary)','(t_to_s)']
+
+ fun_column_join_1 = random.sample(time_functions,1)+random.sample(fun_fix_column_j,1)
+ time_fun_join_1 = str(fun_column_join_1).replace("[","").replace("]","").replace("'","").replace(", ","").replace("t_to_s","'t_to_s'")
+ time_fun_join_1 = str(time_fun_join_1).replace("t_to_s","%s" %t_to_s)
+
+ fun_column_join_2 = random.sample(time_functions,1)+random.sample(fun_fix_column_j,1)
+ time_fun_join_2 = str(fun_column_join_2).replace("[","").replace("]","").replace("'","").replace(", ","").replace("t_to_s","'t_to_s'")
+ time_fun_join_2 = str(time_fun_join_2).replace("t_to_s","%s" %t_to_s)
+
+ elif (timelist == ['ELAPSED']):
+ time_functions = timelist
+
+ fun_fix_column = ['(ts)','(q_ts)','(_c0)','(_C0)','(_rowts)','(ts,time_unit)','(_c0,time_unit)','(_C0,time_unit)','(_rowts,time_unit)']
+
+ time_units = ['nums','numm','numh','numd','numa']
+ time_unit = str(random.sample(time_units,1)).replace("[","").replace("]","").replace("'","")
+ time_num1 = random.randint(0, 1000)
+ time_unit1 = time_unit.replace("num","%d" %time_num1)
+ time_num2 = random.randint(0, 1000)
+ time_unit2 = time_unit.replace("num","%d" %time_num2)
+
+ fun_column_1 = random.sample(time_functions,1)+random.sample(fun_fix_column,1)
+ time_fun_1 = str(fun_column_1).replace("[","").replace("]","").replace("'","").replace(", ","").replace("time_unit","%s" %time_unit1)
+
+ fun_column_2 = random.sample(time_functions,1)+random.sample(fun_fix_column,1)
+ time_fun_2 = str(fun_column_2).replace("[","").replace("]","").replace("'","").replace(", ","").replace("time_unit","%s" %time_unit2)
+
+
+ fun_fix_column_j = ['(t1.ts)','(t1.q_ts)', '(t2.ts)','(t2.q_ts)','(t1.ts,time_unit)','(t1.q_ts,time_unit)','(t2.ts,time_unit)','(t2.q_ts,time_unit)']
+
+ fun_column_join_1 = random.sample(time_functions,1)+random.sample(fun_fix_column_j,1)
+ time_fun_join_1 = str(fun_column_join_1).replace("[","").replace("]","").replace("'","").replace(", ","").replace("time_unit","%s" %time_unit1)
+
+ fun_column_join_2 = random.sample(time_functions,1)+random.sample(fun_fix_column_j,1)
+ time_fun_join_2 = str(fun_column_join_2).replace("[","").replace("]","").replace("'","").replace(", ","").replace("time_unit","%s" %time_unit2)
+
+
+ elif (timelist == ['CAST']) :
+ str_functions = timelist
+                #The four branches below cover all cases; this one covers only one
+ i = random.randint(1,4)
+ if i ==1:
+ print('===========cast_1===========')
+ fun_fix_column = ['q_bool','q_bool_null','q_bigint','q_bigint_null','q_smallint','q_smallint_null',
+ 'q_tinyint','q_tinyint_null','q_int','q_int_null','q_float','q_float_null','q_double','q_double_null']
+ type_names = ['BIGINT','BINARY(100)','TIMESTAMP','NCHAR(100)','BIGINT UNSIGNED']
+
+ type_name1 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","")
+ fun_column_1 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column,1))+' AS '+type_name1+')'
+ time_fun_1 = str(fun_column_1).replace("[","").replace("]","").replace("'","")
+
+ type_name2 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","")
+ fun_column_2 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column,1))+' AS '+type_name2+')'
+ time_fun_2 = str(fun_column_2).replace("[","").replace("]","").replace("'","")
+
+ fun_fix_column_j = ['t1.q_bool','t1.q_bool_null','t1.q_bigint','t1.q_bigint_null','t1.q_smallint','t1.q_smallint_null',
+ 't1.q_tinyint','t1.q_tinyint_null','t1.q_int','t1.q_int_null','t1.q_float','t1.q_float_null','t1.q_double','t1.q_double_null',
+ 't2.q_bool','t2.q_bool_null','t2.q_bigint','t2.q_bigint_null','t2.q_smallint','t2.q_smallint_null',
+ 't2.q_tinyint','t2.q_tinyint_null','t2.q_int','t2.q_int_null','t2.q_float','t2.q_float_null','t2.q_double','t2.q_double_null']
+
+ type_name_j1 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","")
+ fun_column_join_1 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column_j,1))+' AS '+type_name_j1+')'
+ time_fun_join_1 = str(fun_column_join_1).replace("[","").replace("]","").replace("'","")
+
+ type_name_j2 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","")
+ fun_column_join_2 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column_j,1))+' AS '+type_name_j2+')'
+ time_fun_join_2 = str(fun_column_join_2).replace("[","").replace("]","").replace("'","")
+
+ elif i==2:
+ print('===========cast_2===========')
+ fun_fix_column = ['q_binary','q_binary_null','q_binary1','q_binary2','q_binary3','q_binary4']
+ type_names = ['BIGINT','BINARY(100)','NCHAR(100)','BIGINT UNSIGNED']
+
+ type_name1 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","")
+ fun_column_1 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column,1))+' AS '+type_name1+')'
+ time_fun_1 = str(fun_column_1).replace("[","").replace("]","").replace("'","")
+
+ type_name2 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","")
+ fun_column_2 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column,1))+' AS '+type_name2+')'
+ time_fun_2 = str(fun_column_2).replace("[","").replace("]","").replace("'","")
+
+ fun_fix_column_j = ['t1.q_binary','t1.q_binary_null','t1.q_binary1','t1.q_binary2','t1.q_binary3','t1.q_smallint_null','t1.q_binary4',
+ 't2.q_binary','t2.q_binary_null','t2.q_bigint','t2.q_binary1','t2.q_binary2','t2.q_binary3','t2.q_binary4']
+
+ type_name_j1 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","")
+ fun_column_join_1 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column_j,1))+' AS '+type_name_j1+')'
+ time_fun_join_1 = str(fun_column_join_1).replace("[","").replace("]","").replace("'","")
+
+ type_name_j2 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","")
+ fun_column_join_2 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column_j,1))+' AS '+type_name_j2+')'
+ time_fun_join_2 = str(fun_column_join_2).replace("[","").replace("]","").replace("'","")
+
+ elif i==3:
+ print('===========cast_3===========')
+ fun_fix_column = ['q_nchar','q_nchar_null','q_nchar5','q_nchar6','q_nchar7','q_nchar8']
+ type_names = ['BIGINT','NCHAR(100)','BIGINT UNSIGNED']
+
+ type_name1 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","")
+ fun_column_1 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column,1))+' AS '+type_name1+')'
+ time_fun_1 = str(fun_column_1).replace("[","").replace("]","").replace("'","")
+
+ type_name2 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","")
+ fun_column_2 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column,1))+' AS '+type_name2+')'
+ time_fun_2 = str(fun_column_2).replace("[","").replace("]","").replace("'","")
+
+ fun_fix_column_j = ['t1.q_nchar','t1.q_nchar_null','t1.q_nchar5','t1.q_nchar6','t1.q_nchar7','t1.q_nchar8',
+ 't2.q_nchar','t2.q_nchar_null','t2.q_nchar5','t2.q_nchar6','t2.q_nchar7','t2.q_nchar8']
+
+ type_name_j1 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","")
+ fun_column_join_1 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column_j,1))+' AS '+type_name_j1+')'
+ time_fun_join_1 = str(fun_column_join_1).replace("[","").replace("]","").replace("'","")
+
+ type_name_j2 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","")
+ fun_column_join_2 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column_j,1))+' AS '+type_name_j2+')'
+ time_fun_join_2 = str(fun_column_join_2).replace("[","").replace("]","").replace("'","")
+
+ elif i==4:
+ print('===========cast_4===========')
+ fun_fix_column = ['q_ts','q_ts_null','_C0','_c0','ts','_rowts']
+ type_names = ['BIGINT','TIMESTAMP','BIGINT UNSIGNED']
+
+ type_name1 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","")
+ fun_column_1 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column,1))+' AS '+type_name1+')'
+ time_fun_1 = str(fun_column_1).replace("[","").replace("]","").replace("'","")
+
+ type_name2 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","")
+ fun_column_2 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column,1))+' AS '+type_name2+')'
+ time_fun_2 = str(fun_column_2).replace("[","").replace("]","").replace("'","")
+
+ fun_fix_column_j = ['t1.q_ts','t1.q_ts_null','t1.ts','t2.q_ts','t2.q_ts_null','t2.ts']
+
+ type_name_j1 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","")
+ fun_column_join_1 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column_j,1))+' AS '+type_name_j1+')'
+ time_fun_join_1 = str(fun_column_join_1).replace("[","").replace("]","").replace("'","")
+
+ type_name_j2 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","")
+ fun_column_join_2 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column_j,1))+' AS '+type_name_j2+')'
+ time_fun_join_2 = str(fun_column_join_2).replace("[","").replace("]","").replace("'","")
+
+ elif (timelist == ['CAST_1']) :
+ str_functions = timelist
+
+ print('===========cast_1===========')
+ fun_fix_column = ['q_bool','q_bool_null','q_bigint','q_bigint_null','q_smallint','q_smallint_null',
+ 'q_tinyint','q_tinyint_null','q_int','q_int_null','q_float','q_float_null','q_double','q_double_null']
+ type_names = ['BIGINT','BINARY(100)','TIMESTAMP','NCHAR(100)','BIGINT UNSIGNED']
+
+ type_name1 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","")
+ fun_column_1 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column,1))+' AS '+type_name1+')'
+ time_fun_1 = str(fun_column_1).replace("[","").replace("]","").replace("'","").replace("_1","")
+
+ type_name2 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","")
+ fun_column_2 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column,1))+' AS '+type_name2+')'
+ time_fun_2 = str(fun_column_2).replace("[","").replace("]","").replace("'","").replace("_1","")
+
+ fun_fix_column_j = ['t1.q_bool','t1.q_bool_null','t1.q_bigint','t1.q_bigint_null','t1.q_smallint','t1.q_smallint_null',
+ 't1.q_tinyint','t1.q_tinyint_null','t1.q_int','t1.q_int_null','t1.q_float','t1.q_float_null','t1.q_double','t1.q_double_null',
+ 't2.q_bool','t2.q_bool_null','t2.q_bigint','t2.q_bigint_null','t2.q_smallint','t2.q_smallint_null',
+ 't2.q_tinyint','t2.q_tinyint_null','t2.q_int','t2.q_int_null','t2.q_float','t2.q_float_null','t2.q_double','t2.q_double_null']
+
+ type_name_j1 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","")
+ fun_column_join_1 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column_j,1))+' AS '+type_name_j1+')'
+ time_fun_join_1 = str(fun_column_join_1).replace("[","").replace("]","").replace("'","").replace("_1","")
+
+ type_name_j2 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","")
+ fun_column_join_2 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column_j,1))+' AS '+type_name_j2+')'
+ time_fun_join_2 = str(fun_column_join_2).replace("[","").replace("]","").replace("'","").replace("_1","")
+
+ elif (timelist == ['CAST_2']) :
+ str_functions = timelist
+ print('===========cast_2===========')
+ fun_fix_column = ['q_binary','q_binary_null','q_binary1','q_binary2','q_binary3','q_binary4']
+ type_names = ['BIGINT','BINARY(100)','NCHAR(100)','BIGINT UNSIGNED']
+
+ type_name1 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","")
+ fun_column_1 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column,1))+' AS '+type_name1+')'
+ time_fun_1 = str(fun_column_1).replace("[","").replace("]","").replace("'","").replace("_2","")
+
+ type_name2 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","")
+ fun_column_2 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column,1))+' AS '+type_name2+')'
+ time_fun_2 = str(fun_column_2).replace("[","").replace("]","").replace("'","").replace("_2","")
+
+ fun_fix_column_j = ['t1.q_binary','t1.q_binary_null','t1.q_binary1','t1.q_binary2','t1.q_binary3','t1.q_smallint_null','t1.q_binary4',
+ 't2.q_binary','t2.q_binary_null','t2.q_bigint','t2.q_binary1','t2.q_binary2','t2.q_binary3','t2.q_binary4']
+
+ type_name_j1 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","")
+ fun_column_join_1 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column_j,1))+' AS '+type_name_j1+')'
+ time_fun_join_1 = str(fun_column_join_1).replace("[","").replace("]","").replace("'","").replace("_2","")
+
+ type_name_j2 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","")
+ fun_column_join_2 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column_j,1))+' AS '+type_name_j2+')'
+ time_fun_join_2 = str(fun_column_join_2).replace("[","").replace("]","").replace("'","").replace("_2","")
+
+ elif (timelist == ['CAST_3']) :
+ str_functions = timelist
+ print('===========cast_3===========')
+ fun_fix_column = ['q_nchar','q_nchar_null','q_nchar5','q_nchar6','q_nchar7','q_nchar8']
+ type_names = ['BIGINT','NCHAR(100)','BIGINT UNSIGNED']
+
+ type_name1 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","")
+ fun_column_1 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column,1))+' AS '+type_name1+')'
+ time_fun_1 = str(fun_column_1).replace("[","").replace("]","").replace("'","").replace("_3","")
+
+ type_name2 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","")
+ fun_column_2 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column,1))+' AS '+type_name2+')'
+ time_fun_2 = str(fun_column_2).replace("[","").replace("]","").replace("'","").replace("_3","")
+
+ fun_fix_column_j = ['t1.q_nchar','t1.q_nchar_null','t1.q_nchar5','t1.q_nchar6','t1.q_nchar7','t1.q_nchar8',
+ 't2.q_nchar','t2.q_nchar_null','t2.q_nchar5','t2.q_nchar6','t2.q_nchar7','t2.q_nchar8']
+
+ type_name_j1 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","")
+ fun_column_join_1 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column_j,1))+' AS '+type_name_j1+')'
+ time_fun_join_1 = str(fun_column_join_1).replace("[","").replace("]","").replace("'","").replace("_3","")
+
+ type_name_j2 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","")
+ fun_column_join_2 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column_j,1))+' AS '+type_name_j2+')'
+ time_fun_join_2 = str(fun_column_join_2).replace("[","").replace("]","").replace("'","").replace("_3","")
+
+ elif (timelist == ['CAST_4']) :
+ str_functions = timelist
+ print('===========cast_4===========')
+ fun_fix_column = ['q_ts','q_ts_null','_C0','_c0','ts','_rowts']
+ type_names = ['BIGINT','TIMESTAMP','BIGINT UNSIGNED']
+
+ type_name1 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","")
+ fun_column_1 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column,1))+' AS '+type_name1+')'
+ time_fun_1 = str(fun_column_1).replace("[","").replace("]","").replace("'","").replace("_4","")
+
+ type_name2 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","")
+ fun_column_2 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column,1))+' AS '+type_name2+')'
+ time_fun_2 = str(fun_column_2).replace("[","").replace("]","").replace("'","").replace("_4","")
+
+ fun_fix_column_j = ['t1.q_ts','t1.q_ts_null','t1.ts','t2.q_ts','t2.q_ts_null','t2.ts']
+
+ type_name_j1 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","")
+ fun_column_join_1 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column_j,1))+' AS '+type_name_j1+')'
+ time_fun_join_1 = str(fun_column_join_1).replace("[","").replace("]","").replace("'","").replace("_4","")
+
+ type_name_j2 = str(random.sample(type_names,1)).replace("[","").replace("]","").replace("'","")
+ fun_column_join_2 = str(random.sample(str_functions,1))+'('+str(random.sample(fun_fix_column_j,1))+' AS '+type_name_j2+')'
+ time_fun_join_2 = str(fun_column_join_2).replace("[","").replace("]","").replace("'","").replace("_4","")
+
+ tdSql.query("select 1-1 as time_nest from stable_1 limit 1;")
+ for i in range(self.fornum):
+ if (timelist == ['NOW','TODAY']) or (timelist == ['TIMETRUNCATE']) or (timelist == ['TO_ISO8601'])\
+ or (timelist == ['TO_UNIXTIMESTAMP']):
+ sql = "select ts , timediff(asct1,now) from ( select "
+ sql += "%s as asct1, " % time_fun_1
+ sql += "%s as asct2, " % time_fun_2
+ sql += "%s, " % random.choice(self.s_s_select)
+ sql += "%s, " % random.choice(self.q_select)
+ sql += "ts from regular_table_1 where "
+ sql += "%s " % random.choice(self.q_where)
+ sql += "%s " % random.choice(self.order_where)
+ sql += ");"
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ tdSql.query(sql)
+ tdSql.checkRows(100)
+ self.cur1.execute(sql)
+ elif (timelist == ['TIMEZONE']) \
+ or (timelist == ['CAST']) or (timelist == ['CAST_1']) or (timelist == ['CAST_2']) or (timelist == ['CAST_3']) or (timelist == ['CAST_4']):
+ sql = "select ts , asct1,now(),today(),timezone() from ( select "
+ sql += "%s as asct1, " % time_fun_1
+ sql += "%s as asct2, " % time_fun_2
+ sql += "%s, " % random.choice(self.s_s_select)
+ sql += "%s, " % random.choice(self.q_select)
+ sql += "ts from regular_table_1 where "
+ sql += "%s " % random.choice(self.q_where)
+ sql += "%s " % random.choice(self.order_where)
+ sql += ");"
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ tdSql.query(sql)
+ tdSql.checkRows(100)
+ self.cur1.execute(sql)
+ elif (timelist == ['ELAPSED']) :
+ sql = "select max(asct1),now(),today(),timezone() from ( select "
+ sql += "%s as asct1, " % time_fun_1
+ sql += "%s as asct2 " % time_fun_2
+ sql += "from regular_table_1 where "
+ sql += "%s " % random.choice(self.q_where)
+ sql += ");"
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ tdSql.query(sql)
+ self.cur1.execute(sql)
+
+ tdSql.query("select 1-2 as time_nest from stable_1 limit 1;")
+ for i in range(self.fornum):
+ if (timelist == ['NOW','TODAY']) or (timelist == ['TIMETRUNCATE']) or (timelist == ['TO_ISO8601'])\
+ or (timelist == ['TO_UNIXTIMESTAMP']):
+ sql = "select ts , timediff(asct1,now),now(),today(),timezone() from ( select "
+ sql += "%s as asct1, " % time_fun_1
+ sql += "%s, " % random.choice(self.s_s_select)
+ sql += "%s, " % random.choice(self.q_select)
+ sql += "ts ts from regular_table_1 where "
+ sql += "%s " % random.choice(self.q_where)
+ sql += "%s )" % random.choice(self.order_where)
+ sql += "%s " % random.choice(self.unionall_or_union)
+ sql += "select ts , timediff(asct2,now),now(),today(),timezone() from ( select "
+ sql += "%s as asct2, " % time_fun_2
+ sql += "%s, " % random.choice(self.s_s_select)
+ sql += "%s, " % random.choice(self.q_select)
+ sql += "ts ts from regular_table_1 where "
+ sql += "%s " % random.choice(self.q_where)
+ #sql += "%s " % random.choice(having_support)
+ sql += "%s " % random.choice(self.order_where)
+ sql += "%s " % random.choice(self.limit1_where)
+ sql += ")"
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ #TD-15437 tdSql.query(sql)
+ #TD-15437 self.cur1.execute(sql)
+ elif (timelist == ['TIMEZONE']) or (timelist == ['CAST']) or (timelist == ['CAST_1']) or (timelist == ['CAST_2']) or (timelist == ['CAST_3']) or (timelist == ['CAST_4']):
+ sql = "select ts , (asct1),now(),today(),timezone() from ( select "
+ sql += "%s as asct1, " % time_fun_1
+ sql += "%s, " % random.choice(self.s_s_select)
+ sql += "%s, " % random.choice(self.q_select)
+ sql += "ts ts from regular_table_1 where "
+ sql += "%s " % random.choice(self.q_where)
+ sql += "%s )" % random.choice(self.order_where)
+ sql += "%s " % random.choice(self.unionall_or_union)
+ sql += "select ts , asct2,now(),today(),timezone() from ( select "
+ sql += "%s as asct2, " % time_fun_2
+ sql += "%s, " % random.choice(self.s_s_select)
+ sql += "%s, " % random.choice(self.q_select)
+ sql += "ts ts from regular_table_1 where "
+ sql += "%s " % random.choice(self.q_where)
+ sql += "%s " % random.choice(self.order_where)
+ sql += "%s " % random.choice(self.limit1_where)
+ sql += ")"
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ #TD-15437 tdSql.query(sql)
+ #TD-15437 self.cur1.execute(sql)
+ elif (timelist == ['ELAPSED']) :
+ sql = "select min(asct1),now(),today(),timezone() from ( select "
+ sql += "%s as asct1 " % time_fun_1
+ sql += " from regular_table_1 where "
+ sql += "%s )" % random.choice(self.q_where)
+ sql += "%s " % random.choice(self.unionall_or_union)
+ sql += "select avg(asct2),now(),today(),timezone() from ( select "
+ sql += "%s as asct2 " % time_fun_2
+ sql += " from regular_table_1 where "
+ sql += "%s " % random.choice(self.q_where)
+ sql += "%s " % random.choice(self.limit1_where)
+ sql += ")"
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ tdSql.query(sql)
+ self.cur1.execute(sql)
+
+ tdSql.query("select 1-3 as time_nest from stable_1 limit 1;")
+ for i in range(self.fornum):
+ if (timelist == ['NOW','TODAY']) or (timelist == ['TIMETRUNCATE']) or (timelist == ['TO_ISO8601'])\
+ or (timelist == ['TO_UNIXTIMESTAMP']):
+ sql = "select ts , timediff(asct1,now) from ( select "
+ sql += "%s as asct1, ts ," % time_fun_1
+ sql += "%s as asct2, " % time_fun_2
+ sql += "%s, " % random.choice(self.s_s_select)
+ sql += "%s, " % random.choice(self.q_select)
+ sql += "ts from regular_table_1 where "
+ sql += "%s " % random.choice(self.q_where)
+ sql += "%s select " % random.choice(self.unionall_or_union)
+ sql += "%s as asct2, ts ," % time_fun_2
+ sql += "%s as asct1, " % time_fun_1
+ sql += "%s, " % random.choice(self.s_s_select)
+ sql += "%s, " % random.choice(self.q_select)
+ sql += "ts from regular_table_2 where "
+ sql += "%s " % random.choice(self.q_where)
+ sql += "%s " % random.choice(self.order_where)
+ sql += ")"
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ #TD-15473 tdSql.query(sql)
+ #self.cur1.execute(sql)
+ elif (timelist == ['TIMEZONE']) or (timelist == ['CAST']) or (timelist == ['CAST_1']) or (timelist == ['CAST_2']) or (timelist == ['CAST_3']) or (timelist == ['CAST_4']):
+ sql = "select ts , (asct1),now(),today(),timezone() from ( select "
+ sql += "%s as asct1, ts ," % time_fun_1
+ sql += "%s as asct2, " % time_fun_2
+ sql += "%s, " % random.choice(self.s_s_select)
+ sql += "%s, " % random.choice(self.q_select)
+ sql += "ts from regular_table_1 where "
+ sql += "%s " % random.choice(self.q_where)
+ sql += "%s select " % random.choice(self.unionall_or_union)
+ sql += "%s as asct2, ts ," % time_fun_2
+ sql += "%s as asct1, " % time_fun_1
+ sql += "%s, " % random.choice(self.s_s_select)
+ sql += "%s, " % random.choice(self.q_select)
+ sql += "ts from regular_table_2 where "
+ sql += "%s " % random.choice(self.q_where)
+ sql += "%s " % random.choice(self.order_where)
+ sql += ")"
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ #TD-15473 tdSql.query(sql)
+ #self.cur1.execute(sql)
+ elif (timelist == ['ELAPSED']) :
+ sql = "select abs(asct1),now(),today(),timezone() from ( select "
+ sql += "%s as asct1," % time_fun_1
+ sql += "%s as asct2 " % time_fun_2
+ sql += "from regular_table_1 where "
+ sql += "%s " % random.choice(self.q_where)
+ sql += "%s select " % random.choice(self.unionall_or_union)
+ sql += "%s as asct2," % time_fun_2
+ sql += "%s as asct1 " % time_fun_1
+ sql += "from regular_table_2 where "
+ sql += "%s " % random.choice(self.q_where)
+ sql += "%s " % random.choice(self.order_where)
+ sql += ")"
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ tdSql.query(sql)
+ self.cur1.execute(sql)
+
+ tdSql.query("select 1-4 as time_nest from stable_1 limit 1;")
+ for i in range(self.fornum):
+ if (timelist == ['NOW','TODAY']) or (timelist == ['TIMETRUNCATE']) or (timelist == ['TO_ISO8601'])\
+ or (timelist == ['TO_UNIXTIMESTAMP']):
+ sql = "select ts , timediff(asct1,now) from ( select t1.ts as ts,"
+ sql += "%s, " % time_fun_join_1
+ sql += "%s as asct1, " % time_fun_join_2
+ sql += "%s, " % time_fun_join_1
+ sql += "t1.%s, " % random.choice(self.q_select)
+ sql += "t2.%s, " % random.choice(self.q_select)
+ sql += "t2.ts from regular_table_1 t1 , regular_table_2 t2 where t1.ts = t2.ts and "
+ sql += "%s " % random.choice(self.q_u_where)
+ sql += "and %s " % random.choice(self.q_u_or_where)
+ sql += "%s " % random.choice(self.order_u_where)
+ sql += ");"
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ tdSql.query(sql)
+ tdSql.checkRows(100)
+ self.cur1.execute(sql)
+ elif (timelist == ['TIMEZONE']) or (timelist == ['CAST']) or (timelist == ['CAST_1']) or (timelist == ['CAST_2']) or (timelist == ['CAST_3']) or (timelist == ['CAST_4']):
+ sql = "select ts , (asct1) from ( select t1.ts as ts,"
+ sql += "%s, " % time_fun_join_1
+ sql += "%s as asct1, " % time_fun_join_2
+ sql += "%s, " % time_fun_join_1
+ sql += "t1.%s, " % random.choice(self.q_select)
+ sql += "t2.%s, " % random.choice(self.q_select)
+ sql += "t2.ts from regular_table_1 t1 , regular_table_2 t2 where t1.ts = t2.ts and "
+ sql += "%s " % random.choice(self.q_u_where)
+ sql += "and %s " % random.choice(self.q_u_or_where)
+ sql += "%s " % random.choice(self.order_u_where)
+ sql += ");"
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ tdSql.query(sql)
+ tdSql.checkRows(100)
+ self.cur1.execute(sql)
+ elif (timelist == ['ELAPSED']) :
+ sql = "select floor(asct1) from ( select "
+ sql += "%s, " % time_fun_join_1
+ sql += "%s as asct1, " % time_fun_join_2
+ sql += "%s " % time_fun_join_1
+ sql += " from regular_table_1 t1 , regular_table_2 t2 where t1.ts = t2.ts and "
+ sql += "%s " % random.choice(self.q_u_where)
+ sql += "and %s " % random.choice(self.q_u_or_where)
+ sql += ");"
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ tdSql.query(sql)
+ self.cur1.execute(sql)
+
+ tdSql.query("select 1-5 as time_nest from stable_1 limit 1;")
+ for i in range(self.fornum):
+ if (timelist == ['ELAPSED']) :
+ sql = "select now(),today(),timezone(), "
+ sql += "%s, " % time_fun_1
+ sql += "%s " % time_fun_2
+ sql += " from ( select * from regular_table_1 ) where "
+ sql += "%s " % random.choice(self.q_where)
+ sql += " ;"
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ tdSql.query(sql)
+ self.cur1.execute(sql)
+ else:
+ sql = "select ts ,now(),today(),timezone(), "
+ sql += "%s, " % time_fun_1
+ sql += "%s, " % random.choice(self.q_select)
+ sql += "%s, " % random.choice(self.q_select)
+ sql += "%s " % time_fun_2
+ sql += " from ( select * from regular_table_1 ) where "
+ sql += "%s " % random.choice(self.q_where)
+ sql += "%s " % random.choice(self.order_where)
+ sql += " ;"
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ tdSql.query(sql)
+ tdSql.checkRows(100)
+ self.cur1.execute(sql)
+
+ tdSql.query("select 1-6 as time_nest from stable_1 limit 1;")
+ for i in range(self.fornum):
+ if (timelist == ['NOW','TODAY']) or (timelist == ['TIMETRUNCATE']) or (timelist == ['TO_ISO8601'])\
+ or (timelist == ['TO_UNIXTIMESTAMP']):
+ sql = "select ts , timediff(asct1,now) from ( select t1.ts as ts,"
+ sql += "%s, " % time_fun_join_1
+ sql += "%s as asct1, " % time_fun_join_2
+ sql += "t1.%s, " % random.choice(self.q_select)
+ sql += "t2.%s, " % random.choice(self.q_select)
+ sql += "%s, " % time_fun_join_1
+ sql += "t2.ts from regular_table_1 t1 , regular_table_2 t2 where t1.ts = t2.ts and "
+ sql += "%s " % random.choice(self.q_u_where)
+ sql += "and %s )" % random.choice(self.q_u_or_where)
+ sql += "%s " % random.choice(self.order_where)
+ sql += "%s " % random.choice(self.limit1_where)
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ tdSql.query(sql)
+ self.cur1.execute(sql)
+ elif (timelist == ['TIMEZONE']) or (timelist == ['CAST']) or (timelist == ['CAST_1']) or (timelist == ['CAST_2']) or (timelist == ['CAST_3']) or (timelist == ['CAST_4']):
+ sql = "select ts , (asct1) from ( select t1.ts as ts,"
+ sql += "%s, " % time_fun_join_1
+ sql += "%s as asct1, " % time_fun_join_2
+ sql += "t1.%s, " % random.choice(self.q_select)
+ sql += "t2.%s, " % random.choice(self.q_select)
+ sql += "%s, " % time_fun_join_1
+ sql += "t2.ts from regular_table_1 t1 , regular_table_2 t2 where t1.ts = t2.ts and "
+ sql += "%s " % random.choice(self.q_u_where)
+ sql += "and %s )" % random.choice(self.q_u_or_where)
+ sql += "%s " % random.choice(self.order_where)
+ sql += "%s " % random.choice(self.limit1_where)
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ tdSql.query(sql)
+ self.cur1.execute(sql)
+ elif (timelist == ['ELAPSED']) :
+ sql = "select (asct1)*111 from ( select "
+ sql += "%s, " % time_fun_join_1
+ sql += "%s as asct1, " % time_fun_join_2
+ sql += "%s " % time_fun_join_1
+ sql += " from regular_table_1 t1 , regular_table_2 t2 where t1.ts = t2.ts and "
+ sql += "%s " % random.choice(self.q_u_where)
+ sql += "and %s )" % random.choice(self.q_u_or_where)
+ sql += "%s " % random.choice(self.limit1_where)
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ tdSql.query(sql)
+ self.cur1.execute(sql)
+
+ tdSql.query("select 1-7 as time_nest from stable_1 limit 1;")
+ for i in range(self.fornum):
+ if (timelist == ['NOW','TODAY']) or (timelist == ['TIMETRUNCATE']) or (timelist == ['TO_ISO8601'])\
+ or (timelist == ['TO_UNIXTIMESTAMP']):
+ sql = "select ts , timediff(asct1,now) from ( select "
+ sql += "%s as asct1, ts ," % time_fun_1
+ sql += "%s as asct2, " % time_fun_2
+ sql += "%s, " % random.choice(self.q_select)
+ sql += "%s, " % random.choice(self.t_select)
+ sql += "ts from stable_1 where "
+ sql += "%s " % random.choice(self.qt_where)
+ sql += "%s " % random.choice(self.order_where)
+ sql += ") ;"
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ # tdSql.query(sql) # TD-16039
+ # tdSql.checkRows(300)
+ # self.cur1.execute(sql)
+ elif (timelist == ['TIMEZONE']) or (timelist == ['CAST']) or (timelist == ['CAST_1']) or (timelist == ['CAST_2']) or (timelist == ['CAST_3']) or (timelist == ['CAST_4']):
+ sql = "select ts , (asct1),now(),today(),timezone() from ( select "
+ sql += "%s as asct1, ts ," % time_fun_1
+ sql += "%s as asct2, " % time_fun_2
+ sql += "%s, " % random.choice(self.q_select)
+ sql += "%s, " % random.choice(self.t_select)
+ sql += "ts from stable_1 where "
+ sql += "%s " % random.choice(self.qt_where)
+ sql += "%s " % random.choice(self.order_where)
+ sql += ") ;"
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ # tdSql.query(sql) # TD-16039
+ # tdSql.checkRows(300)
+ # self.cur1.execute(sql)
+ elif (timelist == ['ELAPSED']) :
+ sql = "select (asct1)/asct2 ,now(),today(),timezone() from ( select "
+ sql += "%s as asct1, " % time_fun_1
+ sql += "%s as asct2 " % time_fun_2
+ sql += "from stable_1 where "
+ sql += "%s " % random.choice(self.qt_where)
+ sql += ") ;"
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ # tdSql.query(sql) #同时出现core:TD-16095和TD-16042
+ # self.cur1.execute(sql)
+
+ tdSql.query("select 1-8 as time_nest from stable_1 limit 1;")
+ for i in range(self.fornum):
+ if (timelist == ['NOW','TODAY']) or (timelist == ['TIMETRUNCATE']) or (timelist == ['TO_ISO8601'])\
+ or (timelist == ['TO_UNIXTIMESTAMP']):
+ sql = "select ts , timediff(asct1,now) "
+ sql += "from ( select "
+ sql += "%s, " % random.choice(self.s_s_select)
+ sql += "%s as asct1, ts ," % time_fun_1
+ sql += "%s as asct2, " % time_fun_2
+ sql += "%s, " % random.choice(self.q_select)
+ sql += "%s, " % random.choice(self.t_select)
+ sql += "ts from stable_1 where "
+ sql += "%s " % random.choice(self.qt_where)
+ sql += "%s " % random.choice(self.order_where)
+ sql += ") ;"
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ # tdSql.query(sql) # TD-16039
+ # tdSql.checkRows(300)
+ # self.cur1.execute(sql)
+ elif (timelist == ['TIMEZONE']) or (timelist == ['CAST']) or (timelist == ['CAST_1']) or (timelist == ['CAST_2']) or (timelist == ['CAST_3']) or (timelist == ['CAST_4']):
+ sql = "select ts , (asct1),now(),today(),timezone() "
+ sql += "from ( select "
+ sql += "%s, " % random.choice(self.s_s_select)
+ sql += "%s as asct1, ts ," % time_fun_1
+ sql += "%s as asct2, " % time_fun_2
+ sql += "%s, " % random.choice(self.q_select)
+ sql += "%s, " % random.choice(self.t_select)
+ sql += "ts from stable_1 where "
+ sql += "%s " % random.choice(self.qt_where)
+ sql += "%s " % random.choice(self.order_where)
+ sql += ") ;"
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ # tdSql.query(sql) # TD-16039
+ # tdSql.checkRows(300)
+ # self.cur1.execute(sql)
+ elif (timelist == ['ELAPSED']) :
+ sql = "select floor(abs(asct1)),now(),today(),timezone() "
+ sql += "from ( select "
+ sql += "%s, " % random.choice(self.s_s_select)
+ sql += "%s as asct1, " % time_fun_1
+ sql += "%s as asct2 " % time_fun_2
+ sql += "from stable_1 where "
+ sql += "%s " % random.choice(self.qt_where)
+ sql += "%s " % random.choice(self.order_where)
+ sql += ") ;"
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ # tdSql.query(sql) # TD-16039
+ # self.cur1.execute(sql)
+
+ tdSql.query("select 1-9 as time_nest from stable_1 limit 1;")
+ for i in range(self.fornum):
+ if (timelist == ['NOW','TODAY']) or (timelist == ['TIMETRUNCATE']) or (timelist == ['TO_ISO8601'])\
+ or (timelist == ['TO_UNIXTIMESTAMP']):
+ sql = "select ts , timediff(asct1,now) from ( select t1.ts as ts,"
+ sql += "%s, " % time_fun_join_1
+ sql += "%s as asct1, " % time_fun_join_2
+ sql += "t1.%s, " % random.choice(self.q_select)
+ sql += "t1.%s, " % random.choice(self.q_select)
+ sql += "t2.%s, " % random.choice(self.q_select)
+ sql += "t2.%s, " % random.choice(self.q_select)
+ sql += "t2.ts from stable_1 t1 , stable_2 t2 where t1.ts = t2.ts and "
+ sql += "%s " % random.choice(self.t_join_where)
+ sql += "and %s " % random.choice(self.t_u_where)
+ sql += "and %s " % random.choice(self.t_u_or_where)
+ sql += "%s " % random.choice(self.order_u_where)
+ sql += "%s " % random.choice(self.limit1_where)
+ sql += ") ;"
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ # tdSql.query(sql) TD-16039
+ # self.cur1.execute(sql) TD-16039
+ elif (timelist == ['TIMEZONE']) or (timelist == ['CAST']) or (timelist == ['CAST_1']) or (timelist == ['CAST_2']) or (timelist == ['CAST_3']) or (timelist == ['CAST_4']):
+ sql = "select ts , asct1 from ( select t1.ts as ts,"
+ sql += "%s, " % time_fun_join_1
+ sql += "%s as asct1, " % time_fun_join_2
+ sql += "t1.%s, " % random.choice(self.q_select)
+ sql += "t1.%s, " % random.choice(self.q_select)
+ sql += "t2.%s, " % random.choice(self.q_select)
+ sql += "t2.%s, " % random.choice(self.q_select)
+ sql += "t2.ts from stable_1 t1 , stable_2 t2 where t1.ts = t2.ts and "
+ sql += "%s " % random.choice(self.t_join_where)
+ sql += "and %s " % random.choice(self.t_u_where)
+ sql += "and %s " % random.choice(self.t_u_or_where)
+ sql += "%s " % random.choice(self.order_u_where)
+ sql += "%s " % random.choice(self.limit1_where)
+ sql += ") ;"
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ # tdSql.query(sql)
+ # self.cur1.execute(sql) # TD-16039
+ elif (timelist == ['ELAPSED']) :
+ sql = "select min(asct1*110) from ( select "
+ sql += "%s, " % time_fun_join_1
+ sql += "%s as asct1 " % time_fun_join_2
+ sql += "from stable_1 t1 , stable_2 t2 where t1.ts = t2.ts and "
+ sql += "%s " % random.choice(self.t_join_where)
+ sql += "and %s " % random.choice(self.t_u_where)
+ sql += "and %s " % random.choice(self.t_u_or_where)
+ sql += "%s " % random.choice(self.limit1_where)
+ sql += ") ;"
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ # tdSql.query(sql)
+ # self.cur1.execute(sql) # TD-16039
+
+ self.restartDnodes()
+ tdSql.query("select 1-10 as time_nest from stable_1 limit 1;")
+ for i in range(self.fornum):
+ if (timelist == ['NOW','TODAY']) or (timelist == ['TIMETRUNCATE']) or (timelist == ['TO_ISO8601'])\
+ or (timelist == ['TO_UNIXTIMESTAMP']):
+ sql = "select ts , timediff(asct1,now) from ( select "
+ sql += "%s as asct1, ts ," % time_fun_1
+ sql += "%s as asct2, " % time_fun_2
+ sql += "%s, " % random.choice(self.s_r_select)
+ sql += "%s, " % random.choice(self.q_select)
+ sql += "ts from stable_1 where "
+ sql += "%s " % random.choice(self.q_where)
+ sql += "%s " % random.choice(self.order_where)
+ sql += ") %s " % random.choice(self.unionall_or_union)
+ sql += "select ts , max(asct2) from ( select "
+ sql += "%s as asct1, ts ," % time_fun_1
+ sql += "%s as asct2, " % time_fun_2
+ sql += "%s, " % random.choice(self.s_r_select)
+ sql += "%s, " % random.choice(self.q_select)
+ sql += "ts from stable_2 where "
+ sql += "%s " % random.choice(self.q_where)
+ sql += "%s " % random.choice(self.order_where)
+ sql += "%s " % random.choice(self.limit1_where)
+ sql += ")"
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ #TD-15437 tdSql.query(sql)
+ #TD-15437 self.cur1.execute(sql)
+ elif (timelist == ['TIMEZONE']) or (timelist == ['CAST']) or (timelist == ['CAST_1']) or (timelist == ['CAST_2']) or (timelist == ['CAST_3']) or (timelist == ['CAST_4']):
+ sql = "select ts , (asct1),now(),today(),timezone() from ( select "
+ sql += "%s as asct1, ts ," % time_fun_1
+ sql += "%s as asct2, " % time_fun_2
+ sql += "%s, " % random.choice(self.s_r_select)
+ sql += "%s, " % random.choice(self.q_select)
+ sql += "ts from stable_1 where "
+ sql += "%s " % random.choice(self.q_where)
+ sql += "%s " % random.choice(self.order_where)
+ sql += ") %s " % random.choice(self.unionall_or_union)
+ sql += "select ts , max(asct2),now(),today(),timezone() from ( select "
+ sql += "%s as asct1, ts ," % time_fun_1
+ sql += "%s as asct2, " % time_fun_2
+ sql += "%s, " % random.choice(self.s_r_select)
+ sql += "%s, " % random.choice(self.q_select)
+ sql += "ts from stable_2 where "
+ sql += "%s " % random.choice(self.q_where)
+ sql += "%s " % random.choice(self.order_where)
+ sql += "%s " % random.choice(self.limit1_where)
+ sql += ")"
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ #TD-15437 tdSql.query(sql)
+ #TD-15437 self.cur1.execute(sql)
+ elif (timelist == ['ELAPSED']) :
+ sql = "select abs(asct1),now(),today(),timezone() from ( select "
+ sql += "%s as asct1 ," % time_fun_1
+ sql += "%s as asct2 " % time_fun_2
+ sql += " from stable_1 where "
+ sql += "%s " % random.choice(self.q_where)
+ sql += ") %s " % random.choice(self.unionall_or_union)
+ sql += "select max(asct2),now(),today(),timezone() from ( select "
+ sql += "%s as asct1 ," % time_fun_1
+ sql += "%s as asct2 " % time_fun_2
+ sql += "from stable_2 where "
+ sql += "%s " % random.choice(self.q_where)
+ sql += "%s " % random.choice(self.limit1_where)
+ sql += ")"
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ tdSql.query(sql)
+ #TD-15437 self.cur1.execute(sql)
+
+ #3 inter union not support
+ tdSql.query("select 1-11 as time_nest from stable_1 limit 1;")
+ for i in range(self.fornum):
+ if (timelist == ['NOW','TODAY']) or (timelist == ['TIMETRUNCATE']) or (timelist == ['TO_ISO8601'])\
+ or (timelist == ['TO_UNIXTIMESTAMP']):
+ sql = "select ts , timediff(asct1,now), timediff(now,asct2) from ( select "
+ sql += "%s as asct1, ts ," % time_fun_1
+ sql += "%s as asct2, " % time_fun_2
+ sql += "%s, " % random.choice(self.s_r_select)
+ sql += "%s, " % random.choice(self.q_select)
+ sql += "ts from stable_1 where "
+ sql += "%s " % random.choice(self.q_where)
+ sql += " %s " % random.choice(self.unionall_or_union)
+ sql += " select "
+ sql += "%s as asct1, ts ," % time_fun_1
+ sql += "%s as asct2, " % time_fun_2
+ sql += "%s, " % random.choice(self.s_r_select)
+ sql += "%s, " % random.choice(self.q_select)
+ sql += "ts from stable_2 where "
+ sql += "%s " % random.choice(self.q_where)
+ sql += "%s " % random.choice(self.order_where)
+ sql += "%s " % random.choice(self.limit1_where)
+ sql += ")"
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ # tdSql.query(sql)#TD-15473
+ # self.cur1.execute(sql)#TD-15473
+ elif (timelist == ['TIMEZONE']) or (timelist == ['CAST']) or (timelist == ['CAST_1']) or (timelist == ['CAST_2']) or (timelist == ['CAST_3']) or (timelist == ['CAST_4']):
+ sql = "select ts , (asct1,now()),(now(),asct2) from ( select "
+ sql += "%s as asct1, ts ," % time_fun_1
+ sql += "%s as asct2, " % time_fun_2
+ sql += "%s, " % random.choice(self.s_r_select)
+ sql += "%s, " % random.choice(self.q_select)
+ sql += "ts from stable_1 where "
+ sql += "%s " % random.choice(self.q_where)
+ sql += " %s " % random.choice(self.unionall_or_union)
+ sql += " select "
+ sql += "%s as asct1, ts ," % time_fun_1
+ sql += "%s as asct2, " % time_fun_2
+ sql += "%s, " % random.choice(self.s_r_select)
+ sql += "%s, " % random.choice(self.q_select)
+ sql += "ts from stable_2 where "
+ sql += "%s " % random.choice(self.q_where)
+ sql += "%s " % random.choice(self.order_where)
+ sql += "%s " % random.choice(self.limit1_where)
+ sql += ")"
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ # tdSql.query(sql)#TD-15473
+ # self.cur1.execute(sql)#TD-15473
+ elif (timelist == ['ELAPSED']) :
+ sql = "select asct1+asct2,now(),today(),timezone() from ( select "
+ sql += "%s as asct1, " % time_fun_1
+ sql += "%s as asct2 " % time_fun_2
+ sql += " from stable_1 where "
+ sql += "%s " % random.choice(self.q_where)
+ sql += " %s " % random.choice(self.unionall_or_union)
+ sql += " select "
+ sql += "%s as asct1 ," % time_fun_1
+ sql += "%s as asct2 " % time_fun_2
+ sql += " from stable_2 where "
+ sql += "%s " % random.choice(self.q_where)
+ sql += "%s " % random.choice(self.order_where)
+ sql += "%s " % random.choice(self.limit1_where)
+ sql += ")"
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ tdSql.query(sql)#TD-15473
+ self.cur1.execute(sql)#TD-15473
+
+ tdSql.query("select 1-12 as time_nest from stable_1 limit 1;")
+ for i in range(self.fornum):
+ if (timelist == ['NOW','TODAY']) or (timelist == ['TIMETRUNCATE']) or (timelist == ['TO_ISO8601'])\
+ or (timelist == ['TO_UNIXTIMESTAMP']):
+ sql = "select ts , timediff(asct1,now) from ( select t1.ts as ts,"
+ sql += "%s, " % time_fun_join_1
+ sql += "%s as asct1, " % time_fun_join_2
+ sql += "t1.%s, " % random.choice(self.q_select)
+ sql += "t1.%s, " % random.choice(self.q_select)
+ sql += "t2.%s, " % random.choice(self.q_select)
+ sql += "t2.%s, " % random.choice(self.q_select)
+ sql += "t2.ts from stable_1 t1 , stable_2 t2 where t1.ts = t2.ts and "
+ sql += "%s " % random.choice(self.t_u_where)
+ sql += "and %s " % random.choice(self.t_u_or_where)
+ sql += "%s " % random.choice(self.order_u_where)
+ sql += "%s " % random.choice(self.limit1_where)
+ sql += ");"
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ # tdSql.query(sql)
+ # self.cur1.execute(sql)# TD-16039
+ elif (timelist == ['TIMEZONE']) or (timelist == ['CAST']) or (timelist == ['CAST_1']) or (timelist == ['CAST_2']) or (timelist == ['CAST_3']) or (timelist == ['CAST_4']):
+ sql = "select ts , asct1,now() from ( select t1.ts as ts,"
+ sql += "%s, " % time_fun_join_1
+ sql += "%s as asct1, " % time_fun_join_2
+ sql += "t1.%s, " % random.choice(self.q_select)
+ sql += "t1.%s, " % random.choice(self.q_select)
+ sql += "t2.%s, " % random.choice(self.q_select)
+ sql += "t2.%s, " % random.choice(self.q_select)
+ sql += "t2.ts from stable_1 t1 , stable_2 t2 where t1.ts = t2.ts and "
+ sql += "%s " % random.choice(self.t_u_where)
+ sql += "and %s " % random.choice(self.t_u_or_where)
+ sql += "%s " % random.choice(self.order_u_where)
+ sql += "%s " % random.choice(self.limit1_where)
+ sql += ");"
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ # tdSql.query(sql)
+ # self.cur1.execute(sql)# TD-16039
+ elif (timelist == ['ELAPSED']) :
+ sql = "select min(floor(asct1)),now() from ( select "
+ sql += "%s, " % time_fun_join_1
+ sql += "%s as asct1 " % time_fun_join_2
+ sql += " from stable_1 t1 , stable_2 t2 where t1.ts = t2.ts and "
+ sql += "%s " % random.choice(self.t_u_where)
+ sql += "and %s " % random.choice(self.t_u_or_where)
+ sql += "%s " % random.choice(self.limit1_where)
+ sql += ");"
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ # tdSql.query(sql)
+ # self.cur1.execute(sql)# TD-16039
+
+ tdSql.query("select 1-13 as time_nest from stable_1 limit 1;")
+ for i in range(self.fornum):
+ if (timelist == ['NOW','TODAY']) or (timelist == ['TIMETRUNCATE']) or (timelist == ['TO_ISO8601'])\
+ or (timelist == ['TO_UNIXTIMESTAMP']):
+ sql = "select ts , timediff(%s,now)," % time_fun_2
+ sql += "%s as asct1, " % time_fun_1
+ sql += "%s, " % random.choice(self.q_select)
+ sql += "%s, " % random.choice(self.q_select)
+ sql += "%s " % time_fun_2
+ sql += "%s " % random.choice(self.t_select)
+ sql += " from ( select * from stable_1 where "
+ sql += "%s " % random.choice(self.qt_where)
+ sql += "%s " % random.choice(self.order_where)
+ sql += ") ;"
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ # tdSql.query(sql)
+ # tdSql.checkRows(300)
+ # self.cur1.execute(sql) # TD-16039
+ elif (timelist == ['TIMEZONE']) or (timelist == ['CAST']) or (timelist == ['CAST_1']) or (timelist == ['CAST_2']) or (timelist == ['CAST_3']) or (timelist == ['CAST_4']):
+ sql = "select ts ,now(),today(),timezone(), "
+ sql += "%s as asct1, " % time_fun_1
+ sql += "%s, " % random.choice(self.q_select)
+ sql += "%s, " % random.choice(self.q_select)
+ sql += "%s " % time_fun_2
+ sql += "%s " % random.choice(self.t_select)
+ sql += " from ( select * from stable_1 where "
+ sql += "%s " % random.choice(self.qt_where)
+ sql += "%s " % random.choice(self.order_where)
+ sql += ") ;"
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ # tdSql.query(sql)
+ # tdSql.checkRows(300)
+ # self.cur1.execute(sql) # TD-16039
+ elif (timelist == ['ELAPSED']) :
+ sql = "select now(),today(),timezone(), "
+ sql += "%s as asct1, " % time_fun_1
+ sql += "%s " % time_fun_2
+ sql += " from ( select * from stable_1 where "
+ sql += "%s " % random.choice(self.qt_where)
+ sql += "%s " % random.choice(self.order_where)
+ sql += ") ;"
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ # tdSql.query(sql)
+ # self.cur1.execute(sql) # TD-16039
+
+ tdSql.query("select 1-14 as time_nest from stable_1 limit 1;")
+ for i in range(self.fornum):
+ if (timelist == ['NOW','TODAY']) or (timelist == ['TIMETRUNCATE']) or (timelist == ['TO_ISO8601'])\
+ or (timelist == ['TO_UNIXTIMESTAMP']):
+ sql = "select ts , timediff(asct1,now),timediff(now,asct2) from ( select ts ts ,"
+ sql += "%s as asct1, " % time_fun_1
+ sql += "%s as asct2" % time_fun_2
+ sql += " from stable_1 where "
+ sql += "%s " % random.choice(self.q_where)
+ sql += "%s " % random.choice(self.partiton_where)
+ sql += "%s " % random.choice(self.order_desc_where)
+ sql += "%s " % random.choice([self.limit_where[2] , self.limit_where[3]] )
+ sql += " ) ;"
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ # tdSql.query(sql)
+ # self.cur1.execute(sql) # TD-16039
+ elif (timelist == ['TIMEZONE']) or (timelist == ['CAST']) or (timelist == ['CAST_1']) or (timelist == ['CAST_2']) or (timelist == ['CAST_3']) or (timelist == ['CAST_4']):
+ sql = "select ts , (asct1),now(),(now()),asct2 from ( select ts ts ,"
+ sql += "%s as asct1, " % time_fun_1
+ sql += "%s as asct2" % time_fun_2
+ sql += " from stable_1 where "
+ sql += "%s " % random.choice(self.q_where)
+ sql += "%s " % random.choice(self.partiton_where)
+ sql += "%s " % random.choice(self.order_desc_where)
+ sql += "%s " % random.choice([self.limit_where[2] , self.limit_where[3]] )
+ sql += " ) ;"
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ # tdSql.query(sql)
+ # self.cur1.execute(sql) # TD-16039
+ elif (timelist == ['ELAPSED']) :
+ sql = "select ts , (asct1)*asct2,now(),(now()) from ( select "
+ sql += "%s as asct1, " % time_fun_1
+ sql += "%s as asct2" % time_fun_2
+ sql += " from stable_1 where "
+ sql += "%s " % random.choice(self.q_where)
+ sql += "%s " % random.choice(self.partiton_where)
+ sql += "%s " % random.choice([self.limit_where[2] , self.limit_where[3]] )
+ sql += " ) ;"
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ # tdSql.query(sql)
+ # self.cur1.execute(sql) # TD-16039
+
+ tdSql.query("select 1-15 as time_nest from stable_1 limit 1;")
+ for i in range(self.fornum):
+ if (timelist == ['NOW','TODAY']) or (timelist == ['TIMETRUNCATE']) or (timelist == ['TO_ISO8601'])\
+ or (timelist == ['TO_UNIXTIMESTAMP']):
+ sql = "select ts , timediff(asct1,now),timediff(now,asct2) from ( select t1.ts as ts,"
+ sql += "%s as asct2, " % time_fun_join_1
+ sql += "%s as asct1, " % time_fun_join_2
+ sql += "t1.%s, " % random.choice(self.q_select)
+ sql += "t1.%s, " % random.choice(self.q_select)
+ sql += "t2.%s, " % random.choice(self.q_select)
+ sql += "t2.%s " % random.choice(self.q_select)
+ sql += "from stable_1 t1, stable_2 t2 where t1.ts = t2.ts and "
+ sql += "%s " % random.choice(self.t_join_where)
+ sql += " and %s " % random.choice(self.qt_u_or_where)
+ sql += "%s " % random.choice(self.partiton_where_j)
+ sql += "%s " % random.choice(self.slimit1_where)
+ sql += ") "
+ sql += "%s " % random.choice(self.order_desc_where)
+ sql += "%s ;" % random.choice(self.limit_u_where)
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ # tdSql.query(sql)
+ # self.cur1.execute(sql) # TD-16039
+ elif (timelist == ['TIMEZONE']) or (timelist == ['CAST']) or (timelist == ['CAST_1']) or (timelist == ['CAST_2']) or (timelist == ['CAST_3']) or (timelist == ['CAST_4']):
+ sql = "select ts , asct1,(now()),(now()),asct2 ,now(),today(),timezone() from ( select t1.ts as ts,"
+ sql += "%s as asct2, " % time_fun_join_1
+ sql += "%s as asct1, " % time_fun_join_2
+ sql += "t1.%s, " % random.choice(self.q_select)
+ sql += "t1.%s, " % random.choice(self.q_select)
+ sql += "t2.%s, " % random.choice(self.q_select)
+ sql += "t2.%s " % random.choice(self.q_select)
+ sql += "from stable_1 t1, stable_2 t2 where t1.ts = t2.ts and "
+ sql += "%s " % random.choice(self.t_join_where)
+ sql += " and %s " % random.choice(self.qt_u_or_where)
+ sql += "%s " % random.choice(self.partiton_where_j)
+ sql += "%s " % random.choice(self.slimit1_where)
+ sql += ") "
+ sql += "%s " % random.choice(self.order_desc_where)
+ sql += "%s ;" % random.choice(self.limit_u_where)
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ # tdSql.query(sql)
+ # self.cur1.execute(sql) # TD-16039
+ elif (timelist == ['ELAPSED']) :
+ sql = "select asct1,(now()),(now()),asct2 ,now(),today(),timezone() from ( select "
+ sql += "%s as asct2, " % time_fun_join_1
+ sql += "%s as asct1 " % time_fun_join_2
+ sql += "from stable_1 t1, stable_2 t2 where t1.ts = t2.ts and "
+ sql += "%s " % random.choice(self.t_join_where)
+ sql += " and %s " % random.choice(self.qt_u_or_where)
+ sql += "%s " % random.choice(self.partiton_where_j)
+ sql += "%s " % random.choice(self.slimit1_where)
+ sql += ") "
+ sql += "%s ;" % random.choice(self.limit_u_where)
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ tdSql.query(sql)
+ self.cur1.execute(sql) # TD-16039
+
+ #taos -f sql
+ startTime_taos_f = time.time()
+ print("taos -f %s sql start!" %timelist)
+ taos_cmd1 = "taos -f %s/%s.sql" % (self.testcasePath,self.testcaseFilename)
+ _ = subprocess.check_output(taos_cmd1, shell=True).decode("utf-8")
+ print("taos -f %s sql over!" %timelist)
+ endTime_taos_f = time.time()
+ print("taos_f total time %ds" % (endTime_taos_f - startTime_taos_f))
+
+ print("=========%s====over=============" %timelist)
+
    def base_nest(self,baselist):
        """Stress-test nested sub-queries built around a family of basic scalar
        functions.

        baselist: a one-element list naming the function family to exercise
        (e.g. ['A'], ['F'], ['P'], ...).  The element is combined with a random
        column expression to form function calls such as ``A(q_int)`` or
        ``P(q_int,123)`` which are then embedded in ~15 shapes of nested /
        union / join queries against the freshly (re)created test database.
        Generated SQL is logged, executed through tdSql and a second cursor
        (self.cur1), and finally replayed in bulk via ``taos -f``.

        Side effects: drops/recreates the test DB, restarts dnodes midway,
        removes and relies on the on-disk <testcase>.sql replay file.
        """

        print("==========%s===start=============" %baselist)
        # Remove any SQL replay file left over from a previous run; the
        # queries issued below are appended to it and replayed at the end.
        os.system("rm -rf %s/%s.sql" % (self.testcasePath,self.testcaseFilename))

        # Recreate the nested-query test database from scratch.
        self.dropandcreateDB_random("%s" %self.db_nest, 1)

        # --- Build the random function expressions used throughout -------
        # Family 1: single-argument functions, e.g. FUNC(q_int).
        # NOTE(review): ['S'] is matched here AND in the elif below; because
        # this is an if/elif chain the second ['S'] branch is unreachable —
        # confirm which family 'S' was meant to belong to.
        if (baselist == ['A']) or (baselist == ['S']) or (baselist == ['F']) \
            or (baselist == ['C']):
            base_functions = baselist
            fun_fix_column = ['(q_bigint)','(q_smallint)','(q_tinyint)','(q_int)','(q_float)','(q_double)','(q_bigint_null)','(q_smallint_null)','(q_tinyint_null)','(q_int_null)','(q_float_null)','(q_double_null)']
            # random.sample returns lists; str() + replace() glues the
            # function name and column into a call like "F(q_int)".
            fun_column_1 = random.sample(base_functions,1)+random.sample(fun_fix_column,1)
            base_fun_1 = str(fun_column_1).replace("[","").replace("]","").replace("'","").replace(", ","")
            fun_column_2 = random.sample(base_functions,1)+random.sample(fun_fix_column,1)
            base_fun_2 = str(fun_column_2).replace("[","").replace("]","").replace("'","").replace(", ","")

            # Join-query variants reference columns through t1./t2. aliases.
            fun_fix_column_j = ['(t1.q_bigint)','(t1.q_smallint)','(t1.q_tinyint)','(t1.q_int)','(t1.q_float)','(t1.q_double)','(t1.q_bigint_null)','(t1.q_smallint_null)','(t1.q_tinyint_null)','(t1.q_int_null)','(t1.q_float_null)','(t1.q_double_null)',
                        '(t2.q_bigint)','(t2.q_smallint)','(t2.q_tinyint)','(t2.q_int)','(t2.q_float)','(t2.q_double)','(t2.q_bigint_null)','(t2.q_smallint_null)','(t2.q_tinyint_null)','(t2.q_int_null)','(t2.q_float_null)','(t2.q_double_null)']
            fun_column_join_1 = random.sample(base_functions,1)+random.sample(fun_fix_column_j,1)
            base_fun_join_1 = str(fun_column_join_1).replace("[","").replace("]","").replace("'","").replace(", ","")
            fun_column_join_2 = random.sample(base_functions,1)+random.sample(fun_fix_column_j,1)
            base_fun_join_2 = str(fun_column_join_2).replace("[","").replace("]","").replace("'","").replace(", ","")

        # Family 2: two-argument functions, e.g. FUNC(q_int, <num>); the
        # literal "num" placeholder is substituted with base(num) afterwards.
        elif (baselist == ['P']) or (baselist == ['M']) or (baselist == ['S'])or (baselist == ['T']):
            base_functions = baselist
            num = random.randint(0, 1000)
            fun_fix_column = ['(q_bigint,num)','(q_smallint,num)','(q_tinyint,num)','(q_int,num)','(q_float,num)','(q_double,num)',
                        '(q_bigint_null,num)','(q_smallint_null,num)','(q_tinyint_null,num)','(q_int_null,num)','(q_float_null,num)','(q_double_null,num)']
            fun_column_1 = random.sample(base_functions,1)+random.sample(fun_fix_column,1)
            base_fun_1 = str(fun_column_1).replace("[","").replace("]","").replace("'","").replace(", ","").replace("num",base(num))
            fun_column_2 = random.sample(base_functions,1)+random.sample(fun_fix_column,1)
            base_fun_2 = str(fun_column_2).replace("[","").replace("]","").replace("'","").replace(", ","").replace("num",base(num))

            fun_fix_column_j = ['(t1.q_bigint,num)','(t1.q_smallint,num)','(t1.q_tinyint,num)','(t1.q_int,num)','(t1.q_float,num)','(t1.q_double,num)',
                        '(t1.q_bigint_null,num)','(t1.q_smallint_null,num)','(t1.q_tinyint_null,num)','(t1.q_int_null,num)','(t1.q_float_null,num)','(t1.q_double_null,num)',
                        '(t2.q_bigint,num)','(t2.q_smallint,num)','(t2.q_tinyint,num)','(t2.q_int,num)','(t2.q_float,num)','(t2.q_double,num)',
                        '(t2.q_bigint_null,num)','(t2.q_smallint_null,num)','(t2.q_tinyint_null,num)','(t2.q_int_null,num)','(t2.q_float_null,num)','(t2.q_double_null,num)']
            fun_column_join_1 = random.sample(base_functions,1)+random.sample(fun_fix_column_j,1)
            base_fun_join_1 = str(fun_column_join_1).replace("[","").replace("]","").replace("'","").replace(", ","").replace("num",base(num))
            fun_column_join_2 = random.sample(base_functions,1)+random.sample(fun_fix_column_j,1)
            base_fun_join_2 = str(fun_column_join_2).replace("[","").replace("]","").replace("'","").replace(", ","").replace("num",base(num))
        # NOTE(review): if baselist matches neither branch, base_fun_1 etc.
        # are never bound and the loops below raise NameError — presumably
        # callers only pass the families listed above; confirm.

        # 1-1: scalar function over a sub-select on a regular table.
        tdSql.query("select 1-1 as base_nest from stable_1 limit 1;")
        for i in range(self.fornum):
            sql = "select ts , floor(asct1) from ( select "
            sql += "%s as asct1, " % base_fun_1
            sql += "%s as asct2, " % base_fun_2
            sql += "%s, " % random.choice(self.s_s_select)
            sql += "%s, " % random.choice(self.q_select)
            sql += "ts from regular_table_1 where "
            sql += "%s " % random.choice(self.q_where)
            sql += "%s " % random.choice(self.order_where)
            sql += ");"
            tdLog.info(sql)
            tdLog.info(len(sql))
            tdSql.query(sql)
            tdSql.checkRows(100)
            self.cur1.execute(sql)

        # 1-2: union / union-all of two nested sub-selects (outer union).
        tdSql.query("select 1-2 as base_nest from stable_1 limit 1;")
        for i in range(self.fornum):
            sql = "select ts , abs(asct1) from ( select "
            sql += "%s as asct1, " % base_fun_1
            sql += "%s, " % random.choice(self.s_s_select)
            sql += "%s, " % random.choice(self.q_select)
            sql += "ts ts from regular_table_1 where "
            sql += "%s " % random.choice(self.q_where)
            sql += "%s )" % random.choice(self.order_where)
            sql += "%s " % random.choice(self.unionall_or_union)
            sql += "select ts , asct2 from ( select "
            sql += "%s as asct2, " % base_fun_2
            sql += "%s, " % random.choice(self.s_s_select)
            sql += "%s, " % random.choice(self.q_select)
            sql += "ts ts from regular_table_1 where "
            sql += "%s " % random.choice(self.q_where)
            #sql += "%s " % random.choice(having_support)
            sql += "%s " % random.choice(self.order_where)
            sql += "%s " % random.choice(self.limit1_where)
            sql += ")"
            tdLog.info(sql)
            tdLog.info(len(sql))
            # Execution disabled pending bug TD-15437.
            #TD-15437 tdSql.query(sql)
            #TD-15437 self.cur1.execute(sql)

        # 1-3: union inside the sub-query (inner union).
        tdSql.query("select 1-3 as base_nest from stable_1 limit 1;")
        for i in range(self.fornum):
            sql = "select ts , min(asct1) from ( select "
            sql += "%s as asct1, ts ," % base_fun_1
            sql += "%s as asct2, " % base_fun_2
            sql += "%s, " % random.choice(self.s_s_select)
            sql += "%s, " % random.choice(self.q_select)
            sql += "ts from regular_table_1 where "
            sql += "%s " % random.choice(self.q_where)
            sql += "%s select " % random.choice(self.unionall_or_union)
            sql += "%s as asct2, ts ," % base_fun_2
            sql += "%s as asct1, " % base_fun_1
            sql += "%s, " % random.choice(self.s_s_select)
            sql += "%s, " % random.choice(self.q_select)
            sql += "ts from regular_table_2 where "
            sql += "%s " % random.choice(self.q_where)
            sql += "%s " % random.choice(self.order_where)
            sql += ")"
            tdLog.info(sql)
            tdLog.info(len(sql))
            # Execution disabled pending bug TD-15473.
            #TD-15473 tdSql.query(sql)
            #self.cur1.execute(sql)

        # 1-4: two-regular-table join inside the sub-query.
        tdSql.query("select 1-4 as base_nest from stable_1 limit 1;")
        for i in range(self.fornum):
            sql = "select ts , asct1 from ( select t1.ts as ts,"
            sql += "%s, " % base_fun_join_1
            sql += "%s as asct1, " % base_fun_join_2
            sql += "%s, " % base_fun_join_1
            sql += "t1.%s, " % random.choice(self.q_select)
            sql += "t2.%s, " % random.choice(self.q_select)
            sql += "t2.ts from regular_table_1 t1 , regular_table_2 t2 where t1.ts = t2.ts and "
            sql += "%s " % random.choice(self.q_u_where)
            sql += "and %s " % random.choice(self.q_u_or_where)
            sql += "%s " % random.choice(self.order_u_where)
            sql += ");"
            tdLog.info(sql)
            tdLog.info(len(sql))
            tdSql.query(sql)
            tdSql.checkRows(100)
            self.cur1.execute(sql)

        # 1-5: functions applied OUTSIDE a "select *" sub-query.
        tdSql.query("select 1-5 as base_nest from stable_1 limit 1;")
        for i in range(self.fornum):
            sql = "select ts ,"
            sql += "%s, " % base_fun_1
            sql += "%s, " % random.choice(self.q_select)
            sql += "%s, " % random.choice(self.q_select)
            sql += "%s " % base_fun_2
            sql += " from ( select * from regular_table_1 ) where "
            sql += "%s " % random.choice(self.q_where)
            sql += "%s " % random.choice(self.order_where)
            sql += " ;"
            tdLog.info(sql)
            tdLog.info(len(sql))
            tdSql.query(sql)
            tdSql.checkRows(100)
            self.cur1.execute(sql)

        # 1-6: aggregate over a join sub-query, with outer order/limit.
        tdSql.query("select 1-6 as base_nest from stable_1 limit 1;")
        for i in range(self.fornum):
            sql = "select ts , max(asct1) from ( select t1.ts as ts,"
            sql += "%s, " % base_fun_join_1
            sql += "%s as asct1, " % base_fun_join_2
            sql += "t1.%s, " % random.choice(self.q_select)
            sql += "t2.%s, " % random.choice(self.q_select)
            sql += "%s, " % base_fun_join_1
            sql += "t2.ts from regular_table_1 t1 , regular_table_2 t2 where t1.ts = t2.ts and "
            sql += "%s " % random.choice(self.q_u_where)
            sql += "and %s )" % random.choice(self.q_u_or_where)
            sql += "%s " % random.choice(self.order_where)
            sql += "%s " % random.choice(self.limit1_where)
            tdLog.info(sql)
            tdLog.info(len(sql))
            tdSql.query(sql)
            self.cur1.execute(sql)

        # 1-7: sub-query over a super table (stable_1), tag columns included.
        tdSql.query("select 1-7 as base_nest from stable_1 limit 1;")
        for i in range(self.fornum):
            sql = "select ts , abs(asct1) from ( select "
            sql += "%s as asct1, ts ," % base_fun_1
            sql += "%s as asct2, " % base_fun_2
            sql += "%s, " % random.choice(self.q_select)
            sql += "%s, " % random.choice(self.t_select)
            sql += "ts from stable_1 where "
            sql += "%s " % random.choice(self.qt_where)
            sql += "%s " % random.choice(self.order_where)
            sql += ") ;"
            tdLog.info(sql)
            tdLog.info(len(sql))
            tdSql.query(sql)
            tdSql.checkRows(300)
            self.cur1.execute(sql)

        # 1-8: as 1-7 but with the select-star item leading the column list.
        tdSql.query("select 1-8 as base_nest from stable_1 limit 1;")
        for i in range(self.fornum):
            sql = "select ts,floor(asct1) "
            sql += "from ( select "
            sql += "%s, " % random.choice(self.s_s_select)
            sql += "%s as asct1, ts ," % base_fun_1
            sql += "%s as asct2, " % base_fun_2
            sql += "%s, " % random.choice(self.q_select)
            sql += "%s, " % random.choice(self.t_select)
            sql += "ts from stable_1 where "
            sql += "%s " % random.choice(self.qt_where)
            sql += "%s " % random.choice(self.order_where)
            sql += ") ;"
            tdLog.info(sql)
            tdLog.info(len(sql))
            tdSql.query(sql)
            tdSql.checkRows(300)
            self.cur1.execute(sql)

        # 1-9: aggregate over a super-table join sub-query.
        tdSql.query("select 1-9 as base_nest from stable_1 limit 1;")
        for i in range(self.fornum):
            sql = "select ts , max(asct1) from ( select t1.ts as ts,"
            sql += "%s, " % base_fun_join_1
            sql += "%s as asct1, " % base_fun_join_2
            sql += "t1.%s, " % random.choice(self.q_select)
            sql += "t1.%s, " % random.choice(self.q_select)
            sql += "t2.%s, " % random.choice(self.q_select)
            sql += "t2.%s, " % random.choice(self.q_select)
            sql += "t2.ts from stable_1 t1 , stable_2 t2 where t1.ts = t2.ts and "
            sql += "%s " % random.choice(self.t_join_where)
            sql += "and %s " % random.choice(self.t_u_where)
            sql += "and %s " % random.choice(self.t_u_or_where)
            sql += "%s " % random.choice(self.order_u_where)
            sql += "%s " % random.choice(self.limit1_where)
            sql += ") ;"
            tdLog.info(sql)
            tdLog.info(len(sql))
            tdSql.query(sql)
            self.cur1.execute(sql)

        # Restart the dnodes midway to exercise queries across a restart.
        self.restartDnodes()
        # 1-10: outer union of two nested super-table sub-selects.
        tdSql.query("select 1-10 as base_nest from stable_1 limit 1;")
        for i in range(self.fornum):
            sql = "select ts , min(asct1) from ( select "
            sql += "%s as asct1, ts ," % base_fun_1
            sql += "%s as asct2, " % base_fun_2
            sql += "%s, " % random.choice(self.s_r_select)
            sql += "%s, " % random.choice(self.q_select)
            sql += "ts from stable_1 where "
            sql += "%s " % random.choice(self.q_where)
            sql += "%s " % random.choice(self.order_where)
            sql += ") %s " % random.choice(self.unionall_or_union)
            sql += "select ts , max(asct2) from ( select "
            sql += "%s as asct1, ts ," % base_fun_1
            sql += "%s as asct2, " % base_fun_2
            sql += "%s, " % random.choice(self.s_r_select)
            sql += "%s, " % random.choice(self.q_select)
            sql += "ts from stable_2 where "
            sql += "%s " % random.choice(self.q_where)
            sql += "%s " % random.choice(self.order_where)
            sql += "%s " % random.choice(self.limit1_where)
            sql += ")"
            tdLog.info(sql)
            tdLog.info(len(sql))
            # Execution disabled pending bug TD-15437.
            #TD-15437 tdSql.query(sql)
            #TD-15437 self.cur1.execute(sql)

        #3 inter union not support
        # 1-11: inner union between stable_1 and stable_2 sub-selects.
        tdSql.query("select 1-11 as base_nest from stable_1 limit 1;")
        for i in range(self.fornum):
            sql = "select ts , min(asct1), max(asct2) from ( select "
            sql += "%s as asct1, ts ," % base_fun_1
            sql += "%s as asct2, " % base_fun_2
            sql += "%s, " % random.choice(self.s_r_select)
            sql += "%s, " % random.choice(self.q_select)
            sql += "ts from stable_1 where "
            sql += "%s " % random.choice(self.q_where)
            sql += "%s " % random.choice(self.order_where)
            #sql += "%s " % random.choice(limit1_where)
            sql += " %s " % random.choice(self.unionall_or_union)
            sql += " select "
            sql += "%s as asct1, ts ," % base_fun_1
            sql += "%s as asct2, " % base_fun_2
            sql += "%s, " % random.choice(self.s_r_select)
            sql += "%s, " % random.choice(self.q_select)
            sql += "ts from stable_2 where "
            sql += "%s " % random.choice(self.q_where)
            sql += "%s " % random.choice(self.order_where)
            sql += "%s " % random.choice(self.limit1_where)
            sql += ")"
            tdLog.info(sql)
            tdLog.info(len(sql))
            # Execution disabled pending bug TD-15837.
            #TD-15837 tdSql.query(sql)
            # self.cur1.execute(sql)

        # 1-12: super-table join sub-query without the t_join_where clause.
        tdSql.query("select 1-12 as base_nest from stable_1 limit 1;")
        for i in range(self.fornum):
            sql = "select ts , max(asct1) from ( select t1.ts as ts,"
            sql += "%s, " % base_fun_join_1
            sql += "%s as asct1, " % base_fun_join_2
            sql += "t1.%s, " % random.choice(self.q_select)
            sql += "t1.%s, " % random.choice(self.q_select)
            sql += "t2.%s, " % random.choice(self.q_select)
            sql += "t2.%s, " % random.choice(self.q_select)
            sql += "t2.ts from stable_1 t1 , stable_2 t2 where t1.ts = t2.ts and "
            sql += "%s " % random.choice(self.t_u_where)
            sql += "and %s " % random.choice(self.t_u_or_where)
            sql += "%s " % random.choice(self.order_u_where)
            sql += "%s " % random.choice(self.limit1_where)
            sql += ");"
            tdLog.info(sql)
            tdLog.info(len(sql))
            tdSql.query(sql)
            self.cur1.execute(sql)

        # 1-13: scalar functions and a tag column over "select *" of stable_1.
        tdSql.query("select 1-13 as base_nest from stable_1 limit 1;")
        for i in range(self.fornum):
            sql = "select ts ,"
            sql += "%s, " % base_fun_1
            sql += "%s, " % random.choice(self.q_select)
            sql += "%s, " % random.choice(self.q_select)
            sql += "%s " % base_fun_2
            sql += "%s " % random.choice(self.t_select)
            sql += " from ( select * from stable_1 where "
            sql += "%s " % random.choice(self.qt_where)
            sql += "%s " % random.choice(self.order_where)
            sql += ") ;"
            tdLog.info(sql)
            tdLog.info(len(sql))
            tdSql.query(sql)
            tdSql.checkRows(300)
            self.cur1.execute(sql)

        # 1-14: aggregates over a partitioned / ordered / limited sub-query.
        tdSql.query("select 1-14 as base_nest from stable_1 limit 1;")
        for i in range(self.fornum):
            sql = "select avg(asct1),count(asct2) from ( select "
            sql += "%s as asct1, " % base_fun_1
            sql += "%s as asct2" % base_fun_2
            sql += " from stable_1 where "
            sql += "%s " % random.choice(self.q_where)
            sql += "%s " % random.choice(self.partiton_where)
            sql += "%s " % random.choice(self.order_desc_where)
            # limit_where[2]/[3] are the sub-query-compatible limit forms.
            sql += "%s " % random.choice([self.limit_where[2] , self.limit_where[3]] )
            sql += " ) ;"
            tdLog.info(sql)
            tdLog.info(len(sql))
            tdSql.query(sql)
            self.cur1.execute(sql)

        # 1-15: join + partition/slimit inside, order/limit outside.
        tdSql.query("select 1-15 as base_nest from stable_1 limit 1;")
        for i in range(self.fornum):
            sql = "select ts , max(asct1) from ( select t1.ts as ts,"
            sql += "%s, " % base_fun_join_1
            sql += "%s as asct1, " % base_fun_join_2
            sql += "t1.%s, " % random.choice(self.q_select)
            sql += "t1.%s, " % random.choice(self.q_select)
            sql += "t2.%s, " % random.choice(self.q_select)
            sql += "t2.%s " % random.choice(self.q_select)
            sql += "from stable_1 t1, stable_2 t2 where t1.ts = t2.ts and "
            sql += "%s " % random.choice(self.t_join_where)
            sql += " and %s " % random.choice(self.qt_u_or_where)
            sql += "%s " % random.choice(self.partiton_where_j)
            sql += "%s " % random.choice(self.slimit1_where)
            sql += ") "
            sql += "%s " % random.choice(self.order_desc_where)
            sql += "%s ;" % random.choice(self.limit_u_where)
            tdLog.info(sql)
            tdLog.info(len(sql))
            tdSql.query(sql)
            self.cur1.execute(sql)

        #taos -f sql
        # Replay every SQL statement recorded above through the taos CLI and
        # report the wall-clock time taken.
        startTime_taos_f = time.time()
        print("taos -f %s sql start!" %baselist)
        taos_cmd1 = "taos -f %s/%s.sql" % (self.testcasePath,self.testcaseFilename)
        _ = subprocess.check_output(taos_cmd1, shell=True).decode("utf-8")
        print("taos -f %s sql over!" %baselist)
        endTime_taos_f = time.time()
        print("taos_f total time %ds" % (endTime_taos_f - startTime_taos_f))

        print("=========%s====over=============" %baselist)
+
+ def function_before_26(self):
+
+ print('=====================2.6 old function start ===========')
+ os.system("rm -rf %s/%s.sql" % (self.testcasePath,self.testcaseFilename))
+
+ self.dropandcreateDB_random("%s" %self.db_nest, 1)
+
+ #1 select * from (select column form regular_table where <\>\in\and\or order by)
+ tdSql.query("select 1-1 from stable_1;")
+ for i in range(self.fornum):
+ #sql = "select ts , * from ( select " ===暂时不支持select * ,用下面这一行
+ sql = "select ts from ( select "
+ sql += "%s, " % random.choice(self.s_s_select)
+ sql += "%s, " % random.choice(self.q_select)
+ sql += "ts from regular_table_1 where "
+ sql += "%s " % random.choice(self.q_where)
+ sql += "%s " % random.choice(self.order_where)
+ sql += ");"
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ tdSql.query(sql)
+ tdSql.checkRows(100)
+ self.cur1.execute(sql)
+
+ #1 outer union not support
+ #self.dropandcreateDB_random("%s" %db, 1)
+ tdSql.query("select 1-2 from stable_1;")
+ for i in range(self.fornum):
+ #sql = "select ts , * from ( select "
+ sql = "select ts from ( select "
+ sql += "%s, " % random.choice(self.s_r_select)
+ sql += "%s, " % random.choice(self.q_select)
+ sql += "ts from regular_table_1 where "
+ sql += "%s " % random.choice(self.q_where)
+ sql += "%s " % random.choice(self.order_where)
+ sql += ") union "
+ #sql += "select ts , * from ( select "
+ sql += "select ts from ( select "
+ sql += "%s, " % random.choice(self.s_r_select)
+ sql += "%s, " % random.choice(self.q_select)
+ sql += "ts from regular_table_1 where "
+ sql += "%s " % random.choice(self.q_where)
+ sql += "%s " % random.choice(self.order_where)
+ sql += ")"
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ tdSql.query(sql)
+ tdSql.checkRows(100)
+ self.cur1.execute(sql)
+
+ #self.dropandcreateDB_random("%s" %db, 1)
+ tdSql.query("select 1-2 from stable_1;")
+ for i in range(self.fornum):
+ #sql = "select ts , * from ( select "
+ sql = "select ts from ( select "
+ sql += "%s, " % random.choice(self.s_r_select)
+ sql += "%s, " % random.choice(self.q_select)
+ sql += "ts from regular_table_1 where "
+ sql += "%s " % random.choice(self.q_where)
+ sql += "%s " % random.choice(self.order_where)
+ sql += ") union all "
+ #sql += "select ts , * from ( select "
+ sql += "select ts from ( select "
+ sql += "%s, " % random.choice(self.s_r_select)
+ sql += "%s, " % random.choice(self.q_select)
+ sql += "ts from regular_table_1 where "
+ sql += "%s " % random.choice(self.q_where)
+ sql += "%s " % random.choice(self.order_where)
+ sql += ")"
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ tdSql.query(sql)
+ tdSql.checkRows(200)
+ self.cur1.execute(sql)
+
+ #1 inter union not support
+ tdSql.query("select 1-3 from stable_1;")
+ for i in range(self.fornum):
+ #sql = "select ts , * from ( select "
+ sql = "select ts from ( select "
+ sql += "%s, " % random.choice(self.s_r_select)
+ sql += "%s, " % random.choice(self.q_select)
+ sql += "ts from regular_table_1 where "
+ sql += "%s " % random.choice(self.q_where)
+ sql += ""
+ sql += " union select "
+ sql += "%s, " % random.choice(self.s_r_select)
+ sql += "%s, " % random.choice(self.q_select)
+ sql += "ts from regular_table_2 where "
+ sql += "%s " % random.choice(self.q_where)
+ sql += "%s " % random.choice(self.order_where)
+ sql += ")"
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ #TD-15607 tdSql.query(sql)
+ #tdSql.checkRows(200)
+ #self.cur1.execute(sql)
+
+ tdSql.query("select 1-3 from stable_1;")
+ for i in range(self.fornum):
+ #sql = "select ts , * from ( select "
+ sql = "select ts from ( select "
+ sql += "%s, " % random.choice(self.s_r_select)
+ sql += "%s, " % random.choice(self.q_select)
+ sql += "ts from regular_table_1 where "
+ sql += "%s " % random.choice(self.q_where)
+ sql += " union all select "
+ sql += "%s, " % random.choice(self.s_r_select)
+ sql += "%s, " % random.choice(self.q_select)
+ sql += "ts from regular_table_2 where "
+ sql += "%s " % random.choice(self.q_where)
+ sql += "%s " % random.choice(self.order_where)
+ sql += ")"
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ #TD-15607 tdSql.query(sql)
+ # tdSql.checkRows(300)
+ #self.cur1.execute(sql)
+
+ #join:select * from (select column form regular_table1,regular_table2 where t1.ts=t2.ts and <\>\in\and\or order by)
+ #self.dropandcreateDB_random("%s" %db, 1)
+ tdSql.query("select 1-4 from stable_1;")
+ for i in range(self.fornum):
+ #sql = "select ts , * from ( select t1.ts ,"
+ sql = "select * from ( select t1.ts ,"
+ sql += "t1.%s, " % random.choice(self.q_select)
+ sql += "t1.%s, " % random.choice(self.q_select)
+ sql += "t2.%s, " % random.choice(self.q_select)
+ sql += "t2.%s, " % random.choice(self.q_select)
+ sql += "t2.ts from regular_table_1 t1 , regular_table_2 t2 where t1.ts = t2.ts and "
+ sql += "%s " % random.choice(self.q_u_where)
+ sql += "and %s " % random.choice(self.q_u_or_where)
+ sql += "%s " % random.choice(self.order_u_where)
+ sql += ");"
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ tdSql.query(sql)
+ tdSql.checkRows(100)
+ self.cur1.execute(sql)
+
+
+ #2 select column from (select * form regular_table ) where <\>\in\and\or order by
+ #self.dropandcreateDB_random("%s" %db, 1)
+ tdSql.query("select 2-1 from stable_1;")
+ for i in range(self.fornum):
+ sql = "select ts ,"
+ sql += "%s, " % random.choice(self.s_r_select)
+ sql += "%s " % random.choice(self.q_select)
+ sql += " from ( select * from regular_table_1 ) where "
+ sql += "%s " % random.choice(self.q_where)
+ sql += "%s " % random.choice(self.order_where)
+ sql += " ;"
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ tdSql.query(sql)
+ tdSql.checkRows(100)
+ self.cur1.execute(sql)
+
+ #join: select column from (select column form regular_table1,regular_table2 )where t1.ts=t2.ts and <\>\in\and\or order by
+ #cross join not supported yet
+ tdSql.query("select 2-2 from stable_1;")
+ for i in range(self.fornum):
+ sql = "select ts , * from ( select t1.ts ,"
+ sql += "t1.%s, " % random.choice(self.q_select)
+ sql += "t1.%s, " % random.choice(self.q_select)
+ sql += "t2.%s, " % random.choice(self.q_select)
+ sql += "t2.%s, " % random.choice(self.q_select)
+ sql += "t2.ts from regular_table_1 t1 , regular_table_2 t2 ) where t1.ts = t2.ts and "
+ sql += "%s " % random.choice(self.q_u_where)
+ sql += "%s " % random.choice(self.order_u_where)
+ #sql += ");"
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ tdSql.error(sql)
+
+ #3 select * from (select column\tag form stable where <\>\in\and\or order by )
+ #self.dropandcreateDB_random("%s" %db, 1)
+ tdSql.query("select 3-1 from stable_1;")
+ for i in range(self.fornum):
+ sql = "select * from ( select "
+ sql += "%s, " % random.choice(self.s_s_select)
+ sql += "%s, " % random.choice(self.q_select)
+ sql += "%s, " % random.choice(self.t_select)
+ sql += "ts from stable_1 where "
+ sql += "%s " % random.choice(self.qt_where)
+ sql += "%s " % random.choice(self.order_where)
+ sql += ") ;"
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ tdSql.query(sql)
+ tdSql.checkRows(300)
+ self.cur1.execute(sql)
+ tdSql.query("select 3-1 from stable_1;")
+ for i in range(self.fornum):
+ sql = "select ts, "
+ sql += "%s " % random.choice(self.s_r_select)
+ sql += "from ( select "
+ sql += "%s, " % random.choice(self.s_s_select)
+ sql += "%s, " % random.choice(self.q_select)
+ sql += "%s, " % random.choice(self.t_select)
+ sql += "ts from stable_1 where "
+ sql += "%s " % random.choice(self.qt_where)
+ sql += "%s " % random.choice(self.order_where)
+ sql += ") ;"
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ tdSql.query(sql)
+ tdSql.checkRows(300)
+ self.cur1.execute(sql)
+
+ # select ts,* from (select column\tag form stable1,stable2 where t1.ts = t2.ts and <\>\in\and\or order by )
+ #self.dropandcreateDB_random("%s" %db, 1)
+ tdSql.query("select 3-2 from stable_1;")
+ for i in range(self.fornum):
+ sql = "select ts , * from ( select t1.ts , "
+ sql += "t1.%s, " % random.choice(self.s_s_select)
+ sql += "t1.%s, " % random.choice(self.q_select)
+ sql += "t2.%s, " % random.choice(self.s_s_select)
+ sql += "t2.%s, " % random.choice(self.q_select)
+ sql += "t2.ts from stable_1 t1 , stable_2 t2 where t1.ts = t2.ts and "
+ sql += "%s " % random.choice(self.t_join_where)
+ sql += "%s " % random.choice(self.order_u_where)
+ sql += ") ;"
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ # TD-15609 tdSql.query(sql)
+ # tdSql.checkRows(100)
+ #self.cur1.execute(sql)
+
+ #3 outer union not support
+ self.restartDnodes()
+ tdSql.query("select 3-3 from stable_1;")
+ for i in range(self.fornum):
+ #sql = "select ts , * from ( select "
+ sql = "select ts from ( select "
+ sql += "%s, " % random.choice(self.s_r_select)
+ sql += "%s, " % random.choice(self.q_select)
+ sql += "ts from stable_1 where "
+ sql += "%s " % random.choice(self.q_where)
+ sql += "%s " % random.choice(self.order_where)
+ sql += ") union "
+ sql += "select ts from ( select "
+ sql += "%s, " % random.choice(self.s_r_select)
+ sql += "%s, " % random.choice(self.q_select)
+ sql += "ts from stable_2 where "
+ sql += "%s " % random.choice(self.q_where)
+ sql += "%s " % random.choice(self.order_where)
+ sql += ")"
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ tdSql.query(sql)
+ tdSql.checkRows(300)
+ self.cur1.execute(sql)
+ for i in range(self.fornum):
+ #sql = "select ts , * from ( select "
+ sql = "select ts from ( select "
+ sql += "%s, " % random.choice(self.s_r_select)
+ sql += "%s, " % random.choice(self.q_select)
+ sql += "ts from stable_1 where "
+ sql += "%s " % random.choice(self.q_where)
+ sql += "%s " % random.choice(self.order_where)
+ sql += ") union all "
+ sql += "select ts from ( select "
+ sql += "%s, " % random.choice(self.s_r_select)
+ sql += "%s, " % random.choice(self.q_select)
+ sql += "ts from stable_2 where "
+ sql += "%s " % random.choice(self.q_where)
+ sql += "%s " % random.choice(self.order_where)
+ sql += ")"
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ tdSql.query(sql)
+ tdSql.checkRows(600)
+ self.cur1.execute(sql)
+
+ #3 inter union not support
+ tdSql.query("select 3-4 from stable_1;")
+ for i in range(self.fornum):
+ sql = "select ts , * from ( select "
+ sql += "%s, " % random.choice(self.s_r_select)
+ sql += "%s, " % random.choice(self.q_select)
+ sql += "ts from stable_1 where "
+ sql += "%s " % random.choice(self.q_where)
+ sql += "%s " % random.choice(self.order_where)
+ sql += " %s " % random.choice(self.unionall_or_union)
+ sql += " select "
+ sql += "%s, " % random.choice(self.s_r_select)
+ sql += "%s, " % random.choice(self.q_select)
+ sql += "ts from stable_2 where "
+ sql += "%s " % random.choice(self.q_where)
+ sql += "%s " % random.choice(self.order_where)
+ sql += ")"
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ #TD-15837 tdSql.query(sql)
+ # self.cur1.execute(sql)
+
+ #join:select * from (select column form stable1,stable2 where t1.ts=t2.ts and <\>\in\and\or order by)
+ tdSql.query("select 3-5 from stable_1;")
+ for i in range(self.fornum):
+ sql = "select * from ( select t1.ts ,"
+ sql += "t1.%s, " % random.choice(self.q_select)
+ sql += "t1.%s, " % random.choice(self.q_select)
+ sql += "t2.%s, " % random.choice(self.q_select)
+ sql += "t2.%s, " % random.choice(self.q_select)
+ sql += "t2.ts from stable_1 t1 , stable_2 t2 where t1.ts = t2.ts and "
+ sql += "%s " % random.choice(self.t_u_where)
+ sql += "%s " % random.choice(self.order_u_where)
+ sql += ");"
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ # TD-15609 tdSql.query(sql)
+ # tdSql.checkRows(100)
+ #self.cur1.execute(sql)
+
+ tdSql.query("select 3-6 from stable_1;")
+ for i in range(self.fornum):
+ sql = "select * from ( select t1.ts ,"
+ sql += "t1.%s, " % random.choice(self.q_select)
+ sql += "t1.%s, " % random.choice(self.q_select)
+ sql += "t2.%s, " % random.choice(self.q_select)
+ sql += "t2.%s, " % random.choice(self.q_select)
+ sql += "t2.ts from stable_1 t1 , stable_2 t2 where t1.ts = t2.ts and "
+ sql += "%s " % random.choice(self.t_u_or_where)
+ sql += "%s " % random.choice(self.order_u_where)
+ sql += ");"
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ # TD-15609 同上 tdSql.query(sql)
+ # tdSql.checkRows(100)
+ #self.cur1.execute(sql)
+
+ #4 select column from (select * form stable where <\>\in\and\or order by )
+ #self.dropandcreateDB_random("%s" %db, 1)
+ tdSql.query("select 4-1 from stable_1;")
+ for i in range(self.fornum):
+ sql = "select ts , "
+ sql += "%s, " % random.choice(self.q_select)
+ sql += "%s, " % random.choice(self.q_select)
+ sql += "%s " % random.choice(self.t_select)
+ sql += " from ( select * from stable_1 where "
+ sql += "%s " % random.choice(self.qt_where)
+ sql += "%s " % random.choice(self.order_where)
+ sql += ") ;"
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ tdSql.query(sql)
+ tdSql.checkRows(300)
+ self.cur1.execute(sql)
+
+ #5 select distinct column\tag from (select * form stable where <\>\in\and\or order by limit offset )
+ tdSql.query("select 5-1 from stable_1;")
+ for i in range(self.fornum):
+ sql = "select "
+ sql += "%s " % random.choice(self.dqt_select)
+ sql += " from ( select * from stable_1 where "
+ sql += "%s " % random.choice(self.qt_where)
+ sql += "%s " % random.choice(self.order_where)
+ sql += ") ;"
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ #TD-15500 tdSql.query(sql)
+ #self.cur1.execute(sql)
+
+ #5-1 select distinct column\tag from (select calc form stable where <\>\in\and\or order by limit offset )
+ tdSql.query("select 5-2 from stable_1;")
+ for i in range(self.fornum):
+ sql = "select distinct c5_1 "
+ sql += " from ( select "
+ sql += "%s " % random.choice(self.calc_select_in_ts)
+ sql += " as c5_1 from stable_1 where "
+ sql += "%s " % random.choice(self.qt_where)
+ #sql += "%s " % random.choice(order_where)
+ sql += ") ;"
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ tdSql.query(sql)
+ #tdSql.checkRows(1)有的函数还没有提交,会不返回结果,先忽略
+ self.cur1.execute(sql)
+
+ #6-error select * from (select distinct(tag) form stable where <\>\in\and\or order by limit )
+ tdSql.query("select 6-1 from stable_1;")
+ for i in range(self.fornum):
+ sql = "select * from ( select "
+ sql += "%s " % random.choice(self.dt_select)
+ sql += " from stable_1 where "
+ sql += "%s " % random.choice(self.qt_where)
+ sql += "%s " % random.choice(self.order_desc_where)
+ sql += ") ;"
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ tdSql.error(sql)
+ tdSql.query("select 6-1 from stable_1;")
+ for i in range(self.fornum):
+ sql = "select * from ( select "
+ sql += "%s " % random.choice(self.dt_select)
+ sql += " from stable_1 where "
+ sql += "%s ) ;" % random.choice(self.qt_where)
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ tdSql.query(sql)
+ self.cur1.execute(sql)
+
+ #7-error select * from (select distinct(tag) form stable where <\>\in\and\or order by limit )
+ tdSql.query("select 7-1 from stable_1;")
+ for i in range(self.fornum):
+ sql = "select * from ( select "
+ sql += "%s " % random.choice(self.dq_select)
+ sql += " from stable_1 where "
+ sql += "%s " % random.choice(self.qt_where)
+ sql += "%s " % random.choice(self.order_desc_where)
+ sql += "%s " % random.choice([self.limit_where[0] , self.limit_where[1]] )
+ sql += ") ;"
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ tdSql.error(sql) #distinct 和 order by 不能混合使用
+ tdSql.query("select 7-1 from stable_1;")
+ for i in range(self.fornum):
+ sql = "select * from ( select "
+ sql += "%s " % random.choice(self.dq_select)
+ sql += " from stable_1 where "
+ sql += "%s " % random.choice(self.qt_where)
+ #sql += "%s " % random.choice(order_desc_where)
+ sql += "%s " % random.choice([self.limit_where[0] , self.limit_where[1]] )
+ sql += ") ;"
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ tdSql.query(sql)
+ tdSql.checkRows(1)
+ self.cur1.execute(sql)
+
+ #calc_select,TWA/Diff/Derivative/Irate are not allowed to apply to super table directly
+ #8 select * from (select ts,calc form ragular_table where <\>\in\and\or order by )
+
+ # dcDB = self.dropandcreateDB_random("%s" %db, 1)
+ tdSql.query("select 8-1 from stable_1;")
+ for i in range(self.fornum):
+ sql = "select * from ( select ts ,"
+ sql += "%s " % random.choice(self.calc_select_support_ts)
+ sql += "from regular_table_1 where "
+ sql += "%s " % random.choice(self.q_where)
+ sql += "%s " % random.choice(self.order_where)
+ sql += "%s " % random.choice(self.limit1_where)
+ sql += ") ;"
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ tdSql.query(sql) # 聚合函数不在可以和ts一起使用了 DB error: Not a single-group group function
+ self.cur1.execute(sql)
+ tdSql.query("select 8-1 from stable_1;")
+ for i in range(self.fornum):
+ sql = "select * from ( select "
+ sql += "%s " % random.choice(self.calc_select_not_support_ts)
+ sql += "from regular_table_1 where "
+ sql += "%s " % random.choice(self.q_where)
+ sql += "%s " % random.choice(self.limit1_where)
+ sql += ") ;"
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ #TD-15651 tdSql.query(sql) # 聚合函数不在可以和ts一起使用了 DB error: Not a single-group group function
+ #self.cur1.execute(sql)
+
+ for i in range(self.fornum):
+ sql = "select * from ( select "
+ sql += "%s " % random.choice(self.calc_select_in_ts)
+ sql += "from regular_table_1 where "
+ sql += "%s " % random.choice(self.q_where)
+ #sql += "%s " % random.choice(order_where)
+ sql += "%s " % random.choice(self.limit1_where)
+ sql += ") ;"
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ tdSql.query(sql)
+ self.cur1.execute(sql)
+
+ tdSql.query("select 8-2 from stable_1;")
+ for i in range(self.fornum):
+ sql = "select * from ( select t1.ts, "
+ sql += "%s " % random.choice(self.calc_select_in_support_ts_j)
+ sql += "from regular_table_1 t1, regular_table_2 t2 where t1.ts = t2.ts and "
+ sql += "%s " % random.choice(self.q_u_where)
+ sql += "%s " % random.choice(self.order_u_where)
+ sql += "%s " % random.choice(self.limit1_where)
+ sql += ") ;"
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ tdSql.query(sql)# 聚合函数不在可以和ts一起使用了 DB error: Not a single-group group function
+ self.cur1.execute(sql)
+ for i in range(self.fornum):
+ sql = "select * from ( select "
+ sql += "%s " % random.choice(self.calc_select_in_not_support_ts_j)
+ sql += "from regular_table_1 t1, regular_table_2 t2 where t1.ts = t2.ts and "
+ sql += "%s " % random.choice(self.q_u_where)
+ sql += "%s " % random.choice(self.limit1_where)
+ sql += ") ;"
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ #TD-15651 tdSql.query(sql)
+ ##top返回结果有问题 tdSql.checkRows(1)
+ #self.cur1.execute(sql)
+
+ #9 select * from (select ts,calc form stable where <\>\in\and\or order by )
+ # self.dropandcreateDB_random("%s" %db, 1)
+ tdSql.query("select 9-1 from stable_1;")
+ for i in range(self.fornum):
+ sql = "select * from ( select "
+ sql += "%s " % random.choice(self.calc_select_not_support_ts)
+ sql += "from stable_1 where "
+ sql += "%s " % random.choice(self.qt_where)
+ sql += "%s " % random.choice(self.limit1_where)
+ sql += ") ;"
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ #TD-15651 tdSql.query(sql)
+ # self.cur1.execute(sql)
+ tdSql.query("select 9-2 from stable_1;")
+ for i in range(self.fornum):
+ sql = "select * from ( select ts ,"
+ sql += "%s " % random.choice(self.calc_select_support_ts)
+ sql += "from stable_1 where "
+ sql += "%s " % random.choice(self.qt_where)
+ sql += "%s " % random.choice(self.order_where)
+ sql += "%s " % random.choice(self.limit1_where)
+ sql += ") ;"
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ tdSql.query(sql)
+ self.cur1.execute(sql)
+
+ tdSql.query("select 9-3 from stable_1;")
+ for i in range(self.fornum):
+ sql = "select * from ( select "
+ sql += "%s " % random.choice(self.calc_select_in_not_support_ts_j)
+ sql += "from stable_1 t1, stable_2 t2 where t1.ts = t2.ts and "
+ sql += "%s " % random.choice(self.t_join_where)
+ sql += " and %s " % random.choice(self.qt_u_or_where)
+ sql += "%s " % random.choice(self.limit1_where)
+ sql += ") ;"
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ #TD-15651 tdSql.query(sql)
+ #self.cur1.execute(sql)
+ tdSql.query("select 9-4 from stable_1;")
+ for i in range(self.fornum):
+ sql = "select * from ( select t1.ts,"
+ sql += "%s " % random.choice(self.calc_select_in_support_ts_j)
+ sql += "from stable_1 t1, stable_2 t2 where t1.ts = t2.ts and "
+ sql += "%s " % random.choice(self.t_join_where)
+ sql += " and %s " % random.choice(self.qt_u_or_where)
+ sql += "%s " % random.choice(self.order_u_where)
+ sql += "%s " % random.choice(self.limit1_where)
+ sql += ") ;"
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ tdSql.query(sql)
+ self.cur1.execute(sql)
+
+ #10 select calc from (select * form regualr_table where <\>\in\and\or order by )
+ tdSql.query("select 10-1 from stable_1;")
+ for i in range(self.fornum):
+ sql = "select "
+ sql += "%s " % random.choice(self.calc_select_in_ts)
+ sql += "as calc10_1 from ( select * from regular_table_1 where "
+ sql += "%s " % random.choice(self.q_where)
+ sql += "%s " % random.choice(self.order_desc_where)
+ sql += "%s " % random.choice(self.limit1_where)
+ sql += ") ;"
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ tdSql.query(sql)
+ tdSql.checkRows(1)
+ self.cur1.execute(sql)
+
+ #10-1 select calc from (select * form regualr_table where <\>\in\and\or order by )
+ # rsDn = self.restartDnodes()
+ # self.dropandcreateDB_random("%s" %db, 1)
+ # rsDn = self.restartDnodes()
+ tdSql.query("select 10-2 from stable_1;")
+ for i in range(self.fornum):
+ sql = "select "
+ sql += "%s " % random.choice(self.calc_select_all)
+ sql += "as calc10_2 from ( select * from regular_table_1 where "
+ sql += "%s " % random.choice(self.q_where)
+ sql += "%s " % random.choice(self.order_desc_where)
+ sql += "%s " % random.choice(self.limit1_where)
+ sql += ") ;"
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ #TD-15651 tdSql.query(sql)
+ # tdSql.checkRows(1)
+ #self.cur1.execute(sql)
+
+ #10-2 select calc from (select * form regualr_tables where <\>\in\and\or order by )
+ tdSql.query("select 10-3 from stable_1;")
+ for i in range(self.fornum):
+ sql = "select "
+ sql += "%s as calc10_3 " % random.choice(self.calc_select_all)
+ sql += " from ( select * from regular_table_1 t1, regular_table_2 t2 where t1.ts = t2.ts and "
+ sql += "%s " % random.choice(self.q_u_where)
+ sql += " and %s " % random.choice(self.q_u_or_where)
+ sql += "%s " % random.choice(self.order_u_where)
+ sql += "%s " % random.choice(self.limit_u_where)
+ sql += ") "
+ sql += "%s ;" % random.choice(self.limit1_where)
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ #TD-15651 tdSql.query(sql)
+ #self.cur1.execute(sql)
+
+ tdSql.query("select 10-4 from stable_1;")
+ for i in range(self.fornum):
+ sql = "select "
+ sql += "%s as calc10_4 " % random.choice(self.calc_select_all)
+ sql += " from ( select * from regular_table_1 t1, regular_table_2 t2 where t1.ts = t2.ts and "
+ sql += "%s " % random.choice(self.q_u_or_where)
+ sql += " and %s " % random.choice(self.q_u_or_where)
+ sql += "%s " % random.choice(self.order_u_where)
+ sql += "%s " % random.choice(self.limit_u_where)
+ sql += ") "
+ sql += "%s ;" % random.choice(self.limit_u_where)
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ #TD-15651 tdSql.query(sql)
+ # tdSql.checkRows(1)
+ #self.cur1.execute(sql)
+
+ #11 select calc from (select * form stable where <\>\in\and\or order by limit )
+ tdSql.query("select 11-1 from stable_1;")
+ for i in range(self.fornum):
+ sql = "select "
+ sql += "%s " % random.choice(self.calc_select_in_ts)
+ sql += "as calc11_1 from ( select * from stable_1 where "
+ sql += "%s " % random.choice(self.qt_where)
+ sql += "%s " % random.choice(self.order_desc_where)
+ sql += "%s " % random.choice(self.limit1_where)
+ sql += ") ;"
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ tdSql.query(sql)
+ tdSql.checkRows(1)
+ self.cur1.execute(sql)
+
+ #11-1 select calc from (select * form stable where <\>\in\and\or order by limit )
+ tdSql.query("select 11-2 from stable_1;")
+ for i in range(self.fornum):
+ sql = "select "
+ sql += "%s " % random.choice(self.calc_select_all)
+ sql += "as calc11_1 from ( select * from stable_1 where "
+ sql += "%s " % random.choice(self.qt_where)
+ sql += "%s " % random.choice(self.order_desc_where)
+ sql += "%s " % random.choice(self.limit1_where)
+ sql += ") ;"
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ #TD-15651 tdSql.query(sql)
+ #self.cur1.execute(sql)
+ #不好计算结果 tdSql.checkRows(1)
+
+ #11-2 select calc from (select * form stables where <\>\in\and\or order by limit )
+ tdSql.query("select 11-3 from stable_1;")
+ for i in range(self.fornum):
+ sql = "select "
+ sql += "%s " % random.choice(self.calc_select_all)
+ sql += "as calc11_1 from ( select * from stable_1 t1, stable_2 t2 where t1.ts = t2.ts and "
+ sql += "%s " % random.choice(self.t_join_where)
+ sql += "%s " % random.choice(self.order_u_where)
+ sql += "%s " % random.choice(self.limit_u_where)
+ sql += ") "
+ sql += "%s ;" % random.choice(self.limit_u_where)
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ #TD-15651 tdSql.query(sql)
+ #self.cur1.execute(sql)
+
+ tdSql.query("select 11-4 from stable_1;")
+ for i in range(self.fornum):
+ sql = "select "
+ sql += "%s " % random.choice(self.calc_select_all)
+ sql += "as calc11_1 from ( select * from stable_1 t1, stable_2 t2 where t1.ts = t2.ts and "
+ sql += "%s " % random.choice(self.qt_u_or_where)
+ sql += "%s " % random.choice(self.order_u_where)
+ sql += "%s " % random.choice(self.limit_u_where)
+ sql += ") "
+ sql += "%s ;" % random.choice(self.limit_u_where)
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ tdLog.info(len(sql))
+ #TD-15651 tdSql.query(sql)
+ #self.cur1.execute(sql)
+
+ #12 select calc-diff from (select * form regualr_table where <\>\in\and\or order by limit )
+ ##self.dropandcreateDB_random("%s" %db, 1)
+ tdSql.query("select 12-1 from stable_1;")
+ for i in range(self.fornum):
+ sql = "select "
+ sql += "%s " % random.choice(self.calc_calculate_regular)
+ sql += " from ( select * from regular_table_1 where "
+ sql += "%s " % random.choice(self.q_where)
+ sql += "%s " % random.choice(self.order_desc_where)
+ sql += "%s " % random.choice([self.limit_where[2] , self.limit_where[3]] )
+ sql += ") ;"
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ ##目前derivative不支持 tdSql.query(sql)
+ # tdSql.checkRows(1)
+ #self.cur1.execute(sql)
+
+ tdSql.query("select 12-2 from stable_1;")
+ for i in range(self.fornum):
+ sql = "select "
+ sql += "%s " % random.choice(self.calc_calculate_regular)
+ sql += " from ( select * from regular_table_1 t1, regular_table_2 t2 where t1.ts = t2.ts and "
+ sql += "%s " % random.choice(self.q_u_where)
+ sql += "%s " % random.choice(self.order_u_where)
+ sql += "%s " % random.choice([self.limit_where[2] , self.limit_where[3]] )
+ sql += ") ;"
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ #目前derivative不支持 tdSql.query(sql)
+ # tdSql.checkRows(1)
+ #self.cur1.execute(sql)
+
+ tdSql.query("select 12-2.2 from stable_1;")
+ for i in range(self.fornum):
+ sql = "select "
+ sql += "%s " % random.choice(self.calc_calculate_regular)
+ sql += " from ( select * from regular_table_1 t1, regular_table_2 t2 where t1.ts = t2.ts and "
+ sql += "%s " % random.choice(self.q_u_or_where)
+ sql += "%s " % random.choice(self.order_u_where)
+ sql += "%s " % random.choice([self.limit_where[2] , self.limit_where[3]] )
+ sql += ") ;"
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ #目前derivative不支持 tdSql.query(sql)
+ #self.cur1.execute(sql)
+
+ #12-1 select calc-diff from (select * form stable where <\>\in\and\or order by limit )
+ tdSql.query("select 12-3 from stable_1;")
+ self.restartDnodes()
+ for i in range(self.fornum):
+ sql = "select * from ( select "
+ sql += "%s " % random.choice(self.calc_calculate_regular)
+ sql += " from stable_1 where "
+ sql += "%s " % random.choice(self.q_where)
+ sql += "%s " % random.choice(self.group_where)
+ sql += ") "
+ sql += "%s " % random.choice(self.order_desc_where)
+ sql += "%s " % random.choice([self.limit_where[2] , self.limit_where[3]] )
+ sql += " ;"
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ #目前derivative不支持 tdSql.query(sql)
+ #self.cur1.execute(sql)
+
+ tdSql.query("select 12-4 from stable_1;")
+ #join query does not support group by
+ for i in range(self.fornum):
+ sql = "select * from ( select "
+ sql += "%s " % random.choice(self.calc_calculate_regular_j)
+ sql += " from stable_1 t1, stable_2 t2 where t1.ts = t2.ts and "
+ sql += "%s " % random.choice(self.t_join_where)
+ sql += "%s " % random.choice(self.group_where_j)
+ sql += ") "
+ #sql += "%s " % random.choice(self.order_desc_where)
+ sql += "%s " % random.choice([self.limit_where[2] , self.limit_where[3]] )
+ sql += " ;"
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ #tdSql.query(sql) 目前de函数不支持,另外看看需要不需要将group by和pari by分开
+ #self.cur1.execute(sql)
+
+ tdSql.query("select 12-5 from stable_1;")
+ #join query does not support group by
+ for i in range(self.fornum):
+ sql = "select * from ( select "
+ sql += "%s " % random.choice(self.calc_calculate_regular_j)
+ sql += " from stable_1 t1, stable_2 t2 where t1.ts = t2.ts and "
+ sql += "%s " % random.choice(self.qt_u_or_where)
+ sql += "%s " % random.choice(self.group_where_j)
+ sql += ") "
+ sql += "%s " % random.choice(self.order_desc_where)
+ sql += "%s " % random.choice([self.limit_where[2] , self.limit_where[3]] )
+ sql += " ;"
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ #derivative not support tdSql.query(sql)
+ #self.cur1.execute(sql)
+
+
+ #13 select calc-diff as diffns from (select * form stable where <\>\in\and\or order by limit )
+ tdSql.query("select 13-1 from stable_1;")
+ for i in range(self.fornum):
+ sql = "select "
+ sql += "%s " % random.choice(self.calc_calculate_regular)
+ sql += " as calc13_1 from ( select * from stable_1 where "
+ sql += "%s " % random.choice(self.qt_where)
+ sql += "%s " % random.choice(self.orders_desc_where)
+ sql += "%s " % random.choice([self.limit_where[2] , self.limit_where[3]] )
+ sql += ") ;"
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ #derivative not support tdSql.query(sql)
+ #self.cur1.execute(sql)
+
+ #14 select * from (select calc_aggregate_alls as agg from stable where <\>\in\and\or group by order by slimit soffset )
+ # TD-5955 select * from ( select count (q_double) from stable_1 where t_bool = true or t_bool = false group by loc order by ts asc slimit 1 ) ;
+ tdSql.query("select 14-1 from stable_1;")
+ for i in range(self.fornum):
+ sql = "select * from ( select "
+ sql += "%s as calc14_1, " % random.choice(self.calc_aggregate_all)
+ sql += "%s as calc14_2, " % random.choice(self.calc_aggregate_all)
+ sql += "%s " % random.choice(self.calc_aggregate_all)
+ sql += " as calc14_3 from stable_1 where "
+ sql += "%s " % random.choice(self.qt_where)
+ sql += "%s " % random.choice(self.group_where)
+ sql += "%s " % random.choice(self.order_desc_where)
+ sql += "%s " % random.choice(self.slimit1_where)
+ sql += ") ;"
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ #TD-15678 tdSql.query(sql)
+ # tdSql.checkRows(1)
+ #self.cur1.execute(sql)
+
+ # error group by in out query
+ tdSql.query("select 14-2 from stable_1;")
+ for i in range(self.fornum):
+ sql = "select * from ( select "
+ sql += "%s as calc14_1, " % random.choice(self.calc_aggregate_all)
+ sql += "%s as calc14_2, " % random.choice(self.calc_aggregate_all)
+ sql += "%s " % random.choice(self.calc_aggregate_all)
+ sql += " as calc14_3 from stable_1 where "
+ sql += "%s " % random.choice(self.qt_where)
+ sql += "%s " % random.choice(self.group_where)
+ sql += "%s " % random.choice(self.having_support)
+ sql += "%s " % random.choice(self.orders_desc_where)
+ sql += "%s " % random.choice(self.slimit1_where)
+ sql += ") "
+ sql += "%s " % random.choice(self.group_where)
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ #TD-15678 tdSql.query(sql)
+ # tdSql.checkRows(1)
+ #self.cur1.execute(sql)
+
+ #14-2 select * from (select calc_aggregate_all_js as agg from stables where <\>\in\and\or group by order by slimit soffset )
+ tdSql.query("select 14-3 from stable_1;")
+ for i in range(self.fornum):
+ sql = "select * from ( select "
+ sql += "%s as calc14_1, " % random.choice(self.calc_aggregate_all_j)
+ sql += "%s as calc14_2, " % random.choice(self.calc_aggregate_all_j)
+ sql += "%s " % random.choice(self.calc_aggregate_all_j)
+ sql += " as calc14_3 from stable_1 t1, stable_2 t2 where t1.ts = t2.ts and "
+ sql += "%s " % random.choice(self.t_join_where)
+ sql += "%s " % random.choice(self.partiton_where_j)
+ sql += "%s " % random.choice(self.slimit1_where)
+ sql += ") "
+ sql += "%s ;" % random.choice(self.limit_u_where)
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ tdSql.query(sql)
+ self.cur1.execute(sql)
+
+ tdSql.query("select 14-4 from stable_1;")
+ for i in range(self.fornum):
+ sql = "select * from ( select "
+ sql += "%s as calc14_1, " % random.choice(self.calc_aggregate_all_j)
+ sql += "%s as calc14_2, " % random.choice(self.calc_aggregate_all_j)
+ sql += "%s " % random.choice(self.calc_aggregate_all_j)
+ sql += " as calc14_3 from stable_1 t1, stable_2 t2 where t1.ts = t2.ts and "
+ sql += "%s " % random.choice(self.qt_u_or_where)
+ sql += "%s " % random.choice(self.partiton_where_j)
+ sql += "%s " % random.choice(self.slimit1_where)
+ sql += ") "
+ sql += "%s ;" % random.choice(self.limit_u_where)
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ tdSql.query(sql)
+ self.cur1.execute(sql)
+
+ #15 select * from (select calc_aggregate_regulars as agg from regular_table where <\>\in\and\or order by slimit soffset )
+ tdSql.query("select 15-1 from stable_1;")
+ for i in range(self.fornum):
+ sql = "select * from ( select "
+ sql += "%s as calc15_1, " % random.choice(self.calc_aggregate_regular)
+ sql += "%s as calc15_2, " % random.choice(self.calc_aggregate_regular)
+ sql += "%s " % random.choice(self.calc_aggregate_regular)
+ sql += " as calc15_3 from regular_table_1 where "
+ sql += "%s " % random.choice(self.q_where)
+ sql += "%s " % random.choice(self.group_where_regular)
+ sql += ") ;"
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ #tdSql.query(sql) #Invalid function name: twa'
+ # tdSql.checkRows(1)
+ #self.cur1.execute(sql)
+
+ tdSql.query("select 15-2 from stable_1;")
+ for i in range(self.fornum):
+ sql = "select * from ( select "
+ sql += "%s as calc15_1, " % random.choice(self.calc_aggregate_regular_j)
+ sql += "%s as calc15_2, " % random.choice(self.calc_aggregate_regular_j)
+ sql += "%s " % random.choice(self.calc_aggregate_regular_j)
+ sql += " as calc15_3 from regular_table_1 t1, regular_table_2 t2 where t1.ts = t2.ts and "
+ sql += "%s " % random.choice(self.q_u_where)
+ sql += "%s " % random.choice(self.group_where_regular_j)
+ sql += "%s " % random.choice(self.limit_u_where)
+ sql += ") "
+ sql += "%s ;" % random.choice(self.limit_u_where)
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ #tdSql.query(sql) #Invalid function name: twa'
+ #self.cur1.execute(sql)
+
+ tdSql.query("select 15-2.2 from stable_1;")
+ for i in range(self.fornum):
+ sql = "select * from ( select "
+ sql += "%s as calc15_1, " % random.choice(self.calc_aggregate_regular_j)
+ sql += "%s as calc15_2, " % random.choice(self.calc_aggregate_regular_j)
+ sql += "%s " % random.choice(self.calc_aggregate_regular_j)
+ sql += " as calc15_3 from regular_table_1 t1, regular_table_2 t2 where t1.ts = t2.ts and "
+ sql += "%s " % random.choice(self.q_u_or_where)
+ sql += "%s " % random.choice(self.group_where_regular_j)
+ sql += "%s " % random.choice(self.limit_u_where)
+ sql += ") "
+ sql += "%s ;" % random.choice(self.limit_u_where)
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ #tdSql.query(sql) #Invalid function name: twa'
+ #self.cur1.execute(sql)
+
+ self.restartDnodes()
+ tdSql.query("select 15-3 from stable_1;")
+ for i in range(self.fornum):
+ sql = "select * from ( select "
+ sql += "%s as calc15_1, " % random.choice(self.calc_aggregate_groupbytbname)
+ sql += "%s as calc15_2, " % random.choice(self.calc_aggregate_groupbytbname)
+ sql += "%s " % random.choice(self.calc_aggregate_groupbytbname)
+ sql += " as calc15_3 from stable_1 where "
+ sql += "%s " % random.choice(self.q_where)
+ sql += "%s " % random.choice(self.group_where)
+ sql += "%s " % random.choice(self.having_support)
+ sql += "%s " % random.choice(self.order_desc_where)
+ sql += ") "
+ sql += "order by calc15_1 "
+ sql += "%s " % random.choice(self.limit_where)
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ #tdSql.query(sql) #Invalid function name: twa'; may also need to remove the order by clause
+ #self.cur1.execute(sql)
+
+ tdSql.query("select 15-4 from stable_1;")
+ for i in range(self.fornum):
+ sql = "select * from ( select "
+ sql += "%s as calc15_1, " % random.choice(self.calc_aggregate_groupbytbname_j)
+ sql += "%s as calc15_2, " % random.choice(self.calc_aggregate_groupbytbname_j)
+ sql += "%s " % random.choice(self.calc_aggregate_groupbytbname_j)
+ sql += " as calc15_3 from stable_1 t1, stable_2 t2 where t1.ts = t2.ts and "
+ sql += "%s " % random.choice(self.t_join_where)
+ sql += "%s " % random.choice(self.group_where_j)
+ sql += "%s " % random.choice(self.having_support_j)
+ #sql += "%s " % random.choice(orders_desc_where)
+ sql += ") "
+ sql += "order by calc15_1 "
+ sql += "%s " % random.choice(self.limit_u_where)
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ #tdSql.query(sql) #'Invalid function name: irate'
+ #self.cur1.execute(sql)
+
+ tdSql.query("select 15-4.2 from stable_1;")
+ for i in range(self.fornum):
+ sql = "select * from ( select "
+ sql += "%s as calc15_1, " % random.choice(self.calc_aggregate_groupbytbname_j)
+ sql += "%s as calc15_2, " % random.choice(self.calc_aggregate_groupbytbname_j)
+ sql += "%s " % random.choice(self.calc_aggregate_groupbytbname_j)
+ sql += " as calc15_3 from stable_1 t1, stable_2 t2 where t1.ts = t2.ts and "
+ sql += "%s " % random.choice(self.qt_u_or_where)
+ sql += "%s " % random.choice(self.group_where_j)
+ sql += "%s " % random.choice(self.having_support_j)
+ sql += "%s " % random.choice(self.orders_desc_where)
+ sql += ") "
+ sql += "order by calc15_1 "
+ sql += "%s " % random.choice(self.limit_u_where)
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ #TD-15678 #tdSql.query(sql)
+ #self.cur1.execute(sql)
+
+ tdSql.query("select 15-5 from stable_1;")
+ for i in range(self.fornum):
+ sql = "select * from ( select "
+ sql += "%s as calc15_1, " % random.choice(self.calc_aggregate_groupbytbname)
+ sql += "%s as calc15_2, " % random.choice(self.calc_aggregate_groupbytbname)
+ sql += "%s " % random.choice(self.calc_aggregate_groupbytbname)
+ sql += " as calc15_3 from stable_1 where "
+ sql += "%s " % random.choice(self.q_where)
+ sql += "%s " % random.choice(self.group_where)
+ sql += ") "
+ sql += "order by calc15_1 "
+ sql += "%s " % random.choice(self.limit_where)
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ #tdSql.query(sql) #'Invalid function name: irate'
+ #self.cur1.execute(sql)
+
+ #16 select * from (select calc_aggregate_regulars as agg from regular_table where <\>\in\and\or order by limit offset )
+ #self.dropandcreateDB_random("%s" %db, 1)
+ tdSql.query("select 16-1 from stable_1;")
+ for i in range(self.fornum):
+ sql = "select * from ( select "
+ sql += "%s as calc16_0 , " % random.choice(self.calc_calculate_all)
+ sql += "%s as calc16_1 , " % random.choice(self.calc_aggregate_all)
+ sql += "%s as calc16_2 " % random.choice(self.calc_select_in)
+ sql += " from stable_1 where "
+ sql += "%s " % random.choice(self.q_where)
+ sql += "%s " % random.choice(self.group_where)
+ #sql += "%s " % random.choice(having_support) # having and partition cannot be mixed
+ sql += ") "
+ sql += "order by calc16_0 "
+ sql += "%s " % random.choice(self.limit1_where)
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ #TD-15651 tdSql.query(sql)
+ #self.cur1.execute(sql)
+
+ tdSql.query("select 16-2 from stable_1;")
+ for i in range(self.fornum):
+ sql = "select * from ( select "
+ sql += "%s as calc16_0 " % random.choice(self.calc_calculate_all_j)
+ sql += ", %s as calc16_1 " % random.choice(self.calc_aggregate_all_j)
+ #sql += ", %s as calc16_2 " % random.choice(self.calc_select_in_j)
+ sql += " from stable_1 t1, stable_2 t2 where t1.ts = t2.ts and "
+ sql += "%s " % random.choice(self.t_join_where)
+ sql += ") "
+ sql += "order by calc16_0 "
+ sql += "%s " % random.choice(self.limit1_where)
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ tdSql.query(sql)
+ self.cur1.execute(sql)
+
+ tdSql.query("select 16-2.2 from stable_1;")
+ for i in range(self.fornum):
+ sql = "select * from ( select "
+ sql += "%s as calc16_0 " % random.choice(self.calc_calculate_all_j)
+ sql += ", %s as calc16_1 " % random.choice(self.calc_aggregate_all_j)
+ sql += " from stable_1 t1, stable_2 t2 where t1.ts = t2.ts and "
+ sql += "%s " % random.choice(self.qt_u_or_where)
+ sql += ") "
+ sql += "order by calc16_0 "
+ sql += "%s " % random.choice(self.limit1_where)
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ tdSql.query(sql)
+ self.cur1.execute(sql)
+
+ tdSql.query("select 16-3 from stable_1;")
+ for i in range(self.fornum):
+ sql = "select * from ( select "
+ sql += "%s as calc16_1 " % random.choice(self.calc_calculate_regular)
+ sql += " from regular_table_1 where "
+ sql += "%s " % random.choice(self.q_where)
+ sql += "limit 2 ) "
+ sql += "%s " % random.choice(self.limit1_where)
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ #tdSql.query(sql)#Invalid function name: derivative'
+ #self.cur1.execute(sql)
+
+ tdSql.query("select 16-4 from stable_1;")
+ for i in range(self.fornum):
+ sql = "select * from ( select "
+ sql += "%s as calc16_1 " % random.choice(self.calc_calculate_regular_j)
+ sql += " from regular_table_1 t1, regular_table_2 t2 where t1.ts = t2.ts and "
+ sql += "%s " % random.choice(self.q_u_where)
+ sql += "limit 2 ) "
+ sql += "%s " % random.choice(self.limit1_where)
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ #tdSql.query(sql)#Invalid function name: derivative'
+ #self.cur1.execute(sql)
+
+ tdSql.query("select 16-4.2 from stable_1;")
+ for i in range(self.fornum):
+ sql = "select * from ( select "
+ sql += "%s as calc16_1 " % random.choice(self.calc_calculate_regular_j)
+ sql += " from regular_table_1 t1, regular_table_2 t2 where t1.ts = t2.ts and "
+ sql += "%s " % random.choice(self.q_u_or_where)
+ sql += "limit 2 ) "
+ sql += "%s " % random.choice(self.limit1_where)
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ #tdSql.query(sql)#Invalid function name: derivative'
+ #self.cur1.execute(sql)
+
+ tdSql.query("select 16-5 from stable_1;")
+ for i in range(self.fornum):
+ sql = "select * from ( select "
+ sql += "%s as calc16_1 , " % random.choice(self.calc_calculate_all)
+ sql += "%s as calc16_1 , " % random.choice(self.calc_calculate_regular)
+ sql += "%s as calc16_2 " % random.choice(self.calc_select_all)
+ sql += " from stable_1 where "
+ sql += "%s " % random.choice(self.q_where)
+ sql += "%s " % random.choice(self.group_where)
+ #sql += "%s " % random.choice(having_support)
+ sql += ") "
+ sql += "order by calc16_1 "
+ sql += "%s " % random.choice(self.limit1_where)
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ # tdSql.query(sql)
+ #self.cur1.execute(sql)
+
+ tdSql.query("select 16-6 from stable_1;")
+ for i in range(self.fornum):
+ sql = "select * from ( select "
+ sql += "%s as calc16_1 " % random.choice(self.calc_calculate_groupbytbname)
+ sql += " from stable_1 where "
+ sql += "%s " % random.choice(self.q_where)
+ sql += "%s " % random.choice(self.group_where)
+ sql += "limit 2 ) "
+ sql += "%s " % random.choice(self.limit1_where)
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ #Invalid function name: derivative' tdSql.query(sql)
+ #self.cur1.execute(sql)
+
+ tdSql.query("select 16-7 from stable_1;")
+ for i in range(self.fornum):
+ sql = "select * from ( select "
+ sql += "%s as calc16_1 " % random.choice(self.calc_calculate_groupbytbname_j)
+ sql += " from stable_1 t1, stable_2 t2 where t1.ts = t2.ts and "
+ sql += "%s " % random.choice(self.t_join_where)
+ sql += "limit 2 ) "
+ sql += "%s " % random.choice(self.limit1_where)
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ #Invalid function name: derivative' tdSql.query(sql)
+ #self.cur1.execute(sql)
+
+ tdSql.query("select 16-8 from stable_1;")
+ for i in range(self.fornum):
+ sql = "select * from ( select "
+ sql += "%s as calc16_1 " % random.choice(self.calc_calculate_groupbytbname_j)
+ sql += " from stable_1 t1, stable_2 t2 where t1.ts = t2.ts and "
+ sql += "%s " % random.choice(self.qt_u_or_where)
+ sql += "limit 2 ) "
+ sql += "%s " % random.choice(self.limit1_where)
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ #Invalid function name: derivative' tdSql.query(sql)
+ #self.cur1.execute(sql)
+
+ #17 select apercentile from (select calc_aggregate_alls from regular_table or stable where <\>\in\and\or interval_sliding group by having order by limit offset )interval_sliding
+ #self.dropandcreateDB_random("%s" %db, 1)
+ tdSql.query("select 17-1 from stable_1;")
+ for i in range(self.fornum):
+ #this is having_support , but tag-select cannot mix with last_row,other select can
+ sql = "select apercentile(cal17_0, %d)/10 ,apercentile(cal17_1, %d)/1000 ,apercentile(cal17_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000))
+ sql += "%s as cal17_0 , " % random.choice(self.calc_calculate_all)
+ sql += "%s as cal17_1 ," % random.choice(self.calc_aggregate_all)
+ sql += "%s as cal17_2 " % random.choice(self.calc_aggregate_all)
+ sql += " from stable_1 where "
+ sql += "%s " % random.choice(self.qt_where)
+ sql += "%s " % random.choice(self.partiton_where)
+ sql += "%s " % random.choice(self.interval_sliding)
+ #sql += "%s " % random.choice(having_support)
+ #sql += "%s " % random.choice(order_where)
+ sql += "%s " % random.choice(self.limit1_where)
+ sql += ") "
+ #sql += "%s " % random.choice(interval_sliding)
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ tdSql.query(sql)
+ self.cur1.execute(sql)
+
+ tdSql.query("select 17-2 from stable_1;")
+ for i in range(self.fornum):
+ #this is having_support , but tag-select cannot mix with last_row,other select can
+ sql = "select apercentile(cal17_0, %d)/10 ,apercentile(cal17_1, %d)/1000 ,apercentile(cal17_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000))
+ sql += "%s as cal17_0 , " % random.choice(self.calc_calculate_all_j)
+ sql += "%s as cal17_1 ," % random.choice(self.calc_aggregate_all_j)
+ sql += "%s as cal17_2 " % random.choice(self.calc_aggregate_all_j)
+ sql += " from stable_1 t1, stable_2 t2 where t1.ts = t2.ts and "
+ sql += "%s " % random.choice(self.t_join_where)
+ sql += "%s " % random.choice(self.interval_sliding)
+ sql += "%s " % random.choice(self.limit_u_where)
+ sql += ") "
+ #sql += "%s " % random.choice(interval_sliding)
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ tdSql.query(sql)
+ self.cur1.execute(sql)
+
+ tdSql.query("select 17-2.2 from stable_1;")
+ for i in range(self.fornum):
+ #this is having_support , but tag-select cannot mix with last_row,other select can
+ sql = "select apercentile(cal17_0, %d)/10 ,apercentile(cal17_1, %d)/1000 ,apercentile(cal17_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000))
+ sql += "%s as cal17_0 , " % random.choice(self.calc_calculate_all_j)
+ sql += "%s as cal17_1 ," % random.choice(self.calc_aggregate_all_j)
+ sql += "%s as cal17_2 " % random.choice(self.calc_aggregate_all_j)
+ sql += " from stable_1 t1, stable_2 t2 where t1.ts = t2.ts and "
+ sql += "%s " % random.choice(self.qt_u_or_where)
+ sql += "%s " % random.choice(self.interval_sliding)
+ #sql += "%s " % random.choice(self.order_u_where)
+ sql += "%s " % random.choice(self.limit_u_where)
+ sql += ") "
+ #sql += "%s " % random.choice(self.interval_sliding)
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ tdSql.query(sql)
+ self.cur1.execute(sql)
+
+ self.restartDnodes()
+ tdSql.query("select 17-3 from stable_1;")
+ for i in range(self.fornum):
+ #this is having_tagnot_support , because tag-select cannot mix with last_row...
+ sql = "select apercentile(cal17_1, %d)/1000 ,apercentile(cal17_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000))
+ sql += "%s as cal17_1 ," % random.choice(self.calc_aggregate_all)
+ sql += "%s as cal17_2 " % random.choice(self.calc_aggregate_all)
+ sql += " from stable_1 where "
+ sql += "%s " % random.choice(self.q_where)
+ sql += "%s " % random.choice(self.partiton_where)
+ sql += "%s " % random.choice(self.interval_sliding)
+ #sql += "%s " % random.choice(self.having_tagnot_support)
+ #sql += "%s " % random.choice(self.order_where)
+ sql += "%s " % random.choice(self.limit1_where)
+ sql += ") "
+ #sql += "%s " % random.choice(self.interval_sliding)
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ tdSql.query(sql)
+ self.cur1.execute(sql)
+
+ tdSql.query("select 17-4 from stable_1;")
+ for i in range(self.fornum):
+ #this is having_tagnot_support , because tag-select cannot mix with last_row...
+ sql = "select apercentile(cal17_1, %d)/1000 ,apercentile(cal17_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000))
+ sql += "%s as cal17_1 ," % random.choice(self.calc_aggregate_all_j)
+ sql += "%s as cal17_2 " % random.choice(self.calc_aggregate_all_j)
+ sql += " from stable_1 t1, stable_2 t2 where t1.ts = t2.ts and "
+ sql += "%s " % random.choice(self.t_join_where)
+ sql += "%s " % random.choice(self.interval_sliding)
+ #sql += "%s " % random.choice(order_u_where)
+ sql += "%s " % random.choice(self.limit_u_where)
+ sql += ") "
+ #sql += "%s " % random.choice(interval_sliding)
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ tdSql.query(sql)
+ self.cur1.execute(sql)
+
+ tdSql.query("select 17-4.2 from stable_1;")
+ for i in range(self.fornum):
+ #this is having_tagnot_support , because tag-select cannot mix with last_row...
+ sql = "select apercentile(cal17_1, %d)/1000 ,apercentile(cal17_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000))
+ sql += "%s as cal17_1 ," % random.choice(self.calc_aggregate_all_j)
+ sql += "%s as cal17_2 " % random.choice(self.calc_aggregate_all_j)
+ sql += " from stable_1 t1, stable_2 t2 where t1.ts = t2.ts and "
+ sql += "%s " % random.choice(self.qt_u_or_where)
+ sql += "%s " % random.choice(self.interval_sliding)
+ #sql += "%s " % random.choice(self.order_u_where)
+ sql += "%s " % random.choice(self.limit_u_where)
+ sql += ") "
+ #sql += "%s " % random.choice(self.interval_sliding)
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ tdSql.query(sql)
+ self.cur1.execute(sql)
+
+ tdSql.query("select 17-5 from stable_1;")
+ for i in range(self.fornum):
+ #having_not_support
+ sql = "select apercentile(cal17_1, %d)/1000 ,apercentile(cal17_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000))
+ sql += "%s as cal17_1 ," % random.choice(self.calc_aggregate_all)
+ sql += "%s as cal17_2 " % random.choice(self.calc_aggregate_all)
+ sql += " from stable_1 where "
+ sql += "%s " % random.choice(self.qt_where)
+ sql += "%s " % random.choice(self.partiton_where)
+ sql += "%s " % random.choice(self.interval_sliding)
+ # sql += "%s " % random.choice(self.having_not_support)
+ # sql += "%s " % random.choice(self.order_where)
+ sql += "%s " % random.choice(self.limit1_where)
+ sql += ") "
+ #sql += "%s " % random.choice(self.interval_sliding)
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ tdSql.query(sql)
+ self.cur1.execute(sql)
+
+ tdSql.query("select 17-6 from stable_1;")
+ for i in range(self.fornum):
+ sql = "select apercentile(cal17_1, %d)/1000 ,apercentile(cal17_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000))
+ sql += "%s as cal17_1 ," % random.choice(self.calc_aggregate_all)
+ sql += "%s as cal17_2 " % random.choice(self.calc_aggregate_all)
+ sql += " from stable_1 where "
+ sql += "%s " % random.choice(self.q_where)
+ sql += "%s " % random.choice(self.interval_sliding)
+ #sql += "%s " % random.choice(self.order_where)
+ sql += "%s " % random.choice(self.limit1_where)
+ sql += ") "
+ #sql += "%s " % random.choice(interval_sliding)
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ tdSql.query(sql)
+ self.cur1.execute(sql)
+
+ tdSql.query("select 17-7 from stable_1;")
+ for i in range(self.fornum):
+ sql = "select apercentile(cal17_1, %d)/1000 ,apercentile(cal17_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000))
+ sql += "%s as cal17_1 ," % random.choice(self.calc_aggregate_all_j)
+ sql += "%s as cal17_2 " % random.choice(self.calc_aggregate_all_j)
+ sql += " from stable_1_1 t1, stable_1_2 t2 where t1.ts = t2.ts and "
+ sql += "%s " % random.choice(self.q_u_where)
+ sql += "%s " % random.choice(self.interval_sliding)
+ #sql += "%s " % random.choice(self.order_u_where)
+ sql += "%s " % random.choice(self.limit1_where)
+ sql += ") "
+ #sql += "%s " % random.choice(interval_sliding)
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ tdSql.query(sql)
+ self.cur1.execute(sql)
+
+ tdSql.query("select 17-7.2 from stable_1;")
+ for i in range(self.fornum):
+ sql = "select apercentile(cal17_1, %d)/1000 ,apercentile(cal17_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000))
+ sql += "%s as cal17_1 ," % random.choice(self.calc_aggregate_all_j)
+ sql += "%s as cal17_2 " % random.choice(self.calc_aggregate_all_j)
+ sql += " from stable_1_1 t1, stable_1_2 t2 where t1.ts = t2.ts and "
+ sql += "%s " % random.choice(self.q_u_or_where)
+ sql += "%s " % random.choice(self.interval_sliding)
+ #sql += "%s " % random.choice(self.order_u_where)
+ sql += "%s " % random.choice(self.limit1_where)
+ sql += ") "
+ #sql += "%s " % random.choice(interval_sliding)
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ tdSql.query(sql)
+ self.cur1.execute(sql)
+
+ self.restartDnodes()
+ tdSql.query("select 17-8 from stable_1;")
+ for i in range(self.fornum):
+ sql = "select apercentile(cal17_1, %d)/1000 ,apercentile(cal17_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000))
+ sql += "%s as cal17_1 ," % random.choice(self.calc_aggregate_all)
+ sql += "%s as cal17_2 " % random.choice(self.calc_aggregate_all)
+ sql += " from regular_table_1 where "
+ sql += "%s " % random.choice(self.q_where)
+ sql += "%s " % random.choice(self.interval_sliding)
+ #sql += "%s " % random.choice(self.order_where)
+ sql += "%s " % random.choice(self.limit1_where)
+ sql += ") "
+ #sql += "%s " % random.choice(interval_sliding)
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ tdSql.query(sql)
+ self.cur1.execute(sql)
+
+ tdSql.query("select 17-9 from stable_1;")
+ for i in range(self.fornum):
+ sql = "select apercentile(cal17_1, %d)/1000 ,apercentile(cal17_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000))
+ sql += "%s as cal17_1 ," % random.choice(self.calc_aggregate_all_j)
+ sql += "%s as cal17_2 " % random.choice(self.calc_aggregate_all_j)
+ sql += " from regular_table_1 t1, regular_table_2 t2 where t1.ts = t2.ts and "
+ sql += "%s " % random.choice(self.q_u_where)
+ sql += "%s " % random.choice(self.interval_sliding)
+ #sql += "%s " % random.choice(self.order_u_where)
+ sql += "%s " % random.choice(self.limit_u_where)
+ sql += ") "
+ #sql += "%s " % random.choice(interval_sliding)
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ tdSql.query(sql)
+ self.cur1.execute(sql)
+
+ tdSql.query("select 17-10 from stable_1;")
+ for i in range(self.fornum):
+ sql = "select apercentile(cal17_1, %d)/1000 ,apercentile(cal17_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000))
+ sql += "%s as cal17_1 ," % random.choice(self.calc_aggregate_all_j)
+ sql += "%s as cal17_2 " % random.choice(self.calc_aggregate_all_j)
+ sql += " from regular_table_1 t1, regular_table_2 t2 where t1.ts = t2.ts and "
+ sql += "%s " % random.choice(self.q_u_or_where)
+ sql += "%s " % random.choice(self.interval_sliding)
+ #sql += "%s " % random.choice(self.order_u_where)
+ sql += "%s " % random.choice(self.limit_u_where)
+ sql += ") "
+ #sql += "%s " % random.choice(interval_sliding)
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ tdSql.query(sql)
+ self.cur1.execute(sql)
+
+ #18 select apercentile from (select calc_aggregate_alls from regular_table or stable where <\>\in\and\or session order by limit )interval_sliding
+ tdSql.query("select 18-1 from stable_1;")
+ for i in range(self.fornum):
+ sql = "select apercentile(cal18_1, %d)/1000 ,apercentile(cal18_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000))
+ sql += "%s as cal18_1 ," % random.choice(self.calc_aggregate_all)
+ sql += "%s as cal18_2 " % random.choice(self.calc_aggregate_all)
+ sql += " from regular_table_1 where "
+ sql += "%s " % random.choice(self.q_where)
+ sql += "%s " % random.choice(self.session_where)
+ #sql += "%s " % random.choice(self.fill_where)
+ #sql += "%s " % random.choice(self.order_where)
+ sql += "%s " % random.choice(self.limit1_where)
+ sql += ") "
+ #sql += "%s " % random.choice(interval_sliding)
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ tdSql.query(sql)
+ self.cur1.execute(sql)
+
+ tdSql.query("select 18-2 from stable_1;")
+ for i in range(self.fornum):
+ sql = "select apercentile(cal18_1, %d)/1000 ,apercentile(cal18_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000))
+ sql += "%s as cal18_1 ," % random.choice(self.calc_aggregate_all_j)
+ sql += "%s as cal18_2 " % random.choice(self.calc_aggregate_all_j)
+ sql += " from regular_table_1 t1, regular_table_2 t2 where t1.ts = t2.ts and "
+ sql += "%s " % random.choice(self.q_u_where)
+ sql += "%s " % random.choice(self.session_u_where)
+ #sql += "%s " % random.choice(self.order_u_where)
+ sql += "%s " % random.choice(self.limit_u_where)
+ sql += ") "
+ #sql += "%s " % random.choice(interval_sliding)
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ tdSql.query(sql)
+ self.cur1.execute(sql)
+
+ tdSql.query("select 18-2.2 from stable_1;")
+ for i in range(self.fornum):
+ sql = "select apercentile(cal18_1, %d)/1000 ,apercentile(cal18_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000))
+ sql += "%s as cal18_1 ," % random.choice(self.calc_aggregate_all_j)
+ sql += "%s as cal18_2 " % random.choice(self.calc_aggregate_all_j)
+ sql += " from regular_table_1 t1, regular_table_2 t2 where t1.ts = t2.ts and "
+ sql += "%s " % random.choice(self.q_u_or_where)
+ sql += "%s " % random.choice(self.session_u_where)
+ #sql += "%s " % random.choice(self.order_u_where)
+ sql += "%s " % random.choice(self.limit_u_where)
+ sql += ") "
+ #sql += "%s " % random.choice(self.interval_sliding)
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ tdSql.query(sql)
+ self.cur1.execute(sql)
+
+ self.restartDnodes()
+ tdSql.query("select 18-3 from stable_1;")
+ for i in range(self.fornum):
+ sql = "select apercentile(cal18_1, %d)/1000 ,apercentile(cal18_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000))
+ sql += "%s as cal18_1 ," % random.choice(self.calc_aggregate_all)
+ sql += "%s as cal18_2 " % random.choice(self.calc_aggregate_all)
+ sql += " from stable_1_1 where "
+ sql += "%s " % random.choice(self.q_where)
+ sql += "%s " % random.choice(self.session_where)
+ #sql += "%s " % random.choice(self.fill_where)
+ #sql += "%s " % random.choice(self.order_where)
+ sql += "%s " % random.choice(self.limit1_where)
+ sql += ") "
+ #sql += "%s " % random.choice(self.interval_sliding)
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ tdSql.query(sql)
+ self.cur1.execute(sql)
+
+ tdSql.query("select 18-4 from stable_1;")
+ for i in range(self.fornum):
+ sql = "select apercentile(cal18_1, %d)/1000 ,apercentile(cal18_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000))
+ sql += "%s as cal18_1 ," % random.choice(self.calc_aggregate_all_j)
+ sql += "%s as cal18_2 " % random.choice(self.calc_aggregate_all_j)
+ sql += " from stable_1_1 t1, regular_table_2 t2 where t1.ts = t2.ts and "
+ sql += "%s " % random.choice(self.q_u_where)
+ sql += "%s " % random.choice(self.session_u_where)
+ #sql += "%s " % random.choice(self.order_u_where)
+ sql += "%s " % random.choice(self.limit_u_where)
+ sql += ") "
+ #sql += "%s " % random.choice(self.interval_sliding)
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ tdSql.query(sql)
+ self.cur1.execute(sql)
+
+ tdSql.query("select 18-4.2 from stable_1;")
+ for i in range(self.fornum):
+ sql = "select apercentile(cal18_1, %d)/1000 ,apercentile(cal18_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000))
+ sql += "%s as cal18_1 ," % random.choice(self.calc_aggregate_all_j)
+ sql += "%s as cal18_2 " % random.choice(self.calc_aggregate_all_j)
+ sql += " from stable_1_1 t1, regular_table_2 t2 where t1.ts = t2.ts and "
+ sql += "%s " % random.choice(self.q_u_or_where)
+ sql += "%s " % random.choice(self.session_u_where)
+ #sql += "%s " % random.choice(self.order_u_where)
+ sql += "%s " % random.choice(self.limit_u_where)
+ sql += ") "
+ #sql += "%s " % random.choice(self.interval_sliding)
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ tdSql.query(sql)
+ self.cur1.execute(sql)
+
+ tdSql.query("select 18-5 from stable_1;")
+ for i in range(self.fornum):
+ sql = "select apercentile(cal18_1, %d)/1000 ,apercentile(cal18_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000))
+ sql += "%s as cal18_1 ," % random.choice(self.calc_aggregate_all)
+ sql += "%s as cal18_2 " % random.choice(self.calc_aggregate_all)
+ sql += " from stable_1 where "
+ sql += "%s " % random.choice(self.q_where)
+ sql += "%s " % random.choice(self.session_where)
+ #sql += "%s " % random.choice(self.fill_where)
+ #sql += "%s " % random.choice(self.order_where)
+ sql += "%s " % random.choice(self.limit1_where)
+ sql += ") "
+ #sql += "%s " % random.choice(interval_sliding)
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ tdSql.query(sql)
+ self.cur1.execute(sql)
+
+ tdSql.query("select 18-6 from stable_1;")
+ for i in range(self.fornum):
+ sql = "select apercentile(cal18_1, %d)/1000 ,apercentile(cal18_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000))
+ sql += "%s as cal18_1 ," % random.choice(self.calc_aggregate_all_j)
+ sql += "%s as cal18_2 " % random.choice(self.calc_aggregate_all_j)
+ sql += " from stable_1 t1, stable_2 t2 where t1.ts = t2.ts and "
+ sql += "%s " % random.choice(self.t_join_where)
+ sql += "%s " % random.choice(self.session_u_where)
+ #sql += "%s " % random.choice(self.order_u_where)
+ sql += "%s " % random.choice(self.limit_u_where)
+ sql += ") "
+ #sql += "%s " % random.choice(self.interval_sliding)
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ tdSql.query(sql)
+ self.cur1.execute(sql)
+
+ tdSql.query("select 18-7 from stable_1;")
+ for i in range(self.fornum):
+ sql = "select apercentile(cal18_1, %d)/1000 ,apercentile(cal18_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000))
+ sql += "%s as cal18_1 ," % random.choice(self.calc_aggregate_all_j)
+ sql += "%s as cal18_2 " % random.choice(self.calc_aggregate_all_j)
+ sql += " from stable_1 t1, stable_2 t2 where t1.ts = t2.ts and "
+ sql += "%s " % random.choice(self.qt_u_or_where)
+ sql += "%s " % random.choice(self.session_u_where)
+ #sql += "%s " % random.choice(self.order_u_where)
+ sql += "%s " % random.choice(self.limit_u_where)
+ sql += ") "
+ #sql += "%s " % random.choice(self.interval_sliding)
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ tdSql.query(sql)
+ self.cur1.execute(sql)
+
+ #19 select apercentile from (select calc_aggregate_alls from regular_table or stable where <\>\in\and\or session order by limit )interval_sliding
+ #self.dropandcreateDB_random("%s" %db, 1)
+ tdSql.query("select 19-1 from stable_1;")
+ for i in range(self.fornum):
+ sql = "select apercentile(cal19_1, %d)/1000 ,apercentile(cal19_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000))
+ sql += "%s as cal19_1 ," % random.choice(self.calc_aggregate_all)
+ sql += "%s as cal19_2 " % random.choice(self.calc_aggregate_all)
+ sql += " from regular_table_1 where "
+ sql += "%s " % random.choice(self.q_where)
+ sql += "%s " % random.choice(self.state_window)
+ #sql += "%s " % random.choice(self.order_where)
+ sql += "%s " % random.choice(self.limit1_where)
+ sql += ") "
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ tdSql.query(sql)
+ self.cur1.execute(sql)
+
+ tdSql.query("select 19-2 from stable_1;")
+ for i in range(self.fornum):
+ sql = "select apercentile(cal19_1, %d)/1000 ,apercentile(cal19_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000))
+ sql += "%s as cal19_1 ," % random.choice(self.calc_aggregate_all_j)
+ sql += "%s as cal19_2 " % random.choice(self.calc_aggregate_all_j)
+ sql += " from regular_table_1 t1, regular_table_2 t2 where t1.ts = t2.ts and "
+ sql += "%s " % random.choice(self.q_u_where)
+ sql += "%s " % random.choice(self.state_u_window)
+ #sql += "%s " % random.choice(self.order_u_where)
+ sql += "%s " % random.choice(self.limit_u_where)
+ sql += ") "
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ tdSql.query(sql)
+ self.cur1.execute(sql)
+
+ tdSql.query("select 19-2.2 from stable_1;")
+ for i in range(self.fornum):
+ sql = "select apercentile(cal19_1, %d)/1000 ,apercentile(cal19_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000))
+ sql += "%s as cal19_1 ," % random.choice(self.calc_aggregate_all_j)
+ sql += "%s as cal19_2 " % random.choice(self.calc_aggregate_all_j)
+ sql += " from regular_table_1 t1, regular_table_2 t2 where t1.ts = t2.ts and "
+ sql += "%s " % random.choice(self.q_u_or_where)
+ sql += "%s " % random.choice(self.state_u_window)
+ #sql += "%s " % random.choice(self.order_u_where)
+ sql += "%s " % random.choice(self.limit_u_where)
+ sql += ") "
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ tdSql.query(sql)
+ self.cur1.execute(sql)
+
+ tdSql.query("select 19-3 from stable_1;")
+ for i in range(self.fornum):
+ sql = "select apercentile(cal19_1, %d)/1000 ,apercentile(cal19_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000))
+ sql += "%s as cal19_1 ," % random.choice(self.calc_aggregate_all)
+ sql += "%s as cal19_2 " % random.choice(self.calc_aggregate_all)
+ sql += " from stable_1_1 where "
+ sql += "%s " % random.choice(self.q_where)
+ sql += "%s " % random.choice(self.state_window)
+ #sql += "%s " % random.choice(self.order_where)
+ sql += "%s " % random.choice(self.limit1_where)
+ sql += ") "
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ tdSql.query(sql)
+ self.cur1.execute(sql)
+
+ tdSql.query("select 19-4 from stable_1;")
+ for i in range(self.fornum):
+ sql = "select apercentile(cal19_1, %d)/1000 ,apercentile(cal19_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000))
+ sql += "%s as cal19_1 ," % random.choice(self.calc_aggregate_all_j)
+ sql += "%s as cal19_2 " % random.choice(self.calc_aggregate_all_j)
+ sql += " from stable_1_1 t1, stable_1_2 t2 where t1.ts = t2.ts and "
+ sql += "%s " % random.choice(self.q_u_where)
+ #sql += "%s " % random.choice(self.state_window)
+ #sql += "%s " % random.choice(self.order_u_where)
+ sql += "%s " % random.choice(self.limit_u_where)
+ sql += ") "
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ tdSql.query(sql)
+ self.cur1.execute(sql)
+
+ tdSql.query("select 19-4.2 from stable_1;")
+ for i in range(self.fornum):
+ sql = "select apercentile(cal19_1, %d)/1000 ,apercentile(cal19_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000))
+ sql += "%s as cal19_1 ," % random.choice(self.calc_aggregate_all_j)
+ sql += "%s as cal19_2 " % random.choice(self.calc_aggregate_all_j)
+ sql += " from stable_1_1 t1, stable_1_2 t2 where t1.ts = t2.ts and "
+ sql += "%s " % random.choice(self.q_u_or_where)
+ #sql += "%s " % random.choice(self.order_u_where)
+ sql += "%s " % random.choice(self.limit_u_where)
+ sql += ") "
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ tdSql.query(sql)
+ self.cur1.execute(sql)
+
+ tdSql.query("select 19-5 from stable_1;")
+ for i in range(self.fornum):
+ sql = "select apercentile(cal19_1, %d)/1000 ,apercentile(cal19_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000))
+ sql += "%s as cal19_1 ," % random.choice(self.calc_aggregate_all)
+ sql += "%s as cal19_2 " % random.choice(self.calc_aggregate_all)
+ sql += " from stable_1 where "
+ sql += "%s " % random.choice(self.q_where)
+ sql += "%s " % random.choice(self.state_window)
+ sql += "%s " % random.choice(self.order_where)
+ sql += "%s " % random.choice(self.limit1_where)
+ sql += ") "
+ sql += "%s " % random.choice(self.interval_sliding)
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ tdSql.error(sql) #'STATE_WINDOW not support for super table query'
+
+ tdSql.query("select 19-6 from stable_1;")
+ for i in range(self.fornum):
+ sql = "select apercentile(cal19_1, %d)/1000 ,apercentile(cal19_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000))
+ sql += "%s as cal19_1 ," % random.choice(self.calc_aggregate_all_j)
+ sql += "%s as cal19_2 " % random.choice(self.calc_aggregate_all_j)
+ sql += " from stable_1 t1 , stable_2 t2 where t1.ts = t2.ts and "
+ sql += "%s " % random.choice(self.q_u_where)
+ #sql += "%s " % random.choice(self.state_window)
+ #sql += "%s " % random.choice(self.order_u_where)
+ sql += "%s " % random.choice(self.limit_u_where)
+ sql += ") "
+ #sql += "%s " % random.choice(self.interval_sliding)
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ tdSql.query(sql)
+ self.cur1.execute(sql)
+
+ tdSql.query("select 19-7 from stable_1;")
+ for i in range(self.fornum):
+ sql = "select apercentile(cal19_1, %d)/1000 ,apercentile(cal19_2, %d)*10+%d from ( select " %(random.randint(0,100) , random.randint(0,100) ,random.randint(-1000,1000))
+ sql += "%s as cal19_1 ," % random.choice(self.calc_aggregate_all_j)
+ sql += "%s as cal19_2 " % random.choice(self.calc_aggregate_all_j)
+ sql += " from stable_1 t1 , stable_2 t2 where t1.ts = t2.ts and "
+ sql += "%s " % random.choice(self.qt_u_or_where)
+ #sql += "%s " % random.choice(self.order_u_where)
+ sql += "%s " % random.choice(self.limit_u_where)
+ sql += ") "
+ #sql += "%s " % random.choice(self.interval_sliding)
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ tdSql.query(sql)
+ self.cur1.execute(sql)
+
+ #20 select * from (select calc_select_fills form regualr_table or stable where <\>\in\and\or fill_where group by order by limit offset )
+ #self.dropandcreateDB_random("%s" %db, 1)
+ tdSql.query("select 20-1 from stable_1;")
+ for i in range(self.fornum):
+ sql = "select * from ( select "
+ sql += "%s , " % random.choice(self.calc_select_fill)
+ sql += "%s ," % random.choice(self.calc_select_fill)
+ sql += "%s " % random.choice(self.calc_select_fill)
+ sql += " from stable_1 where "
+ sql += "%s " % random.choice(self.interp_where)
+ sql += "%s " % random.choice(self.fill_where)
+ sql += "%s " % random.choice(self.group_where)
+ sql += "%s " % random.choice(self.order_where)
+ sql += "%s " % random.choice(self.limit_where)
+ sql += ") "
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ #interp不支持 tdSql.query(sql)
+ #self.cur1.execute(sql)
+
+ rsDn = self.restartDnodes()
+ tdSql.query("select 20-2 from stable_1;")
+ for i in range(self.fornum):
+ sql = "select * from ( select "
+ sql += "%s , " % random.choice(self.calc_select_fill_j)
+ sql += "%s ," % random.choice(self.calc_select_fill_j)
+ sql += "%s " % random.choice(self.calc_select_fill_j)
+ sql += " from stable_1 t1 , stable_2 t2 where t1.ts = t2.ts and "
+ sql += "%s and " % random.choice(self.t_join_where)
+ sql += "%s " % random.choice(self.interp_where_j)
+ sql += "%s " % random.choice(self.order_u_where)
+ sql += "%s " % random.choice(self.limit_u_where)
+ sql += ") "
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ #interp不支持 tdSql.query(sql)
+ #self.cur1.execute(sql)
+
+ tdSql.query("select 20-2.2 from stable_1;")
+ for i in range(self.fornum):
+ sql = "select * from ( select "
+ sql += "%s , " % random.choice(self.calc_select_fill_j)
+ sql += "%s ," % random.choice(self.calc_select_fill_j)
+ sql += "%s " % random.choice(self.calc_select_fill_j)
+ sql += " from stable_1 t1 , stable_2 t2 where t1.ts = t2.ts and "
+ sql += "%s and " % random.choice(self.qt_u_or_where)
+ sql += "%s " % random.choice(self.interp_where_j)
+ sql += "%s " % random.choice(self.order_u_where)
+ sql += "%s " % random.choice(self.limit_u_where)
+ sql += ") "
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ #interp不支持 tdSql.query(sql)
+ #self.cur1.execute(sql)
+
+ tdSql.query("select 20-3 from stable_1;")
+ for i in range(self.fornum):
+ sql = "select * from ( select "
+ sql += "%s , " % random.choice(self.calc_select_fill)
+ sql += "%s ," % random.choice(self.calc_select_fill)
+ sql += "%s " % random.choice(self.calc_select_fill)
+ sql += " from stable_1 where "
+ sql += "%s " % self.interp_where[2]
+ sql += "%s " % random.choice(self.fill_where)
+ sql += "%s " % random.choice(self.order_where)
+ sql += "%s " % random.choice(self.limit_where)
+ sql += ") "
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ #interp不支持 tdSql.query(sql)
+ #self.cur1.execute(sql)
+
+ tdSql.query("select 20-4 from stable_1;")
+ for i in range(self.fornum):
+ sql = "select * from ( select "
+ sql += "%s , " % random.choice(self.calc_select_fill_j)
+ sql += "%s ," % random.choice(self.calc_select_fill_j)
+ sql += "%s " % random.choice(self.calc_select_fill_j)
+ sql += " from stable_1 t1, table_1 t2 where t1.ts = t2.ts and "
+ #sql += "%s and " % random.choice(self.t_join_where)
+ sql += "%s " % self.interp_where_j[random.randint(0,5)]
+ sql += "%s " % random.choice(self.order_u_where)
+ sql += "%s " % random.choice(self.limit_u_where)
+ sql += ") "
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ #interp不支持 tdSql.query(sql)
+ #self.cur1.execute(sql)
+
+ tdSql.query("select 20-4.2 from stable_1;")
+ for i in range(self.fornum):
+ sql = "select * from ( select "
+ sql += "%s , " % random.choice(self.calc_select_fill_j)
+ sql += "%s ," % random.choice(self.calc_select_fill_j)
+ sql += "%s " % random.choice(self.calc_select_fill_j)
+ sql += " from stable_1 t1, stable_1_1 t2 where t1.ts = t2.ts and "
+ sql += "%s and " % random.choice(self.qt_u_or_where)
+ sql += "%s " % self.interp_where_j[random.randint(0,5)]
+ sql += "%s " % random.choice(self.fill_where)
+ sql += "%s " % random.choice(self.order_u_where)
+ sql += "%s " % random.choice(self.limit_u_where)
+ sql += ") "
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ ##interp不支持 tdSql.error(sql)
+ #self.cur1.execute(sql)
+
+ tdSql.query("select 20-5 from stable_1;")
+ for i in range(self.fornum):
+ sql = "select * from ( select "
+ sql += "%s , " % random.choice(self.calc_select_fill)
+ sql += "%s ," % random.choice(self.calc_select_fill)
+ sql += "%s " % random.choice(self.calc_select_fill)
+ sql += " from regular_table_1 where "
+ sql += "%s " % self.interp_where[1]
+ sql += "%s " % random.choice(self.fill_where)
+ sql += "%s " % random.choice(self.order_where)
+ sql += "%s " % random.choice(self.limit_where)
+ sql += ") "
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ ##interp不支持 tdSql.query(sql)
+ #self.cur1.execute(sql)
+
+ tdSql.query("select 20-6 from stable_1;")
+ for i in range(self.fornum):
+ sql = "select * from ( select "
+ sql += "%s , " % random.choice(self.calc_select_fill_j)
+ sql += "%s ," % random.choice(self.calc_select_fill_j)
+ sql += "%s " % random.choice(self.calc_select_fill_j)
+ sql += " from regular_table_1 t1, regular_table_2 t2 where t1.ts = t2.ts and "
+ #sql += "%s " % random.choice(self.interp_where_j)
+ sql += "%s " % self.interp_where_j[random.randint(0,5)]
+ sql += "%s " % random.choice(self.order_u_where)
+ sql += "%s " % random.choice(self.limit_u_where)
+ sql += ") "
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ ##interp不支持 tdSql.query(sql)
+ #self.cur1.execute(sql)
+
+ #1 select * from (select * from (select * form regular_table where <\>\in\and\or order by limit ))
+ tdSql.query("select 1-1 from stable_1;")
+ for i in range(self.fornum):
+ # sql_start = "select * from ( "
+ # sql_end = ")"
+ for_num = random.randint(1, 15);
+ sql = "select * from (" * for_num
+ sql += "select * from ( select * from ( select "
+ sql += "%s, " % random.choice(self.s_r_select)
+ sql += "%s, " % random.choice(self.q_select)
+ sql += "ts from regular_table_1 where "
+ sql += "%s " % random.choice(self.q_where)
+ sql += ")) "
+ sql += ")" * for_num
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ tdSql.query(sql)
+ self.cur1.execute(sql)
+
+ sql2 = "select * from ( select * from ( select "
+ sql2 += "%s, " % random.choice(self.s_r_select)
+ sql2 += "%s, " % random.choice(self.q_select)
+ sql2 += "ts from regular_table_1 where "
+ sql2 += "%s " % random.choice(self.q_where)
+ sql2 += ")) "
+ tdLog.info(sql2)
+ tdLog.info(len(sql2))
+ tdSql.query(sql2)
+ self.cur1.execute(sql2)
+
+ self.data_matrix_equal('%s' %sql ,1,10,1,1,'%s' %sql2 ,1,10,1,1)
+ self.data_matrix_equal('%s' %sql ,1,10,1,1,'%s' %sql ,1,10,3,3)
+ self.data_matrix_equal('%s' %sql ,1,10,3,3,'%s' %sql2 ,1,10,3,3)
+
+ for i in range(self.fornum):
+ for_num = random.randint(1, 15);
+ sql = "select ts from (" * for_num
+ sql += "select * from ( select * from ( select "
+ sql += "%s, " % random.choice(self.s_r_select)
+ sql += "%s, " % random.choice(self.q_select)
+ sql += "ts from regular_table_1 where "
+ sql += "%s " % random.choice(self.q_where)
+ sql += ")) "
+ sql += ")" * for_num
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ tdSql.query(sql)
+ self.cur1.execute(sql)
+
+ sql2 = "select * from ( select * from ( select "
+ sql2 += "%s, " % random.choice(self.s_r_select)
+ sql2 += "%s, " % random.choice(self.q_select)
+ sql2 += "ts from regular_table_1 where "
+ sql2 += "%s " % random.choice(self.q_where)
+ sql2 += ")) "
+ tdLog.info(sql2)
+ tdLog.info(len(sql2))
+ tdSql.query(sql2)
+ self.cur1.execute(sql2)
+
+ self.data_matrix_equal('%s' %sql ,1,10,1,1,'%s' %sql2 ,1,10,1,1)
+
+ #2 select * from (select * from (select * form stable where <\>\in\and\or order by limit ))
+ tdSql.query("select 2-1 from stable_1;")
+ for i in range(self.fornum):
+ for_num = random.randint(1, 15);
+ sql = "select * from (" * for_num
+ sql += "select * from ( select * from ( select "
+ sql += "%s, " % random.choice(self.s_s_select)
+ sql += "%s, " % random.choice(self.qt_select)
+ sql += "ts from stable_1 where "
+ sql += "%s " % random.choice(self.q_where)
+ sql += ")) "
+ sql += ")" * for_num
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ tdSql.query(sql)
+ self.cur1.execute(sql)
+
+ sql2 = "select * from ( select * from ( select "
+ sql2 += "%s, " % random.choice(self.s_s_select)
+ sql2 += "%s, " % random.choice(self.qt_select)
+ sql2 += "ts from stable_1 where "
+ sql2 += "%s " % random.choice(self.q_where)
+ sql2 += ")) "
+ tdLog.info(sql2)
+ tdLog.info(len(sql2))
+ tdSql.query(sql2)
+ self.cur1.execute(sql2)
+
+ self.data_matrix_equal('%s' %sql ,1,10,3,3,'%s' %sql2 ,1,10,3,3)
+
+ for i in range(self.fornum):
+ for_num = random.randint(1, 15);
+ sql = "select ts from (" * for_num
+ sql += "select * from ( select * from ( select "
+ sql += "%s, " % random.choice(self.s_s_select)
+ sql += "%s, " % random.choice(self.qt_select)
+ sql += "ts from stable_1 where "
+ sql += "%s " % random.choice(self.q_where)
+ sql += ")) "
+ sql += ")" * for_num
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ tdSql.query(sql)
+ self.cur1.execute(sql)
+
+ sql2 = "select ts from ( select * from ( select "
+ sql2 += "%s, " % random.choice(self.s_s_select)
+ sql2 += "%s, " % random.choice(self.qt_select)
+ sql2 += "ts from stable_1 where "
+ sql2 += "%s " % random.choice(self.q_where)
+ sql2 += ")) "
+ tdLog.info(sql2)
+ tdLog.info(len(sql2))
+ tdSql.query(sql2)
+ self.cur1.execute(sql2)
+
+ self.data_matrix_equal('%s' %sql ,1,10,1,1,'%s' %sql2 ,1,10,1,1)
+
+ #3 select ts ,calc from (select * form stable where <\>\in\and\or order by limit )
+ #self.dropandcreateDB_random("%s" %db, 1)
+ tdSql.query("select 3-1 from stable_1;")
+ for i in range(self.fornum):
+ sql = "select "
+ sql += "%s " % random.choice(self.calc_calculate_regular)
+ sql += " from ( select * from stable_1 where "
+ sql += "%s " % random.choice(self.qt_where)
+ sql += "%s " % random.choice(self.orders_desc_where)
+ sql += "%s " % random.choice(self.limit_where)
+ sql += ") ;"
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ #'Invalid function name: derivative' tdSql.query(sql)
+ #self.cur1.execute(sql)
+
+ #4 select * from (select calc form stable where <\>\in\and\or order by limit )
+ tdSql.query("select 4-1 from stable_1;")
+ for i in range(self.fornum):
+ sql = "select * from ( select "
+ sql += "%s " % random.choice(self.calc_select_in_ts)
+ sql += "from stable_1 where "
+ sql += "%s " % random.choice(self.qt_where)
+ #sql += "%s " % random.choice(self.order_desc_where)
+ sql += "%s " % random.choice(self.limit_where)
+ sql += ") ;"
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ tdSql.query(sql)
+ self.cur1.execute(sql)
+
+ #5 select ts ,tbname from (select * form stable where <\>\in\and\or order by limit )
+ tdSql.query("select 5-1 from stable_1;")
+ for i in range(self.fornum):
+ sql = "select ts , tbname , "
+ sql += "%s ," % random.choice(self.calc_calculate_regular)
+ sql += "%s ," % random.choice(self.dqt_select)
+ sql += "%s " % random.choice(self.qt_select)
+ sql += " from ( select * from stable_1 where "
+ sql += "%s " % random.choice(self.qt_where)
+ sql += "%s " % random.choice(self.orders_desc_where)
+ sql += "%s " % random.choice(self.limit_where)
+ sql += ") ;"
+ tdLog.info(sql)
+ tdLog.info(len(sql))
+ tdSql.error(sql)
+
+ #special sql
+ tdSql.query("select 6-1 from stable_1;")
+ for i in range(self.fornum):
+ sql = "select * from ( select _block_dist() from stable_1);"
+ # tdSql.query(sql)
+ # tdSql.checkRows(1)
+ sql = "select _block_dist() from (select * from stable_1);"
+ tdSql.error(sql)
+ sql = "select * from (select database());"
+ tdSql.error(sql)
+ sql = "select * from (select client_version());"
+ tdSql.error(sql)
+ sql = "select * from (select client_version() as version);"
+ tdSql.error(sql)
+ sql = "select * from (select server_version());"
+ tdSql.error(sql)
+ sql = "select * from (select server_version() as version);"
+ tdSql.error(sql)
+ sql = "select * from (select server_status());"
+ tdSql.error(sql)
+ sql = "select * from (select server_status() as status);"
+ tdSql.error(sql)
+
+ #taos -f sql
+ startTime_taos_f = time.time()
+ print("taos -f sql start!")
+ taos_cmd1 = "taos -f %s/%s.sql" % (self.testcasePath,self.testcaseFilename)
+ _ = subprocess.check_output(taos_cmd1, shell=True).decode("utf-8")
+ print("taos -f sql over!")
+ endTime_taos_f = time.time()
+ print("taos_f total time %ds" % (endTime_taos_f - startTime_taos_f))
+
+ print('=====================2.6 old function end ===========')
+
+
+
+ def run(self):
+ tdSql.prepare()
+
+ startTime = time.time()
+
+ #
+
+
+ #self.math_nest(['TAIL']) #TD-16009
+ # self.math_nest(['HYPERLOGLOG']) #TD-16038
+ # self.math_nest(['UNIQUE'])
+
+
+
+ # #
+ #self.function_before_26() #TD-16031
+
+ # self.math_nest(['ABS','SQRT']) #TD-16042
+ # self.math_nest(['SIN','COS','TAN','ASIN','ACOS','ATAN'])
+ # self.math_nest(['POW','LOG']) #TD-16039
+ # self.math_nest(['FLOOR','CEIL','ROUND'])
+ # #self.math_nest(['SAMPLE']) #TD-16017
+ # #self.math_nest(['CSUM']) #TD-15936 crash
+ # self.math_nest(['MAVG'])
+
+ self.str_nest(['LTRIM','RTRIM','LOWER','UPPER'])
+ self.str_nest(['LENGTH','CHAR_LENGTH'])
+ self.str_nest(['SUBSTR']) #TD-16042
+ self.str_nest(['CONCAT']) #TD-16002 偶尔
+ self.str_nest(['CONCAT_WS']) #TD-16002 偶尔
+ # self.time_nest(['CAST']) #TD-16017偶尔,放到time里起来弄
+ self.time_nest(['CAST_1'])
+ self.time_nest(['CAST_2'])
+ self.time_nest(['CAST_3'])
+ self.time_nest(['CAST_4'])
+
+
+
+ # self.time_nest(['NOW','TODAY']) #
+ # self.time_nest(['TIMEZONE']) #
+ # self.time_nest(['TIMETRUNCATE']) #TD-16039
+ # self.time_nest(['TO_ISO8601'])
+ # self.time_nest(['TO_UNIXTIMESTAMP'])#core多
+ # self.time_nest(['ELAPSED'])
+
+
+ endTime = time.time()
+ print("total time %ds" % (endTime - startTime))
+
+
+
+
+ def stop(self):
+ tdSql.close()
+ tdLog.success("%s successfully executed" % __file__)
+
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
diff --git a/tests/system-test/fulltest.bat b/tests/system-test/fulltest.bat
new file mode 100644
index 0000000000000000000000000000000000000000..871c93c9824333acb6ba05474d9249fb9f8d8ed7
--- /dev/null
+++ b/tests/system-test/fulltest.bat
@@ -0,0 +1,4 @@
+
+python3 .\test.py -f 0-others\taosShell.py
+python3 .\test.py -f 0-others\taosShellError.py
+python3 .\test.py -f 0-others\taosShellNetChk.py
\ No newline at end of file
diff --git a/tests/system-test/fulltest.sh b/tests/system-test/fulltest.sh
index 37c7f18177d27a7b56f4d6219b0151f973cd8cbb..126af6667744d247a636cd927187fac53e17a4e1 100755
--- a/tests/system-test/fulltest.sh
+++ b/tests/system-test/fulltest.sh
@@ -14,6 +14,8 @@ python3 ./test.py -f 0-others/udf_restart_taosd.py
python3 ./test.py -f 0-others/user_control.py
python3 ./test.py -f 0-others/fsync.py
+python3 ./test.py -f 1-insert/opentsdb_telnet_line_taosc_insert.py
+
python3 ./test.py -f 2-query/between.py
python3 ./test.py -f 2-query/distinct.py
python3 ./test.py -f 2-query/varchar.py
@@ -70,6 +72,7 @@ python3 ./test.py -f 2-query/arccos.py
python3 ./test.py -f 2-query/arctan.py
python3 ./test.py -f 2-query/query_cols_tags_and_or.py
# python3 ./test.py -f 2-query/nestedQuery.py
+python3 ./test.py -f 2-query/nestedQuery_str.py
python3 ./test.py -f 7-tmq/basic5.py
python3 ./test.py -f 7-tmq/subscribeDb.py
diff --git a/tests/system-test/test-all.bat b/tests/system-test/test-all.bat
new file mode 100644
index 0000000000000000000000000000000000000000..ae6c98b06f3504b20e712630d40184b093143835
--- /dev/null
+++ b/tests/system-test/test-all.bat
@@ -0,0 +1,25 @@
+@echo off
+SETLOCAL EnableDelayedExpansion
+for /F "tokens=1,2 delims=#" %%a in ('"prompt #$H#$E# & echo on & for %%b in (1) do rem"') do ( set "DEL=%%a")
+set /a a=0
+@REM echo Windows Taosd Test
+@REM for /F "usebackq tokens=*" %%i in (fulltest.bat) do (
+@REM echo Processing %%i
+@REM set /a a+=1
+@REM call %%i ARG1 > result_!a!.txt 2>error_!a!.txt
+@REM if errorlevel 1 ( call :colorEcho 0c "failed" &echo. && exit 8 ) else ( call :colorEcho 0a "Success" &echo. )
+@REM )
+echo Linux Taosd Test
+for /F "usebackq tokens=*" %%i in (fulltest.bat) do (
+ echo Processing %%i
+ set /a a+=1
+ call %%i ARG1 -m %1 > result_!a!.txt 2>error_!a!.txt
+ if errorlevel 1 ( call :colorEcho 0c "failed" &echo. && exit 8 ) else ( call :colorEcho 0a "Success" &echo. )
+)
+exit
+
+:colorEcho
+echo off
+ "%~2"
+findstr /v /a:%1 /R "^$" "%~2" nul
+del "%~2" > nul 2>&1i
\ No newline at end of file
diff --git a/tests/system-test/test.py b/tests/system-test/test.py
index 2ac8153a0d1761491317925b2e817473a42d963e..6fd7237b339dc1d2eeeee1d1f5965ec77d03b53d 100644
--- a/tests/system-test/test.py
+++ b/tests/system-test/test.py
@@ -44,8 +44,9 @@ if __name__ == "__main__":
if platform.system().lower() == 'windows':
windows = 1
updateCfgDict = {}
- opts, args = getopt.gnu_getopt(sys.argv[1:], 'f:p:m:l:scghrd:', [
- 'file=', 'path=', 'master', 'logSql', 'stop', 'cluster', 'valgrind', 'help', 'restart', 'updateCfgDict'])
+ execCmd = ""
+ opts, args = getopt.gnu_getopt(sys.argv[1:], 'f:p:m:l:scghrd:e:', [
+ 'file=', 'path=', 'master', 'logSql', 'stop', 'cluster', 'valgrind', 'help', 'restart', 'updateCfgDict', 'execCmd'])
for key, value in opts:
if key in ['-h', '--help']:
tdLog.printNoPrefix(
@@ -59,6 +60,7 @@ if __name__ == "__main__":
tdLog.printNoPrefix('-g valgrind Test Flag')
tdLog.printNoPrefix('-r taosd restart test')
tdLog.printNoPrefix('-d update cfg dict, base64 json str')
+ tdLog.printNoPrefix('-e eval str to run')
sys.exit(0)
if key in ['-r', '--restart']:
@@ -97,6 +99,19 @@ if __name__ == "__main__":
except:
print('updateCfgDict convert fail.')
sys.exit(0)
+
+ if key in ['-e', '--execCmd']:
+ try:
+ execCmd = base64.b64decode(value.encode()).decode()
+ except:
+ print('updateCfgDict convert fail.')
+ sys.exit(0)
+
+ if not execCmd == "":
+ tdDnodes.init(deployPath)
+ exec(execCmd)
+ quit()
+
if (stop != 0):
if (valgrind == 0):
toBeKilled = "taosd"
@@ -136,7 +151,7 @@ if __name__ == "__main__":
if windows:
tdCases.logSql(logSql)
tdLog.info("Procedures for testing self-deployment")
- tdDnodes.init(deployPath)
+ tdDnodes.init(deployPath, masterIp)
tdDnodes.setTestCluster(testCluster)
tdDnodes.setValgrind(valgrind)
tdDnodes.stopAll()
@@ -161,15 +176,7 @@ if __name__ == "__main__":
else:
pass
tdDnodes.deploy(1,updateCfgDict)
- if masterIp == "" or masterIp == "localhost":
- tdDnodes.start(1)
- else:
- remote_conn = Connection("root@%s"%host)
- with remote_conn.cd('/var/lib/jenkins/workspace/TDinternal/community/tests/pytest'):
- remote_conn.run("python3 ./test.py %s"%updateCfgDictStr)
- # print("docker exec -d cross_platform bash -c \"cd ~/test/community/tests/system-test && python3 ./test.py %s\""%updateCfgDictStr)
- # os.system("docker exec -d cross_platform bash -c \"cd ~/test/community/tests/system-test && (ps -aux | grep taosd | head -n 1 | awk '{print $2}' | xargs kill -9) && rm -rf /root/test/sim/dnode1/data/ && python3 ./test.py %s\""%updateCfgDictStr)
- # time.sleep(2)
+ tdDnodes.start(1)
conn = taos.connect(
host="%s"%(host),
config=tdDnodes.sim.getCfgDir())
@@ -178,7 +185,7 @@ if __name__ == "__main__":
else:
tdCases.runAllWindows(conn)
else:
- tdDnodes.init(deployPath)
+ tdDnodes.init(deployPath, masterIp)
tdDnodes.setTestCluster(testCluster)
tdDnodes.setValgrind(valgrind)
tdDnodes.stopAll()
diff --git a/tests/test/c/sdbDump.c b/tests/test/c/sdbDump.c
index 5641587c569aa2a2ea3f5a3f18c398f33979714c..8be2822c0ae8d6c2176895d0a6e51449d81ea44b 100644
--- a/tests/test/c/sdbDump.c
+++ b/tests/test/c/sdbDump.c
@@ -20,13 +20,13 @@
#include "tconfig.h"
#include "tjson.h"
-#define TMP_DNODE_DIR "/tmp/dumpsdb"
-#define TMP_MNODE_DIR "/tmp/dumpsdb/mnode"
-#define TMP_SDB_DATA_DIR "/tmp/dumpsdb/mnode/data"
-#define TMP_SDB_SYNC_DIR "/tmp/dumpsdb/mnode/sync"
-#define TMP_SDB_DATA_FILE "/tmp/dumpsdb/mnode/data/sdb.data"
-#define TMP_SDB_RAFT_CFG_FILE "/tmp/dumpsdb/mnode/sync/raft_config.json"
-#define TMP_SDB_RAFT_STORE_FILE "/tmp/dumpsdb/mnode/sync/raft_store.json"
+#define TMP_DNODE_DIR TD_TMP_DIR_PATH "dumpsdb"
+#define TMP_MNODE_DIR TD_TMP_DIR_PATH "dumpsdb/mnode"
+#define TMP_SDB_DATA_DIR TD_TMP_DIR_PATH "dumpsdb/mnode/data"
+#define TMP_SDB_SYNC_DIR TD_TMP_DIR_PATH "dumpsdb/mnode/sync"
+#define TMP_SDB_DATA_FILE TD_TMP_DIR_PATH "dumpsdb/mnode/data/sdb.data"
+#define TMP_SDB_RAFT_CFG_FILE TD_TMP_DIR_PATH "dumpsdb/mnode/sync/raft_config.json"
+#define TMP_SDB_RAFT_STORE_FILE TD_TMP_DIR_PATH "dumpsdb/mnode/sync/raft_store.json"
void reportStartup(const char *name, const char *desc) {}
diff --git a/tools/shell/src/shellEngine.c b/tools/shell/src/shellEngine.c
index a866488d3ad2d239c47b3279f506e755737b88bf..851d9a2070b75f7863f8e55f5779e9bac90607db 100644
--- a/tools/shell/src/shellEngine.c
+++ b/tools/shell/src/shellEngine.c
@@ -587,6 +587,8 @@ int32_t shellCalcColWidth(TAOS_FIELD *field, int32_t precision) {
int32_t width = (int32_t)strlen(field->name);
switch (field->type) {
+ case TSDB_DATA_TYPE_NULL:
+ return TMAX(4, width); // null
case TSDB_DATA_TYPE_BOOL:
return TMAX(5, width); // 'false'