diff --git a/README-CN.md b/README-CN.md
index afb242d6219069887be615b606f3a0eb5206422c..d5586c78b7ef4d4652fbe57b4d17af30a8bc8d36 100644
--- a/README-CN.md
+++ b/README-CN.md
@@ -23,7 +23,7 @@ TDengine是涛思数据专为物联网、车联网、工业互联网、IT运维
TDengine是一个高效的存储、查询、分析时序大数据的平台,专为物联网、车联网、工业互联网、运维监测等优化而设计。您可以像使用关系型数据库MySQL一样来使用它,但建议您在使用前仔细阅读一遍下面的文档,特别是 [数据模型](https://www.taosdata.com/cn/documentation/architecture) 与 [数据建模](https://www.taosdata.com/cn/documentation/model)。除本文档之外,欢迎 [下载产品白皮书](https://www.taosdata.com/downloads/TDengine%20White%20Paper.pdf)。
-# 生成
+# 构建
TDengine目前2.0版服务器仅能在Linux系统上安装和运行,后续会支持Windows、macOS等系统。客户端可以在Windows或Linux上安装和运行。任何OS的应用也可以选择RESTful接口连接服务器taosd。CPU支持X64/ARM64/MIPS64/Alpha64,后续会支持ARM32、RISC-V等CPU架构。用户可根据需求选择通过[源码](https://www.taosdata.com/cn/getting-started/#通过源码安装)或者[安装包](https://www.taosdata.com/cn/getting-started/#通过安装包安装)来安装。本快速指南仅适用于通过源码安装。
@@ -107,7 +107,7 @@ Go 连接器和 Grafana 插件在其他独立仓库,如果安装它们的话
git submodule update --init --recursive
```
-## 生成 TDengine
+## 构建 TDengine
### Linux 系统
@@ -116,6 +116,12 @@ mkdir debug && cd debug
cmake .. && cmake --build .
```
+您可以选择使用 Jemalloc 作为内存分配器,替代默认的 glibc:
+```bash
+apt install autoconf
+cmake .. -DJEMALLOC_ENABLED=true
+```
+
在X86-64、X86、arm64、arm32 和 mips64 平台上,TDengine 生成脚本可以自动检测机器架构。也可以手动配置 CPUTYPE 参数来指定 CPU 类型,如 aarch64 或 aarch32 等。
aarch64:
diff --git a/README.md b/README.md
index 0e1adcd97c61265ec0ad272043c9604736545b3d..89e35f6e630ea3db2ddab2bc0187a62f9793ac32 100644
--- a/README.md
+++ b/README.md
@@ -110,6 +110,12 @@ mkdir debug && cd debug
cmake .. && cmake --build .
```
+You can use Jemalloc as the memory allocator instead of the default glibc one:
+```
+apt install autoconf
+cmake .. -DJEMALLOC_ENABLED=true
+```
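+
+If Jemalloc was linked as a shared library, a quick sanity check is possible (the binary path below assumes the default build layout under the `debug` directory):
+```
+ldd build/bin/taosd | grep jemalloc
+```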
+
TDengine build script can detect the host machine's architecture on X86-64, X86, arm64, arm32 and mips64 platform.
You can also specify CPUTYPE option like aarch64 or aarch32 too if the detection result is not correct:
diff --git a/cmake/env.inc b/cmake/env.inc
index 356bd61442c51a9b697e375e9fe7ee7c1ad3d24b..fa15ec6aee01a619139417fceb21b3a71bd96364 100755
--- a/cmake/env.inc
+++ b/cmake/env.inc
@@ -39,7 +39,7 @@ SET(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} ${COMMON_C_FLAGS} ${DEBUG_FLAGS}
SET(CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE} ${COMMON_C_FLAGS} ${RELEASE_FLAGS}")
# Set c++ compiler options
-SET(COMMON_CXX_FLAGS "${COMMON_FLAGS} -std=c++11")
+SET(COMMON_CXX_FLAGS "${COMMON_FLAGS} -std=c++11 -Wno-unused-function")
SET(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} ${COMMON_CXX_FLAGS} ${DEBUG_FLAGS}")
SET(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} ${COMMON_CXX_FLAGS} ${RELEASE_FLAGS}")
diff --git a/documentation20/cn/00.index/docs.md b/documentation20/cn/00.index/docs.md
index 4c37ce598cd589b800efd532447797cfce568cd7..ed7156be178284349aac776f06c047a461e91684 100644
--- a/documentation20/cn/00.index/docs.md
+++ b/documentation20/cn/00.index/docs.md
@@ -42,7 +42,7 @@ TDengine是一个高效的存储、查询、分析时序大数据的平台,专
* [数据写入](/taos-sql#insert):支持单表单条、多条、多表多条写入,支持历史数据写入
* [数据查询](/taos-sql#select):支持时间段、值过滤、排序、查询结果手动分页等
* [SQL函数](/taos-sql#functions):支持各种聚合函数、选择函数、计算函数,如avg, min, diff等
-* [时间维度聚合](/taos-sql#aggregation):将表中数据按照时间段进行切割后聚合,降维处理
+* [窗口切分聚合](/taos-sql#aggregation):将表中数据按照时间段等方式进行切割后聚合,降维处理
* [边界限制](/taos-sql#limitation):库、表、SQL等边界限制条件
* [错误码](/taos-sql/error-code):TDengine 2.0 错误码以及对应的十进制码
@@ -63,7 +63,7 @@ TDengine是一个高效的存储、查询、分析时序大数据的平台,专
## [高级功能](/advanced-features)
* [连续查询(Continuous Query)](/advanced-features#continuous-query):基于滑动窗口,定时自动的对数据流进行查询计算
-* [数据订阅(Publisher/Subscriber)](/advanced-features#subscribe):象典型的消息队列,应用可订阅接收到的最新数据
+* [数据订阅(Publisher/Subscriber)](/advanced-features#subscribe):类似典型的消息队列,应用可订阅接收到的最新数据
* [缓存(Cache)](/advanced-features#cache):每个设备最新的数据都会缓存在内存中,可快速获取
* [报警监测](/advanced-features#alert):根据配置规则,自动监测超限行为数据,并主动推送
diff --git a/documentation20/cn/08.connector/01.java/docs.md b/documentation20/cn/08.connector/01.java/docs.md
index fb47d79268fe0a4fa84b444187a5aa700a687027..546e2dbcd37549ea621b9f54e8116fb94c44b25b 100644
--- a/documentation20/cn/08.connector/01.java/docs.md
+++ b/documentation20/cn/08.connector/01.java/docs.md
@@ -532,8 +532,9 @@ Query OK, 1 row(s) in set (0.000141s)
| taos-jdbcdriver 版本 | TDengine 版本 | JDK 版本 |
| -------------------- | ----------------- | -------- |
-| 2.0.22 | 2.0.18.0 及以上 | 1.8.x |
-| 2.0.12 - 2.0.21 | 2.0.8.0 - 2.0.17.0 | 1.8.x |
+| 2.0.31 | 2.1.3.0 及以上 | 1.8.x |
+| 2.0.22 - 2.0.30 | 2.0.18.0 - 2.1.2.x | 1.8.x |
+| 2.0.12 - 2.0.21 | 2.0.8.0 - 2.0.17.x | 1.8.x |
| 2.0.4 - 2.0.11 | 2.0.0.0 - 2.0.7.x | 1.8.x |
| 1.0.3 | 1.6.1.x 及以上 | 1.8.x |
| 1.0.2 | 1.6.1.x 及以上 | 1.8.x |
diff --git a/documentation20/cn/08.connector/docs.md b/documentation20/cn/08.connector/docs.md
index 2d76c866d11c1e1f51927c5536184b15aa6afe14..aa5fa50b66e237b87de2893678d4e6c2738d21cb 100644
--- a/documentation20/cn/08.connector/docs.md
+++ b/documentation20/cn/08.connector/docs.md
@@ -427,12 +427,15 @@ TDengine提供时间驱动的实时流式计算API。可以每隔一指定的时
* res:查询结果集,注意结果集中可能没有记录
* param:调用 `taos_subscribe`时客户程序提供的附加参数
* code:错误码
+
**注意**:在这个回调函数里不可以做耗时过长的处理,尤其是对于返回的结果集中数据较多的情况,否则有可能导致客户端阻塞等异常状态。如果必须进行复杂计算,则建议在另外的线程中进行处理。
* `TAOS_RES *taos_consume(TAOS_SUB *tsub)`
同步模式下,该函数用来获取订阅的结果。 用户应用程序将其置于一个循环之中。 如两次调用`taos_consume`的间隔小于订阅的轮询周期,API将会阻塞,直到时间间隔超过此周期。 如果数据库有新记录到达,该API将返回该最新的记录,否则返回一个没有记录的空结果集。 如果返回值为 `NULL`,说明系统出错。 异步模式下,用户程序不应调用此API。
+ **注意**:在调用 `taos_consume()` 之后,用户应用应确保尽快调用 `taos_fetch_row()` 或 `taos_fetch_block()` 来处理订阅结果,否则服务端会持续缓存查询结果数据等待客户端读取,极端情况下会导致服务端内存消耗殆尽,影响服务稳定性。
+
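+  A minimal synchronous consume loop as a sketch (error handling omitted; the connection handle `taos`, the topic name and the SQL below are placeholders):
+
+  ```c
+  // Sync mode: pass NULL as the callback so taos_consume() drives the polling.
+  TAOS_SUB *tsub = taos_subscribe(taos, 1, "topic-demo", "select * from meters;", NULL, NULL, 1000);
+  for (;;) {
+    TAOS_RES *res = taos_consume(tsub);  // blocks until the polling interval elapses
+    if (res == NULL) break;              // NULL indicates a system error
+    TAOS_ROW row;
+    while ((row = taos_fetch_row(res)) != NULL) {
+      // drain the result set promptly, as the note above advises
+    }
+  }
+  taos_unsubscribe(tsub, 1);  // keep the progress info so a later subscribe can resume
+  ```
+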
* `void taos_unsubscribe(TAOS_SUB *tsub, int keepProgress)`
取消订阅。 如参数 `keepProgress` 不为0,API会保留订阅的进度信息,后续调用 `taos_subscribe` 时可以基于此进度继续;否则将删除进度信息,后续只能重新开始读取数据。
diff --git a/documentation20/cn/12.taos-sql/docs.md b/documentation20/cn/12.taos-sql/docs.md
index 278757b81b4fdcac72ab218168863308fb1bc906..844865f6dbf67bc5031ea2556ac0e937ac965898 100644
--- a/documentation20/cn/12.taos-sql/docs.md
+++ b/documentation20/cn/12.taos-sql/docs.md
@@ -476,9 +476,10 @@ Query OK, 1 row(s) in set (0.001091s)
SELECT select_expr [, select_expr ...]
FROM {tb_name_list}
[WHERE where_condition]
- [INTERVAL (interval_val [, interval_offset])]
- [SLIDING sliding_val]
- [FILL fill_val]
+ [SESSION(ts_col, tol_val)]
+ [STATE_WINDOW(col)]
+ [INTERVAL(interval_val [, interval_offset]) [SLIDING sliding_val]]
+ [FILL(fill_mod_and_val)]
[GROUP BY col_list]
[ORDER BY col_list { DESC | ASC }]
[SLIMIT limit_val [SOFFSET offset_val]]
@@ -853,7 +854,23 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数
应用字段:不能应用在timestamp、binary、nchar、bool类型字段。
- 适用于:**表**。
+ 适用于:**表、(超级表)**。
+
+ 说明:从 2.1.3.0 版本开始,TWA 函数可以在由 GROUP BY 划分出单独时间线的情况下用于超级表(也即 GROUP BY tbname)。
+
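+  A sketch against the demo super table `meters` defined later in this chapter (the time range is illustrative):
+
+  ```mysql
+  SELECT TWA(current) FROM meters
+    WHERE ts >= '2018-10-03 14:38:05' AND ts <= '2018-10-03 14:38:15'
+    GROUP BY tbname;
+  ```
+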
+- **IRATE**
+ ```mysql
+ SELECT IRATE(field_name) FROM tb_name WHERE clause;
+ ```
+ 功能说明:计算瞬时增长率。使用时间区间中最后两个样本数据来计算瞬时增长速率;如果这两个值呈递减关系,那么只取最后一个数用于计算,而不是使用二者差值。
+
+ 返回结果数据类型:双精度浮点数Double。
+
+ 应用字段:不能应用在timestamp、binary、nchar、bool类型字段。
+
+ 适用于:**表、(超级表)**。
+
+ 说明:(从 2.1.3.0 版本开始新增此函数)IRATE 可以在由 GROUP BY 划分出单独时间线的情况下用于超级表(也即 GROUP BY tbname)。
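+
+  The same pattern applies to IRATE; a sketch (illustrative time range, the rate being computed from the last two samples on each time line):
+
+  ```mysql
+  SELECT IRATE(current) FROM meters
+    WHERE ts >= '2018-10-03 14:38:05' AND ts <= '2018-10-03 14:38:15'
+    GROUP BY tbname;
+  ```
+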
- **SUM**
```mysql
@@ -1202,13 +1219,14 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数
```
### 计算函数
+
- **DIFF**
```mysql
SELECT DIFF(field_name) FROM tb_name [WHERE clause];
```
功能说明:统计表中某列的值与前一行对应值的差。
- 返回结果数据类型: 同应用字段。
+ 返回结果数据类型:同应用字段。
应用字段:不能应用在timestamp、binary、nchar、bool类型字段。
@@ -1226,13 +1244,27 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数
Query OK, 2 row(s) in set (0.001162s)
```
+- **DERIVATIVE**
+ ```mysql
+ SELECT DERIVATIVE(field_name, time_interval, ignore_negative) FROM tb_name [WHERE clause];
+ ```
+ 功能说明:统计表中某列数值的单位变化率。其中单位时间区间的长度可以通过 time_interval 参数指定,最小可以是 1 秒(1s);ignore_negative 参数的值可以是 0 或 1,为 1 时表示忽略负值。
+
+ 返回结果数据类型:双精度浮点数。
+
+ 应用字段:不能应用在 timestamp、binary、nchar、bool 类型字段。
+
+ 适用于:**表、(超级表)**。
+
+ 说明:(从 2.1.3.0 版本开始新增此函数)输出结果行数是范围内总行数减一,第一行没有结果输出。DERIVATIVE 函数可以在由 GROUP BY 划分出单独时间线的情况下用于超级表(也即 GROUP BY tbname)。
+
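+  A sketch (the arguments are illustrative: a per-10-minute rate, keeping negative results):
+
+  ```mysql
+  SELECT DERIVATIVE(current, 10m, 0) FROM meters GROUP BY tbname;
+  ```
+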
- **SPREAD**
```mysql
SELECT SPREAD(field_name) FROM { tb_name | stb_name } [WHERE clause];
```
功能说明:统计表/超级表中某列的最大值和最小值之差。
- 返回结果数据类型: 双精度浮点数。
+ 返回结果数据类型:双精度浮点数。
应用字段:不能应用在binary、nchar、bool类型字段。
@@ -1284,39 +1316,45 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数
Query OK, 3 row(s) in set (0.001046s)
```
-## 时间维度聚合
+## 按窗口切分聚合
-TDengine支持按时间段进行聚合,可以将表中数据按照时间段进行切割后聚合生成结果,比如温度传感器每秒采集一次数据,但需查询每隔10分钟的温度平均值。这个聚合适合于降维(down sample)操作, 语法如下:
+TDengine 支持按时间段等窗口切分方式进行聚合结果查询,比如温度传感器每秒采集一次数据,但需查询每隔 10 分钟的温度平均值。这类聚合适合于降维(down sample)操作,语法如下:
```mysql
SELECT function_list FROM tb_name
[WHERE where_condition]
- INTERVAL (interval [, offset])
- [SLIDING sliding]
- [FILL ({NONE | VALUE | PREV | NULL | LINEAR | NEXT})]
+ [SESSION(ts_col, tol_val)]
+ [STATE_WINDOW(col)]
+ [INTERVAL(interval [, offset]) [SLIDING sliding]]
+ [FILL({NONE | VALUE | PREV | NULL | LINEAR | NEXT})]
SELECT function_list FROM stb_name
[WHERE where_condition]
- INTERVAL (interval [, offset])
- [SLIDING sliding]
- [FILL ({ VALUE | PREV | NULL | LINEAR | NEXT})]
+ [SESSION(ts_col, tol_val)]
+ [STATE_WINDOW(col)]
+ [INTERVAL(interval [, offset]) [SLIDING sliding]]
+ [FILL({NONE | VALUE | PREV | NULL | LINEAR | NEXT})]
[GROUP BY tags]
```
-- 聚合时间段的长度由关键词INTERVAL指定,最短时间间隔10毫秒(10a),并且支持偏移(偏移必须小于间隔)。聚合查询中,能够同时执行的聚合和选择函数仅限于单个输出的函数:count、avg、sum 、stddev、leastsquares、percentile、min、max、first、last,不能使用具有多行输出结果的函数(例如:top、bottom、diff以及四则运算)。
-- WHERE语句可以指定查询的起止时间和其他过滤条件。
-- SLIDING语句用于指定聚合时间段的前向增量。
-- FILL语句指定某一时间区间数据缺失的情况下的填充模式。填充模式包括以下几种:
- 1. 不进行填充:NONE(默认填充模式)。
- 2. VALUE填充:固定值填充,此时需要指定填充的数值。例如:FILL(VALUE, 1.23)。
- 3. NULL填充:使用NULL填充数据。例如:FILL(NULL)。
- 4. PREV填充:使用前一个非NULL值填充数据。例如:FILL(PREV)。
- 5. NEXT填充:使用下一个非NULL值填充数据。例如:FILL(NEXT)。
+- 在聚合查询中,function_list 位置允许使用聚合和选择函数,并要求每个函数仅输出单个结果(例如:COUNT、AVG、SUM、STDDEV、LEASTSQUARES、PERCENTILE、MIN、MAX、FIRST、LAST),而不能使用具有多行输出结果的函数(例如:TOP、BOTTOM、DIFF 以及四则运算)。
+- 查询过滤、聚合等操作按照每个切分窗口为独立的单位执行。聚合查询目前支持三种窗口的划分方式:
+ 1. 时间窗口:聚合时间段的窗口宽度由关键词 INTERVAL 指定,最短时间间隔 10 毫秒(10a);并且支持偏移 offset(偏移必须小于间隔),也即时间窗口划分与“UTC 时刻 0”相比的偏移量。SLIDING 语句用于指定聚合时间段的前向增量,也即每次窗口向前滑动的时长。当 SLIDING 与 INTERVAL 取值相等的时候,滑动窗口即为翻转窗口。
+ 2. 状态窗口:使用整数(布尔值)或字符串来标识产生记录时设备的状态量,产生的记录如果具有相同的状态量取值则归属于同一个状态窗口,数值改变后该窗口关闭。状态量所对应的列作为 STATE_WINDOW 语句的参数来指定。
+ 3. 会话窗口:时间戳所在的列由 SESSION 语句的 ts_col 参数指定,会话窗口根据相邻两条记录的时间戳差值来确定是否属于同一个会话——如果时间戳差异在 tol_val 以内,则认为记录仍属于同一个窗口;如果时间变化超过 tol_val,则自动开启下一个窗口。
+- WHERE 语句可以指定查询的起止时间和其他过滤条件。
+- FILL 语句指定某一窗口区间数据缺失的情况下的填充模式。填充模式包括以下几种:
+ 1. 不进行填充:NONE(默认填充模式)。
+ 2. VALUE 填充:固定值填充,此时需要指定填充的数值。例如:FILL(VALUE, 1.23)。
+ 3. PREV 填充:使用前一个非 NULL 值填充数据。例如:FILL(PREV)。
+ 4. NULL 填充:使用 NULL 填充数据。例如:FILL(NULL)。
+ 5. LINEAR 填充:根据前后距离最近的非 NULL 值做线性插值填充。例如:FILL(LINEAR)。
+ 6. NEXT 填充:使用下一个非 NULL 值填充数据。例如:FILL(NEXT)。
说明:
- 1. 使用FILL语句的时候可能生成大量的填充输出,务必指定查询的时间区间。针对每次查询,系统可返回不超过1千万条具有插值的结果。
+ 1. 使用 FILL 语句的时候可能生成大量的填充输出,务必指定查询的时间区间。针对每次查询,系统可返回不超过 1 千万条具有插值的结果。
2. 在时间维度聚合中,返回的结果中时间序列严格单调递增。
- 3. 如果查询对象是超级表,则聚合函数会作用于该超级表下满足值过滤条件的所有表的数据。如果查询中没有使用GROUP BY语句,则返回的结果按照时间序列严格单调递增;如果查询中使用了GROUP BY语句分组,则返回结果中每个GROUP内不按照时间序列严格单调递增。
+ 3. 如果查询对象是超级表,则聚合函数会作用于该超级表下满足值过滤条件的所有表的数据。如果查询中没有使用 GROUP BY 语句,则返回的结果按照时间序列严格单调递增;如果查询中使用了 GROUP BY 语句分组,则返回结果中每个 GROUP 内不按照时间序列严格单调递增。
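+
+ A sketch of the state and session windows described in items 2 and 3 above (the table `temp_tb_1` and its integer `status` column are hypothetical): SESSION opens a new window whenever two adjacent timestamps differ by more than the tolerance, STATE_WINDOW whenever the state value changes.
+
+```mysql
+SELECT COUNT(*), FIRST(ts) FROM temp_tb_1 SESSION(ts, 30s);
+SELECT COUNT(*), FIRST(ts) FROM temp_tb_1 STATE_WINDOW(status);
+```
+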
时间聚合也常被用于连续查询场景,可以参考文档 [连续查询(Continuous Query)](https://www.taosdata.com/cn/documentation/advanced-features#continuous-query)。
@@ -1326,7 +1364,7 @@ SELECT function_list FROM stb_name
CREATE TABLE meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT);
```
-针对智能电表采集的数据,以10分钟为一个阶段,计算过去24小时的电流数据的平均值、最大值、电流的中位数、以及随着时间变化的电流走势拟合直线。如果没有计算值,用前一个非NULL值填充。使用的查询语句如下:
+针对智能电表采集的数据,以 10 分钟为一个阶段,计算过去 24 小时的电流数据的平均值、最大值、电流的中位数、以及随着时间变化的电流走势拟合直线。如果没有计算值,用前一个非 NULL 值填充。使用的查询语句如下:
```mysql
SELECT AVG(current), MAX(current), LEASTSQUARES(current, start_val, step_val), PERCENTILE(current, 50) FROM meters
diff --git a/src/client/inc/tscUtil.h b/src/client/inc/tscUtil.h
index 35f3b42811a1cd90dfb87cbc7695cfcd55c50fa4..950d81fb61794e99647ba0777129cebba141a8ee 100644
--- a/src/client/inc/tscUtil.h
+++ b/src/client/inc/tscUtil.h
@@ -210,7 +210,8 @@ int32_t createProjectionExpr(SQueryInfo* pQueryInfo, STableMetaInfo* pTableMetaI
void clearAllTableMetaInfo(SQueryInfo* pQueryInfo, bool removeMeta);
SColumn* tscColumnClone(const SColumn* src);
-bool tscColumnExists(SArray* pColumnList, int32_t columnIndex, uint64_t uid);
+void tscColumnCopy(SColumn* pDest, const SColumn* pSrc);
+int32_t tscColumnExists(SArray* pColumnList, int32_t columnId, uint64_t uid);
SColumn* tscColumnListInsert(SArray* pColumnList, int32_t columnIndex, uint64_t uid, SSchema* pSchema);
void tscColumnListDestroy(SArray* pColList);
void tscColumnListCopy(SArray* dst, const SArray* src, uint64_t tableUid);
diff --git a/src/client/src/tscGlobalmerge.c b/src/client/src/tscGlobalmerge.c
index d835b37c2497c241d52a243d34ab4ab63e76c12a..ffec03b65adc38db15d3e57bb11dccb8b0f93a92 100644
--- a/src/client/src/tscGlobalmerge.c
+++ b/src/client/src/tscGlobalmerge.c
@@ -898,7 +898,9 @@ SSDataBlock* doGlobalAggregate(void* param, bool* newgroup) {
SSDataBlock* pBlock = NULL;
while(1) {
bool prev = *newgroup;
+ publishOperatorProfEvent(upstream, QUERY_PROF_BEFORE_OPERATOR_EXEC);
pBlock = upstream->exec(upstream, newgroup);
+ publishOperatorProfEvent(upstream, QUERY_PROF_AFTER_OPERATOR_EXEC);
if (pBlock == NULL) {
*newgroup = prev;
break;
@@ -966,7 +968,9 @@ static SSDataBlock* skipGroupBlock(SOperatorInfo* pOperator, bool* newgroup) {
SSDataBlock* pBlock = NULL;
if (pInfo->currentGroupOffset == 0) {
+ publishOperatorProfEvent(pOperator->upstream[0], QUERY_PROF_BEFORE_OPERATOR_EXEC);
pBlock = pOperator->upstream[0]->exec(pOperator->upstream[0], newgroup);
+ publishOperatorProfEvent(pOperator->upstream[0], QUERY_PROF_AFTER_OPERATOR_EXEC);
if (pBlock == NULL) {
setQueryStatus(pOperator->pRuntimeEnv, QUERY_COMPLETED);
pOperator->status = OP_EXEC_DONE;
@@ -974,7 +978,9 @@ static SSDataBlock* skipGroupBlock(SOperatorInfo* pOperator, bool* newgroup) {
if (*newgroup == false && pInfo->limit.limit > 0 && pInfo->rowsTotal >= pInfo->limit.limit) {
while ((*newgroup) == false) { // ignore the remain blocks
+ publishOperatorProfEvent(pOperator->upstream[0], QUERY_PROF_BEFORE_OPERATOR_EXEC);
pBlock = pOperator->upstream[0]->exec(pOperator->upstream[0], newgroup);
+ publishOperatorProfEvent(pOperator->upstream[0], QUERY_PROF_AFTER_OPERATOR_EXEC);
if (pBlock == NULL) {
setQueryStatus(pOperator->pRuntimeEnv, QUERY_COMPLETED);
pOperator->status = OP_EXEC_DONE;
@@ -986,7 +992,10 @@ static SSDataBlock* skipGroupBlock(SOperatorInfo* pOperator, bool* newgroup) {
return pBlock;
}
+ publishOperatorProfEvent(pOperator->upstream[0], QUERY_PROF_BEFORE_OPERATOR_EXEC);
pBlock = pOperator->upstream[0]->exec(pOperator->upstream[0], newgroup);
+ publishOperatorProfEvent(pOperator->upstream[0], QUERY_PROF_AFTER_OPERATOR_EXEC);
+
if (pBlock == NULL) {
setQueryStatus(pOperator->pRuntimeEnv, QUERY_COMPLETED);
pOperator->status = OP_EXEC_DONE;
@@ -1000,7 +1009,10 @@ static SSDataBlock* skipGroupBlock(SOperatorInfo* pOperator, bool* newgroup) {
}
while ((*newgroup) == false) {
+ publishOperatorProfEvent(pOperator->upstream[0], QUERY_PROF_BEFORE_OPERATOR_EXEC);
pBlock = pOperator->upstream[0]->exec(pOperator->upstream[0], newgroup);
+ publishOperatorProfEvent(pOperator->upstream[0], QUERY_PROF_AFTER_OPERATOR_EXEC);
+
if (pBlock == NULL) {
setQueryStatus(pOperator->pRuntimeEnv, QUERY_COMPLETED);
pOperator->status = OP_EXEC_DONE;
diff --git a/src/client/src/tscLocal.c b/src/client/src/tscLocal.c
index 2c9717306f2b26dfbbb5ec1d676b5baad3eb2d2d..668a9e940657f0cde8d8406dbe4a18fdb9c72f7e 100644
--- a/src/client/src/tscLocal.c
+++ b/src/client/src/tscLocal.c
@@ -485,6 +485,7 @@ static int32_t tscGetDBInfo(SCreateBuilder *builder, char *result) {
char buf[TSDB_DB_NAME_LEN + 64] = {0};
do {
+ memset(buf, 0, sizeof(buf));
int32_t* lengths = taos_fetch_lengths(pSql);
int32_t ret = tscGetNthFieldResult(row, fields, lengths, 0, buf);
if (0 == ret && STR_NOCASE_EQUAL(buf, strlen(buf), builder->buf, strlen(builder->buf))) {
diff --git a/src/client/src/tscParseInsert.c b/src/client/src/tscParseInsert.c
index a158162dc56af24e4a114f4926e0d339dddf553a..137a7be7c7443d071acdb81543ef7f6402196074 100644
--- a/src/client/src/tscParseInsert.c
+++ b/src/client/src/tscParseInsert.c
@@ -1580,7 +1580,6 @@ void tscImportDataFromFile(SSqlObj *pSql) {
SImportFileSupport *pSupporter = calloc(1, sizeof(SImportFileSupport));
SSqlObj *pNew = createSubqueryObj(pSql, 0, parseFileSendDataBlock, pSupporter, TSDB_SQL_INSERT, NULL);
- pCmd->count = 1;
FILE *fp = fopen(pCmd->payload, "rb");
if (fp == NULL) {
diff --git a/src/client/src/tscPrepare.c b/src/client/src/tscPrepare.c
index bac8920d8f1d17cb53756378724a56b37dead866..8bb776ffeefa68992411433d98f8162fb4186198 100644
--- a/src/client/src/tscPrepare.c
+++ b/src/client/src/tscPrepare.c
@@ -1156,27 +1156,6 @@ static void insertBatchClean(STscStmt* pStmt) {
tfree(pCmd->insertParam.pTableNameList);
-/*
- STableDataBlocks** p = taosHashIterate(pCmd->insertParam.pTableBlockHashList, NULL);
-
- STableDataBlocks* pOneTableBlock = *p;
-
- while (1) {
- SSubmitBlk* pBlocks = (SSubmitBlk*) pOneTableBlock->pData;
-
- pOneTableBlock->size = sizeof(SSubmitBlk);
-
- pBlocks->numOfRows = 0;
-
- p = taosHashIterate(pCmd->insertParam.pTableBlockHashList, p);
- if (p == NULL) {
- break;
- }
-
- pOneTableBlock = *p;
- }
-*/
-
pCmd->insertParam.pDataBlocks = tscDestroyBlockArrayList(pCmd->insertParam.pDataBlocks);
pCmd->insertParam.numOfTables = 0;
@@ -1499,7 +1478,7 @@ int taos_stmt_prepare(TAOS_STMT* stmt, const char* sql, unsigned long length) {
pRes->numOfRows = 1;
strtolower(pSql->sqlstr, sql);
- tscDebugL("%p SQL: %s", pSql, pSql->sqlstr);
+ tscDebugL("0x%"PRIx64" SQL: %s", pSql->self, pSql->sqlstr);
if (tscIsInsertData(pSql->sqlstr)) {
pStmt->isInsert = true;
@@ -1604,7 +1583,7 @@ int taos_stmt_set_tbname_tags(TAOS_STMT* stmt, const char* name, TAOS_BIND* tags
if (taosHashGetSize(pCmd->insertParam.pTableBlockHashList) > 0) {
SHashObj* hashList = pCmd->insertParam.pTableBlockHashList;
pCmd->insertParam.pTableBlockHashList = NULL;
- tscResetSqlCmd(pCmd, true);
+ tscResetSqlCmd(pCmd, false);
pCmd->insertParam.pTableBlockHashList = hashList;
}
@@ -1663,7 +1642,7 @@ int taos_stmt_close(TAOS_STMT* stmt) {
} else {
if (pStmt->multiTbInsert) {
taosHashCleanup(pStmt->mtb.pTableHash);
- pStmt->mtb.pTableBlockHashList = tscDestroyBlockHashTable(pStmt->mtb.pTableBlockHashList, true);
+ pStmt->mtb.pTableBlockHashList = tscDestroyBlockHashTable(pStmt->mtb.pTableBlockHashList, false);
taosHashCleanup(pStmt->pSql->cmd.insertParam.pTableBlockHashList);
pStmt->pSql->cmd.insertParam.pTableBlockHashList = NULL;
taosArrayDestroy(pStmt->mtb.tags);
diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c
index 76a56a79d0d5f0290b4d088dc5e669165cf17bd0..5884d18ee73f77a7de0f62504ab0027bea180ebc 100644
--- a/src/client/src/tscSQLParser.c
+++ b/src/client/src/tscSQLParser.c
@@ -65,7 +65,6 @@ static char* getAccountId(SSqlObj* pSql);
static int convertTimestampStrToInt64(tVariant *pVar, int32_t precision);
static bool serializeExprListToVariant(SArray* pList, tVariant **dest, int16_t colType, uint8_t precision);
-static int32_t validateParamOfRelationIn(tVariant *pVar, int32_t colType);
static bool has(SArray* pFieldList, int32_t startIdx, const char* name);
static char* cloneCurrentDBName(SSqlObj* pSql);
@@ -156,78 +155,78 @@ bool serializeExprListToVariant(SArray* pList, tVariant **dst, int16_t colType,
return ret;
}
- tSqlExprItem* item = (tSqlExprItem *)taosArrayGet(pList, 0);
- int32_t firstTokenType = item->pNode->token.type;
- int32_t type = firstTokenType;
+ tSqlExpr* item = ((tSqlExprItem*)(taosArrayGet(pList, 0)))->pNode;
+ int32_t firstVarType = item->value.nType;
- //nchar to binary and other xxint to bigint
- toTSDBType(type);
- if (colType != TSDB_DATA_TYPE_TIMESTAMP && !IS_UNSIGNED_NUMERIC_TYPE(colType)) {
- if (type != colType && (type != TSDB_DATA_TYPE_BINARY || colType != TSDB_DATA_TYPE_NCHAR)) {
- return false;
- }
- }
- type = colType;
-
SBufferWriter bw = tbufInitWriter( NULL, false);
-
tbufEnsureCapacity(&bw, 512);
+ if (colType == TSDB_DATA_TYPE_TIMESTAMP) {
+ tbufWriteUint32(&bw, TSDB_DATA_TYPE_BIGINT);
+ } else {
+ tbufWriteUint32(&bw, colType);
+ }
- int32_t size = (int32_t)(pList->size);
- tbufWriteUint32(&bw, type);
- tbufWriteInt32(&bw, size);
+ tbufWriteInt32(&bw, (int32_t)(pList->size));
- for (int32_t i = 0; i < size; i++) {
+ for (int32_t i = 0; i < (int32_t)pList->size; i++) {
tSqlExpr* pSub = ((tSqlExprItem*)(taosArrayGet(pList, i)))->pNode;
+ tVariant* var = &pSub->value;
// check all the token type in expr list same or not
- if (firstTokenType != pSub->token.type) {
+ if (firstVarType != var->nType) {
break;
}
- toTSDBType(pSub->token.type);
-
- tVariant var;
- tVariantCreate(&var, &pSub->token);
- if (type == TSDB_DATA_TYPE_BOOL || IS_SIGNED_NUMERIC_TYPE(type)) {
- tbufWriteInt64(&bw, var.i64);
- } else if (IS_UNSIGNED_NUMERIC_TYPE(type)) {
- // ugly code, refactor later
- if (IS_UNSIGNED_NUMERIC_TYPE(pSub->token.type) || IS_SIGNED_NUMERIC_TYPE(pSub->token.type)) {
- tbufWriteUint64(&bw, var.i64);
+ if ((colType == TSDB_DATA_TYPE_BOOL || IS_SIGNED_NUMERIC_TYPE(colType))) {
+ if (var->nType != TSDB_DATA_TYPE_BOOL && !IS_SIGNED_NUMERIC_TYPE(var->nType)) {
+ break;
+ }
+ tbufWriteInt64(&bw, var->i64);
+ } else if (IS_UNSIGNED_NUMERIC_TYPE(colType)) {
+ if (IS_SIGNED_NUMERIC_TYPE(var->nType) || IS_UNSIGNED_NUMERIC_TYPE(var->nType)) {
+ tbufWriteUint64(&bw, var->u64);
} else {
- tVariantDestroy(&var);
break;
}
- }
- else if (type == TSDB_DATA_TYPE_DOUBLE || type == TSDB_DATA_TYPE_FLOAT) {
- tbufWriteDouble(&bw, var.dKey);
- } else if (type == TSDB_DATA_TYPE_BINARY){
- tbufWriteBinary(&bw, var.pz, var.nLen);
- } else if (type == TSDB_DATA_TYPE_NCHAR) {
- char *buf = (char *)calloc(1, (var.nLen + 1)*TSDB_NCHAR_SIZE);
- if (tVariantDump(&var, buf, type, false) != TSDB_CODE_SUCCESS) {
+ } else if (colType == TSDB_DATA_TYPE_DOUBLE || colType == TSDB_DATA_TYPE_FLOAT) {
+ if (IS_SIGNED_NUMERIC_TYPE(var->nType) || IS_UNSIGNED_NUMERIC_TYPE(var->nType)) {
+ tbufWriteDouble(&bw, (double)(var->i64));
+ } else if (var->nType == TSDB_DATA_TYPE_DOUBLE || var->nType == TSDB_DATA_TYPE_FLOAT){
+ tbufWriteDouble(&bw, var->dKey);
+ } else {
+ break;
+ }
+ } else if (colType == TSDB_DATA_TYPE_BINARY) {
+ if (var->nType != TSDB_DATA_TYPE_BINARY) {
+ break;
+ }
+ tbufWriteBinary(&bw, var->pz, var->nLen);
+ } else if (colType == TSDB_DATA_TYPE_NCHAR) {
+ if (var->nType != TSDB_DATA_TYPE_BINARY) {
+ break;
+ }
+ char *buf = (char *)calloc(1, (var->nLen + 1)*TSDB_NCHAR_SIZE);
+ if (tVariantDump(var, buf, colType, false) != TSDB_CODE_SUCCESS) {
free(buf);
- tVariantDestroy(&var);
break;
}
tbufWriteBinary(&bw, buf, twcslen((wchar_t *)buf) * TSDB_NCHAR_SIZE);
free(buf);
- } else if (type == TSDB_DATA_TYPE_TIMESTAMP) {
- if (var.nType == TSDB_DATA_TYPE_BINARY) {
- if (convertTimestampStrToInt64(&var, precision) < 0) {
- tVariantDestroy(&var);
+ } else if (colType == TSDB_DATA_TYPE_TIMESTAMP) {
+ if (var->nType == TSDB_DATA_TYPE_BINARY) {
+ if (convertTimestampStrToInt64(var, precision) < 0) {
break;
}
- tbufWriteInt64(&bw, var.i64);
- } else if (var.nType == TSDB_DATA_TYPE_BIGINT) {
- tbufWriteInt64(&bw, var.i64);
+ tbufWriteInt64(&bw, var->i64);
+ } else if (var->nType == TSDB_DATA_TYPE_BIGINT) {
+ tbufWriteInt64(&bw, var->i64);
+ } else {
+ break;
}
+ } else {
+ break;
}
- tVariantDestroy(&var);
-
- if (i == size - 1) { ret = true;}
- }
-
+ if (i == (int32_t)(pList->size - 1)) { ret = true;}
+ }
if (ret == true) {
if ((*dst = calloc(1, sizeof(tVariant))) != NULL) {
tVariantCreateFromBinary(*dst, tbufGetData(&bw, false), tbufTell(&bw), TSDB_DATA_TYPE_BINARY);
@@ -239,13 +238,6 @@ bool serializeExprListToVariant(SArray* pList, tVariant **dst, int16_t colType,
return ret;
}
-static int32_t validateParamOfRelationIn(tVariant *pVar, int32_t colType) {
- if (pVar->nType != TSDB_DATA_TYPE_BINARY) {
- return -1;
- }
- SBufferReader br = tbufInitReader(pVar->pz, pVar->nLen, false);
- return colType == TSDB_DATA_TYPE_NCHAR ? 0 : (tbufReadUint32(&br) == colType ? 0: -1);
-}
static uint8_t convertOptr(SStrToken *pToken) {
switch (pToken->type) {
@@ -1699,7 +1691,7 @@ static int32_t handleArithmeticExpr(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32
// arithmetic expression always return result in the format of double float
pExprInfo->base.resBytes = sizeof(double);
- pExprInfo->base.interBytes = sizeof(double);
+ pExprInfo->base.interBytes = 0;
pExprInfo->base.resType = TSDB_DATA_TYPE_DOUBLE;
pExprInfo->base.functionId = TSDB_FUNC_ARITHM;
@@ -1934,14 +1926,14 @@ SExprInfo* doAddProjectCol(SQueryInfo* pQueryInfo, int32_t colIndex, int32_t tab
index.columnIndex = colIndex;
}
- return tscExprAppend(pQueryInfo, functionId, &index, pSchema->type, pSchema->bytes, colId, pSchema->bytes,
+ return tscExprAppend(pQueryInfo, functionId, &index, pSchema->type, pSchema->bytes, colId, 0,
(functionId == TSDB_FUNC_TAGPRJ));
}
SExprInfo* tscAddFuncInSelectClause(SQueryInfo* pQueryInfo, int32_t outputColIndex, int16_t functionId,
SColumnIndex* pIndex, SSchema* pColSchema, int16_t flag, int16_t colId) {
SExprInfo* pExpr = tscExprInsert(pQueryInfo, outputColIndex, functionId, pIndex, pColSchema->type,
- pColSchema->bytes, colId, pColSchema->bytes, TSDB_COL_IS_TAG(flag));
+ pColSchema->bytes, colId, 0, TSDB_COL_IS_TAG(flag));
tstrncpy(pExpr->base.aliasName, pColSchema->name, sizeof(pExpr->base.aliasName));
tstrncpy(pExpr->base.token, pColSchema->name, sizeof(pExpr->base.token));
@@ -2079,33 +2071,29 @@ static int32_t setExprInfoForFunctions(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SS
const char* name, int32_t resColIdx, SColumnIndex* pColIndex, bool finalResult) {
const char* msg1 = "not support column types";
- int16_t type = 0;
- int16_t bytes = 0;
- int32_t functionID = cvtFunc.execFuncId;
-
- if (functionID == TSDB_FUNC_SPREAD) {
+ int32_t f = cvtFunc.execFuncId;
+ if (f == TSDB_FUNC_SPREAD) {
int32_t t1 = pSchema->type;
- if (t1 == TSDB_DATA_TYPE_BINARY || t1 == TSDB_DATA_TYPE_NCHAR || t1 == TSDB_DATA_TYPE_BOOL) {
+ if (IS_VAR_DATA_TYPE(t1) || t1 == TSDB_DATA_TYPE_BOOL) {
invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
return -1;
- } else {
- type = TSDB_DATA_TYPE_DOUBLE;
- bytes = tDataTypes[type].bytes;
}
- } else {
- type = pSchema->type;
- bytes = pSchema->bytes;
}
-
- SExprInfo* pExpr = tscExprAppend(pQueryInfo, functionID, pColIndex, type, bytes, getNewResColId(pCmd), bytes, false);
+
+ int16_t resType = 0;
+ int16_t resBytes = 0;
+ int32_t interBufSize = 0;
+
+ getResultDataInfo(pSchema->type, pSchema->bytes, f, 0, &resType, &resBytes, &interBufSize, 0, false);
+ SExprInfo* pExpr = tscExprAppend(pQueryInfo, f, pColIndex, resType, resBytes, getNewResColId(pCmd), interBufSize, false);
tstrncpy(pExpr->base.aliasName, name, tListLen(pExpr->base.aliasName));
- if (cvtFunc.originFuncId == TSDB_FUNC_LAST_ROW && cvtFunc.originFuncId != functionID) {
+ if (cvtFunc.originFuncId == TSDB_FUNC_LAST_ROW && cvtFunc.originFuncId != f) {
pExpr->base.colInfo.flag |= TSDB_COL_NULL;
}
// set reverse order scan data blocks for last query
- if (functionID == TSDB_FUNC_LAST) {
+ if (f == TSDB_FUNC_LAST) {
pExpr->base.numOfParams = 1;
pExpr->base.param[0].i64 = TSDB_ORDER_DESC;
pExpr->base.param[0].nType = TSDB_DATA_TYPE_INT;
@@ -2118,7 +2106,7 @@ static int32_t setExprInfoForFunctions(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SS
// if it is not in the final result, do not add it
SColumnList ids = createColumnList(1, pColIndex->tableIndex, pColIndex->columnIndex);
if (finalResult) {
- insertResultField(pQueryInfo, resColIdx, &ids, bytes, (int8_t)type, pExpr->base.aliasName, pExpr);
+ insertResultField(pQueryInfo, resColIdx, &ids, resBytes, (int8_t)resType, pExpr->base.aliasName, pExpr);
} else {
tscColumnListInsert(pQueryInfo->colList, ids.ids[0].columnIndex, pExpr->base.uid, pSchema);
}
@@ -2168,6 +2156,17 @@ static void updateLastScanOrderIfNeeded(SQueryInfo* pQueryInfo) {
}
}
+static UNUSED_FUNC void updateFunctionInterBuf(SQueryInfo* pQueryInfo, bool superTable) {
+ size_t numOfExpr = tscNumOfExprs(pQueryInfo);
+ for (int32_t i = 0; i < numOfExpr; ++i) {
+ SExprInfo* pExpr = tscExprGet(pQueryInfo, i);
+
+ int32_t param = (int32_t)pExpr->base.param[0].i64;
+ getResultDataInfo(pExpr->base.colType, pExpr->base.colBytes, pExpr->base.functionId, param, &pExpr->base.resType, &pExpr->base.resBytes,
+ &pExpr->base.interBytes, 0, superTable);
+ }
+}
+
int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t colIndex, tSqlExprItem* pItem, bool finalResult) {
STableMetaInfo* pTableMetaInfo = NULL;
int32_t functionId = pItem->pNode->functionId;
@@ -2277,10 +2276,11 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
case TSDB_FUNC_LEASTSQR: {
// 1. valid the number of parameters
int32_t numOfParams = (pItem->pNode->pParam == NULL)? 0: (int32_t) taosArrayGetSize(pItem->pNode->pParam);
+
+ // no parameters or more than one parameter for function
if (pItem->pNode->pParam == NULL ||
(functionId != TSDB_FUNC_LEASTSQR && functionId != TSDB_FUNC_DERIVATIVE && numOfParams != 1) ||
((functionId == TSDB_FUNC_LEASTSQR || functionId == TSDB_FUNC_DERIVATIVE) && numOfParams != 3)) {
- /* no parameters or more than one parameter for function */
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
@@ -2294,14 +2294,15 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
- if (index.columnIndex == TSDB_TBNAME_COLUMN_INDEX) {
+ pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex);
+ STableComInfo info = tscGetTableInfo(pTableMetaInfo->pTableMeta);
+
+ // functions can not be applied to tags
+ if (index.columnIndex == TSDB_TBNAME_COLUMN_INDEX || (index.columnIndex >= tscGetNumOfColumns(pTableMetaInfo->pTableMeta))) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg6);
}
// 2. check if sql function can be applied on this column data type
- pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex);
- STableComInfo info = tscGetTableInfo(pTableMetaInfo->pTableMeta);
-
SSchema* pSchema = tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, index.columnIndex);
if (!IS_NUMERIC_TYPE(pSchema->type)) {
@@ -2330,11 +2331,6 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
insertResultField(pQueryInfo, 0, &ids, TSDB_KEYSIZE, TSDB_DATA_TYPE_TIMESTAMP, aAggs[TSDB_FUNC_TS_DUMMY].name, pExpr);
}
- // functions can not be applied to tags
- if (index.columnIndex >= tscGetNumOfColumns(pTableMetaInfo->pTableMeta)) {
- return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg6);
- }
-
SExprInfo* pExpr = tscExprAppend(pQueryInfo, functionId, &index, resultType, resultSize, getNewResColId(pCmd), intermediateResSize, false);
if (functionId == TSDB_FUNC_LEASTSQR) { // set the leastsquares parameters
@@ -2363,9 +2359,9 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
}
if (info.precision == TSDB_TIME_PRECISION_MILLI) {
- tickPerSec /= 1000000;
+ tickPerSec /= TSDB_TICK_PER_SECOND(TSDB_TIME_PRECISION_MICRO);
} else if (info.precision == TSDB_TIME_PRECISION_MICRO) {
- tickPerSec /= 1000;
+ tickPerSec /= TSDB_TICK_PER_SECOND(TSDB_TIME_PRECISION_MILLI);
}
if (tickPerSec <= 0 || tickPerSec < TSDB_TICK_PER_SECOND(info.precision)) {
@@ -2559,8 +2555,9 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
tVariant* pVariant = &pParamElem[1].pNode->value;
- int8_t resultType = pSchema->type;
- int16_t resultSize = pSchema->bytes;
+ int16_t resultType = pSchema->type;
+ int16_t resultSize = pSchema->bytes;
+ int32_t interResult = 0;
char val[8] = {0};
@@ -2573,8 +2570,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg5);
}
- resultSize = sizeof(double);
- resultType = TSDB_DATA_TYPE_DOUBLE;
+ getResultDataInfo(pSchema->type, pSchema->bytes, functionId, 0, &resultType, &resultSize, &interResult, 0, false);
/*
* sql function transformation
@@ -2584,7 +2580,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
tscInsertPrimaryTsSourceColumn(pQueryInfo, pTableMetaInfo->pTableMeta->id.uid);
colIndex += 1; // the first column is ts
- pExpr = tscExprAppend(pQueryInfo, functionId, &index, resultType, resultSize, getNewResColId(pCmd), resultSize, false);
+ pExpr = tscExprAppend(pQueryInfo, functionId, &index, resultType, resultSize, getNewResColId(pCmd), interResult, false);
tscExprAddParams(&pExpr->base, val, TSDB_DATA_TYPE_DOUBLE, sizeof(double));
} else {
tVariantDump(pVariant, val, TSDB_DATA_TYPE_BIGINT, true);
@@ -2598,7 +2594,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
// set the first column ts for top/bottom query
SColumnIndex index1 = {index.tableIndex, PRIMARYKEY_TIMESTAMP_COL_INDEX};
pExpr = tscExprAppend(pQueryInfo, TSDB_FUNC_TS, &index1, TSDB_DATA_TYPE_TIMESTAMP, TSDB_KEYSIZE, getNewResColId(pCmd),
- TSDB_KEYSIZE, false);
+ 0, false);
tstrncpy(pExpr->base.aliasName, aAggs[TSDB_FUNC_TS].name, sizeof(pExpr->base.aliasName));
const int32_t TS_COLUMN_INDEX = PRIMARYKEY_TIMESTAMP_COL_INDEX;
@@ -2619,7 +2615,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
SColumnList ids = createColumnList(1, index.tableIndex, index.columnIndex);
if (finalResult) {
- insertResultField(pQueryInfo, colIndex, &ids, resultSize, resultType, pExpr->base.aliasName, pExpr);
+ insertResultField(pQueryInfo, colIndex, &ids, resultSize, (int8_t)resultType, pExpr->base.aliasName, pExpr);
} else {
assert(ids.num == 1);
tscColumnListInsert(pQueryInfo->colList, ids.ids[0].columnIndex, pExpr->base.uid, pSchema);
@@ -3113,15 +3109,10 @@ bool hasUnsupportFunctionsForSTableQuery(SSqlCmd* pCmd, SQueryInfo* pQueryInfo)
return true;
}
- if (pQueryInfo->groupbyExpr.numOfGroupCols != 1) {
+ SColIndex* pColIndex = taosArrayGet(pQueryInfo->groupbyExpr.columnInfo, 0);
+ if (pColIndex->colIndex != TSDB_TBNAME_COLUMN_INDEX) {
invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
return true;
- } else {
- SColIndex* pColIndex = taosArrayGet(pQueryInfo->groupbyExpr.columnInfo, 0);
- if (pColIndex->colIndex != TSDB_TBNAME_COLUMN_INDEX) {
- invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
- return true;
- }
}
} else if (tscIsSessionWindowQuery(pQueryInfo)) {
invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
@@ -3371,11 +3362,6 @@ static int32_t doExtractColumnFilterInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo,
if (pRight->tokenId != TK_SET || !serializeExprListToVariant(pRight->pParam, &pVal, colType, timePrecision)) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg);
}
- if (validateParamOfRelationIn(pVal, colType) != TSDB_CODE_SUCCESS) {
- tVariantDestroy(pVal);
- free(pVal);
- return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg);
- }
pColumnFilter->pz = (int64_t)calloc(1, pVal->nLen + 1);
pColumnFilter->len = pVal->nLen;
pColumnFilter->filterstr = 1;
@@ -3675,7 +3661,7 @@ static int32_t checkAndSetJoinCondInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tS
STableMeta* pTableMeta = pTableMetaInfo->pTableMeta;
index.columnIndex = index.columnIndex - tscGetNumOfColumns(pTableMetaInfo->pTableMeta);
- if (!tscColumnExists(pTableMetaInfo->tagColList, index.columnIndex, pTableMetaInfo->pTableMeta->id.uid)) {
+ if (tscColumnExists(pTableMetaInfo->tagColList, pTagSchema1->colId, pTableMetaInfo->pTableMeta->id.uid) < 0) {
tscColumnListInsert(pTableMetaInfo->tagColList, index.columnIndex, pTableMeta->id.uid, pTagSchema1);
if (taosArrayGetSize(pTableMetaInfo->tagColList) > 1) {
@@ -3707,7 +3693,7 @@ static int32_t checkAndSetJoinCondInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tS
if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) {
STableMeta* pTableMeta = pTableMetaInfo->pTableMeta;
index.columnIndex = index.columnIndex - tscGetNumOfColumns(pTableMeta);
- if (!tscColumnExists(pTableMetaInfo->tagColList, index.columnIndex, pTableMeta->id.uid)) {
+ if (tscColumnExists(pTableMetaInfo->tagColList, pTagSchema2->colId, pTableMeta->id.uid) < 0) {
tscColumnListInsert(pTableMetaInfo->tagColList, index.columnIndex, pTableMeta->id.uid, pTagSchema2);
if (taosArrayGetSize(pTableMetaInfo->tagColList) > 1) {
@@ -4831,6 +4817,12 @@ int32_t validateWhereNode(SQueryInfo* pQueryInfo, tSqlExpr** pExpr, SSqlObj* pSq
if ((ret = getColumnQueryCondInfo(&pSql->cmd, pQueryInfo, condExpr.pColumnCond, TK_AND)) != TSDB_CODE_SUCCESS) {
goto PARSE_WHERE_EXIT;
}
+
+  if (taosArrayGetSize(pQueryInfo->pUpstream) > 0) {
+ if ((ret = getColumnQueryCondInfo(&pSql->cmd, pQueryInfo, condExpr.pTimewindow, TK_AND)) != TSDB_CODE_SUCCESS) {
+ goto PARSE_WHERE_EXIT;
+ }
+ }
// 6. join condition
if ((ret = getJoinCondInfo(&pSql->cmd, pQueryInfo, condExpr.pJoinExpr)) != TSDB_CODE_SUCCESS) {
@@ -7818,8 +7810,9 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf
const char* msg3 = "start(end) time of query range required or time range too large";
const char* msg4 = "interval query not supported, since the result of sub query not include valid timestamp column";
const char* msg5 = "only tag query not compatible with normal column filter";
- const char* msg6 = "not support stddev/percentile in outer query yet";
- const char* msg7 = "drivative requires timestamp column exists in subquery";
+  const char* msg6 = "stddev/percentile/interp are not supported in the outer query yet";
+  const char* msg7 = "derivative/twa/irate requires a timestamp column in the subquery";
+ const char* msg8 = "condition missing for join query";
int32_t code = TSDB_CODE_SUCCESS;
@@ -7862,15 +7855,17 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf
if (validateSelectNodeList(pCmd, pQueryInfo, pSqlNode->pSelNodeList, false, false, timeWindowQuery) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_OPERATION;
}
+
// parse the window_state
if (validateStateWindowNode(pCmd, pQueryInfo, pSqlNode, false) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_OPERATION;
}
+
// todo NOT support yet
for(int32_t i = 0; i < tscNumOfExprs(pQueryInfo); ++i) {
SExprInfo* pExpr = tscExprGet(pQueryInfo, i);
int32_t f = pExpr->base.functionId;
- if (f == TSDB_FUNC_STDDEV || f == TSDB_FUNC_PERCT) {
+ if (f == TSDB_FUNC_STDDEV || f == TSDB_FUNC_PERCT || f == TSDB_FUNC_INTERP) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg6);
}
@@ -7885,9 +7880,17 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf
STableMeta* pTableMeta = tscGetMetaInfo(pQueryInfo, 0)->pTableMeta;
SSchema* pSchema = tscGetTableColumnSchema(pTableMeta, 0);
- if (tscNumOfExprs(pQueryInfo) > 1) {
+ int32_t numOfExprs = (int32_t) tscNumOfExprs(pQueryInfo);
+ if (numOfExprs == 1) {
+ SExprInfo* pExpr = tscExprGet(pQueryInfo, 0);
+ int32_t f = pExpr->base.functionId;
+ if (f == TSDB_FUNC_DERIVATIVE || f == TSDB_FUNC_TWA || f == TSDB_FUNC_IRATE) {
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg7);
+ }
+ } else {
SExprInfo* pExpr = tscExprGet(pQueryInfo, 1);
- if (pExpr->base.functionId == TSDB_FUNC_DERIVATIVE && pSchema->type != TSDB_DATA_TYPE_TIMESTAMP) {
+ int32_t f = pExpr->base.functionId;
+ if ((f == TSDB_FUNC_DERIVATIVE || f == TSDB_FUNC_TWA || f == TSDB_FUNC_IRATE) && pSchema->type != TSDB_DATA_TYPE_TIMESTAMP) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg7);
}
}
@@ -7897,10 +7900,9 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf
if (validateWhereNode(pQueryInfo, &pSqlNode->pWhere, pSql) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_OPERATION;
}
-
- if (pTableMeta->tableInfo.precision == TSDB_TIME_PRECISION_MILLI) {
- pQueryInfo->window.skey = pQueryInfo->window.skey / 1000;
- pQueryInfo->window.ekey = pQueryInfo->window.ekey / 1000;
+ } else {
+ if (pQueryInfo->numOfTables > 1) {
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg8);
}
}
@@ -7933,6 +7935,7 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf
return code;
}
+// updateFunctionInterBuf(pQueryInfo, false);
updateLastScanOrderIfNeeded(pQueryInfo);
} else {
pQueryInfo->command = TSDB_SQL_SELECT;
@@ -8061,6 +8064,7 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf
updateLastScanOrderIfNeeded(pQueryInfo);
tscFieldInfoUpdateOffset(pQueryInfo);
+// updateFunctionInterBuf(pQueryInfo, isSTable);
if ((code = validateFillNode(pCmd, pQueryInfo, pSqlNode)) != TSDB_CODE_SUCCESS) {
return code;
@@ -8200,7 +8204,7 @@ int32_t exprTreeFromSqlExpr(SSqlCmd* pCmd, tExprNode **pExpr, const tSqlExpr* pS
int32_t colType = -1;
STableMeta* pTableMeta = tscGetMetaInfo(pQueryInfo, 0)->pTableMeta;
if (pCols != NULL && taosArrayGetSize(pCols) > 0) {
- SColIndex* idx = taosArrayGet(pCols, 0);
+ SColIndex* idx = taosArrayGet(pCols, taosArrayGetSize(pCols) - 1);
SSchema* pSchema = tscGetTableColumnSchema(pTableMeta, idx->colIndex);
if (pSchema != NULL) {
colType = pSchema->type;
diff --git a/src/client/src/tscServer.c b/src/client/src/tscServer.c
index 8c5e99474da66062004f460ce5915d8157b8a7d6..7fc47947c85913a2f64cb8a58092d4616a4779f9 100644
--- a/src/client/src/tscServer.c
+++ b/src/client/src/tscServer.c
@@ -795,6 +795,7 @@ static int32_t serializeSqlExpr(SSqlExpr* pExpr, STableMetaInfo* pTableMetaInfo,
pSqlExpr->colBytes = htons(pExpr->colBytes);
pSqlExpr->resType = htons(pExpr->resType);
pSqlExpr->resBytes = htons(pExpr->resBytes);
+ pSqlExpr->interBytes = htonl(pExpr->interBytes);
pSqlExpr->functionId = htons(pExpr->functionId);
pSqlExpr->numOfParams = htons(pExpr->numOfParams);
pSqlExpr->resColId = htons(pExpr->resColId);
@@ -1495,7 +1496,9 @@ int tscBuildAlterTableMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
pMsg = (char *)pSchema;
pAlterTableMsg->tagValLen = htonl(pAlterInfo->tagData.dataLen);
- memcpy(pMsg, pAlterInfo->tagData.data, pAlterInfo->tagData.dataLen);
+ if (pAlterInfo->tagData.dataLen > 0) {
+ memcpy(pMsg, pAlterInfo->tagData.data, pAlterInfo->tagData.dataLen);
+ }
pMsg += pAlterInfo->tagData.dataLen;
msgLen = (int32_t)(pMsg - (char*)pAlterTableMsg);
diff --git a/src/client/src/tscSub.c b/src/client/src/tscSub.c
index ef46b4068ecb0641aa1730d48f1e4ad4d95b8222..52ba424fa5adcd43ac5b624b7f486c06df71f2c4 100644
--- a/src/client/src/tscSub.c
+++ b/src/client/src/tscSub.c
@@ -512,6 +512,7 @@ TAOS_RES *taos_consume(TAOS_SUB *tsub) {
pSub->pSql = pSql;
pSql->pSubscription = pSub;
+ pSub->lastSyncTime = 0;
// no table list now, force to update it
tscDebug("begin table synchronization");
diff --git a/src/client/src/tscSubquery.c b/src/client/src/tscSubquery.c
index 22a603b71eacecb635350b518221654db9292810..4d97fef52f956b6d550f24c1bb88a34dd64c6d13 100644
--- a/src/client/src/tscSubquery.c
+++ b/src/client/src/tscSubquery.c
@@ -103,13 +103,6 @@ bool subAndCheckDone(SSqlObj *pSql, SSqlObj *pParentSql, int idx) {
pthread_mutex_lock(&subState->mutex);
-// bool done = allSubqueryDone(pParentSql);
-// if (done) {
-// tscDebug("0x%"PRIx64" subquery:0x%"PRIx64",%d all subs already done", pParentSql->self, pSql->self, idx);
-// pthread_mutex_unlock(&subState->mutex);
-// return false;
-// }
-
tscDebug("0x%"PRIx64" subquery:0x%"PRIx64", index:%d state set to 1", pParentSql->self, pSql->self, idx);
subState->states[idx] = 1;
@@ -2389,8 +2382,14 @@ int32_t tscHandleFirstRoundStableQuery(SSqlObj *pSql) {
SColumn *pCol = taosArrayGetP(pColList, i);
if (pCol->info.flist.numOfFilters > 0) { // copy to the pNew->cmd.colList if it is filtered.
- SColumn *p = tscColumnClone(pCol);
- taosArrayPush(pNewQueryInfo->colList, &p);
+ int32_t index1 = tscColumnExists(pNewQueryInfo->colList, pCol->columnIndex, pCol->tableUid);
+ if (index1 >= 0) {
+ SColumn* x = taosArrayGetP(pNewQueryInfo->colList, index1);
+ tscColumnCopy(x, pCol);
+ } else {
+ SColumn *p = tscColumnClone(pCol);
+ taosArrayPush(pNewQueryInfo->colList, &p);
+ }
}
}
@@ -3605,10 +3604,10 @@ void* createQInfoFromQueryNode(SQueryInfo* pQueryInfo, STableGroupInfo* pTableGr
// todo refactor: filter should not be applied here.
createFilterInfo(pQueryAttr, 0);
- pQueryAttr->numOfFilterCols = 0;
SArray* pa = NULL;
if (stage == MASTER_SCAN) {
+ pQueryAttr->createFilterOperator = false; // no need for parent query
pa = createExecOperatorPlan(pQueryAttr);
} else {
pa = createGlobalMergePlan(pQueryAttr);
diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c
index 9d2c500a92d8641d4b7f5c4573c74926fa97944d..6219fe23c791673777588ba9f3cf3c868d13353a 100644
--- a/src/client/src/tscUtil.c
+++ b/src/client/src/tscUtil.c
@@ -825,7 +825,10 @@ static void fetchNextBlockIfCompleted(SOperatorInfo* pOperator, bool* newgroup)
SJoinStatus* pStatus = &pJoinInfo->status[i];
if (pStatus->pBlock == NULL || pStatus->index >= pStatus->pBlock->info.rows) {
tscDebug("Retrieve nest query result, index:%d, total:%d", i, pOperator->numOfUpstream);
+
+      publishOperatorProfEvent(pOperator->upstream[i], QUERY_PROF_BEFORE_OPERATOR_EXEC);
      pStatus->pBlock = pOperator->upstream[i]->exec(pOperator->upstream[i], newgroup);
+      publishOperatorProfEvent(pOperator->upstream[i], QUERY_PROF_AFTER_OPERATOR_EXEC);
pStatus->index = 0;
if (pStatus->pBlock == NULL) {
@@ -1304,7 +1307,7 @@ void tscResetSqlCmd(SSqlCmd* pCmd, bool clearCachedMeta) {
if (pCmd->pTableMetaMap != NULL) {
STableMetaVgroupInfo* p = taosHashIterate(pCmd->pTableMetaMap, NULL);
while (p) {
- tfree(p->pVgroupInfo);
+ tscVgroupInfoClear(p->pVgroupInfo);
tfree(p->pTableMeta);
p = taosHashIterate(pCmd->pTableMetaMap, p);
}
@@ -1332,7 +1335,7 @@ void tscFreeSubobj(SSqlObj* pSql) {
tscDebug("0x%"PRIx64" start to free sub SqlObj, numOfSub:%d", pSql->self, pSql->subState.numOfSub);
for(int32_t i = 0; i < pSql->subState.numOfSub; ++i) {
- tscDebug("0x%"PRIx64" free sub SqlObj:%p, index:%d", pSql->self, pSql->pSubs[i], i);
+ tscDebug("0x%"PRIx64" free sub SqlObj:0x%"PRIx64", index:%d", pSql->self, pSql->pSubs[i]->self, i);
taos_free_result(pSql->pSubs[i]);
pSql->pSubs[i] = NULL;
}
@@ -1784,7 +1787,7 @@ int32_t tscMergeTableDataBlocks(SInsertStatementParam *pInsertParam, bool freeBl
tscSortRemoveDataBlockDupRows(pOneTableBlock);
char* ekey = (char*)pBlocks->data + pOneTableBlock->rowSize*(pBlocks->numOfRows-1);
- tscDebug("0x%"PRIx64" name:%s, name:%d rows:%d sversion:%d skey:%" PRId64 ", ekey:%" PRId64, pInsertParam->objectId, tNameGetTableName(&pOneTableBlock->tableName),
+ tscDebug("0x%"PRIx64" name:%s, tid:%d rows:%d sversion:%d skey:%" PRId64 ", ekey:%" PRId64, pInsertParam->objectId, tNameGetTableName(&pOneTableBlock->tableName),
pBlocks->tid, pBlocks->numOfRows, pBlocks->sversion, GET_INT64_VAL(pBlocks->data), GET_INT64_VAL(ekey));
int32_t len = pBlocks->numOfRows * (pOneTableBlock->rowSize + expandSize) + sizeof(STColumn) * tscGetNumOfColumns(pOneTableBlock->pTableMeta);
@@ -2270,18 +2273,14 @@ int32_t tscExprCopyAll(SArray* dst, const SArray* src, bool deepcopy) {
return 0;
}
-bool tscColumnExists(SArray* pColumnList, int32_t columnIndex, uint64_t uid) {
- // ignore the tbname columnIndex to be inserted into source list
- if (columnIndex < 0) {
- return false;
- }
-
+// ignore the tbname columnIndex to be inserted into source list
+int32_t tscColumnExists(SArray* pColumnList, int32_t columnId, uint64_t uid) {
size_t numOfCols = taosArrayGetSize(pColumnList);
int32_t i = 0;
while (i < numOfCols) {
SColumn* pCol = taosArrayGetP(pColumnList, i);
- if ((pCol->columnIndex != columnIndex) || (pCol->tableUid != uid)) {
+ if ((pCol->info.colId != columnId) || (pCol->tableUid != uid)) {
++i;
continue;
} else {
@@ -2290,10 +2289,10 @@ bool tscColumnExists(SArray* pColumnList, int32_t columnIndex, uint64_t uid) {
}
if (i >= numOfCols || numOfCols == 0) {
- return false;
+ return -1;
}
- return true;
+ return i;
}
void tscExprAssign(SExprInfo* dst, const SExprInfo* src) {
@@ -2379,13 +2378,7 @@ SColumn* tscColumnClone(const SColumn* src) {
return NULL;
}
- dst->columnIndex = src->columnIndex;
- dst->tableUid = src->tableUid;
- dst->info.flist.numOfFilters = src->info.flist.numOfFilters;
- dst->info.flist.filterInfo = tFilterInfoDup(src->info.flist.filterInfo, src->info.flist.numOfFilters);
- dst->info.type = src->info.type;
- dst->info.colId = src->info.colId;
- dst->info.bytes = src->info.bytes;
+ tscColumnCopy(dst, src);
return dst;
}
@@ -2394,6 +2387,18 @@ static void tscColumnDestroy(SColumn* pCol) {
free(pCol);
}
+void tscColumnCopy(SColumn* pDest, const SColumn* pSrc) {
+ destroyFilterInfo(&pDest->info.flist);
+
+ pDest->columnIndex = pSrc->columnIndex;
+ pDest->tableUid = pSrc->tableUid;
+ pDest->info.flist.numOfFilters = pSrc->info.flist.numOfFilters;
+ pDest->info.flist.filterInfo = tFilterInfoDup(pSrc->info.flist.filterInfo, pSrc->info.flist.numOfFilters);
+ pDest->info.type = pSrc->info.type;
+ pDest->info.colId = pSrc->info.colId;
+ pDest->info.bytes = pSrc->info.bytes;
+}
+
void tscColumnListCopy(SArray* dst, const SArray* src, uint64_t tableUid) {
assert(src != NULL && dst != NULL);
@@ -3276,6 +3281,8 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, __async_cb_func_t
pnCmd->insertParam.pTableNameList = NULL;
pnCmd->insertParam.pTableBlockHashList = NULL;
+ memset(&pnCmd->insertParam.tagData, 0, sizeof(STagData));
+
if (tscAddQueryInfo(pnCmd) != TSDB_CODE_SUCCESS) {
terrno = TSDB_CODE_TSC_OUT_OF_MEMORY;
goto _error;
@@ -4075,7 +4082,10 @@ SVgroupsInfo* tscVgroupsInfoDup(SVgroupsInfo* pVgroupsInfo) {
size_t size = sizeof(SVgroupInfo) * pVgroupsInfo->numOfVgroups + sizeof(SVgroupsInfo);
SVgroupsInfo* pInfo = calloc(1, size);
- memcpy(pInfo, pVgroupsInfo, size);
+ pInfo->numOfVgroups = pVgroupsInfo->numOfVgroups;
+ for (int32_t m = 0; m < pVgroupsInfo->numOfVgroups; ++m) {
+ tscSVgroupInfoCopy(&pInfo->vgroups[m], &pVgroupsInfo->vgroups[m]);
+ }
return pInfo;
}
diff --git a/src/client/tests/timeParseTest.cpp b/src/client/tests/timeParseTest.cpp
index 692398e3b7329b31ac02d4c36635000d4721d8fc..3cc6d541e002a9167b5e2668d4914ad1aa6f94f0 100644
--- a/src/client/tests/timeParseTest.cpp
+++ b/src/client/tests/timeParseTest.cpp
@@ -98,7 +98,7 @@ TEST(testCase, parse_time) {
taosParseTime(t41, &time, strlen(t41), TSDB_TIME_PRECISION_MILLI, 0);
EXPECT_EQ(time, 852048000999);
-// int64_t k = timezone;
+ // int64_t k = timezone;
char t42[] = "1997-1-1T0:0:0.999999999Z";
taosParseTime(t42, &time, strlen(t42), TSDB_TIME_PRECISION_MILLI, 0);
EXPECT_EQ(time, 852048000999 - timezone * MILLISECOND_PER_SECOND);
diff --git a/src/common/inc/tdataformat.h b/src/common/inc/tdataformat.h
index 8ee73291566afcbf604b607c0bc6a426ca372b87..e3989a1deb5fa4817bd459916ad238e58a88577b 100644
--- a/src/common/inc/tdataformat.h
+++ b/src/common/inc/tdataformat.h
@@ -289,6 +289,11 @@ static FORCE_INLINE TKEY dataColsTKeyFirst(SDataCols *pCols) {
}
}
+static FORCE_INLINE TSKEY dataColsKeyAtRow(SDataCols *pCols, int row) {
+ ASSERT(row < pCols->numOfRows);
+ return dataColsKeyAt(pCols, row);
+}
+
static FORCE_INLINE TSKEY dataColsKeyFirst(SDataCols *pCols) {
if (pCols->numOfRows) {
return dataColsKeyAt(pCols, 0);
diff --git a/src/common/src/tdataformat.c b/src/common/src/tdataformat.c
index 7ae34d532ca7ca52d3414a9d8f7366db15e046b8..94c429cfc0d24937020ff77513b168a3e6811ca9 100644
--- a/src/common/src/tdataformat.c
+++ b/src/common/src/tdataformat.c
@@ -452,7 +452,7 @@ int tdMergeDataCols(SDataCols *target, SDataCols *source, int rowsToMerge, int *
SDataCols *pTarget = NULL;
- if ((target->numOfRows == 0) || (dataColsKeyLast(target) < dataColsKeyFirst(source))) { // No overlap
+ if ((target->numOfRows == 0) || (dataColsKeyLast(target) < dataColsKeyAtRow(source, *pOffset))) { // No overlap
ASSERT(target->numOfRows + rowsToMerge <= target->maxPoints);
for (int i = 0; i < rowsToMerge; i++) {
for (int j = 0; j < source->numOfCols; j++) {
diff --git a/src/dnode/src/dnodeMain.c b/src/dnode/src/dnodeMain.c
index cf633502c14ddc3f85f8e8f36059c5e835d402c0..61b11579bf8da901697df3202c872caf7d6c7371 100644
--- a/src/dnode/src/dnodeMain.c
+++ b/src/dnode/src/dnodeMain.c
@@ -303,6 +303,8 @@ static int32_t dnodeInitStorage() {
dnodeCheckDataDirOpenned(tsDnodeDir);
+ taosGetDisk();
+ taosPrintDiskInfo();
dInfo("dnode storage is initialized at %s", tsDnodeDir);
return 0;
}
diff --git a/src/inc/taosdef.h b/src/inc/taosdef.h
index dac2dc84b6065d27514bbbbd2a836a81422408ad..365f24e12608ad99f6c317df7f4103fc4b07282f 100644
--- a/src/inc/taosdef.h
+++ b/src/inc/taosdef.h
@@ -100,7 +100,7 @@ extern const int32_t TYPE_BYTES[15];
#define TSDB_TIME_PRECISION_MICRO_STR "us"
#define TSDB_TIME_PRECISION_NANO_STR "ns"
-#define TSDB_TICK_PER_SECOND(precision) ((precision)==TSDB_TIME_PRECISION_MILLI ? 1e3L : ((precision)==TSDB_TIME_PRECISION_MICRO ? 1e6L : 1e9L))
+#define TSDB_TICK_PER_SECOND(precision) ((int64_t)((precision)==TSDB_TIME_PRECISION_MILLI ? 1e3L : ((precision)==TSDB_TIME_PRECISION_MICRO ? 1e6L : 1e9L)))
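+// Note: 1e3L/1e6L/1e9L are long double literals, so without the int64_t cast this
+// macro (and any arithmetic built on top of it) silently promotes to long double.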
#define T_MEMBER_SIZE(type, member) sizeof(((type *)0)->member)
#define T_APPEND_MEMBER(dst, ptr, type, member) \
diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c
index 33ee2a9bc2fc2df5b2b8074e4acdd78449603ffa..b2ac11b2cadc928e927b56010c7720e6cd9ef9fa 100644
--- a/src/kit/taosdemo/taosdemo.c
+++ b/src/kit/taosdemo/taosdemo.c
@@ -79,10 +79,9 @@ enum TEST_MODE {
#define MAX_SQL_SIZE 65536
#define BUFFER_SIZE (65536*2)
-#define COND_BUF_LEN BUFFER_SIZE - 30
+#define COND_BUF_LEN (BUFFER_SIZE - 30)
#define MAX_USERNAME_SIZE 64
#define MAX_PASSWORD_SIZE 64
-#define MAX_DB_NAME_SIZE 64
#define MAX_HOSTNAME_SIZE 64
#define MAX_TB_NAME_SIZE 64
#define MAX_DATA_SIZE (16*1024)+20 // max record len: 16*1024, timestamp string and ,('') need extra space
@@ -90,7 +89,7 @@ enum TEST_MODE {
#define OPT_ABORT 1 /* –abort */
#define STRING_LEN 60000
#define MAX_PREPARED_RAND 1000000
-#define MAX_FILE_NAME_LEN 256
+#define MAX_FILE_NAME_LEN 256 // max file name length on linux is 255.
#define MAX_SAMPLES_ONCE_FROM_FILE 10000
#define MAX_NUM_DATATYPE 10
@@ -195,13 +194,6 @@ enum _describe_table_index {
TSDB_MAX_DESCRIBE_METRIC
};
-typedef struct {
- char field[TSDB_COL_NAME_LEN + 1];
- char type[16];
- int length;
- char note[128];
-} SColDes;
-
/* Used by main to communicate with parse_opt. */
static char *g_dupstr = NULL;
@@ -247,16 +239,16 @@ typedef struct SArguments_S {
} SArguments;
typedef struct SColumn_S {
- char field[TSDB_COL_NAME_LEN + 1];
- char dataType[MAX_TB_NAME_SIZE];
+ char field[TSDB_COL_NAME_LEN];
+ char dataType[16];
uint32_t dataLen;
char note[128];
} StrColumn;
typedef struct SSuperTable_S {
- char sTblName[MAX_TB_NAME_SIZE+1];
- char dataSource[MAX_TB_NAME_SIZE+1]; // rand_gen or sample
- char childTblPrefix[MAX_TB_NAME_SIZE];
+ char sTblName[TSDB_TABLE_NAME_LEN];
+ char dataSource[MAX_TB_NAME_SIZE]; // rand_gen or sample
+ char childTblPrefix[TSDB_TABLE_NAME_LEN - 20]; // 20 characters reserved for seq
char insertMode[MAX_TB_NAME_SIZE]; // taosc, rest
uint16_t childTblExists;
int64_t childTblCount;
@@ -277,8 +269,8 @@ typedef struct SSuperTable_S {
int64_t timeStampStep;
char startTimestamp[MAX_TB_NAME_SIZE];
char sampleFormat[MAX_TB_NAME_SIZE]; // csv, json
- char sampleFile[MAX_FILE_NAME_LEN+1];
- char tagsFile[MAX_FILE_NAME_LEN+1];
+ char sampleFile[MAX_FILE_NAME_LEN];
+ char tagsFile[MAX_FILE_NAME_LEN];
uint32_t columnCount;
StrColumn columns[MAX_COLUMN_COUNT];
@@ -305,7 +297,7 @@ typedef struct SSuperTable_S {
} SSuperTable;
typedef struct {
- char name[TSDB_DB_NAME_LEN + 1];
+ char name[TSDB_DB_NAME_LEN];
char create_time[32];
int64_t ntables;
int32_t vgroups;
@@ -341,11 +333,11 @@ typedef struct SDbCfg_S {
int cache;
int blocks;
int quorum;
- char precision[MAX_TB_NAME_SIZE];
+ char precision[8];
} SDbCfg;
typedef struct SDataBase_S {
- char dbName[MAX_DB_NAME_SIZE];
+ char dbName[TSDB_DB_NAME_LEN];
bool drop; // 0: use exists, 1: if exists, drop then new create
SDbCfg dbCfg;
uint64_t superTblCount;
@@ -353,14 +345,14 @@ typedef struct SDataBase_S {
} SDataBase;
typedef struct SDbs_S {
- char cfgDir[MAX_FILE_NAME_LEN+1];
+ char cfgDir[MAX_FILE_NAME_LEN];
char host[MAX_HOSTNAME_SIZE];
struct sockaddr_in serv_addr;
uint16_t port;
char user[MAX_USERNAME_SIZE];
char password[MAX_PASSWORD_SIZE];
- char resultFile[MAX_FILE_NAME_LEN+1];
+ char resultFile[MAX_FILE_NAME_LEN];
bool use_metric;
bool insert_only;
bool do_aggreFunc;
@@ -387,7 +379,7 @@ typedef struct SpecifiedQueryInfo_S {
bool subscribeRestart;
int subscribeKeepProgress;
char sql[MAX_QUERY_SQL_COUNT][MAX_QUERY_SQL_LENGTH+1];
- char result[MAX_QUERY_SQL_COUNT][MAX_FILE_NAME_LEN+1];
+ char result[MAX_QUERY_SQL_COUNT][MAX_FILE_NAME_LEN];
int resubAfterConsume[MAX_QUERY_SQL_COUNT];
int endAfterConsume[MAX_QUERY_SQL_COUNT];
TAOS_SUB* tsub[MAX_QUERY_SQL_COUNT];
@@ -398,7 +390,7 @@ typedef struct SpecifiedQueryInfo_S {
} SpecifiedQueryInfo;
typedef struct SuperQueryInfo_S {
- char sTblName[MAX_TB_NAME_SIZE+1];
+ char sTblName[TSDB_TABLE_NAME_LEN];
uint64_t queryInterval; // 0: unlimit > 0 loop/s
uint32_t threadCnt;
uint32_t asyncMode; // 0: sync, 1: async
@@ -407,10 +399,10 @@ typedef struct SuperQueryInfo_S {
int subscribeKeepProgress;
uint64_t queryTimes;
int64_t childTblCount;
- char childTblPrefix[MAX_TB_NAME_SIZE];
+ char childTblPrefix[TSDB_TABLE_NAME_LEN - 20]; // 20 characters reserved for seq
int sqlCount;
char sql[MAX_QUERY_SQL_COUNT][MAX_QUERY_SQL_LENGTH+1];
- char result[MAX_QUERY_SQL_COUNT][MAX_FILE_NAME_LEN+1];
+ char result[MAX_QUERY_SQL_COUNT][MAX_FILE_NAME_LEN];
int resubAfterConsume;
int endAfterConsume;
TAOS_SUB* tsub[MAX_QUERY_SQL_COUNT];
@@ -420,13 +412,13 @@ typedef struct SuperQueryInfo_S {
} SuperQueryInfo;
typedef struct SQueryMetaInfo_S {
- char cfgDir[MAX_FILE_NAME_LEN+1];
+ char cfgDir[MAX_FILE_NAME_LEN];
char host[MAX_HOSTNAME_SIZE];
uint16_t port;
struct sockaddr_in serv_addr;
char user[MAX_USERNAME_SIZE];
char password[MAX_PASSWORD_SIZE];
- char dbName[MAX_DB_NAME_SIZE+1];
+ char dbName[TSDB_DB_NAME_LEN];
char queryMode[MAX_TB_NAME_SIZE]; // taosc, rest
SpecifiedQueryInfo specifiedQueryInfo;
@@ -438,11 +430,11 @@ typedef struct SThreadInfo_S {
TAOS * taos;
TAOS_STMT *stmt;
int threadID;
- char db_name[MAX_DB_NAME_SIZE+1];
+ char db_name[TSDB_DB_NAME_LEN];
uint32_t time_precision;
char filePath[4096];
FILE *fp;
- char tb_prefix[MAX_TB_NAME_SIZE];
+ char tb_prefix[TSDB_TABLE_NAME_LEN];
uint64_t start_table_from;
uint64_t end_table_to;
int64_t ntables;
@@ -608,7 +600,7 @@ SArguments g_args = {
1, // query_times
0, // interlace_rows;
30000, // num_of_RPR
- (1024*1024), // max_sql_len
+ (1024*1024), // max_sql_len
10000, // num_of_tables
10000, // num_of_DPT
0, // abort
@@ -990,9 +982,9 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) {
arguments->len_of_binary = atoi(argv[++i]);
} else if (strcmp(argv[i], "-m") == 0) {
if ((argc == i+1) ||
- (!isStringNumber(argv[i+1]))) {
+ (isStringNumber(argv[i+1]))) {
printHelp();
- errorPrint("%s", "\n\t-m need a number following!\n");
+ errorPrint("%s", "\n\t-m need a letter-initial string following!\n");
exit(EXIT_FAILURE);
}
arguments->tb_prefix = argv[++i];
@@ -1212,23 +1204,24 @@ static void fetchResult(TAOS_RES *res, threadInfo* pThreadInfo) {
return ;
}
- int totalLen = 0;
+ int64_t totalLen = 0;
// fetch the records row by row
while((row = taos_fetch_row(res))) {
- if ((strlen(pThreadInfo->filePath) > 0)
- && (totalLen >= 100*1024*1024 - 32000)) {
- appendResultBufToFile(databuf, pThreadInfo);
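+ // when the buffer nears the 100MB cap, flush it to the result file (if one is configured) and reset it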
+ if (totalLen >= 100*1024*1024 - 32000) {
+ if (strlen(pThreadInfo->filePath) > 0)
+ appendResultBufToFile(databuf, pThreadInfo);
totalLen = 0;
memset(databuf, 0, 100*1024*1024);
}
num_rows++;
- char temp[16000] = {0};
+ char temp[16000] = {0};
int len = taos_print_row(temp, row, fields, num_fields);
len += sprintf(temp + len, "\n");
//printf("query result:%s\n", temp);
memcpy(databuf + totalLen, temp, len);
totalLen += len;
+ debugPrint("totalLen: %"PRId64"\n", totalLen);
}
verbosePrint("%s() LN%d, databuf=%s resultFile=%s\n",
@@ -2501,6 +2494,13 @@ static int getChildNameOfSuperTableWithLimitAndOffset(TAOS * taos,
char* pTblName = childTblName;
while((row = taos_fetch_row(res)) != NULL) {
int32_t* len = taos_fetch_lengths(res);
+
+ if (0 == strlen((char *)row[0])) {
+ errorPrint("%s() LN%d, No.%"PRId64" table return empty name\n",
+ __func__, __LINE__, count);
+ exit(-1);
+ }
+
tstrncpy(pTblName, (char *)row[0], len[0]+1);
//printf("==== sub table name: %s\n", pTblName);
count++;
@@ -3035,7 +3035,7 @@ static int startMultiThreadCreateChildTable(
for (int64_t i = 0; i < threads; i++) {
threadInfo *pThreadInfo = infos + i;
pThreadInfo->threadID = i;
- tstrncpy(pThreadInfo->db_name, db_name, MAX_DB_NAME_SIZE);
+ tstrncpy(pThreadInfo->db_name, db_name, TSDB_DB_NAME_LEN);
pThreadInfo->superTblInfo = superTblInfo;
verbosePrint("%s() %d db_name: %s\n", __func__, __LINE__, db_name);
pThreadInfo->taos = taos_connect(
@@ -3326,7 +3326,7 @@ static bool getColumnAndTagTypeFromInsertJsonFile(
goto PARSE_OVER;
}
//tstrncpy(superTbls->columns[k].dataType, dataType->valuestring, MAX_TB_NAME_SIZE);
- tstrncpy(columnCase.dataType, dataType->valuestring, MAX_TB_NAME_SIZE);
+ tstrncpy(columnCase.dataType, dataType->valuestring, strlen(dataType->valuestring) + 1);
cJSON* dataLen = cJSON_GetObjectItem(column, "len");
if (dataLen && dataLen->type == cJSON_Number) {
@@ -3341,7 +3341,7 @@ static bool getColumnAndTagTypeFromInsertJsonFile(
for (int n = 0; n < count; ++n) {
tstrncpy(superTbls->columns[index].dataType,
- columnCase.dataType, MAX_TB_NAME_SIZE);
+ columnCase.dataType, strlen(columnCase.dataType) + 1);
superTbls->columns[index].dataLen = columnCase.dataLen;
index++;
}
@@ -3397,7 +3397,7 @@ static bool getColumnAndTagTypeFromInsertJsonFile(
__func__, __LINE__);
goto PARSE_OVER;
}
- tstrncpy(columnCase.dataType, dataType->valuestring, MAX_TB_NAME_SIZE);
+ tstrncpy(columnCase.dataType, dataType->valuestring, strlen(dataType->valuestring) + 1);
cJSON* dataLen = cJSON_GetObjectItem(tag, "len");
if (dataLen && dataLen->type == cJSON_Number) {
@@ -3412,7 +3412,7 @@ static bool getColumnAndTagTypeFromInsertJsonFile(
for (int n = 0; n < count; ++n) {
tstrncpy(superTbls->tags[index].dataType, columnCase.dataType,
- MAX_TB_NAME_SIZE);
+ strlen(columnCase.dataType) + 1);
superTbls->tags[index].dataLen = columnCase.dataLen;
index++;
}
@@ -3635,7 +3635,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
printf("ERROR: failed to read json, db name not found\n");
goto PARSE_OVER;
}
- tstrncpy(g_Dbs.db[i].dbName, dbName->valuestring, MAX_DB_NAME_SIZE);
+ tstrncpy(g_Dbs.db[i].dbName, dbName->valuestring, TSDB_DB_NAME_LEN);
cJSON *drop = cJSON_GetObjectItem(dbinfo, "drop");
if (drop && drop->type == cJSON_String && drop->valuestring != NULL) {
@@ -3656,10 +3656,9 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
if (precision && precision->type == cJSON_String
&& precision->valuestring != NULL) {
tstrncpy(g_Dbs.db[i].dbCfg.precision, precision->valuestring,
- MAX_DB_NAME_SIZE);
+ 8);
} else if (!precision) {
- //tstrncpy(g_Dbs.db[i].dbCfg.precision, "ms", MAX_DB_NAME_SIZE);
- memset(g_Dbs.db[i].dbCfg.precision, 0, MAX_DB_NAME_SIZE);
+ memset(g_Dbs.db[i].dbCfg.precision, 0, 8);
} else {
printf("ERROR: failed to read json, precision not found\n");
goto PARSE_OVER;
@@ -3836,7 +3835,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
goto PARSE_OVER;
}
tstrncpy(g_Dbs.db[i].superTbls[j].sTblName, stbName->valuestring,
- MAX_TB_NAME_SIZE);
+ TSDB_TABLE_NAME_LEN);
cJSON *prefix = cJSON_GetObjectItem(stbInfo, "childtable_prefix");
if (!prefix || prefix->type != cJSON_String || prefix->valuestring == NULL) {
@@ -3844,7 +3843,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
goto PARSE_OVER;
}
tstrncpy(g_Dbs.db[i].superTbls[j].childTblPrefix, prefix->valuestring,
- MAX_DB_NAME_SIZE);
+ TSDB_TABLE_NAME_LEN - 20);
cJSON *autoCreateTbl = cJSON_GetObjectItem(stbInfo, "auto_create_table");
if (autoCreateTbl
@@ -3912,9 +3911,9 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
if (dataSource && dataSource->type == cJSON_String
&& dataSource->valuestring != NULL) {
tstrncpy(g_Dbs.db[i].superTbls[j].dataSource,
- dataSource->valuestring, MAX_DB_NAME_SIZE);
+ dataSource->valuestring, TSDB_DB_NAME_LEN);
} else if (!dataSource) {
- tstrncpy(g_Dbs.db[i].superTbls[j].dataSource, "rand", MAX_DB_NAME_SIZE);
+ tstrncpy(g_Dbs.db[i].superTbls[j].dataSource, "rand", TSDB_DB_NAME_LEN);
} else {
errorPrint("%s() LN%d, failed to read json, data_source not found\n",
__func__, __LINE__);
@@ -3972,10 +3971,10 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
cJSON *ts = cJSON_GetObjectItem(stbInfo, "start_timestamp");
if (ts && ts->type == cJSON_String && ts->valuestring != NULL) {
tstrncpy(g_Dbs.db[i].superTbls[j].startTimestamp,
- ts->valuestring, MAX_DB_NAME_SIZE);
+ ts->valuestring, TSDB_DB_NAME_LEN);
} else if (!ts) {
tstrncpy(g_Dbs.db[i].superTbls[j].startTimestamp,
- "now", MAX_DB_NAME_SIZE);
+ "now", TSDB_DB_NAME_LEN);
} else {
printf("ERROR: failed to read json, start_timestamp not found\n");
goto PARSE_OVER;
@@ -3995,9 +3994,9 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
if (sampleFormat && sampleFormat->type
== cJSON_String && sampleFormat->valuestring != NULL) {
tstrncpy(g_Dbs.db[i].superTbls[j].sampleFormat,
- sampleFormat->valuestring, MAX_DB_NAME_SIZE);
+ sampleFormat->valuestring, TSDB_DB_NAME_LEN);
} else if (!sampleFormat) {
- tstrncpy(g_Dbs.db[i].superTbls[j].sampleFormat, "csv", MAX_DB_NAME_SIZE);
+ tstrncpy(g_Dbs.db[i].superTbls[j].sampleFormat, "csv", TSDB_DB_NAME_LEN);
} else {
printf("ERROR: failed to read json, sample_format not found\n");
goto PARSE_OVER;
@@ -4242,7 +4241,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
cJSON* dbs = cJSON_GetObjectItem(root, "databases");
if (dbs && dbs->type == cJSON_String && dbs->valuestring != NULL) {
- tstrncpy(g_queryInfo.dbName, dbs->valuestring, MAX_DB_NAME_SIZE);
+ tstrncpy(g_queryInfo.dbName, dbs->valuestring, TSDB_DB_NAME_LEN);
} else if (!dbs) {
printf("ERROR: failed to read json, databases not found\n");
goto PARSE_OVER;
@@ -4492,7 +4491,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
if (stblname && stblname->type == cJSON_String
&& stblname->valuestring != NULL) {
tstrncpy(g_queryInfo.superQueryInfo.sTblName, stblname->valuestring,
- MAX_TB_NAME_SIZE);
+ TSDB_TABLE_NAME_LEN);
} else {
errorPrint("%s() LN%d, failed to read json, super table name input error\n",
__func__, __LINE__);
@@ -5103,7 +5102,7 @@ static int32_t generateStbDataTail(
} else {
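+ // cap the buffer length passed below at MAX_DATA_SIZE so the sample row cannot overflow it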
retLen = getRowDataFromSample(
data,
- remainderBufLen,
+ remainderBufLen < MAX_DATA_SIZE ? remainderBufLen : MAX_DATA_SIZE,
startTime + superTblInfo->timeStampStep * k,
superTblInfo,
pSamplePos);
@@ -6302,16 +6301,6 @@ static void startMultiThreadInsertData(int threads, char* db_name,
}
}
- // read sample data from file first
- if ((superTblInfo) && (0 == strncasecmp(superTblInfo->dataSource,
- "sample", strlen("sample")))) {
- if (0 != prepareSampleDataForSTable(superTblInfo)) {
- errorPrint("%s() LN%d, prepare sample data for stable failed!\n",
- __func__, __LINE__);
- exit(-1);
- }
- }
-
TAOS* taos0 = taos_connect(
g_Dbs.host, g_Dbs.user,
g_Dbs.password, db_name, g_Dbs.port);
@@ -6417,7 +6406,7 @@ static void startMultiThreadInsertData(int threads, char* db_name,
for (int i = 0; i < threads; i++) {
threadInfo *pThreadInfo = infos + i;
pThreadInfo->threadID = i;
- tstrncpy(pThreadInfo->db_name, db_name, MAX_DB_NAME_SIZE);
+ tstrncpy(pThreadInfo->db_name, db_name, TSDB_DB_NAME_LEN);
pThreadInfo->time_precision = timePrec;
pThreadInfo->superTblInfo = superTblInfo;
@@ -6861,7 +6850,7 @@ static void *specifiedTableQuery(void *sarg) {
}
}
- char sqlStr[MAX_DB_NAME_SIZE + 5];
+ char sqlStr[TSDB_DB_NAME_LEN + 5];
sprintf(sqlStr, "use %s", g_queryInfo.dbName);
if (0 != queryDbExec(pThreadInfo->taos, sqlStr, NO_INSERT_TYPE, false)) {
taos_close(pThreadInfo->taos);
@@ -7337,12 +7326,6 @@ static void *superSubscribe(void *sarg) {
performancePrint("st: %"PRIu64" et: %"PRIu64" delta: %"PRIu64"\n", st, et, (et - st));
if (res) {
- if (g_queryInfo.superQueryInfo.result[pThreadInfo->querySeq][0] != 0) {
- sprintf(pThreadInfo->filePath, "%s-%d",
- g_queryInfo.superQueryInfo.result[pThreadInfo->querySeq],
- pThreadInfo->threadID);
- fetchResult(res, pThreadInfo);
- }
if (g_queryInfo.superQueryInfo.result[pThreadInfo->querySeq][0] != 0) {
sprintf(pThreadInfo->filePath, "%s-%d",
g_queryInfo.superQueryInfo.result[pThreadInfo->querySeq],
@@ -7449,10 +7432,10 @@ static void *specifiedSubscribe(void *sarg) {
sprintf(pThreadInfo->filePath, "%s-%d",
g_queryInfo.specifiedQueryInfo.result[pThreadInfo->querySeq],
pThreadInfo->threadID);
- fetchResult(
- g_queryInfo.specifiedQueryInfo.res[pThreadInfo->threadID],
- pThreadInfo);
}
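+ // fetch the result regardless of whether a result file is configured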
+ fetchResult(
+ g_queryInfo.specifiedQueryInfo.res[pThreadInfo->threadID],
+ pThreadInfo);
g_queryInfo.specifiedQueryInfo.consumed[pThreadInfo->threadID] ++;
if ((g_queryInfo.specifiedQueryInfo.resubAfterConsume[pThreadInfo->querySeq] != -1)
@@ -7689,9 +7672,9 @@ static void setParaFromArg(){
g_Dbs.dbCount = 1;
g_Dbs.db[0].drop = true;
- tstrncpy(g_Dbs.db[0].dbName, g_args.database, MAX_DB_NAME_SIZE);
+ tstrncpy(g_Dbs.db[0].dbName, g_args.database, TSDB_DB_NAME_LEN);
g_Dbs.db[0].dbCfg.replica = g_args.replica;
- tstrncpy(g_Dbs.db[0].dbCfg.precision, "ms", MAX_DB_NAME_SIZE);
+ tstrncpy(g_Dbs.db[0].dbCfg.precision, "ms", 8);
tstrncpy(g_Dbs.resultFile, g_args.output_file, MAX_FILE_NAME_LEN);
@@ -7713,7 +7696,7 @@ static void setParaFromArg(){
if (g_args.use_metric) {
g_Dbs.db[0].superTblCount = 1;
- tstrncpy(g_Dbs.db[0].superTbls[0].sTblName, "meters", MAX_TB_NAME_SIZE);
+ tstrncpy(g_Dbs.db[0].superTbls[0].sTblName, "meters", TSDB_TABLE_NAME_LEN);
g_Dbs.db[0].superTbls[0].childTblCount = g_args.num_of_tables;
g_Dbs.threadCount = g_args.num_of_threads;
g_Dbs.threadCountByCreateTbl = g_args.num_of_threads;
@@ -7724,7 +7707,7 @@ static void setParaFromArg(){
g_Dbs.db[0].superTbls[0].disorderRange = g_args.disorderRange;
g_Dbs.db[0].superTbls[0].disorderRatio = g_args.disorderRatio;
tstrncpy(g_Dbs.db[0].superTbls[0].childTblPrefix,
- g_args.tb_prefix, MAX_TB_NAME_SIZE);
+ g_args.tb_prefix, TSDB_TABLE_NAME_LEN - 20);
tstrncpy(g_Dbs.db[0].superTbls[0].dataSource, "rand", MAX_TB_NAME_SIZE);
g_Dbs.db[0].superTbls[0].iface = g_args.iface;
tstrncpy(g_Dbs.db[0].superTbls[0].startTimestamp,
@@ -7741,7 +7724,7 @@ static void setParaFromArg(){
}
tstrncpy(g_Dbs.db[0].superTbls[0].columns[i].dataType,
- data_type[i], MAX_TB_NAME_SIZE);
+ data_type[i], strlen(data_type[i]) + 1);
g_Dbs.db[0].superTbls[0].columns[i].dataLen = g_args.len_of_binary;
g_Dbs.db[0].superTbls[0].columnCount++;
}
@@ -7752,18 +7735,18 @@ static void setParaFromArg(){
for (int i = g_Dbs.db[0].superTbls[0].columnCount;
i < g_args.num_of_CPR; i++) {
tstrncpy(g_Dbs.db[0].superTbls[0].columns[i].dataType,
- "INT", MAX_TB_NAME_SIZE);
+ "INT", strlen("INT") + 1);
g_Dbs.db[0].superTbls[0].columns[i].dataLen = 0;
g_Dbs.db[0].superTbls[0].columnCount++;
}
}
tstrncpy(g_Dbs.db[0].superTbls[0].tags[0].dataType,
- "INT", MAX_TB_NAME_SIZE);
+ "INT", strlen("INT") + 1);
g_Dbs.db[0].superTbls[0].tags[0].dataLen = 0;
tstrncpy(g_Dbs.db[0].superTbls[0].tags[1].dataType,
- "BINARY", MAX_TB_NAME_SIZE);
+ "BINARY", strlen("BINARY") + 1);
g_Dbs.db[0].superTbls[0].tags[1].dataLen = g_args.len_of_binary;
g_Dbs.db[0].superTbls[0].tagCount = 2;
} else {
@@ -7899,11 +7882,11 @@ static void queryResult() {
pThreadInfo->end_table_to = g_Dbs.db[0].superTbls[0].childTblCount - 1;
pThreadInfo->superTblInfo = &g_Dbs.db[0].superTbls[0];
tstrncpy(pThreadInfo->tb_prefix,
- g_Dbs.db[0].superTbls[0].childTblPrefix, MAX_TB_NAME_SIZE);
+ g_Dbs.db[0].superTbls[0].childTblPrefix, TSDB_TABLE_NAME_LEN - 20);
} else {
pThreadInfo->ntables = g_args.num_of_tables;
pThreadInfo->end_table_to = g_args.num_of_tables -1;
- tstrncpy(pThreadInfo->tb_prefix, g_args.tb_prefix, MAX_TB_NAME_SIZE);
+ tstrncpy(pThreadInfo->tb_prefix, g_args.tb_prefix, TSDB_TABLE_NAME_LEN);
}
pThreadInfo->taos = taos_connect(
diff --git a/src/kit/taosdump/taosdump.c b/src/kit/taosdump/taosdump.c
index 165bbdf990babde49f3dd1b610360b0b13ac9dfe..33118ce3113eb401dcd9ba143e99ef359b07f935 100644
--- a/src/kit/taosdump/taosdump.c
+++ b/src/kit/taosdump/taosdump.c
@@ -29,6 +29,9 @@
#define COMMAND_SIZE 65536
//#define DEFAULT_DUMP_FILE "taosdump.sql"
+// guard against strncpy buffer overflow when the source field is wider than the destination
+#define min(a, b) (((a) < (b)) ? (a) : (b))
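+// NOTE: as a function-like macro, min() evaluates each argument twice; keep the arguments free of side effects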
+
int converStringToReadable(char *str, int size, char *buf, int bufsize);
int convertNCharToReadable(char *str, int size, char *buf, int bufsize);
void taosDumpCharset(FILE *fp);
@@ -1119,12 +1122,11 @@ int taosGetTableDes(
TAOS_FIELD *fields = taos_fetch_fields(res);
tstrncpy(tableDes->name, table, TSDB_TABLE_NAME_LEN);
-
while ((row = taos_fetch_row(res)) != NULL) {
strncpy(tableDes->cols[count].field, (char *)row[TSDB_DESCRIBE_METRIC_FIELD_INDEX],
fields[TSDB_DESCRIBE_METRIC_FIELD_INDEX].bytes);
strncpy(tableDes->cols[count].type, (char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX],
- fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes);
+ min(15, fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes));
tableDes->cols[count].length = *((int *)row[TSDB_DESCRIBE_METRIC_LENGTH_INDEX]);
strncpy(tableDes->cols[count].note, (char *)row[TSDB_DESCRIBE_METRIC_NOTE_INDEX],
fields[TSDB_DESCRIBE_METRIC_NOTE_INDEX].bytes);
@@ -1575,7 +1577,7 @@ int taosDumpDb(SDbInfo *dbInfo, struct arguments *arguments, FILE *fp, TAOS *tao
tstrncpy(tableRecord.name, (char *)row[TSDB_SHOW_TABLES_NAME_INDEX],
fields[TSDB_SHOW_TABLES_NAME_INDEX].bytes);
tstrncpy(tableRecord.metric, (char *)row[TSDB_SHOW_TABLES_METRIC_INDEX],
- fields[TSDB_SHOW_TABLES_METRIC_INDEX].bytes);
+ min(TSDB_TABLE_NAME_LEN, fields[TSDB_SHOW_TABLES_METRIC_INDEX].bytes));
taosWrite(fd, &tableRecord, sizeof(STableRecord));
diff --git a/src/mnode/src/mnodeDnode.c b/src/mnode/src/mnodeDnode.c
index a087b076a59e8b7f849d6d97b7f8e4e6283f7756..e3feea7d3af4b78cbae5038ecea2e3825ddb370d 100644
--- a/src/mnode/src/mnodeDnode.c
+++ b/src/mnode/src/mnodeDnode.c
@@ -101,6 +101,8 @@ static int32_t mnodeDnodeActionInsert(SSdbRow *pRow) {
pDnode->offlineReason = TAOS_DN_OFF_STATUS_NOT_RECEIVED;
}
+ pDnode->customScore = 0;
+
dnodeUpdateEp(pDnode->dnodeId, pDnode->dnodeEp, pDnode->dnodeFqdn, &pDnode->dnodePort);
mnodeUpdateDnodeEps();
@@ -1296,4 +1298,4 @@ int32_t mnodeCompactDnodes() {
mInfo("end to compact dnodes table...");
return 0;
-}
\ No newline at end of file
+}
diff --git a/src/mnode/src/mnodeTable.c b/src/mnode/src/mnodeTable.c
index 032c6ee94b5abe422c017d56d0bd3a44d4a1c8cf..20edb02c381b92ac3e04be546f63e41e5d21830e 100644
--- a/src/mnode/src/mnodeTable.c
+++ b/src/mnode/src/mnodeTable.c
@@ -1068,7 +1068,9 @@ static int32_t mnodeProcessCreateSuperTableMsg(SMnodeMsg *pMsg) {
pStable->info.tableId = strdup(pCreate->tableName);
pStable->info.type = TSDB_SUPER_TABLE;
pStable->createdTime = taosGetTimestampMs();
- pStable->uid = (us << 24) + ((sdbGetVersion() & ((1ul << 16) - 1ul)) << 8) + (taosRand() & ((1ul << 8) - 1ul));
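+ // keep only the low 40 bits of the microsecond timestamp so that, after the 24-bit shift,
+ // the uid still fits in 64 bits: 40 timestamp bits + 16 version bits + 8 random bits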
+ int64_t x = (us&0x000000FFFFFFFFFF);
+ x = x<<24;
+ pStable->uid = x + ((sdbGetVersion() & ((1ul << 16) - 1ul)) << 8) + (taosRand() & ((1ul << 8) - 1ul));
pStable->sversion = 0;
pStable->tversion = 0;
pStable->numOfColumns = numOfColumns;
@@ -1740,16 +1742,22 @@ static int32_t mnodeGetSuperTableMeta(SMnodeMsg *pMsg) {
return TSDB_CODE_SUCCESS;
}
-static int32_t calculateVgroupMsgLength(SSTableVgroupMsg* pInfo, int32_t numOfTable) {
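+// payload length of one super table's vgroup info: one SVgroupMsg per vnode group plus the SVgroupsMsg header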
+static int32_t doGetVgroupInfoLength(char* name) {
+ SSTableObj *pTable = mnodeGetSuperTable(name);
+ int32_t len = 0;
+ if (pTable != NULL && pTable->vgHash != NULL) {
+ len = (taosHashGetSize(pTable->vgHash) * sizeof(SVgroupMsg) + sizeof(SVgroupsMsg));
+ }
+
+ mnodeDecTableRef(pTable);
+ return len;
+}
+
+static int32_t getVgroupInfoLength(SSTableVgroupMsg* pInfo, int32_t numOfTable) {
int32_t contLen = sizeof(SSTableVgroupRspMsg) + 32 * sizeof(SVgroupMsg) + sizeof(SVgroupsMsg);
for (int32_t i = 0; i < numOfTable; ++i) {
char *stableName = (char *)pInfo + sizeof(SSTableVgroupMsg) + (TSDB_TABLE_FNAME_LEN)*i;
- SSTableObj *pTable = mnodeGetSuperTable(stableName);
- if (pTable != NULL && pTable->vgHash != NULL) {
- contLen += (taosHashGetSize(pTable->vgHash) * sizeof(SVgroupMsg) + sizeof(SVgroupsMsg));
- }
-
- mnodeDecTableRef(pTable);
+ contLen += doGetVgroupInfoLength(stableName);
}
return contLen;
@@ -1820,7 +1828,7 @@ static int32_t mnodeProcessSuperTableVgroupMsg(SMnodeMsg *pMsg) {
int32_t numOfTable = htonl(pInfo->numOfTables);
// calculate the required space.
- int32_t contLen = calculateVgroupMsgLength(pInfo, numOfTable);
+ int32_t contLen = getVgroupInfoLength(pInfo, numOfTable);
SSTableVgroupRspMsg *pRsp = rpcMallocCont(contLen);
if (pRsp == NULL) {
return TSDB_CODE_MND_OUT_OF_MEMORY;
@@ -2860,6 +2868,27 @@ static void mnodeProcessAlterTableRsp(SRpcMsg *rpcMsg) {
}
}
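+// double totalMallocLen until the buffer can also hold the vgroup info of every super table in pList;
+// rpcReallocCont may move the buffer, and NULL is returned on out-of-memory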
+static SMultiTableMeta* ensureMsgBufferSpace(SMultiTableMeta *pMultiMeta, SArray* pList, int32_t* totalMallocLen, int32_t numOfVgroupList) {
+ int32_t len = 0;
+ for (int32_t i = 0; i < numOfVgroupList; ++i) {
+ char *name = taosArrayGetP(pList, i);
+ len += doGetVgroupInfoLength(name);
+ }
+
+ if (len + pMultiMeta->contLen > (*totalMallocLen)) {
+ while (len + pMultiMeta->contLen > (*totalMallocLen)) {
+ (*totalMallocLen) *= 2;
+ }
+
+ pMultiMeta = rpcReallocCont(pMultiMeta, *totalMallocLen);
+ if (pMultiMeta == NULL) {
+ return NULL;
+ }
+ }
+
+ return pMultiMeta;
+}
+
static int32_t mnodeProcessMultiTableMetaMsg(SMnodeMsg *pMsg) {
SMultiTableInfoMsg *pInfo = pMsg->rpcMsg.pCont;
@@ -2950,8 +2979,6 @@ static int32_t mnodeProcessMultiTableMetaMsg(SMnodeMsg *pMsg) {
}
}
- char* msg = (char*) pMultiMeta + pMultiMeta->contLen;
-
// add the additional super table names that needs the vgroup info
for(;t < num; ++t) {
taosArrayPush(pList, &nameList[t]);
@@ -2961,6 +2988,13 @@ static int32_t mnodeProcessMultiTableMetaMsg(SMnodeMsg *pMsg) {
int32_t numOfVgroupList = (int32_t) taosArrayGetSize(pList);
pMultiMeta->numOfVgroup = htonl(numOfVgroupList);
+ pMultiMeta = ensureMsgBufferSpace(pMultiMeta, pList, &totalMallocLen, numOfVgroupList);
+ if (pMultiMeta == NULL) {
+ code = TSDB_CODE_MND_OUT_OF_MEMORY;
+ goto _end;
+ }
+
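+ // compute the write position only after the possible realloc, since pMultiMeta may have moved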
+ char* msg = (char*) pMultiMeta + pMultiMeta->contLen;
for(int32_t i = 0; i < numOfVgroupList; ++i) {
char* name = taosArrayGetP(pList, i);
diff --git a/src/os/inc/os.h b/src/os/inc/os.h
index 6731ca6d7db9ce72e72a88a1b9dadf76fb8ec87e..903e80d5c7f554d420eafc9224fe5e7e35fe8467 100644
--- a/src/os/inc/os.h
+++ b/src/os/inc/os.h
@@ -29,7 +29,7 @@ extern "C" {
#include "osMath.h"
#include "osMemory.h"
#include "osRand.h"
-#include "osSemphone.h"
+#include "osSemaphore.h"
#include "osSignal.h"
#include "osSleep.h"
#include "osSocket.h"
diff --git a/src/os/inc/osSemphone.h b/src/os/inc/osSemaphore.h
similarity index 97%
rename from src/os/inc/osSemphone.h
rename to src/os/inc/osSemaphore.h
index fe59095205010bef553413809706c62cd772a7e3..10d14700e013f66e6d98208f0e65fe1ca5fc3874 100644
--- a/src/os/inc/osSemphone.h
+++ b/src/os/inc/osSemaphore.h
@@ -13,8 +13,8 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
-#ifndef TDENGINE_OS_SEMPHONE_H
-#define TDENGINE_OS_SEMPHONE_H
+#ifndef TDENGINE_OS_SEMAPHORE_H
+#define TDENGINE_OS_SEMAPHORE_H
#ifdef __cplusplus
extern "C" {
diff --git a/src/os/inc/osSysinfo.h b/src/os/inc/osSysinfo.h
index d136f9664c9d5c00dc68382eaf3ebc6e97013cd0..5f0bc2950c1a7b0e9d2caf57c19f1913c7035980 100644
--- a/src/os/inc/osSysinfo.h
+++ b/src/os/inc/osSysinfo.h
@@ -27,18 +27,20 @@ typedef struct {
} SysDiskSize;
int32_t taosGetDiskSize(char *dataDir, SysDiskSize *diskSize);
-void taosGetSystemInfo();
-bool taosGetProcIO(float *readKB, float *writeKB);
-bool taosGetBandSpeed(float *bandSpeedKb);
-void taosGetDisk();
-bool taosGetCpuUsage(float *sysCpuUsage, float *procCpuUsage);
-bool taosGetProcMemory(float *memoryUsedMB);
-bool taosGetSysMemory(float *memoryUsedMB);
-void taosPrintOsInfo();
-int taosSystem(const char *cmd);
-void taosKillSystem();
-bool taosGetSystemUid(char *uid);
-char * taosGetCmdlineByPID(int pid);
+
+void taosGetSystemInfo();
+bool taosGetProcIO(float *readKB, float *writeKB);
+bool taosGetBandSpeed(float *bandSpeedKb);
+void taosGetDisk();
+bool taosGetCpuUsage(float *sysCpuUsage, float *procCpuUsage);
+bool taosGetProcMemory(float *memoryUsedMB);
+bool taosGetSysMemory(float *memoryUsedMB);
+void taosPrintOsInfo();
+void taosPrintDiskInfo();
+int taosSystem(const char *cmd);
+void taosKillSystem();
+bool taosGetSystemUid(char *uid);
+char *taosGetCmdlineByPID(int pid);
void taosSetCoreDump();
diff --git a/src/os/src/darwin/dwSemphone.c b/src/os/src/darwin/dwSemaphore.c
similarity index 100%
rename from src/os/src/darwin/dwSemphone.c
rename to src/os/src/darwin/dwSemaphore.c
diff --git a/src/os/src/darwin/dwSysInfo.c b/src/os/src/darwin/dwSysInfo.c
index b3c9bd528e9459a8d5798a2ff6ca4a1664503a90..10e0acc1309d80154a42b6cc948a8afb7a04b668 100644
--- a/src/os/src/darwin/dwSysInfo.c
+++ b/src/os/src/darwin/dwSysInfo.c
@@ -136,9 +136,6 @@ void taosPrintOsInfo() {
// uInfo(" os openMax: %" PRId64, tsOpenMax);
// uInfo(" os streamMax: %" PRId64, tsStreamMax);
uInfo(" os numOfCores: %d", tsNumOfCores);
- uInfo(" os totalDisk: %f(GB)", tsTotalDataDirGB);
- uInfo(" os usedDisk: %f(GB)", tsUsedDataDirGB);
- uInfo(" os availDisk: %f(GB)", tsAvailDataDirGB);
uInfo(" os totalMemory: %d(MB)", tsTotalMemoryMB);
struct utsname buf;
@@ -154,6 +151,14 @@ void taosPrintOsInfo() {
uInfo("==================================");
}
+void taosPrintDiskInfo() {
+ uInfo("==================================");
+ uInfo(" os totalDisk: %f(GB)", tsTotalDataDirGB);
+ uInfo(" os usedDisk: %f(GB)", tsUsedDataDirGB);
+ uInfo(" os availDisk: %f(GB)", tsAvailDataDirGB);
+ uInfo("==================================");
+}
+
void taosKillSystem() {
uError("function taosKillSystem, exit!");
exit(0);
diff --git a/src/os/src/detail/osSemphone.c b/src/os/src/detail/osSemaphore.c
similarity index 100%
rename from src/os/src/detail/osSemphone.c
rename to src/os/src/detail/osSemaphore.c
diff --git a/src/os/src/detail/osSysinfo.c b/src/os/src/detail/osSysinfo.c
index d0b284e1cab6717ab7b589557e4545ea744efa33..891dccaf9779016065013d4f59580026fb98352a 100644
--- a/src/os/src/detail/osSysinfo.c
+++ b/src/os/src/detail/osSysinfo.c
@@ -506,9 +506,6 @@ void taosPrintOsInfo() {
uInfo(" os openMax: %" PRId64, tsOpenMax);
uInfo(" os streamMax: %" PRId64, tsStreamMax);
uInfo(" os numOfCores: %d", tsNumOfCores);
- uInfo(" os totalDisk: %f(GB)", tsTotalDataDirGB);
- uInfo(" os usedDisk: %f(GB)", tsUsedDataDirGB);
- uInfo(" os availDisk: %f(GB)", tsAvailDataDirGB);
uInfo(" os totalMemory: %d(MB)", tsTotalMemoryMB);
struct utsname buf;
@@ -523,6 +520,14 @@ void taosPrintOsInfo() {
uInfo(" os machine: %s", buf.machine);
}
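+// print the data-directory disk usage (total/used/available, in GB), split out of taosPrintOsInfo()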
+void taosPrintDiskInfo() {
+ uInfo("==================================");
+ uInfo(" os totalDisk: %f(GB)", tsTotalDataDirGB);
+ uInfo(" os usedDisk: %f(GB)", tsUsedDataDirGB);
+ uInfo(" os availDisk: %f(GB)", tsAvailDataDirGB);
+ uInfo("==================================");
+}
+
void taosKillSystem() {
// SIGINT
uInfo("taosd will shut down soon");
diff --git a/src/os/src/windows/wSemphone.c b/src/os/src/windows/wSemaphore.c
similarity index 100%
rename from src/os/src/windows/wSemphone.c
rename to src/os/src/windows/wSemaphore.c
diff --git a/src/os/src/windows/wSysinfo.c b/src/os/src/windows/wSysinfo.c
index 8a81e3079a17e013372ebd7de0facb6b49a99c7b..72793a1049506fed0fce2d1a04c576097fec9fba 100644
--- a/src/os/src/windows/wSysinfo.c
+++ b/src/os/src/windows/wSysinfo.c
@@ -205,10 +205,15 @@ void taosGetSystemInfo() {
void taosPrintOsInfo() {
uInfo(" os numOfCores: %d", tsNumOfCores);
+ uInfo(" os totalMemory: %d(MB)", tsTotalMemoryMB);
+ uInfo("==================================");
+}
+
+void taosPrintDiskInfo() {
+ uInfo("==================================");
uInfo(" os totalDisk: %f(GB)", tsTotalDataDirGB);
uInfo(" os usedDisk: %f(GB)", tsUsedDataDirGB);
uInfo(" os availDisk: %f(GB)", tsAvailDataDirGB);
- uInfo(" os totalMemory: %d(MB)", tsTotalMemoryMB);
uInfo("==================================");
}
diff --git a/src/plugins/http/src/httpGcJson.c b/src/plugins/http/src/httpGcJson.c
index 397791706d0fc24f250c2332dddc5b0b031a4817..f33a994465a94bad5d79df8af73ff4fd9d640516 100644
--- a/src/plugins/http/src/httpGcJson.c
+++ b/src/plugins/http/src/httpGcJson.c
@@ -228,13 +228,11 @@ bool gcBuildQueryJson(HttpContext *pContext, HttpSqlCmd *cmd, TAOS_RES *result,
case TSDB_DATA_TYPE_NCHAR:
httpJsonStringForTransMean(jsonBuf, (char *)row[i], fields[i].bytes);
break;
- case TSDB_DATA_TYPE_TIMESTAMP:
- if (precision == TSDB_TIME_PRECISION_MILLI) { // ms
- httpJsonInt64(jsonBuf, *((int64_t *)row[i]));
- } else {
- httpJsonInt64(jsonBuf, *((int64_t *)row[i]) / 1000);
- }
+ case TSDB_DATA_TYPE_TIMESTAMP: {
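+ // convert from the table's native precision to milliseconds, instead of special-casing each precision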
+ int64_t ts = convertTimePrecision(*((int64_t *)row[i]), precision, TSDB_TIME_PRECISION_MILLI);
+ httpJsonInt64(jsonBuf, ts);
break;
+ }
default:
httpJsonString(jsonBuf, "-", 1);
break;
diff --git a/src/query/inc/qAggMain.h b/src/query/inc/qAggMain.h
index 57e7d2982f73432b9559c4ec231cf72e9680363e..044c538f4708b606eb77b63ac68921bcb3877ba7 100644
--- a/src/query/inc/qAggMain.h
+++ b/src/query/inc/qAggMain.h
@@ -204,7 +204,7 @@ typedef struct SAggFunctionInfo {
bool (*init)(SQLFunctionCtx *pCtx); // setup the execute environment
void (*xFunction)(SQLFunctionCtx *pCtx); // blocks version function
- void (*xFunctionF)(SQLFunctionCtx *pCtx, int32_t position); // single-row function version, todo merge with blockwise function
+// void (*xFunctionF)(SQLFunctionCtx *pCtx, int32_t position); // single-row function version, todo merge with blockwise function
// the finalizer must be called after all xFunction calls have been executed to generate the final result.
void (*xFinalize)(SQLFunctionCtx *pCtx);
diff --git a/src/query/inc/qExecutor.h b/src/query/inc/qExecutor.h
index 955dd734cf58582ecfe1bbf9871525be0ebc161c..8279c58b24796c734b39e97e9a8e953e0248332f 100644
--- a/src/query/inc/qExecutor.h
+++ b/src/query/inc/qExecutor.h
@@ -133,6 +133,28 @@ typedef struct STableQueryInfo {
SResultRowInfo resInfo;
} STableQueryInfo;
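+// query profiling: an event is recorded before and after each operator invocation, and when a query aborts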
+typedef enum {
+ QUERY_PROF_BEFORE_OPERATOR_EXEC = 0,
+ QUERY_PROF_AFTER_OPERATOR_EXEC,
+ QUERY_PROF_QUERY_ABORT
+} EQueryProfEventType;
+
+typedef struct {
+ EQueryProfEventType eventType;
+ int64_t eventTime;
+
+ union {
+ uint8_t operatorType; //for operator event
+ int32_t abortCode; //for query abort event
+ };
+} SQueryProfEvent;
+
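+// per-operator totals aggregated from the profiling events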
+typedef struct {
+ uint8_t operatorType;
+ int64_t sumSelfTime;
+ int64_t sumRunTimes;
+} SOperatorProfResult;
+
typedef struct SQueryCostInfo {
uint64_t loadStatisTime;
uint64_t loadFileBlockTime;
@@ -154,6 +176,9 @@ typedef struct SQueryCostInfo {
uint64_t tableInfoSize;
uint64_t hashSize;
uint64_t numOfTimeWindows;
+
+ SArray* queryProfEvents; //SArray<SQueryProfEvent>
+ SHashObj* operatorProfResults; //map<operator type, SOperatorProfResult>
} SQueryCostInfo;
typedef struct {
@@ -192,6 +217,7 @@ typedef struct SQueryAttr {
bool needReverseScan; // need reverse scan
bool distinctTag; // distinct tag query
bool stateWindow; // window State on sub/normal table
+ bool createFilterOperator; // whether a filter operator is needed
int32_t interBufSize; // intermediate buffer size
int32_t havingNum; // having expr number
@@ -285,7 +311,7 @@ enum OPERATOR_TYPE_E {
OP_TagScan = 4,
OP_TableBlockInfoScan= 5,
OP_Aggregate = 6,
- OP_Arithmetic = 7,
+ OP_Project = 7,
OP_Groupby = 8,
OP_Limit = 9,
OP_SLimit = 10,
@@ -295,7 +321,7 @@ enum OPERATOR_TYPE_E {
OP_MultiTableAggregate = 14,
OP_MultiTableTimeInterval = 15,
OP_DummyInput = 16, //TODO: remove it after the refactor is complete.
- OP_MultiwayMergeSort = 17, // multi-way data merge into one input stream.
+ OP_MultiwayMergeSort = 17, // multi-way data merge into one input stream.
OP_GlobalAggregate = 18, // global merge for the multi-way data sources.
OP_Filter = 19,
OP_Distinct = 20,
@@ -413,13 +439,13 @@ typedef struct SAggOperatorInfo {
uint32_t seed;
} SAggOperatorInfo;
-typedef struct SArithOperatorInfo {
+typedef struct SProjectOperatorInfo {
SOptrBasicInfo binfo;
int32_t bufCapacity;
uint32_t seed;
SSDataBlock *existDataBlock;
-} SArithOperatorInfo;
+} SProjectOperatorInfo;
typedef struct SLimitOperatorInfo {
int64_t limit;
@@ -513,7 +539,7 @@ SOperatorInfo* createTableScanOperator(void* pTsdbQueryHandle, SQueryRuntimeEnv*
SOperatorInfo* createTableSeqScanOperator(void* pTsdbQueryHandle, SQueryRuntimeEnv* pRuntimeEnv);
SOperatorInfo* createAggregateOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput);
-SOperatorInfo* createArithOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput);
+SOperatorInfo* createProjectOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput);
SOperatorInfo* createLimitOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream);
SOperatorInfo* createTimeIntervalOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput);
SOperatorInfo* createSWindowOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput);
@@ -586,7 +612,12 @@ int32_t doDumpQueryResult(SQInfo *pQInfo, char *data);
size_t getResultSize(SQInfo *pQInfo, int64_t *numOfRows);
void setQueryKilled(SQInfo *pQInfo);
+
+void publishOperatorProfEvent(SOperatorInfo* operatorInfo, EQueryProfEventType eventType);
+void publishQueryAbortEvent(SQInfo* pQInfo, int32_t code);
+void calculateOperatorProfResults(SQInfo* pQInfo);
void queryCostStatis(SQInfo *pQInfo);
+
void freeQInfo(SQInfo *pQInfo);
void freeQueryAttr(SQueryAttr *pQuery);
diff --git a/src/query/inc/sql.y b/src/query/inc/sql.y
index a54d46974ae94c56cf5d967be1daee1ab11b7190..ce2c3e361654ebe67310e6b71edc4ea6506d64ed 100644
--- a/src/query/inc/sql.y
+++ b/src/query/inc/sql.y
@@ -470,7 +470,7 @@ tagitem(A) ::= PLUS(X) FLOAT(Y). {
//////////////////////// The SELECT statement /////////////////////////////////
%type select {SSqlNode*}
%destructor select {destroySqlNode($$);}
-select(A) ::= SELECT(T) selcollist(W) from(X) where_opt(Y) interval_opt(K) session_option(H) windowstate_option(D) fill_opt(F) sliding_opt(S) groupby_opt(P) orderby_opt(Z) having_opt(N) slimit_opt(G) limit_opt(L). {
+select(A) ::= SELECT(T) selcollist(W) from(X) where_opt(Y) interval_opt(K) session_option(H) windowstate_option(D) fill_opt(F) sliding_opt(S) groupby_opt(P) having_opt(N) orderby_opt(Z) slimit_opt(G) limit_opt(L). {
A = tSetQuerySqlNode(&T, W, X, Y, P, Z, &K, &H, &D, &S, F, &L, &G, N);
}
diff --git a/src/query/src/qAggMain.c b/src/query/src/qAggMain.c
index bc14c75af561706a214fb950d2ae8567ef2c442c..676e5b6ce63648b2f8182baed756cf0f90039a44 100644
--- a/src/query/src/qAggMain.c
+++ b/src/query/src/qAggMain.c
@@ -74,7 +74,6 @@
} while (0);
void noop1(SQLFunctionCtx *UNUSED_PARAM(pCtx)) {}
-void noop2(SQLFunctionCtx *UNUSED_PARAM(pCtx), int32_t UNUSED_PARAM(index)) {}
void doFinalizer(SQLFunctionCtx *pCtx) { RESET_RESULT_INFO(GET_RES_INFO(pCtx)); }
@@ -456,20 +455,6 @@ static void count_function(SQLFunctionCtx *pCtx) {
SET_VAL(pCtx, numOfElem, 1);
}
-static void count_function_f(SQLFunctionCtx *pCtx, int32_t index) {
- char *pData = GET_INPUT_DATA(pCtx, index);
- if (pCtx->hasNull && isNull(pData, pCtx->inputType)) {
- return;
- }
-
- SET_VAL(pCtx, 1, 1);
- *((int64_t *)pCtx->pOutput) += pCtx->size;
-
- // do not need it actually
- SResultRowCellInfo *pInfo = GET_RES_INFO(pCtx);
- pInfo->hasResult = DATA_SET_FLAG;
-}
-
static void count_func_merge(SQLFunctionCtx *pCtx) {
int64_t *pData = (int64_t *)GET_INPUT_DATA_LIST(pCtx);
for (int32_t i = 0; i < pCtx->size; ++i) {
@@ -609,46 +594,6 @@ static void do_sum(SQLFunctionCtx *pCtx) {
}
}
-static void do_sum_f(SQLFunctionCtx *pCtx, int32_t index) {
- void *pData = GET_INPUT_DATA(pCtx, index);
- if (pCtx->hasNull && isNull(pData, pCtx->inputType)) {
- return;
- }
-
- SET_VAL(pCtx, 1, 1);
- int64_t *res = (int64_t*) pCtx->pOutput;
-
- if (pCtx->inputType == TSDB_DATA_TYPE_TINYINT) {
- *res += GET_INT8_VAL(pData);
- } else if (pCtx->inputType == TSDB_DATA_TYPE_SMALLINT) {
- *res += GET_INT16_VAL(pData);
- } else if (pCtx->inputType == TSDB_DATA_TYPE_INT) {
- *res += GET_INT32_VAL(pData);
- } else if (pCtx->inputType == TSDB_DATA_TYPE_BIGINT) {
- *res += GET_INT64_VAL(pData);
- } else if (pCtx->inputType == TSDB_DATA_TYPE_UTINYINT) {
- uint64_t *r = (uint64_t *)pCtx->pOutput;
- *r += GET_UINT8_VAL(pData);
- } else if (pCtx->inputType == TSDB_DATA_TYPE_USMALLINT) {
- uint64_t *r = (uint64_t *)pCtx->pOutput;
- *r += GET_UINT16_VAL(pData);
- } else if (pCtx->inputType == TSDB_DATA_TYPE_UINT) {
- uint64_t *r = (uint64_t *)pCtx->pOutput;
- *r += GET_UINT32_VAL(pData);
- } else if (pCtx->inputType == TSDB_DATA_TYPE_UBIGINT) {
- uint64_t *r = (uint64_t *)pCtx->pOutput;
- *r += GET_UINT64_VAL(pData);
- } else if (pCtx->inputType == TSDB_DATA_TYPE_DOUBLE) {
- double *retVal = (double*) pCtx->pOutput;
- *retVal += GET_DOUBLE_VAL(pData);
- } else if (pCtx->inputType == TSDB_DATA_TYPE_FLOAT) {
- double *retVal = (double*) pCtx->pOutput;
- *retVal += GET_FLOAT_VAL(pData);
- }
-
- GET_RES_INFO(pCtx)->hasResult = DATA_SET_FLAG;
-}
-
static void sum_function(SQLFunctionCtx *pCtx) {
do_sum(pCtx);
@@ -661,17 +606,6 @@ static void sum_function(SQLFunctionCtx *pCtx) {
}
}
-static void sum_function_f(SQLFunctionCtx *pCtx, int32_t index) {
- do_sum_f(pCtx, index);
-
- // keep the result data in output buffer, not in the intermediate buffer
- SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx);
- if (pResInfo->hasResult == DATA_SET_FLAG && pCtx->stableQuery) {
- SSumInfo *pSum = (SSumInfo *)pCtx->pOutput;
- pSum->hasResult = DATA_SET_FLAG;
- }
-}
-
static void sum_func_merge(SQLFunctionCtx *pCtx) {
int32_t notNullElems = 0;
@@ -847,53 +781,6 @@ static void avg_function(SQLFunctionCtx *pCtx) {
}
}
-static void avg_function_f(SQLFunctionCtx *pCtx, int32_t index) {
- void *pData = GET_INPUT_DATA(pCtx, index);
- if (pCtx->hasNull && isNull(pData, pCtx->inputType)) {
- return;
- }
-
- SET_VAL(pCtx, 1, 1);
-
- // NOTE: keep the intermediate result into the interResultBuf
- SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx);
-
- SAvgInfo *pAvgInfo = (SAvgInfo *)GET_ROWCELL_INTERBUF(pResInfo);
-
- if (pCtx->inputType == TSDB_DATA_TYPE_TINYINT) {
- pAvgInfo->sum += GET_INT8_VAL(pData);
- } else if (pCtx->inputType == TSDB_DATA_TYPE_SMALLINT) {
- pAvgInfo->sum += GET_INT16_VAL(pData);
- } else if (pCtx->inputType == TSDB_DATA_TYPE_INT) {
- pAvgInfo->sum += GET_INT32_VAL(pData);
- } else if (pCtx->inputType == TSDB_DATA_TYPE_BIGINT) {
- pAvgInfo->sum += GET_INT64_VAL(pData);
- } else if (pCtx->inputType == TSDB_DATA_TYPE_DOUBLE) {
- pAvgInfo->sum += GET_DOUBLE_VAL(pData);
- } else if (pCtx->inputType == TSDB_DATA_TYPE_FLOAT) {
- pAvgInfo->sum += GET_FLOAT_VAL(pData);
- } else if (pCtx->inputType == TSDB_DATA_TYPE_UTINYINT) {
- pAvgInfo->sum += GET_UINT8_VAL(pData);
- } else if (pCtx->inputType == TSDB_DATA_TYPE_USMALLINT) {
- pAvgInfo->sum += GET_UINT16_VAL(pData);
- } else if (pCtx->inputType == TSDB_DATA_TYPE_UINT) {
- pAvgInfo->sum += GET_UINT32_VAL(pData);
- } else if (pCtx->inputType == TSDB_DATA_TYPE_UBIGINT) {
- pAvgInfo->sum += GET_UINT64_VAL(pData);
- }
-
- // restore sum and count of elements
- pAvgInfo->num += 1;
-
- // set has result flag
- pResInfo->hasResult = DATA_SET_FLAG;
-
- // keep the data into the final output buffer for super table query since this execution may be the last one
- if (pCtx->stableQuery) {
- memcpy(pCtx->pOutput, GET_ROWCELL_INTERBUF(pResInfo), sizeof(SAvgInfo));
- }
-}
-
static void avg_func_merge(SQLFunctionCtx *pCtx) {
SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx);
@@ -1307,78 +1194,6 @@ static void max_func_merge(SQLFunctionCtx *pCtx) {
}
}
-static void minMax_function_f(SQLFunctionCtx *pCtx, int32_t index, int32_t isMin) {
- char *pData = GET_INPUT_DATA(pCtx, index);
- TSKEY key = GET_TS_DATA(pCtx, index);
-
- int32_t num = 0;
- if (pCtx->inputType == TSDB_DATA_TYPE_TINYINT) {
- int8_t *output = (int8_t *)pCtx->pOutput;
- int8_t i = GET_INT8_VAL(pData);
-
- UPDATE_DATA(pCtx, *output, i, num, isMin, key);
- } else if (pCtx->inputType == TSDB_DATA_TYPE_SMALLINT) {
- int16_t *output = (int16_t*) pCtx->pOutput;
- int16_t i = GET_INT16_VAL(pData);
-
- UPDATE_DATA(pCtx, *output, i, num, isMin, key);
- } else if (pCtx->inputType == TSDB_DATA_TYPE_INT) {
- int32_t *output = (int32_t*) pCtx->pOutput;
- int32_t i = GET_INT32_VAL(pData);
-
- UPDATE_DATA(pCtx, *output, i, num, isMin, key);
- } else if (pCtx->inputType == TSDB_DATA_TYPE_BIGINT) {
- int64_t *output = (int64_t*) pCtx->pOutput;
- int64_t i = GET_INT64_VAL(pData);
-
- UPDATE_DATA(pCtx, *output, i, num, isMin, key);
- } else if (pCtx->inputType == TSDB_DATA_TYPE_FLOAT) {
- float *output = (float*) pCtx->pOutput;
- float i = GET_FLOAT_VAL(pData);
-
- UPDATE_DATA(pCtx, *output, i, num, isMin, key);
- } else if (pCtx->inputType == TSDB_DATA_TYPE_DOUBLE) {
- double *output = (double*) pCtx->pOutput;
- double i = GET_DOUBLE_VAL(pData);
-
- UPDATE_DATA(pCtx, *output, i, num, isMin, key);
- }
-
- GET_RES_INFO(pCtx)->hasResult = DATA_SET_FLAG;
-}
-
-static void max_function_f(SQLFunctionCtx *pCtx, int32_t index) {
- char *pData = GET_INPUT_DATA(pCtx, index);
- if (pCtx->hasNull && isNull(pData, pCtx->inputType)) {
- return;
- }
-
- SET_VAL(pCtx, 1, 1);
- minMax_function_f(pCtx, index, 0);
-
- SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx);
- if (pResInfo->hasResult == DATA_SET_FLAG && pCtx->stableQuery) {
- char *flag = pCtx->pOutput + pCtx->inputBytes;
- *flag = DATA_SET_FLAG;
- }
-}
-
-static void min_function_f(SQLFunctionCtx *pCtx, int32_t index) {
- char *pData = GET_INPUT_DATA(pCtx, index);
- if (pCtx->hasNull && isNull(pData, pCtx->inputType)) {
- return;
- }
-
- SET_VAL(pCtx, 1, 1);
- minMax_function_f(pCtx, index, 1);
-
- SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx);
- if (pResInfo->hasResult == DATA_SET_FLAG && pCtx->stableQuery) {
- char *flag = pCtx->pOutput + pCtx->inputBytes;
- *flag = DATA_SET_FLAG;
- }
-}
-
#define LOOP_STDDEV_IMPL(type, r, d, ctx, delta, _type, num) \
for (int32_t i = 0; i < (ctx)->size; ++i) { \
if ((ctx)->hasNull && isNull((char *)&((type *)d)[i], (_type))) { \
@@ -1472,114 +1287,6 @@ static void stddev_function(SQLFunctionCtx *pCtx) {
}
}
-static void stddev_function_f(SQLFunctionCtx *pCtx, int32_t index) {
- // the second stage to calculate standard deviation
- SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx);
- SStddevInfo *pStd = GET_ROWCELL_INTERBUF(pResInfo);
-
- if (pCtx->currentStage == REPEAT_SCAN && pStd->stage == 0) {
- pStd->stage++;
- avg_finalizer(pCtx);
-
- pResInfo->initialized = true; // set it initialized to avoid re-initialization
-
- // save average value into tmpBuf, for second stage scan
- SAvgInfo *pAvg = GET_ROWCELL_INTERBUF(pResInfo);
-
- pStd->avg = GET_DOUBLE_VAL(pCtx->pOutput);
- assert((isnan(pAvg->sum) && pAvg->num == 0) || (pStd->num == pAvg->num && pStd->avg == pAvg->sum));
- }
-
- /* the first stage is to calculate average value */
- if (pStd->stage == 0) {
- avg_function_f(pCtx, index);
- } else if (pStd->num > 0) {
- double avg = pStd->avg;
- void * pData = GET_INPUT_DATA(pCtx, index);
-
- if (pCtx->hasNull && isNull(pData, pCtx->inputType)) {
- return;
- }
-
- switch (pCtx->inputType) {
- case TSDB_DATA_TYPE_INT: {
- pStd->res += POW2(GET_INT32_VAL(pData) - avg);
- break;
- }
- case TSDB_DATA_TYPE_FLOAT: {
- pStd->res += POW2(GET_FLOAT_VAL(pData) - avg);
- break;
- }
- case TSDB_DATA_TYPE_DOUBLE: {
- pStd->res += POW2(GET_DOUBLE_VAL(pData) - avg);
- break;
- }
- case TSDB_DATA_TYPE_BIGINT: {
- pStd->res += POW2(GET_INT64_VAL(pData) - avg);
- break;
- }
- case TSDB_DATA_TYPE_SMALLINT: {
- pStd->res += POW2(GET_INT16_VAL(pData) - avg);
- break;
- }
- case TSDB_DATA_TYPE_TINYINT: {
- pStd->res += POW2(GET_INT8_VAL(pData) - avg);
- break;
- }
- case TSDB_DATA_TYPE_UINT: {
- pStd->res += POW2(GET_UINT32_VAL(pData) - avg);
- break;
- }
- case TSDB_DATA_TYPE_UBIGINT: {
- pStd->res += POW2(GET_UINT64_VAL(pData) - avg);
- break;
- }
- case TSDB_DATA_TYPE_USMALLINT: {
- pStd->res += POW2(GET_UINT16_VAL(pData) - avg);
- break;
- }
- case TSDB_DATA_TYPE_UTINYINT: {
- pStd->res += POW2(GET_UINT8_VAL(pData) - avg);
- break;
- }
- default:
- qError("stddev function not support data type:%d", pCtx->inputType);
- }
-
- SET_VAL(pCtx, 1, 1);
- }
-}
-
-static UNUSED_FUNC void stddev_next_step(SQLFunctionCtx *pCtx) {
- /*
- * the stddevInfo and the average info struct share the same buffer area
- * And the position of each element in their struct is exactly the same matched
- */
- SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx);
- SStddevInfo *pStd = GET_ROWCELL_INTERBUF(pResInfo);
-
- if (pStd->stage == 0) {
- /*
- * stddev is calculated in two stage:
- * 1. get the average value of all data;
- * 2. get final result, based on the average values;
- * so, if this routine is in second stage, no further step is required
- */
- pStd->stage++;
- avg_finalizer(pCtx);
-
- pResInfo->initialized = true; // set it initialized to avoid re-initialization
-
- // save average value into tmpBuf, for second stage scan
- SAvgInfo *pAvg = GET_ROWCELL_INTERBUF(pResInfo);
-
- pStd->avg = GET_DOUBLE_VAL(pCtx->pOutput);
- assert((isnan(pAvg->sum) && pAvg->num == 0) || (pStd->num == pAvg->num && pStd->avg == pAvg->sum));
- } else {
- pResInfo->complete = true;
- }
-}
-
static void stddev_finalizer(SQLFunctionCtx *pCtx) {
SStddevInfo *pStd = GET_ROWCELL_INTERBUF(GET_RES_INFO(pCtx));
@@ -1696,97 +1403,6 @@ static void stddev_dst_function(SQLFunctionCtx *pCtx) {
memcpy(pCtx->pOutput, GET_ROWCELL_INTERBUF(GET_RES_INFO(pCtx)), sizeof(SAvgInfo));
}
-static void stddev_dst_function_f(SQLFunctionCtx *pCtx, int32_t index) {
- void *pData = GET_INPUT_DATA(pCtx, index);
- if (pCtx->hasNull && isNull(pData, pCtx->inputType)) {
- return;
- }
-
- // the second stage to calculate standard deviation
- SStddevdstInfo *pStd = GET_ROWCELL_INTERBUF(GET_RES_INFO(pCtx));
- double *retVal = &pStd->res;
-
- // all data are null, no need to proceed
- SArray* resList = (SArray*) pCtx->param[0].pz;
- if (resList == NULL) {
- return;
- }
-
- // find the correct group average results according to the tag value
- int32_t len = (int32_t) taosArrayGetSize(resList);
- assert(len > 0);
-
- double avg = 0;
- if (len == 1) {
- SResPair* p = taosArrayGet(resList, 0);
- avg = p->avg;
- } else { // todo opt performance by using iterator since the timestamp lsit is matched with the output result
- SResPair* p = bsearch(&pCtx->startTs, resList->pData, len, sizeof(SResPair), tsCompare);
- assert(p != NULL);
-
- avg = p->avg;
- }
-
- int32_t num = 0;
- switch (pCtx->inputType) {
- case TSDB_DATA_TYPE_INT: {
- for (int32_t i = 0; i < pCtx->size; ++i) {
- if (pCtx->hasNull && isNull((const char*) (&((int32_t *)pData)[i]), pCtx->inputType)) {
- continue;
- }
- num += 1;
- *retVal += POW2(((int32_t *)pData)[i] - avg);
- }
- break;
- }
- case TSDB_DATA_TYPE_FLOAT: {
- LOOP_STDDEV_IMPL(float, *retVal, pData, pCtx, avg, pCtx->inputType, num);
- break;
- }
- case TSDB_DATA_TYPE_DOUBLE: {
- LOOP_STDDEV_IMPL(double, *retVal, pData, pCtx, avg, pCtx->inputType, num);
- break;
- }
- case TSDB_DATA_TYPE_TINYINT: {
- LOOP_STDDEV_IMPL(int8_t, *retVal, pData, pCtx, avg, pCtx->inputType, num);
- break;
- }
- case TSDB_DATA_TYPE_UTINYINT: {
- LOOP_STDDEV_IMPL(int8_t, *retVal, pData, pCtx, avg, pCtx->inputType, num);
- break;
- }
- case TSDB_DATA_TYPE_SMALLINT: {
- LOOP_STDDEV_IMPL(int16_t, *retVal, pData, pCtx, avg, pCtx->inputType, num);
- break;
- }
- case TSDB_DATA_TYPE_USMALLINT: {
- LOOP_STDDEV_IMPL(uint16_t, *retVal, pData, pCtx, avg, pCtx->inputType, num);
- break;
- }
- case TSDB_DATA_TYPE_UINT: {
- LOOP_STDDEV_IMPL(uint32_t, *retVal, pData, pCtx, avg, pCtx->inputType, num);
- break;
- }
- case TSDB_DATA_TYPE_BIGINT: {
- LOOP_STDDEV_IMPL(int64_t, *retVal, pData, pCtx, avg, pCtx->inputType, num);
- break;
- }
- case TSDB_DATA_TYPE_UBIGINT: {
- LOOP_STDDEV_IMPL(uint64_t, *retVal, pData, pCtx, avg, pCtx->inputType, num);
- break;
- }
- default:
- qError("stddev function not support data type:%d", pCtx->inputType);
- }
-
- pStd->num += num;
- SET_VAL(pCtx, num, 1);
-
- // copy to the final output buffer for super table
- memcpy(pCtx->pOutput, GET_ROWCELL_INTERBUF(GET_RES_INFO(pCtx)), sizeof(SAvgInfo));
-}
-
-
static void stddev_dst_merge(SQLFunctionCtx *pCtx) {
SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx);
SStddevdstInfo* pRes = GET_ROWCELL_INTERBUF(pResInfo);
@@ -1833,7 +1449,7 @@ static bool first_last_function_setup(SQLFunctionCtx *pCtx) {
// todo opt for null block
static void first_function(SQLFunctionCtx *pCtx) {
- if (pCtx->order == TSDB_ORDER_DESC /*|| pCtx->preAggVals.dataBlockLoaded == false*/) {
+ if (pCtx->order == TSDB_ORDER_DESC) {
return;
}
@@ -1862,27 +1478,6 @@ static void first_function(SQLFunctionCtx *pCtx) {
SET_VAL(pCtx, notNullElems, 1);
}
-static void first_function_f(SQLFunctionCtx *pCtx, int32_t index) {
- if (pCtx->order == TSDB_ORDER_DESC) {
- return;
- }
-
- void *pData = GET_INPUT_DATA(pCtx, index);
- if (pCtx->hasNull && isNull(pData, pCtx->inputType)) {
- return;
- }
-
- SET_VAL(pCtx, 1, 1);
- memcpy(pCtx->pOutput, pData, pCtx->inputBytes);
-
- TSKEY ts = GET_TS_DATA(pCtx, index);
- DO_UPDATE_TAG_COLUMNS(pCtx, ts);
-
- SResultRowCellInfo *pInfo = GET_RES_INFO(pCtx);
- pInfo->hasResult = DATA_SET_FLAG;
- pInfo->complete = true; // get the first not-null data, completed
-}
-
static void first_data_assign_impl(SQLFunctionCtx *pCtx, char *pData, int32_t index) {
int64_t *timestamp = GET_TS_LIST(pCtx);
@@ -1932,21 +1527,6 @@ static void first_dist_function(SQLFunctionCtx *pCtx) {
SET_VAL(pCtx, notNullElems, 1);
}
-static void first_dist_function_f(SQLFunctionCtx *pCtx, int32_t index) {
- char *pData = GET_INPUT_DATA(pCtx, index);
- if (pCtx->hasNull && isNull(pData, pCtx->inputType)) {
- return;
- }
-
- if (pCtx->order == TSDB_ORDER_DESC) {
- return;
- }
-
- first_data_assign_impl(pCtx, pData, index);
-
- SET_VAL(pCtx, 1, 1);
-}
-
static void first_dist_func_merge(SQLFunctionCtx *pCtx) {
assert(pCtx->stableQuery);
@@ -1978,70 +1558,55 @@ static void first_dist_func_merge(SQLFunctionCtx *pCtx) {
* least one value in this block that is not null. (TODO: optimize for this case)
*/
static void last_function(SQLFunctionCtx *pCtx) {
- if (pCtx->order != pCtx->param[0].i64/* || pCtx->preAggVals.dataBlockLoaded == false*/) {
+ if (pCtx->order != pCtx->param[0].i64) {
return;
}
-
+
+ SResultRowCellInfo* pResInfo = GET_RES_INFO(pCtx);
+
int32_t notNullElems = 0;
-
- for (int32_t i = pCtx->size - 1; i >= 0; --i) {
- char *data = GET_INPUT_DATA(pCtx, i);
- if (pCtx->hasNull && isNull(data, pCtx->inputType)) {
- if (!pCtx->requireNull) {
- continue;
- }
- }
+ if (pCtx->order == TSDB_ORDER_DESC) {
- memcpy(pCtx->pOutput, data, pCtx->inputBytes);
-
- TSKEY ts = GET_TS_DATA(pCtx, i);
- DO_UPDATE_TAG_COLUMNS(pCtx, ts);
-
- SResultRowCellInfo *pInfo = GET_RES_INFO(pCtx);
- pInfo->hasResult = DATA_SET_FLAG;
-
- pInfo->complete = true; // set query completed on this column
- notNullElems++;
- break;
- }
-
- SET_VAL(pCtx, notNullElems, 1);
-}
+ for (int32_t i = pCtx->size - 1; i >= 0; --i) {
+ char *data = GET_INPUT_DATA(pCtx, i);
+ if (pCtx->hasNull && isNull(data, pCtx->inputType) && (!pCtx->requireNull)) {
+ continue;
+ }
-static void last_function_f(SQLFunctionCtx *pCtx, int32_t index) {
- void *pData = GET_INPUT_DATA(pCtx, index);
- if (pCtx->hasNull && isNull(pData, pCtx->inputType)) {
- return;
- }
+ memcpy(pCtx->pOutput, data, pCtx->inputBytes);
- // the scan order is not the required order, ignore it
- if (pCtx->order != pCtx->param[0].i64) {
- return;
- }
+ TSKEY ts = GET_TS_DATA(pCtx, i);
+ DO_UPDATE_TAG_COLUMNS(pCtx, ts);
- if (pCtx->order == TSDB_ORDER_DESC) {
- SET_VAL(pCtx, 1, 1);
- memcpy(pCtx->pOutput, pData, pCtx->inputBytes);
+ pResInfo->hasResult = DATA_SET_FLAG;
+ pResInfo->complete = true; // set query completed on this column
+ notNullElems++;
+ break;
+ }
+ } else { // ascending order
+ for (int32_t i = pCtx->size - 1; i >= 0; --i) {
+ char *data = GET_INPUT_DATA(pCtx, i);
+ if (pCtx->hasNull && isNull(data, pCtx->inputType) && (!pCtx->requireNull)) {
+ continue;
+ }
- TSKEY ts = GET_TS_DATA(pCtx, index);
- DO_UPDATE_TAG_COLUMNS(pCtx, ts);
+ TSKEY ts = GET_TS_DATA(pCtx, i);
- SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx);
- pResInfo->hasResult = DATA_SET_FLAG;
- pResInfo->complete = true; // set query completed
- } else { // in case of ascending order check, all data needs to be checked
- SResultRowCellInfo* pResInfo = GET_RES_INFO(pCtx);
- TSKEY ts = GET_TS_DATA(pCtx, index);
+ char* buf = GET_ROWCELL_INTERBUF(pResInfo);
+ if (pResInfo->hasResult != DATA_SET_FLAG || (*(TSKEY*)buf) < ts) {
+ pResInfo->hasResult = DATA_SET_FLAG;
+ memcpy(pCtx->pOutput, data, pCtx->inputBytes);
- char* buf = GET_ROWCELL_INTERBUF(pResInfo);
- if (pResInfo->hasResult != DATA_SET_FLAG || (*(TSKEY*)buf) < ts) {
- pResInfo->hasResult = DATA_SET_FLAG;
- memcpy(pCtx->pOutput, pData, pCtx->inputBytes);
+ *(TSKEY*)buf = ts;
+ DO_UPDATE_TAG_COLUMNS(pCtx, ts);
+ }
- *(TSKEY*)buf = ts;
- DO_UPDATE_TAG_COLUMNS(pCtx, ts);
+ notNullElems++;
+ break;
}
}
+
+ SET_VAL(pCtx, notNullElems, 1);
}
static void last_data_assign_impl(SQLFunctionCtx *pCtx, char *pData, int32_t index) {
@@ -2092,29 +1657,6 @@ static void last_dist_function(SQLFunctionCtx *pCtx) {
SET_VAL(pCtx, notNullElems, 1);
}
-static void last_dist_function_f(SQLFunctionCtx *pCtx, int32_t index) {
- if (pCtx->size == 0) {
- return;
- }
-
- char *pData = GET_INPUT_DATA(pCtx, index);
- if (pCtx->hasNull && isNull(pData, pCtx->inputType)) {
- return;
- }
-
- /*
- * 1. for scan data in asc order, no need to check data
- * 2. for data blocks that are not loaded, no need to check data
- */
- if (pCtx->order != pCtx->param[0].i64) {
- return;
- }
-
- last_data_assign_impl(pCtx, pData, index);
-
- SET_VAL(pCtx, 1, 1);
-}
-
/*
* in the secondary merge(local reduce), the output is limited by the
* final output size, so the main difference between last_dist_func_merge and second_merge
@@ -2616,28 +2158,6 @@ static void top_function(SQLFunctionCtx *pCtx) {
}
}
-static void top_function_f(SQLFunctionCtx *pCtx, int32_t index) {
- char *pData = GET_INPUT_DATA(pCtx, index);
- if (pCtx->hasNull && isNull(pData, pCtx->inputType)) {
- return;
- }
-
- STopBotInfo *pRes = getTopBotOutputInfo(pCtx);
- assert(pRes->num >= 0);
-
- if ((void *)pRes->res[0] != (void *)((char *)pRes + sizeof(STopBotInfo) + POINTER_BYTES * pCtx->param[0].i64)) {
- buildTopBotStruct(pRes, pCtx);
- }
-
- SET_VAL(pCtx, 1, 1);
- TSKEY ts = GET_TS_DATA(pCtx, index);
-
- do_top_function_add(pRes, (int32_t)pCtx->param[0].i64, pData, ts, pCtx->inputType, &pCtx->tagInfo, NULL, 0);
-
- SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx);
- pResInfo->hasResult = DATA_SET_FLAG;
-}
-
static void top_func_merge(SQLFunctionCtx *pCtx) {
STopBotInfo *pInput = (STopBotInfo *)GET_INPUT_DATA_LIST(pCtx);
@@ -2695,27 +2215,6 @@ static void bottom_function(SQLFunctionCtx *pCtx) {
}
}
-static void bottom_function_f(SQLFunctionCtx *pCtx, int32_t index) {
- char *pData = GET_INPUT_DATA(pCtx, index);
- TSKEY ts = GET_TS_DATA(pCtx, index);
-
- if (pCtx->hasNull && isNull(pData, pCtx->inputType)) {
- return;
- }
-
- STopBotInfo *pRes = getTopBotOutputInfo(pCtx);
-
- if ((void *)pRes->res[0] != (void *)((char *)pRes + sizeof(STopBotInfo) + POINTER_BYTES * pCtx->param[0].i64)) {
- buildTopBotStruct(pRes, pCtx);
- }
-
- SET_VAL(pCtx, 1, 1);
- do_bottom_function_add(pRes, (int32_t)pCtx->param[0].i64, pData, ts, pCtx->inputType, &pCtx->tagInfo, NULL, 0);
-
- SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx);
- pResInfo->hasResult = DATA_SET_FLAG;
-}
-
static void bottom_func_merge(SQLFunctionCtx *pCtx) {
STopBotInfo *pInput = (STopBotInfo *)GET_INPUT_DATA_LIST(pCtx);
@@ -2835,80 +2334,36 @@ static void percentile_function(SQLFunctionCtx *pCtx) {
continue;
}
- double v = 0;
- GET_TYPED_DATA(v, double, pCtx->inputType, data);
-
- if (v < GET_DOUBLE_VAL(&pInfo->minval)) {
- SET_DOUBLE_VAL(&pInfo->minval, v);
- }
-
- if (v > GET_DOUBLE_VAL(&pInfo->maxval)) {
- SET_DOUBLE_VAL(&pInfo->maxval, v);
- }
-
- pInfo->numOfElems += 1;
- }
- }
-
- return;
- }
-
- // the second stage, calculate the true percentile value
- for (int32_t i = 0; i < pCtx->size; ++i) {
- char *data = GET_INPUT_DATA(pCtx, i);
- if (pCtx->hasNull && isNull(data, pCtx->inputType)) {
- continue;
- }
-
- notNullElems += 1;
- tMemBucketPut(pInfo->pMemBucket, data, 1);
- }
-
- SET_VAL(pCtx, notNullElems, 1);
- pResInfo->hasResult = DATA_SET_FLAG;
-}
-
-static void percentile_function_f(SQLFunctionCtx *pCtx, int32_t index) {
- void *pData = GET_INPUT_DATA(pCtx, index);
- if (pCtx->hasNull && isNull(pData, pCtx->inputType)) {
- return;
- }
-
- SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx);
- SPercentileInfo *pInfo = (SPercentileInfo *)GET_ROWCELL_INTERBUF(pResInfo);
-
- if (pCtx->currentStage == REPEAT_SCAN && pInfo->stage == 0) {
- pInfo->stage += 1;
-
- // all data are null, set it completed
- if (pInfo->numOfElems == 0) {
- pResInfo->complete = true;
-
- return;
- } else {
- pInfo->pMemBucket = tMemBucketCreate(pCtx->inputBytes, pCtx->inputType, pInfo->minval, pInfo->maxval);
- }
- }
+ double v = 0;
+ GET_TYPED_DATA(v, double, pCtx->inputType, data);
- if (pInfo->stage == 0) {
- double v = 0;
- GET_TYPED_DATA(v, double, pCtx->inputType, pData);
+ if (v < GET_DOUBLE_VAL(&pInfo->minval)) {
+ SET_DOUBLE_VAL(&pInfo->minval, v);
+ }
- if (v < GET_DOUBLE_VAL(&pInfo->minval)) {
- SET_DOUBLE_VAL(&pInfo->minval, v);
- }
+ if (v > GET_DOUBLE_VAL(&pInfo->maxval)) {
+ SET_DOUBLE_VAL(&pInfo->maxval, v);
+ }
- if (v > GET_DOUBLE_VAL(&pInfo->maxval)) {
- SET_DOUBLE_VAL(&pInfo->maxval, v);
+ pInfo->numOfElems += 1;
+ }
}
- pInfo->numOfElems += 1;
return;
}
+
+ // the second stage, calculate the true percentile value
+ for (int32_t i = 0; i < pCtx->size; ++i) {
+ char *data = GET_INPUT_DATA(pCtx, i);
+ if (pCtx->hasNull && isNull(data, pCtx->inputType)) {
+ continue;
+ }
+
+ notNullElems += 1;
+ tMemBucketPut(pInfo->pMemBucket, data, 1);
+ }
- tMemBucketPut(pInfo->pMemBucket, pData, 1);
-
- SET_VAL(pCtx, 1, 1);
+ SET_VAL(pCtx, notNullElems, 1);
pResInfo->hasResult = DATA_SET_FLAG;
}
@@ -2930,24 +2385,6 @@ static void percentile_finalizer(SQLFunctionCtx *pCtx) {
doFinalizer(pCtx);
}
-static UNUSED_FUNC void percentile_next_step(SQLFunctionCtx *pCtx) {
- SResultRowCellInfo * pResInfo = GET_RES_INFO(pCtx);
- SPercentileInfo *pInfo = GET_ROWCELL_INTERBUF(pResInfo);
-
- if (pInfo->stage == 0) {
- // all data are null, set it completed
- if (pInfo->numOfElems == 0) {
- pResInfo->complete = true;
- } else {
- pInfo->pMemBucket = tMemBucketCreate(pCtx->inputBytes, pCtx->inputType, pInfo->minval, pInfo->maxval);
- }
-
- pInfo->stage += 1;
- } else {
- pResInfo->complete = true;
- }
-}
-
//////////////////////////////////////////////////////////////////////////////////
static void buildHistogramInfo(SAPercentileInfo* pInfo) {
pInfo->pHisto = (SHistogramInfo*) ((char*) pInfo + sizeof(SAPercentileInfo));
@@ -3012,24 +2449,6 @@ static void apercentile_function(SQLFunctionCtx *pCtx) {
}
}
-static void apercentile_function_f(SQLFunctionCtx *pCtx, int32_t index) {
- void *pData = GET_INPUT_DATA(pCtx, index);
- if (pCtx->hasNull && isNull(pData, pCtx->inputType)) {
- return;
- }
-
- SResultRowCellInfo * pResInfo = GET_RES_INFO(pCtx);
- SAPercentileInfo *pInfo = getAPerctInfo(pCtx);
-
- double v = 0;
- GET_TYPED_DATA(v, double, pCtx->inputType, pData);
-
- tHistogramAdd(&pInfo->pHisto, v);
-
- SET_VAL(pCtx, 1, 1);
- pResInfo->hasResult = DATA_SET_FLAG;
-}
-
static void apercentile_func_merge(SQLFunctionCtx *pCtx) {
SAPercentileInfo *pInput = (SAPercentileInfo *)GET_INPUT_DATA_LIST(pCtx);
@@ -3213,60 +2632,6 @@ static void leastsquares_function(SQLFunctionCtx *pCtx) {
SET_VAL(pCtx, numOfElem, 1);
}
-static void leastsquares_function_f(SQLFunctionCtx *pCtx, int32_t index) {
- void *pData = GET_INPUT_DATA(pCtx, index);
- if (pCtx->hasNull && isNull(pData, pCtx->inputType)) {
- return;
- }
-
- SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx);
- SLeastsquaresInfo *pInfo = GET_ROWCELL_INTERBUF(pResInfo);
-
- double(*param)[3] = pInfo->mat;
-
- switch (pCtx->inputType) {
- case TSDB_DATA_TYPE_INT: {
- int32_t *p = pData;
- LEASTSQR_CAL(param, pInfo->startVal, p, 0, pCtx->param[1].dKey);
- break;
- }
- case TSDB_DATA_TYPE_TINYINT: {
- int8_t *p = pData;
- LEASTSQR_CAL(param, pInfo->startVal, p, 0, pCtx->param[1].dKey);
- break;
- }
- case TSDB_DATA_TYPE_SMALLINT: {
- int16_t *p = pData;
- LEASTSQR_CAL(param, pInfo->startVal, p, 0, pCtx->param[1].dKey);
- break;
- }
- case TSDB_DATA_TYPE_BIGINT: {
- int64_t *p = pData;
- LEASTSQR_CAL(param, pInfo->startVal, p, 0, pCtx->param[1].dKey);
- break;
- }
- case TSDB_DATA_TYPE_FLOAT: {
- float *p = pData;
- LEASTSQR_CAL(param, pInfo->startVal, p, 0, pCtx->param[1].dKey);
- break;
- }
- case TSDB_DATA_TYPE_DOUBLE: {
- double *p = pData;
- LEASTSQR_CAL(param, pInfo->startVal, p, 0, pCtx->param[1].dKey);
- break;
- }
- default:
- qError("error data type in leastsquare function:%d", pCtx->inputType);
- };
-
- SET_VAL(pCtx, 1, 1);
- pInfo->num += 1;
-
- if (pInfo->num > 0) {
- pResInfo->hasResult = DATA_SET_FLAG;
- }
-}
-
static void leastsquares_finalizer(SQLFunctionCtx *pCtx) {
// no data in query
SResultRowCellInfo * pResInfo = GET_RES_INFO(pCtx);
@@ -3304,25 +2669,23 @@ static void date_col_output_function(SQLFunctionCtx *pCtx) {
*(int64_t *)(pCtx->pOutput) = pCtx->startTs;
}
-static FORCE_INLINE void date_col_output_function_f(SQLFunctionCtx *pCtx, int32_t index) {
- date_col_output_function(pCtx);
-}
-
static void col_project_function(SQLFunctionCtx *pCtx) {
// the number of output rows should not affect the final number of rows, so set it to be 0
if (pCtx->numOfParams == 2) {
return;
}
+
+ // only one row is required.
if (pCtx->param[0].i64 == 1) {
SET_VAL(pCtx, pCtx->size, 1);
} else {
INC_INIT_VAL(pCtx, pCtx->size);
}
-
char *pData = GET_INPUT_DATA_LIST(pCtx);
if (pCtx->order == TSDB_ORDER_ASC) {
- memcpy(pCtx->pOutput, pData, (size_t) pCtx->size * pCtx->inputBytes);
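+      // when a single output row is requested (param[0] == 1), copy only the first row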
+ int32_t numOfRows = (pCtx->param[0].i64 == 1)? 1:pCtx->size;
+ memcpy(pCtx->pOutput, pData, (size_t) numOfRows * pCtx->inputBytes);
} else {
for(int32_t i = 0; i < pCtx->size; ++i) {
memcpy(pCtx->pOutput + (pCtx->size - 1 - i) * pCtx->inputBytes, pData + i * pCtx->inputBytes,
@@ -3331,22 +2694,6 @@ static void col_project_function(SQLFunctionCtx *pCtx) {
}
}
-static void col_project_function_f(SQLFunctionCtx *pCtx, int32_t index) {
- SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx);
- if (pCtx->numOfParams == 2) { // the number of output rows should not affect the final number of rows, so set it to be 0
- return;
- }
-
- // only one output
- if (pCtx->param[0].i64 == 1 && pResInfo->numOfRes >= 1) {
- return;
- }
-
- INC_INIT_VAL(pCtx, 1);
- char *pData = GET_INPUT_DATA(pCtx, index);
- memcpy(pCtx->pOutput, pData, pCtx->inputBytes);
-}
-
/**
* only used for tag projection query in select clause
* @param pCtx
@@ -3368,13 +2715,6 @@ static void tag_project_function(SQLFunctionCtx *pCtx) {
}
}
-static void tag_project_function_f(SQLFunctionCtx *pCtx, int32_t index) {
- INC_INIT_VAL(pCtx, 1);
-
- tVariantDump(&pCtx->tag, pCtx->pOutput, pCtx->tag.nType, true);
- pCtx->pOutput += pCtx->outputBytes;
-}
-
/**
 * used in the group by clause. when grouping by tags, the tag value is
 * assigned by the tag function.
@@ -3393,11 +2733,6 @@ static void tag_function(SQLFunctionCtx *pCtx) {
}
}
-static void tag_function_f(SQLFunctionCtx *pCtx, int32_t index) {
- SET_VAL(pCtx, 1, 1);
- tVariantDump(&pCtx->tag, pCtx->pOutput, pCtx->outputType, true);
-}
-
static void copy_function(SQLFunctionCtx *pCtx) {
SET_VAL(pCtx, pCtx->size, 1);
@@ -3793,61 +3128,6 @@ static void diff_function(SQLFunctionCtx *pCtx) {
}
}
-static void diff_function_f(SQLFunctionCtx *pCtx, int32_t index) {
- char *pData = GET_INPUT_DATA(pCtx, index);
- if (pCtx->hasNull && isNull(pData, pCtx->inputType)) {
- return;
- }
-
- // the output start from the second source element
- if (pCtx->param[1].nType != INITIAL_VALUE_NOT_ASSIGNED) { // initial value is set
- GET_RES_INFO(pCtx)->numOfRes += 1;
- }
-
- int32_t step = 1/*GET_FORWARD_DIRECTION_FACTOR(pCtx->order)*/;
-
- switch (pCtx->inputType) {
- case TSDB_DATA_TYPE_INT: {
- if (pCtx->param[1].nType == INITIAL_VALUE_NOT_ASSIGNED) { // initial value is not set yet
- pCtx->param[1].nType = pCtx->inputType;
- pCtx->param[1].i64 = *(int32_t *)pData;
- } else {
- *(int32_t *)pCtx->pOutput = *(int32_t *)pData - (int32_t)pCtx->param[1].i64;
- pCtx->param[1].i64 = *(int32_t *)pData;
- *(int64_t *)pCtx->ptsOutputBuf = GET_TS_DATA(pCtx, index);
- }
- break;
- };
- case TSDB_DATA_TYPE_BIGINT: {
- DIFF_IMPL(pCtx, pData, int64_t);
- break;
- };
- case TSDB_DATA_TYPE_DOUBLE: {
- DIFF_IMPL(pCtx, pData, double);
- break;
- };
- case TSDB_DATA_TYPE_FLOAT: {
- DIFF_IMPL(pCtx, pData, float);
- break;
- };
- case TSDB_DATA_TYPE_SMALLINT: {
- DIFF_IMPL(pCtx, pData, int16_t);
- break;
- };
- case TSDB_DATA_TYPE_TINYINT: {
- DIFF_IMPL(pCtx, pData, int8_t);
- break;
- };
- default:
- qError("error input type");
- }
-
- if (GET_RES_INFO(pCtx)->numOfRes > 0) {
- pCtx->pOutput += pCtx->outputBytes * step;
- pCtx->ptsOutputBuf = (char *)pCtx->ptsOutputBuf + TSDB_KEYSIZE * step;
- }
-}
-
char *getArithColumnData(void *param, const char* name, int32_t colId) {
SArithmeticSupport *pSupport = (SArithmeticSupport *)param;
@@ -3870,16 +3150,6 @@ static void arithmetic_function(SQLFunctionCtx *pCtx) {
arithmeticTreeTraverse(sas->pExprInfo->pExpr, pCtx->size, pCtx->pOutput, sas, pCtx->order, getArithColumnData);
}
-static void arithmetic_function_f(SQLFunctionCtx *pCtx, int32_t index) {
- INC_INIT_VAL(pCtx, 1);
- SArithmeticSupport *sas = (SArithmeticSupport *)pCtx->param[1].pz;
-
- sas->offset = index;
- arithmeticTreeTraverse(sas->pExprInfo->pExpr, 1, pCtx->pOutput, sas, pCtx->order, getArithColumnData);
-
- pCtx->pOutput += pCtx->outputBytes;
-}
-
#define LIST_MINMAX_N(ctx, minOutput, maxOutput, elemCnt, data, type, tsdbType, numOfNotNullElem) \
{ \
type *inputData = (type *)data; \
@@ -3998,49 +3268,6 @@ static void spread_function(SQLFunctionCtx *pCtx) {
}
}
-static void spread_function_f(SQLFunctionCtx *pCtx, int32_t index) {
- void *pData = GET_INPUT_DATA(pCtx, index);
- if (pCtx->hasNull && isNull(pData, pCtx->inputType)) {
- return;
- }
-
- SET_VAL(pCtx, 1, 1);
-
- SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx);
- SSpreadInfo *pInfo = GET_ROWCELL_INTERBUF(pResInfo);
-
- double val = 0.0;
- if (pCtx->inputType == TSDB_DATA_TYPE_TINYINT) {
- val = GET_INT8_VAL(pData);
- } else if (pCtx->inputType == TSDB_DATA_TYPE_SMALLINT) {
- val = GET_INT16_VAL(pData);
- } else if (pCtx->inputType == TSDB_DATA_TYPE_INT) {
- val = GET_INT32_VAL(pData);
- } else if (pCtx->inputType == TSDB_DATA_TYPE_BIGINT || pCtx->inputType == TSDB_DATA_TYPE_TIMESTAMP) {
- val = (double)(GET_INT64_VAL(pData));
- } else if (pCtx->inputType == TSDB_DATA_TYPE_DOUBLE) {
- val = GET_DOUBLE_VAL(pData);
- } else if (pCtx->inputType == TSDB_DATA_TYPE_FLOAT) {
- val = GET_FLOAT_VAL(pData);
- }
-
- // keep the result data in output buffer, not in the intermediate buffer
- if (val > pInfo->max) {
- pInfo->max = val;
- }
-
- if (val < pInfo->min) {
- pInfo->min = val;
- }
-
- pResInfo->hasResult = DATA_SET_FLAG;
- pInfo->hasResult = DATA_SET_FLAG;
-
- if (pCtx->stableQuery) {
- memcpy(pCtx->pOutput, GET_ROWCELL_INTERBUF(pResInfo), sizeof(SSpreadInfo));
- }
-}
-
/*
 * here we set the result value back to the intermediate buffer so that the finalizer
 * can be applied; the final result is generated in spread_function_finalizer
@@ -4393,26 +3620,6 @@ static void twa_function(SQLFunctionCtx *pCtx) {
}
}
-static void twa_function_f(SQLFunctionCtx *pCtx, int32_t index) {
- void *pData = GET_INPUT_DATA(pCtx, index);
- if (pCtx->hasNull && isNull(pData, pCtx->inputType)) {
- return;
- }
-
- int32_t notNullElems = twa_function_impl(pCtx, index, 1);
- SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx);
-
- SET_VAL(pCtx, notNullElems, 1);
-
- if (notNullElems > 0) {
- pResInfo->hasResult = DATA_SET_FLAG;
- }
-
- if (pCtx->stableQuery) {
- memcpy(pCtx->pOutput, GET_ROWCELL_INTERBUF(pResInfo), sizeof(STwaInfo));
- }
-}
-
/*
 * Copy the input to interResBuf to avoid the input buffer space being overwritten
 * by the next input data. The TWA function only applies to each table, so no merge procedure
@@ -4590,23 +3797,6 @@ static void ts_comp_function(SQLFunctionCtx *pCtx) {
pResInfo->hasResult = DATA_SET_FLAG;
}
-static void ts_comp_function_f(SQLFunctionCtx *pCtx, int32_t index) {
- void *pData = GET_INPUT_DATA(pCtx, index);
- if (pCtx->hasNull && isNull(pData, pCtx->inputType)) {
- return;
- }
-
- SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx);
- STSCompInfo *pInfo = GET_ROWCELL_INTERBUF(pResInfo);
-
- STSBuf *pTSbuf = pInfo->pTSBuf;
-
- tsBufAppend(pTSbuf, (int32_t)pCtx->param[0].i64, &pCtx->tag, pData, TSDB_KEYSIZE);
- SET_VAL(pCtx, pCtx->size, 1);
-
- pResInfo->hasResult = DATA_SET_FLAG;
-}
-
static void ts_comp_finalize(SQLFunctionCtx *pCtx) {
SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx);
@@ -4736,46 +3926,6 @@ static void rate_function(SQLFunctionCtx *pCtx) {
}
}
-static void rate_function_f(SQLFunctionCtx *pCtx, int32_t index) {
- void *pData = GET_INPUT_DATA(pCtx, index);
- if (pCtx->hasNull && isNull(pData, pCtx->inputType)) {
- return;
- }
-
- // NOTE: keep the intermediate result into the interResultBuf
- SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx);
- SRateInfo *pRateInfo = (SRateInfo *)GET_ROWCELL_INTERBUF(pResInfo);
- TSKEY *primaryKey = GET_TS_LIST(pCtx);
-
- double v = 0;
- GET_TYPED_DATA(v, double, pCtx->inputType, pData);
-
- if ((INT64_MIN == pRateInfo->firstValue) || (INT64_MIN == pRateInfo->firstKey)) {
- pRateInfo->firstValue = v;
- pRateInfo->firstKey = primaryKey[index];
- }
-
- if (INT64_MIN == pRateInfo->lastValue) {
- pRateInfo->lastValue = v;
- } else if (v < pRateInfo->lastValue) {
- pRateInfo->correctionValue += pRateInfo->lastValue;
- }
-
- pRateInfo->lastValue = v;
- pRateInfo->lastKey = primaryKey[index];
-
- SET_VAL(pCtx, 1, 1);
-
- // set has result flag
- pRateInfo->hasResult = DATA_SET_FLAG;
- pResInfo->hasResult = DATA_SET_FLAG;
-
- // keep the data into the final output buffer for super table query since this execution may be the last one
- if (pCtx->stableQuery) {
- memcpy(pCtx->pOutput, GET_ROWCELL_INTERBUF(pResInfo), sizeof(SRateInfo));
- }
-}
-
static void rate_func_copy(SQLFunctionCtx *pCtx) {
assert(pCtx->inputType == TSDB_DATA_TYPE_BINARY);
@@ -4793,7 +3943,7 @@ static void rate_finalizer(SQLFunctionCtx *pCtx) {
return;
}
- *(double*) pCtx->pOutput = do_calc_rate(pRateInfo, TSDB_TICK_PER_SECOND(pCtx->param[0].i64));
+ *(double*) pCtx->pOutput = do_calc_rate(pRateInfo, (double) TSDB_TICK_PER_SECOND(pCtx->param[0].i64));
// cannot set the numOfIteratedElems again since it is set during previous iteration
pResInfo->numOfRes = 1;
@@ -4846,39 +3996,6 @@ static void irate_function(SQLFunctionCtx *pCtx) {
}
}
-static void irate_function_f(SQLFunctionCtx *pCtx, int32_t index) {
- void *pData = GET_INPUT_DATA(pCtx, index);
- if (pCtx->hasNull && isNull(pData, pCtx->inputType)) {
- return;
- }
-
- // NOTE: keep the intermediate result into the interResultBuf
- SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx);
- SRateInfo *pRateInfo = (SRateInfo *)GET_ROWCELL_INTERBUF(pResInfo);
- TSKEY *primaryKey = GET_TS_LIST(pCtx);
-
- double v = 0;
- GET_TYPED_DATA(v, double, pCtx->inputType, pData);
-
- pRateInfo->firstKey = pRateInfo->lastKey;
- pRateInfo->firstValue = pRateInfo->lastValue;
-
- pRateInfo->lastValue = v;
- pRateInfo->lastKey = primaryKey[index];
-
-// qDebug("====%p irate_function_f() index:%d lastValue:%" PRId64 " lastKey:%" PRId64 " firstValue:%" PRId64 " firstKey:%" PRId64, pCtx, index, pRateInfo->lastValue, pRateInfo->lastKey, pRateInfo->firstValue , pRateInfo->firstKey);
- SET_VAL(pCtx, 1, 1);
-
- // set has result flag
- pRateInfo->hasResult = DATA_SET_FLAG;
- pResInfo->hasResult = DATA_SET_FLAG;
-
- // keep the data into the final output buffer for super table query since this execution may be the last one
- if (pCtx->stableQuery) {
- memcpy(pCtx->pOutput, GET_ROWCELL_INTERBUF(pResInfo), sizeof(SRateInfo));
- }
-}
-
void blockInfo_func(SQLFunctionCtx* pCtx) {
SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx);
STableBlockDist* pDist = (STableBlockDist*) GET_ROWCELL_INTERBUF(pResInfo);
@@ -5047,8 +4164,7 @@ void blockinfo_func_finalizer(SQLFunctionCtx* pCtx) {
* function compatible list.
* tag and ts are not involved in the compatibility check
*
- * 1. functions that are not simultaneously present with any other functions. e.g.,
- * diff/ts_z/top/bottom
+ * 1. functions that are not simultaneously present with any other functions. e.g., diff/ts_z/top/bottom
 * 2. functions that are only allowed to be present with the same functions. e.g., last_row, interp
* 3. functions that are allowed to be present with other functions.
* e.g., count/sum/avg/min/max/stddev/percentile/apercentile/first/last...
@@ -5062,7 +4178,7 @@ int32_t functionCompatList[] = {
// tag, colprj, tagprj, arithmetic, diff, first_dist, last_dist, stddev_dst, interp rate irate
1, 1, 1, 1, -1, 1, 1, 1, 5, 1, 1,
// tid_tag, blk_info
- 6, 7
+ 6, 7
};
SAggFunctionInfo aAggs[] = {{
@@ -5073,7 +4189,6 @@ SAggFunctionInfo aAggs[] = {{
TSDB_BASE_FUNC_SO,
function_setup,
count_function,
- count_function_f,
doFinalizer,
count_func_merge,
countRequired,
@@ -5086,7 +4201,6 @@ SAggFunctionInfo aAggs[] = {{
TSDB_BASE_FUNC_SO,
function_setup,
sum_function,
- sum_function_f,
function_finalizer,
sum_func_merge,
statisRequired,
@@ -5099,7 +4213,6 @@ SAggFunctionInfo aAggs[] = {{
TSDB_BASE_FUNC_SO,
function_setup,
avg_function,
- avg_function_f,
avg_finalizer,
avg_func_merge,
statisRequired,
@@ -5112,7 +4225,6 @@ SAggFunctionInfo aAggs[] = {{
TSDB_BASE_FUNC_SO | TSDB_FUNCSTATE_SELECTIVITY,
min_func_setup,
min_function,
- min_function_f,
function_finalizer,
min_func_merge,
statisRequired,
@@ -5125,7 +4237,6 @@ SAggFunctionInfo aAggs[] = {{
TSDB_BASE_FUNC_SO | TSDB_FUNCSTATE_SELECTIVITY,
max_func_setup,
max_function,
- max_function_f,
function_finalizer,
max_func_merge,
statisRequired,
@@ -5138,7 +4249,6 @@ SAggFunctionInfo aAggs[] = {{
TSDB_FUNCSTATE_SO | TSDB_FUNCSTATE_STREAM | TSDB_FUNCSTATE_OF,
function_setup,
stddev_function,
- stddev_function_f,
stddev_finalizer,
noop1,
dataBlockRequired,
@@ -5151,7 +4261,6 @@ SAggFunctionInfo aAggs[] = {{
TSDB_FUNCSTATE_SO | TSDB_FUNCSTATE_STREAM | TSDB_FUNCSTATE_OF,
percentile_function_setup,
percentile_function,
- percentile_function_f,
percentile_finalizer,
noop1,
dataBlockRequired,
@@ -5164,7 +4273,6 @@ SAggFunctionInfo aAggs[] = {{
TSDB_FUNCSTATE_SO | TSDB_FUNCSTATE_STREAM | TSDB_FUNCSTATE_OF | TSDB_FUNCSTATE_STABLE,
apercentile_function_setup,
apercentile_function,
- apercentile_function_f,
apercentile_finalizer,
apercentile_func_merge,
dataBlockRequired,
@@ -5177,7 +4285,6 @@ SAggFunctionInfo aAggs[] = {{
TSDB_BASE_FUNC_SO | TSDB_FUNCSTATE_SELECTIVITY,
function_setup,
first_function,
- first_function_f,
function_finalizer,
noop1,
firstFuncRequired,
@@ -5190,7 +4297,6 @@ SAggFunctionInfo aAggs[] = {{
TSDB_BASE_FUNC_SO | TSDB_FUNCSTATE_SELECTIVITY,
function_setup,
last_function,
- last_function_f,
function_finalizer,
noop1,
lastFuncRequired,
@@ -5204,7 +4310,6 @@ SAggFunctionInfo aAggs[] = {{
TSDB_FUNCSTATE_SELECTIVITY,
first_last_function_setup,
last_row_function,
- noop2,
last_row_finalizer,
last_dist_func_merge,
dataBlockRequired,
@@ -5218,7 +4323,6 @@ SAggFunctionInfo aAggs[] = {{
TSDB_FUNCSTATE_SELECTIVITY,
top_bottom_function_setup,
top_function,
- top_function_f,
top_bottom_func_finalizer,
top_func_merge,
dataBlockRequired,
@@ -5232,7 +4336,6 @@ SAggFunctionInfo aAggs[] = {{
TSDB_FUNCSTATE_SELECTIVITY,
top_bottom_function_setup,
bottom_function,
- bottom_function_f,
top_bottom_func_finalizer,
bottom_func_merge,
dataBlockRequired,
@@ -5245,7 +4348,6 @@ SAggFunctionInfo aAggs[] = {{
TSDB_BASE_FUNC_SO,
spread_function_setup,
spread_function,
- spread_function_f,
spread_function_finalizer,
spread_func_merge,
countRequired,
@@ -5258,7 +4360,6 @@ SAggFunctionInfo aAggs[] = {{
TSDB_BASE_FUNC_SO | TSDB_FUNCSTATE_NEED_TS,
twa_function_setup,
twa_function,
- twa_function_f,
twa_function_finalizer,
twa_function_copy,
dataBlockRequired,
@@ -5271,7 +4372,6 @@ SAggFunctionInfo aAggs[] = {{
TSDB_FUNCSTATE_SO | TSDB_FUNCSTATE_STREAM | TSDB_FUNCSTATE_OF,
leastsquares_function_setup,
leastsquares_function,
- leastsquares_function_f,
leastsquares_finalizer,
noop1,
dataBlockRequired,
@@ -5284,7 +4384,6 @@ SAggFunctionInfo aAggs[] = {{
TSDB_BASE_FUNC_SO | TSDB_FUNCSTATE_NEED_TS,
function_setup,
date_col_output_function,
- date_col_output_function_f,
doFinalizer,
copy_function,
noDataRequired,
@@ -5297,7 +4396,6 @@ SAggFunctionInfo aAggs[] = {{
TSDB_BASE_FUNC_SO | TSDB_FUNCSTATE_NEED_TS,
function_setup,
noop1,
- noop2,
doFinalizer,
copy_function,
dataBlockRequired,
@@ -5310,7 +4408,6 @@ SAggFunctionInfo aAggs[] = {{
TSDB_BASE_FUNC_SO,
function_setup,
tag_function,
- noop2,
doFinalizer,
copy_function,
noDataRequired,
@@ -5323,7 +4420,6 @@ SAggFunctionInfo aAggs[] = {{
TSDB_FUNCSTATE_MO | TSDB_FUNCSTATE_NEED_TS,
ts_comp_function_setup,
ts_comp_function,
- ts_comp_function_f,
ts_comp_finalize,
copy_function,
dataBlockRequired,
@@ -5336,7 +4432,6 @@ SAggFunctionInfo aAggs[] = {{
TSDB_BASE_FUNC_SO,
function_setup,
tag_function,
- tag_function_f,
doFinalizer,
copy_function,
noDataRequired,
@@ -5349,7 +4444,6 @@ SAggFunctionInfo aAggs[] = {{
TSDB_BASE_FUNC_MO | TSDB_FUNCSTATE_NEED_TS,
function_setup,
col_project_function,
- col_project_function_f,
doFinalizer,
copy_function,
dataBlockRequired,
@@ -5362,7 +4456,6 @@ SAggFunctionInfo aAggs[] = {{
TSDB_BASE_FUNC_MO,
function_setup,
tag_project_function,
- tag_project_function_f,
doFinalizer,
copy_function,
noDataRequired,
@@ -5375,7 +4468,6 @@ SAggFunctionInfo aAggs[] = {{
TSDB_FUNCSTATE_MO | TSDB_FUNCSTATE_STABLE | TSDB_FUNCSTATE_NEED_TS,
function_setup,
arithmetic_function,
- arithmetic_function_f,
doFinalizer,
copy_function,
dataBlockRequired,
@@ -5388,7 +4480,6 @@ SAggFunctionInfo aAggs[] = {{
TSDB_FUNCSTATE_MO | TSDB_FUNCSTATE_STABLE | TSDB_FUNCSTATE_NEED_TS | TSDB_FUNCSTATE_SELECTIVITY,
diff_function_setup,
diff_function,
- diff_function_f,
doFinalizer,
noop1,
dataBlockRequired,
@@ -5402,7 +4493,6 @@ SAggFunctionInfo aAggs[] = {{
TSDB_BASE_FUNC_SO | TSDB_FUNCSTATE_NEED_TS | TSDB_FUNCSTATE_SELECTIVITY,
first_last_function_setup,
first_dist_function,
- first_dist_function_f,
function_finalizer,
first_dist_func_merge,
firstDistFuncRequired,
@@ -5415,7 +4505,6 @@ SAggFunctionInfo aAggs[] = {{
TSDB_BASE_FUNC_SO | TSDB_FUNCSTATE_NEED_TS | TSDB_FUNCSTATE_SELECTIVITY,
first_last_function_setup,
last_dist_function,
- last_dist_function_f,
function_finalizer,
last_dist_func_merge,
lastDistFuncRequired,
@@ -5428,7 +4517,6 @@ SAggFunctionInfo aAggs[] = {{
TSDB_FUNCSTATE_SO | TSDB_FUNCSTATE_STABLE,
function_setup,
stddev_dst_function,
- stddev_dst_function_f,
stddev_dst_finalizer,
stddev_dst_merge,
dataBlockRequired,
@@ -5441,7 +4529,6 @@ SAggFunctionInfo aAggs[] = {{
TSDB_FUNCSTATE_SO | TSDB_FUNCSTATE_OF | TSDB_FUNCSTATE_STABLE | TSDB_FUNCSTATE_NEED_TS ,
function_setup,
interp_function,
- do_sum_f, // todo filter handle
doFinalizer,
copy_function,
dataBlockRequired,
@@ -5454,7 +4541,6 @@ SAggFunctionInfo aAggs[] = {{
TSDB_BASE_FUNC_SO | TSDB_FUNCSTATE_NEED_TS,
rate_function_setup,
rate_function,
- rate_function_f,
rate_finalizer,
rate_func_copy,
dataBlockRequired,
@@ -5467,7 +4553,6 @@ SAggFunctionInfo aAggs[] = {{
TSDB_BASE_FUNC_SO | TSDB_FUNCSTATE_NEED_TS,
rate_function_setup,
irate_function,
- irate_function_f,
rate_finalizer,
rate_func_copy,
dataBlockRequired,
@@ -5480,7 +4565,6 @@ SAggFunctionInfo aAggs[] = {{
TSDB_FUNCSTATE_MO | TSDB_FUNCSTATE_STABLE,
function_setup,
noop1,
- noop2,
noop1,
noop1,
dataBlockRequired,
@@ -5492,7 +4576,6 @@ SAggFunctionInfo aAggs[] = {{
TSDB_FUNCSTATE_MO | TSDB_FUNCSTATE_STABLE | TSDB_FUNCSTATE_NEED_TS | TSDB_FUNCSTATE_SELECTIVITY,
deriv_function_setup,
deriv_function,
- noop2,
doFinalizer,
noop1,
dataBlockRequired,
@@ -5505,7 +4588,6 @@ SAggFunctionInfo aAggs[] = {{
TSDB_FUNCSTATE_SO | TSDB_FUNCSTATE_STABLE,
function_setup,
blockInfo_func,
- noop2,
blockinfo_func_finalizer,
block_func_merge,
dataBlockRequired,
diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c
index aa5ce4fa57ed86751985008d34511ebdcadb0f84..e50c0daa4cf56dbb255a692033df609c732998ea 100644
--- a/src/query/src/qExecutor.c
+++ b/src/query/src/qExecutor.c
@@ -163,7 +163,7 @@ static void setResultOutputBuf(SQueryRuntimeEnv* pRuntimeEnv, SResultRow* pResul
int32_t numOfCols, int32_t* rowCellInfoOffset);
void setResultRowOutputBufInitCtx(SQueryRuntimeEnv *pRuntimeEnv, SResultRow *pResult, SQLFunctionCtx* pCtx, int32_t numOfOutput, int32_t* rowCellInfoOffset);
-static bool functionNeedToExecute(SQueryRuntimeEnv *pRuntimeEnv, SQLFunctionCtx *pCtx, int32_t functionId);
+static bool functionNeedToExecute(SQueryRuntimeEnv *pRuntimeEnv, SQLFunctionCtx *pCtx);
static void setBlockStatisInfo(SQLFunctionCtx *pCtx, SSDataBlock* pSDataBlock, SColIndex* pColIndex);
@@ -186,7 +186,7 @@ static int32_t getNumOfScanTimes(SQueryAttr* pQueryAttr);
static void destroyBasicOperatorInfo(void* param, int32_t numOfOutput);
static void destroySFillOperatorInfo(void* param, int32_t numOfOutput);
static void destroyGroupbyOperatorInfo(void* param, int32_t numOfOutput);
-static void destroyArithOperatorInfo(void* param, int32_t numOfOutput);
+static void destroyProjectOperatorInfo(void* param, int32_t numOfOutput);
static void destroyTagScanOperatorInfo(void* param, int32_t numOfOutput);
static void destroySWindowOperatorInfo(void* param, int32_t numOfOutput);
static void destroyStateWindowOperatorInfo(void* param, int32_t numOfOutput);
@@ -370,7 +370,7 @@ static bool isProjQuery(SQueryAttr *pQueryAttr) {
return true;
}
-static bool hasNullRv(SColIndex* pColIndex, SDataStatis *pStatis) {
+static bool hasNull(SColIndex* pColIndex, SDataStatis *pStatis) {
if (TSDB_COL_IS_TAG(pColIndex->flag) || TSDB_COL_IS_UD_COL(pColIndex->flag) || pColIndex->colId == PRIMARYKEY_TIMESTAMP_COL_INDEX) {
return false;
}
@@ -769,12 +769,13 @@ static int32_t getNumOfRowsInTimeWindow(SQueryRuntimeEnv* pRuntimeEnv, SDataBloc
static void doApplyFunctions(SQueryRuntimeEnv* pRuntimeEnv, SQLFunctionCtx* pCtx, STimeWindow* pWin, int32_t offset,
int32_t forwardStep, TSKEY* tsCol, int32_t numOfTotal, int32_t numOfOutput) {
SQueryAttr *pQueryAttr = pRuntimeEnv->pQueryAttr;
- bool hasPrev = pCtx[0].preAggVals.isSet;
+ bool hasAggregates = pCtx[0].preAggVals.isSet;
for (int32_t k = 0; k < numOfOutput; ++k) {
- pCtx[k].size = forwardStep;
+ pCtx[k].size = forwardStep;
pCtx[k].startTs = pWin->skey;
+    // keep the original input pointer temporarily; it is restored after the function is applied
char* start = pCtx[k].pInput;
int32_t pos = (QUERY_IS_ASC_QUERY(pQueryAttr)) ? offset : offset - (forwardStep - 1);
@@ -786,20 +787,18 @@ static void doApplyFunctions(SQueryRuntimeEnv* pRuntimeEnv, SQLFunctionCtx* pCtx
pCtx[k].ptsList = &tsCol[pos];
}
- int32_t functionId = pCtx[k].functionId;
-
    // when not the whole block is involved in query processing, the statistics data cannot be used
    // NOTE: the original value of isSet has been changed here
if (pCtx[k].preAggVals.isSet && forwardStep < numOfTotal) {
pCtx[k].preAggVals.isSet = false;
}
- if (functionNeedToExecute(pRuntimeEnv, &pCtx[k], functionId)) {
- aAggs[functionId].xFunction(&pCtx[k]);
+ if (functionNeedToExecute(pRuntimeEnv, &pCtx[k])) {
+ aAggs[pCtx[k].functionId].xFunction(&pCtx[k]);
}
// restore it
- pCtx[k].preAggVals.isSet = hasPrev;
+ pCtx[k].preAggVals.isSet = hasAggregates;
pCtx[k].pInput = start;
}
}
@@ -908,9 +907,6 @@ static void setNotInterpoWindowKey(SQLFunctionCtx* pCtx, int32_t numOfOutput, in
}
}
-// window start key interpolation
-
-
static void saveDataBlockLastRow(SQueryRuntimeEnv* pRuntimeEnv, SDataBlockInfo* pDataBlockInfo, SArray* pDataBlock,
int32_t rowIndex) {
if (pDataBlock == NULL) {
@@ -977,7 +973,7 @@ void setInputDataBlock(SOperatorInfo* pOperator, SQLFunctionCtx* pCtx, SSDataBlo
doSetInputDataBlockInfo(pOperator, pCtx, pBlock, order);
}
} else {
- if (/*pCtx[0].pInput == NULL && */pBlock->pDataBlock != NULL) {
+ if (pBlock->pDataBlock != NULL) {
doSetInputDataBlock(pOperator, pCtx, pBlock, order);
} else {
doSetInputDataBlockInfo(pOperator, pCtx, pBlock, order);
@@ -1036,15 +1032,14 @@ static void doAggregateImpl(SOperatorInfo* pOperator, TSKEY startTs, SQLFunction
SQueryRuntimeEnv* pRuntimeEnv = pOperator->pRuntimeEnv;
for (int32_t k = 0; k < pOperator->numOfOutput; ++k) {
- int32_t functionId = pCtx[k].functionId;
- if (functionNeedToExecute(pRuntimeEnv, &pCtx[k], functionId)) {
+ if (functionNeedToExecute(pRuntimeEnv, &pCtx[k])) {
      pCtx[k].startTs = startTs;// this can be set when creating the struct
- aAggs[functionId].xFunction(&pCtx[k]);
+ aAggs[pCtx[k].functionId].xFunction(&pCtx[k]);
}
}
}
-static void arithmeticApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, SQLFunctionCtx *pCtx, int32_t numOfOutput) {
+static void projectApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, SQLFunctionCtx *pCtx, int32_t numOfOutput) {
SQueryAttr *pQueryAttr = pRuntimeEnv->pQueryAttr;
for (int32_t k = 0; k < numOfOutput; ++k) {
@@ -1348,6 +1343,12 @@ static void doHashGroupbyAgg(SOperatorInfo* pOperator, SGroupbyOperatorInfo *pIn
return;
}
+ SColumnInfoData* pFirstColData = taosArrayGet(pSDataBlock->pDataBlock, 0);
+ int64_t* tsList = (pFirstColData->info.type == TSDB_DATA_TYPE_TIMESTAMP)? (int64_t*) pFirstColData->pData:NULL;
+
+ STimeWindow w = TSWINDOW_INITIALIZER;
+
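+  // 'num' counts the current run of consecutive rows that share the same group-by
+  // value; the aggregate functions are applied once per run via doApplyFunctions()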
+ int32_t num = 0;
for (int32_t j = 0; j < pSDataBlock->info.rows; ++j) {
char* val = ((char*)pColInfoData->pData) + bytes * j;
if (isNull(val, type)) {
@@ -1355,33 +1356,56 @@ static void doHashGroupbyAgg(SOperatorInfo* pOperator, SGroupbyOperatorInfo *pIn
}
// Compare with the previous row of this column, and do not set the output buffer again if they are identical.
- if (pInfo->prevData == NULL || (memcmp(pInfo->prevData, val, bytes) != 0)) {
- if (pInfo->prevData == NULL) {
- pInfo->prevData = malloc(bytes);
- }
-
+ if (pInfo->prevData == NULL) {
+ pInfo->prevData = malloc(bytes);
memcpy(pInfo->prevData, val, bytes);
+ num++;
+ continue;
+ }
- if (pQueryAttr->stableQuery && pQueryAttr->stabledev && (pRuntimeEnv->prevResult != NULL)) {
- setParamForStableStddevByColData(pRuntimeEnv, pInfo->binfo.pCtx, pOperator->numOfOutput, pOperator->pExpr, val, bytes);
+ if (IS_VAR_DATA_TYPE(type)) {
+ int32_t len = varDataLen(val);
+ if(len == varDataLen(pInfo->prevData) && memcmp(varDataVal(pInfo->prevData), varDataVal(val), len) == 0) {
+ num++;
+ continue;
}
-
- int32_t ret =
- setGroupResultOutputBuf(pRuntimeEnv, &(pInfo->binfo), pOperator->numOfOutput, val, type, bytes, item->groupIndex);
- if (ret != TSDB_CODE_SUCCESS) { // null data, too many state code
- longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_APP_ERROR);
+ } else {
+ if (memcmp(pInfo->prevData, val, bytes) == 0) {
+ num++;
+ continue;
}
}
+ if (pQueryAttr->stableQuery && pQueryAttr->stabledev && (pRuntimeEnv->prevResult != NULL)) {
+ setParamForStableStddevByColData(pRuntimeEnv, pInfo->binfo.pCtx, pOperator->numOfOutput, pOperator->pExpr, pInfo->prevData, bytes);
+ }
- // todo opt perf
- for (int32_t k = 0; k < pOperator->numOfOutput; ++k) {
- pInfo->binfo.pCtx[k].size = 1;
- int32_t functionId = pInfo->binfo.pCtx[k].functionId;
- if (functionNeedToExecute(pRuntimeEnv, &pInfo->binfo.pCtx[k], functionId)) {
- aAggs[functionId].xFunctionF(&pInfo->binfo.pCtx[k], j);
- }
+ int32_t ret = setGroupResultOutputBuf(pRuntimeEnv, &(pInfo->binfo), pOperator->numOfOutput, pInfo->prevData, type, bytes, item->groupIndex);
+ if (ret != TSDB_CODE_SUCCESS) { // null data, too many state code
+ longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_APP_ERROR);
}
+
+ doApplyFunctions(pRuntimeEnv, pInfo->binfo.pCtx, &w, j - num, num, tsList, pSDataBlock->info.rows, pOperator->numOfOutput);
+
+ num = 1;
+ memcpy(pInfo->prevData, val, bytes);
+ }
+
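+  // flush the final run of identical group keys at the end of the data block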
+ if (num > 0) {
+ char* val = ((char*)pColInfoData->pData) + bytes * (pSDataBlock->info.rows - num);
+ memcpy(pInfo->prevData, val, bytes);
+
+ if (pQueryAttr->stableQuery && pQueryAttr->stabledev && (pRuntimeEnv->prevResult != NULL)) {
+ setParamForStableStddevByColData(pRuntimeEnv, pInfo->binfo.pCtx, pOperator->numOfOutput, pOperator->pExpr, val, bytes);
+ }
+
+ int32_t ret = setGroupResultOutputBuf(pRuntimeEnv, &(pInfo->binfo), pOperator->numOfOutput, val, type, bytes, item->groupIndex);
+ if (ret != TSDB_CODE_SUCCESS) { // null data, too many state code
+ longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_APP_ERROR);
+ }
+
+ doApplyFunctions(pRuntimeEnv, pInfo->binfo.pCtx, &w, pSDataBlock->info.rows - num, num, tsList, pSDataBlock->info.rows, pOperator->numOfOutput);
+ tfree(pInfo->prevData);
}
}
@@ -1455,9 +1479,7 @@ static void doSessionWindowAggImpl(SOperatorInfo* pOperator, SSWindowOperatorInf
}
static void setResultRowKey(SResultRow* pResultRow, char* pData, int16_t type) {
- int64_t v = -1;
- GET_TYPED_DATA(v, int64_t, type, pData);
- if (type == TSDB_DATA_TYPE_BINARY || type == TSDB_DATA_TYPE_NCHAR) {
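+  // var-length group keys keep a private copy of the key; numeric keys are stored
+  // in the result row's time window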
+ if (IS_VAR_DATA_TYPE(type)) {
if (pResultRow->key == NULL) {
pResultRow->key = malloc(varDataTLen(pData));
varDataCopy(pResultRow->key, pData);
@@ -1465,6 +1487,9 @@ static void setResultRowKey(SResultRow* pResultRow, char* pData, int16_t type) {
assert(memcmp(pResultRow->key, pData, varDataTLen(pData)) == 0);
}
} else {
+ int64_t v = -1;
+ GET_TYPED_DATA(v, int64_t, type, pData);
+
pResultRow->win.skey = v;
pResultRow->win.ekey = v;
}
@@ -1480,7 +1505,7 @@ static int32_t setGroupResultOutputBuf(SQueryRuntimeEnv *pRuntimeEnv, SOptrBasic
// not assign result buffer yet, add new result buffer, TODO remove it
char* d = pData;
int16_t len = bytes;
- if (type == TSDB_DATA_TYPE_BINARY || type == TSDB_DATA_TYPE_NCHAR) {
+ if (IS_VAR_DATA_TYPE(type)) {
d = varDataVal(pData);
len = varDataLen(pData);
}
@@ -1522,11 +1547,12 @@ static int32_t getGroupbyColumnIndex(SGroupbyExpr *pGroupbyExpr, SSDataBlock* pD
return -1;
}
-static bool functionNeedToExecute(SQueryRuntimeEnv *pRuntimeEnv, SQLFunctionCtx *pCtx, int32_t functionId) {
+static bool functionNeedToExecute(SQueryRuntimeEnv *pRuntimeEnv, SQLFunctionCtx *pCtx) {
SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx);
SQueryAttr* pQueryAttr = pRuntimeEnv->pQueryAttr;
  // in case of the timestamp column, results are always generated.
+ int32_t functionId = pCtx->functionId;
if (functionId == TSDB_FUNC_TS) {
return true;
}
@@ -1566,7 +1592,7 @@ void setBlockStatisInfo(SQLFunctionCtx *pCtx, SSDataBlock* pSDataBlock, SColInde
pCtx->preAggVals.isSet = false;
}
- pCtx->hasNull = hasNullRv(pColIndex, pStatis);
+ pCtx->hasNull = hasNull(pColIndex, pStatis);
// set the statistics data for primary time stamp column
if (pCtx->functionId == TSDB_FUNC_SPREAD && pColIndex->colId == PRIMARYKEY_TIMESTAMP_COL_INDEX) {
@@ -1835,17 +1861,17 @@ static int32_t setupQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv, int32_t numOf
break;
}
- case OP_Arithmetic: { // TODO refactor to remove arith operator.
+    case OP_Project: { // TODO refactor to remove the project operator.
SOperatorInfo* prev = pRuntimeEnv->proot;
if (i == 0) {
- pRuntimeEnv->proot = createArithOperatorInfo(pRuntimeEnv, prev, pQueryAttr->pExpr1, pQueryAttr->numOfOutput);
+ pRuntimeEnv->proot = createProjectOperatorInfo(pRuntimeEnv, prev, pQueryAttr->pExpr1, pQueryAttr->numOfOutput);
if (pRuntimeEnv->proot != NULL && prev->operatorType != OP_DummyInput && prev->operatorType != OP_Join) { // TODO refactor
setTableScanFilterOperatorInfo(prev->info, pRuntimeEnv->proot);
}
} else {
prev = pRuntimeEnv->proot;
assert(pQueryAttr->pExpr2 != NULL);
- pRuntimeEnv->proot = createArithOperatorInfo(pRuntimeEnv, prev, pQueryAttr->pExpr2, pQueryAttr->numOfExpr2);
+ pRuntimeEnv->proot = createProjectOperatorInfo(pRuntimeEnv, prev, pQueryAttr->pExpr2, pQueryAttr->numOfExpr2);
}
break;
}
@@ -3539,6 +3565,7 @@ int32_t setTimestampListJoinInfo(SQueryRuntimeEnv* pRuntimeEnv, tVariant* pTag,
return 0;
}
+// TODO refactor: this function should be merged with setParamForStableStddevByColData.
void setParamForStableStddev(SQueryRuntimeEnv* pRuntimeEnv, SQLFunctionCtx* pCtx, int32_t numOfOutput, SExprInfo* pExprInfo) {
SQueryAttr* pQueryAttr = pRuntimeEnv->pQueryAttr;
@@ -3819,6 +3846,103 @@ int32_t doFillTimeIntervalGapsInResults(SFillInfo* pFillInfo, SSDataBlock *pOutp
return pOutput->info.rows;
}
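+// Profiling hook: records a timestamped before/after-exec event for an operator into
+// the query summary; the events are paired up later in calculateOperatorProfResults().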
+void publishOperatorProfEvent(SOperatorInfo* operatorInfo, EQueryProfEventType eventType) {
+ SQueryProfEvent event;
+ event.eventType = eventType;
+ event.eventTime = taosGetTimestampUs();
+ event.operatorType = operatorInfo->operatorType;
+
+ SQInfo* qInfo = operatorInfo->pRuntimeEnv->qinfo;
+ if (qInfo->summary.queryProfEvents) {
+ taosArrayPush(qInfo->summary.queryProfEvents, &event);
+ }
+}
+
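+// Record a query-abort event carrying the error code, so operator frames that are
+// still open when the query aborts can be closed during profiling analysis.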
+void publishQueryAbortEvent(SQInfo* pQInfo, int32_t code) {
+ SQueryProfEvent event;
+ event.eventType = QUERY_PROF_QUERY_ABORT;
+ event.eventTime = taosGetTimestampUs();
+ event.abortCode = code;
+
+ if (pQInfo->summary.queryProfEvents) {
+ taosArrayPush(pQInfo->summary.queryProfEvents, &event);
+ }
+}
+
+typedef struct {
+ uint8_t operatorType;
+ int64_t beginTime;
+ int64_t endTime;
+ int64_t selfTime;
+ int64_t descendantsTime;
+} SOperatorStackItem;
+
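+// Close one operator profiling frame: self time is wall time minus the time spent in
+// descendants; that self time is charged to every ancestor still on the stack as
+// descendant time, and the per-operator-type totals are accumulated in profResults.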
+static void doOperatorExecProfOnce(SOperatorStackItem* item, SQueryProfEvent* event, SArray* opStack, SHashObj* profResults) {
+ item->endTime = event->eventTime;
+ item->selfTime = (item->endTime - item->beginTime) - (item->descendantsTime);
+
+ for (int32_t j = 0; j < taosArrayGetSize(opStack); ++j) {
+ SOperatorStackItem* ancestor = taosArrayGet(opStack, j);
+ ancestor->descendantsTime += item->selfTime;
+ }
+
+ uint8_t operatorType = item->operatorType;
+ SOperatorProfResult* result = taosHashGet(profResults, &operatorType, sizeof(operatorType));
+ if (result != NULL) {
+ result->sumRunTimes++;
+ result->sumSelfTime += item->selfTime;
+ } else {
+ SOperatorProfResult opResult;
+ opResult.operatorType = operatorType;
+ opResult.sumSelfTime = item->selfTime;
+ opResult.sumRunTimes = 1;
+ taosHashPut(profResults, &(operatorType), sizeof(operatorType),
+ &opResult, sizeof(opResult));
+ }
+}
+
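+// Replay the recorded event stream with a stack: a BEFORE event pushes a frame, an
+// AFTER event pops and accounts it, and an ABORT event drains whatever remains open.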
+void calculateOperatorProfResults(SQInfo* pQInfo) {
+ if (pQInfo->summary.queryProfEvents == NULL) {
+ qDebug("query prof events array is null");
+ return;
+ }
+
+ if (pQInfo->summary.operatorProfResults == NULL) {
+ qDebug("operator prof results hash is null");
+ return;
+ }
+
+ SArray* opStack = taosArrayInit(32, sizeof(SOperatorStackItem));
+ if (opStack == NULL) {
+ return;
+ }
+
+ size_t size = taosArrayGetSize(pQInfo->summary.queryProfEvents);
+ SHashObj* profResults = pQInfo->summary.operatorProfResults;
+
+ for (int i = 0; i < size; ++i) {
+ SQueryProfEvent* event = taosArrayGet(pQInfo->summary.queryProfEvents, i);
+ if (event->eventType == QUERY_PROF_BEFORE_OPERATOR_EXEC) {
+ SOperatorStackItem opItem;
+ opItem.operatorType = event->operatorType;
+ opItem.beginTime = event->eventTime;
+ opItem.descendantsTime = 0;
+ taosArrayPush(opStack, &opItem);
+ } else if (event->eventType == QUERY_PROF_AFTER_OPERATOR_EXEC) {
+ SOperatorStackItem* item = taosArrayPop(opStack);
+ assert(item->operatorType == event->operatorType);
+ doOperatorExecProfOnce(item, event, opStack, profResults);
+ } else if (event->eventType == QUERY_PROF_QUERY_ABORT) {
+ SOperatorStackItem* item;
+ while ((item = taosArrayPop(opStack)) != NULL) {
+ doOperatorExecProfOnce(item, event, opStack, profResults);
+ }
+ }
+ }
+
+ taosArrayDestroy(opStack);
+}
+
void queryCostStatis(SQInfo *pQInfo) {
SQueryRuntimeEnv *pRuntimeEnv = &pQInfo->runtimeEnv;
SQueryCostInfo *pSummary = &pQInfo->summary;
@@ -3839,6 +3963,8 @@ void queryCostStatis(SQInfo *pQInfo) {
pSummary->numOfTimeWindows = 0;
}
+ calculateOperatorProfResults(pQInfo);
+
qDebug("QInfo:0x%"PRIx64" :cost summary: elapsed time:%"PRId64" us, first merge:%"PRId64" us, total blocks:%d, "
"load block statis:%d, load data block:%d, total rows:%"PRId64 ", check rows:%"PRId64,
pQInfo->qId, pSummary->elapsedTime, pSummary->firstStageMergeTime, pSummary->totalBlocks, pSummary->loadBlockStatis,
@@ -3846,6 +3972,15 @@ void queryCostStatis(SQInfo *pQInfo) {
qDebug("QInfo:0x%"PRIx64" :cost summary: winResPool size:%.2f Kb, numOfWin:%"PRId64", tableInfoSize:%.2f Kb, hashTable:%.2f Kb", pQInfo->qId, pSummary->winInfoSize/1024.0,
pSummary->numOfTimeWindows, pSummary->tableInfoSize/1024.0, pSummary->hashSize/1024.0);
+
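+  // dump the per-operator profiling totals gathered by calculateOperatorProfResults()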
+ if (pSummary->operatorProfResults) {
+ SOperatorProfResult* opRes = taosHashIterate(pSummary->operatorProfResults, NULL);
+ while (opRes != NULL) {
+ qDebug("QInfo:0x%" PRIx64 " :cost summary: operator : %d, exec times: %" PRId64 ", self time: %" PRId64,
+ pQInfo->qId, opRes->operatorType, opRes->sumRunTimes, opRes->sumSelfTime);
+ opRes = taosHashIterate(pSummary->operatorProfResults, opRes);
+ }
+ }
}
//static void updateOffsetVal(SQueryRuntimeEnv *pRuntimeEnv, SDataBlockInfo *pBlockInfo) {
@@ -4247,6 +4382,15 @@ int32_t doInitQInfo(SQInfo* pQInfo, STSBuf* pTsBuf, void* tsdb, void* sourceOptr
// create runtime environment
int32_t numOfTables = (int32_t)pQueryAttr->tableGroupInfo.numOfTables;
pQInfo->summary.tableInfoSize += (numOfTables * sizeof(STableQueryInfo));
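+  // profiling containers: an event log plus a per-operator-type aggregation hash;
+  // allocation failure merely disables profiling and does not fail the query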
+ pQInfo->summary.queryProfEvents = taosArrayInit(512, sizeof(SQueryProfEvent));
+ if (pQInfo->summary.queryProfEvents == NULL) {
+ qDebug("failed to allocate query prof events array");
+ }
+ pQInfo->summary.operatorProfResults =
+ taosHashInit(8, taosGetDefaultHashFunction(TSDB_DATA_TYPE_TINYINT), true, HASH_NO_LOCK);
+ if (pQInfo->summary.operatorProfResults == NULL) {
+ qDebug("failed to allocate operator prof results hash");
+ }
code = setupQueryRuntimeEnv(pRuntimeEnv, (int32_t) pQueryAttr->tableGroupInfo.numOfTables, pOperator, param);
if (code != TSDB_CODE_SUCCESS) {
@@ -4606,8 +4750,8 @@ void setTableScanFilterOperatorInfo(STableScanInfo* pTableScanInfo, SOperatorInf
pTableScanInfo->pResultRowInfo = &pInfo->resultRowInfo;
pTableScanInfo->rowCellInfoOffset = pInfo->rowCellInfoOffset;
- } else if (pDownstream->operatorType == OP_Arithmetic) {
- SArithOperatorInfo *pInfo = pDownstream->info;
+ } else if (pDownstream->operatorType == OP_Project) {
+ SProjectOperatorInfo *pInfo = pDownstream->info;
pTableScanInfo->pCtx = pInfo->binfo.pCtx;
pTableScanInfo->pResultRowInfo = &pInfo->binfo.resultRowInfo;
@@ -4744,8 +4888,7 @@ SOperatorInfo* createGlobalAggregateOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv,
SMultiwayMergeInfo* pInfo = calloc(1, sizeof(SMultiwayMergeInfo));
pInfo->resultRowFactor =
- (int32_t)(GET_ROW_PARAM_FOR_MULTIOUTPUT(pRuntimeEnv->pQueryAttr, pRuntimeEnv->pQueryAttr->topBotQuery,
- false));
+ (int32_t)(GET_ROW_PARAM_FOR_MULTIOUTPUT(pRuntimeEnv->pQueryAttr, pRuntimeEnv->pQueryAttr->topBotQuery, false));
pRuntimeEnv->scanFlag = MERGE_STAGE; // TODO init when creating pCtx
@@ -4872,7 +5015,10 @@ static SSDataBlock* doAggregate(void* param, bool* newgroup) {
SOperatorInfo* upstream = pOperator->upstream[0];
while(1) {
+ publishOperatorProfEvent(upstream, QUERY_PROF_BEFORE_OPERATOR_EXEC);
SSDataBlock* pBlock = upstream->exec(upstream, newgroup);
+ publishOperatorProfEvent(upstream, QUERY_PROF_AFTER_OPERATOR_EXEC);
+
if (pBlock == NULL) {
break;
}
@@ -4927,7 +5073,10 @@ static SSDataBlock* doSTableAggregate(void* param, bool* newgroup) {
SOperatorInfo* upstream = pOperator->upstream[0];
while(1) {
+ publishOperatorProfEvent(upstream, QUERY_PROF_BEFORE_OPERATOR_EXEC);
SSDataBlock* pBlock = upstream->exec(upstream, newgroup);
+ publishOperatorProfEvent(upstream, QUERY_PROF_AFTER_OPERATOR_EXEC);
+
if (pBlock == NULL) {
break;
}
@@ -4963,23 +5112,23 @@ static SSDataBlock* doSTableAggregate(void* param, bool* newgroup) {
return pInfo->pRes;
}
-static SSDataBlock* doArithmeticOperation(void* param, bool* newgroup) {
+static SSDataBlock* doProjectOperation(void* param, bool* newgroup) {
SOperatorInfo* pOperator = (SOperatorInfo*) param;
- SArithOperatorInfo* pArithInfo = pOperator->info;
+ SProjectOperatorInfo* pProjectInfo = pOperator->info;
SQueryRuntimeEnv* pRuntimeEnv = pOperator->pRuntimeEnv;
- SOptrBasicInfo *pInfo = &pArithInfo->binfo;
+ SOptrBasicInfo *pInfo = &pProjectInfo->binfo;
SSDataBlock* pRes = pInfo->pRes;
int32_t order = pRuntimeEnv->pQueryAttr->order.order;
pRes->info.rows = 0;
- if (pArithInfo->existDataBlock) { // TODO refactor
+ if (pProjectInfo->existDataBlock) { // TODO refactor
STableQueryInfo* pTableQueryInfo = pRuntimeEnv->current;
- SSDataBlock* pBlock = pArithInfo->existDataBlock;
- pArithInfo->existDataBlock = NULL;
+ SSDataBlock* pBlock = pProjectInfo->existDataBlock;
+ pProjectInfo->existDataBlock = NULL;
*newgroup = true;
// todo dynamic set tags
@@ -4989,9 +5138,9 @@ static SSDataBlock* doArithmeticOperation(void* param, bool* newgroup) {
    // the pDataBlock is always the same one, no need to call this again
setInputDataBlock(pOperator, pInfo->pCtx, pBlock, order);
- updateOutputBuf(&pArithInfo->binfo, &pArithInfo->bufCapacity, pBlock->info.rows);
+ updateOutputBuf(&pProjectInfo->binfo, &pProjectInfo->bufCapacity, pBlock->info.rows);
- arithmeticApplyFunctions(pRuntimeEnv, pInfo->pCtx, pOperator->numOfOutput);
+ projectApplyFunctions(pRuntimeEnv, pInfo->pCtx, pOperator->numOfOutput);
if (pTableQueryInfo != NULL) {
updateTableIdInfo(pTableQueryInfo, pBlock, pRuntimeEnv->pTableRetrieveTsMap, order);
}
@@ -5007,7 +5156,10 @@ static SSDataBlock* doArithmeticOperation(void* param, bool* newgroup) {
bool prevVal = *newgroup;
// The upstream exec may change the value of the newgroup, so use a local variable instead.
+ publishOperatorProfEvent(pOperator->upstream[0], QUERY_PROF_BEFORE_OPERATOR_EXEC);
SSDataBlock* pBlock = pOperator->upstream[0]->exec(pOperator->upstream[0], newgroup);
+ publishOperatorProfEvent(pOperator->upstream[0], QUERY_PROF_AFTER_OPERATOR_EXEC);
+
if (pBlock == NULL) {
assert(*newgroup == false);
@@ -5019,7 +5171,7 @@ static SSDataBlock* doArithmeticOperation(void* param, bool* newgroup) {
      // Return the result of the previous group first.
if (*newgroup) {
if (pRes->info.rows > 0) {
- pArithInfo->existDataBlock = pBlock;
+ pProjectInfo->existDataBlock = pBlock;
clearNumOfRes(pInfo->pCtx, pOperator->numOfOutput);
return pInfo->pRes;
} else { // init output buffer for a new group data
@@ -5039,9 +5191,9 @@ static SSDataBlock* doArithmeticOperation(void* param, bool* newgroup) {
    // the pDataBlock is always the same one, no need to call this again
setInputDataBlock(pOperator, pInfo->pCtx, pBlock, order);
- updateOutputBuf(&pArithInfo->binfo, &pArithInfo->bufCapacity, pBlock->info.rows);
+ updateOutputBuf(&pProjectInfo->binfo, &pProjectInfo->bufCapacity, pBlock->info.rows);
- arithmeticApplyFunctions(pRuntimeEnv, pInfo->pCtx, pOperator->numOfOutput);
+ projectApplyFunctions(pRuntimeEnv, pInfo->pCtx, pOperator->numOfOutput);
if (pTableQueryInfo != NULL) {
updateTableIdInfo(pTableQueryInfo, pBlock, pRuntimeEnv->pTableRetrieveTsMap, order);
}
@@ -5067,7 +5219,10 @@ static SSDataBlock* doLimit(void* param, bool* newgroup) {
SSDataBlock* pBlock = NULL;
while (1) {
+ publishOperatorProfEvent(pOperator->upstream[0], QUERY_PROF_BEFORE_OPERATOR_EXEC);
pBlock = pOperator->upstream[0]->exec(pOperator->upstream[0], newgroup);
+ publishOperatorProfEvent(pOperator->upstream[0], QUERY_PROF_AFTER_OPERATOR_EXEC);
+
if (pBlock == NULL) {
setQueryStatus(pOperator->pRuntimeEnv, QUERY_COMPLETED);
pOperator->status = OP_EXEC_DONE;
@@ -5117,7 +5272,10 @@ static SSDataBlock* doFilter(void* param, bool* newgroup) {
SQueryRuntimeEnv* pRuntimeEnv = pOperator->pRuntimeEnv;
while (1) {
+ publishOperatorProfEvent(pOperator->upstream[0], QUERY_PROF_BEFORE_OPERATOR_EXEC);
SSDataBlock *pBlock = pOperator->upstream[0]->exec(pOperator->upstream[0], newgroup);
+ publishOperatorProfEvent(pOperator->upstream[0], QUERY_PROF_AFTER_OPERATOR_EXEC);
+
if (pBlock == NULL) {
break;
}
@@ -5162,7 +5320,10 @@ static SSDataBlock* doIntervalAgg(void* param, bool* newgroup) {
SOperatorInfo* upstream = pOperator->upstream[0];
while(1) {
+ publishOperatorProfEvent(upstream, QUERY_PROF_BEFORE_OPERATOR_EXEC);
SSDataBlock* pBlock = upstream->exec(upstream, newgroup);
+ publishOperatorProfEvent(upstream, QUERY_PROF_AFTER_OPERATOR_EXEC);
+
if (pBlock == NULL) {
break;
}
@@ -5215,7 +5376,10 @@ static SSDataBlock* doSTableIntervalAgg(void* param, bool* newgroup) {
SOperatorInfo* upstream = pOperator->upstream[0];
while(1) {
+ publishOperatorProfEvent(upstream, QUERY_PROF_BEFORE_OPERATOR_EXEC);
SSDataBlock* pBlock = upstream->exec(upstream, newgroup);
+ publishOperatorProfEvent(upstream, QUERY_PROF_AFTER_OPERATOR_EXEC);
+
if (pBlock == NULL) {
break;
}
@@ -5317,6 +5481,7 @@ static void doStateWindowAggImpl(SOperatorInfo* pOperator, SStateWindowOperatorI
doApplyFunctions(pRuntimeEnv, pBInfo->pCtx, &pInfo->curWindow, pInfo->start, pInfo->numOfRows, tsList,
pSDataBlock->info.rows, pOperator->numOfOutput);
}
+
static SSDataBlock* doStateWindowAgg(void *param, bool* newgroup) {
SOperatorInfo* pOperator = (SOperatorInfo*) param;
if (pOperator->status == OP_EXEC_DONE) {
@@ -5342,7 +5507,10 @@ static SSDataBlock* doStateWindowAgg(void *param, bool* newgroup) {
STimeWindow win = pQueryAttr->window;
SOperatorInfo* upstream = pOperator->upstream[0];
while (1) {
+ publishOperatorProfEvent(upstream, QUERY_PROF_BEFORE_OPERATOR_EXEC);
SSDataBlock* pBlock = upstream->exec(upstream, newgroup);
+ publishOperatorProfEvent(upstream, QUERY_PROF_AFTER_OPERATOR_EXEC);
+
if (pBlock == NULL) {
break;
}
@@ -5400,7 +5568,9 @@ static SSDataBlock* doSessionWindowAgg(void* param, bool* newgroup) {
SOperatorInfo* upstream = pOperator->upstream[0];
while(1) {
+ publishOperatorProfEvent(upstream, QUERY_PROF_BEFORE_OPERATOR_EXEC);
SSDataBlock* pBlock = upstream->exec(upstream, newgroup);
+ publishOperatorProfEvent(upstream, QUERY_PROF_AFTER_OPERATOR_EXEC);
if (pBlock == NULL) {
break;
}
@@ -5451,7 +5621,9 @@ static SSDataBlock* hashGroupbyAggregate(void* param, bool* newgroup) {
SOperatorInfo* upstream = pOperator->upstream[0];
while(1) {
+ publishOperatorProfEvent(upstream, QUERY_PROF_BEFORE_OPERATOR_EXEC);
SSDataBlock* pBlock = upstream->exec(upstream, newgroup);
+ publishOperatorProfEvent(upstream, QUERY_PROF_AFTER_OPERATOR_EXEC);
if (pBlock == NULL) {
break;
}
@@ -5520,7 +5692,10 @@ static SSDataBlock* doFill(void* param, bool* newgroup) {
}
while(1) {
+ publishOperatorProfEvent(pOperator->upstream[0], QUERY_PROF_BEFORE_OPERATOR_EXEC);
SSDataBlock* pBlock = pOperator->upstream[0]->exec(pOperator->upstream[0], newgroup);
+ publishOperatorProfEvent(pOperator->upstream[0], QUERY_PROF_AFTER_OPERATOR_EXEC);
+
if (*newgroup) {
assert(pBlock != NULL);
}
@@ -5680,8 +5855,8 @@ static void destroyGroupbyOperatorInfo(void* param, int32_t numOfOutput) {
tfree(pInfo->prevData);
}
-static void destroyArithOperatorInfo(void* param, int32_t numOfOutput) {
- SArithOperatorInfo* pInfo = (SArithOperatorInfo*) param;
+static void destroyProjectOperatorInfo(void* param, int32_t numOfOutput) {
+ SProjectOperatorInfo* pInfo = (SProjectOperatorInfo*) param;
doDestroyBasicInfo(&pInfo->binfo, numOfOutput);
}
@@ -5727,8 +5902,8 @@ SOperatorInfo* createMultiTableAggOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SO
return pOperator;
}
-SOperatorInfo* createArithOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput) {
- SArithOperatorInfo* pInfo = calloc(1, sizeof(SArithOperatorInfo));
+SOperatorInfo* createProjectOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput) {
+ SProjectOperatorInfo* pInfo = calloc(1, sizeof(SProjectOperatorInfo));
pInfo->seed = rand();
pInfo->bufCapacity = pRuntimeEnv->resultInfo.capacity;
@@ -5741,8 +5916,8 @@ SOperatorInfo* createArithOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorI
setDefaultOutputBuf(pRuntimeEnv, pBInfo, pInfo->seed, MASTER_SCAN);
SOperatorInfo* pOperator = calloc(1, sizeof(SOperatorInfo));
- pOperator->name = "ArithmeticOperator";
- pOperator->operatorType = OP_Arithmetic;
+ pOperator->name = "ProjectOperator";
+ pOperator->operatorType = OP_Project;
pOperator->blockingOptr = false;
pOperator->status = OP_IN_EXECUTING;
pOperator->info = pInfo;
@@ -5750,8 +5925,8 @@ SOperatorInfo* createArithOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorI
pOperator->numOfOutput = numOfOutput;
pOperator->pRuntimeEnv = pRuntimeEnv;
- pOperator->exec = doArithmeticOperation;
- pOperator->cleanup = destroyArithOperatorInfo;
+ pOperator->exec = doProjectOperation;
+ pOperator->cleanup = destroyProjectOperatorInfo;
appendUpstream(pOperator, upstream);
return pOperator;
@@ -6190,7 +6365,10 @@ static SSDataBlock* hashDistinct(void* param, bool* newgroup) {
pRes->info.rows = 0;
SSDataBlock* pBlock = NULL;
while(1) {
+ publishOperatorProfEvent(pOperator->upstream[0], QUERY_PROF_BEFORE_OPERATOR_EXEC);
pBlock = pOperator->upstream[0]->exec(pOperator->upstream[0], newgroup);
+ publishOperatorProfEvent(pOperator->upstream[0], QUERY_PROF_AFTER_OPERATOR_EXEC);
+
if (pBlock == NULL) {
setQueryStatus(pOperator->pRuntimeEnv, QUERY_COMPLETED);
pOperator->status = OP_EXEC_DONE;
@@ -6332,7 +6510,7 @@ static bool validateQueryMsg(SQueryTableMsg *pQueryMsg) {
return true;
}
-static UNUSED_FUNC bool validateQueryTableCols(SQueriedTableInfo* pTableInfo, SSqlExpr** pExpr, int32_t numOfOutput,
+static bool validateQueryTableCols(SQueriedTableInfo* pTableInfo, SSqlExpr** pExpr, int32_t numOfOutput,
SColumnInfo* pTagCols, void* pMsg) {
int32_t numOfTotal = pTableInfo->numOfCols + pTableInfo->numOfTags;
if (pTableInfo->numOfCols < 0 || pTableInfo->numOfTags < 0 || numOfTotal > TSDB_MAX_COLUMNS) {
@@ -6517,6 +6695,7 @@ int32_t convertQueryMsg(SQueryTableMsg *pQueryMsg, SQueryParam* param) {
pExprMsg->resType = htons(pExprMsg->resType);
pExprMsg->resBytes = htons(pExprMsg->resBytes);
+ pExprMsg->interBytes = htonl(pExprMsg->interBytes);
pExprMsg->functionId = htons(pExprMsg->functionId);
pExprMsg->numOfParams = htons(pExprMsg->numOfParams);
@@ -6724,41 +6903,41 @@ _cleanup:
return code;
}
- int32_t cloneExprFilterInfo(SColumnFilterInfo **dst, SColumnFilterInfo* src, int32_t filterNum) {
- if (filterNum <= 0) {
- return TSDB_CODE_SUCCESS;
- }
-
- *dst = calloc(filterNum, sizeof(*src));
- if (*dst == NULL) {
- return TSDB_CODE_QRY_OUT_OF_MEMORY;
- }
+int32_t cloneExprFilterInfo(SColumnFilterInfo **dst, SColumnFilterInfo* src, int32_t filterNum) {
+ if (filterNum <= 0) {
+ return TSDB_CODE_SUCCESS;
+ }
- memcpy(*dst, src, sizeof(*src) * filterNum);
+ *dst = calloc(filterNum, sizeof(*src));
+ if (*dst == NULL) {
+ return TSDB_CODE_QRY_OUT_OF_MEMORY;
+ }
- for (int32_t i = 0; i < filterNum; i++) {
- if ((*dst)[i].filterstr && dst[i]->len > 0) {
- void *pz = calloc(1, (size_t)(*dst)[i].len + 1);
+ memcpy(*dst, src, sizeof(*src) * filterNum);
- if (pz == NULL) {
- if (i == 0) {
- free(*dst);
- } else {
- freeColumnFilterInfo(*dst, i);
- }
+ for (int32_t i = 0; i < filterNum; i++) {
+ if ((*dst)[i].filterstr && dst[i]->len > 0) {
+ void *pz = calloc(1, (size_t)(*dst)[i].len + 1);
- return TSDB_CODE_QRY_OUT_OF_MEMORY;
+ if (pz == NULL) {
+ if (i == 0) {
+ free(*dst);
+ } else {
+ freeColumnFilterInfo(*dst, i);
}
- memcpy(pz, (void *)src->pz, (size_t)src->len + 1);
-
- (*dst)[i].pz = (int64_t)pz;
+ return TSDB_CODE_QRY_OUT_OF_MEMORY;
}
- }
- return TSDB_CODE_SUCCESS;
+ memcpy(pz, (void *)src->pz, (size_t)src->len + 1);
+
+ (*dst)[i].pz = (int64_t)pz;
+ }
}
+ return TSDB_CODE_SUCCESS;
+}
+
int32_t buildArithmeticExprFromMsg(SExprInfo *pExprInfo, void *pQueryMsg) {
qDebug("qmsg:%p create arithmetic expr from binary", pQueryMsg);
@@ -6817,8 +6996,8 @@ int32_t createQueryFunc(SQueriedTableInfo* pTableInfo, int32_t numOfOutput, SExp
for (int32_t i = 0; i < numOfOutput; ++i) {
pExprs[i].base = *pExprMsg[i];
- memset(pExprs[i].base.param, 0, sizeof(tVariant) * tListLen(pExprs[i].base.param));
+ memset(pExprs[i].base.param, 0, sizeof(tVariant) * tListLen(pExprs[i].base.param));
for (int32_t j = 0; j < pExprMsg[i]->numOfParams; ++j) {
tVariantAssign(&pExprs[i].base.param[j], &pExprMsg[i]->param[j]);
}
@@ -6893,6 +7072,7 @@ int32_t createQueryFunc(SQueriedTableInfo* pTableInfo, int32_t numOfOutput, SExp
return TSDB_CODE_QRY_INVALID_MSG;
}
+ // todo remove it
if (getResultDataInfo(type, bytes, pExprs[i].base.functionId, param, &pExprs[i].base.resType, &pExprs[i].base.resBytes,
&pExprs[i].base.interBytes, 0, isSuperTable) != TSDB_CODE_SUCCESS) {
tfree(pExprs);
@@ -7074,6 +7254,8 @@ int32_t createFilterInfo(SQueryAttr* pQueryAttr, uint64_t qId) {
doCreateFilterInfo(pQueryAttr->tableCols, pQueryAttr->numOfCols, pQueryAttr->numOfFilterCols,
&pQueryAttr->pFilterInfo, qId);
+ pQueryAttr->createFilterOperator = true;
+
return TSDB_CODE_SUCCESS;
}
@@ -7359,11 +7541,16 @@ int32_t initQInfo(STsBufInfo* pTsBufInfo, void* tsdb, void* sourceOptr, SQInfo*
SQueryAttr *pQueryAttr = pRuntimeEnv->pQueryAttr;
STSBuf *pTsBuf = NULL;
- if (pTsBufInfo->tsLen > 0) { // open new file to save the result
- char *tsBlock = start + pTsBufInfo->tsOffset;
+
+ if (pTsBufInfo->tsLen > 0) { // open new file to save the result
+ char* tsBlock = start + pTsBufInfo->tsOffset;
pTsBuf = tsBufCreateFromCompBlocks(tsBlock, pTsBufInfo->tsNumOfBlocks, pTsBufInfo->tsLen, pTsBufInfo->tsOrder,
- pQueryAttr->vgId);
+ pQueryAttr->vgId);
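+    // tsBufCreateFromCompBlocks can now fail on disk errors and return NULL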
+ if (pTsBuf == NULL) {
+ code = TSDB_CODE_QRY_NO_DISKSPACE;
+ goto _error;
+ }
tsBufResetPos(pTsBuf);
bool ret = tsBufNextPos(pTsBuf);
UNUSED(ret);
@@ -7507,6 +7694,9 @@ void freeQInfo(SQInfo *pQInfo) {
tfree(pQInfo->pBuf);
tfree(pQInfo->sql);
+ taosArrayDestroy(pQInfo->summary.queryProfEvents);
+ taosHashCleanup(pQInfo->summary.operatorProfResults);
+
taosArrayDestroy(pRuntimeEnv->groupResInfo.pRows);
pQInfo->signature = 0;
diff --git a/src/query/src/qPlan.c b/src/query/src/qPlan.c
index ee587a515dca39559bc6d061501d4e3397c0781a..9c06a87b81c595a01f683c17c87b0418a09a5098 100644
--- a/src/query/src/qPlan.c
+++ b/src/query/src/qPlan.c
@@ -565,7 +565,7 @@ SArray* createExecOperatorPlan(SQueryAttr* pQueryAttr) {
taosArrayPush(plan, &op);
if (pQueryAttr->pExpr2 != NULL) {
- op = OP_Arithmetic;
+ op = OP_Project;
taosArrayPush(plan, &op);
}
@@ -585,7 +585,7 @@ SArray* createExecOperatorPlan(SQueryAttr* pQueryAttr) {
}
if (pQueryAttr->pExpr2 != NULL) {
- op = OP_Arithmetic;
+ op = OP_Project;
taosArrayPush(plan, &op);
}
} else if (pQueryAttr->sw.gap > 0) {
@@ -593,7 +593,7 @@ SArray* createExecOperatorPlan(SQueryAttr* pQueryAttr) {
taosArrayPush(plan, &op);
if (pQueryAttr->pExpr2 != NULL) {
- op = OP_Arithmetic;
+ op = OP_Project;
taosArrayPush(plan, &op);
}
} else if (pQueryAttr->stateWindow) {
@@ -601,7 +601,7 @@ SArray* createExecOperatorPlan(SQueryAttr* pQueryAttr) {
taosArrayPush(plan, &op);
if (pQueryAttr->pExpr2 != NULL) {
- op = OP_Arithmetic;
+ op = OP_Project;
taosArrayPush(plan, &op);
}
} else if (pQueryAttr->simpleAgg) {
@@ -619,15 +619,15 @@ SArray* createExecOperatorPlan(SQueryAttr* pQueryAttr) {
}
if (pQueryAttr->pExpr2 != NULL && !pQueryAttr->stableQuery) {
- op = OP_Arithmetic;
+ op = OP_Project;
taosArrayPush(plan, &op);
}
} else { // diff/add/multiply/subtract/division
- if (pQueryAttr->numOfFilterCols > 0 && pQueryAttr->vgId == 0) { // todo refactor
+ if (pQueryAttr->numOfFilterCols > 0 && pQueryAttr->createFilterOperator && pQueryAttr->vgId == 0) { // todo refactor
op = OP_Filter;
taosArrayPush(plan, &op);
} else {
- op = OP_Arithmetic;
+ op = OP_Project;
taosArrayPush(plan, &op);
}
}
@@ -665,7 +665,7 @@ SArray* createGlobalMergePlan(SQueryAttr* pQueryAttr) {
}
if (pQueryAttr->pExpr2 != NULL) {
- op = OP_Arithmetic;
+ op = OP_Project;
taosArrayPush(plan, &op);
}
}
diff --git a/src/query/src/qTsbuf.c b/src/query/src/qTsbuf.c
index cf67e37cf2e47c0d305c8f04766d51f71989daf5..27c877fafa8640ceb4c3b380d32784f188c4a5f8 100644
--- a/src/query/src/qTsbuf.c
+++ b/src/query/src/qTsbuf.c
@@ -2,6 +2,7 @@
#include "taoserror.h"
#include "tscompression.h"
#include "tutil.h"
+#include "queryLog.h"
static int32_t getDataStartOffset();
static void TSBufUpdateGroupInfo(STSBuf* pTSBuf, int32_t index, STSGroupBlockInfo* pBlockInfo);
@@ -633,10 +634,15 @@ int32_t STSBufUpdateHeader(STSBuf* pTSBuf, STSBufFileHeader* pHeader) {
int32_t r = fseek(pTSBuf->f, 0, SEEK_SET);
if (r != 0) {
+ qError("fseek failed, errno:%d", errno);
+ return -1;
+ }
+
+ size_t ws = fwrite(pHeader, sizeof(STSBufFileHeader), 1, pTSBuf->f);
+ if (ws != 1) {
+ qError("ts update header fwrite failed, size:%d, expected size:%d", (int32_t)ws, (int32_t)sizeof(STSBufFileHeader));
return -1;
}
-
- fwrite(pHeader, sizeof(STSBufFileHeader), 1, pTSBuf->f);
return 0;
}
@@ -853,9 +859,17 @@ STSBuf* tsBufCreateFromCompBlocks(const char* pData, int32_t numOfBlocks, int32_
TSBufUpdateGroupInfo(pTSBuf, pTSBuf->numOfGroups - 1, pBlockInfo);
int32_t ret = fseek(pTSBuf->f, pBlockInfo->offset, SEEK_SET);
- UNUSED(ret);
+ if (ret == -1) {
+ qError("fseek failed, errno:%d", errno);
+ tsBufDestroy(pTSBuf);
+ return NULL;
+ }
size_t sz = fwrite((void*)pData, 1, len, pTSBuf->f);
- UNUSED(sz);
+ if (sz != len) {
+ qError("ts data fwrite failed, write size:%d, expected size:%d", (int32_t)sz, len);
+ tsBufDestroy(pTSBuf);
+ return NULL;
+ }
pTSBuf->fileSize += len;
pTSBuf->tsOrder = order;
@@ -863,9 +877,16 @@ STSBuf* tsBufCreateFromCompBlocks(const char* pData, int32_t numOfBlocks, int32_
STSBufFileHeader header = {
.magic = TS_COMP_FILE_MAGIC, .numOfGroup = pTSBuf->numOfGroups, .tsOrder = pTSBuf->tsOrder};
- STSBufUpdateHeader(pTSBuf, &header);
+ if (STSBufUpdateHeader(pTSBuf, &header) < 0) {
+ tsBufDestroy(pTSBuf);
+ return NULL;
+ }
- taosFsync(fileno(pTSBuf->f));
+ if (taosFsync(fileno(pTSBuf->f)) == -1) {
+ qError("fsync failed, errno:%d", errno);
+ tsBufDestroy(pTSBuf);
+ return NULL;
+ }
return pTSBuf;
}
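
The qTsbuf.c hunks above replace fire-and-forget `fseek`/`fwrite`/`fsync` calls with checked ones that log the failure, tear the buffer down, and return NULL, which the new NULL check in `initQInfo` then surfaces to the caller as an error code. A minimal, stand-alone sketch of that checked-I/O pattern; the helper name and simplified error handling are illustrative, not the real STSBuf API:

```c
/* Sketch of the checked-I/O pattern these hunks adopt: every fseek/fwrite
 * result is tested, and any failure is reported instead of letting a
 * silently-truncated file be read back later. */
#include <stdio.h>

static int writeHeaderChecked(FILE *f, const void *hdr, size_t len) {
  if (fseek(f, 0, SEEK_SET) != 0) {
    perror("fseek");
    return -1;
  }
  if (fwrite(hdr, len, 1, f) != 1) {  /* a partial write counts as failure */
    perror("fwrite");
    return -1;
  }
  return 0;
}

int main(void) {
  FILE *f = tmpfile();
  if (f == NULL) return 1;
  int hdr = 0x1234;
  int rc = writeHeaderChecked(f, &hdr, sizeof(hdr));
  printf("rc=%d\n", rc);
  fclose(f);
  return rc == 0 ? 0 : 1;
}
```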
diff --git a/src/query/src/queryMain.c b/src/query/src/queryMain.c
index 38ef81e7938a3635273c0cfa2cb4e86ca2e35c1e..787cb2f7d1a34f8958977eb85cd3c2621ff9a047 100644
--- a/src/query/src/queryMain.c
+++ b/src/query/src/queryMain.c
@@ -232,6 +232,7 @@ bool qTableQuery(qinfo_t qinfo, uint64_t *qId) {
// error occurs, record the error code and return to client
int32_t ret = setjmp(pQInfo->runtimeEnv.env);
if (ret != TSDB_CODE_SUCCESS) {
+ publishQueryAbortEvent(pQInfo, ret);
pQInfo->code = ret;
qDebug("QInfo:0x%"PRIx64" query abort due to error/cancel occurs, code:%s", pQInfo->qId, tstrerror(pQInfo->code));
return doBuildResCheck(pQInfo);
@@ -240,7 +241,9 @@ bool qTableQuery(qinfo_t qinfo, uint64_t *qId) {
qDebug("QInfo:0x%"PRIx64" query task is launched", pQInfo->qId);
bool newgroup = false;
+ publishOperatorProfEvent(pRuntimeEnv->proot, QUERY_PROF_BEFORE_OPERATOR_EXEC);
pRuntimeEnv->outputBuf = pRuntimeEnv->proot->exec(pRuntimeEnv->proot, &newgroup);
+ publishOperatorProfEvent(pRuntimeEnv->proot, QUERY_PROF_AFTER_OPERATOR_EXEC);
pRuntimeEnv->resultInfo.total += GET_NUM_OF_RESULTS(pRuntimeEnv);
if (isQueryKilled(pQInfo)) {
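
`publishOperatorProfEvent` is called on either side of the root operator's `exec`, so every invocation produces a BEFORE/AFTER pair. Below is a rough sketch of how such paired events can be reduced to per-operator elapsed time; the event layout, the stack-based pairing, and the reduction itself are assumptions for illustration, not the actual profiling code:

```c
/* Minimal sketch (not TDengine's implementation) of reducing paired
 * BEFORE/AFTER profiling events to per-operator elapsed time. An AFTER
 * event is matched with the most recent unmatched BEFORE, which also
 * handles nested operators correctly. */
#include <stdio.h>
#include <stdint.h>

typedef enum { PROF_BEFORE, PROF_AFTER } EProfEventType;

typedef struct {
  EProfEventType type;
  int            operatorId;  /* stands in for the operator pointer */
  uint64_t       ts;          /* event timestamp, microseconds */
} SProfEvent;

int main(void) {
  /* A recorded stream: operator 1 runs with operator 2 nested inside. */
  SProfEvent events[] = {
      {PROF_BEFORE, 1, 100}, {PROF_BEFORE, 2, 120},
      {PROF_AFTER,  2, 180}, {PROF_AFTER,  1, 230},
  };
  int      stack[16], top = 0;
  uint64_t start[16];

  for (int i = 0; i < 4; ++i) {
    if (events[i].type == PROF_BEFORE) {
      stack[top] = events[i].operatorId;
      start[top] = events[i].ts;
      top++;
    } else {
      top--;  /* AFTER pairs with the most recent BEFORE */
      printf("operator %d elapsed %llu us\n", stack[top],
             (unsigned long long)(events[i].ts - start[top]));
    }
  }
  return 0;
}
```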
diff --git a/src/query/tests/astTest.cpp b/src/query/tests/astTest.cpp
index ce7b2f94a177576c8046b299bcb2e695fb5ead2d..1143d00e8da9e77cd0f740d98fe77ffd1beac4bc 100644
--- a/src/query/tests/astTest.cpp
+++ b/src/query/tests/astTest.cpp
@@ -10,6 +10,7 @@
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wwrite-strings"
+#pragma GCC diagnostic ignored "-Wunused-function"
typedef struct ResultObj {
int32_t numOfResult;
diff --git a/src/query/tests/histogramTest.cpp b/src/query/tests/histogramTest.cpp
index 3088d6f8078483c39ec3780250e8bfa2d613f218..0266ecffc11348dcd0184030584ed7b721d39aff 100644
--- a/src/query/tests/histogramTest.cpp
+++ b/src/query/tests/histogramTest.cpp
@@ -5,6 +5,10 @@
#include "taos.h"
#include "qHistogram.h"
+
+#pragma GCC diagnostic ignored "-Wunused-function"
+#pragma GCC diagnostic ignored "-Wunused-variable"
+
namespace {
void doHistogramAddTest() {
SHistogramInfo* pHisto = NULL;
diff --git a/src/query/tests/patternMatchTest.cpp b/src/query/tests/patternMatchTest.cpp
index f3e0d3e119259d0a7cc8a94a6b43f4c71558bf78..091604c65c0e8b7fcf998fdd69f6f82f101f8157 100644
--- a/src/query/tests/patternMatchTest.cpp
+++ b/src/query/tests/patternMatchTest.cpp
@@ -6,6 +6,9 @@
#include "qAggMain.h"
#include "tcompare.h"
+#pragma GCC diagnostic ignored "-Wunused-function"
+#pragma GCC diagnostic ignored "-Wunused-variable"
+
TEST(testCase, patternMatchTest) {
SPatternCompareInfo info = PATTERN_COMPARE_INFO_INITIALIZER;
diff --git a/src/query/tests/percentileTest.cpp b/src/query/tests/percentileTest.cpp
index 104bfb3c06a9613bafcd4e0b3f39af4f9d102b04..1b6951201af5908378fb253b38cea01de1210d57 100644
--- a/src/query/tests/percentileTest.cpp
+++ b/src/query/tests/percentileTest.cpp
@@ -7,6 +7,9 @@
#include "qPercentile.h"
+#pragma GCC diagnostic ignored "-Wunused-function"
+#pragma GCC diagnostic ignored "-Wunused-variable"
+
namespace {
tMemBucket *createBigIntDataBucket(int32_t start, int32_t end) {
tMemBucket *pBucket = tMemBucketCreate(sizeof(int64_t), TSDB_DATA_TYPE_BIGINT, start, end);
diff --git a/src/query/tests/resultBufferTest.cpp b/src/query/tests/resultBufferTest.cpp
index 491d75ccb9c8104a8e7760aa15918bd212646de6..54ac0bf4e5c78f2fcc7f0e3271eb3409ea072db7 100644
--- a/src/query/tests/resultBufferTest.cpp
+++ b/src/query/tests/resultBufferTest.cpp
@@ -6,6 +6,9 @@
#include "taos.h"
#include "tsdb.h"
+#pragma GCC diagnostic ignored "-Wunused-function"
+#pragma GCC diagnostic ignored "-Wunused-variable"
+
namespace {
// simple test
void simpleTest() {
diff --git a/src/query/tests/tsBufTest.cpp b/src/query/tests/tsBufTest.cpp
index dd7f03a494ac37229b117c678fc461d455067850..04c5a152520d08329408253af271c4d43c5c0fe3 100644
--- a/src/query/tests/tsBufTest.cpp
+++ b/src/query/tests/tsBufTest.cpp
@@ -9,6 +9,10 @@
#include "ttoken.h"
#include "tutil.h"
+#pragma GCC diagnostic ignored "-Wunused-function"
+#pragma GCC diagnostic ignored "-Wunused-variable"
+#pragma GCC diagnostic ignored "-Wunused-but-set-variable"
+
namespace {
/**
*
diff --git a/src/query/tests/unitTest.cpp b/src/query/tests/unitTest.cpp
index 33ba8200d3afb9cff00f150ab5bef799f3fa1e86..e5487a061d8c2b79fbd7d321c256443c3ddab97b 100644
--- a/src/query/tests/unitTest.cpp
+++ b/src/query/tests/unitTest.cpp
@@ -6,14 +6,17 @@
#include "taos.h"
#include "tsdb.h"
+#pragma GCC diagnostic ignored "-Wwrite-strings"
+#pragma GCC diagnostic ignored "-Wunused-function"
+#pragma GCC diagnostic ignored "-Wunused-variable"
+#pragma GCC diagnostic ignored "-Wunused-but-set-variable"
+#pragma GCC diagnostic ignored "-Wsign-compare"
+
#include "../../client/inc/tscUtil.h"
#include "tutil.h"
#include "tvariant.h"
#include "ttokendef.h"
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wwrite-strings"
-
namespace {
int32_t testValidateName(char* name) {
SStrToken token = {0};
diff --git a/src/tfs/src/tfs.c b/src/tfs/src/tfs.c
index f78535b8ed5bd17cb232fbcbc73f9831db099547..9dc68dcdfdb73fec46bebd4d9b27e361cfc2d570 100644
--- a/src/tfs/src/tfs.c
+++ b/src/tfs/src/tfs.c
@@ -480,11 +480,13 @@ static int tfsFormatDir(char *idir, char *odir) {
return -1;
}
- if (realpath(wep.we_wordv[0], odir) == NULL) {
+ char tmp[PATH_MAX] = {0};
+ if (realpath(wep.we_wordv[0], tmp) == NULL) {
terrno = TAOS_SYSTEM_ERROR(errno);
wordfree(&wep);
return -1;
}
+ strcpy(odir, tmp);
wordfree(&wep);
return 0;
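
The tfs.c fix resolves into a local scratch buffer first because POSIX `realpath` with a non-NULL second argument requires a buffer of at least `PATH_MAX` bytes, which `odir` is not guaranteed to provide; it also leaves `odir` untouched when resolution fails. A small sketch of the same pattern, with a bounds check added that the patch itself omits (the helper name is hypothetical):

```c
/* Resolve into a PATH_MAX-sized scratch buffer so realpath() always has
 * enough room, and copy to the destination only on success, leaving it
 * unmodified on failure. */
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int resolvePath(const char *in, char *out, size_t outLen) {
  char tmp[PATH_MAX] = {0};
  if (realpath(in, tmp) == NULL) {
    return -1;                      /* 'out' is left unmodified */
  }
  if (strlen(tmp) + 1 > outLen) {
    return -1;                      /* destination too small */
  }
  strcpy(out, tmp);
  return 0;
}

int main(void) {
  char odir[PATH_MAX];
  if (resolvePath("/tmp", odir, sizeof(odir)) == 0) {
    printf("resolved: %s\n", odir);
  }
  return 0;
}
```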
diff --git a/src/tsdb/src/tsdbRead.c b/src/tsdb/src/tsdbRead.c
index 92edd4d160f56973d465a80018ebac8df3efce50..92a0d489b3b28820a20706318883bb7b6a280820 100644
--- a/src/tsdb/src/tsdbRead.c
+++ b/src/tsdb/src/tsdbRead.c
@@ -3364,7 +3364,7 @@ static bool tableFilterFp(const void* pNode, void* param) {
GET_TYPED_DATA(v, uint64_t, pInfo->sch.type, val);
return NULL != taosHashGet((SHashObj *)pInfo->q, (char *)&v, sizeof(v));
}
- else if (type == TSDB_DATA_TYPE_DOUBLE || type == TSDB_DATA_TYPE_DOUBLE) {
+ else if (type == TSDB_DATA_TYPE_DOUBLE || type == TSDB_DATA_TYPE_FLOAT) {
double v;
GET_TYPED_DATA(v, double, pInfo->sch.type, val);
return NULL != taosHashGet((SHashObj *)pInfo->q, (char *)&v, sizeof(v));
diff --git a/src/util/inc/tsched.h b/src/util/inc/tsched.h
index 3e481cbc327b495975fb03bc4e4d850e4372f044..a1591512c1f87f524837a7986e3c8b3e14e25924 100644
--- a/src/util/inc/tsched.h
+++ b/src/util/inc/tsched.h
@@ -28,10 +28,41 @@ typedef struct SSchedMsg {
void *thandle;
} SSchedMsg;
-void *taosInitScheduler(int queueSize, int numOfThreads, const char *label);
-void *taosInitSchedulerWithInfo(int queueSize, int numOfThreads, const char *label, void *tmrCtrl);
-int taosScheduleTask(void *qhandle, SSchedMsg *pMsg);
-void taosCleanUpScheduler(void *param);
+/**
+ * Create a thread-safe ring-buffer based task queue and return the instance. A thread
+ * pool will be created to consume the messages in the queue.
+ * @param capacity the queue capacity
+ * @param numOfThreads the number of threads for the thread pool
+ * @param label the label of the queue
+ * @return the created queue scheduler
+ */
+void *taosInitScheduler(int capacity, int numOfThreads, const char *label);
+
+/**
+ * Create a thread-safe ring-buffer based task queue and return the instance.
+ * Same as taosInitScheduler, but it also prints the queue status every minute.
+ * @param capacity the queue capacity
+ * @param numOfThreads the number of threads for the thread pool
+ * @param label the label of the queue
+ * @param tmrCtrl the timer controller, tmr_ctrl_t*
+ * @return the created queue scheduler
+ */
+void *taosInitSchedulerWithInfo(int capacity, int numOfThreads, const char *label, void *tmrCtrl);
+
+/**
+ * Clean up the queue scheduler instance and free the memory.
+ * @param queueScheduler the queue scheduler to free
+ */
+void taosCleanUpScheduler(void *queueScheduler);
+
+/**
+ * Schedule a new task to run; the task is described by pMsg.
+ * The call may block if no worker thread is available to execute the task,
+ * which can happen when all threads are busy.
+ * @param queueScheduler the queue scheduler instance
+ * @param pMsg the message for the task
+ */
+void taosScheduleTask(void *queueScheduler, SSchedMsg *pMsg);
#ifdef __cplusplus
}
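
A hypothetical caller of the API documented above, assuming `SSchedMsg` carries a callback `fp` and a user pointer `ahandle`, as the dispatch in tsched.c (`if (msg.fp) (*(msg.fp))(&msg);`) suggests; only `thandle` is visible in this hunk. The sketch compiles stand-alone but links only against the real tsched implementation:

```c
/* Hypothetical usage sketch of the scheduler API; the fp and ahandle
 * fields are assumptions based on the dispatch code in tsched.c. */
#include <stdio.h>

typedef struct SSchedMsg {
  void (*fp)(struct SSchedMsg *);  /* task entry point (assumed) */
  void  *ahandle;                  /* user context (assumed) */
  void  *thandle;
} SSchedMsg;

void *taosInitScheduler(int capacity, int numOfThreads, const char *label);
void  taosScheduleTask(void *queueScheduler, SSchedMsg *pMsg);
void  taosCleanUpScheduler(void *queueScheduler);

static void sayHello(SSchedMsg *pMsg) {
  printf("hello from task, ctx=%s\n", (const char *)pMsg->ahandle);
}

int demo(void) {
  /* 128-slot ring buffer drained by 2 worker threads. */
  void *sched = taosInitScheduler(128, 2, "demo");
  if (sched == NULL) return -1;

  SSchedMsg msg = {.fp = sayHello, .ahandle = "demo-ctx"};
  taosScheduleTask(sched, &msg);  /* may block while the queue is full */

  taosCleanUpScheduler(sched);
  return 0;
}
```

Note the matching tsched.c changes below escalate semaphore and mutex failures from uError to uFatal followed by exit(), treating a broken queue invariant as unrecoverable rather than limping on with a corrupted ring buffer.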
diff --git a/src/util/src/tconfig.c b/src/util/src/tconfig.c
index c4bd57760222ac3da7d25510cc2f434fe0cf0cac..442e83bb4f76499d7ce39792fc188d61536910c2 100644
--- a/src/util/src/tconfig.c
+++ b/src/util/src/tconfig.c
@@ -151,7 +151,7 @@ static bool taosReadDirectoryConfig(SGlobalCfg *cfg, char *input_value) {
wordfree(&full_path);
- char tmp[1025] = {0};
+ char tmp[PATH_MAX] = {0};
if (realpath(option, tmp) != NULL) {
strcpy(option, tmp);
}
diff --git a/src/util/src/tsched.c b/src/util/src/tsched.c
index f014dd0fab5494bd85f197e2c79fac53359e8edf..16142470c95678b8663f3bd437357dcdb22635a5 100644
--- a/src/util/src/tsched.c
+++ b/src/util/src/tsched.c
@@ -108,39 +108,47 @@ void *taosInitScheduler(int queueSize, int numOfThreads, const char *label) {
void *taosInitSchedulerWithInfo(int queueSize, int numOfThreads, const char *label, void *tmrCtrl) {
SSchedQueue* pSched = taosInitScheduler(queueSize, numOfThreads, label);
-
+
if (tmrCtrl != NULL && pSched != NULL) {
pSched->pTmrCtrl = tmrCtrl;
taosTmrReset(taosDumpSchedulerStatus, DUMP_SCHEDULER_TIME_WINDOW, pSched, pSched->pTmrCtrl, &pSched->pTimer);
}
-
+
return pSched;
}
-void *taosProcessSchedQueue(void *param) {
+void *taosProcessSchedQueue(void *scheduler) {
SSchedMsg msg;
- SSchedQueue *pSched = (SSchedQueue *)param;
+ SSchedQueue *pSched = (SSchedQueue *)scheduler;
+ int ret = 0;
while (1) {
- if (tsem_wait(&pSched->fullSem) != 0) {
- uError("wait %s fullSem failed(%s)", pSched->label, strerror(errno));
+ if ((ret = tsem_wait(&pSched->fullSem)) != 0) {
+ uFatal("wait %s fullSem failed(%s)", pSched->label, strerror(errno));
+ exit(ret);
}
if (pSched->stop) {
break;
}
- if (pthread_mutex_lock(&pSched->queueMutex) != 0)
- uError("lock %s queueMutex failed(%s)", pSched->label, strerror(errno));
+ if ((ret = pthread_mutex_lock(&pSched->queueMutex)) != 0) {
+ uFatal("lock %s queueMutex failed(%s)", pSched->label, strerror(errno));
+ exit(ret);
+ }
msg = pSched->queue[pSched->fullSlot];
memset(pSched->queue + pSched->fullSlot, 0, sizeof(SSchedMsg));
pSched->fullSlot = (pSched->fullSlot + 1) % pSched->queueSize;
- if (pthread_mutex_unlock(&pSched->queueMutex) != 0)
- uError("unlock %s queueMutex failed(%s)", pSched->label, strerror(errno));
+ if ((ret = pthread_mutex_unlock(&pSched->queueMutex)) != 0) {
+ uFatal("unlock %s queueMutex failed(%s)", pSched->label, strerror(errno));
+ exit(ret);
+ }
- if (tsem_post(&pSched->emptySem) != 0)
- uError("post %s emptySem failed(%s)", pSched->label, strerror(errno));
+ if ((ret = tsem_post(&pSched->emptySem)) != 0) {
+ uFatal("post %s emptySem failed(%s)", pSched->label, strerror(errno));
+ exit(ret);
+ }
if (msg.fp)
(*(msg.fp))(&msg);
@@ -151,30 +159,37 @@ void *taosProcessSchedQueue(void *param) {
return NULL;
}
-int taosScheduleTask(void *qhandle, SSchedMsg *pMsg) {
- SSchedQueue *pSched = (SSchedQueue *)qhandle;
+void taosScheduleTask(void *queueScheduler, SSchedMsg *pMsg) {
+ SSchedQueue *pSched = (SSchedQueue *)queueScheduler;
+ int ret = 0;
+
if (pSched == NULL) {
uError("sched is not ready, msg:%p is dropped", pMsg);
- return 0;
+ return;
}
- if (tsem_wait(&pSched->emptySem) != 0) {
- uError("wait %s emptySem failed(%s)", pSched->label, strerror(errno));
+ if ((ret = tsem_wait(&pSched->emptySem)) != 0) {
+ uFatal("wait %s emptySem failed(%s)", pSched->label, strerror(errno));
+ exit(ret);
}
- if (pthread_mutex_lock(&pSched->queueMutex) != 0)
- uError("lock %s queueMutex failed(%s)", pSched->label, strerror(errno));
+ if ((ret = pthread_mutex_lock(&pSched->queueMutex)) != 0) {
+ uFatal("lock %s queueMutex failed(%s)", pSched->label, strerror(errno));
+ exit(ret);
+ }
pSched->queue[pSched->emptySlot] = *pMsg;
pSched->emptySlot = (pSched->emptySlot + 1) % pSched->queueSize;
- if (pthread_mutex_unlock(&pSched->queueMutex) != 0)
- uError("unlock %s queueMutex failed(%s)", pSched->label, strerror(errno));
-
- if (tsem_post(&pSched->fullSem) != 0)
- uError("post %s fullSem failed(%s)", pSched->label, strerror(errno));
+ if ((ret = pthread_mutex_unlock(&pSched->queueMutex)) != 0) {
+ uFatal("unlock %s queueMutex failed(%s)", pSched->label, strerror(errno));
+ exit(ret);
+ }
- return 0;
+ if ((ret = tsem_post(&pSched->fullSem)) != 0) {
+ uFatal("post %s fullSem failed(%s)", pSched->label, strerror(errno));
+ exit(ret);
+ }
}
void taosCleanUpScheduler(void *param) {
@@ -219,4 +234,4 @@ void taosDumpSchedulerStatus(void *qhandle, void *tmrId) {
}
taosTmrReset(taosDumpSchedulerStatus, DUMP_SCHEDULER_TIME_WINDOW, pSched, pSched->pTmrCtrl, &pSched->pTimer);
-}
+}
\ No newline at end of file
diff --git a/tests/pytest/fulltest.sh b/tests/pytest/fulltest.sh
index ed298a1a8d7a48982dac22d26d96bd30de08b1e1..c66ccc547740b8514984debe5aab05c5ed844254 100755
--- a/tests/pytest/fulltest.sh
+++ b/tests/pytest/fulltest.sh
@@ -235,6 +235,8 @@ python3 ./test.py -f query/queryTscomputWithNow.py
python3 ./test.py -f query/computeErrorinWhere.py
python3 ./test.py -f query/queryTsisNull.py
python3 ./test.py -f query/subqueryFilter.py
+# python3 ./test.py -f query/nestedQuery/queryInterval.py
+python3 ./test.py -f query/queryStateWindow.py
#stream
@@ -325,6 +327,7 @@ python3 ./test.py -f query/queryGroupbySort.py
python3 ./test.py -f functions/queryTestCases.py
python3 ./test.py -f functions/function_stateWindow.py
python3 ./test.py -f functions/function_derivative.py
+python3 ./test.py -f functions/function_irate.py
python3 ./test.py -f insert/unsignedInt.py
python3 ./test.py -f insert/unsignedBigint.py
diff --git a/tests/pytest/functions/function_irate.py b/tests/pytest/functions/function_irate.py
new file mode 100644
index 0000000000000000000000000000000000000000..2c85e1bbdd088ecc61eb063f6567d12f1faeebfe
--- /dev/null
+++ b/tests/pytest/functions/function_irate.py
@@ -0,0 +1,228 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+import taos
+from util.log import *
+from util.cases import *
+from util.sql import *
+import numpy as np
+
+
+class TDTestCase:
+ def init(self, conn, logSql):
+ tdLog.debug("start to execute %s" % __file__)
+        tdSql.init(conn.cursor(), logSql)
+
+ self.rowNum = 100
+ self.ts = 1537146000000
+ self.ts1 = 1537146000000000
+
+
+ def run(self):
+        # db precision ms
+ tdSql.prepare()
+ tdSql.execute('''create table test(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double,
+ col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned) tags(loc nchar(20), tag1 int)''')
+ tdSql.execute("create table test1 using test tags('beijing', 10)")
+ tdSql.execute("create table gtest1 (ts timestamp, col1 float)")
+ tdSql.execute("create table gtest2 (ts timestamp, col1 tinyint)")
+ tdSql.execute("create table gtest3 (ts timestamp, col1 tinyint)")
+ tdSql.execute("create table gtest4 (ts timestamp, col1 tinyint)")
+ tdSql.execute("create table gtest5 (ts timestamp, col1 tinyint)")
+ tdSql.execute("create table gtest6 (ts timestamp, col1 tinyint)")
+ tdSql.execute("create table gtest7 (ts timestamp, col1 tinyint)")
+ tdSql.execute("create table gtest8 (ts timestamp, col1 tinyint)")
+
+
+ for i in range(self.rowNum):
+ tdSql.execute("insert into test1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)"
+ % (self.ts + i*1000, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1))
+
+ tdSql.execute("insert into gtest1 values(1537146000000,0);")
+ tdSql.execute("insert into gtest1 values(1537146001100,1.2);")
+ tdSql.execute("insert into gtest2 values(1537146001001,1);")
+ tdSql.execute("insert into gtest2 values(1537146001101,2);")
+ tdSql.execute("insert into gtest3 values(1537146001101,2);")
+ tdSql.execute("insert into gtest4(ts) values(1537146001101);")
+ tdSql.execute("insert into gtest5 values(1537146001002,4);")
+ tdSql.execute("insert into gtest5 values(1537146002202,4);")
+ tdSql.execute("insert into gtest6 values(1537146000000,5);")
+ tdSql.execute("insert into gtest6 values(1537146001000,2);")
+ tdSql.execute("insert into gtest7 values(1537146001000,1);")
+ tdSql.execute("insert into gtest7 values(1537146008000,2);")
+ tdSql.execute("insert into gtest7 values(1537146009000,6);")
+ tdSql.execute("insert into gtest7 values(1537146012000,3);")
+ tdSql.execute("insert into gtest7 values(1537146015000,3);")
+ tdSql.execute("insert into gtest7 values(1537146017000,1);")
+ tdSql.execute("insert into gtest7 values(1537146019000,3);")
+ tdSql.execute("insert into gtest8 values(1537146000002,4);")
+ tdSql.execute("insert into gtest8 values(1537146002202,4);")
+
+        # irate verification
+ tdSql.query("select irate(col1) from test1;")
+ tdSql.checkData(0, 0, 1)
+ tdSql.query("select irate(col1) from test1 interval(10s);")
+ tdSql.checkData(0, 1, 1)
+ tdSql.query("select irate(col1) from test1;")
+ tdSql.checkData(0, 0, 1)
+ tdSql.query("select irate(col2) from test1;")
+ tdSql.checkData(0, 0, 1)
+ tdSql.query("select irate(col3) from test1;")
+ tdSql.checkData(0, 0, 1)
+ tdSql.query("select irate(col4) from test1;")
+ tdSql.checkData(0, 0, 1)
+ tdSql.query("select irate(col5) from test1;")
+ tdSql.checkData(0, 0, 1)
+ tdSql.query("select irate(col6) from test1;")
+ tdSql.checkData(0, 0, 1)
+ tdSql.query("select irate(col11) from test1;")
+ tdSql.checkData(0, 0, 1)
+ tdSql.query("select irate(col12) from test1;")
+ tdSql.checkData(0, 0, 1)
+ tdSql.query("select irate(col13) from test1;")
+ tdSql.checkData(0, 0, 1)
+ tdSql.query("select irate(col14) from test1;")
+ tdSql.checkData(0, 0, 1)
+ tdSql.query("select irate(col2) from test1;")
+ tdSql.checkData(0, 0, 1)
+ tdSql.query("select irate(col2) from test1;")
+ tdSql.checkData(0, 0, 1)
+
+ tdSql.query("select irate(col1) from gtest1;")
+ tdSql.checkData(0, 0, 1.2/1.1)
+ tdSql.query("select irate(col1) from gtest2;")
+ tdSql.checkData(0, 0, 10)
+ tdSql.query("select irate(col1) from gtest3;")
+ tdSql.checkData(0, 0, 0)
+ tdSql.query("select irate(col1) from gtest4;")
+ tdSql.checkRows(0)
+ tdSql.query("select irate(col1) from gtest5;")
+ tdSql.checkData(0, 0, 0)
+ tdSql.query("select irate(col1) from gtest6;")
+ tdSql.checkData(0, 0, 2)
+ tdSql.query("select irate(col1) from gtest7;")
+ tdSql.checkData(0, 0, 1)
+ tdSql.query("select irate(col1) from gtest7 interval(5s) order by ts asc;")
+ tdSql.checkData(1, 1, 4)
+ tdSql.checkData(2, 1, 0)
+ tdSql.checkData(3, 1, 1)
+ tdSql.query("select irate(col1) from gtest7 interval(5s) order by ts desc ;")
+ tdSql.checkData(1, 1, 0)
+ tdSql.checkData(2, 1, 4)
+ tdSql.checkData(3, 1, 0)
+
+ #error
+ tdSql.error("select irate(col1) from test")
+ tdSql.error("select irate(ts) from test1")
+ tdSql.error("select irate(col7) from test1")
+ tdSql.error("select irate(col8) from test1")
+ tdSql.error("select irate(col9) from test1")
+ tdSql.error("select irate(loc) from test1")
+ tdSql.error("select irate(tag1) from test1")
+
+ # use db1 precision us
+ tdSql.execute("create database db1 precision 'us' keep 3650 UPDATE 1")
+ tdSql.execute("use db1 ")
+ tdSql.execute('''create table test(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double,
+ col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned) tags(loc nchar(20))''')
+ tdSql.execute("create table test1 using test tags('beijing')")
+ tdSql.execute("create table gtest1 (ts timestamp, col1 float)")
+ tdSql.execute("create table gtest2 (ts timestamp, col1 tinyint)")
+ tdSql.execute("create table gtest3 (ts timestamp, col1 tinyint)")
+ tdSql.execute("create table gtest4 (ts timestamp, col1 tinyint)")
+ tdSql.execute("create table gtest5 (ts timestamp, col1 tinyint)")
+ tdSql.execute("create table gtest6 (ts timestamp, col1 tinyint)")
+ tdSql.execute("create table gtest7 (ts timestamp, col1 tinyint)")
+ tdSql.execute("create table gtest8 (ts timestamp, col1 tinyint)")
+ tdSql.execute("create table gtest9 (ts timestamp, col1 tinyint)")
+
+ for i in range(self.rowNum):
+ tdSql.execute("insert into test1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)"
+ % (self.ts1 + i*1000000, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1))
+
+ tdSql.execute("insert into gtest1 values(1537146000000000,0);")
+ tdSql.execute("insert into gtest1 values(1537146001100000,1.2);")
+ tdSql.execute("insert into gtest2 values(1537146001001000,1);")
+ tdSql.execute("insert into gtest2 values(1537146001101000,2);")
+ tdSql.execute("insert into gtest3 values(1537146001101000,2);")
+ tdSql.execute("insert into gtest4(ts) values(1537146001101000);")
+ tdSql.execute("insert into gtest5 values(1537146001002000,4);")
+ tdSql.execute("insert into gtest5 values(1537146002202000,4);")
+ tdSql.execute("insert into gtest6 values(1537146000000000,5);")
+ tdSql.execute("insert into gtest6 values(1537146001000000,2);")
+ tdSql.execute("insert into gtest7 values(1537146001000000,1);")
+ tdSql.execute("insert into gtest7 values(1537146008000000,2);")
+ tdSql.execute("insert into gtest7 values(1537146009000000,6);")
+ tdSql.execute("insert into gtest7 values(1537146012000000,3);")
+ tdSql.execute("insert into gtest7 values(1537146015000000,3);")
+ tdSql.execute("insert into gtest7 values(1537146017000000,1);")
+ tdSql.execute("insert into gtest7 values(1537146019000000,3);")
+ tdSql.execute("insert into gtest8 values(1537146000002000,3);")
+ tdSql.execute("insert into gtest8 values(1537146001003000,4);")
+ tdSql.execute("insert into gtest9 values(1537146000000000,4);")
+ tdSql.execute("insert into gtest9 values(1537146000000001,5);")
+
+
+        # irate verification
+ tdSql.query("select irate(col1) from test1;")
+ tdSql.checkData(0, 0, 1)
+ tdSql.query("select irate(col1) from test1 interval(10s);")
+ tdSql.checkData(0, 1, 1)
+ tdSql.query("select irate(col1) from test1;")
+ tdSql.checkData(0, 0, 1)
+ tdSql.query("select irate(col1) from gtest1;")
+ tdSql.checkData(0, 0, 1.2/1.1)
+ tdSql.query("select irate(col1) from gtest2;")
+ tdSql.checkData(0, 0, 10)
+ tdSql.query("select irate(col1) from gtest3;")
+ tdSql.checkData(0, 0, 0)
+ tdSql.query("select irate(col1) from gtest4;")
+ tdSql.checkRows(0)
+ tdSql.query("select irate(col1) from gtest5;")
+ tdSql.checkData(0, 0, 0)
+ tdSql.query("select irate(col1) from gtest6;")
+ tdSql.checkData(0, 0, 2)
+ tdSql.query("select irate(col1) from gtest7;")
+ tdSql.checkData(0, 0, 1)
+ tdSql.query("select irate(col1) from gtest7 interval(5s) order by ts asc;")
+ tdSql.checkData(1, 1, 4)
+ tdSql.checkData(2, 1, 0)
+ tdSql.checkData(3, 1, 1)
+ tdSql.query("select irate(col1) from gtest7 interval(5s) order by ts desc ;")
+ tdSql.checkData(1, 1, 0)
+ tdSql.checkData(2, 1, 4)
+ tdSql.checkData(3, 1, 0)
+ tdSql.query("select irate(col1) from gtest8;")
+ tdSql.checkData(0, 0, 1/1.001)
+ tdSql.query("select irate(col1) from gtest9;")
+ tdSql.checkData(0, 0, 1000000)
+
+ #error
+ tdSql.error("select irate(col1) from test")
+ tdSql.error("select irate(ts) from test1")
+ tdSql.error("select irate(col7) from test1")
+ tdSql.error("select irate(col8) from test1")
+ tdSql.error("select irate(col9) from test1")
+ tdSql.error("select irate(loc) from test1")
+ tdSql.error("select irate(tag1) from test1")
+
+
+
+
+ def stop(self):
+ tdSql.close()
+ tdLog.success("%s successfully executed" % __file__)
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
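
The expected values in this test are consistent with a simple model of `irate()`: the rate is taken from the two most recent samples, and a decreasing pair is treated as a counter reset so that only the newer value counts (hence 2 for gtest6's 5-to-2 drop, and 1000000 for gtest9's +1 over one microsecond). A back-of-the-envelope model of that rule, not TDengine's implementation:

```c
/* Assumed irate() semantics reconstructed from the test expectations:
 * rate between the last two samples, with negative deltas treated as a
 * counter reset (only the newer value is counted). */
#include <stdio.h>

/* ts is in the table's native precision; tsPerSec converts it to seconds
 * (1000 for ms-precision tables, 1000000 for us-precision tables). */
static double irate(long long ts0, double v0, long long ts1, double v1,
                    double tsPerSec) {
  double dv = (v1 >= v0) ? (v1 - v0) : v1;   /* counter-reset rule */
  return dv / ((double)(ts1 - ts0) / tsPerSec);
}

int main(void) {
  /* gtest1 (ms): 0 @ ...000, 1.2 @ ...100  ->  1.2 / 1.1 */
  printf("%f\n", irate(1537146000000LL, 0, 1537146001100LL, 1.2, 1000));
  /* gtest6 (ms): 5 then 2, one second apart  ->  2 (reset) */
  printf("%f\n", irate(1537146000000LL, 5, 1537146001000LL, 2, 1000));
  /* gtest9 (us): 4 then 5, one microsecond apart  ->  1000000 */
  printf("%f\n", irate(1537146000000000LL, 4, 1537146000000001LL, 5, 1000000));
  return 0;
}
```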
diff --git a/tests/pytest/insert/in_function.py b/tests/pytest/insert/in_function.py
index d1fbfd702a857ab6d966aff46e04a32ac933c384..3f2e1a03cad0a74c665341ac04250ec8a239ad6f 100644
--- a/tests/pytest/insert/in_function.py
+++ b/tests/pytest/insert/in_function.py
@@ -18,7 +18,6 @@ from util.log import *
from util.cases import *
from util.sql import *
-
class TDTestCase:
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
@@ -27,6 +26,7 @@ class TDTestCase:
def run(self):
tdSql.prepare()
# test case for https://jira.taosdata.com:18080/browse/TD-4568
+ # test case for https://jira.taosdata.com:18080/browse/TD-4824
tdLog.info("=============== step1,check bool and tinyint data type")
@@ -137,8 +137,28 @@ class TDTestCase:
tdSql.checkData(0,1,'True')
tdSql.checkData(0,2,'0')
+ tdLog.info("=============== step1.3,multiple column and multiple tag check in function")
+ cmd1 = '''select * from in_stable_1
+ where in_bool in (true,false) and in_tinyint in (0,127,-127)
+ and tin_bool in (true,false) and tin_tinyint in (0,127,-127)
+ order by ts desc ;'''
+ tdLog.info(cmd1)
+ tdSql.query(cmd1)
+ tdSql.checkData(0,1,'True')
+ tdSql.checkData(0,2,'0')
+ tdSql.checkData(0,3,'False')
+ tdSql.checkData(0,4,'0')
+ tdSql.checkData(1,1,'False')
+ tdSql.checkData(1,2,'127')
+ tdSql.checkData(1,3,'False')
+ tdSql.checkData(1,4,'-127')
+ tdSql.checkData(2,1,'True')
+ tdSql.checkData(2,2,'-127')
+ tdSql.checkData(2,3,'True')
+ tdSql.checkData(2,4,'127')
+
- tdLog.info("=============== step1.3,drop normal table && create table")
+ tdLog.info("=============== step1.4,drop normal table && create table")
cmd1 = 'drop table if exists normal_in_bool_tinyint_1 ;'
cmd2 = 'create table normal_in_bool_tinyint_1 (ts timestamp,in_bool bool,in_tinyint tinyint) ; '
tdLog.info(cmd1)
@@ -147,7 +167,7 @@ class TDTestCase:
tdSql.execute(cmd2)
- tdLog.info("=============== step1.4,insert normal table right data and check in function")
+ tdLog.info("=============== step1.5,insert normal table right data and check in function")
cmd1 = 'insert into normal_in_bool_tinyint_1 values(now,\'true\',\'-127\') ;'
tdLog.info(cmd1)
tdSql.execute(cmd1)
@@ -175,6 +195,17 @@ class TDTestCase:
tdSql.checkData(0,1,'True')
tdSql.checkData(0,2,'0')
+ cmd4 = '''select * from normal_in_bool_tinyint_1
+ where in_bool in (true,false) and in_tinyint in (0,127,-127)
+ order by ts desc ;'''
+ tdLog.info(cmd4)
+ tdSql.query(cmd4)
+ tdSql.checkData(0,1,'True')
+ tdSql.checkData(0,2,'0')
+ tdSql.checkData(1,1,'False')
+ tdSql.checkData(1,2,'127')
+ tdSql.checkData(2,1,'True')
+ tdSql.checkData(2,2,'-127')
tdLog.info("=============== step2,check int、smallint and bigint data type")
@@ -378,10 +409,39 @@ class TDTestCase:
tdSql.query('select * from in_int_smallint_bigint_3 where in_big in (-9223372036854775807) order by ts desc')
tdSql.checkData(0,1,'0')
tdSql.checkData(0,2,'32767')
- tdSql.checkData(0,3,'-9223372036854775807')
+ tdSql.checkData(0,3,'-9223372036854775807')
+
+
+ tdLog.info("=============== step2.3,multiple column and multiple tag check in function")
+ cmd1 = '''select * from in_stable_2
+ where in_int in (0,2147483647,-2147483647) and in_small in (0,32767,-32767)
+ and in_big in (0,9223372036854775807,-9223372036854775807)
+ and tin_int in (0,2147483647,-2147483647) and tin_small in (0,32767,-32767)
+ and tin_big in (0,9223372036854775807,-9223372036854775807)
+ order by ts desc ;'''
+ tdLog.info(cmd1)
+ tdSql.query(cmd1)
+ tdSql.checkData(0,1,'0')
+ tdSql.checkData(0,2,'32767')
+ tdSql.checkData(0,3,'-9223372036854775807')
+ tdSql.checkData(0,4,'0')
+ tdSql.checkData(0,5,'32767')
+ tdSql.checkData(0,6,'-9223372036854775807')
+ tdSql.checkData(1,1,'-2147483647')
+ tdSql.checkData(1,2,'0')
+ tdSql.checkData(1,3,'9223372036854775807')
+ tdSql.checkData(1,4,'-2147483647')
+ tdSql.checkData(1,5,'0')
+ tdSql.checkData(1,6,'9223372036854775807')
+ tdSql.checkData(2,1,'2147483647')
+ tdSql.checkData(2,2,'-32767')
+ tdSql.checkData(2,3,'0')
+ tdSql.checkData(2,4,'2147483647')
+ tdSql.checkData(2,5,'-32767')
+ tdSql.checkData(2,6,'0')
- tdLog.info("=============== step2.3,drop normal table && create table")
+ tdLog.info("=============== step2.4,drop normal table && create table")
cmd1 = 'drop table if exists normal_int_smallint_bigint_1 ;'
cmd2 = 'create table normal_int_smallint_bigint_1 (ts timestamp,in_int int,in_small smallint , in_big bigint) ; '
tdLog.info(cmd1)
@@ -390,7 +450,7 @@ class TDTestCase:
tdSql.execute(cmd2)
- tdLog.info("=============== step2.4,insert normal table right data and check in function")
+ tdLog.info("=============== step2.5,insert normal table right data and check in function")
cmd1 = 'insert into normal_int_smallint_bigint_1 values(now,\'2147483647\',\'-32767\',\'0\') ;'
tdLog.info(cmd1)
tdSql.execute(cmd1)
@@ -437,7 +497,23 @@ class TDTestCase:
tdSql.query('select * from normal_int_smallint_bigint_1 where in_big in (-9223372036854775807) order by ts desc')
tdSql.checkData(0,1,'0')
tdSql.checkData(0,2,'32767')
- tdSql.checkData(0,3,'-9223372036854775807')
+ tdSql.checkData(0,3,'-9223372036854775807')
+
+ cmd4 = '''select * from normal_int_smallint_bigint_1
+ where in_int in (0,2147483647,-2147483647) and in_small in (0,32767,-32767)
+ and in_big in (0,9223372036854775807,-9223372036854775807)
+ order by ts desc ;'''
+ tdLog.info(cmd4)
+ tdSql.query(cmd4)
+ tdSql.checkData(0,1,'0')
+ tdSql.checkData(0,2,'32767')
+ tdSql.checkData(0,3,'-9223372036854775807')
+ tdSql.checkData(1,1,'-2147483647')
+ tdSql.checkData(1,2,'0')
+ tdSql.checkData(1,3,'9223372036854775807')
+ tdSql.checkData(2,1,'2147483647')
+ tdSql.checkData(2,2,'-32767')
+ tdSql.checkData(2,3,'0')
tdLog.info("=============== step3,check binary and nchar data type")
@@ -560,7 +636,30 @@ class TDTestCase:
tdSql.checkData(0,2,'北京涛思数据科技有限公司')
- tdLog.info("=============== step3.3,drop normal table && create table")
+ tdLog.info("=============== step3.3,multiple column and multiple tag check in function")
+ cmd1 = '''select * from in_stable_3
+ where in_binary in (\'0\',\'TDengine\',\'TAOS\')
+ and in_nchar in (\'0\',\'北京涛思数据科技有限公司\',\'涛思数据TAOSdata\')
+ and tin_binary in (\'0\',\'TDengine\',\'taosdataTDengine\')
+ and tin_nchar in (\'0\',\'北京涛思数据科技有限公司\',\'北京涛思数据科技有限公司TDengine\')
+ order by ts desc ;'''
+ tdLog.info(cmd1)
+ tdSql.query(cmd1)
+ tdSql.checkData(0,1,'TDengine')
+ tdSql.checkData(0,2,'北京涛思数据科技有限公司')
+ tdSql.checkData(0,3,'taosdataTDengine')
+ tdSql.checkData(0,4,'北京涛思数据科技有限公司TDengine')
+ tdSql.checkData(1,1,'TAOS')
+ tdSql.checkData(1,2,'涛思数据TAOSdata')
+ tdSql.checkData(1,3,'TDengine')
+ tdSql.checkData(1,4,'北京涛思数据科技有限公司')
+ tdSql.checkData(2,1,'0')
+ tdSql.checkData(2,2,'0')
+ tdSql.checkData(2,3,'0')
+ tdSql.checkData(2,4,'0')
+
+
+ tdLog.info("=============== step3.4,drop normal table && create table")
cmd1 = 'drop table if exists normal_in_binary_nchar_1 ;'
cmd2 = 'create table normal_in_binary_nchar_1 (ts timestamp,in_binary binary(8),in_nchar nchar(12)) ; '
tdLog.info(cmd1)
@@ -569,7 +668,7 @@ class TDTestCase:
tdSql.execute(cmd2)
- tdLog.info("=============== step3.4,insert normal table right data and check in function")
+ tdLog.info("=============== step3.5,insert normal table right data and check in function")
cmd1 = 'insert into normal_in_binary_nchar_1 values(now,\'0\',\'0\') ;'
tdLog.info(cmd1)
tdSql.execute(cmd1)
@@ -598,124 +697,413 @@ class TDTestCase:
tdSql.checkData(0,2,'北京涛思数据科技有限公司')
tdSql.query('select * from normal_in_binary_nchar_1 where in_nchar in (\'北京涛思数据科技有限公司\') order by ts desc')
tdSql.checkData(0,1,'TDengine')
- tdSql.checkData(0,2,'北京涛思数据科技有限公司')
+ tdSql.checkData(0,2,'北京涛思数据科技有限公司')
+
+ cmd4 = '''select * from normal_in_binary_nchar_1
+ where in_binary in (\'0\',\'TDengine\',\'TAOS\')
+ and in_nchar in (\'0\',\'北京涛思数据科技有限公司\',\'涛思数据TAOSdata\')
+ order by ts desc ;'''
+ tdLog.info(cmd4)
+ tdSql.query(cmd4)
+ tdSql.checkData(0,1,'TDengine')
+ tdSql.checkData(0,2,'北京涛思数据科技有限公司')
+ tdSql.checkData(1,1,'TAOS')
+ tdSql.checkData(1,2,'涛思数据TAOSdata')
+ tdSql.checkData(2,1,'0')
+ tdSql.checkData(2,2,'0')
+
- tdLog.info("=============== step4,check float and double data type,not support")
+ tdLog.info("=============== step4,check float and double data type")
tdLog.info("=============== step4.1,drop table && create table")
- cmd1 = 'drop table if exists in_float_double_1 ;'
+ cmd1 = 'drop table if exists in_ts_float_double_1 ;'
+ cmd2 = 'drop table if exists in_ts_float_double_2 ;'
+ cmd3 = 'drop table if exists in_ts_float_double_3 ;'
cmd10 = 'drop table if exists in_stable_4 ;'
- cmd11 = 'create stable in_stable_4(ts timestamp,in_float float,in_double double) tags (tin_float float,tin_double double) ;'
- cmd12 = 'create table in_float_double_1 using in_stable_4 tags(\'666\',\'88888\') ; '
+ cmd11 = 'create stable in_stable_4(ts timestamp,in_ts timestamp,in_float float,in_double double) tags (tin_ts timestamp,tin_float float,tin_double double) ;'
+ cmd12 = 'create table in_ts_float_double_1 using in_stable_4 tags(\'0\',\'0\',\'0\') ; '
+ cmd13 = 'create table in_ts_float_double_2 using in_stable_4 tags(\'2020-01-01 08:00:00.001\',\'666\',\'-88888\') ; '
+ cmd14 = 'create table in_ts_float_double_3 using in_stable_4 tags(\'2021-01-01 08:00:00.001\',\'-888.00000\',\'66666.000000000\') ; '
tdLog.info(cmd1)
tdSql.execute(cmd1)
+ tdLog.info(cmd2)
+ tdSql.execute(cmd2)
+ tdLog.info(cmd3)
+ tdSql.execute(cmd3)
tdLog.info(cmd10)
tdSql.execute(cmd10)
tdLog.info(cmd11)
tdSql.execute(cmd11)
tdLog.info(cmd12)
tdSql.execute(cmd12)
+ tdLog.info(cmd13)
+ tdSql.execute(cmd13)
+ tdLog.info(cmd14)
+ tdSql.execute(cmd14)
tdLog.info("=============== step4.2,insert stable right data and check in function")
- cmd1 = 'insert into in_float_double_1 values(now,\'888\',\'66666\') ;'
+ cmd1 = 'insert into in_ts_float_double_1 values(now,\'0\',\'0\',\'0\') ;'
tdLog.info(cmd1)
- tdSql.execute(cmd1)
+ tdSql.execute(cmd1)
+
+ tdSql.query('select * from in_stable_4 where in_ts in (\'0\') order by ts desc')
+ tdSql.checkData(0,1,'1970-01-01 08:00:00.000')
+ tdSql.checkData(0,2,0.00000)
+ tdSql.checkData(0,3,0.000000000)
+ tdSql.checkData(0,4,'1970-01-01 08:00:00.000')
+ tdSql.checkData(0,5,0.00000)
+ tdSql.checkData(0,6,0.000000000)
+ tdSql.query('select * from in_stable_4 where in_ts in (\'1970-01-01 08:00:00.000\') order by ts desc')
+ tdSql.checkData(0,1,'1970-01-01 08:00:00.000')
+ tdSql.checkData(0,2,0.00000)
+ tdSql.checkData(0,3,0.000000000)
+ tdSql.checkData(0,4,'1970-01-01 08:00:00.000')
+ tdSql.checkData(0,5,0.00000)
+ tdSql.checkData(0,6,0.000000000)
+ tdSql.query('select * from in_stable_4 where in_float in (0.00000) order by ts desc')
+ tdSql.checkData(0,1,'1970-01-01 08:00:00.000')
+ tdSql.checkData(0,2,0.00000)
+ tdSql.checkData(0,3,0.000000000)
+ tdSql.checkData(0,4,'1970-01-01 08:00:00.000')
+ tdSql.checkData(0,5,0.00000)
+ tdSql.checkData(0,6,0.000000000)
+ tdSql.query('select * from in_stable_4 where in_double in (0.000000000) order by ts desc')
+ tdSql.checkData(0,1,'1970-01-01 08:00:00.000')
+ tdSql.checkData(0,2,0.00000)
+ tdSql.checkData(0,3,0.000000000)
+ tdSql.checkData(0,4,'1970-01-01 08:00:00.000')
+ tdSql.checkData(0,5,0.00000)
+ tdSql.checkData(0,6,0.000000000)
+
+ tdSql.query('select * from in_stable_4 where tin_ts in (\'0\') order by ts desc')
+ tdSql.checkData(0,1,'1970-01-01 08:00:00.000')
+ tdSql.checkData(0,2,0.00000)
+ tdSql.checkData(0,3,0.000000000)
+ tdSql.checkData(0,4,'1970-01-01 08:00:00.000')
+ tdSql.checkData(0,5,0.00000)
+ tdSql.checkData(0,6,0.000000000)
+ tdSql.query('select * from in_stable_4 where tin_ts in (\'1970-01-01 08:00:00.000\') order by ts desc')
+ tdSql.checkData(0,1,'1970-01-01 08:00:00.000')
+ tdSql.checkData(0,2,0.00000)
+ tdSql.checkData(0,3,0.000000000)
+ tdSql.checkData(0,4,'1970-01-01 08:00:00.000')
+ tdSql.checkData(0,5,0.00000)
+ tdSql.checkData(0,6,0.000000000)
+ tdSql.query('select * from in_stable_4 where tin_float in (0.00000) order by ts desc')
+ tdSql.checkData(0,1,'1970-01-01 08:00:00.000')
+ tdSql.checkData(0,2,0.00000)
+ tdSql.checkData(0,3,0.000000000)
+ tdSql.checkData(0,4,'1970-01-01 08:00:00.000')
+ tdSql.checkData(0,5,0.00000)
+ tdSql.checkData(0,6,0.000000000)
+ tdSql.query('select * from in_stable_4 where tin_double in (0.000000000) order by ts desc')
+ tdSql.checkData(0,1,'1970-01-01 08:00:00.000')
+ tdSql.checkData(0,2,0.00000)
+ tdSql.checkData(0,3,0.000000000)
+ tdSql.checkData(0,4,'1970-01-01 08:00:00.000')
+ tdSql.checkData(0,5,0.00000)
+ tdSql.checkData(0,6,0.000000000)
+
+ tdSql.query('select * from in_ts_float_double_1 where in_ts in (\'0\') order by ts desc')
+ tdSql.checkData(0,1,'1970-01-01 08:00:00.000')
+ tdSql.checkData(0,2,0.00000)
+ tdSql.checkData(0,3,0.000000000)
+ tdSql.query('select * from in_ts_float_double_1 where in_ts in (\'1970-01-01 08:00:00.000\') order by ts desc')
+ tdSql.checkData(0,1,'1970-01-01 08:00:00.000')
+ tdSql.checkData(0,2,0.00000)
+ tdSql.checkData(0,3,0.000000000)
+ tdSql.query('select * from in_ts_float_double_1 where in_float in (0.00000) order by ts desc')
+ tdSql.checkData(0,1,'1970-01-01 08:00:00.000')
+ tdSql.checkData(0,2,0.00000)
+ tdSql.checkData(0,3,0.000000000)
+ tdSql.query('select * from in_ts_float_double_1 where in_double in (0.000000000) order by ts desc')
+ tdSql.checkData(0,1,'1970-01-01 08:00:00.000')
+ tdSql.checkData(0,2,0.00000)
+ tdSql.checkData(0,3,0.000000000)
- cmd2 = 'select * from in_stable_4 where in_float in (\'888\');'
+ cmd2 = 'insert into in_ts_float_double_2 values(now,\'2020-01-01 08:00:00.001\',\'666\',\'-88888\') ;'
tdLog.info(cmd2)
- tdSql.error(cmd2)
- try:
- tdSql.execute(cmd2)
- tdLog.exit("invalid operation: not supported filter condition")
- except Exception as e:
- tdLog.info(repr(e))
- tdLog.info("invalid operation: not supported filter condition")
+ tdSql.execute(cmd2)
+
+ tdSql.query('select * from in_stable_4 where in_ts in (\'2020-01-01 08:00:00.001\') order by ts desc')
+ tdSql.checkData(0,1,'2020-01-01 08:00:00.001000')
+ tdSql.checkData(0,2,666.00000)
+ tdSql.checkData(0,3,-88888.000000000)
+ tdSql.checkData(0,4,'2020-01-01 08:00:00.001')
+ tdSql.checkData(0,5,666.00000)
+ tdSql.checkData(0,6,-88888.000000000)
+ tdSql.query('select * from in_stable_4 where in_ts in (\'1577836800001\') order by ts desc')
+ tdSql.checkData(0,1,'2020-01-01 08:00:00.001000')
+ tdSql.checkData(0,2,666.00000)
+ tdSql.checkData(0,3,-88888.000000000)
+ tdSql.checkData(0,4,'2020-01-01 08:00:00.001')
+ tdSql.checkData(0,5,666.00000)
+ tdSql.checkData(0,6,-88888.000000000)
+ tdSql.query('select * from in_stable_4 where in_float in (666.00000) order by ts desc')
+ tdSql.checkData(0,1,'2020-01-01 08:00:00.001000')
+ tdSql.checkData(0,2,666.00000)
+ tdSql.checkData(0,3,-88888.000000000)
+ tdSql.checkData(0,4,'2020-01-01 08:00:00.001')
+ tdSql.checkData(0,5,666.00000)
+ tdSql.checkData(0,6,-88888.000000000)
+ tdSql.query('select * from in_stable_4 where in_double in (-88888.000000000) order by ts desc')
+ tdSql.checkData(0,1,'2020-01-01 08:00:00.001000')
+ tdSql.checkData(0,2,666.00000)
+ tdSql.checkData(0,3,-88888.000000000)
+ tdSql.checkData(0,4,'2020-01-01 08:00:00.001')
+ tdSql.checkData(0,5,666.00000)
+ tdSql.checkData(0,6,-88888.000000000)
+
+ tdSql.query('select * from in_stable_4 where tin_ts in (\'2020-01-01 08:00:00.001000\') order by ts desc')
+ tdSql.checkData(0,1,'2020-01-01 08:00:00.001000')
+ tdSql.checkData(0,2,666.00000)
+ tdSql.checkData(0,3,-88888.000000000)
+ tdSql.checkData(0,4,'2020-01-01 08:00:00.001')
+ tdSql.checkData(0,5,666.00000)
+ tdSql.checkData(0,6,-88888.000000000)
+ tdSql.query('select * from in_stable_4 where tin_ts in (\'1577836800001\') order by ts desc')
+ tdSql.checkData(0,1,'2020-01-01 08:00:00.001000')
+ tdSql.checkData(0,2,666.00000)
+ tdSql.checkData(0,3,-88888.000000000)
+ tdSql.checkData(0,4,'2020-01-01 08:00:00.001')
+ tdSql.checkData(0,5,666.00000)
+ tdSql.checkData(0,6,-88888.000000000)
+ tdSql.query('select * from in_stable_4 where tin_float in (666.00000) order by ts desc')
+ tdSql.checkData(0,1,'2020-01-01 08:00:00.001000')
+ tdSql.checkData(0,2,666.00000)
+ tdSql.checkData(0,3,-88888.000000000)
+ tdSql.checkData(0,4,'2020-01-01 08:00:00.001')
+ tdSql.checkData(0,5,666.00000)
+ tdSql.checkData(0,6,-88888.000000000)
+ tdSql.query('select * from in_stable_4 where tin_double in (-88888.000000000) order by ts desc')
+ tdSql.checkData(0,1,'2020-01-01 08:00:00.001000')
+ tdSql.checkData(0,2,666.00000)
+ tdSql.checkData(0,3,-88888.000000000)
+ tdSql.checkData(0,4,'2020-01-01 08:00:00.001')
+ tdSql.checkData(0,5,666.00000)
+ tdSql.checkData(0,6,-88888.000000000)
+
+ tdSql.query('select * from in_ts_float_double_2 where in_ts in (\'1577836800001\') order by ts desc')
+ tdSql.checkData(0,1,'2020-01-01 08:00:00.001')
+ tdSql.checkData(0,2,666.00000)
+ tdSql.checkData(0,3,-88888.000000000)
+ tdSql.query('select * from in_ts_float_double_2 where in_ts in (\'2020-01-01 08:00:00.001\') order by ts desc')
+ tdSql.checkData(0,1,'2020-01-01 08:00:00.001')
+ tdSql.checkData(0,2,666.00000)
+ tdSql.checkData(0,3,-88888.000000000)
+ tdSql.query('select * from in_ts_float_double_2 where in_float in (666.00000) order by ts desc')
+ tdSql.checkData(0,1,'2020-01-01 08:00:00.001')
+ tdSql.checkData(0,2,666.00000)
+ tdSql.checkData(0,3,-88888.000000000)
+ tdSql.query('select * from in_ts_float_double_2 where in_double in (-88888.000000000) order by ts desc')
+ tdSql.checkData(0,1,'2020-01-01 08:00:00.001')
+ tdSql.checkData(0,2,666.00000)
+ tdSql.checkData(0,3,-88888.000000000)
- cmd3 = 'select * from in_stable_4 where in_double in (\'66666\');'
+ cmd3 = 'insert into in_ts_float_double_3 values(now,\'2021-01-01 08:00:00.001\',\'-888.00000\',\'66666.000000000\') ;'
tdLog.info(cmd3)
- tdSql.error(cmd3)
- try:
- tdSql.execute(cmd3)
- tdLog.exit("invalid operation: not supported filter condition")
- except Exception as e:
- tdLog.info(repr(e))
- tdLog.info("invalid operation: not supported filter condition")
-
- cmd4 = 'select * from in_stable_4 where tin_float in (\'666\');'
- tdLog.info(cmd4)
- tdSql.error(cmd4)
- try:
- tdSql.execute(cmd4)
- tdLog.exit("invalid operation: not supported filter condition")
- except Exception as e:
- tdLog.info(repr(e))
- tdLog.info("invalid operation: not supported filter condition")
-
- cmd5 = 'select * from in_stable_4 where tin_double in (\'88888\');'
- tdLog.info(cmd5)
- tdSql.error(cmd5)
- try:
- tdSql.execute(cmd5)
- tdLog.exit("invalid operation: not supported filter condition")
- except Exception as e:
- tdLog.info(repr(e))
- tdLog.info("invalid operation: not supported filter condition")
-
- cmd6 = 'select * from in_float_double_1 where in_float in (\'888\');'
- tdLog.info(cmd6)
- tdSql.error(cmd6)
- try:
- tdSql.execute(cmd6)
- tdLog.exit("invalid operation: not supported filter condition")
- except Exception as e:
- tdLog.info(repr(e))
- tdLog.info("invalid operation: not supported filter condition")
-
- cmd7 = 'select * from in_float_double_1 where in_double in (\'66666\');'
- tdLog.info(cmd7)
- tdSql.error(cmd7)
- try:
- tdSql.execute(cmd7)
- tdLog.exit("invalid operation: not supported filter condition")
- except Exception as e:
- tdLog.info(repr(e))
- tdLog.info("invalid operation: not supported filter condition")
+ tdSql.execute(cmd3)
+
+ tdSql.query('select * from in_stable_4 where in_ts in (\'2021-01-01 08:00:00.001\') order by ts desc')
+ tdSql.checkData(0,1,'2021-01-01 08:00:00.001000')
+ tdSql.checkData(0,2,-888.00000)
+ tdSql.checkData(0,3,66666.000000000)
+ tdSql.checkData(0,4,'2021-01-01 08:00:00.001')
+ tdSql.checkData(0,5,-888.00000)
+ tdSql.checkData(0,6,66666.000000000)
+ tdSql.query('select * from in_stable_4 where in_ts in (\'1609459200001\') order by ts desc')
+ tdSql.checkData(0,1,'2021-01-01 08:00:00.001000')
+ tdSql.checkData(0,2,-888.00000)
+ tdSql.checkData(0,3,66666.000000000)
+ tdSql.checkData(0,4,'2021-01-01 08:00:00.001')
+ tdSql.checkData(0,5,-888.00000)
+ tdSql.checkData(0,6,66666.000000000)
+ tdSql.query('select * from in_stable_4 where in_float in (-888.00000) order by ts desc')
+ tdSql.checkData(0,1,'2021-01-01 08:00:00.001000')
+ tdSql.checkData(0,2,-888.00000)
+ tdSql.checkData(0,3,66666.000000000)
+ tdSql.checkData(0,4,'2021-01-01 08:00:00.001')
+ tdSql.checkData(0,5,-888.00000)
+ tdSql.checkData(0,6,66666.000000000)
+ tdSql.query('select * from in_stable_4 where in_double in (66666.000000000) order by ts desc')
+ tdSql.checkData(0,1,'2021-01-01 08:00:00.001000')
+ tdSql.checkData(0,2,-888.00000)
+ tdSql.checkData(0,3,66666.000000000)
+ tdSql.checkData(0,4,'2021-01-01 08:00:00.001')
+ tdSql.checkData(0,5,-888.00000)
+ tdSql.checkData(0,6,66666.000000000)
+ tdSql.query('select * from in_stable_4 where tin_ts in (\'2021-01-01 08:00:00.001000\') order by ts desc')
+ tdSql.checkData(0,1,'2021-01-01 08:00:00.001000')
+ tdSql.checkData(0,2,-888.00000)
+ tdSql.checkData(0,3,66666.000000000)
+ tdSql.checkData(0,4,'2021-01-01 08:00:00.001')
+ tdSql.checkData(0,5,-888.00000)
+ tdSql.checkData(0,6,66666.000000000)
+ tdSql.query('select * from in_stable_4 where tin_ts in (\'1609459200001\') order by ts desc')
+ tdSql.checkData(0,1,'2021-01-01 08:00:00.001000')
+ tdSql.checkData(0,2,-888.00000)
+ tdSql.checkData(0,3,66666.000000000)
+ tdSql.checkData(0,4,'2021-01-01 08:00:00.001')
+ tdSql.checkData(0,5,-888.00000)
+ tdSql.checkData(0,6,66666.000000000)
+ tdSql.query('select * from in_stable_4 where tin_float in (-888.00000) order by ts desc')
+ tdSql.checkData(0,1,'2021-01-01 08:00:00.001000')
+ tdSql.checkData(0,2,-888.00000)
+ tdSql.checkData(0,3,66666.000000000)
+ tdSql.checkData(0,4,'2021-01-01 08:00:00.001')
+ tdSql.checkData(0,5,-888.00000)
+ tdSql.checkData(0,6,66666.000000000)
+ tdSql.query('select * from in_stable_4 where tin_double in (66666.000000000) order by ts desc')
+ tdSql.checkData(0,1,'2021-01-01 08:00:00.001000')
+ tdSql.checkData(0,2,-888.00000)
+ tdSql.checkData(0,3,66666.000000000)
+ tdSql.checkData(0,4,'2021-01-01 08:00:00.001')
+ tdSql.checkData(0,5,-888.00000)
+ tdSql.checkData(0,6,66666.000000000)
+
+ tdSql.query('select * from in_ts_float_double_3 where in_ts in (\'1609459200001\') order by ts desc')
+ tdSql.checkData(0,1,'2021-01-01 08:00:00.001')
+ tdSql.checkData(0,2,-888.00000)
+ tdSql.checkData(0,3,66666.000000000)
+ tdSql.query('select * from in_ts_float_double_3 where in_ts in (\'2021-01-01 08:00:00.001\') order by ts desc')
+ tdSql.checkData(0,1,'2021-01-01 08:00:00.001')
+ tdSql.checkData(0,2,-888.00000)
+ tdSql.checkData(0,3,66666.000000000)
+ tdSql.query('select * from in_ts_float_double_3 where in_float in (-888.00000) order by ts desc')
+ tdSql.checkData(0,1,'2021-01-01 08:00:00.001')
+ tdSql.checkData(0,2,-888.00000)
+ tdSql.checkData(0,3,66666.000000000)
+ tdSql.query('select * from in_ts_float_double_3 where in_double in (66666.000000000) order by ts desc')
+ tdSql.checkData(0,1,'2021-01-01 08:00:00.001')
+ tdSql.checkData(0,2,-888.00000)
+ tdSql.checkData(0,3,66666.000000000)
+
+
+ tdLog.info("=============== step4.3,multiple column and multiple tag check in function")
+ cmd1 = '''select * from in_stable_4
+ where in_ts in (\'1609459200001\',\'2021-01-01 08:00:00.001\',\'1577836800001\',\'2020-01-01 08:00:00.001000\',\'0\',\'1970-01-01 08:00:00.000\')
+ and in_float in (0.00000,666.00000,-888.00000)
+ and in_double in (0.000000000,66666.000000000,-88888.000000000)
+ and tin_ts in (\'1609459200001\',\'2021-01-01 08:00:00.001\',\'1577836800001\',\'2020-01-01 08:00:00.001000\',\'0\',\'1970-01-01 08:00:00.000\')
+ and tin_float in (0.00000,666.00000,-888.00000)
+ and tin_double in (0.000000000,66666.000000000,-88888.000000000)
+ order by ts desc ;'''
+ tdLog.info(cmd1)
+ tdSql.query(cmd1)
+ tdSql.checkData(0,1,'2021-01-01 08:00:00.001000')
+ tdSql.checkData(0,2,-888.00000)
+ tdSql.checkData(0,3,66666.000000000)
+ tdSql.checkData(0,4,'2021-01-01 08:00:00.001')
+ tdSql.checkData(0,5,-888.00000)
+ tdSql.checkData(0,6,66666.000000000)
+ tdSql.checkData(1,1,'2020-01-01 08:00:00.001000')
+ tdSql.checkData(1,2,666.00000)
+ tdSql.checkData(1,3,-88888.000000000)
+ tdSql.checkData(1,4,'2020-01-01 08:00:00.001')
+ tdSql.checkData(1,5,666.00000)
+ tdSql.checkData(1,6,-88888.000000000)
+ tdSql.checkData(2,1,'1970-01-01 08:00:00.000')
+ tdSql.checkData(2,2,0.00000)
+ tdSql.checkData(2,3,0.000000000)
+ tdSql.checkData(2,4,'1970-01-01 08:00:00.000')
+ tdSql.checkData(2,5,0.00000)
+ tdSql.checkData(2,6,0.000000000)
+
+
- tdLog.info("=============== step4.3,drop normal table && create table")
- cmd1 = 'drop table if exists normal_in_float_double_1 ;'
- cmd2 = 'create table normal_in_float_double_1 (ts timestamp,in_float float,in_double double) ; '
+ tdLog.info("=============== step4.4,drop normal table && create table")
+ cmd1 = 'drop table if exists normal_in_ts_float_double_1 ;'
+ cmd2 = 'create table normal_in_ts_float_double_1 (ts timestamp,in_ts timestamp,in_float float,in_double double) ; '
tdLog.info(cmd1)
tdSql.execute(cmd1)
tdLog.info(cmd2)
tdSql.execute(cmd2)
- tdLog.info("=============== step4.4,insert normal table right data and check in function")
- cmd1 = 'insert into normal_in_float_double_1 values(now,\'888\',\'666666\') ;'
+ tdLog.info("=============== step4.5,insert normal table right data and check in function")
+ cmd1 = 'insert into normal_in_ts_float_double_1 values(now,\'0\',\'0\',\'0\') ;'
tdLog.info(cmd1)
- tdSql.execute(cmd1)
+ tdSql.execute(cmd1)
- cmd2 = 'select * from normal_in_float_double_1 where in_float in (\'888\');'
+ tdSql.query('select * from normal_in_ts_float_double_1 where in_ts in (\'0\') order by ts desc')
+ tdSql.checkData(0,1,'1970-01-01 08:00:00.000')
+ tdSql.checkData(0,2,0.00000)
+ tdSql.checkData(0,3,0.000000000)
+ tdSql.query('select * from normal_in_ts_float_double_1 where in_ts in (\'1970-01-01 08:00:00.000\') order by ts desc')
+ tdSql.checkData(0,1,'1970-01-01 08:00:00.000')
+ tdSql.checkData(0,2,0.00000)
+ tdSql.checkData(0,3,0.000000000)
+ tdSql.query('select * from normal_in_ts_float_double_1 where in_float in (0.00000) order by ts desc')
+ tdSql.checkData(0,1,'1970-01-01 08:00:00.000')
+ tdSql.checkData(0,2,0.00000)
+ tdSql.checkData(0,3,0.000000000)
+ tdSql.query('select * from normal_in_ts_float_double_1 where in_double in (0.000000000) order by ts desc')
+ tdSql.checkData(0,1,'1970-01-01 08:00:00.000')
+ tdSql.checkData(0,2,0.00000)
+ tdSql.checkData(0,3,0.000000000)
+
+ cmd2 = 'insert into normal_in_ts_float_double_1 values(now,\'2020-01-01 08:00:00.001\',\'666\',\'-88888\') ;'
tdLog.info(cmd2)
- tdSql.error(cmd2)
- try:
- tdSql.execute(cmd2)
- tdLog.exit("invalid operation: not supported filter condition")
- except Exception as e:
- tdLog.info(repr(e))
- tdLog.info("invalid operation: not supported filter condition")
-
- cmd3 = 'select * from normal_in_float_double_1 where in_double in (\'66666\');'
+ tdSql.execute(cmd2)
+
+ tdSql.query('select * from normal_in_ts_float_double_1 where in_ts in (\'1577836800001\') order by ts desc')
+ tdSql.checkData(0,1,'2020-01-01 08:00:00.001')
+ tdSql.checkData(0,2,666.00000)
+ tdSql.checkData(0,3,-88888.000000000)
+ tdSql.query('select * from normal_in_ts_float_double_1 where in_ts in (\'2020-01-01 08:00:00.001\') order by ts desc')
+ tdSql.checkData(0,1,'2020-01-01 08:00:00.001')
+ tdSql.checkData(0,2,666.00000)
+ tdSql.checkData(0,3,-88888.000000000)
+ tdSql.query('select * from normal_in_ts_float_double_1 where in_float in (666.00000) order by ts desc')
+ tdSql.checkData(0,1,'2020-01-01 08:00:00.001')
+ tdSql.checkData(0,2,666.00000)
+ tdSql.checkData(0,3,-88888.000000000)
+ tdSql.query('select * from normal_in_ts_float_double_1 where in_double in (-88888.000000000) order by ts desc')
+ tdSql.checkData(0,1,'2020-01-01 08:00:00.001')
+ tdSql.checkData(0,2,666.00000)
+ tdSql.checkData(0,3,-88888.000000000)
+
+ cmd3 = 'insert into normal_in_ts_float_double_1 values(now,\'2021-01-01 08:00:00.001\',\'-888.00000\',\'66666.000000000\') ;'
tdLog.info(cmd3)
- tdSql.error(cmd3)
- try:
- tdSql.execute(cmd3)
- tdLog.exit("invalid operation: not supported filter condition")
- except Exception as e:
- tdLog.info(repr(e))
- tdLog.info("invalid operation: not supported filter condition")
+ tdSql.execute(cmd3)
+
+ tdSql.query('select * from normal_in_ts_float_double_1 where in_ts in (\'1609459200001\') order by ts desc')
+ tdSql.checkData(0,1,'2021-01-01 08:00:00.001')
+ tdSql.checkData(0,2,-888.00000)
+ tdSql.checkData(0,3,66666.000000000)
+ tdSql.query('select * from normal_in_ts_float_double_1 where in_ts in (\'2021-01-01 08:00:00.001\') order by ts desc')
+ tdSql.checkData(0,1,'2021-01-01 08:00:00.001')
+ tdSql.checkData(0,2,-888.00000)
+ tdSql.checkData(0,3,66666.000000000)
+ tdSql.query('select * from normal_in_ts_float_double_1 where in_float in (-888.00000) order by ts desc')
+ tdSql.checkData(0,1,'2021-01-01 08:00:00.001')
+ tdSql.checkData(0,2,-888.00000)
+ tdSql.checkData(0,3,66666.000000000)
+ tdSql.query('select * from normal_in_ts_float_double_1 where in_double in (66666.000000000) order by ts desc')
+ tdSql.checkData(0,1,'2021-01-01 08:00:00.001')
+ tdSql.checkData(0,2,-888.00000)
+ tdSql.checkData(0,3,66666.000000000)
+
+ cmd4 = '''select * from normal_in_ts_float_double_1
+ where in_ts in (\'1609459200001\',\'2021-01-01 08:00:00.001\',\'1577836800001\',\'2020-01-01 08:00:00.001000\',\'0\',\'1970-01-01 08:00:00.000\')
+ and in_double in (0.000000000,66666.000000000,-88888.000000000)
+ and in_float in (0.00000,666.00000,-888.00000)
+ order by ts desc ;'''
+ tdLog.info(cmd4)
+ tdSql.query(cmd4)
+ tdSql.checkData(0,1,'2021-01-01 08:00:00.001')
+ tdSql.checkData(0,2,-888.00000)
+ tdSql.checkData(0,3,66666.000000000)
+ tdSql.checkData(1,1,'2020-01-01 08:00:00.001')
+ tdSql.checkData(1,2,666.00000)
+ tdSql.checkData(1,3,-88888.000000000)
+ tdSql.checkData(2,1,'1970-01-01 08:00:00.000')
+ tdSql.checkData(2,2,0.00000)
+ tdSql.checkData(2,3,0.000000000)
+
+
def stop(self):
tdSql.close()
diff --git a/tests/pytest/query/nestedQuery/insertData.json b/tests/pytest/query/nestedQuery/insertData.json
new file mode 100644
index 0000000000000000000000000000000000000000..d4ef8dbe97ca144f59c0b1c961fe930bfcdbfcb2
--- /dev/null
+++ b/tests/pytest/query/nestedQuery/insertData.json
@@ -0,0 +1,62 @@
+{
+ "filetype": "insert",
+ "cfgdir": "/etc/taos",
+ "host": "127.0.0.1",
+ "port": 6030,
+ "user": "root",
+ "password": "taosdata",
+ "thread_count": 4,
+ "thread_count_create_tbl": 4,
+ "result_file":"./insert_res.txt",
+ "confirm_parameter_prompt": "no",
+ "insert_interval": 0,
+ "interlace_rows": 10,
+ "num_of_records_per_req": 1000,
+ "max_sql_len": 1024000,
+ "databases": [{
+ "dbinfo": {
+ "name": "db",
+ "drop": "yes",
+ "replica": 1,
+ "days": 10,
+ "cache": 50,
+ "blocks": 8,
+ "precision": "ms",
+ "keep": 365,
+ "minRows": 100,
+ "maxRows": 4096,
+ "comp":2,
+ "walLevel":1,
+ "cachelast":0,
+ "quorum":1,
+ "fsync":3000,
+ "update": 0
+ },
+ "super_tables": [{
+ "name": "stb0",
+ "child_table_exists":"no",
+ "childtable_count": 1,
+ "childtable_prefix": "stb0_",
+ "auto_create_table": "no",
+ "batch_create_tbl_num": 10,
+ "data_source": "rand",
+ "insert_mode": "taosc",
+ "insert_rows": 100000,
+ "childtable_limit": -1,
+ "childtable_offset": 0,
+ "multi_thread_write_one_tbl": "no",
+ "interlace_rows": 0,
+ "insert_interval": 0,
+ "max_sql_len": 1024000,
+ "disorder_ratio": 0,
+ "disorder_range": 1,
+ "timestamp_step": 1000,
+ "start_timestamp": "2020-10-01 00:00:00.000",
+ "sample_format": "csv",
+ "sample_file": "./tools/taosdemoAllTest/sample.csv",
+ "tags_file": "",
+ "columns": [{"type": "INT", "count":1}, {"type": "BINARY", "len": 16, "count":1}, {"type": "BOOL"}],
+ "tags": [{"type": "TINYINT", "count":1}, {"type": "BINARY", "len": 16, "count":1}]
+ }]
+ }]
+}
diff --git a/tests/pytest/query/nestedQuery/nestedQueryJson.py b/tests/pytest/query/nestedQuery/nestedQueryJson.py
new file mode 100644
index 0000000000000000000000000000000000000000..36a231a9165a15cb46cbc0c1d37152f90e54b03e
--- /dev/null
+++ b/tests/pytest/query/nestedQuery/nestedQueryJson.py
@@ -0,0 +1,81 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+import os
+from util.log import *
+from util.cases import *
+from util.sql import *
+from util.dnodes import *
+
+
+class TDTestCase:
+ def init(self, conn, logSql):
+ tdLog.debug("start to execute %s" % __file__)
+ tdSql.init(conn.cursor(), logSql)
+
+ def getBuildPath(self):
+        selfPath = os.path.dirname(os.path.realpath(__file__))
+        buildPath = ""
+
+ if ("community" in selfPath):
+ projPath = selfPath[:selfPath.find("community")]
+ else:
+ projPath = selfPath[:selfPath.find("tests")]
+
+ for root, dirs, files in os.walk(projPath):
+ if ("taosd" in files):
+ rootRealPath = os.path.dirname(os.path.realpath(root))
+ if ("packaging" not in rootRealPath):
+ buildPath = root[:len(root)-len("/build/bin")]
+ break
+ return buildPath
+
+ def run(self):
+ buildPath = self.getBuildPath()
+ if (buildPath == ""):
+ tdLog.exit("taosd not found!")
+ else:
+ tdLog.info("taosd found in %s" % buildPath)
+ binPath = buildPath+ "/build/bin/"
+
+        # insert: create one or multiple tables per sql and insert multiple rows per sql
+ os.system("%staosdemo -f query/nestedQuery/insertData.json -y " % binPath)
+ tdSql.execute("use db")
+ tdSql.query("select count (tbname) from stb0")
+ tdSql.checkData(0, 0, 1000)
+ tdSql.query("select count (tbname) from stb1")
+ tdSql.checkData(0, 0, 1000)
+ tdSql.query("select count(*) from stb00_0")
+ tdSql.checkData(0, 0, 100)
+ tdSql.query("select count(*) from stb0")
+ tdSql.checkData(0, 0, 100000)
+ tdSql.query("select count(*) from stb01_1")
+ tdSql.checkData(0, 0, 200)
+ tdSql.query("select count(*) from stb1")
+ tdSql.checkData(0, 0, 200000)
+
+
+
+ testcaseFilename = os.path.split(__file__)[-1]
+ os.system("rm -rf ./insert_res.txt")
+ os.system("rm -rf query/nestedQuery/%s.sql" % testcaseFilename )
+
+
+
+ def stop(self):
+ tdSql.close()
+ tdLog.success("%s successfully executed" % __file__)
+
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
diff --git a/tests/pytest/query/nestedQuery/queryInterval.py b/tests/pytest/query/nestedQuery/queryInterval.py
new file mode 100644
index 0000000000000000000000000000000000000000..11c42c463ee7d863393f2921db24244718a49df8
--- /dev/null
+++ b/tests/pytest/query/nestedQuery/queryInterval.py
@@ -0,0 +1,106 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+import os
+import taos
+from util.log import tdLog
+from util.cases import tdCases
+from util.sql import tdSql
+from util.dnodes import tdDnodes
+import random
+
+class TDTestCase:
+ def init(self, conn, logSql):
+ tdLog.debug("start to execute %s" % __file__)
+ tdSql.init(conn.cursor(), logSql)
+
+ self.ts1 = 1593548685000
+ self.ts2 = 1593548785000
+
+
+ def run(self):
+ # tdSql.execute("drop database db ")
+ tdSql.prepare()
+ tdSql.execute("create table st (ts timestamp, num int, value int , t_instance int) tags (loc nchar(30))")
+ node = 5
+ number = 10
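+        # 5 child tables x 10 rows each: rows within a table are 5 minutes apart,
+        # each table is offset 1 minute from the previous one, and the stored
+        # value grows by 2 per row and per table, so the window results below
+        # can be derived by hand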
+ for n in range(node):
+ for m in range(number):
+                dt = m*300000 + n*60000  # ms offset: 5 min per row, 1 min per table
+ args1=(n,n,self.ts1+dt,n,100+2*m+2*n,10+m+n)
+ # args2=(n,self.ts2+dt,n,120+n,15+n)
+ tdSql.execute("insert into t%d using st tags('beijing%d') values(%d, %d, %d, %d)" % args1)
+ # tdSql.execute("insert into t1 using st tags('shanghai') values(%d, %d, %d, %d)" % args2)
+
+ # interval function
+ tdSql.query("select avg(value) from st interval(10m)")
+ # print(tdSql.queryResult)
+ tdSql.checkRows(6)
+ tdSql.checkData(0, 0, "2020-07-01 04:20:00")
+ tdSql.checkData(1, 1, 107.4)
+
+ # subquery with interval
+ tdSql.query("select avg(avg_val) from(select avg(value) as avg_val from st where loc='beijing0' interval(10m));")
+ tdSql.checkData(0, 0, 109.0)
+
+ # subquery with interval and select two Column in parent query
+ tdSql.error("select ts,avg(avg_val) from(select avg(value) as avg_val from st where loc='beijing0' interval(10m));")
+
+ # subquery with interval and sliding
+ tdSql.query("select avg(value) as avg_val from st where loc='beijing0' interval(8m) sliding(30s) limit 1;")
+ tdSql.checkData(0, 0, "2020-07-01 04:17:00")
+ tdSql.checkData(0, 1, 100)
+ tdSql.query("select avg(avg_val) from(select avg(value) as avg_val from st where loc='beijing1' interval(8m) sliding(30s));")
+ tdSql.checkData(0, 0, 111)
+
+ # subquery with interval and offset
+ tdSql.query("select avg(value) as avg_val from st where loc='beijing0' interval(5m,1m);")
+ tdSql.checkData(0, 0, "2020-07-01 04:21:00")
+ tdSql.checkData(0, 1, 100)
+ tdSql.query("select avg(avg_val) from(select avg(value) as avg_val from st where loc='beijing0' interval(5m,1m) group by loc);")
+ tdSql.checkData(0, 0, 109)
+
+ # subquery with interval,sliding and group by ; parent query with interval
+ tdSql.query("select avg(value) as avg_val from st where loc='beijing0' interval(8m) sliding(1m) group by loc limit 1 offset 52 ;")
+ tdSql.checkData(0, 0, "2020-07-01 05:09:00")
+ tdSql.checkData(0, 1, 118)
+ tdSql.query("select avg(avg_val) as ncst from(select avg(value) as avg_val from st where loc!='beijing0' interval(8m) sliding(1m) group by loc ) interval(5m);")
+ tdSql.checkData(1, 1, 105)
+
+ # # subquery and parent query with interval and sliding
+ tdSql.query("select avg(avg_val) from(select avg(value) as avg_val from st where loc='beijing1' interval(8m) sliding(5m)) interval(10m) sliding(2m);")
+ tdSql.checkData(29, 0, "2020-07-01 05:10:00.000")
+
+ # subquery and parent query with top and bottom
+ tdSql.query("select top(avg_val,2) from(select avg(value) as avg_val,num from st where loc!='beijing0' group by num) order by avg_val desc;")
+ tdSql.checkData(0, 1, 117)
+ tdSql.query("select bottom(avg_val,3) from(select avg(value) as avg_val,num from st where loc!='beijing0' group by num) order by avg_val asc;")
+ tdSql.checkData(0, 1, 111)
+
+        # top on the result of a subquery with interval and sliding
+ tdSql.query("select top(avg_val,2) from(select avg(value) as avg_val from st where loc='beijing1' interval(8m) sliding(3m));")
+ tdSql.checkData(0, 1, 120)
+
+ # clear env
+ testcaseFilename = os.path.split(__file__)[-1]
+ os.system("rm -rf ./insert_res.txt")
+ os.system("rm -rf wal/%s.sql" % testcaseFilename )
+
+ def stop(self):
+ tdSql.close()
+ tdLog.success("%s successfully executed" % __file__)
+
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
diff --git a/tests/pytest/query/queryStateWindow.py b/tests/pytest/query/queryStateWindow.py
new file mode 100644
index 0000000000000000000000000000000000000000..251dbef65841cc17b31046320a7e966426c5eeb1
--- /dev/null
+++ b/tests/pytest/query/queryStateWindow.py
@@ -0,0 +1,111 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+import taos
+from util.log import *
+from util.cases import *
+from util.sql import *
+import numpy as np
+
+
+class TDTestCase:
+ def init(self, conn, logSql):
+ tdLog.debug("start to execute %s" % __file__)
+        tdSql.init(conn.cursor(), logSql)
+ self.rowNum = 100000
+ self.ts = 1537146000000
+
+ def run(self):
+ tdSql.prepare()
+
+ print("==============step1")
+ tdSql.execute(
+ "create table if not exists st (ts timestamp, t1 int, t2 timestamp, t3 bigint, t4 float, t5 double, t6 binary(10), t7 smallint, t8 tinyint, t9 bool, t10 nchar(10), t11 int unsigned, t12 bigint unsigned, t13 smallint unsigned, t14 tinyint unsigned ,t15 int) tags(dev nchar(50), tag2 binary(16))")
+ tdSql.execute(
+ 'CREATE TABLE if not exists dev_001 using st tags("dev_01", "tag_01")')
+ tdSql.execute(
+ 'CREATE TABLE if not exists dev_002 using st tags("dev_02", "tag_02")')
+
+ print("==============step2")
+
+ tdSql.execute(
+ "INSERT INTO dev_001 VALUES('2020-05-13 10:00:00.000', 1, '2020-05-13 10:00:00.000', 10, 3.1, 3.14, 'test', -10, -126, true, '测试', 15, 10, 65534, 254, 1)('2020-05-13 10:00:01.000', 1, '2020-05-13 10:00:01.000', 10, 3.1, 3.14, 'test', -10, -126, true, '测试', 15, 10, 65534, 253, 5)('2020-05-13 10:00:02.000', 10, '2020-05-13 10:00:00.000', 11, 3.1, 3.14, 'test', 10, -127, false, '测试', 15, 10, 65534, 253, 10)('2020-05-13 10:00:03.000', 1, '2020-05-13 10:00:00.000', 11, 3.1, 3.14, 'test', -10, -126, true, '测试', 14, 12, 65532, 254, 15)")
+
+ for i in range(self.rowNum):
+ tdSql.execute("insert into dev_002 (ts,t1) values(%d, %d,)" % (self.ts + i, i + 1))
+
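+        # t1 in dev_001 goes 1 -> 1 -> 10 -> 1, so state_window(t1) should
+        # produce three windows with row counts 2, 1 and 1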
+ tdSql.query("select count(ts) from dev_001 state_window(t1)")
+ tdSql.checkRows(3)
+ tdSql.checkData(0, 0, 2)
+ tdSql.query("select count(ts) from dev_001 state_window(t3)")
+ tdSql.checkRows(2)
+ tdSql.checkData(1, 0, 2)
+ tdSql.query("select count(ts) from dev_001 state_window(t7)")
+ tdSql.checkRows(3)
+ tdSql.checkData(1, 0, 1)
+ tdSql.query("select count(ts) from dev_001 state_window(t8)")
+ tdSql.checkRows(3)
+ tdSql.checkData(2, 0, 1)
+ tdSql.query("select count(ts) from dev_001 state_window(t11)")
+ tdSql.checkRows(2)
+ tdSql.checkData(0, 0, 3)
+ tdSql.query("select count(ts) from dev_001 state_window(t12)")
+ tdSql.checkRows(2)
+ tdSql.checkData(1, 0, 1)
+ tdSql.query("select count(ts) from dev_001 state_window(t13)")
+ tdSql.checkRows(2)
+ tdSql.checkData(1, 0, 1)
+ tdSql.query("select count(ts) from dev_001 state_window(t14)")
+ tdSql.checkRows(3)
+ tdSql.checkData(1, 0, 2)
+ tdSql.query("select count(ts) from dev_002 state_window(t1)")
+ tdSql.checkRows(100000)
+
+ # with all aggregate function
+ tdSql.query("select count(*),sum(t1),avg(t1),twa(t1),stddev(t15),leastsquares(t15,1,1),first(t15),last(t15),spread(t15),percentile(t15,90),t9 from dev_001 state_window(t9);")
+ tdSql.checkRows(3)
+ tdSql.checkData(0, 0, 2)
+ tdSql.checkData(1, 1, 10)
+ tdSql.checkData(0, 2, 1)
+ # tdSql.checkData(0, 3, 1)
+ tdSql.checkData(0, 4, np.std([1,5]))
+ # tdSql.checkData(0, 5, 1)
+ tdSql.checkData(0, 6, 1)
+ tdSql.checkData(0, 7, 5)
+ tdSql.checkData(0, 8, 4)
+ tdSql.checkData(0, 9, 4.6)
+ tdSql.checkData(0, 10, 'True')
+
+ # with where
+ tdSql.query("select avg(t15),t9 from dev_001 where t9='true' state_window(t9);")
+ tdSql.checkData(0, 0, 7)
+ tdSql.checkData(0, 1, 'True')
+
+ # error
+ tdSql.error("select count(*) from dev_001 state_window(t2)")
+ tdSql.error("select count(*) from st state_window(t3)")
+ tdSql.error("select count(*) from dev_001 state_window(t4)")
+ tdSql.error("select count(*) from dev_001 state_window(t5)")
+ tdSql.error("select count(*) from dev_001 state_window(t6)")
+ tdSql.error("select count(*) from dev_001 state_window(t10)")
+ tdSql.error("select count(*) from dev_001 state_window(tag2)")
+
+
+ def stop(self):
+ tdSql.close()
+ tdLog.success("%s successfully executed" % __file__)
+
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
diff --git a/tests/pytest/tools/taosdumpTest.py b/tests/pytest/tools/taosdumpTest.py
index 534a477b340210b8ee6bcd77fe6010c6d3d261e0..8746f4ecdff32e740467d7caf0c5808dd91a72d5 100644
--- a/tests/pytest/tools/taosdumpTest.py
+++ b/tests/pytest/tools/taosdumpTest.py
@@ -45,7 +45,9 @@ class TDTestCase:
for i in range(100):
sql += "(%d, %d, 'nchar%d')" % (currts + i, i % 100, i % 100)
tdSql.execute(sql)
+
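+        # remove stale dump files so the dump below is the only content in /tmp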
+ os.system("rm /tmp/*.sql")
os.system("taosdump --databases db -o /tmp")
tdSql.execute("drop database db")
diff --git a/tests/pytest/tools/taosdumpTest2.py b/tests/pytest/tools/taosdumpTest2.py
new file mode 100644
index 0000000000000000000000000000000000000000..51a73555a805ea503bdf560cbc3773e85b6d35ce
--- /dev/null
+++ b/tests/pytest/tools/taosdumpTest2.py
@@ -0,0 +1,74 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+import os
+from util.log import *
+from util.cases import *
+from util.sql import *
+from util.dnodes import *
+
+
+class TDTestCase:
+ def init(self, conn, logSql):
+ tdLog.debug("start to execute %s" % __file__)
+ tdSql.init(conn.cursor(), logSql)
+
+ self.ts = 1601481600000
+ self.numberOfTables = 1
+ self.numberOfRecords = 15000
+
+ def run(self):
+ tdSql.prepare()
+
+ tdSql.execute("create table st(ts timestamp, c1 timestamp, c2 int, c3 bigint, c4 float, c5 double, c6 binary(8), c7 smallint, c8 tinyint, c9 bool, c10 nchar(8)) tags(t1 int)")
+ tdSql.execute("create table t1 using st tags(0)")
+ currts = self.ts
+ finish = 0
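+        # pack as many rows as possible into one INSERT statement, stopping about
+        # 16 KB short of the 1 MB (1048576 byte) max SQL length per statement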
+ while(finish < self.numberOfRecords):
+ sql = "insert into t1 values"
+ for i in range(finish, self.numberOfRecords):
+ sql += "(%d, 1019774612, 29931, 1442173978, 165092.468750, 1128.643179, 'MOCq1pTu', 18405, 82, 0, 'g0A6S0Fu')" % (currts + i)
+ finish = i + 1
+ if (1048576 - len(sql)) < 16384:
+ break
+ tdSql.execute(sql)
+
+ os.system("rm /tmp/*.sql")
+ os.system("taosdump --databases db -o /tmp -B 32766 -L 1048576")
+
+ tdSql.execute("drop database db")
+ tdSql.query("show databases")
+ tdSql.checkRows(0)
+
+ os.system("taosdump -i /tmp")
+
+ tdSql.query("show databases")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, 'db')
+
+ tdSql.execute("use db")
+ tdSql.query("show stables")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, 'st')
+
+ tdSql.query("select count(*) from t1")
+ tdSql.checkData(0, 0, self.numberOfRecords)
+
+ def stop(self):
+ tdSql.close()
+ tdLog.success("%s successfully executed" % __file__)
+
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
\ No newline at end of file
diff --git a/tests/pytest/util/taosdemoCfg.py b/tests/pytest/util/taosdemoCfg.py
new file mode 100644
index 0000000000000000000000000000000000000000..5071e915a5b2117465247a3bc762f77bbb59159f
--- /dev/null
+++ b/tests/pytest/util/taosdemoCfg.py
@@ -0,0 +1,450 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+import os
+import time
+import datetime
+import inspect
+import psutil
+import shutil
+import json
+from util.log import *
+from multiprocessing import cpu_count
+
+
+# TODO: fully test the function. Handle exceptions.
+# Handle json format not accepted by taosdemo
+class TDTaosdemoCfg:
+ def __init__(self):
+ self.insert_cfg = {
+ "filetype": "insert",
+ "cfgdir": "/etc/taos",
+ "host": "127.0.0.1",
+ "port": 6030,
+ "user": "root",
+ "password": "taosdata",
+ "thread_count": cpu_count(),
+ "thread_count_create_tbl": cpu_count(),
+ "result_file": "./insert_res.txt",
+ "confirm_parameter_prompt": "no",
+ "insert_interval": 0,
+ "num_of_records_per_req": 32766,
+ "max_sql_len": 32766,
+ "databases": None
+ }
+
+ self.db = {
+ "name": 'db',
+ "drop": 'yes',
+ "replica": 1,
+ "days": 10,
+ "cache": 16,
+ "blocks": 6,
+ "precision": "ms",
+ "keep": 3650,
+ "minRows": 100,
+ "maxRows": 4096,
+ "comp": 2,
+ "walLevel": 1,
+ "cachelast": 0,
+ "quorum": 1,
+ "fsync": 3000,
+ "update": 0
+ }
+
+ self.query_cfg = {
+ "filetype": "query",
+ "cfgdir": "/etc/taos",
+ "host": "127.0.0.1",
+ "port": 6030,
+ "user": "root",
+ "password": "taosdata",
+ "confirm_parameter_prompt": "no",
+ "databases": "db",
+ "query_times": 2,
+ "query_mode": "taosc",
+ "specified_table_query": None,
+ "super_table_query": None
+ }
+
+ self.table_query = {
+ "query_interval": 1,
+ "concurrent": 3,
+ "sqls": None
+ }
+
+ self.stable_query = {
+ "stblname": "stb",
+ "query_interval": 1,
+ "threads": 3,
+ "sqls": None
+ }
+
+ self.sub_cfg = {
+ "filetype": "subscribe",
+ "cfgdir": "/etc/taos",
+ "host": "127.0.0.1",
+ "port": 6030,
+ "user": "root",
+ "password": "taosdata",
+ "databases": "db",
+ "confirm_parameter_prompt": "no",
+ "specified_table_query": None,
+ "super_table_query": None
+ }
+
+ self.table_sub = {
+ "concurrent": 1,
+ "mode": "sync",
+ "interval": 10000,
+ "restart": "yes",
+ "keepProgress": "yes",
+ "sqls": None
+ }
+
+ self.stable_sub = {
+ "stblname": "stb",
+ "threads": 1,
+ "mode": "sync",
+ "interval": 10000,
+ "restart": "yes",
+ "keepProgress": "yes",
+ "sqls": None
+ }
+
+ self.stbs = []
+ self.stb_template = {
+ "name": "stb",
+ "child_table_exists": "no",
+ "childtable_count": 100,
+ "childtable_prefix": "stb_",
+ "auto_create_table": "no",
+ "batch_create_tbl_num": 5,
+ "data_source": "rand",
+ "insert_mode": "taosc",
+ "insert_rows": 100,
+ "childtable_limit": 10,
+ "childtable_offset": 0,
+ "interlace_rows": 0,
+ "insert_interval": 0,
+ "max_sql_len": 32766,
+ "disorder_ratio": 0,
+ "disorder_range": 1000,
+ "timestamp_step": 1,
+ "start_timestamp": "2020-10-01 00:00:00.000",
+ "sample_format": "csv",
+ "sample_file": "./sample.csv",
+ "tags_file": "",
+ "columns": [{"type": "INT", "count": 1}],
+ "tags": [{"type": "BIGINT", "count": 1}]
+ }
+
+ self.tb_query_sql = []
+ self.tb_query_sql_template = {
+ "sql": "select last_row(*) from stb_0 ",
+ "result": "temp/query_res0.txt"
+ }
+
+ self.stb_query_sql = []
+ self.stb_query_sql_template = {
+ "sql": "select last_row(ts) from xxxx",
+ "result": "temp/query_res2.txt"
+ }
+
+ self.tb_sub_sql = []
+ self.tb_sub_sql_template = {
+ "sql": "select * from stb_0 ;",
+ "result": "temp/subscribe_res0.txt"
+ }
+
+ self.stb_sub_sql = []
+ self.stb_sub_sql_template = {
+ "sql": "select * from xxxx where ts > '2021-02-25 11:35:00.000' ;",
+ "result": "temp/subscribe_res1.txt"
+ }
+
+    # The following functions are import functions for the different dicts and lists;
+    # except for import_sql, each one takes a dict and overwrites the original dict
+ # dict_in: the dict used to overwrite the target
+ def import_insert_cfg(self, dict_in):
+ self.insert_cfg = dict_in
+
+ def import_db(self, dict_in):
+ self.db = dict_in
+
+ def import_stbs(self, dict_in):
+ self.stbs = dict_in
+
+ def import_query_cfg(self, dict_in):
+ self.query_cfg = dict_in
+
+ def import_table_query(self, dict_in):
+ self.table_query = dict_in
+
+ def import_stable_query(self, dict_in):
+ self.stable_query = dict_in
+
+ def import_sub_cfg(self, dict_in):
+ self.sub_cfg = dict_in
+
+ def import_table_sub(self, dict_in):
+ self.table_sub = dict_in
+
+ def import_stable_sub(self, dict_in):
+ self.stable_sub = dict_in
+
+ def import_sql(self, Sql_in, mode):
+ """used for importing the sql later used
+
+ Args:
+ Sql_in (dict): the imported sql dict
+ mode (str): the sql storing location within TDTaosdemoCfg
+ format: 'fileType_tableType'
+ fileType: query, sub
+ tableType: table, stable
+ """
+ if mode == 'query_table':
+ self.tb_query_sql = Sql_in
+ elif mode == 'query_stable':
+ self.stb_query_sql = Sql_in
+ elif mode == 'sub_table':
+ self.tb_sub_sql = Sql_in
+ elif mode == 'sub_stable':
+ self.stb_sub_sql = Sql_in
+ # import functions end
+
+ # The following functions are alter functions for different dicts
+ # Args:
+ # key: the key that is going to be modified
+ # value: the value of the key that is going to be modified
+    #       if key = 'databases' | 'specified_table_query' | 'super_table_query' | 'sqls'
+ # value will not be used
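+    # e.g. (illustrative): alter_db('keep', 365) edits the db dict, while
+    # alter_insert_cfg('databases', None) assembles db + stbs into insert_cfg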
+
+ def alter_insert_cfg(self, key, value):
+
+ if key == 'databases':
+ self.insert_cfg[key] = [
+ {
+ 'dbinfo': self.db,
+ 'super_tables': self.stbs
+ }
+ ]
+ else:
+ self.insert_cfg[key] = value
+
+ def alter_db(self, key, value):
+ self.db[key] = value
+
+ def alter_query_tb(self, key, value):
+ if key == "sqls":
+ self.table_query[key] = self.tb_query_sql
+ else:
+ self.table_query[key] = value
+
+ def alter_query_stb(self, key, value):
+ if key == "sqls":
+ self.stable_query[key] = self.stb_query_sql
+ else:
+ self.stable_query[key] = value
+
+ def alter_query_cfg(self, key, value):
+ if key == "specified_table_query":
+ self.query_cfg["specified_table_query"] = self.table_query
+ elif key == "super_table_query":
+ self.query_cfg["super_table_query"] = self.stable_query
+ else:
+            self.query_cfg[key] = value
+
+ def alter_sub_cfg(self, key, value):
+ if key == "specified_table_query":
+ self.sub_cfg["specified_table_query"] = self.table_sub
+ elif key == "super_table_query":
+ self.sub_cfg["super_table_query"] = self.stable_sub
+ else:
+            self.sub_cfg[key] = value
+
+ def alter_sub_stb(self, key, value):
+ if key == "sqls":
+ self.stable_sub[key] = self.stb_sub_sql
+ else:
+ self.stable_sub[key] = value
+
+ def alter_sub_tb(self, key, value):
+ if key == "sqls":
+ self.table_sub[key] = self.tb_sub_sql
+ else:
+ self.table_sub[key] = value
+ # alter function ends
+
+ # the following functions are for handling the sql lists
+ def append_sql_stb(self, target, value):
+ """for appending sql dict into specific sql list
+
+ Args:
+ target (str): the target append list
+ format: 'fileType_tableType'
+ fileType: query, sub
+ tableType: table, stable
+ unique: 'insert_stbs'
+ value (dict): the sql dict going to be appended
+ """
+ if target == 'insert_stbs':
+ self.stbs.append(value)
+ elif target == 'query_table':
+ self.tb_query_sql.append(value)
+ elif target == 'query_stable':
+ self.stb_query_sql.append(value)
+ elif target == 'sub_table':
+ self.tb_sub_sql.append(value)
+ elif target == 'sub_stable':
+ self.stb_sub_sql.append(value)
+
+ def pop_sql_stb(self, target, index):
+ """for poping a sql dict from specific sql list
+
+ Args:
+ target (str): the target append list
+ format: 'fileType_tableType'
+ fileType: query, sub
+ tableType: table, stable
+ unique: 'insert_stbs'
+ index (int): the sql dict that is going to be popped
+ """
+ if target == 'insert_stbs':
+ self.stbs.pop(index)
+ elif target == 'query_table':
+ self.tb_query_sql.pop(index)
+ elif target == 'query_stable':
+ self.stb_query_sql.pop(index)
+ elif target == 'sub_table':
+ self.tb_sub_sql.pop(index)
+ elif target == 'sub_stable':
+ self.stb_sub_sql.pop(index)
+ # sql list modification function end
+
+ # The following functions are get functions for different dicts
+ def get_db(self):
+ return self.db
+
+ def get_stb(self):
+ return self.stbs
+
+ def get_insert_cfg(self):
+ return self.insert_cfg
+
+ def get_query_cfg(self):
+ return self.query_cfg
+
+ def get_tb_query(self):
+ return self.table_query
+
+ def get_stb_query(self):
+ return self.stable_query
+
+ def get_sub_cfg(self):
+ return self.sub_cfg
+
+ def get_tb_sub(self):
+ return self.table_sub
+
+ def get_stb_sub(self):
+ return self.stable_sub
+
+ def get_sql(self, target):
+ """general get function for all sql lists
+
+ Args:
+            target (str): the sql list to get
+                format: 'fileType_tableType'
+                fileType: query, sub
+                tableType: table, stable
+ """
+ if target == 'query_table':
+ return self.tb_query_sql
+ elif target == 'query_stable':
+ return self.stb_query_sql
+ elif target == 'sub_table':
+ return self.tb_sub_sql
+ elif target == 'sub_stable':
+ return self.stb_sub_sql
+
+ def get_template(self, target):
+ """general get function for the default sql template
+
+ Args:
+            target (str): the template to get
+ format: 'fileType_tableType'
+ fileType: query, sub
+ tableType: table, stable
+ unique: 'insert_stbs'
+ """
+ if target == 'insert_stbs':
+ return self.stb_template
+ elif target == 'query_table':
+ return self.tb_query_sql_template
+ elif target == 'query_stable':
+ return self.stb_query_sql_template
+ elif target == 'sub_table':
+ return self.tb_sub_sql_template
+ elif target == 'sub_stable':
+ return self.stb_sub_sql_template
+ else:
+ print(f'did not find {target}')
+
+    # the following are the file generation functions
+    """default docstring for the generators below:
+        generator function for producing a taosdemo json file;
+        it assembles the dicts and dumps the final json
+
+        Args:
+            pathName (str): the directory the json file is written to
+            fileName (str): the name suffix of the json file
+        Returns:
+            str: [pathName]/[filetype]_[fileName].json
+    """
+
+ def generate_insert_cfg(self, pathName, fileName):
+ cfgFileName = f'{pathName}/insert_{fileName}.json'
+ self.alter_insert_cfg('databases', None)
+ with open(cfgFileName, 'w') as file:
+ json.dump(self.insert_cfg, file)
+ return cfgFileName
+
+ def generate_query_cfg(self, pathName, fileName):
+ cfgFileName = f'{pathName}/query_{fileName}.json'
+ self.alter_query_tb('sqls', None)
+ self.alter_query_stb('sqls', None)
+ self.alter_query_cfg('specified_table_query', None)
+ self.alter_query_cfg('super_table_query', None)
+ with open(cfgFileName, 'w') as file:
+ json.dump(self.query_cfg, file)
+ return cfgFileName
+
+ def generate_subscribe_cfg(self, pathName, fileName):
+ cfgFileName = f'{pathName}/subscribe_{fileName}.json'
+ self.alter_sub_tb('sqls', None)
+ self.alter_sub_stb('sqls', None)
+ self.alter_sub_cfg('specified_table_query', None)
+ self.alter_sub_cfg('super_table_query', None)
+ with open(cfgFileName, 'w') as file:
+ json.dump(self.sub_cfg, file)
+ return cfgFileName
+ # file generation functions ends
+
+ def drop_cfg_file(self, fileName):
+ os.remove(f'{fileName}')
+
+
+taosdemoCfg = TDTaosdemoCfg()
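+
+# A minimal usage sketch (illustrative; binPath would come from the calling test):
+#     taosdemoCfg.append_sql_stb('insert_stbs', taosdemoCfg.get_template('insert_stbs'))
+#     cfgFile = taosdemoCfg.generate_insert_cfg('.', 'demo')
+#     os.system("%staosdemo -f %s -y" % (binPath, cfgFile))
+#     taosdemoCfg.drop_cfg_file(cfgFile)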
diff --git a/tests/script/general/parser/gendata.sh b/tests/script/general/parser/gendata.sh
index f56fdc34680f6fda559136a68f34ad38ed406bbd..b2074147ca0a0a4483d19192b45d875ad24a1541 100755
--- a/tests/script/general/parser/gendata.sh
+++ b/tests/script/general/parser/gendata.sh
@@ -4,3 +4,5 @@ Cur_Dir=$(pwd)
echo $Cur_Dir
echo "'2020-1-1 1:1:1','abc','device',123,'9876', 'abc', 'net', 'mno', 'province', 'city', 'al'" >> ~/data.sql
+echo "'2020-1-2 1:1:1','abc','device',123,'9876', 'abc', 'net', 'mno', 'province', 'city', 'al'" >> ~/data.sql
+echo "'2020-1-3 1:1:1','abc','device',123,'9876', 'abc', 'net', 'mno', 'province', 'city', 'al'" >> ~/data.sql
diff --git a/tests/script/general/parser/groupby.sim b/tests/script/general/parser/groupby.sim
index 124e76e85cb7451fe5f0850985c5b30f90587fee..6ae5d420d878c462477aa41c245d146dba95ce5e 100644
--- a/tests/script/general/parser/groupby.sim
+++ b/tests/script/general/parser/groupby.sim
@@ -654,53 +654,131 @@ if $data31 != @20-03-27 05:10:19.000@ then
return -1
endi
-#sql select irate(c) from st where t1="1" and ts >= '2020-03-27 04:11:17.732' and ts < '2020-03-27 05:11:17.732' interval(1m) sliding(15s) group by tbname,t1,t2;
-#if $rows != 40 then
-# return -1
-#endi
-#
-#if $data01 != 1.000000000 then
-# return -1
-#endi
-#if $data02 != t1 then
-# return -1
-#endi
-#if $data03 != 1 then
-# return -1
-#endi
-#if $data04 != 1 then
-# return -1
-#endi
-#
-#if $data11 != 1.000000000 then
-# return -1
-#endi
-#if $data12 != t1 then
-# return -1
-#endi
-#if $data13 != 1 then
-# return -1
-#endi
-#if $data14 != 1 then
-# return -1
-#endi
-#
-#sql select irate(c) from st where t1="1" and ts >= '2020-03-27 04:11:17.732' and ts < '2020-03-27 05:11:17.732' interval(1m) sliding(15s) group by tbname,t1,t2 limit 1;
-#if $rows != 2 then
-# return -1
-#endi
-#
-#if $data11 != 1.000000000 then
-# return -1
-#endi
-#if $data12 != t2 then
-# return -1
-#endi
-#if $data13 != 1 then
-# return -1
-#endi
-#if $data14 != 2 then
-# return -1
-#endi
+print ===============> stddev with group by normal column
+sql select stddev(c),c from st where t2=1 or t2=2 group by c;
+if $rows != 4 then
+ return -1
+endi
+
+if $data00 != 0.000000000 then
+ return -1
+endi
+
+if $data01 != 1 then
+ return -1
+endi
+
+if $data10 != 0.000000000 then
+ return -1
+endi
+
+if $data11 != 2 then
+ return -1
+endi
+
+if $data20 != 0.000000000 then
+ return -1
+endi
+
+if $data21 != 3 then
+ return -1
+endi
+
+if $data30 != 0.000000000 then
+ return -1
+endi
+
+if $data31 != 4 then
+ return -1
+endi
+
+sql select irate(c) from st where t1="1" and ts >= '2020-03-27 04:11:17.732' and ts < '2020-03-27 05:11:17.732' interval(1m) sliding(15s) group by tbname,t1,t2;
+if $rows != 40 then
+ return -1
+endi
+
+if $data01 != 1.000000000 then
+ return -1
+endi
+if $data02 != t1 then
+ return -1
+endi
+if $data03 != 1 then
+ return -1
+endi
+if $data04 != 1 then
+ return -1
+endi
+
+if $data11 != 1.000000000 then
+ return -1
+endi
+if $data12 != t1 then
+ return -1
+endi
+if $data13 != 1 then
+ return -1
+endi
+if $data14 != 1 then
+ return -1
+endi
+
+sql select irate(c) from st where t1="1" and ts >= '2020-03-27 04:11:17.732' and ts < '2020-03-27 05:11:17.732' interval(1m) sliding(15s) group by tbname,t1,t2 limit 1;
+if $rows != 2 then
+ return -1
+endi
+
+if $data11 != 1.000000000 then
+ return -1
+endi
+if $data12 != t2 then
+ return -1
+endi
+if $data13 != 1 then
+ return -1
+endi
+if $data14 != 2 then
+ return -1
+endi
+
+sql create table m1 (ts timestamp, k int, f1 int) tags(a int);
+sql create table tm0 using m1 tags(0);
+sql create table tm1 using m1 tags(1);
+
+sql insert into tm0 values('2020-1-1 1:1:1', 1, 10);
+sql insert into tm0 values('2020-1-1 1:1:2', 1, 20);
+sql insert into tm1 values('2020-2-1 1:1:1', 2, 10);
+sql insert into tm1 values('2020-2-1 1:1:2', 2, 20);
+
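+# restart the dnode so the TD-4894 group-by check below runs against data
+# reloaded from storage rather than only the in-memory buffer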
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
+sleep 100
+system sh/exec.sh -n dnode1 -s start
+sleep 100
+
+sql connect
+sleep 100
+sql use group_db0;
+
+print =========================>TD-4894
+sql select count(*),k from m1 group by k;
+if $rows != 2 then
+ return -1
+endi
+
+if $data00 != 2 then
+ return -1
+endi
+
+if $data01 != 1 then
+ return -1
+endi
+
+if $data10 != 2 then
+ return -1
+endi
+
+if $data11 != 2 then
+ return -1
+endi
system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/general/parser/having.sim b/tests/script/general/parser/having.sim
index ddafdd73293d75bc99380969d98c7fb986420a38..a8d2102befeabf70d70e3a361ad5e933f021ce4a 100644
--- a/tests/script/general/parser/having.sim
+++ b/tests/script/general/parser/having.sim
@@ -1,6 +1,6 @@
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
-system sh/cfg.sh -n dnode1 -c walLevel -v 0
+system sh/cfg.sh -n dnode1 -c walLevel -v 1
system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 2
system sh/exec.sh -n dnode1 -s start
diff --git a/tests/script/general/parser/import_file.sim b/tests/script/general/parser/import_file.sim
index e9f0f1ed085cc75238681dc08b9601a8d591f6c4..cf11194ba7c3b805725a665c6f92d6bb465b9e4e 100644
--- a/tests/script/general/parser/import_file.sim
+++ b/tests/script/general/parser/import_file.sim
@@ -15,6 +15,8 @@ $inFileName = '~/data.csv'
$numOfRows = 10000
system general/parser/gendata.sh
+sql create table stbx (ts TIMESTAMP, collect_area NCHAR(12), device_id BINARY(16), imsi BINARY(16), imei BINARY(16), mdn BINARY(10), net_type BINARY(4), mno NCHAR(4), province NCHAR(10), city NCHAR(16), alarm BINARY(2)) tags(a int, b binary(12));
+
sql create table tbx (ts TIMESTAMP, collect_area NCHAR(12), device_id BINARY(16), imsi BINARY(16), imei BINARY(16), mdn BINARY(10), net_type BINARY(4), mno NCHAR(4), province NCHAR(10), city NCHAR(16), alarm BINARY(2))
print ====== create tables success, starting import data
@@ -23,13 +25,48 @@ sql import into tbx file '~/data.sql'
sql select count(*) from tbx
if $rows != 1 then
+ print expect 1, actual: $rows
+ return -1
+endi
+
+if $data00 != 3 then
+ return -1
+endi
+
+sql drop table tbx;
+
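+# the same file is imported twice; rows must not be duplicated (count stays 3)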
+sql insert into tbx using stbx tags(1,'abc') file '~/data.sql';
+sql insert into tbx using stbx tags(1,'abc') file '~/data.sql';
+
+sql select count(*) from tbx
+if $rows != 1 then
+ return -1
+endi
+
+if $data00 != 3 then
return -1
endi
-#if $data00 != $numOfRows then
-# print "expect: $numOfRows, act: $data00"
-# return -1
-#endi
+sql drop table tbx;
+sql insert into tbx using stbx(b) tags('abcf') file '~/data.sql';
+
+sql select ts,a,b from tbx;
+if $rows != 3 then
+ return -1
+endi
+
+if $data00 != @20-01-01 01:01:01.000@ then
+ print expect 20-01-01 01:01:01.000 , actual: $data00
+ return -1
+endi
+
+if $data01 != NULL then
+ return -1
+endi
+
+if $data02 != @abcf@ then
+ return -1
+endi
system rm -f ~/data.sql
diff --git a/tests/script/general/parser/nestquery.sim b/tests/script/general/parser/nestquery.sim
index 8249d9197f55998ae26cb6dd232b6a701bf0a32c..fd56a91dd679bc52850520693dca41b66e475edc 100644
--- a/tests/script/general/parser/nestquery.sim
+++ b/tests/script/general/parser/nestquery.sim
@@ -180,20 +180,82 @@ if $data21 != 49.500000000 then
endi
#define TSDB_FUNC_APERCT 7
-#define TSDB_FUNC_LAST_ROW 10
#define TSDB_FUNC_TWA 14
#define TSDB_FUNC_LEASTSQR 15
-#define TSDB_FUNC_ARITHM 23
#define TSDB_FUNC_DIFF 24
#define TSDB_FUNC_INTERP 28
-#define TSDB_FUNC_RATE 29
#define TSDB_FUNC_IRATE 30
#define TSDB_FUNC_DERIVATIVE 32
sql_error select stddev(c1) from (select c1 from nest_tb0);
sql_error select percentile(c1, 20) from (select * from nest_tb0);
+sql_error select interp(c1) from (select * from nest_tb0);
+sql_error select derivative(val, 1s, 0) from (select c1 val from nest_tb0);
+sql_error select twa(c1) from (select c1 from nest_tb0);
+sql_error select irate(c1) from (select c1 from nest_tb0);
+sql_error select diff(c1), twa(c1) from (select * from nest_tb0);
+sql_error select irate(c1), interp(c1), twa(c1) from (select * from nest_tb0);
+
+sql select apercentile(c1, 50) from (select * from nest_tb0) interval(1d)
+sql select twa(c1) from (select * from nest_tb0);
+sql select leastsquares(c1, 1, 1) from (select * from nest_tb0);
+sql select irate(c1) from (select * from nest_tb0);
sql select avg(c1),sum(c2), max(c3), min(c4), count(*), first(c7), last(c7),spread(c6) from (select * from nest_tb0) interval(1d);
+if $rows != 7 then
+ return -1
+endi
+
+if $data00 != @20-09-15 00:00:00.000@ then
+ return -1
+endi
+
+if $data01 != 48.666666667 then
+ print expect 48.666666667, actual: $data01
+ return -1
+endi
+
+if $data02 != 70080.000000000 then
+ return -1
+endi
+
+if $data03 != 99 then
+ return -1
+endi
+
+if $data04 != 0 then
+ return -1
+endi
+
+if $data05 != 1440 then
+ return -1
+endi
+
+if $data06 != 0 then
+ print $data06
+ return -1
+endi
+
+if $data07 != 1 then
+ return -1
+endi
+
+if $data08 != 99.000000000 then
+ print expect 99.000000000, actual: $data08
+ return -1
+endi
+
+if $data10 != @20-09-16 00:00:00.000@ then
+ return -1
+endi
+
+if $data11 != 49.777777778 then
+ return -1
+endi
+
+if $data12 != 71680.000000000 then
+ return -1
+endi
sql select top(x, 20) from (select c1 x from nest_tb0);
@@ -207,6 +269,9 @@ print ===================> group by + having
+print =========================> ascending order/descending order
+
+
print =========================> nest query join
@@ -273,7 +338,6 @@ if $data03 != @20-09-15 00:00:00.000@ then
return -1
endi
-sql_error select derivative(val, 1s, 0) from (select c1 val from nest_tb0);
sql select diff(val) from (select c1 val from nest_tb0);
if $rows != 9999 then
return -1
diff --git a/tests/script/general/parser/select_with_tags.sim b/tests/script/general/parser/select_with_tags.sim
index 45f6f5c49fbe48f6b881e4f03c3ed1af18a4b1a9..f5c94d2ae6d643d987176c845a9803fe8336848f 100644
--- a/tests/script/general/parser/select_with_tags.sim
+++ b/tests/script/general/parser/select_with_tags.sim
@@ -68,6 +68,27 @@ endw
sleep 100
+
+#======================= only check first table tag, TD-4827
+sql select count(*) from $mt where t1 in (0)
+if $rows != 1 then
+ return -1
+endi
+if $data00 != $rowNum then
+ return -1
+endi
+
+$secTag = ' . abc
+$secTag = $secTag . 0
+$secTag = $secTag . '
+sql select count(*) from $mt where t2 =$secTag and t1 in (0)
+if $rows != 1 then
+ return -1
+endi
+if $data00 != $rowNum then
+ return -1
+endi
+
#================================
sql select ts from select_tags_mt0
print $rows
diff --git a/tests/script/general/parser/testSuite.sim b/tests/script/general/parser/testSuite.sim
index 545e19edecba723685d1d5a0e2b4b1506c298b4b..5f711389662f16e660d6fdc88a2518b6d4221efc 100644
--- a/tests/script/general/parser/testSuite.sim
+++ b/tests/script/general/parser/testSuite.sim
@@ -63,4 +63,3 @@ run general/parser/between_and.sim
run general/parser/last_cache.sim
run general/parser/nestquery.sim
run general/parser/precision_ns.sim
-
diff --git a/tests/script/general/parser/where.sim b/tests/script/general/parser/where.sim
index 00a22eede6a584e99c80bc80919ff04d4f80790b..d20f013a8e7cfa36cc4de05fd94cff83480149ad 100644
--- a/tests/script/general/parser/where.sim
+++ b/tests/script/general/parser/where.sim
@@ -139,18 +139,18 @@ sql_error select * from $mt where c1 like 1
sql create table wh_mt1 (ts timestamp, c1 smallint, c2 int, c3 bigint, c4 float, c5 double, c6 tinyint, c7 binary(10), c8 nchar(10), c9 bool, c10 timestamp) tags (t1 binary(10), t2 smallint, t3 int, t4 bigint, t5 float, t6 double)
sql create table wh_mt1_tb1 using wh_mt1 tags ('tb11', 1, 1, 1, 1, 1)
sql insert into wh_mt1_tb1 values (now, 1, 1, 1, 1, 1, 1, 'binary', 'nchar', true, '2019-01-01 00:00:00.000')
-sql_error select last(*) from wh_mt1 where c1 in ('1')
-sql_error select last(*) from wh_mt1_tb1 where c1 in ('1')
-sql_error select last(*) from wh_mt1 where c2 in ('1')
-sql_error select last(*) from wh_mt1_tb1 where c2 in ('1')
-sql_error select last(*) from wh_mt1 where c3 in ('1')
-sql_error select last(*) from wh_mt1_tb1 where c3 in ('1')
-sql_error select last(*) from wh_mt1 where c4 in ('1')
-sql_error select last(*) from wh_mt1_tb1 where c4 in ('1')
-sql_error select last(*) from wh_mt1 where c5 in ('1')
-sql_error select last(*) from wh_mt1_tb1 where c5 in ('1')
-sql_error select last(*) from wh_mt1 where c6 in ('1')
-sql_error select last(*) from wh_mt1_tb1 where c6 in ('1')
+#sql_error select last(*) from wh_mt1 where c1 in ('1')
+#sql_error select last(*) from wh_mt1_tb1 where c1 in ('1')
+#sql_error select last(*) from wh_mt1 where c2 in ('1')
+#sql_error select last(*) from wh_mt1_tb1 where c2 in ('1')
+#sql_error select last(*) from wh_mt1 where c3 in ('1')
+#sql_error select last(*) from wh_mt1_tb1 where c3 in ('1')
+#sql_error select last(*) from wh_mt1 where c4 in ('1')
+#sql_error select last(*) from wh_mt1_tb1 where c4 in ('1')
+#sql_error select last(*) from wh_mt1 where c5 in ('1')
+#sql_error select last(*) from wh_mt1_tb1 where c5 in ('1')
+#sql_error select last(*) from wh_mt1 where c6 in ('1')
+#sql_error select last(*) from wh_mt1_tb1 where c6 in ('1')
#sql_error select last(*) from wh_mt1 where c7 in ('binary')
#sql_error select last(*) from wh_mt1_tb1 where c7 in ('binary')
#sql_error select last(*) from wh_mt1 where c8 in ('nchar')
@@ -352,5 +352,18 @@ if $rows != 0 then
return -1
endi
-
+print ==========================> td-4783
+sql create table where_ts(ts timestamp, f int)
+sql insert into where_ts values('2021-06-19 16:22:00', 1);
+sql insert into where_ts values('2021-06-19 16:23:00', 2);
+sql insert into where_ts values('2021-06-19 16:24:00', 3);
+sql insert into where_ts values('2021-06-19 16:25:00', 1);
+sql select * from (select * from where_ts) where ts<'2021-06-19 16:25:00' and ts>'2021-06-19 16:22:00'
+if $rows != 2 then
+ return -1
+endi
+print $data00, $data01
+if $data01 != 2 then
+ return -1
+endi
system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/tsim/src/simMain.c b/tests/tsim/src/simMain.c
index 6a9d96bc3b2e6056c73fad73b1fa9f1b7dee6cbf..7d74c62c7daf391fed1bf1afac233f51b84c8f0b 100644
--- a/tests/tsim/src/simMain.c
+++ b/tests/tsim/src/simMain.c
@@ -35,7 +35,7 @@ int32_t main(int32_t argc, char *argv[]) {
for (int32_t i = 1; i < argc; ++i) {
if (strcmp(argv[i], "-c") == 0 && i < argc - 1) {
- tstrncpy(configDir, argv[++i], MAX_FILE_NAME_LEN);
+ tstrncpy(configDir, argv[++i], 128);
} else if (strcmp(argv[i], "-f") == 0 && i < argc - 1) {
strcpy(scriptFile, argv[++i]);
} else if (strcmp(argv[i], "-a") == 0) {
@@ -75,4 +75,4 @@ int32_t main(int32_t argc, char *argv[]) {
simInfo("execute result %d", ret);
return ret;
-}
\ No newline at end of file
+}